feat: add Ingress and Service configurations for nginx deployment, and implement MetalLB and Traefik installation scripts

refactor: remove obsolete Traefik installation script

feat: add environment checks and configurations for Vagrant setup, including dnsmasq, MetalLB, and ingress
This commit is contained in:
jon brookes 2025-08-23 15:05:26 +01:00
parent bd222ce39e
commit b2b028a16c
19 changed files with 375 additions and 147 deletions

View file

@ -1,10 +1,9 @@
export VAGRANT_BRIDGE='Intel(R) Ethernet Connection (16) I219-V' # export VAGRANT_BRIDGE='Intel(R) Ethernet Connection (16) I219-V'
# Network configuration for Vagrant/Ansible
export WORKSTATION_IP="192.168.56.10" export WORKSTATION_IP="192.168.56.10"
export VM1_IP="192.168.56.80" export VM1_IP="192.168.56.80"
export VM2_IP="192.168.56.81" export VM2_IP="192.168.56.81"
export VM3_IP="192.168.56.82" export VM3_IP="192.168.56.82"
export VAGRANT_NETWORK_PREFIX="192.168.56" export VAGRANT_NETWORK_PREFIX="192.168.56"
export K3S_URL_IP="192.168.56.250" export K3S_URL_IP="192.168.56.250"
export METALLB_IP_RANGE="192.168.56.230-192.168.56.240"

1
.gitignore vendored
View file

@ -22,3 +22,4 @@ scripts/ansible_inventory.ini
scripts/ansible_inventory.ini scripts/ansible_inventory.ini
vagrant/dev/ubuntu/ansible/ansible_inventory.ini vagrant/dev/ubuntu/ansible/ansible_inventory.ini
*.cast *.cast
vagrant/dev/ubuntu/certs/

View file

@ -1,5 +1,13 @@
[ [
{
"name": "Checks for .envrc",
"function": "RunCommand",
"params": [
"./scripts/envrc_checks.sh"
],
"retryCount": 0,
"shouldAbort": true
},
{ {
"name": "Create Vagrant nodes", "name": "Create Vagrant nodes",
"function": "RunCommand", "function": "RunCommand",
@ -9,7 +17,6 @@
"retryCount": 0, "retryCount": 0,
"shouldAbort": true "shouldAbort": true
}, },
{ {
"name": "Configure Vagrant K3s", "name": "Configure Vagrant K3s",
"function": "RunCommand", "function": "RunCommand",
@ -19,8 +26,6 @@
"retryCount": 0, "retryCount": 0,
"shouldAbort": true "shouldAbort": true
}, },
{ {
"name": "Create Vagrant workstation", "name": "Create Vagrant workstation",
"function": "RunCommand", "function": "RunCommand",

22
scripts/envrc_checks.sh Executable file
View file

@ -0,0 +1,22 @@
#!/usr/bin/env bash
# Ensure a .envrc exists in the repo root and is synced into the Vagrant
# directory:
#   - missing .envrc  -> bootstrap it from .envrc.example
#   - existing .envrc -> copy it to vagrant/dev/ubuntu/.envrc for the guest
# Exits non-zero on any copy failure so callers (run-config steps) can abort.
set -euo pipefail

# check if an .envrc file exists
if [ ! -f .envrc ]; then
  echo ".envrc file not found"
  # Test the command directly instead of the 'cp; [ $? -eq 0 ]' anti-pattern.
  if cp .envrc.example .envrc; then
    echo ".envrc file created from .envrc.example"
  else
    echo "Failed to create .envrc file" >&2
    exit 1
  fi
else
  echo ".envrc file found"
  if cp .envrc vagrant/dev/ubuntu/.envrc; then
    echo ".envrc file synced to vagrant/dev/ubuntu/.envrc"
  else
    echo "Failed to sync .envrc file" >&2
    exit 1
  fi
fi

View file

@ -1,135 +0,0 @@
#!/usr/bin/env bash
# Install or upgrade Traefik in the kube-system namespace via Helm.
# The chart values (default ingress class, TURN tcp/udp entrypoints on 1194,
# insecure dev dashboard, LoadBalancer service) are written ONCE to a secure
# temp file and shared by both the install and upgrade paths — the original
# duplicated the 55-line heredoc verbatim in each branch and left a
# predictable /tmp/traefik-values.yaml behind.
set -euo pipefail

VALUES_FILE=$(mktemp)
trap 'rm -f "$VALUES_FILE"' EXIT

cat > "$VALUES_FILE" <<EOF
ingressClass:
  enabled: true
  isDefaultClass: true
ports:
  web:
    port: 80
  websecure:
    port: 443
  traefik:
    port: 9000
  turn-tcp:
    port: 1194
    exposedPort: 1194
    protocol: TCP
  turn-udp:
    port: 1194
    exposedPort: 1194
    protocol: UDP
entryPoints:
  turn-tcp:
    address: ":1194/tcp"
  turn-udp:
    address: ":1194/udp"
api:
  dashboard: true
  insecure: true
ingressRoute:
  dashboard:
    enabled: true
ping: true
log:
  level: INFO
# Service section exposes the ports properly via the LoadBalancer
service:
  enabled: true
  type: LoadBalancer
  annotations: {}
  ports:
    web:
      port: 80
      protocol: TCP
      targetPort: web
    websecure:
      port: 443
      protocol: TCP
      targetPort: websecure
    turn-tcp:
      port: 1194
      protocol: TCP
      targetPort: turn-tcp
    turn-udp:
      port: 1194
      protocol: UDP
      targetPort: turn-udp
EOF

if kubectl -n kube-system get pods --no-headers 2>/dev/null | grep -q 'traefik'; then
  echo "Traefik is already running in the 'kube-system' namespace. Upgrading instead."
  helm upgrade traefik traefik/traefik --namespace kube-system -f "$VALUES_FILE"
else
  echo "Installing Traefik..."
  helm repo add traefik https://traefik.github.io/charts
  helm repo update
  helm install traefik traefik/traefik --namespace kube-system -f "$VALUES_FILE"
fi

echo "To access the dashboard:"
echo "kubectl port-forward -n kube-system \$(kubectl get pods -n kube-system -l \"app.kubernetes.io/name=traefik\" -o name) 9000:9000"
echo "Then visit http://localhost:9000/dashboard/ in your browser"

View file

@ -12,3 +12,5 @@ cd "$VAGRANT_DIR" || {
vagrant up workstation vagrant up workstation

View file

@ -1 +0,0 @@
export VAGRANT_BRIDGE=<preferred interface to bridge to>

View file

@ -0,0 +1,78 @@
---
# Install and configure dnsmasq on the workstation so the k3s wildcard
# domain resolves locally, replacing systemd-resolved as the stub resolver.
- name: Install Dnsmasq on workstation
  hosts: localhost
  become: true
  become_user: root
  serial: 1 # Ensure tasks are executed one host at a time
  vars_files:
    - vars.yaml
  tasks:
    - name: Install dnsmasq
      ansible.builtin.apt:
        name: dnsmasq
        state: present

    # systemd-resolved holds port 53 and would conflict with dnsmasq.
    - name: Stop systemd-resolved
      ansible.builtin.systemd:
        name: systemd-resolved
        state: stopped

    - name: Disable systemd-resolved
      ansible.builtin.systemd:
        name: systemd-resolved
        enabled: false

    - name: Check to see if /etc/resolv.conf is a symlink
      ansible.builtin.stat:
        path: /etc/resolv.conf
      register: resolv_conf

    # resolved manages /etc/resolv.conf through a symlink; remove it so the
    # playbook can own the file directly.
    - name: Remove /etc/resolv.conf if it is a symlink
      ansible.builtin.file:
        path: /etc/resolv.conf
        state: absent
      when: resolv_conf.stat.islnk

    - name: Ensure /etc/resolv.conf is a regular file
      ansible.builtin.file:
        path: /etc/resolv.conf
        state: touch
        # Preserve timestamps so repeated runs are idempotent; bare
        # 'state: touch' reports 'changed' every time.
        modification_time: preserve
        access_time: preserve

    - name: Ensure /etc/resolv.conf uses 127.0.0.1 for server
      ansible.builtin.lineinfile:
        path: /etc/resolv.conf
        regexp: '^nameserver'
        line: 'nameserver 127.0.0.1'
        state: present

    # dnsmasq_k3s_domain embeds "domain/ip" (default in vars.yaml is
    # 'headshed.it/192.168.56.230'), so the rendered line is a wildcard
    # record: 'address=/headshed.it/192.168.56.230'.
    - name: Configure dnsmasq
      ansible.builtin.copy:
        dest: /etc/dnsmasq.d/k3s-cluster.conf
        content: |
          address=/{{ dnsmasq_k3s_domain }}
          server=1.1.1.1
          server=8.8.8.8
        owner: root
        group: root
        mode: "0644"
      notify: Restart dnsmasq

    - name: Ensure conf-dir is uncommented in /etc/dnsmasq.conf
      ansible.builtin.lineinfile:
        path: /etc/dnsmasq.conf
        regexp: '^#?conf-dir=/etc/dnsmasq.d'
        line: 'conf-dir=/etc/dnsmasq.d'
        state: present
        owner: root
        group: root
        mode: '0644'

  handlers:
    - name: Restart dnsmasq
      ansible.builtin.systemd:
        name: dnsmasq
        state: restarted

View file

@ -1,7 +1,10 @@
#!/usr/bin/env bash #!/usr/bin/env bash
sudo apt-get update sudo apt-get update
sudo apt-get install -y software-properties-common git vim python3.10-venv jq sudo apt-get install -y software-properties-common git vim python3.10-venv jq figlet
source /vagrant/.envrc
# Set up ansible environment for vagrant user # Set up ansible environment for vagrant user
sudo -u vagrant mkdir -p /home/vagrant/.ansible sudo -u vagrant mkdir -p /home/vagrant/.ansible
@ -103,6 +106,7 @@ if ! grep -qF "$BLOCK_START" "$BASHRC"; then
eval `ssh-agent -s` eval `ssh-agent -s`
ssh-add ~/machines/*/virtualbox/private_key ssh-add ~/machines/*/virtualbox/private_key
ssh-add -L ssh-add -L
source ~/vagrant/.envrc
EOF EOF
else else
echo "Provisioning block already present in $BASHRC" echo "Provisioning block already present in $BASHRC"
@ -144,7 +148,13 @@ if [ $? -ne 0 ]; then
fi fi
# copy_k8s_config.yaml # copy_k8s_config.yaml
ANSIBLE_SUPPRESS_INTERPRETER_DISCOVERY_WARNING=1ANSIBLE_HOST_KEY_CHECKING=False ansible-playbook copy_k8s_config.yaml --inventory-file ansible_inventory.ini ANSIBLE_SUPPRESS_INTERPRETER_DISCOVERY_WARNING=1 ANSIBLE_HOST_KEY_CHECKING=False ansible-playbook copy_k8s_config.yaml --inventory-file ansible_inventory.ini
if [ $? -ne 0 ]; then
echo "Ansible playbook failed. Please check your Vagrant VMs and network configuration."
exit 1
fi
ANSIBLE_SUPPRESS_INTERPRETER_DISCOVERY_WARNING=1 ANSIBLE_HOST_KEY_CHECKING=False ansible-playbook install_dnsmasq.yaml --inventory-file ansible_inventory.ini
if [ $? -ne 0 ]; then if [ $? -ne 0 ]; then
echo "Ansible playbook failed. Please check your Vagrant VMs and network configuration." echo "Ansible playbook failed. Please check your Vagrant VMs and network configuration."
exit 1 exit 1
@ -158,3 +168,5 @@ if [ $? -ne 0 ]; then
exit 1 exit 1
fi fi

View file

@ -7,6 +7,8 @@ k3s_url_ip: "{{ lookup('env', 'K3S_URL_IP') | default('192.168.56.250', true) }}
workstation_ip: "{{ lookup('env', 'WORKSTATION_IP') | default('192.168.56.10', true) }}" workstation_ip: "{{ lookup('env', 'WORKSTATION_IP') | default('192.168.56.10', true) }}"
network_prefix: "{{ lookup('env', 'VAGRANT_NETWORK_PREFIX') | default('192.168.56', true) }}" network_prefix: "{{ lookup('env', 'VAGRANT_NETWORK_PREFIX') | default('192.168.56', true) }}"
dnsmasq_k3s_domain: "{{ lookup('env', 'DNSMASQ_K3S_DOMAIN') | default('headshed.it/192.168.56.230', true) }}"
# K3s configuration # K3s configuration
k3s_cluster_name: "dev-cluster" k3s_cluster_name: "dev-cluster"
k3s_token_file: "/opt/k3s-token" k3s_token_file: "/opt/k3s-token"

View file

@ -13,6 +13,14 @@ spec:
labels: labels:
app: nginx-storage app: nginx-storage
spec: spec:
initContainers:
- name: init-nginx-content
image: busybox
command: ["sh", "-c", "echo '<html><body><h1>Welcome to nginx!</h1><h2>using MVK</h2><p><a href=\"https://mvk.headshed.dev/\">https://mvk.headshed.dev/</a></p></body></html>' > /usr/share/nginx/html/index.html"]
volumeMounts:
- name: nginx-data
mountPath: /usr/share/nginx/html
containers: containers:
- name: nginx - name: nginx
image: nginx:stable image: nginx:stable

View file

@ -0,0 +1,27 @@
# Ingress routing nginx.headshed.it through Traefik with TLS termination
# from the wildcard certificate secret.
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: traefik-ingress
  namespace: default
  annotations:
    # Pin this route to the TLS entrypoint so plain-HTTP does not match.
    traefik.ingress.kubernetes.io/router.entrypoints: websecure
spec:
  # The tls block tells ingress controllers such as Traefik to terminate
  # TLS for the listed hosts using the named secret.
  tls:
    - hosts:
        - "*.headshed.it" # or a specific subdomain like test.headshed.it
      secretName: wildcard-headshed-it-tls # the secret you created
  rules:
    - host: nginx.headshed.it # the domain used to reach the service
      http:
        paths:
          - path: /
            pathType: Prefix
            backend:
              service:
                name: nginx-storage # the k8s Service for the app
                port:
                  number: 80 # the port the Service listens on

View file

@ -0,0 +1,12 @@
# ClusterIP Service exposing the nginx-storage pods on port 80 so the
# Traefik Ingress can route traffic to them.
apiVersion: v1
kind: Service
metadata:
  name: nginx-storage
  namespace: default
spec:
  selector:
    app: nginx-storage # must match the pod labels of the deployment
  ports:
    - protocol: TCP
      port: 80 # Service port
      targetPort: 80 # container port

View file

@ -0,0 +1,8 @@
# Traefik TLSStore: makes the wildcard certificate the default certificate
# served for TLS connections without a more specific match.
apiVersion: traefik.io/v1alpha1
kind: TLSStore
metadata:
  name: default
  namespace: traefik
spec:
  defaultCertificate:
    secretName: wildcard-headshed-it-tls

View file

@ -0,0 +1,29 @@
[
{
"name": "Install Helm",
"function": "RunCommand",
"params": [
"./scripts/helm_check_install.sh"
],
"retryCount": 0,
"shouldAbort": true
},
{
"name": "Install traefik",
"function": "RunCommand",
"params": [
"./scripts/install_traefik.sh"
],
"retryCount": 0,
"shouldAbort": true
},
{
"name": "Wait for Longhorn pods to come up",
"function": "RunCommand",
"params": [
"./scripts/wait_for_longhorn.sh"
],
"retryCount": 10,
"shouldAbort": true
}
]

View file

@ -0,0 +1,11 @@
[
{
"name": "Install metallb",
"function": "RunCommand",
"params": [
"./scripts/install_metallb.sh"
],
"retryCount": 0,
"shouldAbort": true
}
]

View file

@ -0,0 +1,15 @@
#!/usr/bin/env bash
# Install Helm 3 via the official installer script if it is not already on
# PATH, then print the installed version.
set -euo pipefail

# check to see if helm is installed
if ! command -v helm &> /dev/null; then
  echo "Helm is not installed. Installing it now ..."
  # -f makes curl fail on HTTP errors, and 'set -o pipefail' propagates that
  # failure out of the pipeline. The original 'curl | bash; [ $? -ne 0 ]'
  # only saw bash's status: a failed download fed bash an empty script,
  # which exited 0, so the error branch never fired.
  if ! curl -fsSL https://raw.githubusercontent.com/helm/helm/main/scripts/get-helm-3 | bash; then
    echo "Failed to install Helm." >&2
    exit 1
  fi
fi
helm version

View file

@ -0,0 +1,65 @@
#!/usr/bin/env bash
# Install MetalLB into the metallb-system namespace and create the default
# IPAddressPool / L2Advertisement that back LoadBalancer services.
#
# Requires METALLB_IP_RANGE (e.g. "192.168.56.230-192.168.56.240"),
# normally exported by /vagrant/.envrc inside the guest.
# shellcheck disable=SC1091
source /vagrant/.envrc

# Fail fast with a clear message instead of applying an IPAddressPool with
# an empty address entry later on.
: "${METALLB_IP_RANGE:?METALLB_IP_RANGE must be set (see .envrc)}"

# Check if MetalLB is already installed by looking for the controller deployment
if ! kubectl get deployment -n metallb-system controller &>/dev/null; then
  echo "Installing MetalLB..."
  # NOTE(review): this applies the manifest from the 'main' branch; pinning
  # a release tag would make provisioning reproducible — confirm intent.
  if ! kubectl apply -f https://raw.githubusercontent.com/metallb/metallb/main/config/manifests/metallb-native.yaml; then
    echo "Fatal: Failed to apply MetalLB manifest." >&2
    exit 1
  fi

  # Wait for MetalLB components to be ready
  echo "Waiting for MetalLB components to be ready..."
  kubectl wait --namespace metallb-system \
    --for=condition=ready pod \
    --selector=app=metallb \
    --timeout=90s
else
  echo "MetalLB is already installed."
fi

# The webhook must be up before the CRs below can be admitted.
echo "Waiting for MetalLB webhook service to be ready..."
kubectl wait --namespace metallb-system \
  --for=condition=ready pod \
  --selector=component=webhook \
  --timeout=90s

# Create the address pool once; re-runs leave an existing pool untouched.
if ! kubectl get ipaddresspool -n metallb-system default &>/dev/null; then
  echo "Creating MetalLB IPAddressPool..."
  cat <<EOF | kubectl apply -f -
apiVersion: metallb.io/v1beta1
kind: IPAddressPool
metadata:
  name: default
  namespace: metallb-system
spec:
  addresses:
  - ${METALLB_IP_RANGE}
EOF
else
  echo "MetalLB IPAddressPool already exists."
fi

# Advertise the pool on the local L2 segment.
if ! kubectl get l2advertisement -n metallb-system default &>/dev/null; then
  echo "Creating MetalLB L2Advertisement..."
  cat <<EOF | kubectl apply -f -
apiVersion: metallb.io/v1beta1
kind: L2Advertisement
metadata:
  name: default
  namespace: metallb-system
spec:
  ipAddressPools:
  - default
EOF
else
  echo "MetalLB L2Advertisement already exists."
fi

View file

@ -0,0 +1,68 @@
#!/usr/bin/env bash
# Install or upgrade Traefik via Helm in its own 'traefik' namespace, then
# apply the default TLSStore so Ingress resources use the wildcard cert.
# Exit immediately if a command exits with a non-zero status.
set -e

TMPFILE=$(mktemp)
trap 'rm -f "$TMPFILE"' EXIT

# Chart values: default IngressClass, dev dashboard (insecure, port 9000),
# and a LoadBalancer service exposing web/websecure.
cat > "$TMPFILE" <<EOF
ingressClass:
  enabled: true
  isDefaultClass: true
ports:
  web:
    port: 80
  websecure:
    port: 443
  traefik:
    port: 9000
api:
  dashboard: true
  insecure: true
ingressRoute:
  dashboard:
    enabled: true
ping: true
log:
  level: INFO
service:
  enabled: true
  type: LoadBalancer
  annotations: {}
  ports:
    web:
      port: 80
      protocol: TCP
      targetPort: web
    websecure:
      port: 443
      protocol: TCP
      targetPort: websecure
EOF

if helm status traefik --namespace traefik &> /dev/null; then
  echo "Traefik is already installed in the 'traefik' namespace. Upgrading..."
  helm upgrade traefik traefik/traefik --namespace traefik -f "$TMPFILE"
else
  echo "Installing Traefik..."
  helm repo add traefik https://traefik.github.io/charts
  helm repo update
  # --create-namespace makes the first run self-contained; it is a no-op if
  # the namespace already exists.
  helm install traefik traefik/traefik --namespace traefik --create-namespace -f "$TMPFILE"
fi

# Apply the TLS store configuration. The command is tested directly: under
# 'set -e' the previous 'kubectl apply; if [ $? -ne 0 ]' error branch was
# dead code because the script had already exited on failure.
if ! kubectl apply -f k8s/traefik-tlsstore.yaml; then
  echo "Failed to apply TLS store configuration." >&2
  exit 1
fi

echo
echo "To access the dashboard:"
echo "kubectl port-forward -n traefik \$(kubectl get pods -n traefik -l \"app.kubernetes.io/name=traefik\" -o name) 9000:9000"
echo "Then visit http://localhost:9000/dashboard/ in your browser"