Added Ansible playbooks for k3s and keepalived installation; updated the provisioning script

This commit is contained in:
jon brookes 2025-08-07 10:21:27 +01:00
parent b12749a3f0
commit 9ce3197c0b
4 changed files with 120 additions and 0 deletions

View file

@ -0,0 +1,112 @@
---
# Installs a 3-node HA k3s cluster (embedded etcd).
# vm1 bootstraps the cluster with --cluster-init; vm2/vm3 then join it as
# additional server nodes using the shared token generated on vm1.
# All tasks are idempotent: they are skipped once /usr/local/bin/k3s exists.
- name: Install k3s on 3-node cluster
  hosts: vm1,vm2,vm3
  become: true
  become_user: root
  serial: 1 # Ensure vm1 is fully bootstrapped before vm2/vm3 try to join
  tasks:
    - name: Check if k3s is already installed
      ansible.builtin.stat:
        path: /usr/local/bin/k3s
      register: k3s_binary
    - name: Check if k3s token file exists
      ansible.builtin.stat:
        path: /opt/k3s-token
      register: k3s_token_file
      when: inventory_hostname == 'vm1'
    - name: Generate and save k3s token if not present (first node)
      ansible.builtin.copy:
        dest: /opt/k3s-token
        # lookup('pipe', ...) executes on the controller, not the target
        content: "{{ lookup('pipe', 'head -c 16 /dev/urandom | sha256sum | cut -d\" \" -f1') }}"
        owner: root
        group: root
        mode: '0600'
        force: false # never overwrite an existing token
      register: generated_k3s_token
      when: inventory_hostname == 'vm1' and not k3s_token_file.stat.exists
    - name: Download k3s install script
      ansible.builtin.get_url:
        url: https://get.k3s.io
        dest: /tmp/k3s_install.sh
        mode: '0755'
      when: not k3s_binary.stat.exists
    - name: Ensure .kube directory exists
      ansible.builtin.file:
        # Was /home/user/.kube — every other path in this play (and the
        # kubeconfig copy below) uses the vagrant user's home.
        path: /home/vagrant/.kube
        state: directory
        owner: vagrant
        group: vagrant
        mode: '0755'
      when: inventory_hostname == 'vm1' and not k3s_binary.stat.exists
    - name: Install k3s on first node
      ansible.builtin.shell: |
        set -o pipefail
        # --write-kubeconfig-mode 644
        K3S_TOKEN=$(cat /opt/k3s-token) /bin/bash /tmp/k3s_install.sh server --cluster-init --disable traefik --disable servicelb --tls-san 192.168.56.80 --node-name vm1 --node-ip 192.168.56.80
        if [ $? -eq 0 ]; then
          # Give the vagrant user a working kubeconfig
          mkdir -p /home/vagrant/.kube && cp /etc/rancher/k3s/k3s.yaml /home/vagrant/.kube/config && chown vagrant:vagrant /home/vagrant/.kube/config
        fi
      args:
        executable: /bin/bash
        creates: /usr/local/bin/k3s
      when: inventory_hostname == 'vm1' and not k3s_binary.stat.exists
    - name: Read k3s token from master node (for subsequent nodes)
      ansible.builtin.command: cat /opt/k3s-token
      register: k3s_token_content
      delegate_to: vm1 # the token only exists on the bootstrap node
      when: inventory_hostname != 'vm1' and not k3s_binary.stat.exists
      changed_when: false # read-only command
    - name: Wait for k3s API server to be ready on master node
      ansible.builtin.wait_for:
        host: 192.168.56.80
        port: 6443
        timeout: 60
      # Runs on the joining node itself (no delegate_to needed) so it also
      # verifies network reachability from that node to the master.
      when: inventory_hostname != 'vm1' and not k3s_binary.stat.exists
    - name: Install k3s on subsequent nodes
      ansible.builtin.shell: |
        set -o pipefail
        {% if inventory_hostname == 'vm2' %}
        NODE_IP="192.168.56.81"
        {% elif inventory_hostname == 'vm3' %}
        NODE_IP="192.168.56.82"
        {% else %}
        NODE_IP="192.168.56.80"
        {% endif %}
        K3S_URL=https://192.168.56.80:6443 \
        K3S_TOKEN={{ k3s_token_content.stdout }} \
        INSTALL_K3S_EXEC="server --disable traefik --disable servicelb --node-name={{ inventory_hostname }} --node-ip ${NODE_IP}" \
        /bin/bash /tmp/k3s_install.sh 2>&1
        exit_code=$?
        if [ $exit_code -ne 0 ]; then
          echo "K3S INSTALL FAILED - Service Status:"
          systemctl status k3s.service --no-pager -l | head -20
          echo "Recent logs:"
          journalctl -u k3s.service --no-pager -l | tail -10
          exit $exit_code
        fi
      args:
        executable: /bin/bash
        creates: /usr/local/bin/k3s
      register: k3s_install_result
      # Defer failure so the debug task below can surface the captured
      # diagnostics first; the explicit fail task then stops the play.
      failed_when: false
      when: inventory_hostname != 'vm1' and not k3s_binary.stat.exists
    - name: Show k3s failure details
      ansible.builtin.debug:
        msg: "{{ k3s_install_result.stdout_lines[-30:] }}"
      when: inventory_hostname != 'vm1' and not k3s_binary.stat.exists and k3s_install_result.rc != 0
    - name: Fail if k3s installation failed
      ansible.builtin.fail:
        msg: "K3S installation failed on {{ inventory_hostname }}"
      when: inventory_hostname != 'vm1' and not k3s_binary.stat.exists and k3s_install_result.rc != 0

View file

@ -0,0 +1,79 @@
---
# Installs and configures keepalived on vm1-vm3 so they share the
# VRRP virtual IP 192.168.56.250 on the host-only network.
- name: Install keepalived on 3-node cluster
  hosts: vm1,vm2,vm3
  become: true
  become_user: root
  serial: 1 # Ensure tasks are executed one host at a time
  vars_files:
    - vault.yml
    - vars.yml
  vars:
    tailscale_host: "{{ hostvars[inventory_hostname]['tailscale_host'] }}"
  tasks:
    # Standard Ansible facts expose interface names via
    # ansible_facts['interfaces'] and per-interface details via
    # ansible_facts['<name>'] (dashes in names become underscores in fact
    # keys). The previous ansible_facts['network_interfaces'] key is not a
    # standard fact, so the detection loop could never match.
    - name: Detect interface carrying a 192.168.56.x address
      ansible.builtin.set_fact:
        keepalived_interface: "{{ item }}"
      loop: "{{ ansible_facts['interfaces'] }}"
      when:
        - ansible_facts[item | replace('-', '_')].ipv4 is defined
        - ansible_facts[item | replace('-', '_')].ipv4.address is match('192\\.168\\.56\\.')
    - name: Fallback to default interface if no match is found
      ansible.builtin.set_fact:
        keepalived_interface: enp0s8
      when: keepalived_interface is not defined
    - name: Fail if no interface is detected even after fallback
      # Defensive: unreachable while the fallback above is unconditional,
      # kept as a guard in case the fallback is ever removed.
      ansible.builtin.fail:
        msg: "No interface with the desired IP range was detected, and fallback to default interface failed."
      when: keepalived_interface is not defined
    - name: Install keepalived
      ansible.builtin.apt:
        name: keepalived
        state: present
    - name: Configure keepalived on each node with decremented priority
      # NOTE(review): assumes the inventory defines a 'vms' group listing
      # vm1-vm3 in priority order — confirm against ansible_inventory.ini.
      # NOTE(review): auth_pass is hard-coded despite vault.yml being
      # loaded; consider moving the secret into the vault.
      ansible.builtin.copy:
        dest: /etc/keepalived/keepalived.conf
        content: |
          vrrp_instance VI_1 {
              state MASTER
              interface {{ keepalived_interface }}
              virtual_router_id 51
              priority {{ 100 - (groups['vms'].index(inventory_hostname)) }}
              advert_int 1
              authentication {
                  auth_type PASS
                  auth_pass mysecret
              }
              virtual_ipaddress {
                  192.168.56.250
              }
          }
        owner: root
        group: root
        mode: "0644"
    - name: Enable and restart keepalived service
      # restarted (not started) so config changes always take effect;
      # this makes the play report changed on every run.
      ansible.builtin.systemd:
        name: keepalived
        enabled: true
        state: restarted

View file

@ -0,0 +1,120 @@
#!/usr/bin/env bash
# Provision the Ansible control node: install prerequisites, stage the
# Vagrant SSH private keys, create a Python venv with Ansible, then run
# the keepalived and k3s playbooks against vm1-vm3.
#
# NOTE(review): Vagrant provisioners run as root by default, so "~" here
# expands to /root. The original script duplicated the machines/ copy and
# chmod against both /home/vagrant and ~ — the ~ variants operated on the
# wrong home. Explicit /home/vagrant paths are used throughout instead.
sudo apt-get update
sudo apt-get install -y software-properties-common git vim python3.10-venv

# Set up ansible environment for vagrant user
sudo -u vagrant mkdir -p /home/vagrant/.ansible
sudo -u vagrant touch /home/vagrant/.ansible/ansible.cfg

# Create workspace and SSH directories
sudo -u vagrant mkdir -p /home/vagrant/ansible
sudo -u vagrant mkdir -p /home/vagrant/.ssh
sudo chmod 700 /home/vagrant/.ssh

# Copy the Vagrant private keys (these will be synced by Vagrant)
for i in {1..3}; do
    sudo -u vagrant cp /vagrant/.vagrant/machines/vm$i/virtualbox/private_key /home/vagrant/.ssh/vm${i}_key
    sudo chmod 600 /home/vagrant/.ssh/vm${i}_key
done

# Disable host key checking for easier learning
echo "[defaults]" > /home/vagrant/.ansible/ansible.cfg
echo "host_key_checking = False" >> /home/vagrant/.ansible/ansible.cfg

cp /vagrant/ansible/* /home/vagrant/ansible/
ANSIBLE_DIR=/home/vagrant/ansible
echo "Ansible directory: $ANSIBLE_DIR"
cd "$ANSIBLE_DIR" || {
  echo "Failed to change directory to script directory: $ANSIBLE_DIR"
  exit 1
}

# Create the venv and install pinned dependencies on first run only
if [ ! -d "venv" ]; then
  echo "Creating Python virtual environment in ./venv..."
  python3 -m venv venv
  source "venv/bin/activate"
  if [ $? -ne 0 ]; then
    echo "Failed to activate virtual environment. Please check your Python installation."
    exit 1
  fi
  echo "Virtual environment created and activated."
  if [ -f "requirements.txt" ]; then
    echo "Installing dependencies from requirements.txt..."
    pip install --upgrade pip
    pip install -r requirements.txt
    if [ $? -ne 0 ]; then
      echo "Failed to install dependencies from requirements.txt."
      exit 1
    fi
    echo "Dependencies installed."
  else
    echo "requirements.txt not found. Skipping dependency installation."
  fi
fi

# Always (re-)activate the venv so the ansible commands below resolve
ANSIBLE_VENV_DIR="$ANSIBLE_DIR/venv"
if [ -d "$ANSIBLE_VENV_DIR" ]; then
  echo "Activating Ansible virtual environment..."
  source "$ANSIBLE_VENV_DIR/bin/activate"
else
  echo "Ansible virtual environment not found at $ANSIBLE_VENV_DIR. Please create it before running this script."
  exit 1
fi

echo ""
ansible --version

# Stage the per-VM private keys under the vagrant home. Copy the directory
# *contents* so a re-run does not nest machines/ inside machines/.
mkdir -p /home/vagrant/machines
cp -r /vagrant/.vagrant/machines/. /home/vagrant/machines/
chmod 600 /home/vagrant/machines/*/virtualbox/private_key
chown -R vagrant:vagrant /home/vagrant/machines

# Load the keys into an agent for the ansible runs below
eval "$(ssh-agent -s)"
ssh-add /home/vagrant/machines/*/virtualbox/private_key

# Make the agent + keys available in the vagrant user's interactive shells
# (here ~ correctly expands to /home/vagrant at login time)
BASHRC="/home/vagrant/.bashrc"
BLOCK_START="# ADDED BY infctl provisioning"
if ! grep -qF "$BLOCK_START" "$BASHRC"; then
  echo "Appending provisioning block to $BASHRC"
  cat <<'EOF' >> "$BASHRC"
# ADDED BY infctl provisioning
eval `ssh-agent -s`
ssh-add ~/machines/*/virtualbox/private_key
ssh-add -L
EOF
else
  echo "Provisioning block already present in $BASHRC"
fi

echo ""
# Smoke-test connectivity before running the playbooks
ANSIBLE_HOST_KEY_CHECKING=False ansible --inventory-file ansible_inventory.ini -m ping all | cat
if [ $? -ne 0 ]; then
  echo "Ansible ping failed. Please check your Vagrant VMs and network configuration."
  exit 1
fi

# install_keepalived.yaml
ANSIBLE_HOST_KEY_CHECKING=False ansible-playbook install_keepalived.yaml --inventory-file ansible_inventory.ini | cat
if [ $? -ne 0 ]; then
  echo "Ansible playbook failed. Please check your Vagrant VMs and network configuration."
  exit 1
fi
echo "Keepalived installation completed successfully."

# install_k3s_3node.yaml
ANSIBLE_HOST_KEY_CHECKING=False ansible-playbook install_k3s_3node.yaml --inventory-file ansible_inventory.ini | cat
if [ $? -ne 0 ]; then
  echo "Ansible playbook failed. Please check your Vagrant VMs and network configuration."
  exit 1
fi
echo "K3s installation completed successfully."

View file

@ -0,0 +1,36 @@
ansible==10.2.0
ansible-compat==24.10.0
ansible-core==2.17.2
ansible-lint==24.12.2
attrs==24.3.0
black==24.10.0
bracex==2.5.post1
certifi==2024.7.4
cffi==1.16.0
charset-normalizer==3.3.2
click==8.1.8
cryptography==43.0.0
filelock==3.16.1
idna==3.7
importlib_metadata==8.5.0
Jinja2==3.1.4
jsonschema==4.23.0
jsonschema-specifications==2024.10.1
MarkupSafe==2.1.5
mypy-extensions==1.0.0
packaging==24.1
pathspec==0.12.1
platformdirs==4.3.6
pycparser==2.22
PyYAML==6.0.1
referencing==0.35.1
requests==2.32.3
resolvelib==1.0.1
rpds-py==0.22.3
ruamel.yaml==0.18.10
ruamel.yaml.clib==0.2.12
subprocess-tee==0.4.2
urllib3==2.2.2
wcmatch==10.0
yamllint==1.35.1
zipp==3.21.0