update: Added Longhorn installation process and updated memory allocation for VMs

update: Added 'git' and 'vagrant' to required tools in pre-flight checks

fix: configured k3s install to use internal nic for flannel network

update: Added Longhorn installation process and updated memory allocation for VMs

update: Added 'git' and 'vagrant' to required tools in pre-flight checks

fix: configured k3s install to use internal nic for flannel network

fix: corrected JSON formatting for config json

update: reduce VM memory allocation to 2GB, add Longhorn installation scripts and prerequisites, and implement checks for existing pods

fix: merge issues

fix: merge issues

update: Added Longhorn installation process and updated memory allocation for VMs

update: Added 'git' and 'vagrant' to required tools in pre-flight checks

fix: configured k3s install to use internal nic for flannel network

update: Added Longhorn installation process and updated memory allocation for VMs

update: Added 'git' and 'vagrant' to required tools in pre-flight checks

fix: configured k3s install to use internal nic for flannel network

fix: corrected JSON formatting for config json

update: reduce VM memory allocation to 2GB, add Longhorn installation scripts and prerequisites, and implement checks for existing pods

update: improve error logging in RunJsonDeployment and RunCommand functions

update: add jq installation to provision script

update: add version flag

bump version

fix: improve error messages for config file reading

feat: add Windows gitbash installation support and improve binary download process

clean up tmp code

fix: increase timeout for some slower windows clients

feat: add Ingress and Service configurations for nginx deployment, and implement MetalLB and Traefik installation scripts

refactor: remove obsolete Traefik installation script

feat: add environment checks and configurations for Vagrant setup, including dnsmasq, MetalLB, and ingress

feat: add deployment and installation scripts for infmon-cli, including Kubernetes configurations

feat: refactor customer project creation and add success/failure job scripts

refactor: rename customer references to project in configuration and application logic

feat: enhance JSON deployment handling with retry logic and command execution improvements

feat: enhance RunJsonDeployment with error handling and retry logic; add tests for configuration reading

feat: add automatic creation of base and config JSON files from examples if they do not exist

refactor: remove database package and related functionality; update app state initialization and error handling

refactor: update deployment handling to use ProjectConfig; improve error messages and logging

feat: enhance RunJsonDeployment retry logic with configurable delay; improve logging for retries

feat: implement LoadConfigs function for improved configuration loading; add logger setup

refactor: remove unused fields from BaseConfig and ProjectConfig structs for cleaner configuration management

refactor: clean up tests by removing obsolete functions and simplifying test cases

chore: update version to v0.0.5 in install script

feat: implement default configuration creation for BaseConfig and ProjectConfig; enhance validation logic

fix: enhance configuration parsing and loading; streamline flag handling and error reporting

refactor: remove obsolete configuration download logic from installation script
This commit is contained in:
jon brookes 2025-08-16 18:00:28 +01:00
parent d839fd5687
commit 11b1f1b637
61 changed files with 1573 additions and 761 deletions

View file

@ -0,0 +1,78 @@
---
# Replace systemd-resolved with dnsmasq on the local workstation so that the
# k3s cluster domain (dnsmasq_k3s_domain from vars.yaml) resolves to the
# cluster ingress IP, with public fallback resolvers for everything else.
- name: Install Dnsmasq on workstation
  hosts: localhost
  become: true
  become_user: root
  serial: 1  # Ensure tasks are executed one host at a time
  vars_files:
    - vars.yaml
  tasks:
    - name: Install dnsmasq
      ansible.builtin.apt:
        name: dnsmasq
        state: present

    # systemd-resolved owns port 53 and /etc/resolv.conf by default; it must
    # be stopped and disabled before dnsmasq can bind the resolver port.
    - name: Stop systemd-resolved
      ansible.builtin.systemd:
        name: systemd-resolved
        state: stopped

    - name: Disable systemd-resolved
      ansible.builtin.systemd:
        name: systemd-resolved
        enabled: false

    - name: Check to see if /etc/resolv.conf is a symlink
      ansible.builtin.stat:
        path: /etc/resolv.conf
      register: resolv_conf

    - name: Remove /etc/resolv.conf if it is a symlink
      ansible.builtin.file:
        path: /etc/resolv.conf
        state: absent
      # stat.islnk is only defined when the path exists; guard both so the
      # task does not error on a host where /etc/resolv.conf is missing.
      when: resolv_conf.stat.exists and resolv_conf.stat.islnk

    - name: Ensure /etc/resolv.conf is a regular file
      ansible.builtin.file:
        path: /etc/resolv.conf
        state: touch
        owner: root
        group: root
        mode: "0644"
        # Preserve timestamps so this task is idempotent and does not
        # report "changed" on every playbook run.
        access_time: preserve
        modification_time: preserve

    - name: Ensure /etc/resolv.conf uses 127.0.0.1 for server
      ansible.builtin.lineinfile:
        path: /etc/resolv.conf
        regexp: '^nameserver'
        line: 'nameserver 127.0.0.1'
        state: present

    # dnsmasq_k3s_domain is expected to be "domain/ip" (see its default in
    # vars.yaml), which completes the dnsmasq address=/domain/ip directive.
    - name: Configure dnsmasq
      ansible.builtin.copy:
        dest: /etc/dnsmasq.d/k3s-cluster.conf
        content: |
          address=/{{ dnsmasq_k3s_domain }}
          server=1.1.1.1
          server=8.8.8.8
        owner: root
        group: root
        mode: "0644"
      notify: Restart dnsmasq

    - name: Ensure conf-dir is uncommented in /etc/dnsmasq.conf
      ansible.builtin.lineinfile:
        path: /etc/dnsmasq.conf
        regexp: '^#?conf-dir=/etc/dnsmasq.d'
        line: 'conf-dir=/etc/dnsmasq.d'
        state: present
        owner: root
        group: root
        mode: '0644'
      # Uncommenting conf-dir only takes effect after a restart, same as the
      # drop-in config above.
      notify: Restart dnsmasq

  handlers:
    - name: Restart dnsmasq
      ansible.builtin.systemd:
        name: dnsmasq
        state: restarted

View file

@ -55,7 +55,7 @@
- name: Install k3s on first node
ansible.builtin.shell: |
set -o pipefail
K3S_TOKEN=$(cat /opt/k3s-token) /bin/bash /tmp/k3s_install.sh server --cluster-init --disable traefik --disable servicelb --tls-san {{ k3s_url_ip }} --node-name vm1 --node-ip {{ vm1_ip }}
K3S_TOKEN=$(cat /opt/k3s-token) /bin/bash /tmp/k3s_install.sh server --cluster-init --disable traefik --disable servicelb --tls-san {{ k3s_url_ip }} --node-name vm1 --node-ip {{ vm1_ip }} --flannel-iface=enp0s8
if [ $? -eq 0 ]; then
mkdir -p /home/vagrant/.kube && cp /etc/rancher/k3s/k3s.yaml /home/vagrant/.kube/config && chown vagrant:vagrant /home/vagrant/.kube/config
fi
@ -91,7 +91,7 @@
{% endif %}
K3S_URL=https://{{ k3s_url_ip }}:6443 \
K3S_TOKEN={{ k3s_token_content.stdout }} \
INSTALL_K3S_EXEC="server --server https://{{ k3s_url_ip }}:6443 --disable traefik --disable servicelb --node-name={{ inventory_hostname }} --node-ip ${NODE_IP}" \
INSTALL_K3S_EXEC="server --server https://{{ k3s_url_ip }}:6443 --disable traefik --disable servicelb --node-name={{ inventory_hostname }} --node-ip ${NODE_IP} --flannel-iface=enp0s8" \
/bin/bash /tmp/k3s_install.sh 2>&1
exit_code=$?
if [ $exit_code -ne 0 ]; then

View file

@ -0,0 +1,47 @@
---
- name: Install k3s on 3-node cluster
hosts: vm1,vm2,vm3
become: true
become_user: root
serial: 1 # Ensure tasks are executed one host at a time
vars_files:
- vars.yaml
tasks:
- name: Install open-iscsi on all nodes
ansible.builtin.package:
name: open-iscsi
state: present
- name: Install nfs-common on all nodes
ansible.builtin.package:
name: nfs-common
state: present
- name: Install cryptsetup and dmsetup packages
ansible.builtin.package:
name:
- cryptsetup
- dmsetup
state: present
- name: Load dm_crypt kernel module
community.general.modprobe:
name: dm_crypt
state: present
- name: Make dm_crypt module load on boot
ansible.builtin.lineinfile:
path: /etc/modules
line: dm_crypt
create: yes
- name: Check if dm_crypt module is loaded
ansible.builtin.shell: lsmod | grep dm_crypt
register: dm_crypt_check
failed_when: false
changed_when: false
- name: Show dm_crypt status
ansible.builtin.debug:
msg: "dm_crypt module is {{ 'loaded' if dm_crypt_check.rc == 0 else 'not loaded' }}"

View file

@ -1,18 +1,27 @@
#!/usr/bin/env bash
sudo apt-get update
sudo apt-get install -y software-properties-common git vim python3.10-venv
sudo apt-get update
sudo apt-get install -y software-properties-common git vim python3.10-venv jq figlet
source /vagrant/.envrc
# Set up ansible environment for vagrant user
sudo -u vagrant mkdir -p /home/vagrant/.ansible
sudo -u vagrant touch /home/vagrant/.ansible/ansible.cfg
# Create workspace and SSH directories
sudo -u vagrant mkdir -p /home/vagrant/ansible
sudo -u vagrant mkdir -p /home/vagrant/.ssh
sudo chmod 700 /home/vagrant/.ssh
# create directories and copy files to /home/vagrant
mkdir -p /home/vagrant/{ansible,scripts,pipelines,k8s}
sudo cp -r /vagrant/ansible/* /home/vagrant/ansible/
sudo cp -r /vagrant/scripts/* /home/vagrant/scripts/
sudo cp -r /vagrant/pipelines/* /home/vagrant/pipelines
sudo cp -r /vagrant/k8s/* /home/vagrant/k8s
sudo chmod +x /home/vagrant/pipelines/*.sh
# Copy the Vagrant private keys (these will be synced by Vagrant)
for i in {1..3}; do
sudo -u vagrant cp /vagrant/.vagrant/machines/vm$i/virtualbox/private_key /home/vagrant/.ssh/vm${i}_key
@ -82,7 +91,6 @@ if [ $? -ne 0 ]; then
exit 1
fi
cp -r /vagrant/ansible/* /home/vagrant/ansible/
eval `ssh-agent -s`
ssh-add # ~/machines/*/virtualbox/private_key
@ -98,12 +106,26 @@ if ! grep -qF "$BLOCK_START" "$BASHRC"; then
eval `ssh-agent -s`
ssh-add ~/machines/*/virtualbox/private_key
ssh-add -L
source /vagrant/.envrc
EOF
else
echo "Provisioning block already present in $BASHRC"
fi
ANSIBLE_HOST_KEY_CHECKING=False ansible --inventory-file /home/vagrant/ansible/ansible_inventory.ini -m ping vm1,vm2,vm3
echo
echo -------------------------
echo
su - vagrant
id
echo
echo -------------------------
echo
ssh-add ~/.ssh/vm*_key
ANSIBLE_SUPPRESS_INTERPRETER_DISCOVERY_WARNING=1 ANSIBLE_HOST_KEY_CHECKING=False ansible --inventory-file /home/vagrant/ansible/ansible_inventory.ini -m ping vm1,vm2,vm3
if [ $? -ne 0 ]; then
echo "Ansible ping failed. Please check your Vagrant VMs and network configuration."
@ -111,7 +133,7 @@ if [ $? -ne 0 ]; then
fi
# install_keepalived.yaml
ANSIBLE_HOST_KEY_CHECKING=False ansible-playbook install_keepalived.yaml --inventory-file ansible_inventory.ini
ANSIBLE_SUPPRESS_INTERPRETER_DISCOVERY_WARNING=1 ANSIBLE_HOST_KEY_CHECKING=False ansible-playbook install_keepalived.yaml --inventory-file ansible_inventory.ini
if [ $? -ne 0 ]; then
echo "Ansible playbook failed. Please check your Vagrant VMs and network configuration."
exit 1
@ -119,17 +141,32 @@ fi
echo "Keepalived installation completed."
# install_k3s_3node.yaml
ANSIBLE_HOST_KEY_CHECKING=False ansible-playbook install_k3s_3node.yaml --inventory-file ansible_inventory.ini
ANSIBLE_SUPPRESS_INTERPRETER_DISCOVERY_WARNING=1 ANSIBLE_HOST_KEY_CHECKING=False ansible-playbook install_k3s_3node.yaml --inventory-file ansible_inventory.ini
if [ $? -ne 0 ]; then
echo "Ansible playbook failed. Please check your Vagrant VMs and network configuration."
exit 1
fi
# copy_k8s_config.yaml
ANSIBLE_HOST_KEY_CHECKING=False ansible-playbook copy_k8s_config.yaml --inventory-file ansible_inventory.ini
ANSIBLE_SUPPRESS_INTERPRETER_DISCOVERY_WARNING=1 ANSIBLE_HOST_KEY_CHECKING=False ansible-playbook copy_k8s_config.yaml --inventory-file ansible_inventory.ini
if [ $? -ne 0 ]; then
echo "Ansible playbook failed. Please check your Vagrant VMs and network configuration."
exit 1
fi
ANSIBLE_SUPPRESS_INTERPRETER_DISCOVERY_WARNING=1 ANSIBLE_HOST_KEY_CHECKING=False ansible-playbook install_dnsmasq.yaml --inventory-file ansible_inventory.ini
if [ $? -ne 0 ]; then
echo "Ansible playbook failed. Please check your Vagrant VMs and network configuration."
exit 1
fi
# check infctl
cd /home/vagrant
bash /home/vagrant/scripts/check_install_infctl.sh
if [ $? -ne 0 ]; then
echo "infctl check failed. Please check your installation."
exit 1
fi

View file

@ -7,6 +7,8 @@ k3s_url_ip: "{{ lookup('env', 'K3S_URL_IP') | default('192.168.56.250', true) }}
workstation_ip: "{{ lookup('env', 'WORKSTATION_IP') | default('192.168.56.10', true) }}"
network_prefix: "{{ lookup('env', 'VAGRANT_NETWORK_PREFIX') | default('192.168.56', true) }}"
dnsmasq_k3s_domain: "{{ lookup('env', 'DNSMASQ_K3S_DOMAIN') | default('headshed.it/192.168.56.230', true) }}"
# K3s configuration
k3s_cluster_name: "dev-cluster"
k3s_token_file: "/opt/k3s-token"