update: Added Longhorn installation process and updated memory allocation for VMs

update: Added 'git' and 'vagrant' to required tools in pre-flight checks

fix: configured k3s install to use internal NIC for flannel network

update: Added Longhorn installation process and updated memory allocation for VMs

update: Added 'git' and 'vagrant' to required tools in pre-flight checks

fix: configured k3s install to use internal NIC for flannel network

fix: corrected JSON formatting for config json

update: reduce VM memory allocation to 2GB, add Longhorn installation scripts and prerequisites, and implement checks for existing pods

update: improve error logging in RunJsonDeployment and RunCommand functions

update: add jq installation to provision script

update: add version flag
This commit is contained in:
jon brookes 2025-08-16 18:00:28 +01:00
parent d839fd5687
commit 636a0494ac
20 changed files with 385 additions and 21 deletions

View file

@ -66,12 +66,13 @@ func (app *AppState) RunJsonDeployment() []PipelineStep {
jsonFile := app.Config.DeploymentFile jsonFile := app.Config.DeploymentFile
if jsonFile == "" { if jsonFile == "" {
log.Fatal("no config specified with --deployment-file=<path_to_config_file>") log.Fatal("no config specified with [-f|--deployment-file]=<path_to_config_file>")
} }
file, err := os.Open(jsonFile) file, err := os.Open(jsonFile)
if err != nil { if err != nil {
slog.Error(fmt.Sprintf("Failed to open JSON file: %s", err)) slog.Error(fmt.Sprintf("Failed to open JSON file: %s", err))
os.Exit(1)
} }
defer file.Close() defer file.Close()
@ -212,8 +213,6 @@ func (app *AppState) CreatePipeline() error {
slog.Info(fmt.Sprintln("Project name added:", app.Customer.Project)) slog.Info(fmt.Sprintln("Project name added:", app.Customer.Project))
fmt.Printf("Port number assigned: %d\n", port) fmt.Printf("Port number assigned: %d\n", port)
app.Config.Port = port app.Config.Port = port
} else {
slog.Info(fmt.Sprintln("Project name already exists:", app.Customer.Project))
} }
err = app.SetUpNewCustomer() err = app.SetUpNewCustomer()

View file

@ -102,7 +102,7 @@ func RunCommand(command string) error {
for scanner.Scan() { for scanner.Scan() {
line := scanner.Text() line := scanner.Text()
stderr.WriteString(line + "\n") stderr.WriteString(line + "\n")
slog.Error(line) slog.Info(line)
} }
}() }()

View file

@ -15,5 +15,5 @@
"nginx_conf": "path_to/conf.d", "nginx_conf": "path_to/conf.d",
"admin_url": "admin_url.headshed.dev", "admin_url": "admin_url.headshed.dev",
"preview_url": "app-prv.headshed.dev", "preview_url": "app-prv.headshed.dev",
"ui_url": "ww2.headshed.dev", "ui_url": "ww2.headshed.dev"
} }

View file

@ -7,6 +7,8 @@ import (
"os" "os"
) )
const Version = "v0.0.4"
type BaseConfig struct { type BaseConfig struct {
ProjectsDirectory string `json:"projects_directory"` ProjectsDirectory string `json:"projects_directory"`
Env string `json:"env"` Env string `json:"env"`
@ -38,6 +40,8 @@ func ReadBaseConfig(path string) (BaseConfig, error) {
deploymentFileShorthand := flag.String("f", "", "shorthand for -deployment-file") deploymentFileShorthand := flag.String("f", "", "shorthand for -deployment-file")
helpFlag := flag.Bool("help", false, "show help") helpFlag := flag.Bool("help", false, "show help")
versionFlag := flag.Bool("version", false, "show version")
vFlag := flag.Bool("v", false, "show version (shorthand)")
flag.Parse() flag.Parse()
if *helpFlag { if *helpFlag {
@ -46,6 +50,12 @@ func ReadBaseConfig(path string) (BaseConfig, error) {
os.Exit(0) os.Exit(0)
} }
// Handle version flags
if *versionFlag || *vFlag {
fmt.Println("infctl-cli version:", Version)
os.Exit(0)
}
var config BaseConfig var config BaseConfig
if *deploymentFileShorthand != "" { if *deploymentFileShorthand != "" {
config.DeploymentFile = *deploymentFileShorthand config.DeploymentFile = *deploymentFileShorthand

View file

@ -0,0 +1,33 @@
[
{
"name": "Create Vagrant nodes",
"function": "RunCommand",
"params": [
"./scripts/failue.sh"
],
"retryCount": 0,
"shouldAbort": true
},
{
"name": "Configure Vagrant K3s",
"function": "RunCommand",
"params": [
"./scripts/configure_vagrant_k3s.sh"
],
"retryCount": 0,
"shouldAbort": true
},
{
"name": "Create Vagrant workstation",
"function": "RunCommand",
"params": [
"./scripts/install_vagrant_workstation.sh"
],
"retryCount": 0,
"shouldAbort": true
}
]

View file

@ -1,7 +1,5 @@
#!/usr/bin/env bash #!/usr/bin/env bash
# set -euo pipefail
# This script checks for Vagrant and VirtualBox prerequisites, # This script checks for Vagrant and VirtualBox prerequisites,
# ensures Vagrant VMs are running, and gathers network and # ensures Vagrant VMs are running, and gathers network and
# system information from the VMs. # system information from the VMs.

View file

@ -1,6 +1,6 @@
#!/usr/bin/env bash #!/usr/bin/env bash
required_tools=("infctl" "pwgen" "kubectl" "k3d" "helm" "jq" "docker") required_tools=("infctl" "pwgen" "kubectl" "k3d" "helm" "jq" "git" "docker" "vagrant")
MISSING=false MISSING=false
check_required_tools() { check_required_tools() {

24
scripts/failue.sh Executable file
View file

@ -0,0 +1,24 @@
#!/usr/bin/env bash
# Intentional-failure fixture: prints some noise with short pauses, then
# exits non-zero. Used by the test pipeline ("Create Vagrant nodes" step)
# to exercise the pipeline runner's abort/retry handling.
echo "crash"
sleep 1
echo "bang"
sleep 2
echo "wallop"
echo
echo
echo
echo "Houston, we have a problem"
echo
echo
echo
# Non-zero status signals failure to the calling pipeline step.
exit 1

View file

@ -34,7 +34,7 @@ Vagrant.configure("2") do |config|
end end
vm1.vm.provider "virtualbox" do |vb| vm1.vm.provider "virtualbox" do |vb|
vb.memory = "2048" # 2GB memory vb.memory = "2048" # 4GB memory
vb.cpus = 2 vb.cpus = 2
end end
@ -61,7 +61,7 @@ Vagrant.configure("2") do |config|
end end
vm2.vm.provider "virtualbox" do |vb| vm2.vm.provider "virtualbox" do |vb|
vb.memory = "2048" # 2GB memory vb.memory = "2048" # 4GB memory
vb.cpus = 2 vb.cpus = 2
end end
@ -88,7 +88,7 @@ Vagrant.configure("2") do |config|
end end
vm3.vm.provider "virtualbox" do |vb| vm3.vm.provider "virtualbox" do |vb|
vb.memory = "2048" # 2GB memory vb.memory = "2048" # 4GB memory
vb.cpus = 2 vb.cpus = 2
end end

View file

@ -55,7 +55,7 @@
- name: Install k3s on first node - name: Install k3s on first node
ansible.builtin.shell: | ansible.builtin.shell: |
set -o pipefail set -o pipefail
K3S_TOKEN=$(cat /opt/k3s-token) /bin/bash /tmp/k3s_install.sh server --cluster-init --disable traefik --disable servicelb --tls-san {{ k3s_url_ip }} --node-name vm1 --node-ip {{ vm1_ip }} K3S_TOKEN=$(cat /opt/k3s-token) /bin/bash /tmp/k3s_install.sh server --cluster-init --disable traefik --disable servicelb --tls-san {{ k3s_url_ip }} --node-name vm1 --node-ip {{ vm1_ip }} --flannel-iface=enp0s8
if [ $? -eq 0 ]; then if [ $? -eq 0 ]; then
mkdir -p /home/vagrant/.kube && cp /etc/rancher/k3s/k3s.yaml /home/vagrant/.kube/config && chown vagrant:vagrant /home/vagrant/.kube/config mkdir -p /home/vagrant/.kube && cp /etc/rancher/k3s/k3s.yaml /home/vagrant/.kube/config && chown vagrant:vagrant /home/vagrant/.kube/config
fi fi
@ -91,7 +91,7 @@
{% endif %} {% endif %}
K3S_URL=https://{{ k3s_url_ip }}:6443 \ K3S_URL=https://{{ k3s_url_ip }}:6443 \
K3S_TOKEN={{ k3s_token_content.stdout }} \ K3S_TOKEN={{ k3s_token_content.stdout }} \
INSTALL_K3S_EXEC="server --server https://{{ k3s_url_ip }}:6443 --disable traefik --disable servicelb --node-name={{ inventory_hostname }} --node-ip ${NODE_IP}" \ INSTALL_K3S_EXEC="server --server https://{{ k3s_url_ip }}:6443 --disable traefik --disable servicelb --node-name={{ inventory_hostname }} --node-ip ${NODE_IP} --flannel-iface=enp0s8" \
/bin/bash /tmp/k3s_install.sh 2>&1 /bin/bash /tmp/k3s_install.sh 2>&1
exit_code=$? exit_code=$?
if [ $exit_code -ne 0 ]; then if [ $exit_code -ne 0 ]; then

View file

@ -0,0 +1,47 @@
---
- name: Install k3s on 3-node cluster
hosts: vm1,vm2,vm3
become: true
become_user: root
serial: 1 # Ensure tasks are executed one host at a time
vars_files:
- vars.yaml
tasks:
- name: Install open-iscsi on all nodes
ansible.builtin.package:
name: open-iscsi
state: present
- name: Install nfs-common on all nodes
ansible.builtin.package:
name: nfs-common
state: present
- name: Install cryptsetup and dmsetup packages
ansible.builtin.package:
name:
- cryptsetup
- dmsetup
state: present
- name: Load dm_crypt kernel module
community.general.modprobe:
name: dm_crypt
state: present
- name: Make dm_crypt module load on boot
ansible.builtin.lineinfile:
path: /etc/modules
line: dm_crypt
create: yes
- name: Check if dm_crypt module is loaded
ansible.builtin.shell: lsmod | grep dm_crypt
register: dm_crypt_check
failed_when: false
changed_when: false
- name: Show dm_crypt status
ansible.builtin.debug:
msg: "dm_crypt module is {{ 'loaded' if dm_crypt_check.rc == 0 else 'not loaded' }}"

View file

@ -1,18 +1,24 @@
#!/usr/bin/env bash #!/usr/bin/env bash
sudo apt-get update sudo apt-get update
sudo apt-get install -y software-properties-common git vim python3.10-venv sudo apt-get install -y software-properties-common git vim python3.10-venv jq
# Set up ansible environment for vagrant user # Set up ansible environment for vagrant user
sudo -u vagrant mkdir -p /home/vagrant/.ansible sudo -u vagrant mkdir -p /home/vagrant/.ansible
sudo -u vagrant touch /home/vagrant/.ansible/ansible.cfg sudo -u vagrant touch /home/vagrant/.ansible/ansible.cfg
# Create workspace and SSH directories # Create workspace and SSH directories
sudo -u vagrant mkdir -p /home/vagrant/ansible
sudo -u vagrant mkdir -p /home/vagrant/.ssh sudo -u vagrant mkdir -p /home/vagrant/.ssh
sudo chmod 700 /home/vagrant/.ssh sudo chmod 700 /home/vagrant/.ssh
# create directories and copy files to /home/vagrant
mkdir -p /home/vagrant/{ansible,scripts,pipelines,k8s}
sudo cp -r /vagrant/ansible/* /home/vagrant/ansible/
sudo cp -r /vagrant/scripts/* /home/vagrant/scripts/
sudo cp -r /vagrant/pipelines/* /home/vagrant/pipelines
sudo cp -r /vagrant/k8s/* /home/vagrant/k8s
sudo chmod +x /home/vagrant/pipelines/*.sh
# Copy the Vagrant private keys (these will be synced by Vagrant) # Copy the Vagrant private keys (these will be synced by Vagrant)
for i in {1..3}; do for i in {1..3}; do
sudo -u vagrant cp /vagrant/.vagrant/machines/vm$i/virtualbox/private_key /home/vagrant/.ssh/vm${i}_key sudo -u vagrant cp /vagrant/.vagrant/machines/vm$i/virtualbox/private_key /home/vagrant/.ssh/vm${i}_key
@ -82,7 +88,6 @@ if [ $? -ne 0 ]; then
exit 1 exit 1
fi fi
cp -r /vagrant/ansible/* /home/vagrant/ansible/
eval `ssh-agent -s` eval `ssh-agent -s`
ssh-add # ~/machines/*/virtualbox/private_key ssh-add # ~/machines/*/virtualbox/private_key
@ -103,7 +108,20 @@ else
echo "Provisioning block already present in $BASHRC" echo "Provisioning block already present in $BASHRC"
fi fi
ANSIBLE_HOST_KEY_CHECKING=False ansible --inventory-file /home/vagrant/ansible/ansible_inventory.ini -m ping vm1,vm2,vm3 echo
echo -------------------------
echo
su - vagrant
id
echo
echo -------------------------
echo
ssh-add ~/.ssh/vm*_key
ANSIBLE_SUPPRESS_INTERPRETER_DISCOVERY_WARNING=1 ANSIBLE_HOST_KEY_CHECKING=False ansible --inventory-file /home/vagrant/ansible/ansible_inventory.ini -m ping vm1,vm2,vm3
if [ $? -ne 0 ]; then if [ $? -ne 0 ]; then
echo "Ansible ping failed. Please check your Vagrant VMs and network configuration." echo "Ansible ping failed. Please check your Vagrant VMs and network configuration."
@ -111,7 +129,7 @@ if [ $? -ne 0 ]; then
fi fi
# install_keepalived.yaml # install_keepalived.yaml
ANSIBLE_HOST_KEY_CHECKING=False ansible-playbook install_keepalived.yaml --inventory-file ansible_inventory.ini ANSIBLE_SUPPRESS_INTERPRETER_DISCOVERY_WARNING=1 ANSIBLE_HOST_KEY_CHECKING=False ansible-playbook install_keepalived.yaml --inventory-file ansible_inventory.ini
if [ $? -ne 0 ]; then if [ $? -ne 0 ]; then
echo "Ansible playbook failed. Please check your Vagrant VMs and network configuration." echo "Ansible playbook failed. Please check your Vagrant VMs and network configuration."
exit 1 exit 1
@ -119,17 +137,24 @@ fi
echo "Keepalived installation completed." echo "Keepalived installation completed."
# install_k3s_3node.yaml # install_k3s_3node.yaml
ANSIBLE_HOST_KEY_CHECKING=False ansible-playbook install_k3s_3node.yaml --inventory-file ansible_inventory.ini ANSIBLE_SUPPRESS_INTERPRETER_DISCOVERY_WARNING=1 ANSIBLE_HOST_KEY_CHECKING=False ansible-playbook install_k3s_3node.yaml --inventory-file ansible_inventory.ini
if [ $? -ne 0 ]; then if [ $? -ne 0 ]; then
echo "Ansible playbook failed. Please check your Vagrant VMs and network configuration." echo "Ansible playbook failed. Please check your Vagrant VMs and network configuration."
exit 1 exit 1
fi fi
# copy_k8s_config.yaml # copy_k8s_config.yaml
ANSIBLE_HOST_KEY_CHECKING=False ansible-playbook copy_k8s_config.yaml --inventory-file ansible_inventory.ini ANSIBLE_SUPPRESS_INTERPRETER_DISCOVERY_WARNING=1 ANSIBLE_HOST_KEY_CHECKING=False ansible-playbook copy_k8s_config.yaml --inventory-file ansible_inventory.ini
if [ $? -ne 0 ]; then if [ $? -ne 0 ]; then
echo "Ansible playbook failed. Please check your Vagrant VMs and network configuration." echo "Ansible playbook failed. Please check your Vagrant VMs and network configuration."
exit 1 exit 1
fi fi
# check infctl
cd /home/vagrant
bash /home/vagrant/scripts/check_install_infctl.sh
if [ $? -ne 0 ]; then
echo "infctl check failed. Please check your installation."
exit 1
fi

View file

@ -0,0 +1,27 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: nginx-with-storage
namespace: default
spec:
selector:
matchLabels:
app: nginx-storage
replicas: 1
template:
metadata:
labels:
app: nginx-storage
spec:
containers:
- name: nginx
image: nginx:stable
ports:
- containerPort: 80
volumeMounts:
- name: nginx-data
mountPath: /usr/share/nginx/html
volumes:
- name: nginx-data
persistentVolumeClaim:
claimName: nginx-data-pvc

View file

@ -0,0 +1,12 @@
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: nginx-data-pvc
namespace: default
spec:
accessModes:
- ReadWriteOnce
storageClassName: longhorn
resources:
requests:
storage: 1Gi

View file

@ -0,0 +1,29 @@
[
{
"name": "Install Longhorn pre-requisites",
"function": "RunCommand",
"params": [
"./scripts/longhorn_prereqs.sh"
],
"retryCount": 0,
"shouldAbort": true
},
{
"name": "Install Longhorn",
"function": "RunCommand",
"params": [
"./scripts/install_longhorn.sh"
],
"retryCount": 0,
"shouldAbort": true
},
{
"name": "Wait for Longhorn pods to come up",
"function": "RunCommand",
"params": [
"./scripts/wait_for_longhorn.sh"
],
"retryCount": 10,
"shouldAbort": true
}
]

View file

@ -0,0 +1,35 @@
#!/usr/bin/env bash
# function to install infctl
install_infctl() {
echo "Installing infctl..."
# Add installation commands here
curl -L https://codeberg.org/headshed/infctl-cli/raw/branch/main/install.sh | bash
}
if ! command -v infctl &> /dev/null
then
echo "infctl could not be found, installing..."
install_infctl
fi
# base.json.example config.json.example
# https://codeberg.org/headshed/infctl-cli/raw/branch/main/base.json.example
# https://codeberg.org/headshed/infctl-cli/raw/branch/main/config.json.example
if [ ! -f "base.json" ]; then
echo "base.json not found in home directory, downloading..."
curl -o "base.json" https://codeberg.org/headshed/infctl-cli/raw/branch/main/base.json.example
fi
if [ ! -f "config.json" ]; then
echo "config.json not found in home directory, downloading..."
curl -o "config.json" https://codeberg.org/headshed/infctl-cli/raw/branch/main/config.json.example
fi
echo "infctl is installed and ready to use."

View file

@ -0,0 +1,22 @@
#!/usr/bin/env bash
# Install Longhorn by applying the pinned upstream release manifest,
# unless the longhorn-system namespace already contains pods.
echo
echo "vagrant longhorn installation"
echo
# Load the per-VM vagrant private keys into the ssh agent.
ssh-add ~/.ssh/vm*_key
# Use the ansible virtualenv prepared during provisioning.
source /home/vagrant/ansible/venv/bin/activate
# Check if there are any pods in the longhorn-system namespace
# (idempotency guard: skip re-install on an already-deployed cluster).
if kubectl -n longhorn-system get pods --no-headers 2>/dev/null | grep -q '^[^ ]'; then
echo "Pods already exist in the longhorn-system namespace. Skipping installation."
exit 0
fi
# Pinned release; bump deliberately after testing.
# https://github.com/longhorn/longhorn/releases
# v1.8.1 in prod 1.9.1 is latest
LONGHORN_RELEASE="v1.8.1"
LONGHORN_RELEASE_URL="https://raw.githubusercontent.com/longhorn/longhorn/$LONGHORN_RELEASE/deploy/longhorn.yaml"
echo "Applying Longhorn release $LONGHORN_RELEASE..."
echo "Using Longhorn release URL: $LONGHORN_RELEASE_URL"
kubectl apply -f $LONGHORN_RELEASE_URL

View file

@ -0,0 +1,54 @@
#!/usr/bin/env bash
echo
echo "vagrant longhorn installation"
echo
ssh-add ~/.ssh/vm*_key
source /home/vagrant/ansible/venv/bin/activate
ANSIBLE_SUPPRESS_INTERPRETER_DISCOVERY_WARNING=1 ANSIBLE_HOST_KEY_CHECKING=False ansible --inventory-file /home/vagrant/ansible/ansible_inventory.ini -m ping vm1,vm2,vm3
if [ $? -ne 0 ]; then
echo "Ansible ping failed. Please check your Vagrant VMs and network configuration."
exit 1
fi
echo "Ansible ping successful."
# Check if there are any pods in the longhorn-system namespace
if kubectl -n longhorn-system get pods --no-headers 2>/dev/null | grep -q '^[^ ]'; then
echo "Pods already exist in the longhorn-system namespace. Skipping installation."
exit 0
fi
echo "Installing Longhorn prerequisites..."
# install_longhorn_prereqs.yaml
ANSIBLE_SUPPRESS_INTERPRETER_DISCOVERY_WARNING=1 ANSIBLE_HOST_KEY_CHECKING=False ansible-playbook ~/ansible/install_longhorn_prereqs.yaml --inventory-file /home/vagrant/ansible/ansible_inventory.ini
if [ $? -ne 0 ]; then
echo "Ansible playbook failed. Please check the playbook and your inventory."
exit 1
fi
echo "installing Longhorn ..."
# https://github.com/longhorn/longhorn/releases
# v1.8.1 in prod 1.9.1 is latest
LONGHORN_RELEASE="v1.8.1"
LONGHORN_RELEASE_URL="https://raw.githubusercontent.com/longhorn/longhorn/$LONGHORN_RELEASE/deploy/longhorn.yaml"
echo "Applying Longhorn release $LONGHORN_RELEASE..."
echo "Using Longhorn release URL: $LONGHORN_RELEASE_URL"
kubectl apply -f $LONGHORN_RELEASE_URL
# Wait for all pods in longhorn-system namespace to be ready
echo "Waiting for Longhorn pods to be ready..."
while true; do
not_ready=$(kubectl -n longhorn-system get pods --no-headers 2>/dev/null | grep -vE 'Running|Completed' | wc -l)
total=$(kubectl -n longhorn-system get pods --no-headers 2>/dev/null | wc -l)
if [[ $total -gt 0 && $not_ready -eq 0 ]]; then
echo "All Longhorn pods are ready."
break
fi
sleep 10
done

View file

@ -0,0 +1,32 @@
#!/usr/bin/env bash
# Install Longhorn prerequisites on the vagrant k3s nodes via the
# install_longhorn_prereqs.yaml playbook. Skips all work when the
# longhorn-system namespace already has pods (cluster already set up).
echo
echo "vagrant longhorn prerequisites"
echo

# Load the per-VM vagrant private keys into the ssh agent.
ssh-add ~/.ssh/vm*_key

# Use the ansible virtualenv prepared during provisioning.
source /home/vagrant/ansible/venv/bin/activate

ANSIBLE_SUPPRESS_INTERPRETER_DISCOVERY_WARNING=1 ANSIBLE_HOST_KEY_CHECKING=False ansible --inventory-file /home/vagrant/ansible/ansible_inventory.ini -m ping vm1,vm2,vm3
if [ $? -ne 0 ]; then
echo "Ansible ping failed. Please check your Vagrant VMs and network configuration."
exit 1
fi

echo "Ansible ping successful."

# Check if there are any pods in the longhorn-system namespace
# (idempotency guard: nothing to do on an already-deployed cluster).
if kubectl -n longhorn-system get pods --no-headers 2>/dev/null | grep -q '^[^ ]'; then
echo "Pods already exist in the longhorn-system namespace. Skipping installation."
exit 0
fi

# NOTE(review): removed a stray unconditional `exit` that was here — it made
# the prerequisite playbook below unreachable, so prereqs were never installed.

echo "Installing Longhorn prerequisites..."

# install_longhorn_prereqs.yaml
ANSIBLE_SUPPRESS_INTERPRETER_DISCOVERY_WARNING=1 ANSIBLE_HOST_KEY_CHECKING=False ansible-playbook ~/ansible/install_longhorn_prereqs.yaml --inventory-file /home/vagrant/ansible/ansible_inventory.ini
if [ $? -ne 0 ]; then
echo "Ansible playbook failed. Please check the playbook and your inventory."
exit 1
fi

View file

@ -0,0 +1,17 @@
#!/usr/bin/env bash
# Wait until every pod in the longhorn-system namespace is Running or
# Completed. Exits non-zero on timeout so the calling pipeline step
# (declared with retryCount: 10) can re-invoke it, instead of hanging
# the whole pipeline forever on a stuck rollout.
echo
echo "wait for longhorn installation"
echo

# Load the per-VM vagrant private keys into the ssh agent.
ssh-add ~/.ssh/vm*_key
# Use the ansible virtualenv prepared during provisioning.
source /home/vagrant/ansible/venv/bin/activate

max_checks=60   # 60 checks x 10s = 10 minute ceiling per invocation
checks=0
while true; do
    # Pods not yet Running/Completed; total guards against an empty namespace
    # (no pods at all must not count as "ready").
    not_ready=$(kubectl -n longhorn-system get pods --no-headers 2>/dev/null | grep -vE 'Running|Completed' | wc -l)
    total=$(kubectl -n longhorn-system get pods --no-headers 2>/dev/null | wc -l)
    if [[ $total -gt 0 && $not_ready -eq 0 ]]; then
        echo "All Longhorn pods are ready."
        break
    fi
    checks=$((checks + 1))
    if [[ $checks -ge $max_checks ]]; then
        echo "Timed out waiting for Longhorn pods to become ready."
        exit 1
    fi
    sleep 10
done