From 636a0494ac4933a53c94ae7a42dbb5b50811e57d Mon Sep 17 00:00:00 2001
From: jon brookes
Date: Sat, 16 Aug 2025 18:00:28 +0100
Subject: [PATCH] update: Added Longhorn installation process and updated
 memory allocation for VMs

update: Added 'git' and 'vagrant' to required tools in pre-flight checks
fix: configured k3s install to use internal NIC for flannel network
fix: corrected JSON formatting for config.json
update: reduce VM memory allocation to 2GB, add Longhorn installation scripts and prerequisites, and implement checks for existing pods
update: improve error logging in RunJsonDeployment and RunCommand functions
update: add jq installation to provision script
update: add version flag
---
 app/app.go                                    |  5 +-
 app/k8s.go                                    |  2 +-
 config.json.example                           |  2 +-
 config/base.go                                | 10 ++++
 pipelines/dev/failing.json                    | 33 ++++++++++++
 scripts/configure_vagrant_k3s.sh              |  2 -
 scripts/dev-pre-flight-checks.sh              |  2 +-
 scripts/failue.sh                             | 24 +++++++++
 vagrant/dev/ubuntu/Vagrantfile                |  6 +--
 .../dev/ubuntu/ansible/install_k3s_3node.yaml |  4 +-
 .../ansible/install_longhorn_prereqs.yaml     | 47 ++++++++++++++++
 .../ubuntu/ansible/provision_workstation.sh   | 41 +++++++++++---
 .../dev/ubuntu/k8s/nginx-test/deployment.yaml | 27 ++++++++++
 vagrant/dev/ubuntu/k8s/nginx-test/pvc.yaml    | 12 +++++
 .../ubuntu/pipelines/vagrant-longhorn.json    | 29 ++++++++++
 .../ubuntu/scripts/check_install_infctl.sh    | 35 ++++++++++++
 .../dev/ubuntu/scripts/install_longhorn.sh    | 22 ++++++++
 .../scripts/install_vagrant_longhorn.sh       | 54 +++++++++++++++++++
 .../dev/ubuntu/scripts/longhorn_prereqs.sh    | 32 +++++++++++
 .../dev/ubuntu/scripts/wait_for_longhorn.sh   | 17 ++++++
 20 files changed, 385 insertions(+), 21 deletions(-)
 create mode 100644 pipelines/dev/failing.json
 create mode 100755 scripts/failue.sh
 create mode 100644 vagrant/dev/ubuntu/ansible/install_longhorn_prereqs.yaml
 create mode 100644 vagrant/dev/ubuntu/k8s/nginx-test/deployment.yaml
 create mode 100644 vagrant/dev/ubuntu/k8s/nginx-test/pvc.yaml
 create mode 100644 vagrant/dev/ubuntu/pipelines/vagrant-longhorn.json
 create mode 100755 vagrant/dev/ubuntu/scripts/check_install_infctl.sh
 create mode 100755 vagrant/dev/ubuntu/scripts/install_longhorn.sh
 create mode 100755 vagrant/dev/ubuntu/scripts/install_vagrant_longhorn.sh
 create mode 100755 vagrant/dev/ubuntu/scripts/longhorn_prereqs.sh
 create mode 100755 vagrant/dev/ubuntu/scripts/wait_for_longhorn.sh

diff --git a/app/app.go b/app/app.go
index 1d51443..764fbc7 100644
--- a/app/app.go
+++ b/app/app.go
@@ -66,12 +66,13 @@ func (app *AppState) RunJsonDeployment() []PipelineStep {
 	jsonFile := app.Config.DeploymentFile
 
 	if jsonFile == "" {
-		log.Fatal("no config specified with --deployment-file=")
+		log.Fatal("no config specified with [-f|--deployment-file]=")
 	}
 
 	file, err := os.Open(jsonFile)
 	if err != nil {
 		slog.Error(fmt.Sprintf("Failed to open JSON file: %s", err))
+		os.Exit(1)
 	}
 	defer file.Close()
 
@@ -212,8 +213,6 @@ func (app *AppState) CreatePipeline() error {
 		slog.Info(fmt.Sprintln("Project name added:", app.Customer.Project))
 		fmt.Printf("Port number assigned: %d\n", port)
 		app.Config.Port = port
-	} else {
-		slog.Info(fmt.Sprintln("Project name already exists:", app.Customer.Project))
 	}
 
 	err = app.SetUpNewCustomer()
diff --git a/app/k8s.go b/app/k8s.go
index 86f7a70..9deb6ea 100644
--- a/app/k8s.go
+++ b/app/k8s.go
@@ -102,7 +102,7 @@ func RunCommand(command string) error {
 		for scanner.Scan() {
 			line := scanner.Text()
 			stderr.WriteString(line + "\n")
-			slog.Error(line)
+			slog.Info(line)
 		}
 	}()
 
diff --git a/config.json.example b/config.json.example
index 30f9cc6..e8b82d4 100644
--- a/config.json.example
+++ b/config.json.example
@@ -15,5 +15,5 @@
     "nginx_conf": "path_to/conf.d",
     "admin_url": "admin_url.headshed.dev",
     "preview_url": "app-prv.headshed.dev",
-    "ui_url": "ww2.headshed.dev",
+    "ui_url": "ww2.headshed.dev"
 }
\ No newline at end of file
diff --git a/config/base.go b/config/base.go
index acbd4d2..7e2ce5f 100644
--- a/config/base.go
+++ b/config/base.go
@@ -7,6 +7,8 @@ import (
 	"os"
 )
 
+const Version = "v0.0.4"
+
 type BaseConfig struct {
 	ProjectsDirectory string `json:"projects_directory"`
 	Env               string `json:"env"`
@@ -38,6 +40,8 @@ func ReadBaseConfig(path string) (BaseConfig, error) {
 	deploymentFileShorthand := flag.String("f", "", "shorthand for -deployment-file")
 
 	helpFlag := flag.Bool("help", false, "show help")
+	versionFlag := flag.Bool("version", false, "show version")
+	vFlag := flag.Bool("v", false, "show version (shorthand)")
 	flag.Parse()
 
 	if *helpFlag {
@@ -46,6 +50,12 @@ func ReadBaseConfig(path string) (BaseConfig, error) {
 		os.Exit(0)
 	}
 
+	// Handle version flags
+	if *versionFlag || *vFlag {
+		fmt.Println("infctl-cli version:", Version)
+		os.Exit(0)
+	}
+
 	var config BaseConfig
 	if *deploymentFileShorthand != "" {
 		config.DeploymentFile = *deploymentFileShorthand
diff --git a/pipelines/dev/failing.json b/pipelines/dev/failing.json
new file mode 100644
index 0000000..d369347
--- /dev/null
+++ b/pipelines/dev/failing.json
@@ -0,0 +1,33 @@
+[
+
+    {
+        "name": "Create Vagrant nodes",
+        "function": "RunCommand",
+        "params": [
+            "./scripts/failue.sh"
+        ],
+        "retryCount": 0,
+        "shouldAbort": true
+    },
+
+    {
+        "name": "Configure Vagrant K3s",
+        "function": "RunCommand",
+        "params": [
+            "./scripts/configure_vagrant_k3s.sh"
+        ],
+        "retryCount": 0,
+        "shouldAbort": true
+    },
+
+
+    {
+        "name": "Create Vagrant workstation",
+        "function": "RunCommand",
+        "params": [
+            "./scripts/install_vagrant_workstation.sh"
+        ],
+        "retryCount": 0,
+        "shouldAbort": true
+    }
+]
diff --git a/scripts/configure_vagrant_k3s.sh b/scripts/configure_vagrant_k3s.sh
index 29aabdf..e7ab40a 100755
--- a/scripts/configure_vagrant_k3s.sh
+++ b/scripts/configure_vagrant_k3s.sh
@@ -1,7 +1,5 @@
 #!/usr/bin/env bash
 
-# set -euo pipefail
-
 # This script checks for Vagrant and VirtualBox prerequisites,
 # ensures Vagrant VMs are running, and gathers network and
 # system information from the VMs.
diff --git a/scripts/dev-pre-flight-checks.sh b/scripts/dev-pre-flight-checks.sh
index 4523443..2ffdde6 100755
--- a/scripts/dev-pre-flight-checks.sh
+++ b/scripts/dev-pre-flight-checks.sh
@@ -1,6 +1,6 @@
 #!/usr/bin/env bash
 
-required_tools=("infctl" "pwgen" "kubectl" "k3d" "helm" "jq" "docker")
+required_tools=("infctl" "pwgen" "kubectl" "k3d" "helm" "jq" "git" "docker" "vagrant")
 MISSING=false
 
 check_required_tools() {
diff --git a/scripts/failue.sh b/scripts/failue.sh
new file mode 100755
index 0000000..01a9581
--- /dev/null
+++ b/scripts/failue.sh
@@ -0,0 +1,24 @@
+#!/usr/bin/env bash
+
+
+echo "crash"
+
+sleep 1
+
+echo "bang"
+
+sleep 2
+
+echo "wallop"
+
+echo
+echo
+echo
+
+echo "Houston, we have a problem"
+
+echo
+echo
+echo
+
+exit 1
diff --git a/vagrant/dev/ubuntu/Vagrantfile b/vagrant/dev/ubuntu/Vagrantfile
index 98fc79d..6a3e7cd 100644
--- a/vagrant/dev/ubuntu/Vagrantfile
+++ b/vagrant/dev/ubuntu/Vagrantfile
@@ -34,7 +34,7 @@ Vagrant.configure("2") do |config|
     end
 
     vm1.vm.provider "virtualbox" do |vb|
-      vb.memory = "4096" # 4GB memory
+      vb.memory = "2048" # 2GB memory
       vb.cpus = 2
     end
 
@@ -61,7 +61,7 @@ Vagrant.configure("2") do |config|
     end
 
     vm2.vm.provider "virtualbox" do |vb|
-      vb.memory = "4096" # 4GB memory
+      vb.memory = "2048" # 2GB memory
       vb.cpus = 2
     end
 
@@ -88,7 +88,7 @@ Vagrant.configure("2") do |config|
     end
 
    vm3.vm.provider "virtualbox" do |vb|
-      vb.memory = "4096" # 4GB memory
+      vb.memory = "2048" # 2GB memory
       vb.cpus = 2
     end
 
diff --git a/vagrant/dev/ubuntu/ansible/install_k3s_3node.yaml b/vagrant/dev/ubuntu/ansible/install_k3s_3node.yaml
index bd96230..a8ed7b3 100644
--- a/vagrant/dev/ubuntu/ansible/install_k3s_3node.yaml
+++ b/vagrant/dev/ubuntu/ansible/install_k3s_3node.yaml
@@ -55,7 +55,7 @@
     - name: Install k3s on first node
       ansible.builtin.shell: |
         set -o pipefail
-        K3S_TOKEN=$(cat /opt/k3s-token) /bin/bash /tmp/k3s_install.sh server --cluster-init --disable traefik --disable servicelb --tls-san {{ k3s_url_ip }} --node-name vm1 --node-ip {{ vm1_ip }}
+        K3S_TOKEN=$(cat /opt/k3s-token) /bin/bash /tmp/k3s_install.sh server --cluster-init --disable traefik --disable servicelb --tls-san {{ k3s_url_ip }} --node-name vm1 --node-ip {{ vm1_ip }} --flannel-iface=enp0s8
         if [ $? -eq 0 ]; then
           mkdir -p /home/vagrant/.kube && cp /etc/rancher/k3s/k3s.yaml /home/vagrant/.kube/config && chown vagrant:vagrant /home/vagrant/.kube/config
         fi
@@ -91,7 +91,7 @@
         {% endif %}
         K3S_URL=https://{{ k3s_url_ip }}:6443 \
         K3S_TOKEN={{ k3s_token_content.stdout }} \
-        INSTALL_K3S_EXEC="server --server https://{{ k3s_url_ip }}:6443 --disable traefik --disable servicelb --node-name={{ inventory_hostname }} --node-ip ${NODE_IP}" \
+        INSTALL_K3S_EXEC="server --server https://{{ k3s_url_ip }}:6443 --disable traefik --disable servicelb --node-name={{ inventory_hostname }} --node-ip ${NODE_IP} --flannel-iface=enp0s8" \
         /bin/bash /tmp/k3s_install.sh 2>&1
         exit_code=$?
         if [ $exit_code -ne 0 ]; then
diff --git a/vagrant/dev/ubuntu/ansible/install_longhorn_prereqs.yaml b/vagrant/dev/ubuntu/ansible/install_longhorn_prereqs.yaml
new file mode 100644
index 0000000..c22a182
--- /dev/null
+++ b/vagrant/dev/ubuntu/ansible/install_longhorn_prereqs.yaml
@@ -0,0 +1,47 @@
+---
+- name: Install Longhorn prerequisites on 3-node cluster
+  hosts: vm1,vm2,vm3
+  become: true
+  become_user: root
+  serial: 1 # Ensure tasks are executed one host at a time
+  vars_files:
+    - vars.yaml
+
+  tasks:
+    - name: Install open-iscsi on all nodes
+      ansible.builtin.package:
+        name: open-iscsi
+        state: present
+
+    - name: Install nfs-common on all nodes
+      ansible.builtin.package:
+        name: nfs-common
+        state: present
+
+    - name: Install cryptsetup and dmsetup packages
+      ansible.builtin.package:
+        name:
+          - cryptsetup
+          - dmsetup
+        state: present
+
+    - name: Load dm_crypt kernel module
+      community.general.modprobe:
+        name: dm_crypt
+        state: present
+
+    - name: Make dm_crypt module load on boot
+      ansible.builtin.lineinfile:
+        path: /etc/modules
+        line: dm_crypt
+        create: yes
+
+    - name: Check if dm_crypt module is loaded
+      ansible.builtin.shell: lsmod | grep dm_crypt
+      register: dm_crypt_check
+      failed_when: false
+      changed_when: false
+
+    - name: Show dm_crypt status
+      ansible.builtin.debug:
+        msg: "dm_crypt module is {{ 'loaded' if dm_crypt_check.rc == 0 else 'not loaded' }}"
\ No newline at end of file
diff --git a/vagrant/dev/ubuntu/ansible/provision_workstation.sh b/vagrant/dev/ubuntu/ansible/provision_workstation.sh
index 45605b7..0888e78 100644
--- a/vagrant/dev/ubuntu/ansible/provision_workstation.sh
+++ b/vagrant/dev/ubuntu/ansible/provision_workstation.sh
@@ -1,18 +1,24 @@
 #!/usr/bin/env bash
 
 sudo apt-get update
-sudo apt-get install -y software-properties-common git vim python3.10-venv
-
+sudo apt-get install -y software-properties-common git vim python3.10-venv jq
 
 # Set up ansible environment for vagrant user
 sudo -u vagrant mkdir -p /home/vagrant/.ansible
 sudo -u vagrant touch /home/vagrant/.ansible/ansible.cfg
 
 # Create workspace and SSH directories
-sudo -u vagrant mkdir -p /home/vagrant/ansible
 sudo -u vagrant mkdir -p /home/vagrant/.ssh
 sudo chmod 700 /home/vagrant/.ssh
 
+# create directories and copy files to /home/vagrant
+mkdir -p /home/vagrant/{ansible,scripts,pipelines,k8s}
+sudo cp -r /vagrant/ansible/* /home/vagrant/ansible/
+sudo cp -r /vagrant/scripts/* /home/vagrant/scripts/
+sudo cp -r /vagrant/pipelines/* /home/vagrant/pipelines
+sudo cp -r /vagrant/k8s/* /home/vagrant/k8s
+sudo chmod +x /home/vagrant/pipelines/*.sh
+
 # Copy the Vagrant private keys (these will be synced by Vagrant)
 for i in {1..3}; do
     sudo -u vagrant cp /vagrant/.vagrant/machines/vm$i/virtualbox/private_key /home/vagrant/.ssh/vm${i}_key
@@ -82,7 +88,6 @@ if [ $? -ne 0 ]; then
     exit 1
 fi
 
-cp -r /vagrant/ansible/* /home/vagrant/ansible/
 eval `ssh-agent -s`
 ssh-add # ~/machines/*/virtualbox/private_key
 
@@ -103,7 +108,20 @@ else
     echo "Provisioning block already present in $BASHRC"
 fi
 
-ANSIBLE_HOST_KEY_CHECKING=False ansible --inventory-file /home/vagrant/ansible/ansible_inventory.ini -m ping vm1,vm2,vm3
+echo
+echo -------------------------
+echo
+
+# show the vagrant user's identity (non-interactive)
+sudo -u vagrant id
+
+echo
+echo -------------------------
+echo
+
+ssh-add ~/.ssh/vm*_key
+
+ANSIBLE_SUPPRESS_INTERPRETER_DISCOVERY_WARNING=1 ANSIBLE_HOST_KEY_CHECKING=False ansible --inventory-file /home/vagrant/ansible/ansible_inventory.ini -m ping vm1,vm2,vm3
 
 if [ $? -ne 0 ]; then
     echo "Ansible ping failed. Please check your Vagrant VMs and network configuration."
@@ -111,7 +129,7 @@ if [ $? -ne 0 ]; then
 fi
 
 # install_keepalived.yaml
-ANSIBLE_HOST_KEY_CHECKING=False ansible-playbook install_keepalived.yaml --inventory-file ansible_inventory.ini
+ANSIBLE_SUPPRESS_INTERPRETER_DISCOVERY_WARNING=1 ANSIBLE_HOST_KEY_CHECKING=False ansible-playbook install_keepalived.yaml --inventory-file ansible_inventory.ini
 if [ $? -ne 0 ]; then
     echo "Ansible playbook failed. Please check your Vagrant VMs and network configuration."
     exit 1
@@ -119,17 +137,24 @@ fi
 echo "Keepalived installation completed."
 
 # install_k3s_3node.yaml
-ANSIBLE_HOST_KEY_CHECKING=False ansible-playbook install_k3s_3node.yaml --inventory-file ansible_inventory.ini
+ANSIBLE_SUPPRESS_INTERPRETER_DISCOVERY_WARNING=1 ANSIBLE_HOST_KEY_CHECKING=False ansible-playbook install_k3s_3node.yaml --inventory-file ansible_inventory.ini
 if [ $? -ne 0 ]; then
     echo "Ansible playbook failed. Please check your Vagrant VMs and network configuration."
     exit 1
 fi
 
 # copy_k8s_config.yaml
-ANSIBLE_HOST_KEY_CHECKING=False ansible-playbook copy_k8s_config.yaml --inventory-file ansible_inventory.ini
+ANSIBLE_SUPPRESS_INTERPRETER_DISCOVERY_WARNING=1 ANSIBLE_HOST_KEY_CHECKING=False ansible-playbook copy_k8s_config.yaml --inventory-file ansible_inventory.ini
 if [ $? -ne 0 ]; then
     echo "Ansible playbook failed. Please check your Vagrant VMs and network configuration."
     exit 1
 fi
 
+# check infctl
+cd /home/vagrant
+bash /home/vagrant/scripts/check_install_infctl.sh
+if [ $? -ne 0 ]; then
+    echo "infctl check failed. Please check your installation."
+    exit 1
+fi
 
diff --git a/vagrant/dev/ubuntu/k8s/nginx-test/deployment.yaml b/vagrant/dev/ubuntu/k8s/nginx-test/deployment.yaml
new file mode 100644
index 0000000..63ec4fe
--- /dev/null
+++ b/vagrant/dev/ubuntu/k8s/nginx-test/deployment.yaml
@@ -0,0 +1,27 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: nginx-with-storage
+  namespace: default
+spec:
+  selector:
+    matchLabels:
+      app: nginx-storage
+  replicas: 1
+  template:
+    metadata:
+      labels:
+        app: nginx-storage
+    spec:
+      containers:
+        - name: nginx
+          image: nginx:stable
+          ports:
+            - containerPort: 80
+          volumeMounts:
+            - name: nginx-data
+              mountPath: /usr/share/nginx/html
+      volumes:
+        - name: nginx-data
+          persistentVolumeClaim:
+            claimName: nginx-data-pvc
diff --git a/vagrant/dev/ubuntu/k8s/nginx-test/pvc.yaml b/vagrant/dev/ubuntu/k8s/nginx-test/pvc.yaml
new file mode 100644
index 0000000..9816354
--- /dev/null
+++ b/vagrant/dev/ubuntu/k8s/nginx-test/pvc.yaml
@@ -0,0 +1,12 @@
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+  name: nginx-data-pvc
+  namespace: default
+spec:
+  accessModes:
+    - ReadWriteOnce
+  storageClassName: longhorn
+  resources:
+    requests:
+      storage: 1Gi
diff --git a/vagrant/dev/ubuntu/pipelines/vagrant-longhorn.json b/vagrant/dev/ubuntu/pipelines/vagrant-longhorn.json
new file mode 100644
index 0000000..85e4322
--- /dev/null
+++ b/vagrant/dev/ubuntu/pipelines/vagrant-longhorn.json
@@ -0,0 +1,29 @@
+[
+    {
+        "name": "Install Longhorn pre-requisites",
+        "function": "RunCommand",
+        "params": [
+            "./scripts/longhorn_prereqs.sh"
+        ],
+        "retryCount": 0,
+        "shouldAbort": true
+    },
+    {
+        "name": "Install Longhorn",
+        "function": "RunCommand",
+        "params": [
+            "./scripts/install_longhorn.sh"
+        ],
+        "retryCount": 0,
+        "shouldAbort": true
+    },
+    {
+        "name": "Wait for Longhorn pods to come up",
+        "function": "RunCommand",
+        "params": [
+            "./scripts/wait_for_longhorn.sh"
+        ],
+        "retryCount": 10,
+        "shouldAbort": true
+    }
+]
\ No newline at end of file
diff --git a/vagrant/dev/ubuntu/scripts/check_install_infctl.sh b/vagrant/dev/ubuntu/scripts/check_install_infctl.sh
new file mode 100755
index 0000000..fb7f66f
--- /dev/null
+++ b/vagrant/dev/ubuntu/scripts/check_install_infctl.sh
@@ -0,0 +1,35 @@
+#!/usr/bin/env bash
+
+# function to install infctl
+install_infctl() {
+    echo "Installing infctl..."
+    # download and run the upstream install script
+    curl -L https://codeberg.org/headshed/infctl-cli/raw/branch/main/install.sh | bash
+
+}
+
+if ! command -v infctl &> /dev/null
+then
+    echo "infctl could not be found, installing..."
+    install_infctl
+fi
+
+# base.json.example config.json.example
+
+# https://codeberg.org/headshed/infctl-cli/raw/branch/main/base.json.example
+
+# https://codeberg.org/headshed/infctl-cli/raw/branch/main/config.json.example
+
+if [ ! -f "base.json" ]; then
+    echo "base.json not found in home directory, downloading..."
+    curl -o "base.json" https://codeberg.org/headshed/infctl-cli/raw/branch/main/base.json.example
+fi
+
+if [ ! -f "config.json" ]; then
+    echo "config.json not found in home directory, downloading..."
+    curl -o "config.json" https://codeberg.org/headshed/infctl-cli/raw/branch/main/config.json.example
+fi
+
+
+
+echo "infctl is installed and ready to use."
diff --git a/vagrant/dev/ubuntu/scripts/install_longhorn.sh b/vagrant/dev/ubuntu/scripts/install_longhorn.sh
new file mode 100755
index 0000000..51c565e
--- /dev/null
+++ b/vagrant/dev/ubuntu/scripts/install_longhorn.sh
@@ -0,0 +1,22 @@
+#!/usr/bin/env bash
+
+echo
+echo "vagrant longhorn installation"
+echo
+
+ssh-add ~/.ssh/vm*_key
+source /home/vagrant/ansible/venv/bin/activate
+# Check if there are any pods in the longhorn-system namespace
+if kubectl -n longhorn-system get pods --no-headers 2>/dev/null | grep -q '^[^ ]'; then
+    echo "Pods already exist in the longhorn-system namespace. Skipping installation."
+    exit 0
+fi
+# https://github.com/longhorn/longhorn/releases
+# v1.8.1 in prod; 1.9.1 is latest
+LONGHORN_RELEASE="v1.8.1"
+LONGHORN_RELEASE_URL="https://raw.githubusercontent.com/longhorn/longhorn/$LONGHORN_RELEASE/deploy/longhorn.yaml"
+
+echo "Applying Longhorn release $LONGHORN_RELEASE..."
+echo "Using Longhorn release URL: $LONGHORN_RELEASE_URL"
+
+kubectl apply -f $LONGHORN_RELEASE_URL
diff --git a/vagrant/dev/ubuntu/scripts/install_vagrant_longhorn.sh b/vagrant/dev/ubuntu/scripts/install_vagrant_longhorn.sh
new file mode 100755
index 0000000..368519c
--- /dev/null
+++ b/vagrant/dev/ubuntu/scripts/install_vagrant_longhorn.sh
@@ -0,0 +1,54 @@
+#!/usr/bin/env bash
+
+echo
+echo "vagrant longhorn installation"
+echo
+
+ssh-add ~/.ssh/vm*_key
+source /home/vagrant/ansible/venv/bin/activate
+ANSIBLE_SUPPRESS_INTERPRETER_DISCOVERY_WARNING=1 ANSIBLE_HOST_KEY_CHECKING=False ansible --inventory-file /home/vagrant/ansible/ansible_inventory.ini -m ping vm1,vm2,vm3
+if [ $? -ne 0 ]; then
+    echo "Ansible ping failed. Please check your Vagrant VMs and network configuration."
+    exit 1
+fi
+echo "Ansible ping successful."
+
+# Check if there are any pods in the longhorn-system namespace
+if kubectl -n longhorn-system get pods --no-headers 2>/dev/null | grep -q '^[^ ]'; then
+    echo "Pods already exist in the longhorn-system namespace. Skipping installation."
+    exit 0
+fi
+
+echo "Installing Longhorn prerequisites..."
+
+
+# install_longhorn_prereqs.yaml
+ANSIBLE_SUPPRESS_INTERPRETER_DISCOVERY_WARNING=1 ANSIBLE_HOST_KEY_CHECKING=False ansible-playbook ~/ansible/install_longhorn_prereqs.yaml --inventory-file /home/vagrant/ansible/ansible_inventory.ini
+if [ $? -ne 0 ]; then
+    echo "Ansible playbook failed. Please check the playbook and your inventory."
+    exit 1
+fi
+
+echo "Installing Longhorn..."
+
+# https://github.com/longhorn/longhorn/releases
+# v1.8.1 in prod; 1.9.1 is latest
+LONGHORN_RELEASE="v1.8.1"
+LONGHORN_RELEASE_URL="https://raw.githubusercontent.com/longhorn/longhorn/$LONGHORN_RELEASE/deploy/longhorn.yaml"
+
+echo "Applying Longhorn release $LONGHORN_RELEASE..."
+echo "Using Longhorn release URL: $LONGHORN_RELEASE_URL"
+
+kubectl apply -f $LONGHORN_RELEASE_URL
+
+# Wait for all pods in longhorn-system namespace to be ready
+echo "Waiting for Longhorn pods to be ready..."
+while true; do
+    not_ready=$(kubectl -n longhorn-system get pods --no-headers 2>/dev/null | grep -vE 'Running|Completed' | wc -l)
+    total=$(kubectl -n longhorn-system get pods --no-headers 2>/dev/null | wc -l)
+    if [[ $total -gt 0 && $not_ready -eq 0 ]]; then
+        echo "All Longhorn pods are ready."
+        break
+    fi
+    sleep 10
+done
diff --git a/vagrant/dev/ubuntu/scripts/longhorn_prereqs.sh b/vagrant/dev/ubuntu/scripts/longhorn_prereqs.sh
new file mode 100755
index 0000000..9e00ade
--- /dev/null
+++ b/vagrant/dev/ubuntu/scripts/longhorn_prereqs.sh
@@ -0,0 +1,32 @@
+#!/usr/bin/env bash
+
+echo
+echo "vagrant longhorn prerequisites"
+echo
+
+ssh-add ~/.ssh/vm*_key
+source /home/vagrant/ansible/venv/bin/activate
+ANSIBLE_SUPPRESS_INTERPRETER_DISCOVERY_WARNING=1 ANSIBLE_HOST_KEY_CHECKING=False ansible --inventory-file /home/vagrant/ansible/ansible_inventory.ini -m ping vm1,vm2,vm3
+if [ $? -ne 0 ]; then
+    echo "Ansible ping failed. Please check your Vagrant VMs and network configuration."
+    exit 1
+fi
+echo "Ansible ping successful."
+
+# Check if there are any pods in the longhorn-system namespace
+if kubectl -n longhorn-system get pods --no-headers 2>/dev/null | grep -q '^[^ ]'; then
+    echo "Pods already exist in the longhorn-system namespace. Skipping installation."
+    exit 0
+fi
+
+
+
+echo "Installing Longhorn prerequisites..."
+
+
+# install_longhorn_prereqs.yaml
+ANSIBLE_SUPPRESS_INTERPRETER_DISCOVERY_WARNING=1 ANSIBLE_HOST_KEY_CHECKING=False ansible-playbook ~/ansible/install_longhorn_prereqs.yaml --inventory-file /home/vagrant/ansible/ansible_inventory.ini
+if [ $? -ne 0 ]; then
+    echo "Ansible playbook failed. Please check the playbook and your inventory."
+    exit 1
+fi
diff --git a/vagrant/dev/ubuntu/scripts/wait_for_longhorn.sh b/vagrant/dev/ubuntu/scripts/wait_for_longhorn.sh
new file mode 100755
index 0000000..b07aa3b
--- /dev/null
+++ b/vagrant/dev/ubuntu/scripts/wait_for_longhorn.sh
@@ -0,0 +1,17 @@
+#!/usr/bin/env bash
+
+echo
+echo "wait for longhorn installation"
+echo
+
+ssh-add ~/.ssh/vm*_key
+source /home/vagrant/ansible/venv/bin/activate
+while true; do
+    not_ready=$(kubectl -n longhorn-system get pods --no-headers 2>/dev/null | grep -vE 'Running|Completed' | wc -l)
+    total=$(kubectl -n longhorn-system get pods --no-headers 2>/dev/null | wc -l)
+    if [[ $total -gt 0 && $not_ready -eq 0 ]]; then
+        echo "All Longhorn pods are ready."
+        break
+    fi
+    sleep 10
+done
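
Note (not part of the patch): a minimal usage sketch of how the new Longhorn pipeline might be run on the provisioned workstation. It assumes infctl reads base.json/config.json from the working directory and takes the pipeline via the -f/--deployment-file flag shown above, and that provision_workstation.sh has already copied pipelines/ and scripts/ into /home/vagrant:

    # run the Longhorn pipeline from the workstation home directory
    cd /home/vagrant
    infctl -f pipelines/vagrant-longhorn.json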