Compare commits


13 commits

Author       SHA1        Message  Date

jon brookes  2f11890779  feat: add .env file check and load it in startup script  2025-10-14 18:15:32 +01:00
jon brookes  a8f25e733c  Merge branch 'feat/mvk-gcloud-template' of ssh://codeberg.org/headshed/infctl-cli into feat/mvk-gcloud-template  2025-10-14 18:05:46 +01:00
jon brookes  e0906c821d  fix: change to repo dir  2025-10-14 18:01:55 +01:00
jon brookes  8f19558826  feat: update Forgejo deployment  2025-10-14 17:52:00 +01:00
                         add DNS update step and complete forgejo deployment after build
jon brookes  02b114e0e6  feat: add scripts for pre-flight checks and user input wait in k3s pipeline  2025-10-14 17:49:59 +01:00
jon brookes  f23e1c41ff  feat: add .env file existence check and load it in startup script  2025-10-14 16:50:12 +01:00
jon brookes  b4c0f17b12  feat: add script to copy .env file to k3s-vm-1 after pre-flight checks  2025-10-14 16:32:05 +01:00
jon brookes  bb4d0cc701  feat: update Forgejo deployment URL and add installation check in startup script  2025-10-14 15:58:09 +01:00
jon brookes  8faa97a8bb  feat: env INSTALL_LONGHORN  2025-10-10 13:33:11 +01:00
                         Add Ansible playbooks for Longhorn, MetalLB, and Traefik installation,
                         conditional on presence of INSTALL_LONGHORN=true
jon brookes  80f4e5a53b  fix: Update cert-manager  2025-10-08 15:03:24 +01:00
                         improve installation script and increase max readiness retries for cert-manager
jon brookes  9fc84486a1  test cloudflare terraform dns updates  2025-10-04 12:24:03 +01:00
jon brookes  e0891f6c09  fix: Add create_tfvars script and update pipeline configuration  2025-10-03 15:46:30 +01:00
jon brookes  2ab7872af1  Add Google Cloud K3s infrastructure support  2025-10-02 15:41:50 +01:00
                         - Add Terraform configuration for GCP instance and storage
                         - Add startup script for K3s installation and configuration
                         - Add pipeline scripts for deployment and management
                         - Add Forgejo deployment manifests and configuration
34 changed files with 773 additions and 138 deletions

.env.gcloud-example (new file)

@@ -0,0 +1,3 @@
PROJECT_NAME="the name of your gcp project, often referred to as the project"
EMAIL="your email address to identify yourself with letsencrypt"
APP_DOMAIN_NAME="your domain name for the app, e.g., frgdr.some-domain.com"
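The scripts in this change consume these values by sourcing the file; a minimal sketch of that pattern (the :?-guards here are an illustrative addition, not part of the diff):

    # load .env and fail fast if a required value is missing
    set -a   # export everything we source
    source .env
    set +a
    : "${PROJECT_NAME:?PROJECT_NAME must be set in .env}"
    : "${EMAIL:?EMAIL must be set in .env}"
    : "${APP_DOMAIN_NAME:?APP_DOMAIN_NAME must be set in .env}"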

.gitignore (vendored)

@@ -30,3 +30,6 @@ terraform.tfstate**
 *history*.txt
 *.tfvars
 gcloud/tf/.env
+gcloud/tf/k3s/forgejo/issuer.yaml
+gcloud/tf/k3s/forgejo/ingress.yaml
+.env


@@ -7,8 +7,7 @@ resource "google_compute_firewall" "allow_http" {
   allow {
     protocol = "tcp"
     ports = [
-      "80", "443", // http/https
-      "30080"      // ports opened to access the python API via NodePort
+      "80", "443" // http/https
     ]
   }

gcloud/tf/k3s/forgejo/deployment.yaml (new file)

@@ -0,0 +1,68 @@
apiVersion: apps/v1
kind: Deployment
metadata:
  name: forgejo-deployment
  namespace: forgejo
  labels:
    app: forgejo-app
spec:
  replicas: 1
  strategy:
    type: Recreate
  selector:
    matchLabels:
      app: forgejo-app
  template:
    metadata:
      labels:
        app: forgejo-app
    spec:
      terminationGracePeriodSeconds: 10
      containers:
        - name: forgejo
          image: codeberg.org/forgejo/forgejo:11.0.6
          imagePullPolicy: IfNotPresent
          env:
            - name: FORGEJO__repository__ENABLE_PUSH_CREATE_USER
              value: "true"
            - name: FORGEJO__server__ROOT_URL
              value: "https://frgdr.headshed.dev/"
            - name: FORGEJO__repository__DEFAULT_BRANCH
              value: "main"
            - name: FORGEJO__server__LFS_START_SERVER
              value: "true"
            - name: FORGEJO__security__INSTALL_LOCK
              value: "true"
            - name: FORGEJO__service__DISABLE_REGISTRATION
              value: "false"
          ports:
            - name: http
              containerPort: 3000
              protocol: TCP
            - name: ssh
              containerPort: 22
          resources:
            requests:
              memory: "128Mi"
              cpu: "100m"
            limits:
              memory: "256Mi"
              cpu: "500m"
          tty: true
          volumeMounts:
            - name: forgejo-data
              mountPath: /data
            # - name: forgejo-timezone
            #   mountPath: /etc/timezone
            # - name: forgejo-localtime
            #   mountPath: /etc/localtime
      volumes:
        - name: forgejo-data
          persistentVolumeClaim:
            claimName: forgejo-data-pvc
        # - name: forgejo-timezone
        #   configMap:
        #     name: forgejo-timezone
        # - name: forgejo-localtime
        #   configMap:
        #     name: forgejo-localtime

gcloud/tf/k3s/forgejo/ingress.yaml.template (new file)

@@ -0,0 +1,24 @@
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: tls-forgejo-ingress-http
  namespace: forgejo
  annotations:
    cert-manager.io/issuer: "le-cluster-issuer-http"
spec:
  tls:
    - hosts:
        - ${APP_DOMAIN_NAME}
      secretName: tls-frg-ingress-http
  rules:
    - host: ${APP_DOMAIN_NAME}
      http:
        paths:
          - path: /
            pathType: Prefix
            backend:
              service:
                name: forgejo-app-service
                port:
                  name: web
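The ${APP_DOMAIN_NAME} placeholder is rendered by the create_ingress.sh script added later in this diff; the essential step, shown here as a sketch with an illustrative domain, is just envsubst plus apply:

    # render the template (APP_DOMAIN_NAME normally comes from .env) and apply it
    export APP_DOMAIN_NAME="frgdr.example.com"   # illustrative value
    envsubst < ingress.yaml.template > ingress.yaml
    kubectl apply -f ingress.yaml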

gcloud/tf/k3s/forgejo/issuer.yaml.template (new file)

@@ -0,0 +1,17 @@
apiVersion: cert-manager.io/v1
kind: Issuer
metadata:
  name: le-cluster-issuer-http
  namespace: forgejo
spec:
  acme:
    email: ${EMAIL}
    # We use the staging server here for testing to avoid throttling.
    server: https://acme-staging-v02.api.letsencrypt.org/directory
    # server: https://acme-v02.api.letsencrypt.org/directory
    privateKeySecretRef:
      name: http-issuer-account-key
    solvers:
      - http01:
          ingress:
            class: traefik

gcloud/tf/k3s/forgejo/pvc.yaml (new file)

@@ -0,0 +1,26 @@
apiVersion: v1
kind: PersistentVolume
metadata:
  name: forgejo-local-pv
spec:
  capacity:
    storage: 3Gi
  accessModes:
    - ReadWriteOnce
  hostPath:
    path: /mnt/disks/app-data/forgejo
  storageClassName: local-path
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: forgejo-data-pvc
  namespace: forgejo
spec:
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 3Gi
  volumeName: forgejo-local-pv
  storageClassName: local-path
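Since the PV pins its claim via volumeName, a quick way to confirm the pair bound correctly (a verification sketch, not part of the change set):

    # both should report STATUS "Bound" once the claim matches the volume
    kubectl get pv forgejo-local-pv
    kubectl -n forgejo get pvc forgejo-data-pvc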

gcloud/tf/k3s/forgejo/service.yaml (new file)

@@ -0,0 +1,13 @@
apiVersion: v1
kind: Service
metadata:
  name: forgejo-app-service
  namespace: forgejo
spec:
  selector:
    app: forgejo-app
  ports:
    - name: web
      protocol: TCP
      port: 3000
      targetPort: 3000


@@ -19,15 +19,12 @@ resource "google_compute_instance" "k3s" {
    }
  }

  // Configuration to be a Spot Instance, to reduce costs
  scheduling {
    automatic_restart = true
  }

  // ensures that the instance is a Spot VM
  // means it can be preempted, but it's cheaper
  # scheduling {
  #   preemptible                 = false
  #   automatic_restart           = true
  #   automatic_restart           = false
  #   provisioning_model          = "SPOT"
  #   instance_termination_action = "STOP"
  #   preemptible                 = true
  # }

  // attach a disk for K3S
@@ -80,67 +77,9 @@ resource "google_compute_disk" "app_data_disk" {
   zone = var.zone
 }

-// load balancer ....
-# resource "google_compute_health_check" "http_health_check" {
-#   name                = "http-health-check"
-#   check_interval_sec  = 5
-#   timeout_sec         = 5
-#   healthy_threshold   = 2
-#   unhealthy_threshold = 2
-#   http_health_check {
-#     port = 80
-#   }
-# }
-resource "google_compute_http_health_check" "http_health_check" {
-  name                = "http-health-check"
-  request_path        = "/"
-  port                = 80
-  check_interval_sec  = 5
-  timeout_sec         = 5
-  healthy_threshold   = 2
-  unhealthy_threshold = 2
-}
-# resource "google_compute_target_pool" "k3s_pool" {
-#   name          = "k3s-target-pool"
-#   instances     = [google_compute_instance.k3s.self_link]
-#   health_checks = [google_compute_health_check.http_health_check.self_link]
-# }
-resource "google_compute_target_pool" "k3s_pool" {
-  name          = "k3s-target-pool"
-  instances     = [google_compute_instance.k3s.self_link]
-  health_checks = [google_compute_http_health_check.http_health_check.self_link]
-}
-resource "google_compute_forwarding_rule" "http_forwarding_rule" {
-  name                  = "http-forwarding-rule"
-  target                = google_compute_target_pool.k3s_pool.self_link
-  port_range            = "80"
-  ip_protocol           = "TCP"
-  load_balancing_scheme = "EXTERNAL"
-}
-resource "google_compute_forwarding_rule" "https_forwarding_rule" {
-  name                  = "https-forwarding-rule"
-  target                = google_compute_target_pool.k3s_pool.self_link
-  port_range            = "443"
-  ip_protocol           = "TCP"
-  load_balancing_scheme = "EXTERNAL"
-}

 // Outputs
 // ----------------------------------
 data "google_project" "project" {
   project_id = var.project_name # Use variable from tfvars
 }
@@ -154,7 +93,3 @@ output "k3s_vm_public_ip" {
   description = "Ephemeral public IP of the k3s VM"
 }

-output "load_balancer_ip" {
-  value       = google_compute_forwarding_rule.http_forwarding_rule.ip_address
-  description = "External IP address of the load balancer (HTTP)"
-}


@@ -4,6 +4,10 @@ terraform {
       source  = "hashicorp/google"
       version = "~> 4.0"
     }
+    # cloudflare = {
+    #   source  = "cloudflare/cloudflare"
+    #   version = "~> 5"
+    # }
   }
 }
@@ -16,9 +20,45 @@
 provider "google" {
   project = var.project_name # Use variable from tfvars
   region  = "us-central1"    # Replace with your desired region
 }

+# provider "google" {
+#   credentials = file("<my-gcp-creds>.json")
+#   project     = var.project_name
+#   region      = var.region
+#   zone        = var.zone
+# }
+
+# provider "cloudflare" {
+#   api_token = var.cloudflare_api_token
+# }
+
+# variable "cloudflare_api_token" {
+#   description = "Cloudflare API token"
+#   sensitive   = true
+# }
+
+# variable "cloudflare_account_id" {
+#   description = "Cloudflare Account ID"
+#   sensitive   = true
+# }
+
+# variable "cloudflare_zone_id" {
+#   description = "Cloudflare Zone ID"
+#   sensitive   = true
+# }
+
+# variable "cloudflare_domain" {
+#   description = "Cloudflare Domain"
+#   sensitive   = true
+# }
+
+# resource "cloudflare_dns_record" "frgdr" {
+#   zone_id = var.cloudflare_zone_id
+#   name    = "frgdr"
+#   content = google_compute_instance.k3s.network_interface[0].access_config[0].nat_ip
+#   type    = "A"
+#   ttl     = 300
+#   proxied = false
+#   comment = "Application domain record"
+# }


@@ -0,0 +1,56 @@
[
  {
    "name": "run pre-flight checks",
    "function": "RunCommand",
    "params": [
      "./gcloud/tf/scripts/pre-flight-checks.sh"
    ],
    "retryCount": 0,
    "shouldAbort": true
  },
  {
    "name": "list gcloud infrastructure",
    "function": "RunCommand",
    "params": [
      "./gcloud/tf/scripts/list_gloud_infra.sh"
    ],
    "retryCount": 0,
    "shouldAbort": true
  },
  {
    "name": "create tfvars",
    "function": "RunCommand",
    "params": [
      "./gcloud/tf/scripts/create_tfvars.sh"
    ],
    "retryCount": 0,
    "shouldAbort": true
  },
  {
    "name": "run tofu",
    "function": "RunCommand",
    "params": [
      "./gcloud/tf/scripts/run_tofu.sh"
    ],
    "retryCount": 0,
    "shouldAbort": true
  },
  {
    "name": "wait for user input to continue",
    "function": "RunCommand",
    "params": [
      "./gcloud/tf/scripts/wait_for_user_input_dns.sh"
    ],
    "retryCount": 0,
    "shouldAbort": true
  },
  {
    "name": "copy .env to k3s-vm-1",
    "function": "RunCommand",
    "params": [
      "gcloud/tf/scripts/copy_env_to_first_node.sh"
    ],
    "retryCount": 0,
    "shouldAbort": true
  }
]
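Every step is a RunCommand with shouldAbort set, so the pipeline halts at the first failing script. Going by the startup script later in this diff, a pipeline file like this is fed to infctl roughly as follows (the filename here is a placeholder; LOG_FORMAT mirrors the startup script's usage):

    # run from the repo root so the ./gcloud/tf/... paths resolve
    LOG_FORMAT=none infctl -f gcloud/tf/scripts/<your-pipeline>.json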


@@ -3,7 +3,7 @@
     "name": "run pre-flight checks",
     "function": "RunCommand",
     "params": [
-      "./scripts/pre-flight-checks.sh"
+      "./gcloud/tf/scripts/pre-flight-checks.sh"
     ],
     "retryCount": 0,
     "shouldAbort": true
@@ -12,7 +12,16 @@
     "name": "list gcloud infrastructure",
     "function": "RunCommand",
     "params": [
-      "./scripts/list_gloud_infra.sh"
+      "./gcloud/tf/scripts/list_gloud_infra.sh"
     ],
     "retryCount": 0,
     "shouldAbort": true
   },
+  {
+    "name": "create tfvars",
+    "function": "RunCommand",
+    "params": [
+      "./gcloud/tf/scripts/create_tfvars.sh"
+    ],
+    "retryCount": 0,
+    "shouldAbort": true
@@ -21,7 +30,16 @@
     "name": "run tofu",
     "function": "RunCommand",
     "params": [
-      "./scripts/run_tofu.sh"
+      "./gcloud/tf/scripts/run_tofu.sh"
     ],
     "retryCount": 0,
     "shouldAbort": true
   },
+  {
+    "name": "copy .env to k3s-vm-1",
+    "function": "RunCommand",
+    "params": [
+      "gcloud/tf/scripts/copy_env_to_first_node.sh"
+    ],
+    "retryCount": 0,
+    "shouldAbort": true

gcloud/tf/scripts/cert-manager/install_cert-manager.sh (new file)

@@ -0,0 +1,34 @@
#!/usr/bin/env bash

if kubectl -n cert-manager get pods 2>/dev/null | grep -q 'Running'; then
  echo "cert-manager pods already running. Skipping installation."
  exit 0
fi

kubectl apply -f https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml

echo "Waiting for cert-manager pods to be in 'Running' state..."
MAX_RETRIES=10
RETRY=0
while [ $RETRY -lt $MAX_RETRIES ]; do
  NOT_READY_PODS=$(kubectl -n cert-manager get pods --no-headers | grep -v 'Running' | wc -l)
  if [ "$NOT_READY_PODS" -eq 0 ]; then
    echo "All cert-manager pods are running."
    break
  else
    echo "$NOT_READY_PODS pods are not ready yet. Waiting..."
    RETRY=$((RETRY + 1))
    sleep 5
  fi
done

if [ "$NOT_READY_PODS" -ne 0 ]; then
  echo "Failed to get all cert-manager pods running after $MAX_RETRIES attempts."
  exit 1
fi
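For comparison, the stock kubectl primitive for the same readiness gate would be a single kubectl wait (an alternative sketch; note this diff actually moves away from kubectl wait in the MetalLB script below, in favour of a retry loop like the one above):

    # wait up to 90s for every cert-manager pod to report Ready
    kubectl wait --namespace cert-manager \
      --for=condition=ready pod \
      --all \
      --timeout=90s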

gcloud/tf/scripts/copy_env_to_first_node.sh (new file)

@@ -0,0 +1,31 @@
#!/usr/bin/env bash
source .env

for i in {1..10}; do
  # Check if the instance is running
  INSTANCE_STATUS=$(gcloud compute instances describe k3s-vm-1 --zone=us-central1-a --project="$PROJECT_NAME" --format='get(status)')
  if [[ "$INSTANCE_STATUS" != "RUNNING" ]]; then
    echo "Instance k3s-vm-1 is not running. Attempt $i/10. Waiting 5 seconds..."
    sleep 5
    continue
  fi
  # Check if the directory exists on the remote host
  if gcloud compute ssh k3s-vm-1 --zone=us-central1-a --project="$PROJECT_NAME" --command="test -d /opt/src/infctl-cli/"; then
    echo "/opt/src/infctl-cli/ exists on k3s-vm-1."
    break
  else
    echo "/opt/src/infctl-cli/ does not exist yet. Attempt $i/10. Waiting 5 seconds..."
    sleep 5
  fi
done

# Final check after loop
if ! gcloud compute ssh k3s-vm-1 --zone=us-central1-a --project="$PROJECT_NAME" --command="test -d /opt/src/infctl-cli/"; then
  echo "ERROR: /opt/src/infctl-cli/ does not exist on k3s-vm-1 after 10 attempts. Exiting."
  exit 1
fi

gcloud compute scp .env k3s-vm-1:/opt/src/infctl-cli/.env --zone=us-central1-a --project="$PROJECT_NAME"

gcloud/tf/scripts/create_tfvars.sh (new file)

@@ -0,0 +1,32 @@
#!/bin/bash
set -a
# read environment variables from .env file
# for value $PROJECT_NAME
. .env
# Check if PROJECT_NAME environment variable is set
if [ -z "$PROJECT_NAME" ]; then
echo "Error: PROJECT_NAME environment variable is not set."
echo "Please set the PROJECT_NAME variable and try again."
exit 1
fi
# Get the directory where the script is located
SCRIPT_DIR="$(dirname "$(readlink -f "$0")")"
cd "$SCRIPT_DIR" || { echo "Failed to change directory to $SCRIPT_DIR"; exit 1; }
# Define the template file path and output file path
TEMPLATE_FILE="../terraform.tfvars.template"
OUTPUT_FILE="../terraform.tfvars"
# Use envsubst to substitute the PROJECT_NAME variable into the template
envsubst < "$TEMPLATE_FILE" > "$OUTPUT_FILE"
if [ $? -ne 0 ]; then
echo "Error: Failed to substitute variables in the template."
exit 1
fi
echo "tfvars has been created at $OUTPUT_FILE"

gcloud/tf/scripts/forgejo/create_ingress.sh (new file)

@@ -0,0 +1,30 @@
#!/bin/bash
set -a
# read environment variables from .env file
# for value of APP_DOMAIN_NAME
. .env
if [ -z "$APP_DOMAIN_NAME" ]; then
echo "Error: APP_DOMAIN_NAME environment variable is not set. Please set it in the .env file."
exit 1
fi
# Get the directory where the script is located
SCRIPT_DIR="$(dirname "$(readlink -f "$0")")"
cd "$SCRIPT_DIR" || { echo "Failed to change directory to $SCRIPT_DIR"; exit 1; }
# Define the template file path and output file path
TEMPLATE_FILE="../../k3s/forgejo/ingress.yaml.template"
OUTPUT_FILE="../../k3s/forgejo/ingress.yaml"
# Use envsubst to substitute the APP_DOMAIN_NAME variable into the template
envsubst < "$TEMPLATE_FILE" > "$OUTPUT_FILE"
if [ $? -ne 0 ]; then
echo "Error: Failed to substitute variables in the template."
exit 1
fi
echo "Ingress configuration has been created at $OUTPUT_FILE"

gcloud/tf/scripts/forgejo/create_issuer.sh (new file)

@@ -0,0 +1,33 @@
#!/bin/bash
set -a
# read environment variables from .env file
# for value of EMAIL
. .env
# Check if EMAIL environment variable is set
if [ -z "$EMAIL" ]; then
echo "Error: EMAIL environment variable is not set."
echo "Please set the EMAIL variable and try again."
exit 1
fi
# Get the directory where the script is located
SCRIPT_DIR="$(dirname "$(readlink -f "$0")")"
cd "$SCRIPT_DIR" || { echo "Failed to change directory to $SCRIPT_DIR"; exit 1; }
# Define the template file path and output file path
TEMPLATE_FILE="../../k3s/forgejo/issuer.yaml.template"
OUTPUT_FILE="../../k3s/forgejo/issuer.yaml"
# Use envsubst to substitute the EMAIL variable into the template
envsubst < "$TEMPLATE_FILE" > "$OUTPUT_FILE"
if [ $? -ne 0 ]; then
echo "Error: Failed to substitute variables in the template."
exit 1
fi
echo "Issuer configuration has been created at $OUTPUT_FILE"

gcloud/tf/scripts/forgejo/install_forgejo.sh (new file)

@@ -0,0 +1,45 @@
#!/bin/bash
set -e

echo "Installing Forgejo"

# Get the directory where the script is located
SCRIPT_DIR="$(dirname "$(readlink -f "$0")")"

# Define namespace
NAMESPACE="forgejo"
MANIFESTS_DIR="${SCRIPT_DIR}/../../k3s/forgejo"

echo "Creating namespace..."
if ! kubectl get namespace "${NAMESPACE}" >/dev/null 2>&1; then
  kubectl create namespace "${NAMESPACE}"
else
  echo "Namespace '${NAMESPACE}' already exists."
fi

echo "Creating PersistentVolumeClaim..."
kubectl apply -f "${MANIFESTS_DIR}/pvc.yaml"

echo "Creating Service..."
kubectl apply -f "${MANIFESTS_DIR}/service.yaml"

echo "Creating Deployment..."
kubectl apply -f "${MANIFESTS_DIR}/deployment.yaml"

echo "Creating Certificate Issuer..."
kubectl apply -f "${MANIFESTS_DIR}/issuer.yaml"

echo "Creating Ingress..."
kubectl apply -f "${MANIFESTS_DIR}/ingress.yaml"

echo "Forgejo installation complete."
echo "Verify deployment with: kubectl -n ${NAMESPACE} get pods,svc,ingress,pvc"
exit

# Note: The ingressTCP.yaml is for a different application (galene) and should be applied separately
# echo "Note: The ingressTCP.yaml is for the galene application and has not been applied."

gcloud/tf/scripts/install-forgejo-pipeline.json (new file)

@@ -0,0 +1,47 @@
[
  {
    "name": "install cert-manager",
    "function": "RunCommand",
    "params": [
      "gcloud/tf/scripts/cert-manager/install_cert-manager.sh"
    ],
    "retryCount": 0,
    "shouldAbort": true
  },
  {
    "name": "install traefik",
    "function": "RunCommand",
    "params": [
      "gcloud/tf/scripts/install_traefik.sh"
    ],
    "retryCount": 0,
    "shouldAbort": true
  },
  {
    "name": "create forgejo ingress",
    "function": "RunCommand",
    "params": [
      "./gcloud/tf/scripts/forgejo/create_ingress.sh"
    ],
    "retryCount": 0,
    "shouldAbort": true
  },
  {
    "name": "create forgejo issuer",
    "function": "RunCommand",
    "params": [
      "./gcloud/tf/scripts/forgejo/create_issuer.sh"
    ],
    "retryCount": 0,
    "shouldAbort": true
  },
  {
    "name": "install forgejo",
    "function": "RunCommand",
    "params": [
      "./gcloud/tf/scripts/forgejo/install_forgejo.sh"
    ],
    "retryCount": 0,
    "shouldAbort": true
  }
]

gcloud/tf/scripts/install_traefik.sh (mode change: Normal file → Executable file, 0 lines changed)


@@ -1,8 +1,11 @@
 #!/bin/bash

+# Redirect all output to a log file for reliability
+exec > /tmp/startup.log 2>&1

 INFCTL_GIT_REPO="https://codeberg.org/headshed/infctl-cli.git"
-INFCTL_GIT_REPO_BRANCH="feature/gcloud-k3s"
-INFCTL_INSTALL_DIR="/opt/infctl-cli"
+INFCTL_GIT_REPO_BRANCH="main"
+INFCTL_INSTALL_DIR="/opt/src"

 # ensure only run once
 if [[ -f /etc/startup_was_launched ]]; then exit 0; fi
@@ -100,3 +103,32 @@ if [[ ! -d "$INFCTL_INSTALL_DIR" ]]; then
   chown -R user:user "$INFCTL_INSTALL_DIR"
 fi

+for i in {1..100}; do
+  if [[ -f /opt/src/infctl-cli/.env ]]; then
+    echo ".env file found."
+    break
+  else
+    echo ".env file not found. Attempt $i/100. Waiting 5 seconds..."
+    sleep 5
+  fi
+done
+
+# Final check after loop
+if [[ ! -f /opt/src/infctl-cli/.env ]]; then
+  echo "ERROR: .env file not found after 100 attempts. Exiting."
+  exit 1
+fi
+
+# load .env file
+source /opt/src/infctl-cli/.env
+
+cd "$INFCTL_INSTALL_DIR/infctl-cli" || { echo "Failed to change directory to $INFCTL_INSTALL_DIR/infctl-cli"; exit 1; }
+
+# check to see if INSTALL_FORGEJO is set to "true"
+if [[ "$INSTALL_FORGEJO" == "true" ]]; then
+  # install forgejo using infctl
+  export KUBECONFIG=/etc/rancher/k3s/k3s.yaml
+  LOG_FORMAT=none infctl -f "${INFCTL_INSTALL_DIR}/infctl-cli/gcloud/tf/scripts/install-forgejo-pipeline.json"
+  touch /etc/forgejo_was_installed
+fi

gcloud/tf/scripts/pre-flight-checks.sh

@@ -47,4 +47,19 @@ echo
 kubectl version --client
 echo

+echo "🧪 checking we have envsubst installed..."
+if ! command -v envsubst &> /dev/null; then
+  echo "❌ envsubst could not be found, please install it first"
+  echo
+  echo "on ubuntu you can install it with: sudo apt-get install -y gettext-base"
+  echo
+  exit 1
+fi
+
+echo "✅ envsubst is installed..."
+echo
+envsubst --version
+echo

 echo "✅ Pre-flight checks passed. You are ready to proceed 🙂"
 echo

gcloud/tf/scripts/run_tofu.sh

@@ -1,5 +1,12 @@
 #!/usr/bin/env bash

+# Get the directory where the script is located
+SCRIPT_DIR="$(dirname "$(readlink -f "$0")")"
+cd "$SCRIPT_DIR" || { echo "Failed to change directory to $SCRIPT_DIR"; exit 1; }
+
+TF_DIR="../"
+cd "$TF_DIR" || { echo "Failed to change directory to $TF_DIR"; exit 1; }
+
 if [[ -d ".terraform" && -f ".terraform.lock.hcl" ]]; then
   echo "✅ Terraform already initialized"
   # tofu init

gcloud/tf/scripts/wait_for_user_input_dns.sh (new file)

@@ -0,0 +1,11 @@
#!/usr/bin/env bash

echo "Please configure DNS using the IP address from the previous stage."
echo "You have 120 seconds."

for i in {120..1}; do
  echo -ne "Time remaining: $i seconds\r"
  sleep 1
done
echo ""
exit 0
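The countdown always burns the full 120 seconds; a variant that lets the operator continue early could use bash's built-in read timeout (an alternative sketch, not what the script does):

    # continue on Enter, or automatically after 120 seconds
    read -t 120 -r -p "Press Enter once DNS is configured (auto-continues in 120s)... " || true
    echo ""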

gcloud/tf/terraform.tfvars.template (new file)

@@ -0,0 +1,13 @@
// Your GCP project name
// it is referred to as the project ID
// in Google Cloud
// ----------------------------------
project_name = "${PROJECT_NAME}"
// where to deploy to
// region
region = "us-central1"
zone = "us-central1-a"
// application name
app_name = "${PROJECT_NAME}-k3s-cluster"


@@ -1,20 +1,29 @@
#!/usr/bin/env bash
# sleep 5
for i in {1..5}; do
  echo "working ..."
  sleep 0.5
done
echo "crash"
sleep 2
# sleep 1
echo "not working ..."
echo "bang"
sleep 1
# sleep 2
figlet "boom"
echo "wallop"
sleep 1
# sleep 1
figlet "bang"
echo "Houston, we have a problem"
sleep 2
echo "oh dear, oh my..."
sleep 1
figlet "Houston, we have a problem"
sleep 1


@@ -1,20 +1,29 @@
#!/usr/bin/env bash
# sleep 5
for i in {1..5}; do
  echo "working ..."
  sleep 0.5
done
echo "bish"
sleep 2
# sleep 1
echo "still working ..."
echo "bash"
sleep 1
# sleep 2
figlet "bish"
echo "bosh"
sleep 1
# sleep 1
figlet "bash"
echo "lovely jubbly"
sleep 2
figlet "bosh"
sleep 1
figlet "LOVELY JUBBLY"
sleep 1

Vagrantfile

@@ -117,7 +117,11 @@ Vagrant.configure("2") do |config|
     vb.cpus = 1
   end

-  ws.vm.provision "shell", path: "ansible/provision_workstation.sh"
+  ws.vm.provision "shell",
+    path: "ansible/provision_workstation.sh",
+    env: {
+      "INSTALL_LONGHORN" => ENV['INSTALL_LONGHORN'] || "false"
+    }
 end
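With the env hash in place, the flag can be passed straight from the host shell when provisioning, e.g.:

    # host side: bring the workstation up with the optional Longhorn/MetalLB/Traefik stack
    INSTALL_LONGHORN=true vagrant up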

ansible/install_longhorn.yaml (new file)

@@ -0,0 +1,16 @@
---
- name: Install longhorn using infctl
  hosts: localhost
  become: true
  become_user: vagrant
  serial: 1 # Ensure tasks are executed one host at a time
  vars_files:
    - vars.yaml
  tasks:
    - name: run infctl longhorn pipeline
      ansible.builtin.command: >
        bash -c 'cd /home/vagrant && LOG_FILE=/tmp/longhorn_log.txt LOG_FORMAT=basic infctl -f pipelines/vagrant-longhorn.json'
      register: longhorn_result
      ignore_errors: false

ansible/install_metallb.yaml (new file)

@@ -0,0 +1,16 @@
---
- name: Install metallb using infctl
  hosts: localhost
  become: true
  become_user: vagrant
  serial: 1 # Ensure tasks are executed one host at a time
  vars_files:
    - vars.yaml
  tasks:
    - name: run infctl metallb pipeline
      ansible.builtin.command: >
        bash -c 'cd /home/vagrant && LOG_FILE=/tmp/metallb_log.txt LOG_FORMAT=basic infctl -f ./pipelines/vagrant-metallb.json'
      register: metallb_result
      ignore_errors: false

ansible/install_traefik.yaml (new file)

@@ -0,0 +1,20 @@
---
- name: Install traefik using infctl
  hosts: localhost
  become: true
  become_user: vagrant
  serial: 1 # Ensure tasks are executed one host at a time
  vars_files:
    - vars.yaml
  tasks:
    - name: run infctl traefik pipeline
      ansible.builtin.command: infctl -f pipelines/vagrant-ingress.json
      args:
        chdir: /home/vagrant
      environment:
        LOG_FILE: /tmp/traefik_log.txt
        LOG_FORMAT: none
      register: traefik_result
      ignore_errors: false

ansible/provision_workstation.sh

@@ -4,6 +4,7 @@
 sudo apt-get update
 sudo apt-get install -y software-properties-common git vim python3.10-venv jq figlet

+# shellcheck disable=SC1091
 source /vagrant/.envrc

 # Set up ansible environment for vagrant user
@@ -24,10 +25,10 @@ sudo chmod +x /home/vagrant/pipelines/*.sh
 # Copy the Vagrant private keys (these will be synced by Vagrant)
 for i in {1..3}; do
-  sudo -u vagrant cp /vagrant/.vagrant/machines/vm$i/virtualbox/private_key /home/vagrant/.ssh/vm${i}_key
-  sudo -u root cp /vagrant/.vagrant/machines/vm$i/virtualbox/private_key /root/.ssh/vm${i}_key
-  sudo chmod 600 /home/vagrant/.ssh/vm${i}_key
-  sudo chmod 600 /root/.ssh/vm${i}_key
+  sudo -u vagrant cp "/vagrant/.vagrant/machines/vm$i/virtualbox/private_key" "/home/vagrant/.ssh/vm${i}_key"
+  sudo -u root cp "/vagrant/.vagrant/machines/vm$i/virtualbox/private_key" "/root/.ssh/vm${i}_key"
+  sudo chmod 600 "/home/vagrant/.ssh/vm${i}_key"
+  sudo chmod 600 "/root/.ssh/vm${i}_key"
 done

 # Disable host key checking for easier learning
@@ -46,18 +47,17 @@ cd "$ANSIBLE_DIR" || {
 if [ ! -d "venv" ]; then
   echo "Creating Python virtual environment in ./venv..."
   python3 -m venv venv
-  source "venv/bin/activate"
-  if [ $? -ne 0 ]; then
+  # shellcheck disable=SC1091
+  if ! source "venv/bin/activate"; then
     echo "Failed to activate virtual environment. Please check your Python installation."
     exit 1
   fi
   echo "Virtual environment created and activated."
-  cp /vagrant/ansible/requirements.txt .
+  cp "/vagrant/ansible/requirements.txt" .
   if [ -f "requirements.txt" ]; then
     echo "Installing dependencies from requirements.txt..."
     pip install --upgrade pip
-    pip install -r requirements.txt
-    if [ $? -ne 0 ]; then
+    if ! pip install -r requirements.txt; then
       echo "Failed to install dependencies from requirements.txt."
       exit 1
     fi
@@ -76,7 +76,13 @@ ls -al "$ANSIBLE_VENV_DIR/bin/activate"
 if [ -d "$ANSIBLE_VENV_DIR" ]; then
   echo "Activating Ansible virtual environment..."
   if [ -f "$ANSIBLE_VENV_DIR/bin/activate" ]; then
+    # shellcheck source=/dev/null
     source "$ANSIBLE_VENV_DIR/bin/activate"
+  else
+    echo "Virtualenv activate script not found!" >&2
+    exit 1
+  fi
 else
   echo "Ansible virtual environment not found at $ANSIBLE_VENV_DIR. Please create it before running this script."
   exit 1
@@ -86,13 +92,13 @@
 echo ""

-ansible --version
-if [ $? -ne 0 ]; then
+if ! ansible --version; then
   echo "Ansible is not installed or not found in the virtual environment. Please check your installation."
   exit 1
 fi

-eval `ssh-agent -s`
+eval "$(ssh-agent -s)"
 ssh-add # ~/machines/*/virtualbox/private_key

 BASHRC="/home/vagrant/.bashrc"
@@ -103,9 +109,10 @@ if ! grep -qF "$BLOCK_START" "$BASHRC"; then
   cat <<'EOF' >> "$BASHRC"
 # ADDED BY infctl provisioning
-eval `ssh-agent -s`
+eval "$(ssh-agent -s)"
 ssh-add ~/machines/*/virtualbox/private_key
 ssh-add -L
+# shellcheck disable=SC1091
 source /vagrant/.envrc
 EOF
 else
@@ -125,48 +132,63 @@ echo
 ssh-add ~/.ssh/vm*_key

-ANSIBLE_SUPPRESS_INTERPRETER_DISCOVERY_WARNING=1 ANSIBLE_HOST_KEY_CHECKING=False ansible --inventory-file /home/vagrant/ansible/ansible_inventory.ini -m ping vm1,vm2,vm3
-if [ $? -ne 0 ]; then
+if ! ANSIBLE_SUPPRESS_INTERPRETER_DISCOVERY_WARNING=1 ANSIBLE_HOST_KEY_CHECKING=False ansible --inventory-file /home/vagrant/ansible/ansible_inventory.ini -m ping vm1,vm2,vm3; then
   echo "Ansible ping failed. Please check your Vagrant VMs and network configuration."
   exit 1
 fi

 # install_keepalived.yaml
-ANSIBLE_SUPPRESS_INTERPRETER_DISCOVERY_WARNING=1 ANSIBLE_HOST_KEY_CHECKING=False ansible-playbook install_keepalived.yaml --inventory-file ansible_inventory.ini
-if [ $? -ne 0 ]; then
+if ! ANSIBLE_SUPPRESS_INTERPRETER_DISCOVERY_WARNING=1 ANSIBLE_HOST_KEY_CHECKING=False ansible-playbook install_keepalived.yaml --inventory-file ansible_inventory.ini; then
   echo "Ansible playbook failed. Please check your Vagrant VMs and network configuration."
   exit 1
 fi
 echo "Keepalived installation completed."

 # install_k3s_3node.yaml
-ANSIBLE_SUPPRESS_INTERPRETER_DISCOVERY_WARNING=1 ANSIBLE_HOST_KEY_CHECKING=False ansible-playbook install_k3s_3node.yaml --inventory-file ansible_inventory.ini
-if [ $? -ne 0 ]; then
+if ! ANSIBLE_SUPPRESS_INTERPRETER_DISCOVERY_WARNING=1 ANSIBLE_HOST_KEY_CHECKING=False ansible-playbook install_k3s_3node.yaml --inventory-file ansible_inventory.ini; then
   echo "Ansible playbook failed. Please check your Vagrant VMs and network configuration."
   exit 1
 fi

 # copy_k8s_config.yaml
-ANSIBLE_SUPPRESS_INTERPRETER_DISCOVERY_WARNING=1 ANSIBLE_HOST_KEY_CHECKING=False ansible-playbook copy_k8s_config.yaml --inventory-file ansible_inventory.ini
-if [ $? -ne 0 ]; then
+if ! ANSIBLE_SUPPRESS_INTERPRETER_DISCOVERY_WARNING=1 ANSIBLE_HOST_KEY_CHECKING=False ansible-playbook copy_k8s_config.yaml --inventory-file ansible_inventory.ini; then
   echo "Ansible playbook failed. Please check your Vagrant VMs and network configuration."
   exit 1
 fi

-ANSIBLE_SUPPRESS_INTERPRETER_DISCOVERY_WARNING=1 ANSIBLE_HOST_KEY_CHECKING=False ansible-playbook install_dnsmasq.yaml --inventory-file ansible_inventory.ini
-if [ $? -ne 0 ]; then
+if ! ANSIBLE_SUPPRESS_INTERPRETER_DISCOVERY_WARNING=1 ANSIBLE_HOST_KEY_CHECKING=False ansible-playbook install_dnsmasq.yaml --inventory-file ansible_inventory.ini; then
   echo "Ansible playbook failed. Please check your Vagrant VMs and network configuration."
   exit 1
 fi

 # Wait for Kubernetes API to be ready
 echo "Waiting for 30 seconds for Kubernetes API to be ready..."
 sleep 30
 echo "done waiting for kubernetes API"

 # check infctl
-cd /home/vagrant
-bash /home/vagrant/scripts/check_install_infctl.sh
-if [ $? -ne 0 ]; then
+cd /home/vagrant || exit
+if ! bash /home/vagrant/scripts/check_install_infctl.sh; then
   echo "infctl check failed. Please check your installation."
   exit 1
 fi
+
+# Optionally install Longhorn, MetalLB, and Traefik
+if [ "${INSTALL_LONGHORN}" = "true" ]; then
+  cd /home/vagrant/ansible || { echo "Failed to change directory to /home/vagrant/ansible"; exit 1; }
+  if ! ANSIBLE_SUPPRESS_INTERPRETER_DISCOVERY_WARNING=1 ANSIBLE_HOST_KEY_CHECKING=False ansible-playbook install_longhorn.yaml --inventory-file ansible_inventory.ini; then
+    echo "Ansible playbook failed. Please check your Vagrant VMs and network configuration."
+    exit 1
+  fi
+  if ! ANSIBLE_SUPPRESS_INTERPRETER_DISCOVERY_WARNING=1 ANSIBLE_HOST_KEY_CHECKING=False ansible-playbook install_metallb.yaml --inventory-file ansible_inventory.ini; then
+    echo "Ansible playbook failed. Please check your Vagrant VMs and network configuration."
+    exit 1
+  fi
+  if ! ANSIBLE_SUPPRESS_INTERPRETER_DISCOVERY_WARNING=1 ANSIBLE_HOST_KEY_CHECKING=False ansible-playbook install_traefik.yaml --inventory-file ansible_inventory.ini; then
+    echo "Ansible playbook failed. Please check your Vagrant VMs and network configuration."
+    exit 1
+  fi
+fi


@@ -12,24 +12,31 @@ if ! kubectl get deployment -n metallb-system controller &>/dev/null; then
     exit 1
   fi

-  # Wait for MetalLB components to be ready
-  echo "Waiting for MetalLB components to be ready..."
-  kubectl wait --namespace metallb-system \
-    --for=condition=ready pod \
-    --selector=app=metallb \
-    --timeout=90s
+  echo "Waiting for MetalLB pods to be in 'Running' state..."
+  MAX_RETRIES=10
+  RETRY=0
+  while [ $RETRY -lt $MAX_RETRIES ]; do
+    NOT_READY_PODS=$(kubectl -n metallb-system get pods --no-headers | grep -v 'Running' | wc -l)
+    if [ "$NOT_READY_PODS" -eq 0 ]; then
+      echo "All MetalLB pods are running."
+      break
+    else
+      echo "$NOT_READY_PODS MetalLB pods are not ready yet. Waiting..."
+      RETRY=$((RETRY + 1))
+      sleep 5
+    fi
+  done
+  if [ "$NOT_READY_PODS" -ne 0 ]; then
+    echo "Failed to get all MetalLB pods running after $MAX_RETRIES attempts."
+    exit 1
+  fi
 else
   echo "MetalLB is already installed."
 fi

 # Wait for the webhook service to be ready
 echo "Waiting for MetalLB webhook service to be ready..."
 kubectl wait --namespace metallb-system \
   --for=condition=ready pod \
   --selector=component=webhook \
   --timeout=90s

 # Check if the IPAddressPool already exists
 if ! kubectl get ipaddresspool -n metallb-system default &>/dev/null; then
   echo "Creating MetalLB IPAddressPool..."