Compare commits


7 commits

34 changed files with 138 additions and 773 deletions


@@ -1,3 +0,0 @@
-PROJECT_NAME="the name of your gcp project, often referred to as the project"
-EMAIL="your email address to identify yourself with letsencrypt"
-APP_DOMAIN_NAME="your domain name for the app, e.g., frgdr.some-domain.com"

.gitignore vendored

@@ -30,6 +30,3 @@ terraform.tfstate**
 *history*.txt
 *.tfvars
 gcloud/tf/.env
-gcloud/tf/k3s/forgejo/issuer.yaml
-gcloud/tf/k3s/forgejo/ingress.yaml
-.env

gcloud/tf/doit.tf Normal file

@@ -7,7 +7,8 @@ resource "google_compute_firewall" "allow_http" {
   allow {
     protocol = "tcp"
     ports = [
-      "80", "443" // http/https
+      "80", "443", // http/https
+      "30080" // ports opened to access the python API via NodePort
    ]
  }
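One way to sanity-check the newly opened NodePort once this firewall change is applied — a minimal sketch, assuming the k3s-vm-1 instance and us-central1-a zone used by the scripts elsewhere in this changeset, and that a service is actually listening on 30080:

#!/usr/bin/env bash
# Sketch: probe port 30080 on the VM's external IP after `tofu apply`.
# k3s-vm-1 / us-central1-a are taken from scripts in this changeset;
# adjust to your instance name and zone.
VM_IP=$(gcloud compute instances describe k3s-vm-1 \
    --zone=us-central1-a \
    --format='get(networkInterfaces[0].accessConfigs[0].natIP)')
curl -sS --max-time 5 "http://${VM_IP}:30080/" || echo "port 30080 not reachable yet"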


@@ -1,68 +0,0 @@
-apiVersion: apps/v1
-kind: Deployment
-metadata:
-  name: forgejo-deployment
-  namespace: forgejo
-  labels:
-    app: forgejo-app
-spec:
-  replicas: 1
-  strategy:
-    type: Recreate
-  selector:
-    matchLabels:
-      app: forgejo-app
-  template:
-    metadata:
-      labels:
-        app: forgejo-app
-    spec:
-      terminationGracePeriodSeconds: 10
-      containers:
-        - name: forgejo
-          image: codeberg.org/forgejo/forgejo:11.0.6
-          imagePullPolicy: IfNotPresent
-          env:
-            - name: FORGEJO__repository__ENABLE_PUSH_CREATE_USER
-              value: "true"
-            - name: FORGEJO__server__ROOT_URL
-              value: "https://frgdr.headshed.dev/"
-            - name: FORGEJO__repository__DEFAULT_BRANCH
-              value: "main"
-            - name: FORGEJO__server__LFS_START_SERVER
-              value: "true"
-            - name: FORGEJO__security__INSTALL_LOCK
-              value: "true"
-            - name: FORGEJO__service__DISABLE_REGISTRATION
-              value: "false"
-          ports:
-            - name: http
-              containerPort: 3000
-              protocol: TCP
-            - name: ssh
-              containerPort: 22
-          resources:
-            requests:
-              memory: "128Mi"
-              cpu: "100m"
-            limits:
-              memory: "256Mi"
-              cpu: "500m"
-          tty: true
-          volumeMounts:
-            - name: forgejo-data
-              mountPath: /data
-            # - name: forgejo-timezone
-            #   mountPath: /etc/timezone
-            # - name: forgejo-localtime
-            #   mountPath: /etc/localtime
-      volumes:
-        - name: forgejo-data
-          persistentVolumeClaim:
-            claimName: forgejo-data-pvc
-        # - name: forgejo-timezone
-        #   configMap:
-        #     name: forgejo-timezone
-        # - name: forgejo-localtime
-        #   configMap:
-        #     name: forgejo-localtime


@@ -1,24 +0,0 @@
-apiVersion: networking.k8s.io/v1
-kind: Ingress
-metadata:
-  name: tls-forgejo-ingress-http
-  namespace: forgejo
-  annotations:
-    cert-manager.io/issuer: "le-cluster-issuer-http"
-spec:
-  tls:
-    - hosts:
-        - ${APP_DOMAIN_NAME}
-      secretName: tls-frg-ingress-http
-  rules:
-    - host: ${APP_DOMAIN_NAME}
-      http:
-        paths:
-          - path: /
-            pathType: Prefix
-            backend:
-              service:
-                name: forgejo-app-service
-                port:
-                  name: web


@@ -1,17 +0,0 @@
-apiVersion: cert-manager.io/v1
-kind: Issuer
-metadata:
-  name: le-cluster-issuer-http
-  namespace: forgejo
-spec:
-  acme:
-    email: ${EMAIL}
-    # We use the staging server here for testing to avoid throttling.
-    server: https://acme-staging-v02.api.letsencrypt.org/directory
-    # server: https://acme-v02.api.letsencrypt.org/directory
-    privateKeySecretRef:
-      name: http-issuer-account-key
-    solvers:
-      - http01:
-          ingress:
-            class: traefik
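Because this issuer points at the Let's Encrypt staging endpoint, the certificates it produces are not browser-trusted; issuance progress can be followed through the standard cert-manager resources — a sketch (resource names vary per cluster):

# Sketch: watch ACME issuance in the forgejo namespace (standard cert-manager kinds).
kubectl -n forgejo get issuer,certificate,certificaterequest,order,challenge
# Detailed events for the certificate backing the ingress TLS secret:
kubectl -n forgejo describe certificate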


@@ -1,26 +0,0 @@
-apiVersion: v1
-kind: PersistentVolume
-metadata:
-  name: forgejo-local-pv
-spec:
-  capacity:
-    storage: 3Gi
-  accessModes:
-    - ReadWriteOnce
-  hostPath:
-    path: /mnt/disks/app-data/forgejo
-  storageClassName: local-path
----
-apiVersion: v1
-kind: PersistentVolumeClaim
-metadata:
-  name: forgejo-data-pvc
-  namespace: forgejo
-spec:
-  accessModes:
-    - ReadWriteOnce
-  resources:
-    requests:
-      storage: 3Gi
-  volumeName: forgejo-local-pv
-  storageClassName: local-path


@@ -1,13 +0,0 @@
-apiVersion: v1
-kind: Service
-metadata:
-  name: forgejo-app-service
-  namespace: forgejo
-spec:
-  selector:
-    app: forgejo-app
-  ports:
-    - name: web
-      protocol: TCP
-      port: 3000
-      targetPort: 3000


@@ -19,12 +19,15 @@ resource "google_compute_instance" "k3s" {
    }
  }

-  // ensures that the instance is a Spot VM
-  // means it can be preempted, but it's cheaper
-  # scheduling {
-  #   automatic_restart = false
-  #   provisioning_model = "SPOT"
-  #   preemptible = true
-  # }
+  // Configuration to be a Spot Instance, to reduce costs
+  scheduling {
+    automatic_restart = true
+  }
+
+  # scheduling {
+  #   preemptible = false
+  #   automatic_restart = true
+  #   provisioning_model = "SPOT"
+  #   instance_termination_action = "STOP"
+  # }

  // attach a disk for K3S
@@ -77,9 +80,67 @@ resource "google_compute_disk" "app_data_disk" {
   zone = var.zone
 }
 
+// Outputs
+// load balancer ....
+
+# resource "google_compute_health_check" "http_health_check" {
+#   name                = "http-health-check"
+#   check_interval_sec  = 5
+#   timeout_sec         = 5
+#   healthy_threshold   = 2
+#   unhealthy_threshold = 2
+#   http_health_check {
+#     port = 80
+#   }
+# }
+
+resource "google_compute_http_health_check" "http_health_check" {
+  name                = "http-health-check"
+  request_path        = "/"
+  port                = 80
+  check_interval_sec  = 5
+  timeout_sec         = 5
+  healthy_threshold   = 2
+  unhealthy_threshold = 2
+}
+
+# resource "google_compute_target_pool" "k3s_pool" {
+#   name          = "k3s-target-pool"
+#   instances     = [google_compute_instance.k3s.self_link]
+#   health_checks = [google_compute_health_check.http_health_check.self_link]
+# }
+
+resource "google_compute_target_pool" "k3s_pool" {
+  name          = "k3s-target-pool"
+  instances     = [google_compute_instance.k3s.self_link]
+  health_checks = [google_compute_http_health_check.http_health_check.self_link]
+}
+
+resource "google_compute_forwarding_rule" "http_forwarding_rule" {
+  name                  = "http-forwarding-rule"
+  target                = google_compute_target_pool.k3s_pool.self_link
+  port_range            = "80"
+  ip_protocol           = "TCP"
+  load_balancing_scheme = "EXTERNAL"
+}
+
+resource "google_compute_forwarding_rule" "https_forwarding_rule" {
+  name                  = "https-forwarding-rule"
+  target                = google_compute_target_pool.k3s_pool.self_link
+  port_range            = "443"
+  ip_protocol           = "TCP"
+  load_balancing_scheme = "EXTERNAL"
+}
+
 // ----------------------------------
 data "google_project" "project" {
   project_id = var.project_name # Use variable from tfvars
 }
@@ -93,3 +154,7 @@ output "k3s_vm_public_ip" {
   description = "Ephemeral public IP of the k3s VM"
 }
+
+output "load_balancer_ip" {
+  value       = google_compute_forwarding_rule.http_forwarding_rule.ip_address
+  description = "External IP address of the load balancer (HTTP)"
+}
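After an apply, the new output can be read back and the same path the health check polls ("/" on port 80) probed by hand — a sketch, assuming the OpenTofu CLI the scripts in this changeset invoke as `tofu`:

# Sketch: read the load balancer IP output and probe it.
LB_IP=$(tofu output -raw load_balancer_ip)
echo "Load balancer IP: ${LB_IP}"
curl -sS --max-time 5 -o /dev/null -w '%{http_code}\n' "http://${LB_IP}/"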


@@ -4,10 +4,6 @@ terraform {
       source  = "hashicorp/google"
       version = "~> 4.0"
     }
-    # cloudflare = {
-    #   source  = "cloudflare/cloudflare"
-    #   version = "~> 5"
-    # }
   }
 }
@@ -20,45 +16,9 @@ provider "google" {
   project = var.project_name # Use variable from tfvars
   region  = "us-central1"    # Replace with your desired region
 }
 
 # provider "google" {
 #   credentials = file("<my-gcp-creds>.json")
 #   project     = var.project_name
 #   region      = var.region
 #   zone        = var.zone
 # }
 
-# provider "cloudflare" {
-#   api_token = var.cloudflare_api_token
-# }
-
-# variable "cloudflare_api_token" {
-#   description = "Cloudflare API token"
-#   sensitive   = true
-# }
-
-# variable "cloudflare_account_id" {
-#   description = "Cloudflare Account ID"
-#   sensitive   = true
-# }
-
-# variable "cloudflare_zone_id" {
-#   description = "Cloudflare Zone ID"
-#   sensitive   = true
-# }
-
-# variable "cloudflare_domain" {
-#   description = "Cloudflare Domain"
-#   sensitive   = true
-# }
-
-# resource "cloudflare_dns_record" "frgdr" {
-#   zone_id = var.cloudflare_zone_id
-#   name    = "frgdr"
-#   content = google_compute_instance.k3s.network_interface[0].access_config[0].nat_ip
-#   type    = "A"
-#   ttl     = 300
-#   proxied = false
-#   comment = "Application domain record"
-# }


@@ -1,56 +0,0 @@
-[
-  {
-    "name": "run pre-flight checks",
-    "function": "RunCommand",
-    "params": [
-      "./gcloud/tf/scripts/pre-flight-checks.sh"
-    ],
-    "retryCount": 0,
-    "shouldAbort": true
-  },
-  {
-    "name": "list gcloud infrastructure",
-    "function": "RunCommand",
-    "params": [
-      "./gcloud/tf/scripts/list_gloud_infra.sh"
-    ],
-    "retryCount": 0,
-    "shouldAbort": true
-  },
-  {
-    "name": "create tfvars",
-    "function": "RunCommand",
-    "params": [
-      "./gcloud/tf/scripts/create_tfvars.sh"
-    ],
-    "retryCount": 0,
-    "shouldAbort": true
-  },
-  {
-    "name": "run tofu",
-    "function": "RunCommand",
-    "params": [
-      "./gcloud/tf/scripts/run_tofu.sh"
-    ],
-    "retryCount": 0,
-    "shouldAbort": true
-  },
-  {
-    "name": "wait for user input to continue",
-    "function": "RunCommand",
-    "params": [
-      "./gcloud/tf/scripts/wait_for_user_input_dns.sh"
-    ],
-    "retryCount": 0,
-    "shouldAbort": true
-  },
-  {
-    "name": "copy .env to k3s-vm-1",
-    "function": "RunCommand",
-    "params": [
-      "gcloud/tf/scripts/copy_env_to_first_node.sh"
-    ],
-    "retryCount": 0,
-    "shouldAbort": true
-  }
-]


@@ -3,7 +3,7 @@
     "name": "run pre-flight checks",
     "function": "RunCommand",
     "params": [
-      "./gcloud/tf/scripts/pre-flight-checks.sh"
+      "./scripts/pre-flight-checks.sh"
     ],
     "retryCount": 0,
     "shouldAbort": true
@@ -12,16 +12,7 @@
     "name": "list gcloud infrastructure",
     "function": "RunCommand",
     "params": [
-      "./gcloud/tf/scripts/list_gloud_infra.sh"
-    ],
-    "retryCount": 0,
-    "shouldAbort": true
-  },
-  {
-    "name": "create tfvars",
-    "function": "RunCommand",
-    "params": [
-      "./gcloud/tf/scripts/create_tfvars.sh"
+      "./scripts/list_gloud_infra.sh"
     ],
     "retryCount": 0,
     "shouldAbort": true
@@ -30,16 +21,7 @@
     "name": "run tofu",
     "function": "RunCommand",
     "params": [
-      "./gcloud/tf/scripts/run_tofu.sh"
-    ],
-    "retryCount": 0,
-    "shouldAbort": true
-  },
-  {
-    "name": "copy .env to k3s-vm-1",
-    "function": "RunCommand",
-    "params": [
-      "gcloud/tf/scripts/copy_env_to_first_node.sh"
+      "./scripts/run_tofu.sh"
     ],
     "retryCount": 0,
     "shouldAbort": true


@@ -1,34 +0,0 @@
-#!/usr/bin/env bash
-
-if kubectl -n cert-manager get pods 2>/dev/null | grep -q 'Running'; then
-    echo "cert-manager pods already running. Skipping installation."
-    exit 0
-fi
-
-kubectl apply -f https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml
-
-echo "Waiting for cert-manager pods to be in 'Running' state..."
-MAX_RETRIES=10
-RETRY=0
-
-while [ $RETRY -lt $MAX_RETRIES ]; do
-    NOT_READY_PODS=$(kubectl -n cert-manager get pods --no-headers | grep -v 'Running' | wc -l)
-    if [ "$NOT_READY_PODS" -eq 0 ]; then
-        echo "All cert-manager pods are running."
-        break
-    else
-        echo "$NOT_READY_PODS pods are not ready yet. Waiting..."
-        RETRY=$((RETRY + 1))
-        sleep 5
-    fi
-done
-
-if [ "$NOT_READY_PODS" -ne 0 ]; then
-    echo "Failed to get all cert-manager pods running after $MAX_RETRIES attempts."
-    exit 1
-fi
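The retry loop above can be collapsed into a single `kubectl wait` — the same pattern the MetalLB script at the end of this diff adopts; a sketch:

# Sketch: equivalent readiness gate using kubectl wait instead of a polling loop.
kubectl -n cert-manager wait --for=condition=Ready pod --all --timeout=120s \
    || { echo "cert-manager pods not ready in time"; exit 1; }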


@@ -1,31 +0,0 @@
-#!/usr/bin/env bash
-
-source .env
-
-for i in {1..10}; do
-    # Check if the instance is running
-    INSTANCE_STATUS=$(gcloud compute instances describe k3s-vm-1 --zone=us-central1-a --project="$PROJECT_NAME" --format='get(status)')
-    if [[ "$INSTANCE_STATUS" != "RUNNING" ]]; then
-        echo "Instance k3s-vm-1 is not running. Attempt $i/10. Waiting 5 seconds..."
-        sleep 5
-        continue
-    fi
-    # Check if the directory exists on the remote host
-    if gcloud compute ssh k3s-vm-1 --zone=us-central1-a --project="$PROJECT_NAME" --command="test -d /opt/src/infctl-cli/"; then
-        echo "/opt/src/infctl-cli/ exists on k3s-vm-1."
-        break
-    else
-        echo "/opt/src/infctl-cli/ does not exist yet. Attempt $i/10. Waiting 5 seconds..."
-        sleep 5
-    fi
-done
-
-# Final check after loop
-if ! gcloud compute ssh k3s-vm-1 --zone=us-central1-a --project="$PROJECT_NAME" --command="test -d /opt/src/infctl-cli/"; then
-    echo "ERROR: /opt/src/infctl-cli/ does not exist on k3s-vm-1 after 10 attempts. Exiting."
-    exit 1
-fi
-
-gcloud compute scp .env k3s-vm-1:/opt/src/infctl-cli/.env --zone=us-central1-a --project=$PROJECT_NAME


@@ -1,32 +0,0 @@
-#!/bin/bash
-set -a
-
-# read environment variables from .env file
-# for value $PROJECT_NAME
-. .env
-
-# Check if PROJECT_NAME environment variable is set
-if [ -z "$PROJECT_NAME" ]; then
-    echo "Error: PROJECT_NAME environment variable is not set."
-    echo "Please set the PROJECT_NAME variable and try again."
-    exit 1
-fi
-
-# Get the directory where the script is located
-SCRIPT_DIR="$(dirname "$(readlink -f "$0")")"
-cd "$SCRIPT_DIR" || { echo "Failed to change directory to $SCRIPT_DIR"; exit 1; }
-
-# Define the template file path and output file path
-TEMPLATE_FILE="../terraform.tfvars.template"
-OUTPUT_FILE="../terraform.tfvars"
-
-# Use envsubst to substitute the PROJECT_NAME variable into the template
-envsubst < "$TEMPLATE_FILE" > "$OUTPUT_FILE"
-if [ $? -ne 0 ]; then
-    echo "Error: Failed to substitute variables in the template."
-    exit 1
-fi
-
-echo "tfvars has been created at $OUTPUT_FILE"


@@ -1,30 +0,0 @@
-#!/bin/bash
-set -a
-
-# read environment variables from .env file
-# for value of APP_DOMAIN_NAME
-. .env
-
-if [ -z "$APP_DOMAIN_NAME" ]; then
-    echo "Error: APP_DOMAIN_NAME environment variable is not set. Please set it in the .env file."
-    exit 1
-fi
-
-# Get the directory where the script is located
-SCRIPT_DIR="$(dirname "$(readlink -f "$0")")"
-cd "$SCRIPT_DIR" || { echo "Failed to change directory to $SCRIPT_DIR"; exit 1; }
-
-# Define the template file path and output file path
-TEMPLATE_FILE="../../k3s/forgejo/ingress.yaml.template"
-OUTPUT_FILE="../../k3s/forgejo/ingress.yaml"
-
-# Use envsubst to substitute the APP_DOMAIN_NAME variable into the template
-envsubst < "$TEMPLATE_FILE" > "$OUTPUT_FILE"
-if [ $? -ne 0 ]; then
-    echo "Error: Failed to substitute variables in the template."
-    exit 1
-fi
-
-echo "Ingress configuration has been created at $OUTPUT_FILE"


@@ -1,33 +0,0 @@
-#!/bin/bash
-set -a
-
-# read environment variables from .env file
-# for value of EMAIL
-. .env
-
-# Check if EMAIL environment variable is set
-if [ -z "$EMAIL" ]; then
-    echo "Error: EMAIL environment variable is not set."
-    echo "Please set the EMAIL variable and try again."
-    exit 1
-fi
-
-# Get the directory where the script is located
-SCRIPT_DIR="$(dirname "$(readlink -f "$0")")"
-cd "$SCRIPT_DIR" || { echo "Failed to change directory to $SCRIPT_DIR"; exit 1; }
-
-# Define the template file path and output file path
-TEMPLATE_FILE="../../k3s/forgejo/issuer.yaml.template"
-OUTPUT_FILE="../../k3s/forgejo/issuer.yaml"
-
-# Use envsubst to substitute the EMAIL variable into the template
-envsubst < "$TEMPLATE_FILE" > "$OUTPUT_FILE"
-if [ $? -ne 0 ]; then
-    echo "Error: Failed to substitute variables in the template."
-    exit 1
-fi
-
-echo "Issuer configuration has been created at $OUTPUT_FILE"


@@ -1,45 +0,0 @@
-#!/bin/bash
-set -e
-
-echo "Installing Forgejo"
-
-# Get the directory where the script is located
-SCRIPT_DIR="$(dirname "$(readlink -f "$0")")"
-
-# Define namespace
-NAMESPACE="forgejo"
-MANIFESTS_DIR="${SCRIPT_DIR}/../../k3s/forgejo"
-
-echo "Creating namespace..."
-if ! kubectl get namespace "${NAMESPACE}" >/dev/null 2>&1; then
-    kubectl create namespace "${NAMESPACE}"
-else
-    echo "Namespace '${NAMESPACE}' already exists."
-fi
-
-echo "Creating PersistentVolumeClaim..."
-kubectl apply -f ${MANIFESTS_DIR}/pvc.yaml
-
-echo "Creating Service..."
-kubectl apply -f ${MANIFESTS_DIR}/service.yaml
-
-echo "Creating Deployment..."
-kubectl apply -f ${MANIFESTS_DIR}/deployment.yaml
-
-echo "Creating Certificate Issuer..."
-kubectl apply -f ${MANIFESTS_DIR}/issuer.yaml
-
-echo "Creating Ingress..."
-kubectl apply -f ${MANIFESTS_DIR}/ingress.yaml
-
-echo "Forgejo installation complete."
-echo "Verify deployment with: kubectl -n ${NAMESPACE} get pods,svc,ingress,pvc"
-
-exit;
-
-# Note: The ingressTCP.yaml is for a different application (galene) and should be applied separately
-# echo "Note: The ingressTCP.yaml is for the galene application and has not been applied."


@@ -1,47 +0,0 @@
-[
-  {
-    "name": "install cert-manager",
-    "function": "RunCommand",
-    "params": [
-      "gcloud/tf/scripts/cert-manager/install_cert-manager.sh"
-    ],
-    "retryCount": 0,
-    "shouldAbort": true
-  },
-  {
-    "name": "install traefik",
-    "function": "RunCommand",
-    "params": [
-      "gcloud/tf/scripts/install_traefik.sh"
-    ],
-    "retryCount": 0,
-    "shouldAbort": true
-  },
-  {
-    "name": "create forgejo ingress",
-    "function": "RunCommand",
-    "params": [
-      "./gcloud/tf/scripts/forgejo/create_ingress.sh"
-    ],
-    "retryCount": 0,
-    "shouldAbort": true
-  },
-  {
-    "name": "create forgejo issuer",
-    "function": "RunCommand",
-    "params": [
-      "./gcloud/tf/scripts/forgejo/create_issuer.sh"
-    ],
-    "retryCount": 0,
-    "shouldAbort": true
-  },
-  {
-    "name": "install forgejo",
-    "function": "RunCommand",
-    "params": [
-      "./gcloud/tf/scripts/forgejo/install_forgejo.sh"
-    ],
-    "retryCount": 0,
-    "shouldAbort": true
-  }
-]

gcloud/tf/scripts/install_traefik.sh Executable file → Normal file

@@ -1,11 +1,8 @@
 #!/bin/bash
-# Redirect all output to a log file for reliability
-exec > /tmp/startup.log 2>&1
 
 INFCTL_GIT_REPO="https://codeberg.org/headshed/infctl-cli.git"
-INFCTL_GIT_REPO_BRANCH="main"
-INFCTL_INSTALL_DIR="/opt/src"
+INFCTL_GIT_REPO_BRANCH="feature/gcloud-k3s"
+INFCTL_INSTALL_DIR="/opt/infctl-cli"
 
 # ensure only run once
 if [[ -f /etc/startup_was_launched ]]; then exit 0; fi
@@ -103,32 +100,3 @@ if [[ ! -d "$INFCTL_INSTALL_DIR" ]]; then
     chown -R user:user "$INFCTL_INSTALL_DIR"
 fi
 
-for i in {1..100}; do
-    if [[ -f /opt/src/infctl-cli/.env ]]; then
-        echo ".env file found."
-        break
-    else
-        echo ".env file not found. Attempt $i/100. Waiting 5 seconds..."
-        sleep 5
-    fi
-done
-
-# Final check after loop
-if [[ ! -f /opt/src/infctl-cli/.env ]]; then
-    echo "ERROR: .env file not found after 10 attempts. Exiting."
-    exit 1
-fi
-
-# load .env file
-source /opt/src/infctl-cli/.env
-
-cd $INFCTL_INSTALL_DIR/infctl-cli || "echo 'Failed to change directory to $INFCTL_INSTALL_DIR/infctl-cli' ; exit 1"
-
-# check to see if INSTALL_FORGEJO is set to "true"
-if [[ "$INSTALL_FORGEJO" == "true" ]]; then
-    # install forgejo using infctl
-    # ....
-    export KUBECONFIG=/etc/rancher/k3s/k3s.yaml
-    LOG_FORMAT=none infctl -f "${INFCTL_INSTALL_DIR}/infctl-cli/gcloud/tf/scripts/install-forgejo-pipeline.json"
-    touch /etc/forgejo_was_installed
-fi


@@ -47,19 +47,4 @@ echo
 kubectl version --client
 echo
 
-echo "🧪 checking we have envsubst insatalled..."
-if ! command -v envsubst &> /dev/null
-then
-    echo "❌ envsubst could not be found, please install it first"
-    echo
-    echo "on ubuntu you can install it with: sudo apt-get install -y gettext-base"
-    echo
-    exit 1
-fi
-echo "✅ envsubst is installed,..."
-echo
-envsubst --version
-echo
-
-echo "✅ Pre-flight checks passed. You are ready to proceed 🙂"
-echo


@@ -1,12 +1,5 @@
 #!/usr/bin/env bash
 
-# Get the directory where the script is located
-SCRIPT_DIR="$(dirname "$(readlink -f "$0")")"
-cd "$SCRIPT_DIR" || { echo "Failed to change directory to $SCRIPT_DIR"; exit 1; }
-
-TF_DIR="../"
-cd "$TF_DIR" || { echo "Failed to change directory to $TF_DIR"; exit 1; }
-
 if [[ -d ".terraform" && -f ".terraform.lock.hcl" ]]; then
     echo "✅ Terraform already initialized"
     # tofu init


@@ -1,11 +0,0 @@
-#!/usr/bin/env bash
-
-echo "Please configure DNS using the IP address from the previous stage."
-echo "you have 120 seconds."
-
-for i in {120..1}; do
-    echo -ne "Time remaining: $i seconds\r"
-    sleep 1
-done
-echo ""
-exit 0


@@ -1,13 +0,0 @@
-// Your GCP project name
-// it will be refererred as the project id
-// in Google Cloud
-// ----------------------------------
-project_name = "${PROJECT_NAME}"
-
-// where to deploy to
-// region
-region = "us-central1"
-zone = "us-central1-a"
-
-// application name
-app_name = "${PROJECT_NAME}-k3s-cluster"
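This template is rendered by create_tfvars.sh above via envsubst; the same step done by hand looks like this (placeholder project name):

# Sketch: render the template manually; "my-project" is a placeholder.
PROJECT_NAME=my-project envsubst < terraform.tfvars.template > terraform.tfvars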


@@ -1,29 +1,20 @@
 #!/usr/bin/env bash
 
-for i in {1..5}; do
-    echo "working ..."
-    sleep 0.5
-done
-sleep 2
-echo "not working ..."
-sleep 1
-figlet "boom"
-sleep 1
-figlet "bang"
-sleep 2
-echo "oh dear, oh my..."
-sleep 1
-figlet "Houston, we have a problem"
+# sleep 5
+echo "crash"
+# sleep 1
+echo "bang"
+# sleep 2
+echo "wallop"
+# sleep 1
+echo "Houston, we have a problem"
 sleep 1


@@ -1,29 +1,20 @@
 #!/usr/bin/env bash
 
-for i in {1..5}; do
-    echo "working ..."
-    sleep 0.5
-done
-sleep 2
-echo "still working ..."
-sleep 1
-figlet "bish"
-sleep 1
-figlet "bash"
-sleep 2
-figlet "bosh"
-sleep 1
-figlet "LOVELY JUBBLY"
+# sleep 5
+echo "bish"
+# sleep 1
+echo "bash"
+# sleep 2
+echo "bosh"
+# sleep 1
+echo "lovely jubbly"
 sleep 1


@@ -117,11 +117,7 @@ Vagrant.configure("2") do |config|
       vb.cpus = 1
     end
 
-    ws.vm.provision "shell",
-      path: "ansible/provision_workstation.sh",
-      env: {
-        "INSTALL_LONGHORN" => ENV['INSTALL_LONGHORN'] || "false"
-      }
+    ws.vm.provision "shell", path: "ansible/provision_workstation.sh"
   end


@@ -1,16 +0,0 @@
----
-- name: Install longhorn using infctl
-  hosts: localhost
-  become: true
-  become_user: vagrant
-  serial: 1 # Ensure tasks are executed one host at a time
-  vars_files:
-    - vars.yaml
-
-  tasks:
-    - name: run infctl longhorn pipeline
-      ansible.builtin.command: >
-        bash -c 'cd /home/vagrant && LOG_FILE=/tmp/longhorn_log.txt LOG_FORMAT=basic infctl -f pipelines/vagrant-longhorn.json'
-      register: longhorn_result
-      ignore_errors: false


@@ -1,16 +0,0 @@
----
-- name: Install metallb using infctl
-  hosts: localhost
-  become: true
-  become_user: vagrant
-  serial: 1 # Ensure tasks are executed one host at a time
-  vars_files:
-    - vars.yaml
-
-  tasks:
-    - name: run ======== infctl metallb pipeline
-      ansible.builtin.command: >
-        bash -c 'cd /home/vagrant && LOG_FILE=/tmp/metallb_log.txt LOG_FORMAT=basic infctl -f ./pipelines/vagrant-metallb.json'
-      register: metallb_result
-      ignore_errors: false


@@ -1,20 +0,0 @@
----
-- name: Install traefik using infctl
-  hosts: localhost
-  become: true
-  become_user: vagrant
-  serial: 1 # Ensure tasks are executed one host at a time
-  vars_files:
-    - vars.yaml
-
-  tasks:
-    - name: run infctl traefik pipeline
-      ansible.builtin.command: infctl -f pipelines/vagrant-ingress.json
-      args:
-        chdir: /home/vagrant
-      environment:
-        LOG_FILE: /tmp/traefik_log.txt
-        LOG_FORMAT: none
-      register: traefik_result
-      ignore_errors: false


@@ -4,8 +4,7 @@
 sudo apt-get update
 sudo apt-get install -y software-properties-common git vim python3.10-venv jq figlet
 
-# shellcheck disable=SC1091
-source /vagrant/.envrc
+source /vagrant/.envrc
 
 # Set up ansible environment for vagrant user
 sudo -u vagrant mkdir -p /home/vagrant/.ansible
@@ -25,10 +24,10 @@ sudo chmod +x /home/vagrant/pipelines/*.sh
 # Copy the Vagrant private keys (these will be synced by Vagrant)
 for i in {1..3}; do
-    sudo -u vagrant cp "/vagrant/.vagrant/machines/vm$i/virtualbox/private_key" "/home/vagrant/.ssh/vm${i}_key"
-    sudo -u root cp "/vagrant/.vagrant/machines/vm$i/virtualbox/private_key" "/root/.ssh/vm${i}_key"
-    sudo chmod 600 "/home/vagrant/.ssh/vm${i}_key"
-    sudo chmod 600 "/root/.ssh/vm${i}_key"
+    sudo -u vagrant cp /vagrant/.vagrant/machines/vm$i/virtualbox/private_key /home/vagrant/.ssh/vm${i}_key
+    sudo -u root cp /vagrant/.vagrant/machines/vm$i/virtualbox/private_key /root/.ssh/vm${i}_key
+    sudo chmod 600 /home/vagrant/.ssh/vm${i}_key
+    sudo chmod 600 /root/.ssh/vm${i}_key
 done
 
 # Disable host key checking for easier learning
@@ -47,17 +46,18 @@ cd "$ANSIBLE_DIR" || {
 if [ ! -d "venv" ]; then
     echo "Creating Python virtual environment in ./venv..."
     python3 -m venv venv
-    # shellcheck disable=SC1091
-    if ! source "venv/bin/activate"; then
+    source "venv/bin/activate"
+    if [ $? -ne 0 ]; then
        echo "Failed to activate virtual environment. Please check your Python installation."
        exit 1
    fi
    echo "Virtual environment created and activated."
-    cp "/vagrant/ansible/requirements.txt" .
+    cp /vagrant/ansible/requirements.txt .
    if [ -f "requirements.txt" ]; then
        echo "Installing dependencies from requirements.txt..."
        pip install --upgrade pip
-        if ! pip install -r requirements.txt; then
+        pip install -r requirements.txt
+        if [ $? -ne 0 ]; then
            echo "Failed to install dependencies from requirements.txt."
            exit 1
        fi
@@ -76,13 +76,7 @@ ls -al "$ANSIBLE_VENV_DIR/bin/activate"
 if [ -d "$ANSIBLE_VENV_DIR" ]; then
    echo "Activating Ansible virtual environment..."
-    if [ -f "$ANSIBLE_VENV_DIR/bin/activate" ]; then
-        # shellcheck source=/dev/null
-        source "$ANSIBLE_VENV_DIR/bin/activate"
-    else
-        echo "Virtualenv activate script not found!" >&2
-        exit 1
-    fi
+    source "$ANSIBLE_VENV_DIR/bin/activate"
 else
    echo "Ansible virtual environment not found at $ANSIBLE_VENV_DIR. Please create it before running this script."
    exit 1
@@ -92,13 +86,13 @@ echo ""
 ansible --version
 
-if ! ansible --version; then
+if [ $? -ne 0 ]; then
    echo "Ansible is not installed or not found in the virtual environment. Please check your installation."
    exit 1
 fi
 
-eval "$(ssh-agent -s)"
+eval `ssh-agent -s`
 ssh-add # ~/machines/*/virtualbox/private_key
 
 BASHRC="/home/vagrant/.bashrc"
@@ -109,11 +103,10 @@ if ! grep -qF "$BLOCK_START" "$BASHRC"; then
    cat <<'EOF' >> "$BASHRC"
 
 # ADDED BY infctl provisioning
-eval "$(ssh-agent -s)"
+eval `ssh-agent -s`
 ssh-add ~/machines/*/virtualbox/private_key
 ssh-add -L
-# shellcheck disable=SC1091
-source /vagrant/.envrc
+source /vagrant/.envrc
 EOF
 else
    echo "Provisioning block already present in $BASHRC"
@@ -121,7 +114,7 @@ fi
 echo
 echo -------------------------
 echo
 
 su - vagrant
 id
@@ -132,63 +125,48 @@ echo
 ssh-add ~/.ssh/vm*_key
 
-if ! ANSIBLE_SUPPRESS_INTERPRETER_DISCOVERY_WARNING=1 ANSIBLE_HOST_KEY_CHECKING=False ansible --inventory-file /home/vagrant/ansible/ansible_inventory.ini -m ping vm1,vm2,vm3; then
+ANSIBLE_SUPPRESS_INTERPRETER_DISCOVERY_WARNING=1 ANSIBLE_HOST_KEY_CHECKING=False ansible --inventory-file /home/vagrant/ansible/ansible_inventory.ini -m ping vm1,vm2,vm3
+if [ $? -ne 0 ]; then
    echo "Ansible ping failed. Please check your Vagrant VMs and network configuration."
    exit 1
 fi
 
 # install_keepalived.yaml
-if ! ANSIBLE_SUPPRESS_INTERPRETER_DISCOVERY_WARNING=1 ANSIBLE_HOST_KEY_CHECKING=False ansible-playbook install_keepalived.yaml --inventory-file ansible_inventory.ini; then
+ANSIBLE_SUPPRESS_INTERPRETER_DISCOVERY_WARNING=1 ANSIBLE_HOST_KEY_CHECKING=False ansible-playbook install_keepalived.yaml --inventory-file ansible_inventory.ini
+if [ $? -ne 0 ]; then
    echo "Ansible playbook failed. Please check your Vagrant VMs and network configuration."
    exit 1
 fi
 echo "Keepalived installation completed."
 
 # install_k3s_3node.yaml
-if ! ANSIBLE_SUPPRESS_INTERPRETER_DISCOVERY_WARNING=1 ANSIBLE_HOST_KEY_CHECKING=False ansible-playbook install_k3s_3node.yaml --inventory-file ansible_inventory.ini; then
+ANSIBLE_SUPPRESS_INTERPRETER_DISCOVERY_WARNING=1 ANSIBLE_HOST_KEY_CHECKING=False ansible-playbook install_k3s_3node.yaml --inventory-file ansible_inventory.ini
+if [ $? -ne 0 ]; then
    echo "Ansible playbook failed. Please check your Vagrant VMs and network configuration."
    exit 1
 fi
 
 # copy_k8s_config.yaml
-if ! ANSIBLE_SUPPRESS_INTERPRETER_DISCOVERY_WARNING=1 ANSIBLE_HOST_KEY_CHECKING=False ansible-playbook copy_k8s_config.yaml --inventory-file ansible_inventory.ini; then
+ANSIBLE_SUPPRESS_INTERPRETER_DISCOVERY_WARNING=1 ANSIBLE_HOST_KEY_CHECKING=False ansible-playbook copy_k8s_config.yaml --inventory-file ansible_inventory.ini
+if [ $? -ne 0 ]; then
    echo "Ansible playbook failed. Please check your Vagrant VMs and network configuration."
    exit 1
 fi
 
-if ! ANSIBLE_SUPPRESS_INTERPRETER_DISCOVERY_WARNING=1 ANSIBLE_HOST_KEY_CHECKING=False ansible-playbook install_dnsmasq.yaml --inventory-file ansible_inventory.ini; then
+ANSIBLE_SUPPRESS_INTERPRETER_DISCOVERY_WARNING=1 ANSIBLE_HOST_KEY_CHECKING=False ansible-playbook install_dnsmasq.yaml --inventory-file ansible_inventory.ini
+if [ $? -ne 0 ]; then
    echo "Ansible playbook failed. Please check your Vagrant VMs and network configuration."
    exit 1
 fi
 
-# Wait for Kubernetes API to be ready
-echo "Waiting for 30 seconds for Kubernetes API to be ready..."
-sleep 30
-echo "done waiting for kubernetes API"
-
 # check infctl
-cd /home/vagrant || exit
-if ! bash /home/vagrant/scripts/check_install_infctl.sh; then
+cd /home/vagrant
+bash /home/vagrant/scripts/check_install_infctl.sh
+if [ $? -ne 0 ]; then
    echo "infctl check failed. Please check your installation."
    exit 1
 fi
-
-# Optionally install Longhorn, MetalLB, and Traefik
-if [ "${INSTALL_LONGHORN}" = "true" ]; then
-    cd /home/vagrant/ansible || { echo "Failed to change directory to /home/vagrant/ansible"; exit 1; }
-    if ! ANSIBLE_SUPPRESS_INTERPRETER_DISCOVERY_WARNING=1 ANSIBLE_HOST_KEY_CHECKING=False ansible-playbook install_longhorn.yaml --inventory-file ansible_inventory.ini; then
-        echo "Ansible playbook failed. Please check your Vagrant VMs and network configuration."
-        exit 1
-    fi
-    if ! ANSIBLE_SUPPRESS_INTERPRETER_DISCOVERY_WARNING=1 ANSIBLE_HOST_KEY_CHECKING=False ansible-playbook install_metallb.yaml --inventory-file ansible_inventory.ini; then
-        echo "Ansible playbook failed. Please check your Vagrant VMs and network configuration."
-        exit 1
-    fi
-    if ! ANSIBLE_SUPPRESS_INTERPRETER_DISCOVERY_WARNING=1 ANSIBLE_HOST_KEY_CHECKING=False ansible-playbook install_traefik.yaml --inventory-file ansible_inventory.ini; then
-        echo "Ansible playbook failed. Please check your Vagrant VMs and network configuration."
-        exit 1
-    fi
-fi


@@ -12,31 +12,24 @@ if ! kubectl get deployment -n metallb-system controller &>/dev/null; then
        exit 1
    fi
 
-    echo "Waiting for MetalLB pods to be in 'Running' state..."
-    MAX_RETRIES=10
-    RETRY=0
-
-    while [ $RETRY -lt $MAX_RETRIES ]; do
-        NOT_READY_PODS=$(kubectl -n metallb-system get pods --no-headers | grep -v 'Running' | wc -l)
-        if [ "$NOT_READY_PODS" -eq 0 ]; then
-            echo "All MetalLB pods are running."
-            break
-        else
-            echo "$NOT_READY_PODS MetalLB pods are not ready yet. Waiting..."
-            RETRY=$((RETRY + 1))
-            sleep 5
-        fi
-    done
-
-    if [ "$NOT_READY_PODS" -ne 0 ]; then
-        echo "Failed to get all MetalLB pods running after $MAX_RETRIES attempts."
-        exit 1
-    fi
+    # Wait for MetalLB components to be ready
+    echo "Waiting for MetalLB components to be ready..."
+    kubectl wait --namespace metallb-system \
+        --for=condition=ready pod \
+        --selector=app=metallb \
+        --timeout=90s
 else
    echo "MetalLB is already installed."
 fi
 
+# Wait for the webhook service to be ready
+echo "Waiting for MetalLB webhook service to be ready..."
+kubectl wait --namespace metallb-system \
+    --for=condition=ready pod \
+    --selector=component=webhook \
+    --timeout=90s
+
 # Check if the IPAddressPool already exists
 if ! kubectl get ipaddresspool -n metallb-system default &>/dev/null; then
    echo "Creating MetalLB IPAddressPool..."