Add Google Cloud K3s infrastructure support

- Add Terraform configuration for GCP instance and storage
- Add startup script for K3s installation and configuration
- Add pipeline scripts for deployment and management
- Add Forgejo deployment manifests and configuration
This commit is contained in:
jon brookes 2025-09-06 19:03:55 +01:00
parent 7384722305
commit 2ab7872af1
30 changed files with 1024 additions and 324 deletions

10
gcloud/tf/Dockerfile Normal file
View file

@ -0,0 +1,10 @@
# Minimal httpbin service image: python:3.12-slim plus gunicorn.
FROM python:3.12-slim
# Install dependencies (the gunicorn WSGI server and the httpbin app);
# --no-cache-dir keeps pip's download cache out of the image layer
RUN pip install --no-cache-dir gunicorn httpbin
# Expose the application port (plain HTTP)
EXPOSE 80
# Launch the application: bind gunicorn to all interfaces on port 80
# and serve httpbin's WSGI entry point (exec form, so gunicorn is PID 1)
CMD ["gunicorn", "-b", "0.0.0.0:80", "httpbin:app"]

0
gcloud/tf/doit.tf Normal file
View file

16
gcloud/tf/firewall.tf Normal file
View file

@ -0,0 +1,16 @@
// Firewall
// ----------------------------------
// Open the standard web ports to the internet for instances that
// carry the "web" network tag.
resource "google_compute_firewall" "allow_http" {
  name    = "allow-http"
  network = "default"

  // Inbound TCP on http/https only.
  allow {
    protocol = "tcp"
    ports    = ["80", "443"]
  }

  // Any source may connect; scoped by target tag, not by address.
  source_ranges = ["0.0.0.0/0"]
  target_tags   = ["web"]
}

View file

@ -0,0 +1,68 @@
# Forgejo (self-hosted git forge) single-replica Deployment.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: forgejo-deployment
  namespace: forgejo
  labels:
    app: forgejo-app
spec:
  replicas: 1
  # Recreate (not RollingUpdate): the data PVC is ReadWriteOnce, so the
  # old pod must release the volume before the new pod can mount it.
  strategy:
    type: Recreate
  selector:
    matchLabels:
      app: forgejo-app
  template:
    metadata:
      labels:
        app: forgejo-app
    spec:
      terminationGracePeriodSeconds: 10
      containers:
        - name: forgejo
          image: codeberg.org/forgejo/forgejo:11.0.6
          imagePullPolicy: IfNotPresent
          # FORGEJO__<section>__<KEY> env vars override app.ini settings.
          env:
            - name: FORGEJO__repository__ENABLE_PUSH_CREATE_USER
              value: "true"
            - name: FORGEJO__server__ROOT_URL
              value: "https://frg.headshed.dev/"
            - name: FORGEJO__repository__DEFAULT_BRANCH
              value: "main"
            - name: FORGEJO__server__LFS_START_SERVER
              value: "true"
            # Skip the interactive install wizard on first start.
            - name: FORGEJO__security__INSTALL_LOCK
              value: "true"
            # NOTE(review): "false" leaves self-registration OPEN —
            # confirm this is intended for a publicly reachable instance.
            - name: FORGEJO__service__DISABLE_REGISTRATION
              value: "false"
          ports:
            - name: http
              containerPort: 3000
              protocol: TCP
            - name: ssh
              containerPort: 22
          resources:
            requests:
              memory: "128Mi"
              cpu: "100m"
            limits:
              memory: "256Mi"
              cpu: "500m"
          tty: true
          volumeMounts:
            # Forgejo stores repositories, config and its DB under /data.
            - name: forgejo-data
              mountPath: /data
            # - name: forgejo-timezone
            #   mountPath: /etc/timezone
            # - name: forgejo-localtime
            #   mountPath: /etc/localtime
      volumes:
        - name: forgejo-data
          persistentVolumeClaim:
            claimName: forgejo-data-pvc
        # - name: forgejo-timezone
        #   configMap:
        #     name: forgejo-timezone
        # - name: forgejo-localtime
        #   configMap:
        #     name: forgejo-localtime

View file

@ -0,0 +1,24 @@
# Forgejo HTTPS ingress (template: ${APP_DOMAIN_NAME} is substituted by
# scripts/forgejo/create_ingress.sh via envsubst).
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: tls-forgejo-ingress-http
  namespace: forgejo
  annotations:
    # cert-manager solves the ACME challenge with this HTTP-01 issuer
    # (defined in issuer.yaml.template, same namespace).
    cert-manager.io/issuer: "le-cluster-issuer-http"
spec:
  tls:
    - hosts:
        - ${APP_DOMAIN_NAME}
      # cert-manager stores the issued certificate in this secret.
      secretName: tls-frg-ingress-http
  rules:
    - host: ${APP_DOMAIN_NAME}
      http:
        paths:
          - path: /
            pathType: Prefix
            backend:
              service:
                # References the "web" named port on forgejo-app-service.
                name: forgejo-app-service
                port:
                  name: web

View file

@ -0,0 +1,17 @@
# ACME issuer for Forgejo TLS certificates (template: ${EMAIL} is
# substituted by scripts/forgejo/create_issuer.sh via envsubst).
apiVersion: cert-manager.io/v1
kind: Issuer
metadata:
  name: le-cluster-issuer-http
  namespace: forgejo
spec:
  acme:
    email: ${EMAIL}
    # We use the staging server here for testing to avoid throttling.
    # Swap in the production endpoint below once validation works —
    # staging certificates are not trusted by browsers.
    server: https://acme-staging-v02.api.letsencrypt.org/directory
    # server: https://acme-v02.api.letsencrypt.org/directory
    privateKeySecretRef:
      name: http-issuer-account-key
    solvers:
      # HTTP-01 challenge served through the traefik ingress class.
      - http01:
          ingress:
            class: traefik

View file

@ -0,0 +1,26 @@
# Statically provisioned local volume backing Forgejo's /data, plus the
# claim the Deployment mounts.
apiVersion: v1
kind: PersistentVolume
metadata:
  name: forgejo-local-pv
spec:
  capacity:
    storage: 3Gi
  accessModes:
    - ReadWriteOnce
  hostPath:
    # Lives on the attached app-data disk (mounted by the VM startup
    # script at /mnt/disks/app-data).
    path: /mnt/disks/app-data/forgejo
    # DirectoryOrCreate: the startup script mounts the disk but does not
    # create the forgejo subdirectory, so have kubelet create it on
    # first use instead of failing the mount.
    type: DirectoryOrCreate
  storageClassName: local-path
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: forgejo-data-pvc
  namespace: forgejo
spec:
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 3Gi
  # Pin the claim to the static PV above so the local-path dynamic
  # provisioner does not create a separate volume.
  volumeName: forgejo-local-pv
  storageClassName: local-path

View file

@ -0,0 +1,13 @@
# ClusterIP service exposing the Forgejo web UI/API inside the cluster.
apiVersion: v1
kind: Service
metadata:
  name: forgejo-app-service
  namespace: forgejo
spec:
  selector:
    app: forgejo-app
  ports:
    # Named "web" so the ingress can reference the port by name.
    # targetPort 3000 matches the container's http port.
    - name: web
      protocol: TCP
      port: 3000
      targetPort: 3000

95
gcloud/tf/main.tf Normal file
View file

@ -0,0 +1,95 @@
// Compute
// ----------------------------------
// The instance for K3S
resource "google_compute_instance" "k3s" {
  name         = "k3s-vm-1"
  machine_type = "e2-small" # This instance will have 2 Gb of RAM
  zone         = var.zone
  # Matched by the allow-http firewall rule (target_tags = ["web"]).
  tags = ["web"]

  // Set the boot disk and the image (10 Gb)
  boot_disk {
    initialize_params {
      image = "debian-cloud/debian-12"
      size  = 10
    }
  }

  // ensures that the instance is a Spot VM
  // means it can be preempted, but it's cheaper
  # scheduling {
  #   automatic_restart  = false
  #   provisioning_model = "SPOT"
  #   preemptible        = true
  # }

  // attach a disk for K3S
  attached_disk {
    source      = google_compute_disk.k3s_disk.id
    device_name = "k3s-disk" # appears as /dev/disk/by-id/google-k3s-disk
  }

  // attach a disk for app data
  attached_disk {
    source      = google_compute_disk.app_data_disk.id
    device_name = "app-data-disk" # appears as /dev/disk/by-id/google-app-data-disk
  }

  network_interface {
    network = "default"
    // enable ephemeral ip
    access_config {}
  }

  labels = {
    env       = var.env
    region    = var.region
    app       = var.app_name
    sensitive = "false"
  }

  # First-boot provisioning: formats/mounts the attached disks and
  # installs k3s (see scripts/k3s-vm-startup.sh).
  metadata_startup_script   = file("scripts/k3s-vm-startup.sh")
  allow_stopping_for_update = true
}

// Storage
// ----------------------------------
// The disk attached to the instance (15 Gb)
resource "google_compute_disk" "k3s_disk" {
  name = "k3s-disk"
  size = 15
  type = "pd-standard"
  zone = var.zone
}

// The disk for app data (20 Gb)
resource "google_compute_disk" "app_data_disk" {
  name = "app-data-disk"
  size = 20
  type = "pd-standard"
  zone = var.zone
}

// Outputs
// ----------------------------------
data "google_project" "project" {
  project_id = var.project_name # Use variable from tfvars
}

output "project_number" {
  value = data.google_project.project.number
}

output "k3s_vm_public_ip" {
  value       = google_compute_instance.k3s.network_interface[0].access_config[0].nat_ip
  description = "Ephemeral public IP of the k3s VM"
}

24
gcloud/tf/provider.tf Normal file
View file

@ -0,0 +1,24 @@
terraform {
  required_providers {
    google = {
      source  = "hashicorp/google"
      version = "~> 4.0"
    }
  }
}

// Provider
// ----------------------------------
// Connect to the GCP project
provider "google" {
  project = var.project_name # Use variable from tfvars
  # Use the region variable (set in terraform.tfvars) instead of a
  # hard-coded value, so the provider default agrees with resources
  # that already use var.region (e.g. the Artifact Registry repo).
  region = var.region
}

// Alternative: authenticate explicitly with a service-account key.
# provider "google" {
#   credentials = file("<my-gcp-creds>.json")
#   project     = var.project_name
#   region      = var.region
#   zone        = var.zone
# }

14
gcloud/tf/registry.tf Normal file
View file

@ -0,0 +1,14 @@
// Registry
// ----------------------------------
// The Artifact Registry repository for our app
resource "google_artifact_registry_repository" "app-repo" {
  location      = var.region
  repository_id = "app-repo"
  description   = "App Docker repository"
  format        = "DOCKER"

  docker_config {
    # Once pushed, a tag cannot be moved to a different image digest.
    immutable_tags = true
  }
}

37
gcloud/tf/remote_state.tf Normal file
View file

@ -0,0 +1,37 @@
// Remote state
// ----------------------------------
// Everything below is commented out: state is kept locally by default.
// Uncomment and fill in a bucket name to share state via GCS.
# variable "bucket_name" {
#   type        = string
#   default     = "your-project-name-k3s-bucket"
#   description = "your-project-name k3s Bucket"
# }
# terraform {
#   # Use a shared bucket (which allows collaborative work)
#   backend "gcs" {
#     bucket = "<my-bucket-for-states>"
#     prefix = "k3s-infra"
#   }
#   // Set versions
#   required_version = ">=1.8.0"
#   required_providers {
#     google = {
#       source  = "hashicorp/google"
#       version = ">=4.0.0"
#     }
#   }
# }

// The bucket where you can store other data
# resource "google_storage_bucket" "k3s-storage" {
#   name     = var.bucket_name
#   location = var.region
#   labels = {
#     env       = var.env
#     region    = var.region
#     app       = var.app_name
#     sensitive = "false"
#   }
# }

View file

@ -0,0 +1,29 @@
[
{
"name": "run pre-flight checks",
"function": "RunCommand",
"params": [
"./gcloud/tf/scripts/pre-flight-checks.sh"
],
"retryCount": 0,
"shouldAbort": true
},
{
"name": "list gcloud infrastructure",
"function": "RunCommand",
"params": [
"./gcloud/tf/scripts/list_gloud_infra.sh"
],
"retryCount": 0,
"shouldAbort": true
},
{
"name": "run tofu",
"function": "RunCommand",
"params": [
"./gcloud/tf/scripts/run_tofu.sh"
],
"retryCount": 0,
"shouldAbort": true
}
]

View file

@ -0,0 +1,11 @@
#!/usr/bin/env bash
# Install cert-manager unless it already appears to be running.

# If any pod in the cert-manager namespace reports Running, treat the
# stack as installed and bail out early (idempotent re-runs).
if kubectl -n cert-manager get pods 2>/dev/null | grep -q 'Running'
then
    echo "cert-manager pods already running. Skipping installation."
    exit 0
fi

# Apply the upstream static manifests for the pinned release.
kubectl apply -f https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml

View file

@ -0,0 +1,30 @@
#!/bin/bash
# Render the Forgejo ingress manifest from its template, substituting
# APP_DOMAIN_NAME from the caller's .env file.

# Export everything we source so envsubst can see it.
set -a

# Read environment variables from the .env file in the current working
# directory; fail with a clear message if the file is missing (the
# original sourced it unconditionally and silently continued).
if [ ! -f .env ]; then
    echo "Error: .env file not found in $(pwd). Please create one containing APP_DOMAIN_NAME."
    exit 1
fi
. .env

if [ -z "$APP_DOMAIN_NAME" ]; then
    echo "Error: APP_DOMAIN_NAME environment variable is not set. Please set it in the .env file."
    exit 1
fi

# Resolve template/output paths relative to this script's directory.
SCRIPT_DIR="$(dirname "$(readlink -f "$0")")"
cd "$SCRIPT_DIR" || { echo "Failed to change directory to $SCRIPT_DIR"; exit 1; }

TEMPLATE_FILE="../../k3s/forgejo/ingress.yaml.template"
OUTPUT_FILE="../../k3s/forgejo/ingress.yaml"

# Substitute ${APP_DOMAIN_NAME} into the template; check the command
# directly instead of testing $? afterwards.
if ! envsubst < "$TEMPLATE_FILE" > "$OUTPUT_FILE"; then
    echo "Error: Failed to substitute variables in the template."
    exit 1
fi

echo "Ingress configuration has been created at $OUTPUT_FILE"

View file

@ -0,0 +1,33 @@
#!/bin/bash
# Render the cert-manager issuer manifest from its template,
# substituting EMAIL from the caller's .env file.

# Export everything we source so envsubst can see it.
set -a

# Read environment variables from the .env file in the current working
# directory; fail with a clear message if the file is missing (the
# original sourced it unconditionally and silently continued).
if [ ! -f .env ]; then
    echo "Error: .env file not found in $(pwd). Please create one containing EMAIL."
    exit 1
fi
. .env

# Check if EMAIL environment variable is set
if [ -z "$EMAIL" ]; then
    echo "Error: EMAIL environment variable is not set."
    echo "Please set the EMAIL variable and try again."
    exit 1
fi

# Resolve template/output paths relative to this script's directory.
SCRIPT_DIR="$(dirname "$(readlink -f "$0")")"
cd "$SCRIPT_DIR" || { echo "Failed to change directory to $SCRIPT_DIR"; exit 1; }

TEMPLATE_FILE="../../k3s/forgejo/issuer.yaml.template"
OUTPUT_FILE="../../k3s/forgejo/issuer.yaml"

# Substitute ${EMAIL} into the template; check the command directly
# instead of testing $? afterwards.
if ! envsubst < "$TEMPLATE_FILE" > "$OUTPUT_FILE"; then
    echo "Error: Failed to substitute variables in the template."
    exit 1
fi

echo "Issuer configuration has been created at $OUTPUT_FILE"

View file

@ -0,0 +1,45 @@
#!/bin/bash
# Apply the Forgejo Kubernetes manifests in dependency order.
set -e

echo "Installing Forgejo"

# Resolve the manifests directory relative to this script's location.
SCRIPT_DIR="$(dirname "$(readlink -f "$0")")"
NAMESPACE="forgejo"
MANIFESTS_DIR="${SCRIPT_DIR}/../../k3s/forgejo"

echo "Creating namespace..."
if ! kubectl get namespace "${NAMESPACE}" >/dev/null 2>&1; then
    kubectl create namespace "${NAMESPACE}"
else
    echo "Namespace '${NAMESPACE}' already exists."
fi

# Paths are quoted so a checkout under a space-containing directory
# cannot break word splitting (the originals were unquoted).
echo "Creating PersistentVolumeClaim..."
kubectl apply -f "${MANIFESTS_DIR}/pvc.yaml"

echo "Creating Service..."
kubectl apply -f "${MANIFESTS_DIR}/service.yaml"

echo "Creating Deployment..."
kubectl apply -f "${MANIFESTS_DIR}/deployment.yaml"

echo "Creating Certificate Issuer..."
kubectl apply -f "${MANIFESTS_DIR}/issuer.yaml"

echo "Creating Ingress..."
kubectl apply -f "${MANIFESTS_DIR}/ingress.yaml"

echo "Forgejo installation complete."
echo "Verify deployment with: kubectl -n ${NAMESPACE} get pods,svc,ingress,pvc"
exit 0

# Note: The ingressTCP.yaml is for a different application (galene) and should be applied separately
# echo "Note: The ingressTCP.yaml is for the galene application and has not been applied."

View file

@ -0,0 +1,47 @@
[
{
"name": "install cert-manager",
"function": "RunCommand",
"params": [
"gcloud/tf/scripts/cert-manager/install_cert-manager.sh"
],
"retryCount": 0,
"shouldAbort": true
},
{
"name": "install traefik",
"function": "RunCommand",
"params": [
"gcloud/tf/scripts/install_traefik.sh"
],
"retryCount": 0,
"shouldAbort": true
},
{
"name": "create forgejo ingress",
"function": "RunCommand",
"params": [
"./gcloud/tf/scripts/forgejo/create_ingress.sh"
],
"retryCount": 0,
"shouldAbort": true
},
{
"name": "create forgejo issuer",
"function": "RunCommand",
"params": [
"./gcloud/tf/scripts/forgejo/create_issuer.sh"
],
"retryCount": 0,
"shouldAbort": true
},
{
"name": "install forgejo",
"function": "RunCommand",
"params": [
"./gcloud/tf/scripts/forgejo/install_forgejo.sh"
],
"retryCount": 0,
"shouldAbort": true
}
]

View file

@ -0,0 +1,64 @@
#!/usr/bin/env bash
# Install or upgrade Traefik via helm, binding host ports 80/443.
# Exit immediately if a command exits with a non-zero status.
set -e

# Render the chart values to a temp file; remove it on any exit so
# repeated runs do not leak files in /tmp (the original never deleted it).
TMPFILE=$(mktemp /tmp/traefik-values-XXXXXX.yaml)
trap 'rm -f "$TMPFILE"' EXIT

cat > "$TMPFILE" <<EOF
ingressClass:
  enabled: true
  isDefaultClass: true
ports:
  web:
    port: 80
    hostPort: 80
  websecure:
    port: 443
    hostPort: 443
  traefik:
    port: 9000
api:
  dashboard: true
  insecure: true
ingressRoute:
  dashboard:
    enabled: true
ping: true
log:
  level: INFO
service:
  enabled: true
  type: ClusterIP
  annotations: {}
  ports:
    web:
      port: 80
      protocol: TCP
      targetPort: web
    websecure:
      port: 443
      protocol: TCP
      targetPort: websecure
EOF

# Upgrade in place when a release already exists, otherwise install.
if helm status traefik --namespace traefik &> /dev/null; then
    echo "Traefik is already installed in the 'traefik' namespace. Upgrading..."
    helm upgrade traefik traefik/traefik --namespace traefik -f "$TMPFILE"
else
    echo "Installing Traefik..."
    helm repo add traefik https://traefik.github.io/charts
    helm repo update
    # Using --create-namespace is good practice, though traefik will always exist.
    helm install traefik traefik/traefik --namespace traefik --create-namespace -f "$TMPFILE"
fi

# echo
# echo "To access the dashboard:"
# echo "kubectl port-forward -n traefik \$(kubectl get pods -n traefik -l \"app.kubernetes.io/name=traefik\" -o name) 9000:9000"
# echo "Then visit http://localhost:9000/dashboard/ in your browser"

View file

@ -0,0 +1,102 @@
#!/bin/bash
# First-boot provisioning for the GCE k3s VM (runs as root via
# metadata_startup_script): formats and mounts the attached disks,
# installs base tooling, helm, a pinned k3s, and the infctl CLI.

INFCTL_GIT_REPO="https://codeberg.org/headshed/infctl-cli.git"
INFCTL_GIT_REPO_BRANCH="feature/gcloud-k3s"
INFCTL_INSTALL_DIR="/opt/src"

# Ensure this only runs once: GCE re-executes the startup script on
# every boot, so drop a marker file on first run.
if [[ -f /etc/startup_was_launched ]]; then exit 0; fi
touch /etc/startup_was_launched

# Format the k3s disk if not already mounted.
# ext4 with no reserved root space, forced, fully-initialized inode
# tables and journal, and discard/TRIM enabled for SSD/thin storage.
if ! lsblk | grep -q "/var/lib/rancher/k3s"; then
    mkfs.ext4 -m 0 -F -E lazy_itable_init=0,lazy_journal_init=0,discard /dev/disk/by-id/google-k3s-disk
    mkdir -p /var/lib/rancher/k3s
    mount -o discard,defaults /dev/disk/by-id/google-k3s-disk /var/lib/rancher/k3s
    chmod a+w /var/lib/rancher/k3s
fi
# A disk named k3s-disk in the Terraform configuration appears as
# /dev/disk/by-id/google-k3s-disk.

# Format the app-data-disk if not already mounted.
if ! lsblk | grep -q "/mnt/disks/app-data"; then
    mkfs.ext4 -m 0 -F -E lazy_itable_init=0,lazy_journal_init=0,discard /dev/disk/by-id/google-app-data-disk
    mkdir -p /mnt/disks/app-data
    mount -o discard,defaults /dev/disk/by-id/google-app-data-disk /mnt/disks/app-data
    chmod a+w /mnt/disks/app-data
fi
# Similarly, a disk named app-data-disk appears as
# /dev/disk/by-id/google-app-data-disk.

# Add to /etc/fstab for persistence across reboots (only if not already present).
if ! grep -q "/var/lib/rancher/k3s" /etc/fstab; then
    echo "/dev/disk/by-id/google-k3s-disk /var/lib/rancher/k3s ext4 defaults,discard 0 0" >> /etc/fstab
fi
if ! grep -q "/mnt/disks/app-data" /etc/fstab; then
    echo "/dev/disk/by-id/google-app-data-disk /mnt/disks/app-data ext4 defaults,discard 0 0" >> /etc/fstab
fi

# Base packages.
apt update
apt install -y ncdu htop git curl

# helm install via the upstream installer script.
curl -fsSL -o get_helm.sh https://raw.githubusercontent.com/helm/helm/main/scripts/get-helm-3
chmod 700 get_helm.sh
/bin/bash get_helm.sh

# Shell conveniences for the 'user' account.
# NOTE(review): assumes a login account literally named 'user' exists on
# the image — confirm against the instance configuration.
rc=/home/user/.bashrc
{
    echo "export KUBECONFIG=~/.kube/config"
    echo "alias l='ls -lah'"
    echo "alias ll='ls -lh'"
    # FIX: 'alias k' was defined twice in the original; keep one.
    echo "alias k='kubectl'"
    echo "export dry='--dry-run=client'"
    echo "export o='-oyaml'"
    echo "alias kcd='kubectl config use-context'"
    echo "source <(kubectl completion bash)"
    echo "complete -F __start_kubectl k"
} >> $rc

# Install a pinned k3s version; traefik and servicelb are disabled
# because traefik is installed separately via helm (with host ports).
k3s_version="v1.32.8+k3s1"
curl -sfL https://get.k3s.io \
    | \
    INSTALL_K3S_VERSION="$k3s_version" sh -s - server \
    --cluster-init \
    --disable traefik \
    --disable servicelb

# Set up kubeconfig for the 'user' user
mkdir -p /home/user/.kube
chown user:user /home/user/.kube
chmod 700 /home/user/.kube
# Copy the kubeconfig file to the user's home directory for easier access
cp /etc/rancher/k3s/k3s.yaml /home/user/.kube/config
chown user:user /home/user/.kube/config

# install infctl
curl -L https://codeberg.org/headshed/infctl-cli/raw/branch/main/install.sh | bash

# clone infctl repo if not already present
if [[ ! -d "$INFCTL_INSTALL_DIR" ]]; then
    mkdir -p "$INFCTL_INSTALL_DIR"
    # BUG FIX: the original error fallbacks were quoted strings
    # ("echo '...' ; exit 1") which bash tries to execute as a single
    # command name — they neither printed nor exited. Use command groups.
    cd "$INFCTL_INSTALL_DIR" || { echo "Failed to change directory to $INFCTL_INSTALL_DIR"; exit 1; }
    git clone --branch "$INFCTL_GIT_REPO_BRANCH" "$INFCTL_GIT_REPO" || { echo "Failed to clone $INFCTL_GIT_REPO"; exit 1; }
    chown -R user:user "$INFCTL_INSTALL_DIR"
fi

View file

@ -0,0 +1,16 @@
#!/usr/bin/env bash
# List the project's GCP resources: instances, disks, firewall rules,
# and storage buckets.

# PROJECT_NAME must be provided by the .env file in the current directory.
. .env
if [ -z "$PROJECT_NAME" ]; then
    echo "❌ PROJECT_NAME is not set. Please add PROJECT_NAME=<your_project_name> to your .env file before running this script."
    exit 1
fi

# Run each listing in turn, stopping at the first failure; report an
# authentication hint if any command in the chain failed.
if ! { gcloud compute instances list --project="$PROJECT_NAME" &&
       gcloud compute disks list --project="$PROJECT_NAME" &&
       gcloud compute firewall-rules list --project="$PROJECT_NAME" &&
       gcloud storage buckets list --project="$PROJECT_NAME"; }; then
    echo "❌ gcloud is not authenticated, please run 'gcloud auth login' first"
    echo
    exit 1
fi

View file

@ -0,0 +1,65 @@
#!/usr/bin/env bash
# Pre-flight checks: verify every CLI tool the pipeline needs is on
# PATH, printing install hints when one is missing.
# (Also fixes the "insatalled" typo repeated in the original messages.)

# require_tool NAME HINT...
# Checks that NAME resolves via command -v; on failure prints each HINT
# line (followed by a blank line) and exits 1.
require_tool() {
    local tool="$1"
    shift
    echo "🧪 checking we have ${tool} installed..."
    if ! command -v "$tool" &> /dev/null; then
        echo "❌ ${tool} could not be found, please install it first"
        echo
        local hint
        for hint in "$@"; do
            echo "$hint"
            echo
        done
        exit 1
    fi
    echo "✅ ${tool} is installed,..."
    echo
}

require_tool tofu \
    "see https://opentofu.org/docs/intro/install/standalone/" \
    "and https://opentofu.org/docs/intro/install/ for more details"
tofu version
echo

require_tool gcloud "see https://cloud.google.com/sdk/docs/install"
gcloud version
echo

require_tool kubectl "see https://kubernetes.io/docs/tasks/tools/install-kubectl-linux/"
kubectl version --client
echo

require_tool envsubst "on ubuntu you can install it with: sudo apt-get install -y gettext-base"
envsubst --version
echo

echo "✅ Pre-flight checks passed. You are ready to proceed 🙂"
echo

29
gcloud/tf/scripts/run_tofu.sh Executable file
View file

@ -0,0 +1,29 @@
#!/usr/bin/env bash
# Initialize (if needed) and apply the OpenTofu configuration located
# one directory above this script.

# Resolve paths relative to this script's location.
SCRIPT_DIR="$(dirname "$(readlink -f "$0")")"
cd "$SCRIPT_DIR" || { echo "Failed to change directory to $SCRIPT_DIR"; exit 1; }

TF_DIR="../"
cd "$TF_DIR" || { echo "Failed to change directory to $TF_DIR"; exit 1; }

# Only run `tofu init` when the working directory is not initialized.
# Check its exit status directly: the original tested $? after the whole
# if/else, which on the already-initialized branch only saw echo's status.
if [[ -d ".terraform" && -f ".terraform.lock.hcl" ]]; then
    echo "✅ Terraform already initialized"
else
    echo "⚠️ Initializing Terraform..."
    if ! tofu init; then
        echo "❌ tofu init failed, please check the output above"
        exit 1
    fi
fi

# tofu apply with auto-approve to make it non-interactive
if ! tofu apply -auto-approve; then
    echo "❌ tofu apply failed, please check the output above"
    exit 1
fi

View file

@ -0,0 +1,14 @@
// Your GCP project name
// it will be referred to as the project id
// in Google Cloud
// ----------------------------------
// (fixed typos: "refererred" and "gpc" in the placeholder)
project_name = "<your gcp project name>"

// application name
app_name = "your-projects-k3s-cluster"

// where to deploy to
// region and zone
region = "us-central1"
zone   = "us-central1-a"

28
gcloud/tf/vars.tf Normal file
View file

@ -0,0 +1,28 @@
// Env vars
// ----------------------------------

// Added a description for consistency with the other variables.
variable "project_name" {
  type        = string
  description = "GCP project name (used as the project id)"
}

variable "env" {
  type        = string
  default     = "dev"
  description = "Environment"
}

variable "region" {
  type        = string
  description = "GCP Region"
}

variable "zone" {
  type        = string
  description = "GCP Zone"
}

variable "app_name" {
  type        = string
  description = "Application name"
}