chore/test-local-dev-builds (#2)
Tested initial dev deployment to local k3d.

Reviewed-on: https://codeberg.org/headshed/infctl-cli/pulls/2
Co-authored-by: jon brookes <jon@headshed.dev>
Co-committed-by: jon brookes <jon@headshed.dev>
Commit e64666340a (parent 506142ccd7)
35 changed files with 2625 additions and 334 deletions
@@ -1,9 +0,0 @@
#!/usr/bin/env bash

output=$(kubectl -n postgres-operator get pods --selector=postgres-operator.crunchydata.com/control-plane=postgres-operator --field-selector=status.phase=Running 2>&1)
if echo "$output" | grep -iq 'running'; then
  echo "At least one pod is running."
else
  echo "No running pods found."
  exit 1
fi
@@ -1,28 +0,0 @@
#!/usr/bin/env bash

if kubectl get secret app-key-secret -n infctl >/dev/null 2>&1; then
  echo "Secret app-key-secret already exists in namespace infctl. Exiting."
  exit 0
fi

generate_app_key() {
  APP_KEY=$(docker run --rm \
    --entrypoint /bin/sh \
    $APP_CONTAINER \
    -c "cd /var/www && \
      cp .env.example .env && \
      php artisan key:generate --force > /dev/null 2>&1 && \
      grep 'APP_KEY' .env | sed 's/APP_KEY=//'")

  APP_KEY=$(echo "$APP_KEY" | tr -d '\r\n')
}

generate_app_key

echo "Extracted APP_KEY: $APP_KEY"

kubectl create secret generic app-key-secret \
  --from-literal=app_key="$APP_KEY" \
  -n infctl --dry-run=client -o yaml | kubectl apply -f -
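For context, the APP_KEY extracted above is stored under the app_key key of the app-key-secret secret in the infctl namespace. A small, illustrative way to verify it, and the usual shape of consuming it in a workload, neither of which appears in this diff:

# Sketch only -- not part of this commit. Read the key back to verify it round-trips:
kubectl -n infctl get secret app-key-secret -o jsonpath='{.data.app_key}' | base64 -d; echo
# A workload would typically consume it as the Laravel APP_KEY, e.g. under
# spec.template.spec.containers[].env:
#   - name: APP_KEY
#     valueFrom:
#       secretKeyRef:
#         name: app-key-secret
#         key: app_key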
@@ -1,16 +0,0 @@
#!/usr/bin/env bash

echo ""
echo ""

temp_file=$(mktemp)

kubectl -n infctl create secret generic aws-credentials -o yaml --dry-run=client \
  --from-literal access-key=$AWS_ACCESS_KEY_ID \
  --from-literal secret-key=$AWS_SECRET_ACCESS_KEY > "$temp_file"

kubectl apply -f "$temp_file"
rm "$temp_file"
@@ -1,9 +0,0 @@
#!/usr/bin/env bash

if kubectl -n cert-manager get secret cloudflare-api-token-secret &>/dev/null; then
  echo "Secret 'cloudflare-api-token-secret' already exists in 'cert-manager' namespace. Skipping."
  exit 0
fi

kubectl create secret generic cloudflare-api-token-secret --from-literal=api-token=$API_TOKEN --namespace='cert-manager'
@@ -1,15 +0,0 @@
#!/usr/bin/env bash

NS="postgres-operator"

USER=$(kubectl -n $NS get secrets ctl-pguser-ctl-controller -o jsonpath='{.data.user}' | base64 -d)
PASSWORD=$(kubectl -n $NS get secrets ctl-pguser-ctl-controller -o jsonpath='{.data.password}' | base64 -d)
HOST=$(kubectl -n $NS get secrets ctl-pguser-ctl-controller -o jsonpath='{.data.host}' | base64 -d)
PORT=$(kubectl -n $NS get secrets ctl-pguser-ctl-controller -o jsonpath='{.data.port}' | base64 -d)
DBNAME=$(kubectl -n $NS get secrets ctl-pguser-ctl-controller -o jsonpath='{.data.dbname}' | base64 -d)
PG_URI=$(kubectl -n $NS get secrets ctl-pguser-ctl-controller -o jsonpath='{.data.uri}' | base64 -d)

SECRET_YAML=$(kubectl -n infctl create secret generic pg-credentials -o yaml --dry-run=client --from-literal=username="$USER" --from-literal=password="$PASSWORD" --from-literal=host="$HOST" --from-literal=dbname="$DBNAME")

echo "$SECRET_YAML" | kubectl apply -f -
@@ -1,8 +0,0 @@
#!/usr/bin/env bash

EXAMPLES_DIR=/home/user/projects/crunchy/postgres-operator-examples

cd "$EXAMPLES_DIR" || { echo "Directory $EXAMPLES_DIR does not exist"; exit 1; }

kubectl apply -k kustomize/postgres
@@ -1,12 +0,0 @@
#!/usr/bin/env bash

EXAMPLES_DIR=/home/user/projects/crunchy/postgres-operator-examples

cd "$EXAMPLES_DIR" || { echo "Directory $EXAMPLES_DIR does not exist"; exit 1; }

pwd

# exit 1

kubectl apply -k kustomize/install/namespace
kubectl apply --server-side -k kustomize/install/default
@@ -1,7 +0,0 @@
#!/usr/bin/env bash

SCRIPT=scripts/init-data-ctl.sh

CONFIGMAP=$(kubectl -n infctl create configmap init-data-script --from-file=init-data.sh=$SCRIPT --dry-run=client -o yaml)

echo "$CONFIGMAP" | kubectl apply -f -
scripts/create_k3d_cluster.sh (new executable file, 5 lines)

@@ -0,0 +1,5 @@
#!/usr/bin/env bash

mkdir -p /mnt/data

k3d cluster create --config files/ctl/k3d_config.yml
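The cluster config itself (files/ctl/k3d_config.yml) is not part of this diff. As a rough sketch only, a k3d v5 config that would explain the mkdir -p /mnt/data step might mount that host path into the nodes, along these lines:

# Illustrative guess only: the real files/ctl/k3d_config.yml is not shown in
# this commit; just the /mnt/data host mount is inferred from the script above.
cat <<'EOF' > /tmp/k3d_config.example.yml
apiVersion: k3d.io/v1alpha5
kind: Simple
metadata:
  name: infctl-dev          # assumed cluster name
servers: 1
agents: 1
volumes:
  - volume: /mnt/data:/mnt/data
    nodeFilters:
      - server:*
      - agent:*
EOF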
@@ -1,13 +0,0 @@
#!/usr/bin/env bash

CREATE_CONFIGMAP=$(kubectl -n infctl create configmap merge-data-script --from-file=scripts/merge_data_ctl.sh --dry-run=client -o yaml)

echo "$CREATE_CONFIGMAP"

echo "$CREATE_CONFIGMAP" | kubectl -n infctl apply -f -

if [ $? -ne 0 ]; then
  echo "Failed to create or update the configmap."
  exit 1
fi
@@ -1,15 +0,0 @@
#!/usr/bin/env bash

NGINX_CONFIGMAP=$(kubectl -n infctl create configmap nginx-config --from-file files/ctl/nginx/default.conf --dry-run=client -o yaml)

if [ -z "$NGINX_CONFIGMAP" ]; then
  echo "Failed to create NGINX configmap."
  exit 1
fi

echo "$NGINX_CONFIGMAP" | kubectl apply -f -
if [ $? -ne 0 ]; then
  echo "Failed to apply NGINX configmap."
  exit 1
fi
echo "NGINX configmap created successfully."
@@ -1,14 +0,0 @@
#!/usr/bin/env bash

cat <<EOF | kubectl apply -f -
apiVersion: postgresql.cnpg.io/v1
kind: Cluster
metadata:
  name: pg-cluster
  namespace: pg-cluster

spec:
  instances: 2
  storage:
    size: 2Gi
EOF
@@ -1,21 +0,0 @@
#!/usr/bin/env bash

NS="pg-cluster"

USERNAME=$(kubectl -n $NS get secrets pg-cluster-app -o jsonpath='{.data.username}' | base64 -d)
PASSWORD=$(kubectl -n $NS get secrets pg-cluster-app -o jsonpath='{.data.password}' | base64 -d)
HOST=$(kubectl -n $NS get secrets pg-cluster-app -o jsonpath='{.data.host}' | base64 -d)
PORT=$(kubectl -n $NS get secrets pg-cluster-app -o jsonpath='{.data.port}' | base64 -d)
DBNAME=$(kubectl -n $NS get secrets pg-cluster-app -o jsonpath='{.data.dbname}' | base64 -d)
PG_URI=$(kubectl -n $NS get secrets pg-cluster-app -o jsonpath='{.data.uri}' | base64 -d)
postgres_fqdn="${HOST}.${NS}.svc.cluster.local"

echo ""
echo "This script needs to be sourced."
echo "Then run a command such as:"
echo ""
echo 'kubectl -n infctl create secret generic pg-credentials -o yaml --dry-run=client --from-literal username=$USERNAME --from-literal password=$PASSWORD --from-literal host=$postgres_fqdn --from-literal dbname=$DBNAME'
@@ -1,5 +0,0 @@
#!/usr/bin/env bash

kubectl -n infctl delete secrets redis-auth

kubectl get secret redis-auth -n redis -o yaml | sed "s/namespace: redis/namespace: infctl/" | kubectl apply -n infctl -f -
@@ -1,24 +0,0 @@
#!/usr/bin/env bash

if kubectl get secret registry-credentials -n infctl >/dev/null 2>&1; then
  echo "Secret 'registry-credentials' already exists in namespace 'infctl'. Skipping."
  exit 0
fi

echo "Container Registry Server: $SERVER"

kubectl create secret docker-registry registry-credentials \
  --docker-server=$SERVER \
  --docker-username=$USER \
  --docker-password=$PASSWORD \
  --docker-email=$EMAIL \
  -n infctl

if [ $? -ne 0 ]; then
  echo "Error: Failed to create the docker-registry secret."
  exit 1
fi

echo "Docker registry secret created successfully."
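For context, a docker-registry secret like registry-credentials is normally referenced as an imagePullSecret. A minimal sketch of the two common ways to wire it up in the infctl namespace, neither of which is shown in this diff:

# Sketch only -- not part of this commit. Either attach the secret to the
# default service account in infctl...
kubectl -n infctl patch serviceaccount default \
  -p '{"imagePullSecrets": [{"name": "registry-credentials"}]}'
# ...or reference it per workload under spec.template.spec:
#   imagePullSecrets:
#     - name: registry-credentials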
@@ -1,6 +0,0 @@
#!/usr/bin/env bash

CREATE_SMTP_CREDS=$(kubectl -n infctl create secret generic smtp-credentials -o yaml --dry-run=client --from-literal user=$SMTP_USER --from-literal password=$SMTP_PASS)

echo "$CREATE_SMTP_CREDS" | kubectl apply -f -
scripts/dev-pre-flight-checks.sh (new executable file, 23 lines)

@@ -0,0 +1,23 @@
#!/usr/bin/env bash

required_tools=("infctl" "pwgen" "kubectl" "k3d" "helm" "jq" "docker")

MISSING=false
check_required_tools() {
  for tool in "${required_tools[@]}"; do
    if ! command -v "$tool" &> /dev/null; then
      echo "Error: $tool is not installed. Please install it to continue."
      MISSING=true
    fi
  done
}

check_required_tools

if [ "$MISSING" = true ]; then
  echo "Pre-flight checks failed. Please install the missing tools and try again."
  exit 1
else
  echo "Pre-flight checks have passed."
fi
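Taken together with create_k3d_cluster.sh above, the likely intent is to gate local cluster creation on these checks. An illustrative invocation from the repository root (the ordering is an assumption and is not spelled out in this commit):

# Illustrative only -- run the pre-flight checks, then create the local cluster.
./scripts/dev-pre-flight-checks.sh || exit 1
./scripts/create_k3d_cluster.sh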
@@ -1,42 +0,0 @@
LOG_FILE="/var/log/init-data.log"
mkdir -p /var/log

echo "env variables" | tee -a "$LOG_FILE"
env | tee -a "$LOG_FILE"
ls -lirt /var/www/public | tee -a "$LOG_FILE"

mkdir -p /var/www/{public,storage,database}

# Function to log errors and continue
log_error() {
  echo "[ERROR] $1" | tee -a "$LOG_FILE"
}

# Check if public directory is empty
if [ -z "$(find /var/www/public -type f -o -type d -not -name "lost+found" -not -path "/var/www/public" 2>/dev/null)" ]; then
  echo "Public directory is empty, copying data from S3..." | tee -a "$LOG_FILE"
  aws s3 cp $S3_BUCKET/assets/public.tar /var/www/public/ 2>>"$LOG_FILE" || log_error "Failed to copy public data from S3"
else
  echo "Public directory already has data, skipping S3 copy..." | tee -a "$LOG_FILE"
fi

# Check if storage directory is empty
if [ -z "$(find /var/www/storage -type f -o -type d -not -name "lost+found" -not -path "/var/www/storage" 2>/dev/null)" ]; then
  echo "Storage directory is empty, copying data from S3..." | tee -a "$LOG_FILE"
  aws s3 cp $S3_BUCKET/assets/storage.tar /var/www/storage/ 2>>"$LOG_FILE" || log_error "Failed to copy storage data from S3"
else
  echo "Storage directory already has data, skipping S3 copy..." | tee -a "$LOG_FILE"
fi

# Check if database directory is empty
if [ -z "$(ls -A /var/www/database 2>/dev/null)" ]; then
  echo "Database directory is empty, copying data from S3..." | tee -a "$LOG_FILE"
  aws s3 cp $S3_BUCKET/assets/database.tar /var/www/database/ 2>>"$LOG_FILE" || log_error "Failed to copy database data from S3"
else
  echo "Database directory already has data, skipping S3 copy..." | tee -a "$LOG_FILE"
fi

echo "Script completed. Check $LOG_FILE for details."
@@ -1,37 +0,0 @@
#!/usr/bin/env bash

if kubectl -n cnpg-system get pods | grep cnpg &>/dev/null; then
  echo "CloudNativePG pods already running. Skipping installation."
  exit 0
fi

echo "Installing CloudNativePG..."

helm repo add cnpg https://cloudnative-pg.github.io/charts
helm upgrade --install cnpg \
  --namespace cnpg-system \
  --set config.clusterWide=true \
  --skip-crds \
  --force \
  cnpg/cloudnative-pg

# had to do this:
# kubectl get mutatingwebhookconfiguration,validatingwebhookconfiguration,crd -A | grep cnpg
# Delete conflicting resources (if safe to do so):
# kubectl delete <anything in the above list>

# kubectl apply -f https://raw.githubusercontent.com/cloudnative-pg/cloudnative-pg/release-1.17/releases/cnpg-1.17.5.yaml
# kubectl patch configmap cnpg-config -n cnpg-system --type merge -p '{"data":{"config":"clusterWide: true"}}'
@@ -1,9 +0,0 @@
#!/usr/bin/env bash

# Check if there are any pods in the longhorn-system namespace
if kubectl -n longhorn-system get pods --no-headers 2>/dev/null | grep -q '^[^ ]'; then
  echo "Pods already exist in the longhorn-system namespace. Skipping installation."
  exit 0
fi

kubectl apply -f https://raw.githubusercontent.com/longhorn/longhorn/v1.8.1/deploy/longhorn.yaml
@@ -1,10 +1,13 @@
#!/usr/bin/env bash

-if kubectl -n traefik get pods --no-headers 2>/dev/null | grep -q 'Running'; then
-  echo "Traefik is already running in the 'traefik' namespace. Upgrading instead."
+if kubectl -n kube-system get pods --no-headers 2>/dev/null | grep -q 'traefik'; then
+  echo "Traefik is already running in the 'kube-system' namespace. Upgrading instead."

  # Create a temporary values file for more complex configuration
  cat > /tmp/traefik-values.yaml <<EOF
ingressClass:
  enabled: true
  isDefaultClass: true
ports:
  web:
    port: 80
@@ -58,7 +61,7 @@ service:
    targetPort: turn-udp
EOF

-helm upgrade traefik traefik/traefik --namespace traefik -f /tmp/traefik-values.yaml
+helm upgrade traefik traefik/traefik --namespace kube-system -f /tmp/traefik-values.yaml

else
  echo "Installing Traefik..."
@@ -68,6 +71,9 @@ else

  # Create a temporary values file for more complex configuration
  cat > /tmp/traefik-values.yaml <<EOF
ingressClass:
  enabled: true
  isDefaultClass: true
ports:
  web:
    port: 80
@@ -121,48 +127,48 @@ service:
    targetPort: turn-udp
EOF

-helm install traefik traefik/traefik --namespace traefik --create-namespace -f /tmp/traefik-values.yaml
+helm install traefik traefik/traefik --namespace kube-system -f /tmp/traefik-values.yaml
fi

-cat > traefik-turn-service.yaml << EOF
-apiVersion: v1
-kind: Service
-metadata:
-  name: traefik-turn
-  namespace: traefik
-  labels:
-    app.kubernetes.io/instance: traefik-traefik
-    app.kubernetes.io/name: traefik
-spec:
-  type: LoadBalancer
-  ports:
-    - name: turn-tcp
-      port: 1194
-      protocol: TCP
-      targetPort: turn-tcp
-    - name: turn-udp
-      port: 1194
-      protocol: UDP
-      targetPort: turn-udp
-  selector:
-    app.kubernetes.io/instance: traefik-traefik
-    app.kubernetes.io/name: traefik
-EOF
+# cat > traefik-turn-service.yaml << EOF
+# apiVersion: v1
+# kind: Service
+# metadata:
+#   name: traefik-turn
+#   namespace: kube-system
+#   labels:
+#     app.kubernetes.io/instance: traefik-traefik
+#     app.kubernetes.io/name: traefik
+# spec:
+#   type: LoadBalancer
+#   ports:
+#     - name: turn-tcp
+#       port: 1194
+#       protocol: TCP
+#       targetPort: turn-tcp
+#     - name: turn-udp
+#       port: 1194
+#       protocol: UDP
+#       targetPort: turn-udp
+#   selector:
+#     app.kubernetes.io/instance: traefik-traefik
+#     app.kubernetes.io/name: traefik
+# EOF

-kubectl apply -f traefik-turn-service.yaml
+# kubectl apply -f traefik-turn-service.yaml

-rm -f traefik-turn-service.yaml
+# rm -f traefik-turn-service.yaml

-echo "Don't forget to create TCP and UDP ingress routes for the TURN server with:"
-echo "kubectl apply -f k8s-manifests/galene/ingressroute-tcp.yaml"
-echo "kubectl apply -f k8s-manifests/galene/ingressroute-udp.yaml"
-echo ""
+# echo "Don't forget to create TCP and UDP ingress routes for the TURN server with:"
+# echo "kubectl apply -f k8s-manifests/galene/ingressroute-tcp.yaml"
+# echo "kubectl apply -f k8s-manifests/galene/ingressroute-udp.yaml"
+# echo ""
echo "To access the dashboard:"
-echo "kubectl port-forward -n traefik \$(kubectl get pods -n traefik -l \"app.kubernetes.io/name=traefik\" -o name) 9000:9000"
+echo "kubectl port-forward -n kube-system \$(kubectl get pods -n kube-system -l \"app.kubernetes.io/name=traefik\" -o name) 9000:9000"
echo "Then visit http://localhost:9000/dashboard/ in your browser"
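The net effect of this change is that the Helm release now targets the Traefik already present in kube-system (presumably the bundled k3s/k3d Traefik) rather than a separate traefik namespace, with the TURN service and ingress-route steps commented out. A quick, illustrative way to confirm what is actually running before the upgrade, not part of the script itself:

# Illustrative check only -- confirm Traefik lives in kube-system on the local cluster.
kubectl -n kube-system get pods -l app.kubernetes.io/name=traefik
helm list -n kube-system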
@@ -1,5 +1,10 @@
#!/usr/bin/env bash

+if ! command -v pwgen &> /dev/null; then
+  echo "Error: 'pwgen' command not found. Please install it (e.g., 'sudo apt install pwgen') and try again."
+  exit 1
+fi
+
NAMESPACE=redis
REDIS_SECRET=redis-auth