Tested local dev k3d single-node cluster with local storage; removed redundant scripts
This commit is contained in: parent 506142ccd7, commit 62ab3f88cd
34 changed files with 2602 additions and 334 deletions

.gitignore (vendored): 2 changes
@@ -12,5 +12,5 @@ scripts/galene/groups/
scripts/galene/data/
.envrc
.vscode
*.json
bin
deleted

@@ -1,6 +1,6 @@
# INFCTL CLI

-A command-line tool for automated deployment and management of an MVK (Minimal Viable Kubernetes) infrastrucure. The CLI orchestrates Kubernetes deployments by executing shell scripts and applying Kubernetes manifests through a JSON-defined pipeline approach.
+A command-line tool for automated deployment and management of an [MVK (Minimal Viable Kubernetes) infrastructure](https://mvk.headshed.dev/). The CLI orchestrates Kubernetes deployments by executing shell scripts and applying Kubernetes manifests through a JSON-defined pipeline approach.

## Table of Contents
@@ -337,6 +337,4 @@ The CLI uses structured JSON logging. Debug logs are enabled by default and incl
## License

This project is licensed under the GNU General Public License v3.0. See the [LICENSE](./LICENSE) file for details.
-## License
-This project is licensed under the GNU General Public License v3.0. See the [LICENSE](./LICENSE) file for details.

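Context for the README change above: the "JSON-defined pipeline approach" is implemented by the pipeline files added in this commit (pipelines/dev/infctl.json, pipelines/infctl-ns.json), where each pipeline is a JSON array of steps carrying a name, a function such as RunCommand or k8sNamespaceExists, its params, a retryCount and a shouldAbort flag. A minimal sketch of such a pipeline, assuming a hypothetical step that runs the new scripts/create_k3d_cluster.sh (this exact step does not appear in any pipeline file in the commit):

```json
[
  {
    "name": "create local k3d cluster",
    "function": "RunCommand",
    "params": ["./scripts/create_k3d_cluster.sh"],
    "retryCount": 0,
    "shouldAbort": true
  }
]
```
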
@@ -188,7 +188,7 @@ func (app *AppState) SetUpNewCustomer() error {

	steps := app.getPipeline()
	app.runPipeline(steps)
-	slog.Info(fmt.Sprintln("🎉 Customer setup complete!"))
+	slog.Info(fmt.Sprintln("🎉 Pipeline setup complete!"))

	return nil
}
@@ -65,7 +65,7 @@ func k8sCreateNamespace(project string) error {
}

func RunCommand(command string) error {
-	slog.Debug(fmt.Sprintf("🐞 Running script command: %s", command))
+	slog.Debug(fmt.Sprintf("🐞 Running command: %s", command))
	cmd := exec.Command("sh", "-c", command)
	var stdout, stderr bytes.Buffer
	cmd.Stdout = &stdout
@@ -78,8 +78,8 @@ func RunCommand(command string) error {
		return fmt.Errorf("failed to run script command: %w", err)
	}
	output := stdout.String()
-	slog.Debug(fmt.Sprintf("RunCommand command executed successfully: %s", command))
-	slog.Debug(fmt.Sprintf("RunCommand command output: %s", output))
+	slog.Debug(fmt.Sprintf("RunCommand executed successfully: %s", command))
+	slog.Debug(fmt.Sprintf("RunCommand output: %s", output))

	return nil
}

files/ctl/k3d_config.yml (new file, 14 lines)
@@ -0,0 +1,14 @@
apiVersion: k3d.io/v1alpha4
kind: Simple
metadata:
  name: mycluster
servers: 1
agents: 0
options:
  k3s:
    extraArgs:
      - arg: "--kubelet-arg=--kube-reserved=cpu=2,memory=4Gi"
        nodeFilters:
          - all
volumes:
  - volume: /mnt/data:/mnt/data

k8s-manifests/ctl-metallb/kustomization.yaml (new file, 4 lines)
@@ -0,0 +1,4 @@
namespace: metallb-system

resources:
  - github.com/metallb/metallb/config/native?ref=v0.14.9

k8s-manifests/ctl-redis/kustomization.yaml (new file, 5 lines)
@@ -0,0 +1,5 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
  - redis-configmap.yaml
  - redis.yaml

k8s-manifests/ctl-redis/redis-configmap.yaml (new file, 2291 lines)
File diff suppressed because it is too large.

k8s-manifests/ctl-redis/redis.yaml (new file, 90 lines)
@@ -0,0 +1,90 @@
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: redis
  namespace: redis
spec:
  serviceName: redis
  replicas: 1
  selector:
    matchLabels:
      app: redis
  template:
    metadata:
      labels:
        app: redis
    spec:
      initContainers:
        - name: config
          image: redis:7.0.10-alpine
          command: [ "sh", "-c" ]
          args:
            - |
              cp /tmp/redis/redis.conf /etc/redis/redis.conf

              echo "setting up standalone redis..."
              MASTER_FDQN=`hostname -f | sed -e 's/redis-[0-9]\./redis-0./'`

              # Inject the password from the mounted secret
              echo "masterauth $REDIS_PASSWORD" >> /etc/redis/redis.conf
              echo "requirepass $REDIS_PASSWORD" >> /etc/redis/redis.conf

              if [ "$(hostname)" = "redis-0" ]; then
                echo "this is redis-0, not updating config..."
              else
                echo "updating redis.conf..."
                echo "replicaof $MASTER_FDQN 6379" >> /etc/redis/redis.conf
              fi
          env:
            - name: REDIS_PASSWORD
              valueFrom:
                secretKeyRef:
                  name: redis-auth
                  key: password
          volumeMounts:
            - name: redis-config
              mountPath: /etc/redis/
            - name: config
              mountPath: /tmp/redis/
      containers:
        - name: redis
          image: redis:7.0.10-alpine
          command: ["redis-server"]
          args: ["/etc/redis/redis.conf"]
          ports:
            - containerPort: 6379
              name: redis
          volumeMounts:
            - name: data
              mountPath: /data
            - name: redis-config
              mountPath: /etc/redis/
      volumes:
        - name: redis-config
          emptyDir: {}
        - name: config
          configMap:
            name: redis-config
  volumeClaimTemplates:
    - metadata:
        name: data
      spec:
        accessModes: [ "ReadWriteOnce" ]
        storageClassName: "local-storage"
        resources:
          requests:
            storage: 64Mi
---
apiVersion: v1
kind: Service
metadata:
  name: redis
  namespace: redis
spec:
  clusterIP: None
  ports:
    - port: 6379
      targetPort: 6379
      name: redis
  selector:
    app: redis

k8s-manifests/ctl/local-storage.yml (new file, 28 lines)
@@ -0,0 +1,28 @@
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: local-storage
provisioner: kubernetes.io/no-provisioner
volumeBindingMode: WaitForFirstConsumer
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: local-pv
spec:
  capacity:
    storage: 10Gi
  accessModes:
    - ReadWriteOnce
  persistentVolumeReclaimPolicy: Retain
  storageClassName: local-storage
  local:
    path: /mnt/data
  nodeAffinity:
    required:
      nodeSelectorTerms:
        - matchExpressions:
            - key: kubernetes.io/hostname
              operator: In
              values:
                - k3d-mycluster-server-0

@@ -7,7 +7,7 @@
    "shouldAbort": true
  },
  {
-    "name": "run inf redis secret",
+    "name": "create php configmap",
    "function": "RunCommand",
    "params": ["./scripts/create_php_configmap_ctl.sh"],
    "retryCount": 0,

pipelines/dev/infctl.json (new file, 65 lines)
@@ -0,0 +1,65 @@
[
  {
    "name": "create php configmap",
    "function": "RunCommand",
    "params": [
      "./scripts/create_php_configmap_ctl.sh"
    ],
    "retryCount": 0,
    "shouldAbort": true
  },
  {
    "name": "create redis secret",
    "function": "RunCommand",
    "params": [
      "./scripts/redis_secret.sh"
    ],
    "retryCount": 0,
    "shouldAbort": true
  },
  {
    "name": "run metallb kustomize",
    "function": "RunCommand",
    "params": [
      "kubectl apply -k k8s-manifests/ctl-metallb"
    ],
    "retryCount": 0,
    "shouldAbort": true
  },
  {
    "name": "configure local storage",
    "function": "RunCommand",
    "params": [
      "kubectl apply -f k8s-manifests/ctl/local-storage.yml"
    ],
    "retryCount": 3,
    "shouldAbort": true
  },
  {
    "name": "run cert-manager installation",
    "function": "RunCommand",
    "params": [
      "scripts/install_cert-manager.sh"
    ],
    "retryCount": 3,
    "shouldAbort": true
  },
  {
    "name": "run non-resilant redis kustomize",
    "function": "RunCommand",
    "params": [
      "kubectl apply -k k8s-manifests/ctl-redis"
    ],
    "retryCount": 0,
    "shouldAbort": true
  },
  {
    "name": "run traefik installation",
    "function": "RunCommand",
    "params": [
      "scripts/install_traefik.sh"
    ],
    "retryCount": 1,
    "shouldAbort": true
  }
]

pipelines/infctl-ns.json (new file, 47 lines)
@@ -0,0 +1,47 @@
[
  {
    "name": "ensure infctl namespace exists",
    "function": "k8sNamespaceExists",
    "params": [
      "infctl"
    ],
    "retryCount": 0,
    "shouldAbort": true
  },
  {
    "name": "ensure redis namespace exists",
    "function": "k8sNamespaceExists",
    "params": [
      "redis"
    ],
    "retryCount": 0,
    "shouldAbort": true
  },
  {
    "name": "ensure metallb-system namespace exists",
    "function": "k8sNamespaceExists",
    "params": [
      "metallb-system"
    ],
    "retryCount": 0,
    "shouldAbort": true
  },
  {
    "name": "ensure longhorn-system namespace exists",
    "function": "k8sNamespaceExists",
    "params": [
      "longhorn-system"
    ],
    "retryCount": 0,
    "shouldAbort": true
  },
  {
    "name": "ensure cert-manager namespace exists",
    "function": "k8sNamespaceExists",
    "params": [
      "cert-manager"
    ],
    "retryCount": 0,
    "shouldAbort": true
  }
]

@@ -1,9 +0,0 @@
#!/usr/bin/env bash

output=$(kubectl -n postgres-operator get pods --selector=postgres-operator.crunchydata.com/control-plane=postgres-operator --field-selector=status.phase=Running 2>&1)
if echo "$output" | grep -iq 'running'; then
  echo "At least one pod is running."
else
  echo "No running pods found."
  exit 1
fi

@@ -1,28 +0,0 @@
#!/usr/bin/env bash

if kubectl get secret app-key-secret -n infctl >/dev/null 2>&1; then
  echo "Secret app-key-secret already exists in namespace infctl. Exiting."
  exit 0
fi

generate_app_key() {
  APP_KEY=$(docker run --rm \
    --entrypoint /bin/sh \
    $APP_CONTAINER \
    -c "cd /var/www && \
        cp .env.example .env && \
        php artisan key:generate --force > /dev/null 2>&1 && \
        grep 'APP_KEY' .env | sed 's/APP_KEY=//'")

  APP_KEY=$(echo "$APP_KEY" | tr -d '\r\n')

}

generate_app_key

echo "Extracted APP_KEY: $APP_KEY"

kubectl create secret generic app-key-secret \
  --from-literal=app_key="$APP_KEY" \
  -n infctl --dry-run=client -o yaml | kubectl apply -f -

@@ -1,16 +0,0 @@
#!/usr/bin/env bash

echo ""
echo ""

temp_file=$(mktemp)

kubectl -n infctl create secret generic aws-credentials -o yaml --dry-run=client \
  --from-literal access-key=$AWS_ACCESS_KEY_ID \
  --from-literal secret-key=$AWS_SECRET_ACCESS_KEY > "$temp_file"

kubectl apply -f $temp_file
rm $temp_file

@@ -1,9 +0,0 @@
#!/usr/bin/env bash

if kubectl -n cert-manager get secret cloudflare-api-token-secret &>/dev/null; then
  echo "Secret 'cloudflare-api-token-secret' already exists in 'cert-manager' namespace. Skipping."
  exit 0
fi

kubectl create secret generic cloudflare-api-token-secret --from-literal=api-token=$API_TOKEN --namespace='cert-manager'

@@ -1,15 +0,0 @@
#!/usr/bin/env bash

NS="postgres-operator"

USER=$(kubectl -n $NS get secrets ctl-pguser-ctl-controller -o jsonpath='{.data.user}' | base64 -d)
PASSWORD=$(kubectl -n $NS get secrets ctl-pguser-ctl-controller -o jsonpath='{.data.password}' | base64 -d)
HOST=$(kubectl -n $NS get secrets ctl-pguser-ctl-controller -o jsonpath='{.data.host}' | base64 -d)
PORT=$(kubectl -n $NS get secrets ctl-pguser-ctl-controller -o jsonpath='{.data.port}' | base64 -d)
DBNAME=$(kubectl -n $NS get secrets ctl-pguser-ctl-controller -o jsonpath='{.data.dbname}' | base64 -d)
PG_URI=$(kubectl -n $NS get secrets ctl-pguser-ctl-controller -o jsonpath='{.data.uri}' | base64 -d)

SECRET_YAML=$(kubectl -n infctl create secret generic pg-credentials -o yaml --dry-run=client --from-literal=username="$USER" --from-literal=password="$PASSWORD" --from-literal=host="$HOST" --from-literal=dbname="$DBNAME")

echo "$SECRET_YAML" | kubectl apply -f -

@@ -1,8 +0,0 @@
#!/usr/bin/env bash

EXAMOPLES_DIR=/home/user/projects/crunchy/postgres-operator-examples

cd $EXAMOPLES_DIR # || echo "Directory $EXAMOPLES_DIR does not exist" && exit 1

kubectl apply -k kustomize/postgres

@@ -1,12 +0,0 @@
#!/usr/bin/env bash

EXAMOPLES_DIR=/home/user/projects/crunchy/postgres-operator-examples

cd $EXAMOPLES_DIR # || echo "Directory $EXAMOPLES_DIR does not exist" && exit 1

pwd

# exit 1

kubectl apply -k kustomize/install/namespace
kubectl apply --server-side -k kustomize/install/default

@@ -1,7 +0,0 @@
#!/usr/bin/env bash

SCRIPT=scripts/init-data-ctl.sh

CONFIGMAP=$(kubectl -n infctl create configmap init-data-script --from-file=init-data.sh=$SCRIPT --dry-run=client -o yaml)

echo "$CONFIGMAP" | kubectl apply -f -

scripts/create_k3d_cluster.sh (new executable file, 5 lines)
@@ -0,0 +1,5 @@
#!/usr/bin/env bash

mkdir -p /mnt/data
k3d cluster create --config files/ctl/k3d_config.yml

@@ -1,13 +0,0 @@
#!/usr/bin/env bash

CREATE_CONFIGMAP=$(kubectl -n infctl create configmap merge-data-script --from-file=scripts/merge_data_ctl.sh --dry-run=client -o yaml)

echo $CREATE_CONFIGMAP

echo "$CREATE_CONFIGMAP" | kubectl -n infctl apply -f -

if [ $? -ne 0 ]; then
  echo "Failed to create or update the configmap."
  exit 1
fi

@@ -1,15 +0,0 @@
#!/usr/bin/env bash

NGINX_CONFIGMAP=$(kubectl -n infctl create configmap nginx-config --from-file files/ctl/nginx/default.conf --dry-run=client -oyaml)

if [ -z "$NGINX_CONFIGMAP" ]; then
  echo "Failed to create NGINX configmap."
  exit 1
fi

echo "$NGINX_CONFIGMAP" | kubectl apply -f -
if [ $? -ne 0 ]; then
  echo "Failed to apply NGINX configmap."
  exit 1
fi
echo "NGINX configmap created successfully."

@@ -1,14 +0,0 @@
#!/usr/bin/env bash

cat <<EOF | kubectl apply -f -
apiVersion: postgresql.cnpg.io/v1
kind: Cluster
metadata:
  name: pg-cluster
  namespace: pg-cluster

spec:
  instances: 2
  storage:
    size: 2Gi
EOF

@@ -1,21 +0,0 @@
#!/usr/bin/env bash

NS="pg-cluster"

USERNAME=$(kubectl -n $NS get secrets pg-cluster-app -o jsonpath='{.data.username}' | base64 -d)
PASSWORD=$(kubectl -n $NS get secrets pg-cluster-app -o jsonpath='{.data.password}' | base64 -d)
HOST=$(kubectl -n $NS get secrets pg-cluster-app -o jsonpath='{.data.host}' | base64 -d)
PORT=$(kubectl -n $NS get secrets pg-cluster-app -o jsonpath='{.data.port}' | base64 -d)
DBNAME=$(kubectl -n $NS get secrets pg-cluster-app -o jsonpath='{.data.dbname}' | base64 -d)
PG_URI=$(kubectl -n $NS get secrets pg-cluster-app -o jsonpath='{.data.uri}' | base64 -d)
postgres_fqdn="${HOST}.${NS}.svc.cluster.local"

echo ""

echo "this script needs to be sourced"

echo "then run a command to use it like "

echo ""

echo 'kubectl -n infctl create secret generic pg-credentials -o yaml --dry-run=client --from-literal username=$USERNAME --from-literal password=$PASSWORD --from-literal host=$postgres_fqdn --from-literal dbname=$DBNAME'

@@ -1,5 +0,0 @@
#!/usr/bin/env bash

kubectl -n infctl delete secrets redis-auth

kubectl get secret redis-auth -n redis -o yaml | sed "s/namespace: redis/namespace: infctl/" | kubectl apply -n infctl -f -

@@ -1,24 +0,0 @@
#!/usr/bin/env bash

if kubectl get secret registry-credentials -n infctl >/dev/null 2>&1; then
  echo "Secret 'registry-credentials' already exists in namespace 'infctl'. Skipping."
  exit 0
fi


echo "Container Registry Server: $SERVER"

kubectl create secret docker-registry registry-credentials \
  --docker-server=$SERVER \
  --docker-username=$USER \
  --docker-password=$PASSWORD \
  --docker-email=$EMAIL \
  -n infctl

if [ $? -ne 0 ]; then
  echo "Error: Failed to create the docker-registry secret."
  exit 1
fi

echo "Docker registry secret created successfully."

@@ -1,6 +0,0 @@
#!/usr/bin/env bash

CREATE_SMTP_CREDS=$(kubectl -n infctl create secret generic smtp-credentials -o yaml --dry-run=client --from-literal user=$SMTP_USER --from-literal password=$SMTP_PASS)

echo "$CREATE_SMTP_CREDS" | kubectl apply -f -

@@ -1,42 +0,0 @@
LOG_FILE="/var/log/init-data.log"
mkdir -p /var/log


echo "env variables" | tee -a "$LOG_FILE"
env | tee -a "$LOG_FILE"
ls -lirt /var/www/public | tee -a "$LOG_FILE"

mkdir -p /var/www/{public,storage,database}

# Function to log errors and continue
log_error() {
  echo "[ERROR] $1" | tee -a "$LOG_FILE"
}

# Check if public directory is empty
if [ -z "$(find /var/www/public -type f -o -type d -not -name "lost+found" -not -path "/var/www/public" 2>/dev/null)" ]; then
  echo "Public directory is empty, copying data from S3..." | tee -a "$LOG_FILE"
  aws s3 cp $S3_BUCKET/assets/public.tar /var/www/public/ 2>>"$LOG_FILE" || log_error "Failed to copy public data from S3"
else
  echo "Public directory already has data, skipping S3 copy..." | tee -a "$LOG_FILE"
fi

# Check if storage directory is empty
if [ -z "$(find /var/www/storage -type f -o -type d -not -name "lost+found" -not -path "/var/www/storage" 2>/dev/null)" ]; then
  echo "Storage directory is empty, copying data from S3..." | tee -a "$LOG_FILE"
  aws s3 cp $S3_BUCKET/assets/storage.tar /var/www/storage/ 2>>"$LOG_FILE" || log_error "Failed to copy storage data from S3"
else
  echo "Storage directory already has data, skipping S3 copy..." | tee -a "$LOG_FILE"
fi

# Check if database directory is empty
if [ -z "$(ls -A /var/www/database 2>/dev/null)" ]; then
  echo "Database directory is empty, copying data from S3..." | tee -a "$LOG_FILE"
  aws s3 cp $S3_BUCKET/assets/database.tar /var/www/database/ 2>>"$LOG_FILE" || log_error "Failed to copy database data from S3"
else
  echo "Database directory already has data, skipping S3 copy..." | tee -a "$LOG_FILE"
fi


echo "Script completed. Check $LOG_FILE for details."

@@ -1,37 +0,0 @@
#!/usr/bin/env bash

if kubectl -n cnpg-system get pods | grep cnpg &>/dev/null; then
  echo "CloudNativePG pods already running. Skipping installation."
  exit 0
fi


echo "Installing CloudNativePG..."

helm repo add cnpg https://cloudnative-pg.github.io/charts
helm upgrade --install cnpg \
  --namespace cnpg-system \
  --set config.clusterWide=true \
  --skip-crds \
  --force \
  cnpg/cloudnative-pg


# had to do this

# kubectl get mutatingwebhookconfiguration,validatingwebhookconfiguration,crd -A | grep cnpg
# Delete Conflicting Resources (if safe to do so):

# kubtctl delete <anyting in the above list>


# kubectl apply -f https://raw.githubusercontent.com/cloudnative-pg/cloudnative-pg/release-1.17/releases/cnpg-1.17.5.yaml

# kubectl patch configmap cnpg-config -n cnpg-system --type merge -p '{"data":{"config":"clusterWide: true"}}'

@@ -1,9 +0,0 @@
#!/usr/bin/env bash

# Check if there are any pods in the longhorn-system namespace
if kubectl -n longhorn-system get pods --no-headers 2>/dev/null | grep -q '^[^ ]'; then
  echo "Pods already exist in the longhorn-system namespace. Skipping installation."
  exit 0
fi

kubectl apply -f https://raw.githubusercontent.com/longhorn/longhorn/v1.8.1/deploy/longhorn.yaml

@@ -1,10 +1,13 @@
#!/usr/bin/env bash

-if kubectl -n traefik get pods --no-headers 2>/dev/null | grep -q 'Running'; then
-  echo "Traefik is already running in the 'traefik' namespace. Upgrading instead."
+if kubectl -n kube-system get pods --no-headers 2>/dev/null | grep -q 'traefik'; then
+  echo "Traefik is already running in the 'kube-system' namespace. Upgrading instead."

  # Create a temporary values file for more complex configuration
  cat > /tmp/traefik-values.yaml <<EOF
+ingressClass:
+  enabled: true
+  isDefaultClass: true
ports:
  web:
    port: 80
@@ -58,7 +61,7 @@ service:
      targetPort: turn-udp
EOF

-  helm upgrade traefik traefik/traefik --namespace traefik -f /tmp/traefik-values.yaml
+  helm upgrade traefik traefik/traefik --namespace kube-system -f /tmp/traefik-values.yaml

else
  echo "Installing Traefik..."
@@ -68,6 +71,9 @@ else

  # Create a temporary values file for more complex configuration
  cat > /tmp/traefik-values.yaml <<EOF
+ingressClass:
+  enabled: true
+  isDefaultClass: true
ports:
  web:
    port: 80
@@ -121,48 +127,48 @@ service:
      targetPort: turn-udp
EOF

-  helm install traefik traefik/traefik --namespace traefik --create-namespace -f /tmp/traefik-values.yaml
+  helm install traefik traefik/traefik --namespace kube-system -f /tmp/traefik-values.yaml
fi


-cat > traefik-turn-service.yaml << EOF
-apiVersion: v1
-kind: Service
-metadata:
-  name: traefik-turn
-  namespace: traefik
-  labels:
-    app.kubernetes.io/instance: traefik-traefik
-    app.kubernetes.io/name: traefik
-spec:
-  type: LoadBalancer
-  ports:
-    - name: turn-tcp
-      port: 1194
-      protocol: TCP
-      targetPort: turn-tcp
-    - name: turn-udp
-      port: 1194
-      protocol: UDP
-      targetPort: turn-udp
-  selector:
-    app.kubernetes.io/instance: traefik-traefik
-    app.kubernetes.io/name: traefik
-EOF
+# cat > traefik-turn-service.yaml << EOF
+# apiVersion: v1
+# kind: Service
+# metadata:
+#   name: traefik-turn
+#   namespace: kube-system
+#   labels:
+#     app.kubernetes.io/instance: traefik-traefik
+#     app.kubernetes.io/name: traefik
+# spec:
+#   type: LoadBalancer
+#   ports:
+#     - name: turn-tcp
+#       port: 1194
+#       protocol: TCP
+#       targetPort: turn-tcp
+#     - name: turn-udp
+#       port: 1194
+#       protocol: UDP
+#       targetPort: turn-udp
+#   selector:
+#     app.kubernetes.io/instance: traefik-traefik
+#     app.kubernetes.io/name: traefik
+# EOF

-kubectl apply -f traefik-turn-service.yaml
+# kubectl apply -f traefik-turn-service.yaml

-rm -f traefik-turn-service.yaml
+# rm -f traefik-turn-service.yaml


-echo "Don't forget to create TCP and UDP ingress routes for the TURN server with:"
-echo "kubectl apply -f k8s-manifests/galene/ingressroute-tcp.yaml"
-echo "kubectl apply -f k8s-manifests/galene/ingressroute-udp.yaml"
-echo ""
+# echo "Don't forget to create TCP and UDP ingress routes for the TURN server with:"
+# echo "kubectl apply -f k8s-manifests/galene/ingressroute-tcp.yaml"
+# echo "kubectl apply -f k8s-manifests/galene/ingressroute-udp.yaml"
+# echo ""
echo "To access the dashboard:"
-echo "kubectl port-forward -n traefik \$(kubectl get pods -n traefik -l \"app.kubernetes.io/name=traefik\" -o name) 9000:9000"
+echo "kubectl port-forward -n kube-system \$(kubectl get pods -n kube-system -l \"app.kubernetes.io/name=traefik\" -o name) 9000:9000"
echo "Then visit http://localhost:9000/dashboard/ in your browser"
@@ -1,5 +1,10 @@
#!/usr/bin/env bash

+if ! command -v pwgen &> /dev/null; then
+  echo "Error: 'pwgen' command not found. Please install it (e.g., 'sudo apt install pwgen') and try again."
+  exit 1
+fi
+
NAMESPACE=redis
REDIS_SECRET=redis-auth