Compare commits


No commits in common. "7384722305098bc412818517374a5cb9bd133072" and "834753473613e05cd2597dc20f40ff70491711e1" have entirely different histories.

51 changed files with 779 additions and 1173 deletions


@@ -1,9 +1,10 @@
-# export VAGRANT_BRIDGE='Intel(R) Ethernet Connection (16) I219-V'
+export VAGRANT_BRIDGE='Intel(R) Ethernet Connection (16) I219-V'
# Network configuration for Vagrant/Ansible
export WORKSTATION_IP="192.168.56.10"
export VM1_IP="192.168.56.80"
export VM2_IP="192.168.56.81"
export VM3_IP="192.168.56.82"
export VAGRANT_NETWORK_PREFIX="192.168.56"
export K3S_URL_IP="192.168.56.250"
export METALLB_IP_RANGE="192.168.56.230-192.168.56.240"

.gitignore (vendored): 2 lines changed

@@ -22,5 +22,3 @@ scripts/ansible_inventory.ini
scripts/ansible_inventory.ini
vagrant/dev/ubuntu/ansible/ansible_inventory.ini
*.cast
-vagrant/dev/ubuntu/certs/
-vagrant/dev/ubuntu/config-dev


@@ -145,9 +145,9 @@ Key configuration options:
- `env`: Environment file path
- `preview_path`: Path for preview functionality

-### Project Configuration (`config.json`)
+### Customer Configuration (`config.json`)

-Copy and customize the project-specific configuration:
+Copy and customize the customer-specific configuration:

```bash
cp config.json.example config.json
@@ -155,7 +155,7 @@ cp config.json.example config.json
Key configuration options:
- `project`: Project name/identifier (used as Kubernetes namespace)
-- `project_directory`: Project-specific directory
+- `customer_directory`: Customer-specific directory
- `ui_url`: UI service URL
- `static_url`: Static content URL
- `port`: Service port
@@ -186,7 +186,7 @@ Run the CLI by providing a path to your pipeline JSON file:
The tool will automatically:
-1. Load base and project configurations
+1. Load base and customer configurations
2. Initialize SQLite database for state management
3. Execute the deployment pipeline defined in your JSON file
4. Run scripts from the `scripts/` directory
@@ -292,13 +292,13 @@ infctl-cli/
├── main.go # Application entry point
├── go.mod # Go module definition
├── base.json.example # Base configuration template
-├── config.json.example # Project configuration template
+├── config.json.example # Customer configuration template
├── app/ # Core application logic
│ ├── app.go # Pipeline orchestration and state management
│ └── k8s.go # Kubernetes operations (kubectl, kustomize)
├── config/ # Configuration management
│ ├── base.go # Base configuration handling
-│ └── project.go # Project configuration handling
+│ └── customer.go # Customer configuration handling
├── database/ # SQLite database operations
├── scripts/ # Shell scripts executed by the CLI
│ ├── install_*.sh # Infrastructure installation scripts


@ -1,21 +1,22 @@
package app package app
import ( import (
"bufio" "database/sql"
"bytes"
"encoding/json" "encoding/json"
"fmt" "fmt"
"log"
"log/slog" "log/slog"
"os" "os"
"os/exec"
"time" "time"
"headshed/infctl-cli/config" "headshed/infctl-cli/config"
"headshed/infctl-cli/database"
) )
type AppState struct { type AppState struct {
Config config.BaseConfig Config config.BaseConfig
Project config.ProjectConfig Customer config.CustomerConfig
DB *sql.DB
} }
type PipelineStep struct { type PipelineStep struct {
@ -61,31 +62,28 @@ func (app *AppState) ToDoDeployment() []PipelineStep {
return []PipelineStep{} return []PipelineStep{}
} }
// func (app *AppState) RunJsonDeployment() []PipelineStep { func (app *AppState) RunJsonDeployment() []PipelineStep {
func (app *AppState) RunJsonDeployment() error {
jsonFile := app.Project.DeploymentFile jsonFile := app.Config.DeploymentFile
if jsonFile == "" { if jsonFile == "" {
return fmt.Errorf("no config specified with [-f|-deployment-file]=<path_to_config_file> => for all options see help with -h") log.Fatal("no config specified with [-f|--deployment-file]=<path_to_config_file>")
} }
file, err := os.Open(jsonFile) file, err := os.Open(jsonFile)
if err != nil { if err != nil {
slog.Error(fmt.Sprintf("Failed to open JSON file: %s", err)) slog.Error(fmt.Sprintf("Failed to open JSON file: %s", err))
return fmt.Errorf("failed to open JSON file: %w", err) os.Exit(1)
} }
defer file.Close() defer file.Close()
// fmt.Printf("jsonFile is : %s\n", jsonFile)
slog.Info(fmt.Sprintf("Using jsonFile: %s", jsonFile))
steps, err := parseStepsFromJSON(jsonFile) steps, err := parseStepsFromJSON(jsonFile)
if err != nil { if err != nil {
slog.Error(fmt.Sprintf("Failed to parse JSON file: %s", err)) slog.Error(fmt.Sprintf("Failed to parse JSON file: %s", err))
return fmt.Errorf("failed to parse JSON file: %w", err)
} }
for _, step := range steps { for _, step := range steps {
slog.Info(fmt.Sprintf("run json deployment => 🔄 %s", step.Name)) slog.Info(fmt.Sprintf("🔄 Running step: %s", step.Name))
function, exists := functionMap[step.Function] function, exists := functionMap[step.Function]
if !exists { if !exists {
slog.Error(fmt.Sprintf("Unknown function: %s", step.Function)) slog.Error(fmt.Sprintf("Unknown function: %s", step.Function))
@ -93,121 +91,134 @@ func (app *AppState) RunJsonDeployment() error {
} }
err := function(step.Params) err := function(step.Params)
if err != nil { if err != nil {
var innerErr error slog.Error(fmt.Sprintf("❌ Step failed: %s, error: %v", step.Name, err))
if step.RetryCount > 0 {
for i := 0; i < step.RetryCount; i++ {
sleep := app.Config.RetryDelaySenconds
slog.Info(fmt.Sprintf("Retrying step: %s (attempt %d/%d) after waiting for %d seconds...", step.Name, i+1, step.RetryCount, sleep))
time.Sleep(time.Duration(sleep) * time.Second)
if innerErr = function(step.Params); innerErr == nil {
slog.Info(fmt.Sprintf("✅ Step completed: %s\n", step.Name))
err = nil
break
}
}
if innerErr != nil {
if !step.ShouldAbort {
slog.Info(fmt.Sprintf("Not going to abort, step: %s", step.Name))
} else {
return fmt.Errorf("critical failure at step: %s", step.Name)
}
}
}
if step.ShouldAbort { if step.ShouldAbort {
return fmt.Errorf("critical failure at step: %s", step.Name) log.Fatalf("🚨Critical failure at step: %s", step.Name)
} }
} else { } else {
slog.Info(fmt.Sprintf("✅ Step completed: %s", step.Name)) slog.Info(fmt.Sprintf("✅ Step completed: %s", step.Name))
} }
} }
return nil return steps
} }
func (app *AppState) getPipeline() error { func (app *AppState) getPipeline() []PipelineStep {
switch app.Project.DeploymentMode {
switch app.Config.DeploymentType {
case "api": case "api":
return fmt.Errorf("api mode is not yet implemented") return app.ToDoDeployment()
case "json": case "json":
return app.RunJsonDeployment() return app.RunJsonDeployment()
default: default:
// return app.RunJsonDeployment() return app.RunJsonDeployment()
return fmt.Errorf("unknown mode: %s", app.Project.DeploymentMode)
} }
} }
func NewAppState(cust config.ProjectConfig, config config.BaseConfig) (*AppState, error) { func NewAppState(cust config.CustomerConfig, config config.BaseConfig, dbPath string) (*AppState, error) {
db, err := database.NewDatabase(dbPath)
if err != nil {
return nil, err
}
return &AppState{ return &AppState{
Config: config, Config: config,
Project: cust, Customer: cust,
DB: db,
}, nil }, nil
} }
func RunCommand(command string) error { func (app *AppState) runPipeline(steps []PipelineStep) error {
slog.Debug(fmt.Sprintf("🐞 Running command: %s", command)) for _, step := range steps {
cmd := exec.Command("sh", "-c", command) slog.Info(fmt.Sprintf("🔄 Running step: %s\n", step.Name))
var stdout, stderr bytes.Buffer // Look up the function in the functionMap
function, exists := functionMap[step.Function]
// Get pipes for real-time reading if !exists {
stdoutPipe, err := cmd.StdoutPipe() slog.Error(fmt.Sprintf("❌ Unknown function: %s", step.Function))
if err != nil { if step.ShouldAbort {
return fmt.Errorf("failed to create stdout pipe: %w", err) return fmt.Errorf("🚨critical failure: unknown function %s", step.Function)
} }
stderrPipe, err := cmd.StderrPipe() continue
if err != nil {
return fmt.Errorf("failed to create stderr pipe: %w", err)
}
// Start the command
if err := cmd.Start(); err != nil {
return fmt.Errorf("failed to start command: %w", err)
}
// Read stdout line by line and log through slog
go func() {
scanner := bufio.NewScanner(stdoutPipe)
for scanner.Scan() {
line := scanner.Text()
stdout.WriteString(line + "\n")
slog.Info(line)
} }
}()
// Read stderr line by line and log through slog // Execute the function with the provided parameters
go func() { err := function(step.Params)
scanner := bufio.NewScanner(stderrPipe) if err != nil {
for scanner.Scan() { slog.Error(fmt.Sprintf("❌ Step failed: %s, error: %v", step.Name, err))
line := scanner.Text()
stderr.WriteString(line + "\n") // Retry logic
slog.Info(line) if step.RetryCount > 0 {
for i := 0; i < step.RetryCount; i++ {
slog.Info("Waiting for 20 seconds before retrying...")
time.Sleep(20 * time.Second)
if innerErr := function(step.Params); innerErr == nil {
slog.Info(fmt.Sprintf("✅ Step completed: %s\n", step.Name))
err = nil
break
} else {
err = innerErr
}
}
}
// Handle failure after retries
if err != nil {
if step.ShouldAbort {
return fmt.Errorf("🚨critical failure at step: %s", step.Name)
}
continue
}
} }
}()
// Wait for command to complete slog.Info(fmt.Sprintf("✅ Step completed: %s\n", step.Name))
err = cmd.Wait()
if err != nil {
slog.Error(fmt.Sprintf("❌ Command failed with error: %v", err))
slog.Debug(fmt.Sprintf("🐞 Stdout: %s\n", stdout.String()))
slog.Debug(fmt.Sprintf("🐞 Stderr: %s\n", stderr.String()))
return fmt.Errorf("failed to run script command: %w", err)
} }
return nil
}
func (app *AppState) SetUpNewCustomer() error {
/*
| --------------------------
| main pipeline
| --------------------------
*/
steps := app.getPipeline()
app.runPipeline(steps)
slog.Info(fmt.Sprintln("🎉 Pipeline setup complete!"))
return nil return nil
} }
func (app *AppState) SetUpNewProject() error { func (app *AppState) CreatePipeline() error {
return app.getPipeline() isNew, err := database.CheckProjectName(app.DB, app.Customer.Project)
}
func (app *AppState) CreateProjectAndRunPipeline() error {
err := app.SetUpNewProject()
if err != nil { if err != nil {
return fmt.Errorf("Pipeline error: %w", err) return fmt.Errorf("failed to check project name: %w", err)
} }
if isNew {
port, err := database.GetNextPortNumber(app.DB)
if err != nil {
return fmt.Errorf("failed to get next port number: %w", err)
}
err = database.AddProjectName(app.DB, app.Customer.Project, port)
if err != nil {
return fmt.Errorf("failed to add project name: %w", err)
}
slog.Info(fmt.Sprintln("Project name added:", app.Customer.Project))
fmt.Printf("Port number assigned: %d\n", port)
app.Config.Port = port
}
err = app.SetUpNewCustomer()
if err != nil {
return fmt.Errorf("failed to set up new customer: %w", err)
}
return nil return nil
} }


@ -2,31 +2,129 @@ package app
import ( import (
"encoding/json" "encoding/json"
"headshed/infctl-cli/config" "math/rand"
"os" "os"
"os/exec"
"path/filepath" "path/filepath"
"testing" "testing"
"headshed/infctl-cli/config"
) )
// Test only pipeline execution and shell command running func TestMain(m *testing.M) {
// Setup: Set TEST_ENV=true for all tests
err := os.Setenv("TEST_ENV", "true")
if err != nil {
panic("Failed to set TEST_ENV")
}
// Run all tests
code := m.Run()
// Teardown: Unset TEST_ENV after all tests
os.Unsetenv("TEST_ENV")
// Exit with the test result code
os.Exit(code)
}
func TestRunPipeline(t *testing.T) { func TestRunPipeline(t *testing.T) {
// Create a temporary directory for test assets
tempDir, err := os.MkdirTemp("", "smoke-test") tempDir, err := os.MkdirTemp("", "smoke-test")
if err != nil { if err != nil {
t.Fatalf("Failed to create temp directory: %v", err) t.Fatalf("Failed to create temp directory: %v", err)
} }
defer os.RemoveAll(tempDir) // Cleanup after test
// Create test scripts
scripts := map[string]string{
"good.sh": "#!/bin/bash\necho 'Good script executed'\nexit 0",
"warning.sh": "#!/bin/bash\necho 'Warning script executed'\nexit 0",
"error.sh": "#!/bin/bash\necho 'Error script executed'\nexit 1",
}
for name, content := range scripts {
scriptPath := filepath.Join(tempDir, name)
if err := os.WriteFile(scriptPath, []byte(content), 0755); err != nil {
t.Fatalf("Failed to create script %s: %v", name, err)
}
}
// Create a test JSON pipeline file
pipeline := []PipelineStep{
{Name: "Good Step", Function: "RunCommand", Params: []string{filepath.Join(tempDir, "good.sh")}, RetryCount: 0, ShouldAbort: false},
{Name: "Warning Step", Function: "RunCommand", Params: []string{filepath.Join(tempDir, "warning.sh")}, RetryCount: 0, ShouldAbort: false},
{Name: "Error Step", Function: "RunCommand", Params: []string{filepath.Join(tempDir, "error.sh")}, RetryCount: 0, ShouldAbort: true},
}
pipelineFile := filepath.Join(tempDir, "pipeline.json")
pipelineData, err := json.Marshal(pipeline)
if err != nil {
t.Fatalf("Failed to marshal pipeline: %v", err)
}
if err := os.WriteFile(pipelineFile, pipelineData, 0644); err != nil {
t.Fatalf("Failed to write pipeline file: %v", err)
}
// Set up AppState
app := &AppState{
Config: config.BaseConfig{
DeploymentFile: pipelineFile,
},
}
// Run the pipeline
err = app.runPipeline(pipeline)
if err == nil {
t.Errorf("Expected error due to 'Error Step', but got none")
}
}
func randomString(length int) string {
const charset = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789"
b := make([]byte, length)
for i := range b {
b[i] = charset[rand.Intn(len(charset))]
}
return string(b)
}
func TestK3DNamespaceCreation(t *testing.T) {
// Check if k3d is installed
_, err := exec.LookPath("k3d")
if err != nil {
t.Fatal("k3d is not installed. Please install k3d to run this test.")
}
// Create a test cluster
clusterName := "test-" + randomString(6)
cmd := exec.Command("k3d", "cluster", "create", clusterName, "--servers", "1")
cmd.Stdout = os.Stdout
cmd.Stderr = os.Stderr
if err := cmd.Run(); err != nil {
t.Fatalf("Failed to create k3d cluster: %v", err)
}
defer func() {
// Clean up the test cluster
cmd := exec.Command("k3d", "cluster", "delete", clusterName)
cmd.Stdout = os.Stdout
cmd.Stderr = os.Stderr
if err := cmd.Run(); err != nil {
t.Errorf("Failed to delete k3d cluster: %v", err)
}
}()
// Create a temporary directory for the pipeline config
tempDir, err := os.MkdirTemp("", "k3d-test")
if err != nil {
t.Fatalf("Failed to create temp directory: %v", err)
}
defer os.RemoveAll(tempDir) defer os.RemoveAll(tempDir)
// Create a test script
scriptPath := filepath.Join(tempDir, "good.sh")
scriptContent := "#!/bin/bash\necho 'Good script executed'\nexit 0"
if err := os.WriteFile(scriptPath, []byte(scriptContent), 0755); err != nil {
t.Fatalf("Failed to create script: %v", err)
}
// Create a test JSON pipeline file // Create a test JSON pipeline file
pipeline := []PipelineStep{ pipeline := []PipelineStep{
{Name: "Good Step", Function: "RunCommand", Params: []string{scriptPath}, RetryCount: 0, ShouldAbort: false}, {Name: "Ensure Namespace Exists", Function: "k8sNamespaceExists", Params: []string{"test-namespace"}, RetryCount: 0, ShouldAbort: true},
} }
pipelineFile := filepath.Join(tempDir, "pipeline.json") pipelineFile := filepath.Join(tempDir, "pipeline.json")
pipelineData, err := json.Marshal(pipeline) pipelineData, err := json.Marshal(pipeline)
@ -39,20 +137,22 @@ func TestRunPipeline(t *testing.T) {
// Set up AppState // Set up AppState
app := &AppState{ app := &AppState{
Config: config.BaseConfig{}, Config: config.BaseConfig{
Project: config.ProjectConfig{
DeploymentFile: pipelineFile, DeploymentFile: pipelineFile,
}, },
} }
// Run the pipeline // Run the pipeline
err = app.RunJsonDeployment() err = app.runPipeline(pipeline)
if err != nil { if err != nil {
t.Errorf("Expected no error, got: %v", err) t.Fatalf("Pipeline execution failed: %v", err)
}
// Verify the namespace exists
cmd = exec.Command("kubectl", "get", "ns", "test-namespace")
cmd.Stdout = os.Stdout
cmd.Stderr = os.Stderr
if err := cmd.Run(); err != nil {
t.Fatalf("Namespace 'test-namespace' was not created: %v", err)
} }
} }
// Removed randomString: not needed for current tests
// Removed TestK3DNamespaceCreation: k3d and k8s namespace logic is no longer part of the app
// Removed TestSetUpNewProject: advanced project setup logic is no longer part of the app


@@ -1,6 +1,7 @@
package app

import (
+	"bufio"
	"bytes"
	"fmt"
	"log/slog"
@@ -63,3 +64,56 @@ func k8sCreateNamespace(project string) error {
	return nil
}
func RunCommand(command string) error {
slog.Debug(fmt.Sprintf("🐞 Running command: %s", command))
cmd := exec.Command("sh", "-c", command)
var stdout, stderr bytes.Buffer
// Get pipes for real-time reading
stdoutPipe, err := cmd.StdoutPipe()
if err != nil {
return fmt.Errorf("failed to create stdout pipe: %w", err)
}
stderrPipe, err := cmd.StderrPipe()
if err != nil {
return fmt.Errorf("failed to create stderr pipe: %w", err)
}
// Start the command
if err := cmd.Start(); err != nil {
return fmt.Errorf("failed to start command: %w", err)
}
// Read stdout line by line and log through slog
go func() {
scanner := bufio.NewScanner(stdoutPipe)
for scanner.Scan() {
line := scanner.Text()
stdout.WriteString(line + "\n")
slog.Info(line)
}
}()
// Read stderr line by line and log through slog
go func() {
scanner := bufio.NewScanner(stderrPipe)
for scanner.Scan() {
line := scanner.Text()
stderr.WriteString(line + "\n")
slog.Info(line)
}
}()
// Wait for command to complete
err = cmd.Wait()
if err != nil {
slog.Error(fmt.Sprintf("❌ Command failed with error: %v\n", err))
slog.Debug(fmt.Sprintf("🐞 Stdout: %s\n", stdout.String()))
slog.Debug(fmt.Sprintf("🐞 Stderr: %s\n", stderr.String()))
return fmt.Errorf("failed to run script command: %w", err)
}
return nil
}
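
For orientation, here is a minimal, hypothetical sketch (not part of either commit) of how a single pipeline step's `params` entry would reach `RunCommand` above; the import path and the script name are taken from elsewhere in this comparison.

```go
package main

import (
	"fmt"
	"log/slog"

	"headshed/infctl-cli/app"
)

func main() {
	// RunCommand wraps the string in `sh -c`, streams stdout/stderr line by
	// line through slog, and returns an error if the command exits non-zero.
	if err := app.RunCommand("./scripts/success.sh"); err != nil {
		slog.Error(fmt.Sprintf("❌ Step failed: %v", err))
	}
}
```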

base.json.example (new file): 17 lines added

@ -0,0 +1,17 @@
{
"projects_directory": "/home/user/docker/",
"app_image": "headshead/some-app:0.0.1",
"webserver_image": "headsheddev/some-webserver_image:0.0.1",
"db": "database/database.sqlite",
"env": ".env",
"preview_path": "prv",
"data_www": "data_www",
"static_images": "data_images/",
"public_images": "images",
"php_conf": "php/local.ini",
"exports": "exports",
"logs": "logs/laravel.log",
"admin_url": ".headshed.dev",
"preview_url": "-prv.headshed.dev",
"nginx_conf": "nginx/conf.d"
}


@@ -5,12 +5,6 @@ mkdir -p bin
echo "Building for Linux..."
GOOS=linux GOARCH=amd64 go build -ldflags="-s -w" -o bin/infctl-linux-amd64

-echo "Building for Raspberry Pi (Linux ARM)..."
-GOOS=linux GOARCH=arm GOARM=7 go build -ldflags="-s -w" -o bin/infctl-linux-armv7
-echo "Building for Raspberry Pi (Linux ARM64)..."
-GOOS=linux GOARCH=arm64 go build -ldflags="-s -w" -o bin/infctl-linux-arm64

echo "Building for Windows..."
GOOS=windows GOARCH=amd64 go build -ldflags="-s -w" -o bin/infctl-windows-amd64.exe

config.json.example (new file): 19 lines added

@ -0,0 +1,19 @@
{
"project": "hdshd",
"project_data": "path_to/project_data",
"app_image": "headsheddev/some-app:0.0.1",
"env": "path_to/.env",
"images": "path_to_images",
"php_conf": "path_to/local.ini",
"exports": "path_to/exports",
"db": "path_to/db",
"logs": "path_to/logs",
"preview_path": "path_to_preview",
"webserver_image": "headsheddev/my-nginx:0.0.1",
"public_images": "path_to/images",
"data_www": "path_to/www",
"nginx_conf": "path_to/conf.d",
"admin_url": "admin_url.headshed.dev",
"preview_url": "app-prv.headshed.dev",
"ui_url": "ww2.headshed.dev"
}


@@ -7,30 +7,41 @@ import (
	"os"
)

-const Version = "v0.0.5"
+const Version = "v0.0.4"
// Package-level variables for flags
var (
baseConfigFile string
projectConfigFile string
pipelineFile string
)
type BaseConfig struct { type BaseConfig struct {
RetryDelaySenconds int `json:"retry_delay_seconds"` ProjectsDirectory string `json:"projects_directory"`
Env string `json:"env"`
StaticImages string `json:"static_images"`
PublicImages string `json:"public_images"`
PhpConf string `json:"php_conf"`
Exports string `json:"exports"`
Logs string `json:"logs"`
PreviewPath string `json:"preview_path"`
DataWww string `json:"data_www"`
NginxConf string `json:"nginx_conf"`
AdminURL string `json:"admin_url"`
PreviewURL string `json:"preview_url"`
AppImage string `json:"app_image"`
WebserverImage string `json:"webserver_image"`
EmptyDB string `json:"empty_db"`
DB string `json:"db"`
EmptyImages string `json:"empty_imaages"`
DeploymentType string `json:"deployment_type"`
DeploymentFile string `json:"deployment_file"`
Port int `json:"port"`
} }
// ParseFlags parses all command-line flags and handles help/version flags func ReadBaseConfig(path string) (BaseConfig, error) {
func ParseFlags() {
deploymentType := os.Getenv("DEPLOYMENT_TYPE")
deploymentFile := flag.String("deployment-file", "", "path to config file")
deploymentFileShorthand := flag.String("f", "", "shorthand for -deployment-file")
helpFlag := flag.Bool("help", false, "show help") helpFlag := flag.Bool("help", false, "show help")
versionFlag := flag.Bool("version", false, "show version") versionFlag := flag.Bool("version", false, "show version")
vFlag := flag.Bool("v", false, "show version (shorthand)") vFlag := flag.Bool("v", false, "show version (shorthand)")
flag.StringVar(&baseConfigFile, "base-config", "", "Path to base config file (optional)")
flag.StringVar(&projectConfigFile, "project-config", "", "Path to project config file (optional)")
flag.StringVar(&pipelineFile, "f", "", "Path to pipeline file")
flag.StringVar(&pipelineFile, "deployment-file", "", "Path to pipeline file (long format)")
flag.Parse() flag.Parse()
if *helpFlag { if *helpFlag {
@ -39,79 +50,28 @@ func ParseFlags() {
os.Exit(0) os.Exit(0)
} }
// Handle version flags
if *versionFlag || *vFlag { if *versionFlag || *vFlag {
fmt.Println("infctl-cli version:", Version) fmt.Println("infctl-cli version:", Version)
os.Exit(0) os.Exit(0)
} }
}
func ReadBaseConfig(path string) (BaseConfig, error) { var config BaseConfig
config := BaseConfig{} if *deploymentFileShorthand != "" {
config.DeploymentFile = *deploymentFileShorthand
// If base.json does not exist, create it with default value } else if *deploymentFile != "" {
if _, err := os.Stat(path); os.IsNotExist(err) { config.DeploymentFile = *deploymentFile
if err := CreateDefaultBaseConfig(path); err != nil {
return BaseConfig{}, fmt.Errorf("failed to create default base config: %w", err)
}
} }
data, err := os.ReadFile(path) data, err := os.ReadFile(path)
if err != nil { if err != nil {
return BaseConfig{}, fmt.Errorf("failed to read the config file: %w", err) return BaseConfig{}, fmt.Errorf("failed to read file: %w", err)
} }
if err := json.Unmarshal(data, &config); err != nil { if err := json.Unmarshal(data, &config); err != nil {
return config, fmt.Errorf("failed to unmarshal JSON: %w", err) return BaseConfig{}, fmt.Errorf("failed to unmarshal JSON: %w", err)
} }
config.DeploymentType = deploymentType
return config, nil return config, nil
} }
// CreateDefaultBaseConfig creates a default base.json with retry_delay_seconds: 3
func CreateDefaultBaseConfig(path string) error {
defaultConfig := BaseConfig{
RetryDelaySenconds: 3,
}
data, err := json.MarshalIndent(defaultConfig, "", " ")
if err != nil {
return fmt.Errorf("failed to marshal default base config: %w", err)
}
if err := os.WriteFile(path, data, 0644); err != nil {
return fmt.Errorf("failed to write default base config to file: %w", err)
}
return nil
}
// LoadConfigs resolves config paths and loads both configs
func LoadConfigs() (BaseConfig, ProjectConfig, error) {
wd, err := os.Getwd()
if err != nil {
return BaseConfig{}, ProjectConfig{}, fmt.Errorf("failed to get current directory: %w", err)
}
var baseConfigPath string
if baseConfigFile == "" {
baseConfigPath = wd + string(os.PathSeparator) + "base.json"
} else {
baseConfigPath = baseConfigFile
}
var projectConfigPath string
if projectConfigFile == "" {
projectConfigPath = wd + string(os.PathSeparator) + "config.json"
} else {
projectConfigPath = projectConfigFile
}
baseConfig, err := ReadBaseConfig(baseConfigPath)
if err != nil {
return BaseConfig{}, ProjectConfig{}, fmt.Errorf("error reading base config file: %w", err)
}
projectConfig, err := ReadProjectConfig(projectConfigPath, &pipelineFile)
if err != nil {
return BaseConfig{}, ProjectConfig{}, fmt.Errorf("error reading project config file: %w", err)
}
return baseConfig, projectConfig, nil
}


@ -1,50 +0,0 @@
package config
import (
"os"
"testing"
)
func TestReadBaseConfig_Basic(t *testing.T) {
// Create a temporary config file
file, err := os.CreateTemp("", "baseconfig_*.json")
if err != nil {
t.Fatalf("failed to create temp file: %v", err)
}
defer os.Remove(file.Name())
jsonContent := `{
"projects_directory": "/projects",
"env": "dev",
"static_images": "/static",
"public_images": "/public",
"php_conf": "/php.ini",
"exports": "/exports",
"logs": "/logs",
"preview_path": "/preview",
"data_www": "/data",
"nginx_conf": "/nginx.conf",
"admin_url": "http://admin",
"preview_url": "http://preview",
"app_image": "app:v1",
"webserver_image": "web:v1",
"empty_db": "empty.db",
"db": "app.db",
"empty_imaages": "empty.img",
"deployment_type": "json",
"deployment_file": "base.json",
"port": 8080
}`
file.WriteString(jsonContent)
file.Close()
os.Setenv("DEPLOYMENT_TYPE", "json")
config, err := ReadBaseConfig(file.Name())
if err != nil {
t.Fatalf("ReadBaseConfig failed: %v", err)
}
// Only check RetryDelaySenconds as that's the only field in BaseConfig now
if config.RetryDelaySenconds != 0 {
t.Errorf("expected RetryDelaySenconds 0, got %d", config.RetryDelaySenconds)
}
}

config/customer.go (new file): 29 lines added

@ -0,0 +1,29 @@
package config
import (
"encoding/json"
"fmt"
"os"
)
type CustomerConfig struct {
Project string `json:"project"`
CustomerDirectory string `json:"customer_directory"`
UIURL string `json:"ui_url"`
StaticURL string `json:"static_url"`
Port int `json:"port"`
}
func ReadCustomerConfig(path string) (CustomerConfig, error) {
data, err := os.ReadFile(path)
if err != nil {
return CustomerConfig{}, fmt.Errorf("failed to read file: %w", err)
}
var cust CustomerConfig
if err := json.Unmarshal(data, &cust); err != nil {
return CustomerConfig{}, fmt.Errorf("failed to unmarshal JSON: %w", err)
}
return cust, nil
}
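
A short, hypothetical usage sketch (not in either commit) for `ReadCustomerConfig`; it assumes a `config.json` in the working directory, which is how `main.go` in this comparison resolves it.

```go
package main

import (
	"fmt"
	"log"

	"headshed/infctl-cli/config"
)

func main() {
	cust, err := config.ReadCustomerConfig("config.json")
	if err != nil {
		log.Fatalf("error reading customer config file: %v", err)
	}
	// Project doubles as the Kubernetes namespace; Port is assigned later
	// from the CLI's SQLite state when the project is first seen.
	fmt.Println(cust.Project, cust.UIURL, cust.StaticURL, cust.Port)
}
```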


@ -1,108 +0,0 @@
package config
import (
"encoding/json"
"fmt"
"os"
)
type ProjectConfig struct {
LogFormat string `json:"log_format"`
DeploymentFile string `json:"deployment_file"`
DeploymentType string `json:"deployment_type"`
DeploymentMode string `json:"deployment_mode"`
}
func ValidateProjectConfig(config ProjectConfig) error {
if config.LogFormat != "full" && config.LogFormat != "basic" && config.LogFormat != "none" {
return fmt.Errorf("invalid LogFormat: %s (must be 'full', 'basic', or 'none')", config.LogFormat)
}
if _, err := os.Stat(config.DeploymentFile); os.IsNotExist(err) {
return fmt.Errorf("deployment file does not exist: %s", config.DeploymentFile)
} else if err != nil {
return fmt.Errorf("error checking deployment file: %w", err)
}
if config.DeploymentMode != "json" && config.DeploymentMode != "api" {
return fmt.Errorf("invalid DeploymentMode: %s (must be 'json' or 'api')", config.DeploymentMode)
}
fmt.Printf("DeploymentType: %s\n", config.DeploymentType)
if config.DeploymentType != "development" && config.DeploymentType != "pre-production" && config.DeploymentType != "production" {
return fmt.Errorf("invalid DeploymentType: %s (must be 'development', 'pre-production', or 'production')", config.DeploymentType)
}
return nil
}
func CreateDefaultJsonConfig(path string, depploymentFile string) error {
defaultConfig := ProjectConfig{
LogFormat: "full",
DeploymentType: "development",
DeploymentFile: depploymentFile,
DeploymentMode: "json",
}
data, err := json.MarshalIndent(defaultConfig, "", " ")
if err != nil {
return fmt.Errorf("failed to marshal default config: %w", err)
}
if err := os.WriteFile(path, data, 0644); err != nil {
return fmt.Errorf("failed to write default config to file: %w", err)
}
return nil
}
func ReadProjectConfig(path string, pipelineFile *string) (ProjectConfig, error) {
var config ProjectConfig
if pipelineFile == nil || *pipelineFile == "" {
return ProjectConfig{}, fmt.Errorf("no deployment file specified, please use -f or --deployment-file flag")
}
config.DeploymentFile = *pipelineFile
if _, err := os.Stat(path); os.IsNotExist(err) {
if err := CreateDefaultJsonConfig(path, config.DeploymentFile); err != nil {
return ProjectConfig{}, fmt.Errorf("failed to create default config: %w", err)
}
}
data, err := os.ReadFile(path)
if err != nil {
return ProjectConfig{}, fmt.Errorf("failed to read project configfile: %w", err)
}
var proj ProjectConfig
if err := json.Unmarshal(data, &proj); err != nil {
return ProjectConfig{}, fmt.Errorf("failed to unmarshal JSON: %w", err)
}
if proj.DeploymentMode != "" {
config.DeploymentMode = proj.DeploymentMode
} else {
config.DeploymentMode = "json"
}
deploymentModeEnv := os.Getenv("DEPLOYMENT_MODE")
if deploymentModeEnv != "" {
config.DeploymentMode = deploymentModeEnv
}
config.LogFormat = proj.LogFormat
if config.LogFormat == "" {
config.LogFormat = "full"
}
config.DeploymentType = proj.DeploymentType
if err := ValidateProjectConfig(config); err != nil {
return ProjectConfig{}, err
}
return config, nil
}


@ -1,46 +0,0 @@
package config
import (
"os"
"testing"
)
func TestReadProjectConfig_Basic(t *testing.T) {
file, err := os.CreateTemp("", "projectconfig_*.json")
if err != nil {
t.Fatalf("failed to create temp file: %v", err)
}
defer os.Remove(file.Name())
jsonContent := `{
"log_format": "full",
"deployment_file": "pipeline.json",
"deployment_mode": "json",
"deployment_type": "development"
}`
file.WriteString(jsonContent)
file.Close()
pipelineFile, err := os.CreateTemp("", "pipeline_*.json")
if err != nil {
t.Fatalf("failed to create temp pipeline file: %v", err)
}
defer os.Remove(pipelineFile.Name())
pipelineFile.WriteString(`{}`) // minimal valid JSON
pipelineFile.Close()
pipelineFilePath := pipelineFile.Name()
config, err := ReadProjectConfig(file.Name(), &pipelineFilePath)
if err != nil {
t.Fatalf("ReadProjectConfig failed: %v", err)
}
if config.LogFormat != "full" {
t.Errorf("expected LogFormat 'full', got '%s'", config.LogFormat)
}
if config.DeploymentFile != pipelineFilePath {
t.Errorf("expected DeploymentFile '%s', got '%s'", pipelineFilePath, config.DeploymentFile)
}
if config.DeploymentMode != "json" {
t.Errorf("expected DeploymentMode 'json', got '%s'", config.DeploymentMode)
}
}

database/database.go (new file): 74 lines added

@ -0,0 +1,74 @@
package database
import (
"database/sql"
"log"
"log/slog"
"os"
_ "modernc.org/sqlite"
)
func NewDatabase(dbPath string) (*sql.DB, error) {
// Check if the application is running in a test environment
if os.Getenv("TEST_ENV") == "true" {
dbPath = ":memory:" // Use in-memory database for tests
slog.Info("🧪 Running in test environment, using in-memory database")
log.Fatal("🧪 Running in test environment, using in-memory database ")
}
db, err := sql.Open("sqlite", dbPath)
if err != nil {
return nil, err
}
createTableSQL := `
CREATE TABLE IF NOT EXISTS project_name (
id INTEGER PRIMARY KEY AUTOINCREMENT,
project_name TEXT NOT NULL,
port INTEGER NOT NULL,
created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
);`
_, err = db.Exec(createTableSQL)
if err != nil {
return nil, err
}
return db, nil
}
func CheckProjectName(db *sql.DB, projectName string) (bool, error) {
var exists bool
query := `SELECT EXISTS(SELECT 1 FROM project_name WHERE project_name = ? LIMIT 1);`
err := db.QueryRow(query, projectName).Scan(&exists)
if err != nil && err != sql.ErrNoRows {
return false, err
}
return !exists, nil
}
func AddProjectName(db *sql.DB, projectName string, port int) error {
query := `INSERT INTO project_name (project_name, port) VALUES (?, ?);`
_, err := db.Exec(query, projectName, port)
if err != nil {
return err
}
return nil
}
func GetNextPortNumber(db *sql.DB) (int, error) {
var maxPortNumber sql.NullInt64
query := `SELECT MAX(port) FROM project_name;`
err := db.QueryRow(query).Scan(&maxPortNumber)
if err != nil && err != sql.ErrNoRows {
return 0, err
}
if !maxPortNumber.Valid {
// No rows in the table, return a default port number
return 10000, nil
}
return int(maxPortNumber.Int64 + 1), nil
}
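
The helpers above are combined roughly as follows by `CreatePipeline` in `app/app.go` (see earlier in this comparison); this standalone sketch is illustrative only, with the project name and database path borrowed from the examples elsewhere on this page.

```go
package main

import (
	"fmt"
	"log"

	"headshed/infctl-cli/database"
)

func main() {
	db, err := database.NewDatabase("app.db")
	if err != nil {
		log.Fatalf("failed to open database: %v", err)
	}
	defer db.Close()

	// CheckProjectName reports true when the project has not been seen yet.
	isNew, err := database.CheckProjectName(db, "hdshd")
	if err != nil {
		log.Fatalf("failed to check project name: %v", err)
	}
	if isNew {
		// The first project gets 10000; after that, MAX(port)+1.
		port, err := database.GetNextPortNumber(db)
		if err != nil {
			log.Fatalf("failed to get next port number: %v", err)
		}
		if err := database.AddProjectName(db, "hdshd", port); err != nil {
			log.Fatalf("failed to add project name: %v", err)
		}
		fmt.Printf("Port number assigned: %d\n", port)
	}
}
```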

go.mod: 15 lines changed

@@ -1,3 +1,18 @@
module headshed/infctl-cli

go 1.23.3
require modernc.org/sqlite v1.38.0
require (
github.com/dustin/go-humanize v1.0.1 // indirect
github.com/google/uuid v1.6.0 // indirect
github.com/mattn/go-isatty v0.0.20 // indirect
github.com/ncruces/go-strftime v0.1.9 // indirect
github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec // indirect
golang.org/x/exp v0.0.0-20250408133849-7e4ce0ab07d0 // indirect
golang.org/x/sys v0.33.0 // indirect
modernc.org/libc v1.65.10 // indirect
modernc.org/mathutil v1.7.1 // indirect
modernc.org/memory v1.11.0 // indirect
)

go.sum: 47 lines changed

@ -0,0 +1,47 @@
github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY=
github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto=
github.com/google/pprof v0.0.0-20250317173921-a4b03ec1a45e h1:ijClszYn+mADRFY17kjQEVQ1XRhq2/JR1M3sGqeJoxs=
github.com/google/pprof v0.0.0-20250317173921-a4b03ec1a45e/go.mod h1:boTsfXsheKC2y+lKOCMpSfarhxDeIzfZG1jqGcPl3cA=
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
github.com/ncruces/go-strftime v0.1.9 h1:bY0MQC28UADQmHmaF5dgpLmImcShSi2kHU9XLdhx/f4=
github.com/ncruces/go-strftime v0.1.9/go.mod h1:Fwc5htZGVVkseilnfgOVb9mKy6w1naJmn9CehxcKcls=
github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec h1:W09IVJc94icq4NjY3clb7Lk8O1qJ8BdBEF8z0ibU0rE=
github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo=
golang.org/x/exp v0.0.0-20250408133849-7e4ce0ab07d0 h1:R84qjqJb5nVJMxqWYb3np9L5ZsaDtB+a39EqjV0JSUM=
golang.org/x/exp v0.0.0-20250408133849-7e4ce0ab07d0/go.mod h1:S9Xr4PYopiDyqSyp5NjCrhFrqg6A5zA2E/iPHPhqnS8=
golang.org/x/mod v0.24.0 h1:ZfthKaKaT4NrhGVZHO1/WDTwGES4De8KtWO0SIbNJMU=
golang.org/x/mod v0.24.0/go.mod h1:IXM97Txy2VM4PJ3gI61r1YEk/gAj6zAHN3AdZt6S9Ww=
golang.org/x/sync v0.14.0 h1:woo0S4Yywslg6hp4eUFjTVOyKt0RookbpAHG4c1HmhQ=
golang.org/x/sync v0.14.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.33.0 h1:q3i8TbbEz+JRD9ywIRlyRAQbM0qF7hu24q3teo2hbuw=
golang.org/x/sys v0.33.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
golang.org/x/tools v0.33.0 h1:4qz2S3zmRxbGIhDIAgjxvFutSvH5EfnsYrRBj0UI0bc=
golang.org/x/tools v0.33.0/go.mod h1:CIJMaWEY88juyUfo7UbgPqbC8rU2OqfAV1h2Qp0oMYI=
modernc.org/cc/v4 v4.26.1 h1:+X5NtzVBn0KgsBCBe+xkDC7twLb/jNVj9FPgiwSQO3s=
modernc.org/cc/v4 v4.26.1/go.mod h1:uVtb5OGqUKpoLWhqwNQo/8LwvoiEBLvZXIQ/SmO6mL0=
modernc.org/ccgo/v4 v4.28.0 h1:rjznn6WWehKq7dG4JtLRKxb52Ecv8OUGah8+Z/SfpNU=
modernc.org/ccgo/v4 v4.28.0/go.mod h1:JygV3+9AV6SmPhDasu4JgquwU81XAKLd3OKTUDNOiKE=
modernc.org/fileutil v1.3.3 h1:3qaU+7f7xxTUmvU1pJTZiDLAIoJVdUSSauJNHg9yXoA=
modernc.org/fileutil v1.3.3/go.mod h1:HxmghZSZVAz/LXcMNwZPA/DRrQZEVP9VX0V4LQGQFOc=
modernc.org/gc/v2 v2.6.5 h1:nyqdV8q46KvTpZlsw66kWqwXRHdjIlJOhG6kxiV/9xI=
modernc.org/gc/v2 v2.6.5/go.mod h1:YgIahr1ypgfe7chRuJi2gD7DBQiKSLMPgBQe9oIiito=
modernc.org/libc v1.65.10 h1:ZwEk8+jhW7qBjHIT+wd0d9VjitRyQef9BnzlzGwMODc=
modernc.org/libc v1.65.10/go.mod h1:StFvYpx7i/mXtBAfVOjaU0PWZOvIRoZSgXhrwXzr8Po=
modernc.org/mathutil v1.7.1 h1:GCZVGXdaN8gTqB1Mf/usp1Y/hSqgI2vAGGP4jZMCxOU=
modernc.org/mathutil v1.7.1/go.mod h1:4p5IwJITfppl0G4sUEDtCr4DthTaT47/N3aT6MhfgJg=
modernc.org/memory v1.11.0 h1:o4QC8aMQzmcwCK3t3Ux/ZHmwFPzE6hf2Y5LbkRs+hbI=
modernc.org/memory v1.11.0/go.mod h1:/JP4VbVC+K5sU2wZi9bHoq2MAkCnrt2r98UGeSK7Mjw=
modernc.org/opt v0.1.4 h1:2kNGMRiUjrp4LcaPuLY2PzUfqM/w9N23quVwhKt5Qm8=
modernc.org/opt v0.1.4/go.mod h1:03fq9lsNfvkYSfxrfUhZCWPk1lm4cq4N+Bh//bEtgns=
modernc.org/sortutil v1.2.1 h1:+xyoGf15mM3NMlPDnFqrteY07klSFxLElE2PVuWIJ7w=
modernc.org/sortutil v1.2.1/go.mod h1:7ZI3a3REbai7gzCLcotuw9AC4VZVpYMjDzETGsSMqJE=
modernc.org/sqlite v1.38.0 h1:+4OrfPQ8pxHKuWG4md1JpR/EYAh3Md7TdejuuzE7EUI=
modernc.org/sqlite v1.38.0/go.mod h1:1Bj+yES4SVvBZ4cBOpVZ6QgesMCKpJZDq0nxYzOpmNE=
modernc.org/strutil v1.2.1 h1:UneZBkQA+DX2Rp35KcM69cSsNES9ly8mQWD71HKlOA0=
modernc.org/strutil v1.2.1/go.mod h1:EHkiggD70koQxjVdSBM3JKM7k6L0FbGE5eymy9i3B9A=
modernc.org/token v1.1.0 h1:Xl7Ap9dKaEs5kLoOQeQmPWevfnk/DM5qcLcYlA8ys6Y=
modernc.org/token v1.1.0/go.mod h1:UGzOrNV1mAFSEB63lOFHIpNRUVMvYTc6yu1SMY/XTDM=


@ -20,33 +20,18 @@ fi
case "$OS" in case "$OS" in
Linux) OS="linux" ;; Linux) OS="linux" ;;
Darwin) OS="darwin" ;; Darwin) OS="darwin" ;;
MSYS_NT-10.0-26100) OS="windows" ;; # gitbash
*) echo "Unsupported OS: $OS"; exit 1 ;; *) echo "Unsupported OS: $OS"; exit 1 ;;
esac esac
# Construct the download URL # Construct the download URL
VERSION="v0.0.5" VERSION="v0.0.4"
BINARY_URL="https://codeberg.org/headshed/infctl-cli/releases/download/$VERSION/infctl-$OS-$ARCH" BINARY_URL="https://codeberg.org/headshed/infctl-cli/releases/download/$VERSION/infctl-$OS-$ARCH"
# Download the binary # Download the binary
echo "Downloading infctl binary for $OS-$ARCH..." echo "Downloading infctl binary for $OS-$ARCH..."
sudo curl -s -L "$BINARY_URL" -o /usr/local/bin/infctl
if [ "$OS" == "windows" ]; then # Make it executable
# Create ~/bin if it doesn't exist sudo chmod +x /usr/local/bin/infctl
mkdir -p "$HOME/bin"
curl -s -L "$BINARY_URL.exe" -o "$HOME/bin/infctl.exe"
if [ $? -ne 0 ]; then
echo "Failed to download infctl for Windows"
exit 1
fi
# Ensure ~/bin is in PATH for future sessions
if ! grep -q 'export PATH="$HOME/bin:$PATH"' "$HOME/.bash_profile" 2>/dev/null; then
echo 'export PATH="$HOME/bin:$PATH"' >> "$HOME/.bash_profile"
echo "Added ~/bin to PATH in ~/.bash_profile. Please restart your shell or run: source ~/.bash_profile"
fi
else
sudo curl -s -L "$BINARY_URL" -o /usr/local/bin/infctl
sudo chmod +x /usr/local/bin/infctl
fi
echo "infctl install done." echo "infctl installed successfully!"


@ -1,119 +0,0 @@
package logger
import (
"context"
"fmt"
"log/slog"
"os"
)
// multiHandler writes to multiple slog.Handlers
type multiHandler struct {
handlers []slog.Handler
}
func (m *multiHandler) Enabled(ctx context.Context, level slog.Level) bool {
for _, h := range m.handlers {
if h.Enabled(ctx, level) {
return true
}
}
return false
}
func (m *multiHandler) Handle(ctx context.Context, r slog.Record) error {
var err error
for _, h := range m.handlers {
if e := h.Handle(ctx, r); e != nil && err == nil {
err = e
}
}
return err
}
func (m *multiHandler) WithAttrs(attrs []slog.Attr) slog.Handler {
newHandlers := make([]slog.Handler, len(m.handlers))
for i, h := range m.handlers {
newHandlers[i] = h.WithAttrs(attrs)
}
return &multiHandler{handlers: newHandlers}
}
func (m *multiHandler) WithGroup(name string) slog.Handler {
newHandlers := make([]slog.Handler, len(m.handlers))
for i, h := range m.handlers {
newHandlers[i] = h.WithGroup(name)
}
return &multiHandler{handlers: newHandlers}
}
type customMessageOnlyHandler struct {
output *os.File
}
func (h *customMessageOnlyHandler) Enabled(_ context.Context, _ slog.Level) bool {
return true
}
func (h *customMessageOnlyHandler) Handle(_ context.Context, r slog.Record) error {
msg := r.Message
if msg != "" {
_, err := fmt.Fprintln(h.output, msg)
return err
}
return nil
}
func (h *customMessageOnlyHandler) WithAttrs(_ []slog.Attr) slog.Handler {
return h
}
func (h *customMessageOnlyHandler) WithGroup(_ string) slog.Handler {
return h
}
// SetupLogger configures slog based on format and file
func SetupLogger(logFormat string, logFilePath string, level slog.Level) *slog.Logger {
var handlers []slog.Handler
var logFile *os.File
var levelVar slog.LevelVar
levelVar.Set(level)
if logFilePath != "" {
f, err := os.OpenFile(logFilePath, os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0644)
if err == nil {
logFile = f
// Do not close here; let OS handle it at process exit
}
}
switch logFormat {
case "basic":
handlers = append(handlers, slog.NewTextHandler(os.Stdout, &slog.HandlerOptions{
ReplaceAttr: func(groups []string, a slog.Attr) slog.Attr {
if a.Key == slog.TimeKey {
return slog.Attr{}
}
return a
},
}))
if logFile != nil {
handlers = append(handlers, slog.NewTextHandler(logFile, &slog.HandlerOptions{}))
}
case "none":
handlers = append(handlers, &customMessageOnlyHandler{output: os.Stdout})
if logFile != nil {
handlers = append(handlers, &customMessageOnlyHandler{output: logFile})
}
default:
handlers = append(handlers, slog.NewJSONHandler(os.Stdout, &slog.HandlerOptions{Level: &levelVar}))
if logFile != nil {
handlers = append(handlers, slog.NewJSONHandler(logFile, &slog.HandlerOptions{Level: &levelVar}))
}
}
if len(handlers) == 1 {
return slog.New(handlers[0])
}
return slog.New(&multiHandler{handlers: handlers})
}


@ -1,42 +0,0 @@
package logger
import (
"log/slog"
"os"
"testing"
)
func TestMultiHandlerBasic(t *testing.T) {
// Create two custom handlers that write to files
f1, err := os.CreateTemp("", "log1_*.txt")
if err != nil {
t.Fatalf("failed to create temp file: %v", err)
}
defer os.Remove(f1.Name())
f2, err := os.CreateTemp("", "log2_*.txt")
if err != nil {
t.Fatalf("failed to create temp file: %v", err)
}
defer os.Remove(f2.Name())
h1 := &customMessageOnlyHandler{output: f1}
h2 := &customMessageOnlyHandler{output: f2}
mh := &multiHandler{handlers: []slog.Handler{h1, h2}}
logger := slog.New(mh)
logger.Info("test message")
f1.Seek(0, 0)
buf1 := make([]byte, 100)
n1, _ := f1.Read(buf1)
if string(buf1[:n1]) != "test message\n" {
t.Errorf("expected message in log1, got: %q", string(buf1[:n1]))
}
f2.Seek(0, 0)
buf2 := make([]byte, 100)
n2, _ := f2.Read(buf2)
if string(buf2[:n2]) != "test message\n" {
t.Errorf("expected message in log2, got: %q", string(buf2[:n2]))
}
}

main.go: 104 lines changed

@ -14,61 +14,101 @@
package main package main
import ( import (
"encoding/json" "context"
"fmt" "fmt"
"log"
"log/slog" "log/slog"
"os" "os"
"headshed/infctl-cli/app" "headshed/infctl-cli/app"
"headshed/infctl-cli/config" "headshed/infctl-cli/config"
"headshed/infctl-cli/logger"
) )
type customMessageOnlyHandler struct {
output *os.File
}
func (h *customMessageOnlyHandler) Enabled(_ context.Context, _ slog.Level) bool {
return true
}
func (h *customMessageOnlyHandler) Handle(_ context.Context, r slog.Record) error {
// Directly retrieve the message from the record
msg := r.Message
if msg != "" {
_, err := fmt.Fprintln(h.output, msg)
return err
}
return nil
}
func (h *customMessageOnlyHandler) WithAttrs(_ []slog.Attr) slog.Handler {
return h
}
func (h *customMessageOnlyHandler) WithGroup(_ string) slog.Handler {
return h
}
func main() { func main() {
config.ParseFlags() var levelVar slog.LevelVar
baseConfig, projectConfig, err := config.LoadConfigs() levelVar.Set(slog.LevelDebug)
if err != nil {
fmt.Fprintf(os.Stderr, "Config error: %v\n", err) var logger *slog.Logger
os.Exit(1) if os.Getenv("LOG_FORMAT") == "basic" {
logger = slog.New(slog.NewTextHandler(os.Stdout, &slog.HandlerOptions{
ReplaceAttr: func(groups []string, a slog.Attr) slog.Attr {
if a.Key == slog.TimeKey {
return slog.Attr{}
}
return a
},
}))
} else if os.Getenv("LOG_FORMAT") == "none" {
logger = slog.New(&customMessageOnlyHandler{output: os.Stdout})
} else {
logger = slog.New(slog.NewJSONHandler(os.Stdout, &slog.HandlerOptions{Level: &levelVar}))
} }
logFormat := projectConfig.LogFormat
if os.Getenv("LOG_FORMAT") != "" {
logFormat = os.Getenv("LOG_FORMAT")
}
logFilePath := os.Getenv("LOG_FILE")
logger := logger.SetupLogger(logFormat, logFilePath, slog.LevelDebug)
slog.SetDefault(logger) slog.SetDefault(logger)
if err := run(projectConfig, baseConfig); err != nil { if err := run(); err != nil {
slog.Error("❌ 💥 Pipeline error: " + err.Error()) log.Fatalf("Application error: %v", err)
os.Exit(1)
} else {
slog.Info("✅ 🚀 Pipeline completed successfully")
} }
} }
func run(projectConfig config.ProjectConfig, baseConfig config.BaseConfig) error { func run() error {
appState, err := app.NewAppState(projectConfig, baseConfig) wd, err := os.Getwd()
if err != nil {
return fmt.Errorf("failed to get current directory: %w", err)
}
baseConfigPath := wd + string(os.PathSeparator) + "base.json"
configPath := wd + string(os.PathSeparator) + "config.json"
baseConfig, err := config.ReadBaseConfig(baseConfigPath)
if err != nil {
return fmt.Errorf("error reading base config file: %w", err)
}
customerConfig, err := config.ReadCustomerConfig(configPath)
if err != nil {
return fmt.Errorf("error reading customer config file: %w", err)
}
appState, err := app.NewAppState(customerConfig, baseConfig, "app.db")
if err != nil { if err != nil {
return fmt.Errorf("failed to initialize app state: %w", err) return fmt.Errorf("failed to initialize app state: %w", err)
} }
defer func() {
// Pretty-print appState as JSON if err := appState.DB.Close(); err != nil {
if os.Getenv("DEBUG") == "1" { log.Printf("Error closing database: %v", err)
if jsonBytes, err := json.MarshalIndent(appState, "", " "); err == nil {
fmt.Printf(">>DEBUG>> appState:\n%s\n", string(jsonBytes))
} else {
fmt.Printf(">>DEBUG>> appState: %+v\n", appState)
} }
} }()
if err := appState.CreateProjectAndRunPipeline(); err != nil { if err := appState.CreatePipeline(); err != nil {
return fmt.Errorf("pipeline error: %w", err) return fmt.Errorf("failed to create customer project: %w", err)
} }
return nil return nil


@ -1,10 +0,0 @@
package main
import "testing"
// Example test for main package
func TestMainDummy(t *testing.T) {
// This is a placeholder test.
// Add real tests for functions in main.go if possible.
t.Log("main package test ran")
}


@ -1,20 +0,0 @@
[
{
"name": "run a failing job",
"function": "RunCommand",
"params": [
"./scripts/failue.sh"
],
"retryCount": 2,
"shouldAbort": false
},
{
"name": "run a successful job",
"function": "RunCommand",
"params": [
"./scripts/success.sh"
],
"retryCount": 0,
"shouldAbort": true
}
]


@ -1,20 +0,0 @@
[
{
"name": "run a failing job",
"function": "RunCommand",
"params": [
"./scripts/failue.sh"
],
"retryCount": 2,
"shouldAbort": true
},
{
"name": "run a successful job",
"function": "RunCommand",
"params": [
"./scripts/success.sh"
],
"retryCount": 0,
"shouldAbort": true
}
]


@@ -1,6 +1,7 @@
[
  {
-    "name": "run a failing job",
+    "name": "Create Vagrant nodes",
    "function": "RunCommand",
    "params": [
      "./scripts/failue.sh"
@@ -8,6 +9,7 @@
    "retryCount": 0,
    "shouldAbort": true
  },
  {
    "name": "Configure Vagrant K3s",
    "function": "RunCommand",
@@ -17,6 +19,8 @@
    "retryCount": 0,
    "shouldAbort": true
  },
  {
    "name": "Create Vagrant workstation",
    "function": "RunCommand",
@@ -26,4 +30,4 @@
    "retryCount": 0,
    "shouldAbort": true
  }
]


@ -1,38 +0,0 @@
[
{
"name": "run succeeding pipeline",
"function": "RunCommand",
"params": [
"LOG_FORMAT=none infctl -f pipelines/dev/succeeding.json"
],
"retryCount": 0,
"shouldAbort": true
},
{
"name": "run failing pipeline",
"function": "RunCommand",
"params": [
"LOG_FORMAT=none infctl -f pipelines/dev/failing.json"
],
"retryCount": 0,
"shouldAbort": false
},
{
"name": "run retry pipeline",
"function": "RunCommand",
"params": [
"LOG_FORMAT=none infctl -f pipelines/dev/retry.json"
],
"retryCount": 0,
"shouldAbort": false
},
{
"name": "run carry on pipeline",
"function": "RunCommand",
"params": [
"LOG_FORMAT=none infctl -f pipelines/dev/carry_on.json"
],
"retryCount": 0,
"shouldAbort": true
}
]


@ -1,11 +0,0 @@
[
{
"name": "run a failing job",
"function": "RunCommand",
"params": [
"./scripts/failue.sh"
],
"retryCount": 1,
"shouldAbort": true
}
]


@ -1,11 +0,0 @@
[
{
"name": "run a successful job",
"function": "RunCommand",
"params": [
"./scripts/success.sh"
],
"retryCount": 0,
"shouldAbort": true
}
]


@@ -1,13 +1,5 @@
[
-  {
-    "name": "Checks for .envrc",
-    "function": "RunCommand",
-    "params": [
-      "./scripts/envrc_checks.sh"
-    ],
-    "retryCount": 0,
-    "shouldAbort": true
-  },
  {
    "name": "Create Vagrant nodes",
    "function": "RunCommand",
@@ -17,6 +9,7 @@
    "retryCount": 0,
    "shouldAbort": true
  },
  {
    "name": "Configure Vagrant K3s",
    "function": "RunCommand",
@@ -26,6 +19,8 @@
    "retryCount": 0,
    "shouldAbort": true
  },
  {
    "name": "Create Vagrant workstation",
    "function": "RunCommand",
@@ -35,4 +30,4 @@
    "retryCount": 0,
    "shouldAbort": true
  }
]


@ -1,22 +0,0 @@
#!/usr/bin/env bash
# check if an .envrc file exists
if [ ! -f .envrc ]; then
echo ".envrc file not found"
cp .envrc.example .envrc
if [ $? -eq 0 ]; then
echo ".envrc file created from .envrc.example"
else
echo "Failed to create .envrc file"
exit 1
fi
else
echo ".envrc file found"
cp .envrc vagrant/dev/ubuntu/.envrc
if [ $? -eq 0 ]; then
echo ".envrc file synced to vagrant/dev/ubuntu/.envrc"
else
echo "Failed to sync .envrc file"
exit 1
fi
fi


@@ -1,21 +1,24 @@
#!/usr/bin/env bash
-# sleep 5
echo "crash"
-# sleep 1
+sleep 1
echo "bang"
-# sleep 2
+sleep 2
echo "wallop"
-# sleep 1
+echo
+echo
+echo
echo "Houston, we have a problem"
-sleep 1
+echo
+echo
+echo
exit 1

scripts/install_traefik.sh (new executable file): 135 lines added

@ -0,0 +1,135 @@
#!/usr/bin/env bash
if kubectl -n kube-system get pods --no-headers 2>/dev/null | grep -q 'traefik'; then
echo "Traefik is already running in the 'kube-system' namespace. Upgrading instead."
# Create a temporary values file for more complex configuration
cat > /tmp/traefik-values.yaml <<EOF
ingressClass:
enabled: true
isDefaultClass: true
ports:
web:
port: 80
websecure:
port: 443
traefik:
port: 9000
turn-tcp:
port: 1194
exposedPort: 1194
protocol: TCP
turn-udp:
port: 1194
exposedPort: 1194
protocol: UDP
entryPoints:
turn-tcp:
address: ":1194/tcp"
turn-udp:
address: ":1194/udp"
api:
dashboard: true
insecure: true
ingressRoute:
dashboard:
enabled: true
ping: true
log:
level: INFO
# Add this service section to expose the ports properly
service:
enabled: true
type: LoadBalancer
annotations: {}
ports:
web:
port: 80
protocol: TCP
targetPort: web
websecure:
port: 443
protocol: TCP
targetPort: websecure
turn-tcp:
port: 1194
protocol: TCP
targetPort: turn-tcp
turn-udp:
port: 1194
protocol: UDP
targetPort: turn-udp
EOF
helm upgrade traefik traefik/traefik --namespace kube-system -f /tmp/traefik-values.yaml
else
echo "Installing Traefik..."
helm repo add traefik https://traefik.github.io/charts
helm repo update
# Create a temporary values file for more complex configuration
cat > /tmp/traefik-values.yaml <<EOF
ingressClass:
enabled: true
isDefaultClass: true
ports:
web:
port: 80
websecure:
port: 443
traefik:
port: 9000
turn-tcp:
port: 1194
exposedPort: 1194
protocol: TCP
turn-udp:
port: 1194
exposedPort: 1194
protocol: UDP
entryPoints:
turn-tcp:
address: ":1194/tcp"
turn-udp:
address: ":1194/udp"
api:
dashboard: true
insecure: true
ingressRoute:
dashboard:
enabled: true
ping: true
log:
level: INFO
# Add the service section here too for new installations
service:
enabled: true
type: LoadBalancer
annotations: {}
ports:
web:
port: 80
protocol: TCP
targetPort: web
websecure:
port: 443
protocol: TCP
targetPort: websecure
turn-tcp:
port: 1194
protocol: TCP
targetPort: turn-tcp
turn-udp:
port: 1194
protocol: UDP
targetPort: turn-udp
EOF
helm install traefik traefik/traefik --namespace kube-system -f /tmp/traefik-values.yaml
fi
echo "To access the dashboard:"
echo "kubectl port-forward -n kube-system \$(kubectl get pods -n kube-system -l \"app.kubernetes.io/name=traefik\" -o name) 9000:9000"
echo "Then visit http://localhost:9000/dashboard/ in your browser"


@@ -12,5 +12,3 @@ cd "$VAGRANT_DIR" || {
vagrant up workstation


@ -1,21 +0,0 @@
#!/usr/bin/env bash
# sleep 5
echo "bish"
# sleep 1
echo "bash"
# sleep 2
echo "bosh"
# sleep 1
echo "lovely jubbly"
sleep 1
exit 0


@@ -0,0 +1 @@
export VAGRANT_BRIDGE=<preferred interface to bridge to>


@@ -21,7 +21,6 @@ Vagrant.configure("2") do |config|
  # VM 1 Configuration
  config.vm.define "vm1" do |vm1|
    vm1.vm.box = "ubuntu/jammy64"
-    vm1.vm.boot_timeout = 600
    vm1.vm.hostname = "vm1"

    # Fixed private network IP
@@ -49,7 +48,6 @@ Vagrant.configure("2") do |config|
  # VM 2 Configuration
  config.vm.define "vm2" do |vm2|
    vm2.vm.box = "ubuntu/jammy64"
-    vm2.vm.boot_timeout = 600
    vm2.vm.hostname = "vm2"

    # Fixed private network IP
@@ -77,7 +75,6 @@ Vagrant.configure("2") do |config|
  # VM 3 Configuration
  config.vm.define "vm3" do |vm3|
    vm3.vm.box = "ubuntu/jammy64"
-    vm3.vm.boot_timeout = 600
    vm3.vm.hostname = "vm3"

    # Fixed private network IP
@@ -105,7 +102,6 @@ Vagrant.configure("2") do |config|
  # Ansible Controller/Workstation Configuration
  config.vm.define "workstation" do |ws|
    ws.vm.box = "ubuntu/jammy64"
-    ws.vm.boot_timeout = 600
    ws.vm.hostname = "ansible-workstation"
    ws.vm.synced_folder ".", "/vagrant"


@ -1,78 +0,0 @@
---
- name: Install Dnsmasq on workstation
hosts: localhost
become: true
become_user: root
serial: 1 # Ensure tasks are executed one host at a time
vars_files:
- vars.yaml
tasks:
- name: Install dnsmasq
ansible.builtin.apt:
name: dnsmasq
state: present
- name: Stop systemd-resolved
ansible.builtin.systemd:
name: systemd-resolved
state: stopped
- name: Disable systemd-resolved
ansible.builtin.systemd:
name: systemd-resolved
enabled: false
- name: check to see if /etc/resolv.conf is a symlink
ansible.builtin.stat:
path: /etc/resolv.conf
register: resolv_conf
- name: Remove /etc/resolv.conf if it is a symlink
ansible.builtin.file:
path: /etc/resolv.conf
state: absent
when: resolv_conf.stat.islnk
- name: Ensure /etc/resolv.conf is a regular file
ansible.builtin.file:
path: /etc/resolv.conf
state: touch
- name: Ensure /etc/resolv.conf uses 127.0.0.1 for server
ansible.builtin.lineinfile:
path: /etc/resolv.conf
regexp: '^nameserver'
line: 'nameserver 127.0.0.1'
state: present
- name: Configure dnsmasq
ansible.builtin.copy:
dest: /etc/dnsmasq.d/k3s-cluster.conf
content: |
address=/{{ dnsmasq_k3s_domain }}
server=1.1.1.1
server=8.8.8.8
owner: root
group: root
mode: "0644"
notify: Restart dnsmasq
- name: Ensure conf-dir is uncommented in /etc/dnsmasq.conf
ansible.builtin.lineinfile:
path: /etc/dnsmasq.conf
regexp: '^#?conf-dir=/etc/dnsmasq.d'
line: 'conf-dir=/etc/dnsmasq.d'
state: present
owner: root
group: root
mode: '0644'
handlers:
- name: Restart dnsmasq
ansible.builtin.systemd:
name: dnsmasq
state: restarted
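
As a quick, hedged sanity check of what the playbook above produces, assuming the default `dnsmasq_k3s_domain` of `headshed.it/192.168.56.230` (the hostname queried below is only an example):

```bash
# The rendered /etc/dnsmasq.d/k3s-cluster.conf should contain:
#   address=/headshed.it/192.168.56.230
#   server=1.1.1.1
#   server=8.8.8.8
cat /etc/dnsmasq.d/k3s-cluster.conf

# Any name under the wildcard domain should now resolve locally to the ingress IP.
getent hosts nginx.headshed.it   # expected to include 192.168.56.230
```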

View file

@ -1,10 +1,7 @@
#!/usr/bin/env bash
sudo apt-get update
sudo apt-get install -y software-properties-common git vim python3.10-venv jq figlet sudo apt-get install -y software-properties-common git vim python3.10-venv jq
source /vagrant/.envrc
# Set up ansible environment for vagrant user
sudo -u vagrant mkdir -p /home/vagrant/.ansible
@ -106,7 +103,6 @@ if ! grep -qF "$BLOCK_START" "$BASHRC"; then
eval `ssh-agent -s`
ssh-add ~/machines/*/virtualbox/private_key
ssh-add -L
source /vagrant/.envrc
EOF
else
  echo "Provisioning block already present in $BASHRC"
@ -148,13 +144,7 @@ if [ $? -ne 0 ]; then
fi
# copy_k8s_config.yaml
ANSIBLE_SUPPRESS_INTERPRETER_DISCOVERY_WARNING=1 ANSIBLE_HOST_KEY_CHECKING=False ansible-playbook copy_k8s_config.yaml --inventory-file ansible_inventory.ini
if [ $? -ne 0 ]; then
echo "Ansible playbook failed. Please check your Vagrant VMs and network configuration."
exit 1
fi
ANSIBLE_SUPPRESS_INTERPRETER_DISCOVERY_WARNING=1 ANSIBLE_HOST_KEY_CHECKING=False ansible-playbook install_dnsmasq.yaml --inventory-file ansible_inventory.ini
if [ $? -ne 0 ]; then
  echo "Ansible playbook failed. Please check your Vagrant VMs and network configuration."
  exit 1
@ -168,5 +158,3 @@ if [ $? -ne 0 ]; then
  exit 1
fi

View file

@ -7,8 +7,6 @@ k3s_url_ip: "{{ lookup('env', 'K3S_URL_IP') | default('192.168.56.250', true) }}
workstation_ip: "{{ lookup('env', 'WORKSTATION_IP') | default('192.168.56.10', true) }}"
network_prefix: "{{ lookup('env', 'VAGRANT_NETWORK_PREFIX') | default('192.168.56', true) }}"
dnsmasq_k3s_domain: "{{ lookup('env', 'DNSMASQ_K3S_DOMAIN') | default('headshed.it/192.168.56.230', true) }}"
# K3s configuration
k3s_cluster_name: "dev-cluster"
k3s_token_file: "/opt/k3s-token"

View file

@ -13,14 +13,6 @@ spec:
      labels:
        app: nginx-storage
    spec:
initContainers:
- name: init-nginx-content
image: busybox
command: ["sh", "-c", "echo '<html><body><h1>Welcome to nginx!</h1><h2>using MVK</h2><p><a href=\"https://mvk.headshed.dev/\">https://mvk.headshed.dev/</a></p></body></html>' > /usr/share/nginx/html/index.html"]
volumeMounts:
- name: nginx-data
mountPath: /usr/share/nginx/html
      containers:
      - name: nginx
        image: nginx:stable
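
A hedged way to confirm the initContainer above actually seeded the shared volume; the pod is looked up via the `app=nginx-storage` label shown in the manifest:

```bash
# Grab the first pod carrying the app=nginx-storage label and read the page
# the initContainer wrote into the shared volume.
POD=$(kubectl get pod -l app=nginx-storage -o name | head -n1)
kubectl exec "$POD" -- cat /usr/share/nginx/html/index.html
```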

View file

@ -1,34 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: infmon-cli
namespace: default
spec:
selector:
matchLabels:
app: infmon-cli
replicas: 1
template:
metadata:
labels:
app: infmon-cli
spec:
containers:
- name: infmon-cli
image: 192.168.2.190:5000/infmon-cli:0.0.1
command: ["sleep", "infinity"]
resources:
requests:
cpu: "100m"
memory: "128Mi"
limits:
cpu: "500m"
memory: "512Mi"
volumeMounts:
- name: kubeconfig
mountPath: /root/.kube/config
subPath: config
volumes:
- name: kubeconfig
secret:
secretName: infmon-kubeconfig
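
The Deployment above mounts a Secret named `infmon-kubeconfig` with a key called `config`; a minimal sketch of creating it from an existing kubeconfig (the source path is a placeholder):

```bash
# Package an existing kubeconfig under the key "config" so the subPath mount resolves.
kubectl create secret generic infmon-kubeconfig \
  --namespace default \
  --from-file=config="$HOME/.kube/config"
```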

View file

@ -1,27 +0,0 @@
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: traefik-ingress
namespace: default
# This annotation is good practice to ensure it uses the right entrypoint
annotations:
traefik.ingress.kubernetes.io/router.entrypoints: websecure
spec:
# This block is the key. It tells Ingress controllers like Traefik
# to use the specified secret for TLS termination for the listed hosts.
tls:
- hosts:
- "*.headshed.it" # Or a specific subdomain like test.headshed.it
secretName: wildcard-headshed-it-tls # <-- The name of the secret you created
rules:
- host: nginx.headshed.it # The actual domain you will use to access the service
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: nginx-storage # The name of the k8s service for your app
port:
number: 80 # The port your service is listening on
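
The Ingress above assumes the wildcard secret already exists in the default namespace; a minimal sketch of creating it, assuming a certificate and key are already on disk (file names are placeholders):

```bash
# Create the TLS secret referenced by secretName above.
kubectl create secret tls wildcard-headshed-it-tls \
  --namespace default \
  --cert=wildcard.headshed.it.crt \
  --key=wildcard.headshed.it.key
```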

View file

@ -1,7 +0,0 @@
#!/usr/bin/env bash
kubectl apply -f pvc.yaml
kubectl apply -f deployment.yaml
kubectl apply -f service.yaml
kubectl apply -f ingress.yaml

View file

@ -1,12 +0,0 @@
apiVersion: v1
kind: Service
metadata:
name: nginx-storage
namespace: default
spec:
selector:
app: nginx-storage
ports:
- protocol: TCP
port: 80
targetPort: 80

View file

@ -1,8 +0,0 @@
apiVersion: traefik.io/v1alpha1
kind: TLSStore
metadata:
name: default
namespace: traefik
spec:
defaultCertificate:
secretName: wildcard-headshed-it-tls

View file

@ -1,20 +0,0 @@
[
{
"name": "Install Helm",
"function": "RunCommand",
"params": [
"./scripts/helm_check_install.sh"
],
"retryCount": 0,
"shouldAbort": true
},
{
"name": "Install traefik",
"function": "RunCommand",
"params": [
"./scripts/install_traefik.sh"
],
"retryCount": 0,
"shouldAbort": true
}
]

View file

@ -1,11 +0,0 @@
[
{
"name": "Install metallb",
"function": "RunCommand",
"params": [
"./scripts/install_metallb.sh"
],
"retryCount": 0,
"shouldAbort": true
}
]
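
A hedged usage sketch for pipeline files like the two above; the binary name and file path below are assumptions, not confirmed by this diff:

```bash
# Run the MetalLB pipeline by passing the JSON file path to the CLI.
infctl ./install_metallb.json
```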

View file

@ -14,6 +14,21 @@ then
    install_infctl
fi
# base.json.example config.json.example
# https://codeberg.org/headshed/infctl-cli/raw/branch/main/base.json.example
# https://codeberg.org/headshed/infctl-cli/raw/branch/main/config.json.example
if [ ! -f "base.json" ]; then
echo "base.json not found in home directory, downloading..."
curl -o "base.json" https://codeberg.org/headshed/infctl-cli/raw/branch/main/base.json.example
fi
if [ ! -f "config.json" ]; then
echo "config.json not found in home directory, downloading..."
curl -o "config.json" https://codeberg.org/headshed/infctl-cli/raw/branch/main/config.json.example
fi
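
A small, optional follow-up to the download step above: confirm both files parse before editing them, assuming jq is available (it is installed by the workstation provisioning script):

```bash
# jq exits non-zero if either file is not valid JSON.
jq empty base.json config.json && echo "base.json and config.json parse OK"
```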

View file

@ -1,15 +0,0 @@
#!/usr/bin/env bash
# check to see if helm is installed
if ! command -v helm &> /dev/null; then
echo "Helm is not installed. Installing it now ..."
# curl -fsSL -o get_helm.sh https://raw.githubusercontent.com/helm/helm/main/scripts/get-helm-3
curl https://raw.githubusercontent.com/helm/helm/main/scripts/get-helm-3 | bash
if [ $? -ne 0 ]; then
echo "Failed to install Helm."
exit 1
fi
fi
helm version

View file

@ -1,65 +0,0 @@
#!/usr/bin/env bash
source /vagrant/.envrc
# Check if MetalLB is already installed by looking for the controller deployment
if ! kubectl get deployment -n metallb-system controller &>/dev/null; then
echo "Installing MetalLB..."
kubectl apply -f https://raw.githubusercontent.com/metallb/metallb/main/config/manifests/metallb-native.yaml
if [ $? -ne 0 ]; then
echo "Fatal: Failed to apply MetalLB manifest." >&2
exit 1
fi
# Wait for MetalLB components to be ready
echo "Waiting for MetalLB components to be ready..."
kubectl wait --namespace metallb-system \
--for=condition=ready pod \
--selector=app=metallb \
--timeout=90s
else
echo "MetalLB is already installed."
fi
# Wait for the webhook service to be ready
echo "Waiting for MetalLB webhook service to be ready..."
kubectl wait --namespace metallb-system \
--for=condition=ready pod \
--selector=component=webhook \
--timeout=90s
# Check if the IPAddressPool already exists
if ! kubectl get ipaddresspool -n metallb-system default &>/dev/null; then
echo "Creating MetalLB IPAddressPool..."
cat <<EOF | kubectl apply -f -
apiVersion: metallb.io/v1beta1
kind: IPAddressPool
metadata:
name: default
namespace: metallb-system
spec:
addresses:
- ${METALLB_IP_RANGE}
EOF
else
echo "MetalLB IPAddressPool already exists."
fi
# Check if the L2Advertisement already exists
if ! kubectl get l2advertisement -n metallb-system default &>/dev/null; then
echo "Creating MetalLB L2Advertisement..."
cat <<EOF | kubectl apply -f -
apiVersion: metallb.io/v1beta1
kind: L2Advertisement
metadata:
name: default
namespace: metallb-system
spec:
ipAddressPools:
- default
EOF
else
echo "MetalLB L2Advertisement already exists."
fi
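
A hedged sanity check for the setup above: the pool and advertisement should exist, and any LoadBalancer Service should be assigned an address from METALLB_IP_RANGE (192.168.56.230-192.168.56.240 by default):

```bash
# List the MetalLB resources created above and look for assigned external IPs.
kubectl get ipaddresspool,l2advertisement -n metallb-system
kubectl get svc -A | grep LoadBalancer
```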

View file

@ -1,68 +0,0 @@
#!/usr/bin/env bash
# Exit immediately if a command exits with a non-zero status.
set -e
TMPFILE=$(mktemp)
trap 'rm -f "$TMPFILE"' EXIT
cat > "$TMPFILE" <<EOF
ingressClass:
enabled: true
isDefaultClass: true
ports:
web:
port: 80
websecure:
port: 443
traefik:
port: 9000
api:
dashboard: true
insecure: true
ingressRoute:
dashboard:
enabled: true
ping: true
log:
level: INFO
service:
enabled: true
type: LoadBalancer
annotations: {}
ports:
web:
port: 80
protocol: TCP
targetPort: web
websecure:
port: 443
protocol: TCP
targetPort: websecure
EOF
if helm status traefik --namespace traefik &> /dev/null; then
echo "Traefik is already installed in the 'traefik' namespace. Upgrading..."
helm upgrade traefik traefik/traefik --namespace traefik -f "$TMPFILE"
else
echo "Installing Traefik..."
helm repo add traefik https://traefik.github.io/charts
helm repo update
    # Using --create-namespace is good practice, even though the 'traefik' namespace may already exist.
helm install traefik traefik/traefik --namespace traefik --create-namespace -f "$TMPFILE"
fi
# Apply the TLS store configuration
kubectl apply -f k8s/traefik-tlsstore.yaml
if [ $? -ne 0 ]; then
echo "Failed to apply TLS store configuration."
exit 1
fi
echo
echo "To access the dashboard:"
echo "kubectl port-forward -n traefik \$(kubectl get pods -n traefik -l \"app.kubernetes.io/name=traefik\" -o name) 9000:9000"
echo "Then visit http://localhost:9000/dashboard/ in your browser"