#!/usr/bin/env bash
#
# install.sh — create a Debian 12 LXC on Proxmox and provision a customer
# stack inside it (Postgres/pgvector + n8n + PostgREST). Prints a
# machine-readable JSON summary on the original stdout at the end.
#
# Requires libsupabase.sh next to this script (die/info/warn, pct_* / pve_*
# helpers, gen_password_policy, gen_hex_64, setup_traps, ...).
#
# NOTE(review): recovered from a whitespace-mangled source (statements were
# joined onto a few lines); re-formatted, logic preserved.

set -Eeuo pipefail

# Debug mode: 0 = only JSON on stdout, 1 = logs additionally on stderr
DEBUG="${DEBUG:-0}"

# Pre-scan the arguments for --debug: the main option parser below runs
# only AFTER stdout/stderr have been redirected into the log file, which
# is too late for the flag to influence the redirection itself.
for __arg in "$@"; do
  if [[ "${__arg}" == "--debug" ]]; then
    DEBUG="1"
  fi
done
export DEBUG

SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"

# Log directory
LOG_DIR="${SCRIPT_DIR}/logs"
mkdir -p "${LOG_DIR}"

# Temporary log file (renamed to the container hostname on exit)
TEMP_LOG="${LOG_DIR}/install_$$.log"
FINAL_LOG=""

# Exit hook: once FINAL_LOG is known, move the temp log to its final name.
cleanup_log() {
  if [[ -n "${FINAL_LOG}" && -f "${TEMP_LOG}" ]]; then
    mv "${TEMP_LOG}" "${FINAL_LOG}"
  fi
}
trap cleanup_log EXIT

# Redirect all output into the log file.
# DEBUG=1: additionally mirror to stderr (tee).
# DEBUG=0: log file only; fd 3 keeps the original stdout free for the JSON.
if [[ "${DEBUG}" == "1" ]]; then
  exec > >(tee -a "${TEMP_LOG}") 2>&1
else
  exec 3>&1                      # reserve original stdout (fd 3) for JSON
  exec > "${TEMP_LOG}" 2>&1
fi

source "${SCRIPT_DIR}/libsupabase.sh"
setup_traps

# Print CLI help to stderr.
usage() {
  cat >&2 <<'EOF'
Usage: bash install.sh [options]

Core options:
  --ctid              Force CT ID (optional). If omitted, a customer-safe CTID is generated.
  --cores             (default: 4)
  --memory            (default: 4096)
  --swap              (default: 512)
  --disk              (default: 50)
  --bridge            (default: vmbr0)
  --storage           (default: local-zfs)
  --ip                (default: dhcp)
  --vlan              VLAN tag for net0 (default: 90; set 0 to disable)
  --privileged        Create privileged CT (default: unprivileged)
  --apt-proxy         Optional: APT proxy (e.g. http://192.168.45.2:3142) for Apt-Cacher NG

Domain / n8n options:
  --base-domain       (default: userman.de) -> FQDN becomes sb-<ts>.<domain>
  --n8n-owner-email   (default: admin@<base-domain>)
  --n8n-owner-pass    Optional. If omitted, generated (policy compliant).
  --debug             Enable debug mode (show logs on stderr)
  --help              Show help

PostgREST / Supabase options:
  --postgrest-port    PostgREST port (default: 3000)

Notes:
- This script creates a Debian 12 LXC and provisions Docker + customer stack
  (Postgres/pgvector + n8n + PostgREST).
- PostgREST provides a REST API for PostgreSQL, compatible with Supabase
  Vector Store node in n8n.
- At the end it prints a JSON with credentials and URLs.
EOF
}

# ---------------------------
# Defaults
# ---------------------------
#APT_PROXY="http://192.168.45.2:3142"
DOCKER_REGISTRY_MIRROR="http://192.168.45.2:5000"
APT_PROXY=""
#DOCKER_REGISTRY_MIRROR=""
CTID=""
CORES="4"
MEMORY="4096"
SWAP="512"
DISK="50"
BRIDGE="vmbr0"
STORAGE="local-zfs"
IPCFG="dhcp"
VLAN="90"
UNPRIV="1"
BASE_DOMAIN="userman.de"
N8N_OWNER_EMAIL=""
N8N_OWNER_PASS=""
POSTGREST_PORT="3000"

# Ollama API settings (hardcoded for local setup)
OLLAMA_HOST="192.168.45.3"
OLLAMA_PORT="11434"
OLLAMA_URL="http://${OLLAMA_HOST}:${OLLAMA_PORT}"

# ---------------------------
# Arg parsing
# ---------------------------
while [[ $# -gt 0 ]]; do
  case "$1" in
    --ctid) CTID="${2:-}"; shift 2 ;;
    --apt-proxy) APT_PROXY="${2:-}"; shift 2 ;;
    --cores) CORES="${2:-}"; shift 2 ;;
    --memory) MEMORY="${2:-}"; shift 2 ;;
    --swap) SWAP="${2:-}"; shift 2 ;;
    --disk) DISK="${2:-}"; shift 2 ;;
    --bridge) BRIDGE="${2:-}"; shift 2 ;;
    --storage) STORAGE="${2:-}"; shift 2 ;;
    --ip) IPCFG="${2:-}"; shift 2 ;;
    --vlan) VLAN="${2:-}"; shift 2 ;;
    --privileged) UNPRIV="0"; shift 1 ;;
    --base-domain) BASE_DOMAIN="${2:-}"; shift 2 ;;
    --n8n-owner-email) N8N_OWNER_EMAIL="${2:-}"; shift 2 ;;
    --n8n-owner-pass) N8N_OWNER_PASS="${2:-}"; shift 2 ;;
    --postgrest-port) POSTGREST_PORT="${2:-}"; shift 2 ;;
    --debug) DEBUG="1"; export DEBUG; shift 1 ;;  # also handled by pre-scan above
    --help|-h) usage; exit 0 ;;
    *) die "Unknown option: $1 (use --help)" ;;
  esac
done

# ---------------------------
# Validation
# ---------------------------
[[ "$CORES" =~ ^[0-9]+$ ]] || die "--cores must be integer"
[[ "$MEMORY" =~ ^[0-9]+$ ]] || die "--memory must be integer"
[[ "$SWAP" =~ ^[0-9]+$ ]] || die "--swap must be integer"
[[ "$DISK" =~ ^[0-9]+$ ]] || die "--disk must be integer"
[[ "$UNPRIV" == "0" || "$UNPRIV" == "1" ]] || die "internal: UNPRIV invalid"
[[ "$VLAN" =~ ^[0-9]+$ ]] || die "--vlan must be integer (0 disables tagging)"
[[ -n "$BASE_DOMAIN" ]] || die "--base-domain must not be empty"

if [[ "$IPCFG" != "dhcp" ]]; then
  [[ "$IPCFG" =~ ^([0-9]{1,3}\.){3}[0-9]{1,3}/[0-9]{1,2}$ ]] \
    || die "--ip must be dhcp or CIDR (e.g. 192.168.45.171/24)"
fi
if [[ -n "${APT_PROXY}" ]]; then
  [[ "${APT_PROXY}" =~ ^http://[^/]+:[0-9]+$ ]] \
    || die "--apt-proxy must look like http://IP:PORT (example: http://192.168.45.2:3142)"
fi

info "Argument-Parsing OK"
if [[ -n "${APT_PROXY}" ]]; then
  info "APT proxy enabled: ${APT_PROXY}"
else
  info "APT proxy disabled"
fi

# ---------------------------
# Preflight Proxmox
# ---------------------------
need_cmd pct pvesm pveam pvesh grep date awk sed cut tr head
pve_storage_exists "$STORAGE" || die "Storage not found: $STORAGE"
pve_bridge_exists "$BRIDGE" || die "Bridge not found: $BRIDGE"

TEMPLATE="$(pve_template_ensure_debian12 "$STORAGE")"
info "Template OK: ${TEMPLATE}"

# Hostname / FQDN based on unix time
UNIXTS="$(date +%s)"
CT_HOSTNAME="sb-${UNIXTS}"
FQDN="${CT_HOSTNAME}.${BASE_DOMAIN}"

# Name the log file after the container hostname (moved by cleanup_log)
FINAL_LOG="${LOG_DIR}/${CT_HOSTNAME}.log"

# CTID selection
if [[ -n "$CTID" ]]; then
  [[ "$CTID" =~ ^[0-9]+$ ]] || die "--ctid must be integer"
  if pve_vmid_exists_cluster "$CTID"; then
    die "Forced CTID=${CTID} already exists in cluster"
  fi
else
  # Agreed approach: unix time - 1000000000 (safe until 2038)
  CTID="$(pve_ctid_from_unixtime "$UNIXTS")"
  if pve_vmid_exists_cluster "$CTID"; then
    die "Generated CTID=${CTID} already exists in cluster (unexpected). Try again in 1s."
  fi
fi

# n8n owner defaults
if [[ -z "$N8N_OWNER_EMAIL" ]]; then
  N8N_OWNER_EMAIL="admin@${BASE_DOMAIN}"
fi
if [[ -z "$N8N_OWNER_PASS" ]]; then
  N8N_OWNER_PASS="$(gen_password_policy)"
else
  # Enforce the policy early to avoid the n8n UI error later.
  password_policy_check "$N8N_OWNER_PASS" \
    || die "--n8n-owner-pass does not meet policy: 8+ chars, 1 number, 1 uppercase"
fi

info "CTID selected: ${CTID}"
info "SCRIPT_DIR=${SCRIPT_DIR}"
info "CT_HOSTNAME=${CT_HOSTNAME}"
info "FQDN=${FQDN}"
info "cores=${CORES} memory=${MEMORY}MB swap=${SWAP}MB disk=${DISK}GB"
info "bridge=${BRIDGE} storage=${STORAGE} ip=${IPCFG} vlan=${VLAN} unprivileged=${UNPRIV}"

# ---------------------------
# Step 5: Create CT
# ---------------------------
NET0="$(pve_build_net0 "$BRIDGE" "$IPCFG" "$VLAN")"
ROOTFS="${STORAGE}:${DISK}"
# nesting/keyctl/fuse are required for Docker inside an unprivileged LXC
FEATURES="nesting=1,keyctl=1,fuse=1"

info "Step 5: Create CT"
info "Creating CT ${CTID} (${CT_HOSTNAME}) from ${TEMPLATE}"
pct create "${CTID}" "${TEMPLATE}" \
  --hostname "${CT_HOSTNAME}" \
  --cores "${CORES}" \
  --memory "${MEMORY}" \
  --swap "${SWAP}" \
  --net0 "${NET0}" \
  --rootfs "${ROOTFS}" \
  --unprivileged "${UNPRIV}" \
  --features "${FEATURES}" \
  --start 0 \
  --onboot yes

info "CT created (not started). Next step: start CT + wait for IP"
info "Starting CT ${CTID}"
pct start "${CTID}"

CT_IP="$(pct_wait_for_ip "${CTID}" || true)"
[[ -n "${CT_IP}" ]] || die "Could not determine CT IP after start"

info "Step 5 OK: LXC erstellt + IP ermittelt"
info "CT_HOSTNAME=${CT_HOSTNAME}"
info "CT_IP=${CT_IP}"

# ---------------------------
# Step 6: Provision inside CT (Docker + Locales + Base)
# ---------------------------
info "Step 6: Provisioning im CT (Docker + Locales + Base)"

# Optional: APT proxy (Apt-Cacher NG). ${APT_PROXY} expands in THIS shell;
# the quoted 'EOF' only prevents a second expansion inside the container.
if [[ -n "${APT_PROXY}" ]]; then
  pct_exec "${CTID}" "cat > /etc/apt/apt.conf.d/00aptproxy <<'EOF'
Acquire::http::Proxy \"${APT_PROXY}\";
#Acquire::https::Proxy \"DIRECT\";
Acquire::https::Proxy \"${APT_PROXY}\";
EOF"
  pct_exec "$CTID" "apt-config dump | grep -i proxy || true"
fi

# Base packages + locales. (The original ran apt-get update/install twice
# back to back with overlapping package lists; merged into one pass.)
pct_exec "${CTID}" "export DEBIAN_FRONTEND=noninteractive; apt-get update -y"
pct_exec "${CTID}" "export DEBIAN_FRONTEND=noninteractive; apt-get install -y locales ca-certificates curl gnupg lsb-release"

# Locales (avoid perl warnings + consistent system)
pct_exec "${CTID}" "sed -i 's/^# *de_DE.UTF-8 UTF-8/de_DE.UTF-8 UTF-8/; s/^# *en_US.UTF-8 UTF-8/en_US.UTF-8 UTF-8/' /etc/locale.gen || true"
pct_exec "${CTID}" "locale-gen >/dev/null || true"
pct_exec "${CTID}" "update-locale LANG=de_DE.UTF-8 LC_ALL=de_DE.UTF-8 || true"

# Docker official repo (Debian 12 / bookworm)
pct_exec "${CTID}" "install -m 0755 -d /etc/apt/keyrings"
pct_exec "${CTID}" "curl -fsSL https://download.docker.com/linux/debian/gpg | gpg --dearmor -o /etc/apt/keyrings/docker.gpg"
pct_exec "${CTID}" "chmod a+r /etc/apt/keyrings/docker.gpg"
pct_exec "${CTID}" "echo \"deb [arch=\$(dpkg --print-architecture) signed-by=/etc/apt/keyrings/docker.gpg] https://download.docker.com/linux/debian \$(. /etc/os-release && echo \$VERSION_CODENAME) stable\" > /etc/apt/sources.list.d/docker.list"
pct_exec "${CTID}" "export DEBIAN_FRONTEND=noninteractive; apt-get update -y"
pct_exec "${CTID}" "export DEBIAN_FRONTEND=noninteractive; apt-get install -y docker-ce docker-ce-cli containerd.io docker-buildx-plugin docker-compose-plugin"

# Create stack directories
pct_exec "${CTID}" "mkdir -p /opt/customer-stack/volumes/postgres/data /opt/customer-stack/volumes/n8n-data /opt/customer-stack/sql"

# IMPORTANT: n8n runs as node (uid 1000) => fix permissions
pct_exec "${CTID}" "chown -R 1000:1000 /opt/customer-stack/volumes/n8n-data"

info "Step 6 OK: Docker + Compose Plugin installiert, Locales gesetzt, Basis-Verzeichnisse erstellt"
info "Next: Schritt 7 (finales docker-compose + Secrets + n8n/supabase up + Healthchecks)"

# ---------------------------
# Step 7: Stack finalisieren + Secrets + Up + Checks
# ---------------------------
info "Step 7: Stack finalisieren + Secrets + Up + Checks"

# Secrets
PG_DB="customer"
PG_USER="customer"
PG_PASSWORD="$(gen_password_policy)"
N8N_ENCRYPTION_KEY="$(gen_hex_64)"

# External URL is HTTPS via OPNsense reverse proxy (container itself is http)
N8N_PORT="5678"
N8N_PROTOCOL="http"
N8N_HOST="${CT_IP}"
N8N_EDITOR_BASE_URL="https://${FQDN}/"
WEBHOOK_URL="https://${FQDN}/"

# If you are behind HTTPS reverse proxy, secure cookies can be true.
# But until proxy is in place, false avoids login trouble.
N8N_SECURE_COOKIE="false"

# Generate JWT secret for PostgREST (32 bytes = 256 bit)
JWT_SECRET="$(openssl rand -base64 32 | tr -d '\n')"

# base64url-encode stdin without padding (JWT segment encoding).
b64url() { base64 | tr -d '\n' | tr '+/' '-_' | tr -d '='; }

# Build proper header.payload.signature JWTs (HS256) for PostgREST.
JWT_HEADER="$(printf '%s' '{"alg":"HS256","typ":"JWT"}' | b64url)"
ANON_PAYLOAD="$(printf '%s' '{"role":"anon","iss":"supabase","iat":1700000000,"exp":2000000000}' | b64url)"
SERVICE_PAYLOAD="$(printf '%s' '{"role":"service_role","iss":"supabase","iat":1700000000,"exp":2000000000}' | b64url)"
ANON_SIGNATURE="$(printf '%s' "${JWT_HEADER}.${ANON_PAYLOAD}" | openssl dgst -sha256 -hmac "${JWT_SECRET}" -binary | b64url)"
SERVICE_SIGNATURE="$(printf '%s' "${JWT_HEADER}.${SERVICE_PAYLOAD}" | openssl dgst -sha256 -hmac "${JWT_SECRET}" -binary | b64url)"
ANON_KEY="${JWT_HEADER}.${ANON_PAYLOAD}.${ANON_SIGNATURE}"
SERVICE_ROLE_KEY="${JWT_HEADER}.${SERVICE_PAYLOAD}.${SERVICE_SIGNATURE}"
info "Generated JWT Secret and API Keys for PostgREST"

# Write .env into CT (consumed by docker-compose.yml below).
# NOTE(review): the original .env heredoc was lost in the mangled source;
# reconstructed from the variables docker-compose.yml references — verify
# against a known-good copy, especially the three telemetry toggles.
pct_push_text "${CTID}" "/opt/customer-stack/.env" "$(cat <<ENV
PG_DB=${PG_DB}
PG_USER=${PG_USER}
PG_PASSWORD=${PG_PASSWORD}
POSTGREST_PORT=${POSTGREST_PORT}
JWT_SECRET=${JWT_SECRET}
N8N_PORT=${N8N_PORT}
N8N_PROTOCOL=${N8N_PROTOCOL}
N8N_HOST=${N8N_HOST}
N8N_EDITOR_BASE_URL=${N8N_EDITOR_BASE_URL}
WEBHOOK_URL=${WEBHOOK_URL}
N8N_SECURE_COOKIE=${N8N_SECURE_COOKIE}
N8N_ENCRYPTION_KEY=${N8N_ENCRYPTION_KEY}
N8N_DIAGNOSTICS_ENABLED=false
N8N_VERSION_NOTIFICATIONS_ENABLED=false
N8N_TEMPLATES_ENABLED=false
ENV
)"

# Initial SQL: pgvector + Supabase-compatible match_documents() + roles.
# NOTE(review): the table/function heads were lost in the mangled source;
# reconstructed to the canonical Supabase vector-store schema. Embedding
# dimension 768 assumed for nomic-embed-text — TODO confirm.
pct_push_text "${CTID}" "/opt/customer-stack/sql/init.sql" "$(cat <<'SQL'
CREATE EXTENSION IF NOT EXISTS vector;

CREATE TABLE IF NOT EXISTS public.documents (
  id bigserial PRIMARY KEY,
  content text,
  metadata jsonb DEFAULT '{}'::jsonb,
  embedding vector(768)
);

CREATE OR REPLACE FUNCTION public.match_documents(
  query_embedding vector(768),
  match_count int DEFAULT NULL,
  filter jsonb DEFAULT '{}'
) RETURNS TABLE (
  id bigint,
  content text,
  metadata jsonb,
  similarity double precision
)
LANGUAGE plpgsql
AS $$
BEGIN
  RETURN QUERY
  SELECT
    d.id,
    d.content,
    d.metadata,
    1 - (d.embedding <=> query_embedding) AS similarity
  FROM public.documents d
  WHERE (filter = '{}' OR d.metadata @> filter)
  ORDER BY d.embedding <=> query_embedding
  LIMIT match_count;
END;
$$;

-- Grant permissions for PostgREST roles
-- Create roles if they don't exist
DO $$
BEGIN
  IF NOT EXISTS (SELECT FROM pg_roles WHERE rolname = 'anon') THEN
    CREATE ROLE anon NOLOGIN;
  END IF;
  IF NOT EXISTS (SELECT FROM pg_roles WHERE rolname = 'service_role') THEN
    CREATE ROLE service_role NOLOGIN;
  END IF;
  IF NOT EXISTS (SELECT FROM pg_roles WHERE rolname = 'authenticator') THEN
    CREATE ROLE authenticator NOINHERIT LOGIN PASSWORD 'authenticator_password';
  END IF;
END $$;

-- Grant permissions
GRANT USAGE ON SCHEMA public TO anon, service_role;
GRANT ALL ON ALL TABLES IN SCHEMA public TO anon, service_role;
GRANT ALL ON ALL SEQUENCES IN SCHEMA public TO anon, service_role;
GRANT EXECUTE ON ALL FUNCTIONS IN SCHEMA public TO anon, service_role;

-- Allow authenticator to switch to these roles
GRANT anon TO authenticator;
GRANT service_role TO authenticator;

-- Set default privileges for future tables
ALTER DEFAULT PRIVILEGES IN SCHEMA public GRANT ALL ON TABLES TO anon, service_role;
ALTER DEFAULT PRIVILEGES IN SCHEMA public GRANT ALL ON SEQUENCES TO anon, service_role;
ALTER DEFAULT PRIVILEGES IN SCHEMA public GRANT EXECUTE ON FUNCTIONS TO anon, service_role;
SQL
)"

# docker-compose.yml — quoted 'YML' so ${...} stays literal for compose,
# which substitutes from the .env written above.
pct_push_text "${CTID}" "/opt/customer-stack/docker-compose.yml" "$(cat <<'YML'
services:
  postgres:
    image: pgvector/pgvector:pg16
    container_name: customer-postgres
    restart: unless-stopped
    environment:
      POSTGRES_DB: ${PG_DB}
      POSTGRES_USER: ${PG_USER}
      POSTGRES_PASSWORD: ${PG_PASSWORD}
    volumes:
      - ./volumes/postgres/data:/var/lib/postgresql/data
      - ./sql:/docker-entrypoint-initdb.d:ro
    healthcheck:
      test: ["CMD-SHELL", "pg_isready -U ${PG_USER} -d ${PG_DB} || exit 1"]
      interval: 10s
      timeout: 5s
      retries: 20
    networks:
      - customer-net

  postgrest:
    image: postgrest/postgrest:latest
    container_name: customer-postgrest
    restart: unless-stopped
    depends_on:
      postgres:
        condition: service_healthy
    ports:
      - "${POSTGREST_PORT}:3000"
    environment:
      PGRST_DB_URI: postgres://${PG_USER}:${PG_PASSWORD}@postgres:5432/${PG_DB}
      PGRST_DB_SCHEMA: public
      PGRST_DB_ANON_ROLE: anon
      PGRST_JWT_SECRET: ${JWT_SECRET}
      PGRST_DB_USE_LEGACY_GUCS: "false"
    networks:
      - customer-net

  n8n:
    image: n8nio/n8n:latest
    container_name: n8n
    restart: unless-stopped
    depends_on:
      postgres:
        condition: service_healthy
      postgrest:
        condition: service_started
    ports:
      - "${N8N_PORT}:5678"
    environment:
      # --- Web / Cookies / URL ---
      N8N_PORT: 5678
      N8N_PROTOCOL: ${N8N_PROTOCOL}
      N8N_HOST: ${N8N_HOST}
      N8N_EDITOR_BASE_URL: ${N8N_EDITOR_BASE_URL}
      WEBHOOK_URL: ${WEBHOOK_URL}
      N8N_SECURE_COOKIE: ${N8N_SECURE_COOKIE}
      # --- Disable telemetry / background calls ---
      N8N_DIAGNOSTICS_ENABLED: ${N8N_DIAGNOSTICS_ENABLED}
      N8N_VERSION_NOTIFICATIONS_ENABLED: ${N8N_VERSION_NOTIFICATIONS_ENABLED}
      N8N_TEMPLATES_ENABLED: ${N8N_TEMPLATES_ENABLED}
      # --- DB (Postgres) ---
      DB_TYPE: postgresdb
      DB_POSTGRESDB_HOST: postgres
      DB_POSTGRESDB_PORT: 5432
      DB_POSTGRESDB_DATABASE: ${PG_DB}
      DB_POSTGRESDB_USER: ${PG_USER}
      DB_POSTGRESDB_PASSWORD: ${PG_PASSWORD}
      # --- Basics ---
      GENERIC_TIMEZONE: Europe/Berlin
      TZ: Europe/Berlin
      N8N_ENCRYPTION_KEY: ${N8N_ENCRYPTION_KEY}
    volumes:
      - ./volumes/n8n-data:/home/node/.n8n
    networks:
      - customer-net

networks:
  customer-net:
    driver: bridge
YML
)"

# Make sure permissions are correct (again, after file writes)
pct_exec "${CTID}" "chown -R 1000:1000 /opt/customer-stack/volumes/n8n-data"

# Docker registry mirror — only used in lab/proxy mode.
# NOTE(review): the daemon.json heredoc was truncated in the mangled
# source; reconstructed from DOCKER_REGISTRY_MIRROR — verify.
if [[ -n "${APT_PROXY}" ]]; then
  pct_exec "$CTID" "mkdir -p /etc/docker"
  pct_exec "$CTID" "cat > /etc/docker/daemon.json <<EOF
{ \"registry-mirrors\": [\"${DOCKER_REGISTRY_MIRROR}\"] }
EOF"
  pct_exec "$CTID" "systemctl restart docker"
fi

# Bring the stack up and give the containers time to initialize.
# NOTE(review): the compose-up/wait commands were truncated in the mangled
# source; reconstructed — verify against a known-good copy.
pct_exec "${CTID}" "cd /opt/customer-stack && docker compose up -d"
pct_exec "${CTID}" "cd /opt/customer-stack && docker compose ps >/dev/null 2>&1 || true"

# Try modern command first (works in current n8n builds); if it fails, we
# leave the setup screen (visible in logs) and fall back to the REST API.
pct_exec "${CTID}" "cd /opt/customer-stack && (docker exec -u node n8n n8n user-management:reset --email '${N8N_OWNER_EMAIL}' --password '${N8N_OWNER_PASS}' --firstName 'Admin' --lastName 'Owner' >/dev/null 2>&1 || true)"

info "Step 7 OK: Stack deployed"

# ---------------------------
# Step 8: Setup Owner Account via REST API (fallback)
# ---------------------------
info "Step 8: Setting up owner account via REST API..."

# Wait for n8n to be ready
sleep 5

# Try REST API setup (works if user-management:reset didn't work)
pct_exec "${CTID}" "curl -sS -X POST 'http://127.0.0.1:5678/rest/owner/setup' \
  -H 'Content-Type: application/json' \
  -d '{\"email\":\"${N8N_OWNER_EMAIL}\",\"firstName\":\"Admin\",\"lastName\":\"Owner\",\"password\":\"${N8N_OWNER_PASS}\"}' || true"

info "Step 8 OK: Owner account setup attempted"

# ---------------------------
# Step 9: Final URLs and Output
# ---------------------------
info "Step 9: Generating final output..."
# Final URLs
N8N_INTERNAL_URL="http://${CT_IP}:5678/"
N8N_EXTERNAL_URL="https://${FQDN}"
POSTGREST_URL="http://${CT_IP}:${POSTGREST_PORT}"

# Supabase URL format for the n8n credential (PostgREST acts as Supabase API).
# IMPORTANT: n8n runs inside Docker, so it needs the Docker-internal URL!
SUPABASE_URL="http://postgrest:3000"
SUPABASE_URL_EXTERNAL="http://${CT_IP}:${POSTGREST_PORT}"

# Chat URLs (webhook for the chat trigger — live after workflow activation)
CHAT_WEBHOOK_URL="https://${FQDN}/webhook/rag-chat-webhook/chat"
CHAT_INTERNAL_URL="http://${CT_IP}:5678/webhook/rag-chat-webhook/chat"

info "n8n intern: ${N8N_INTERNAL_URL}"
info "n8n extern (geplant via OPNsense): ${N8N_EXTERNAL_URL}"
info "PostgREST API: ${POSTGREST_URL}"
info "Supabase Service Role Key: ${SERVICE_ROLE_KEY}"
info "Ollama URL: ${OLLAMA_URL}"
info "Chat Webhook URL (extern): ${CHAT_WEBHOOK_URL}"
info "Chat Webhook URL (intern): ${CHAT_INTERNAL_URL}"

# ---------------------------
# Step 10: Setup n8n Credentials + Import Workflow + Activate
# ---------------------------
info "Step 10: Setting up n8n credentials and importing RAG workflow..."

# Robust n8n setup helper from libsupabase.sh.
# Args: ctid, email, password, pg_host, pg_port, pg_db, pg_user, pg_pass,
#       ollama_url, ollama_model, embedding_model
if n8n_setup_rag_workflow "${CTID}" "${N8N_OWNER_EMAIL}" "${N8N_OWNER_PASS}" \
  "postgres" "5432" "${PG_DB}" "${PG_USER}" "${PG_PASSWORD}" \
  "${OLLAMA_URL}" "llama3.2:3b" "nomic-embed-text:v1.5"; then
  info "Step 10 OK: n8n RAG workflow setup completed successfully"
else
  warn "Step 10: n8n workflow setup failed - manual setup may be required"
  info "Step 10: You can manually import the workflow via n8n UI"
fi

# ---------------------------
# Step 11: Setup NGINX Reverse Proxy in OPNsense
# ---------------------------
info "Step 11: Setting up NGINX Reverse Proxy in OPNsense..."
# Check if setup_nginx_proxy.sh exists
if [[ -f "${SCRIPT_DIR}/setup_nginx_proxy.sh" ]]; then
  # Run the proxy setup script; a failure must not abort the install,
  # hence the || fallback JSON instead of letting set -e trigger.
  PROXY_RESULT=$(DEBUG="${DEBUG}" bash "${SCRIPT_DIR}/setup_nginx_proxy.sh" \
    --ctid "${CTID}" \
    --hostname "${CT_HOSTNAME}" \
    --fqdn "${FQDN}" \
    --backend-ip "${CT_IP}" \
    --backend-port "5678" \
    2>&1 || echo '{"success": false, "error": "Proxy setup failed"}')

  # Check if proxy setup was successful (string match on the child JSON)
  if echo "$PROXY_RESULT" | grep -q '"success": true'; then
    info "NGINX Reverse Proxy setup successful"
  else
    warn "NGINX Reverse Proxy setup may have failed: ${PROXY_RESULT}"
  fi
else
  warn "setup_nginx_proxy.sh not found, skipping proxy setup"
fi

info "Step 11 OK: Proxy setup completed"

# ---------------------------
# Final JSON Output
# ---------------------------
# Machine-readable JSON for downstream automation, compact on one line.
# DEBUG=0: JSON goes to fd 3 (the original stdout reserved at startup).
# DEBUG=1: JSON goes to the normal stdout (so it also lands in the log).
JSON_OUTPUT="{\"ctid\":${CTID},\"hostname\":\"${CT_HOSTNAME}\",\"fqdn\":\"${FQDN}\",\"ip\":\"${CT_IP}\",\"vlan\":${VLAN},\"urls\":{\"n8n_internal\":\"${N8N_INTERNAL_URL}\",\"n8n_external\":\"${N8N_EXTERNAL_URL}\",\"postgrest\":\"${POSTGREST_URL}\",\"chat_webhook\":\"${CHAT_WEBHOOK_URL}\",\"chat_internal\":\"${CHAT_INTERNAL_URL}\"},\"postgres\":{\"host\":\"postgres\",\"port\":5432,\"db\":\"${PG_DB}\",\"user\":\"${PG_USER}\",\"password\":\"${PG_PASSWORD}\"},\"supabase\":{\"url\":\"${SUPABASE_URL}\",\"url_external\":\"${SUPABASE_URL_EXTERNAL}\",\"anon_key\":\"${ANON_KEY}\",\"service_role_key\":\"${SERVICE_ROLE_KEY}\",\"jwt_secret\":\"${JWT_SECRET}\"},\"ollama\":{\"url\":\"${OLLAMA_URL}\"},\"n8n\":{\"encryption_key\":\"${N8N_ENCRYPTION_KEY}\",\"owner_email\":\"${N8N_OWNER_EMAIL}\",\"owner_password\":\"${N8N_OWNER_PASS}\",\"secure_cookie\":${N8N_SECURE_COOKIE}},\"log_file\":\"${FINAL_LOG}\"}"

if [[ "$DEBUG" == "1" ]]; then
  # Debug mode: pretty-print when python3 is available, else raw
  echo "$JSON_OUTPUT" | python3 -m json.tool 2>/dev/null || echo "$JSON_OUTPUT"
else
  # Normal mode: compact JSON to the original stdout (fd 3)
  echo "$JSON_OUTPUT" >&3
fi