#!/usr/bin/env bash

# Strict mode: abort on errors (also inside functions/subshells via -E),
# on unset variables, and on failures anywhere in a pipeline.
set -Eeuo pipefail

# Debug mode: 0 = JSON on stdout only, 1 = additionally mirror logs to stderr.
DEBUG="${DEBUG:-0}"
export DEBUG
|
|
|
|
|
|
2026-01-09 20:09:29 +01:00
|
|
|
# Absolute directory containing this script (works regardless of caller CWD).
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"

# Log directory next to the script.
LOG_DIR="${SCRIPT_DIR}/logs"
mkdir -p "${LOG_DIR}"

# Per-run log: written under a PID-suffixed temp name first, then renamed to
# the container hostname once that is known (see cleanup_log / FINAL_LOG).
TEMP_LOG="${LOG_DIR}/install_$$.log"
FINAL_LOG=""
|
|
|
|
|
|
|
|
|
|
# On exit: rename the PID-suffixed temp log to its final hostname-based name,
# but only once FINAL_LOG has been chosen and the temp log actually exists.
cleanup_log() {
  if [[ -z "${FINAL_LOG}" ]] || [[ ! -f "${TEMP_LOG}" ]]; then
    return 0
  fi
  mv "${TEMP_LOG}" "${FINAL_LOG}"
}
trap cleanup_log EXIT
|
|
|
|
|
|
|
|
|
|
# Route all output into the temp log file.
#   DEBUG=0: log to file only; the original stdout is preserved on fd 3 so the
#            final JSON can still reach the caller.
#   DEBUG=1: mirror everything to stderr as well (tee).
if [[ "$DEBUG" != "1" ]]; then
  # Normal mode: file only, stdout stays free for JSON.
  exec 3>&1                 # reserve original stdout (fd 3) for JSON
  exec > "${TEMP_LOG}" 2>&1
else
  # Debug mode: output to stderr AND the log file.
  exec > >(tee -a "${TEMP_LOG}") 2>&1
fi
|
|
|
|
|
|
2026-01-09 18:54:01 +01:00
|
|
|
# Shared helpers (die/info/warn, pct_* and pve_* wrappers) plus error traps.
source "${SCRIPT_DIR}/libsupabase.sh"
setup_traps
|
|
|
|
|
|
2026-01-09 18:54:01 +01:00
|
|
|
# Print CLI help to stderr.
# FIX: --cores previously claimed "(default: unlimited)" while the script
# default is CORES="4"; the help text now matches the actual default.
usage() {
  cat >&2 <<'EOF'
Usage:
  bash install.sh [options]

Core options:
  --ctid <id>                Force CT ID (optional). If omitted, a customer-safe CTID is generated.
  --cores <n>                (default: 4)
  --memory <mb>              (default: 4096)
  --swap <mb>                (default: 512)
  --disk <gb>                (default: 50)
  --bridge <vmbrX>           (default: vmbr0)
  --storage <storage>        (default: local-zfs)
  --ip <dhcp|CIDR>           (default: dhcp)
  --vlan <id>                VLAN tag for net0 (default: 90; set 0 to disable)
  --privileged               Create privileged CT (default: unprivileged)
  --apt-proxy <url>          Optional: APT proxy (e.g. http://192.168.45.2:3142) for Apt-Cacher NG

Domain / n8n options:
  --base-domain <domain>     (default: userman.de) -> FQDN becomes sb-<unix>.domain
  --n8n-owner-email <email>  (default: admin@<base-domain>)
  --n8n-owner-pass <pass>    Optional. If omitted, generated (policy compliant).
  --workflow-file <path>     Path to n8n workflow JSON file (default: RAGKI-BotPGVector.json)
  --ollama-model <model>     Ollama chat model (default: ministral-3:3b)
  --embedding-model <model>  Ollama embedding model (default: nomic-embed-text:latest)
  --debug                    Enable debug mode (show logs on stderr)
  --help                     Show help

PostgREST / Supabase options:
  --postgrest-port <port>    PostgREST port (default: 3000)

Notes:
  - This script creates a Debian 12 LXC and provisions Docker + customer stack (Postgres/pgvector + n8n + PostgREST).
  - PostgREST provides a REST API for PostgreSQL, compatible with Supabase Vector Store node in n8n.
  - At the end it prints a JSON with credentials and URLs.
EOF
}
|
|
|
|
|
|
2026-01-09 20:09:29 +01:00
|
|
|
# ---------------------------
# Defaults (overridable via CLI flags below)
# ---------------------------
#APT_PROXY="http://192.168.45.2:3142"
DOCKER_REGISTRY_MIRROR="http://192.168.45.2:5000"
APT_PROXY=""
#DOCKER_REGISTRY_MIRROR=""

CTID=""
CORES="4"
MEMORY="4096"
SWAP="512"
DISK="50"
BRIDGE="vmbr0"
STORAGE="local-zfs"
IPCFG="dhcp"
VLAN="90"
UNPRIV="1"

BASE_DOMAIN="userman.de"
N8N_OWNER_EMAIL=""
N8N_OWNER_PASS=""
POSTGREST_PORT="3000"

# Workflow file (default: RAGKI-BotPGVector.json in the script directory).
WORKFLOW_FILE="${SCRIPT_DIR}/RAGKI-BotPGVector.json"

# Ollama API endpoint (hardcoded for the local setup).
OLLAMA_HOST="192.168.45.3"
OLLAMA_PORT="11434"
OLLAMA_URL="http://${OLLAMA_HOST}:${OLLAMA_PORT}"

# Ollama models (can be overridden via CLI).
OLLAMA_MODEL="ministral-3:3b"
EMBEDDING_MODEL="nomic-embed-text:latest"
|
|
|
|
|
|
2026-01-11 12:36:12 +01:00
|
|
|
# ---------------------------
# Arg parsing
# ---------------------------
while [[ $# -gt 0 ]]; do
  case "$1" in
    --ctid)             CTID="${2:-}";            shift 2 ;;
    --apt-proxy)        APT_PROXY="${2:-}";       shift 2 ;;
    --cores)            CORES="${2:-}";           shift 2 ;;
    --memory)           MEMORY="${2:-}";          shift 2 ;;
    --swap)             SWAP="${2:-}";            shift 2 ;;
    --disk)             DISK="${2:-}";            shift 2 ;;
    --bridge)           BRIDGE="${2:-}";          shift 2 ;;
    --storage)          STORAGE="${2:-}";         shift 2 ;;
    --ip)               IPCFG="${2:-}";           shift 2 ;;
    --vlan)             VLAN="${2:-}";            shift 2 ;;
    --privileged)       UNPRIV="0";               shift 1 ;;
    --base-domain)      BASE_DOMAIN="${2:-}";     shift 2 ;;
    --n8n-owner-email)  N8N_OWNER_EMAIL="${2:-}"; shift 2 ;;
    --n8n-owner-pass)   N8N_OWNER_PASS="${2:-}";  shift 2 ;;
    --workflow-file)    WORKFLOW_FILE="${2:-}";   shift 2 ;;
    --ollama-model)     OLLAMA_MODEL="${2:-}";    shift 2 ;;
    --embedding-model)  EMBEDDING_MODEL="${2:-}"; shift 2 ;;
    --postgrest-port)   POSTGREST_PORT="${2:-}";  shift 2 ;;
    --debug)            DEBUG="1"; export DEBUG;  shift 1 ;;
    --help|-h)          usage; exit 0 ;;
    *)                  die "Unknown option: $1 (use --help)" ;;
  esac
done
|
|
|
|
|
|
2026-01-11 17:54:12 +01:00
|
|
|
# ---------------------------
# Validation
# ---------------------------
[[ "$CORES" =~ ^[0-9]+$ ]] || die "--cores must be integer"
[[ "$MEMORY" =~ ^[0-9]+$ ]] || die "--memory must be integer"
[[ "$SWAP" =~ ^[0-9]+$ ]] || die "--swap must be integer"
[[ "$DISK" =~ ^[0-9]+$ ]] || die "--disk must be integer"
[[ "$UNPRIV" == "0" || "$UNPRIV" == "1" ]] || die "internal: UNPRIV invalid"
[[ "$VLAN" =~ ^[0-9]+$ ]] || die "--vlan must be integer (0 disables tagging)"
[[ -n "$BASE_DOMAIN" ]] || die "--base-domain must not be empty"
# NEW: the port is interpolated into docker-compose and URLs — validate early.
[[ "$POSTGREST_PORT" =~ ^[0-9]+$ ]] || die "--postgrest-port must be integer"

if [[ "$IPCFG" != "dhcp" ]]; then
  [[ "$IPCFG" =~ ^([0-9]{1,3}\.){3}[0-9]{1,3}/[0-9]{1,2}$ ]] || die "--ip must be dhcp or CIDR (e.g. 192.168.45.171/24)"
fi

if [[ -n "${APT_PROXY}" ]]; then
  [[ "${APT_PROXY}" =~ ^http://[^/]+:[0-9]+$ ]] || die "--apt-proxy must look like http://IP:PORT (example: http://192.168.45.2:3142)"
fi

# Validate workflow file exists
if [[ ! -f "${WORKFLOW_FILE}" ]]; then
  die "Workflow file not found: ${WORKFLOW_FILE}"
fi
|
2026-01-11 22:40:08 +01:00
|
|
|
|
2026-01-09 17:12:49 +01:00
|
|
|
# Summarize the effective configuration into the log.
info "Argument-Parsing OK"
info "Workflow file: ${WORKFLOW_FILE}"
info "Ollama model: ${OLLAMA_MODEL}"
info "Embedding model: ${EMBEDDING_MODEL}"

if [[ -z "${APT_PROXY}" ]]; then
  info "APT proxy disabled"
else
  info "APT proxy enabled: ${APT_PROXY}"
fi
|
|
|
|
|
|
|
|
|
|
|
2026-01-11 12:36:12 +01:00
|
|
|
# ---------------------------
# Preflight Proxmox
# ---------------------------
# Required host-side commands must exist before we touch anything.
need_cmd pct pvesm pveam pvesh grep date awk sed cut tr head

pve_storage_exists "$STORAGE" || die "Storage not found: $STORAGE"
pve_bridge_exists "$BRIDGE" || die "Bridge not found: $BRIDGE"

# Ensure a Debian 12 template is available on the chosen storage.
TEMPLATE="$(pve_template_ensure_debian12 "$STORAGE")"
info "Template OK: ${TEMPLATE}"
|
|
|
|
|
|
2026-01-11 17:54:12 +01:00
|
|
|
# Hostname / FQDN derived from the current unix timestamp (unique per run).
UNIXTS="$(date +%s)"
CT_HOSTNAME="sb-${UNIXTS}"
FQDN="${CT_HOSTNAME}.${BASE_DOMAIN}"

# From here on the log can be renamed after the container hostname on exit.
FINAL_LOG="${LOG_DIR}/${CT_HOSTNAME}.log"
|
|
|
|
|
|
2026-01-09 20:09:29 +01:00
|
|
|
# CTID selection: honor a forced --ctid, otherwise derive one from the
# timestamp; either way the ID must be free in the whole cluster.
if [[ -z "$CTID" ]]; then
  # Agreed scheme: unix time - 1000000000 (safe until 2038).
  CTID="$(pve_ctid_from_unixtime "$UNIXTS")"
  if pve_vmid_exists_cluster "$CTID"; then
    die "Generated CTID=${CTID} already exists in cluster (unexpected). Try again in 1s."
  fi
else
  [[ "$CTID" =~ ^[0-9]+$ ]] || die "--ctid must be integer"
  if pve_vmid_exists_cluster "$CTID"; then
    die "Forced CTID=${CTID} already exists in cluster"
  fi
fi
|
|
|
|
|
|
|
|
|
|
# n8n owner defaults: derive the email from the base domain when unset, and
# either generate a policy-compliant password or validate the supplied one.
N8N_OWNER_EMAIL="${N8N_OWNER_EMAIL:-admin@${BASE_DOMAIN}}"

if [[ -n "$N8N_OWNER_PASS" ]]; then
  # Enforce the policy early to avoid the n8n UI rejection later.
  password_policy_check "$N8N_OWNER_PASS" || die "--n8n-owner-pass does not meet policy: 8+ chars, 1 number, 1 uppercase"
else
  N8N_OWNER_PASS="$(gen_password_policy)"
fi
|
|
|
|
|
|
2026-01-09 20:09:29 +01:00
|
|
|
# Log the resolved container parameters.
info "CTID selected: ${CTID}"
info "SCRIPT_DIR=${SCRIPT_DIR}"
info "CT_HOSTNAME=${CT_HOSTNAME}"
info "FQDN=${FQDN}"
info "cores=${CORES} memory=${MEMORY}MB swap=${SWAP}MB disk=${DISK}GB"
info "bridge=${BRIDGE} storage=${STORAGE} ip=${IPCFG} vlan=${VLAN} unprivileged=${UNPRIV}"
|
2026-01-09 18:54:01 +01:00
|
|
|
|
2026-01-11 12:36:12 +01:00
|
|
|
# ---------------------------
# Step 5: Create CT
# ---------------------------
# Build the net0 spec (bridge, IP config, optional VLAN tag) via helper.
NET0="$(pve_build_net0 "$BRIDGE" "$IPCFG" "$VLAN")"
ROOTFS="${STORAGE}:${DISK}"
# nesting+keyctl+fuse are required so Docker can run inside the LXC.
FEATURES="nesting=1,keyctl=1,fuse=1"

info "Step 5: Create CT"
info "Creating CT ${CTID} (${CT_HOSTNAME}) from ${TEMPLATE}"
# Create the container but do not start it yet (--start 0);
# --onboot yes makes it auto-start with the Proxmox host.
pct create "${CTID}" "${TEMPLATE}" \
  --hostname "${CT_HOSTNAME}" \
  --cores "${CORES}" \
  --memory "${MEMORY}" \
  --swap "${SWAP}" \
  --net0 "${NET0}" \
  --rootfs "${ROOTFS}" \
  --unprivileged "${UNPRIV}" \
  --features "${FEATURES}" \
  --start 0 \
  --onboot yes

info "CT created (not started). Next step: start CT + wait for IP"
info "Starting CT ${CTID}"
pct start "${CTID}"

# Poll for the container's IP; "|| true" keeps set -e from firing so we can
# emit a clear error message ourselves.
CT_IP="$(pct_wait_for_ip "${CTID}" || true)"
[[ -n "${CT_IP}" ]] || die "Could not determine CT IP after start"

info "Step 5 OK: LXC erstellt + IP ermittelt"
info "CT_HOSTNAME=${CT_HOSTNAME}"
info "CT_IP=${CT_IP}"
|
|
|
|
|
|
2026-01-11 12:36:12 +01:00
|
|
|
# ---------------------------
# Step 6: Provision inside CT (Docker + Locales + Base)
# ---------------------------
info "Step 6: Provisioning im CT (Docker + Locales + Base)"

# Optional: APT proxy (Apt-Cacher NG). The heredoc delimiter is quoted for the
# shell INSIDE the container; ${APT_PROXY} is expanded on the host beforehand.
if [[ -n "${APT_PROXY}" ]]; then
  pct_exec "${CTID}" "cat > /etc/apt/apt.conf.d/00aptproxy <<'EOF'
Acquire::http::Proxy \"${APT_PROXY}\";
#Acquire::https::Proxy \"DIRECT\";
Acquire::https::Proxy \"${APT_PROXY}\";
EOF"
  pct_exec "$CTID" "apt-config dump | grep -i proxy || true"
fi

# Base packages + locales in a single update/install pass.
# FIX: the original ran "apt-get update" twice and installed the
# ca-certificates/curl/gnupg/lsb-release set twice (once without, once with
# "locales"); one combined pass yields the identical end state faster.
pct_exec "${CTID}" "export DEBIAN_FRONTEND=noninteractive; apt-get update -y"
pct_exec "${CTID}" "export DEBIAN_FRONTEND=noninteractive; apt-get install -y locales ca-certificates curl gnupg lsb-release"

# Locales (avoid perl warnings + consistent system)
pct_exec "${CTID}" "sed -i 's/^# *de_DE.UTF-8 UTF-8/de_DE.UTF-8 UTF-8/; s/^# *en_US.UTF-8 UTF-8/en_US.UTF-8 UTF-8/' /etc/locale.gen || true"
pct_exec "${CTID}" "locale-gen >/dev/null || true"
pct_exec "${CTID}" "update-locale LANG=de_DE.UTF-8 LC_ALL=de_DE.UTF-8 || true"

# Docker official repo (Debian 12 / bookworm)
pct_exec "${CTID}" "install -m 0755 -d /etc/apt/keyrings"
pct_exec "${CTID}" "curl -fsSL https://download.docker.com/linux/debian/gpg | gpg --dearmor -o /etc/apt/keyrings/docker.gpg"
pct_exec "${CTID}" "chmod a+r /etc/apt/keyrings/docker.gpg"
pct_exec "${CTID}" "echo \"deb [arch=\$(dpkg --print-architecture) signed-by=/etc/apt/keyrings/docker.gpg] https://download.docker.com/linux/debian \$(. /etc/os-release && echo \$VERSION_CODENAME) stable\" > /etc/apt/sources.list.d/docker.list"
pct_exec "${CTID}" "export DEBIAN_FRONTEND=noninteractive; apt-get update -y"
pct_exec "${CTID}" "export DEBIAN_FRONTEND=noninteractive; apt-get install -y docker-ce docker-ce-cli containerd.io docker-buildx-plugin docker-compose-plugin"

# Create stack directories
pct_exec "${CTID}" "mkdir -p /opt/customer-stack/volumes/postgres/data /opt/customer-stack/volumes/n8n-data /opt/customer-stack/sql"
# IMPORTANT: n8n runs as node (uid 1000) => fix permissions
pct_exec "${CTID}" "chown -R 1000:1000 /opt/customer-stack/volumes/n8n-data"

info "Step 6 OK: Docker + Compose Plugin installiert, Locales gesetzt, Basis-Verzeichnisse erstellt"
info "Next: Schritt 7 (finales docker-compose + Secrets + n8n/supabase up + Healthchecks)"
|
2026-01-09 21:28:21 +01:00
|
|
|
|
2026-01-11 12:36:12 +01:00
|
|
|
# ---------------------------
# Step 7: Stack finalisieren + Secrets + Up + Checks
# ---------------------------
info "Step 7: Stack finalisieren + Secrets + Up + Checks"

# Secrets for the customer database and n8n.
PG_DB="customer"
PG_USER="customer"
PG_PASSWORD="$(gen_password_policy)"
N8N_ENCRYPTION_KEY="$(gen_hex_64)"

# External access is HTTPS via the OPNsense reverse proxy; the container
# itself serves plain HTTP internally.
N8N_PORT="5678"
N8N_PROTOCOL="http"
N8N_HOST="${CT_IP}"
N8N_EDITOR_BASE_URL="https://${FQDN}/"
WEBHOOK_URL="https://${FQDN}/"

# Behind the HTTPS proxy secure cookies would be fine; until the proxy is in
# place, "false" avoids login trouble.
N8N_SECURE_COOKIE="false"
|
2026-01-11 12:36:12 +01:00
|
|
|
|
2026-01-23 14:15:16 +01:00
|
|
|
# PostgREST JWT material: a 256-bit HS256 secret plus two long-lived tokens
# (anon / service_role) in standard header.payload.signature form.
JWT_SECRET="$(openssl rand -base64 32 | tr -d '\n')"

# base64url-encode stdin: strip newlines, translate +/ to -_, drop padding.
b64url() { base64 | tr -d '\n' | tr '+/' '-_' | tr -d '='; }

JWT_HEADER="$(printf '%s' '{"alg":"HS256","typ":"JWT"}' | b64url)"
ANON_PAYLOAD="$(printf '%s' '{"role":"anon","iss":"supabase","iat":1700000000,"exp":2000000000}' | b64url)"
SERVICE_PAYLOAD="$(printf '%s' '{"role":"service_role","iss":"supabase","iat":1700000000,"exp":2000000000}' | b64url)"

# Sign header.payload with HMAC-SHA256 and base64url-encode the raw digest.
ANON_SIGNATURE="$(printf '%s' "${JWT_HEADER}.${ANON_PAYLOAD}" | openssl dgst -sha256 -hmac "${JWT_SECRET}" -binary | b64url)"
SERVICE_SIGNATURE="$(printf '%s' "${JWT_HEADER}.${SERVICE_PAYLOAD}" | openssl dgst -sha256 -hmac "${JWT_SECRET}" -binary | b64url)"

ANON_KEY="${JWT_HEADER}.${ANON_PAYLOAD}.${ANON_SIGNATURE}"
SERVICE_ROLE_KEY="${JWT_HEADER}.${SERVICE_PAYLOAD}.${SERVICE_SIGNATURE}"
|
|
|
|
|
|
|
|
|
|
info "Generated JWT Secret and API Keys for PostgREST"
|
|
|
|
|
|
2026-01-11 17:54:12 +01:00
|
|
|
# Write .env into the CT; docker compose resolves the ${...} placeholders in
# docker-compose.yml from this file. The heredoc delimiter is unquoted, so
# all values are expanded on the host right here.
pct_push_text "${CTID}" "/opt/customer-stack/.env" "$(cat <<EOF
PG_DB=${PG_DB}
PG_USER=${PG_USER}
PG_PASSWORD=${PG_PASSWORD}

N8N_PORT=${N8N_PORT}
N8N_PROTOCOL=${N8N_PROTOCOL}
N8N_HOST=${N8N_HOST}
N8N_EDITOR_BASE_URL=${N8N_EDITOR_BASE_URL}
WEBHOOK_URL=${WEBHOOK_URL}
N8N_SECURE_COOKIE=${N8N_SECURE_COOKIE}

N8N_ENCRYPTION_KEY=${N8N_ENCRYPTION_KEY}

# Telemetrie/Background Calls aus
N8N_DIAGNOSTICS_ENABLED=false
N8N_VERSION_NOTIFICATIONS_ENABLED=false
N8N_TEMPLATES_ENABLED=false

# PostgREST / Supabase API
POSTGREST_PORT=${POSTGREST_PORT}
JWT_SECRET=${JWT_SECRET}
ANON_KEY=${ANON_KEY}
SERVICE_ROLE_KEY=${SERVICE_ROLE_KEY}
EOF
)"
|
|
|
|
|
|
2026-01-23 14:15:16 +01:00
|
|
|
# init sql for pgvector + Supabase Vector Store schema.
# Runs once via /docker-entrypoint-initdb.d on first postgres start.
# FIX: match_documents previously selected d.content, but the documents table
# defines its text column as "text" — the function would fail at call time
# with 'column d.content does not exist'. It now selects d.text, which maps
# positionally onto the declared "content" output column.
pct_push_text "${CTID}" "/opt/customer-stack/sql/init_pgvector.sql" "$(cat <<'SQL'
-- Enable extensions
CREATE EXTENSION IF NOT EXISTS vector;
CREATE EXTENSION IF NOT EXISTS pg_trgm;

-- Create schema for API
CREATE SCHEMA IF NOT EXISTS api;

-- Create documents table for Vector Store (n8n PGVector Store compatible)
CREATE TABLE IF NOT EXISTS public.documents (
    id BIGSERIAL PRIMARY KEY,
    text TEXT,
    metadata JSONB,
    embedding VECTOR(768) -- nomic-embed-text uses 768 dimensions
);

-- Create index for vector similarity search
CREATE INDEX IF NOT EXISTS documents_embedding_idx ON public.documents
    USING ivfflat (embedding vector_cosine_ops)
    WITH (lists = 100);

-- Create the match_documents function for similarity search (Supabase/LangChain compatible)
CREATE OR REPLACE FUNCTION public.match_documents(
    query_embedding VECTOR(768),
    match_count INT DEFAULT 5,
    filter JSONB DEFAULT '{}'
)
RETURNS TABLE (
    id BIGINT,
    content TEXT,
    metadata JSONB,
    similarity FLOAT
)
LANGUAGE plpgsql
AS $$
BEGIN
    RETURN QUERY
    SELECT
        d.id,
        d.text, -- table column is "text"; maps to the "content" output column
        d.metadata,
        1 - (d.embedding <=> query_embedding) AS similarity
    FROM public.documents d
    WHERE (filter = '{}' OR d.metadata @> filter)
    ORDER BY d.embedding <=> query_embedding
    LIMIT match_count;
END;
$$;

-- Grant permissions for PostgREST roles
-- Create roles if they don't exist
DO $$
BEGIN
    IF NOT EXISTS (SELECT FROM pg_roles WHERE rolname = 'anon') THEN
        CREATE ROLE anon NOLOGIN;
    END IF;
    IF NOT EXISTS (SELECT FROM pg_roles WHERE rolname = 'service_role') THEN
        CREATE ROLE service_role NOLOGIN;
    END IF;
    IF NOT EXISTS (SELECT FROM pg_roles WHERE rolname = 'authenticator') THEN
        -- NOTE(review): hardcoded password; PostgREST currently connects as
        -- the main DB user (see PGRST_DB_URI), so this login is unused —
        -- rotate it if the authenticator role is ever used for connections.
        CREATE ROLE authenticator NOINHERIT LOGIN PASSWORD 'authenticator_password';
    END IF;
END
$$;

-- Grant permissions
GRANT USAGE ON SCHEMA public TO anon, service_role;
GRANT ALL ON ALL TABLES IN SCHEMA public TO anon, service_role;
GRANT ALL ON ALL SEQUENCES IN SCHEMA public TO anon, service_role;
GRANT EXECUTE ON ALL FUNCTIONS IN SCHEMA public TO anon, service_role;

-- Allow authenticator to switch to these roles
GRANT anon TO authenticator;
GRANT service_role TO authenticator;

-- Set default privileges for future tables
ALTER DEFAULT PRIVILEGES IN SCHEMA public GRANT ALL ON TABLES TO anon, service_role;
ALTER DEFAULT PRIVILEGES IN SCHEMA public GRANT ALL ON SEQUENCES TO anon, service_role;
ALTER DEFAULT PRIVILEGES IN SCHEMA public GRANT EXECUTE ON FUNCTIONS TO anon, service_role;
SQL
)"
|
2026-01-11 11:50:35 +01:00
|
|
|
|
2026-01-11 17:54:12 +01:00
|
|
|
# docker-compose.yml — the heredoc delimiter is quoted, so the ${...}
# placeholders are written literally and resolved by docker compose from the
# .env file pushed above.
pct_push_text "${CTID}" "/opt/customer-stack/docker-compose.yml" "$(cat <<'YML'
services:
  postgres:
    image: pgvector/pgvector:pg16
    container_name: customer-postgres
    restart: unless-stopped
    environment:
      POSTGRES_DB: ${PG_DB}
      POSTGRES_USER: ${PG_USER}
      POSTGRES_PASSWORD: ${PG_PASSWORD}
    volumes:
      - ./volumes/postgres/data:/var/lib/postgresql/data
      - ./sql:/docker-entrypoint-initdb.d:ro
    healthcheck:
      test: ["CMD-SHELL", "pg_isready -U ${PG_USER} -d ${PG_DB} || exit 1"]
      interval: 10s
      timeout: 5s
      retries: 20
    networks:
      - customer-net

  postgrest:
    image: postgrest/postgrest:latest
    container_name: customer-postgrest
    restart: unless-stopped
    depends_on:
      postgres:
        condition: service_healthy
    ports:
      - "${POSTGREST_PORT}:3000"
    environment:
      PGRST_DB_URI: postgres://${PG_USER}:${PG_PASSWORD}@postgres:5432/${PG_DB}
      PGRST_DB_SCHEMA: public
      PGRST_DB_ANON_ROLE: anon
      PGRST_JWT_SECRET: ${JWT_SECRET}
      PGRST_DB_USE_LEGACY_GUCS: "false"
    networks:
      - customer-net

  n8n:
    image: n8nio/n8n:latest
    container_name: n8n
    restart: unless-stopped
    depends_on:
      postgres:
        condition: service_healthy
      postgrest:
        condition: service_started
    ports:
      - "${N8N_PORT}:5678"
    environment:
      # --- Web / Cookies / URL ---
      N8N_PORT: 5678
      N8N_PROTOCOL: ${N8N_PROTOCOL}
      N8N_HOST: ${N8N_HOST}
      N8N_EDITOR_BASE_URL: ${N8N_EDITOR_BASE_URL}
      WEBHOOK_URL: ${WEBHOOK_URL}
      N8N_SECURE_COOKIE: ${N8N_SECURE_COOKIE}

      # --- Disable telemetry / background calls ---
      N8N_DIAGNOSTICS_ENABLED: ${N8N_DIAGNOSTICS_ENABLED}
      N8N_VERSION_NOTIFICATIONS_ENABLED: ${N8N_VERSION_NOTIFICATIONS_ENABLED}
      N8N_TEMPLATES_ENABLED: ${N8N_TEMPLATES_ENABLED}

      # --- DB (Postgres) ---
      DB_TYPE: postgresdb
      DB_POSTGRESDB_HOST: postgres
      DB_POSTGRESDB_PORT: 5432
      DB_POSTGRESDB_DATABASE: ${PG_DB}
      DB_POSTGRESDB_USER: ${PG_USER}
      DB_POSTGRESDB_PASSWORD: ${PG_PASSWORD}

      # --- Basics ---
      GENERIC_TIMEZONE: Europe/Berlin
      TZ: Europe/Berlin

      N8N_ENCRYPTION_KEY: ${N8N_ENCRYPTION_KEY}

    volumes:
      - ./volumes/n8n-data:/home/node/.n8n
    networks:
      - customer-net

networks:
  customer-net:
    driver: bridge
YML
)"
|
2026-01-09 21:28:21 +01:00
|
|
|
|
2026-01-11 17:54:12 +01:00
|
|
|
# Make sure permissions are correct (again, after file writes)
pct_exec "${CTID}" "chown -R 1000:1000 /opt/customer-stack/volumes/n8n-data"

# Docker registry mirror configuration.
# FIX: previously gated only on APT_PROXY; if APT_PROXY was set while
# DOCKER_REGISTRY_MIRROR was empty, an invalid daemon.json (empty mirror URL)
# would have been written. Require both to be non-empty; default behavior
# (APT_PROXY empty) is unchanged.
if [[ -n "${APT_PROXY}" && -n "${DOCKER_REGISTRY_MIRROR}" ]]; then
  pct_exec "$CTID" "mkdir -p /etc/docker"

  pct_exec "$CTID" "cat > /etc/docker/daemon.json <<EOF
{
  \"registry-mirrors\": [\"${DOCKER_REGISTRY_MIRROR}\"]
}
EOF"

  pct_exec "$CTID" "systemctl restart docker"
  pct_exec "$CTID" "systemctl is-active docker"
  pct_exec "$CTID" "docker info | grep -A2 -i 'Registry Mirrors'"
fi
|
|
|
|
|
|
2026-01-11 17:54:12 +01:00
|
|
|
# Pull images, start the stack, and log the resulting container states.
pct_exec "${CTID}" "cd /opt/customer-stack && docker compose pull"
pct_exec "${CTID}" "cd /opt/customer-stack && docker compose up -d"
pct_exec "${CTID}" "cd /opt/customer-stack && docker compose ps"

# --- Owner account creation (robust way) ---
# n8n shows the setup screen if no user exists; create the owner via the CLI
# inside the container. The --help probe just warms up / sanity-checks the CLI.
pct_exec "${CTID}" "cd /opt/customer-stack && docker exec -u node n8n n8n --help >/dev/null 2>&1 || true"

# Try the modern command first (works in current n8n builds); if it fails, the
# setup screen remains (visible in logs) and Step 8 falls back to the REST API.
pct_exec "${CTID}" "cd /opt/customer-stack && (docker exec -u node n8n n8n user-management:reset --email '${N8N_OWNER_EMAIL}' --password '${N8N_OWNER_PASS}' --firstName 'Admin' --lastName 'Owner' >/dev/null 2>&1 || true)"
|
|
|
|
|
|
2026-01-23 14:15:16 +01:00
|
|
|
info "Step 7 OK: Stack deployed"

# ---------------------------
# Step 8: Setup Owner Account via REST API (fallback)
# ---------------------------
info "Step 8: Setting up owner account via REST API..."

# Wait for n8n to be ready
sleep 5

# Fallback REST setup, covering builds where user-management:reset is absent.
# NOTE(review): email/password are interpolated into a JSON literal; generated
# passwords are safe, but a user-supplied password containing '"' or '\' would
# break the payload — confirm or JSON-escape if that ever becomes an issue.
pct_exec "${CTID}" "curl -sS -X POST 'http://127.0.0.1:5678/rest/owner/setup' \
  -H 'Content-Type: application/json' \
  -d '{\"email\":\"${N8N_OWNER_EMAIL}\",\"firstName\":\"Admin\",\"lastName\":\"Owner\",\"password\":\"${N8N_OWNER_PASS}\"}' || true"

info "Step 8 OK: Owner account setup attempted"

# ---------------------------
# Step 9: Final URLs and Output
# ---------------------------
info "Step 9: Generating final output..."
|
|
|
|
|
|
|
|
|
|
# Final URLs
N8N_INTERNAL_URL="http://${CT_IP}:5678/"
N8N_EXTERNAL_URL="https://${FQDN}"
POSTGREST_URL="http://${CT_IP}:${POSTGREST_PORT}"

# Supabase-style URL for the n8n credential (PostgREST acts as the Supabase API).
# IMPORTANT: n8n runs inside Docker, so it needs the Docker-internal URL!
SUPABASE_URL="http://postgrest:3000"
SUPABASE_URL_EXTERNAL="http://${CT_IP}:${POSTGREST_PORT}"

# Chat URLs (webhook for the chat trigger — available after workflow activation)
CHAT_WEBHOOK_URL="https://${FQDN}/webhook/rag-chat-webhook/chat"
CHAT_INTERNAL_URL="http://${CT_IP}:5678/webhook/rag-chat-webhook/chat"

# Upload Form URLs (for document upload)
UPLOAD_FORM_URL="https://${FQDN}/form/rag-upload-form"
UPLOAD_FORM_INTERNAL_URL="http://${CT_IP}:5678/form/rag-upload-form"
|
|
|
|
|
|
2026-01-11 17:54:12 +01:00
|
|
|
# Log the final endpoints and keys.
info "n8n intern: ${N8N_INTERNAL_URL}"
info "n8n extern (geplant via OPNsense): ${N8N_EXTERNAL_URL}"
info "PostgREST API: ${POSTGREST_URL}"
info "Supabase Service Role Key: ${SERVICE_ROLE_KEY}"
info "Ollama URL: ${OLLAMA_URL}"
info "Chat Webhook URL (extern): ${CHAT_WEBHOOK_URL}"
info "Chat Webhook URL (intern): ${CHAT_INTERNAL_URL}"
|
|
|
|
|
|
|
|
|
|
# ---------------------------
# Step 10: Setup n8n Credentials + Import Workflow + Activate
# ---------------------------
info "Step 10: Setting up n8n credentials and importing RAG workflow..."

# Robust setup helper from libsupabase.sh.
# Args: ctid, email, password, pg_host, pg_port, pg_db, pg_user, pg_pass,
#       ollama_url, ollama_model, embedding_model, workflow_file
if n8n_setup_rag_workflow "${CTID}" "${N8N_OWNER_EMAIL}" "${N8N_OWNER_PASS}" \
    "postgres" "5432" "${PG_DB}" "${PG_USER}" "${PG_PASSWORD}" \
    "${OLLAMA_URL}" "${OLLAMA_MODEL}" "${EMBEDDING_MODEL}" "${WORKFLOW_FILE}"; then
  info "Step 10 OK: n8n RAG workflow setup completed successfully"
else
  warn "Step 10: n8n workflow setup failed - manual setup may be required"
  info "Step 10: You can manually import the workflow via n8n UI"
fi
|
2026-01-11 12:36:12 +01:00
|
|
|
|
2026-01-24 22:31:26 +01:00
|
|
|
# ---------------------------
# Step 10a: Setup Workflow Auto-Reload on LXC Restart
# ---------------------------
# Pushes the workflow template, a reload script and a systemd unit into the
# container so the workflow is re-imported on every LXC (re)start.
info "Step 10a: Setting up workflow auto-reload on LXC restart..."

# Copy workflow template to container for auto-reload
info "Copying workflow template to container..."
if [[ -f "${WORKFLOW_FILE}" ]]; then
    # Read workflow file content
    WORKFLOW_CONTENT=$(cat "${WORKFLOW_FILE}")
    pct_push_text "${CTID}" "/opt/customer-stack/workflow-template.json" "${WORKFLOW_CONTENT}"
    info "Workflow template saved to /opt/customer-stack/workflow-template.json"
else
    warn "Workflow file not found: ${WORKFLOW_FILE}"
fi

# Copy reload script to container.
# Guarded with -f (like the workflow template above): under `set -Eeuo
# pipefail` an unguarded cat of a missing template would abort the whole
# install for an optional convenience feature.
info "Installing workflow reload script..."
if [[ -f "${SCRIPT_DIR}/templates/reload-workflow.sh" ]]; then
    RELOAD_SCRIPT_CONTENT=$(cat "${SCRIPT_DIR}/templates/reload-workflow.sh")
    pct_push_text "${CTID}" "/opt/customer-stack/reload-workflow.sh" "${RELOAD_SCRIPT_CONTENT}"
    pct_exec "${CTID}" "chmod +x /opt/customer-stack/reload-workflow.sh"
    info "Reload script installed"
else
    warn "Reload script template not found: ${SCRIPT_DIR}/templates/reload-workflow.sh"
fi

# Copy systemd service file to container (same -f guard as above)
info "Installing systemd service for workflow auto-reload..."
if [[ -f "${SCRIPT_DIR}/templates/n8n-workflow-reload.service" ]]; then
    SYSTEMD_SERVICE_CONTENT=$(cat "${SCRIPT_DIR}/templates/n8n-workflow-reload.service")
    pct_push_text "${CTID}" "/etc/systemd/system/n8n-workflow-reload.service" "${SYSTEMD_SERVICE_CONTENT}"

    # Enable the systemd service. It is deliberately NOT started now: the
    # workflow was already imported in Step 10; the unit runs on the next boot.
    pct_exec "${CTID}" "systemctl daemon-reload"
    pct_exec "${CTID}" "systemctl enable n8n-workflow-reload.service"
    info "Systemd service enabled"
else
    warn "Systemd service template not found: ${SCRIPT_DIR}/templates/n8n-workflow-reload.service"
fi

info "Step 10a OK: Workflow auto-reload configured"
info "The workflow will be automatically reloaded on every LXC restart"
|
|
|
|
|
|
2026-01-23 14:15:16 +01:00
|
|
|
# ---------------------------
# Step 11: Setup NGINX Reverse Proxy in OPNsense
# ---------------------------
info "Step 11: Setting up NGINX Reverse Proxy in OPNsense..."

# The proxy helper script is optional — skip with a warning when absent.
if [[ -f "${SCRIPT_DIR}/setup_nginx_proxy.sh" ]]; then
    # Collect the helper's arguments in an array (quote-safe).
    proxy_args=(
        --ctid "${CTID}"
        --hostname "${CT_HOSTNAME}"
        --fqdn "${FQDN}"
        --backend-ip "${CT_IP}"
        --backend-port "5678"
    )
    # Capture stdout+stderr; on any failure substitute a synthetic error JSON
    # so the success check below has something well-formed to inspect.
    PROXY_RESULT=$(DEBUG="${DEBUG}" bash "${SCRIPT_DIR}/setup_nginx_proxy.sh" \
        "${proxy_args[@]}" \
        2>&1 || echo '{"success": false, "error": "Proxy setup failed"}')

    # Check if proxy setup was successful
    if [[ "$PROXY_RESULT" == *'"success": true'* ]]; then
        info "NGINX Reverse Proxy setup successful"
    else
        warn "NGINX Reverse Proxy setup may have failed: ${PROXY_RESULT}"
    fi
else
    warn "setup_nginx_proxy.sh not found, skipping proxy setup"
fi

info "Step 11 OK: Proxy setup completed"
|
|
|
|
|
|
|
|
|
|
# ---------------------------
# Final JSON Output
# ---------------------------
# Machine-readable JSON output (for your downstream automation)
# Compact single-line JSON for easy parsing.
# With DEBUG=0: emit the JSON on fd 3 (the original stdout reserved in the header).
# With DEBUG=1: emit the JSON on regular stdout (so it also lands in the log).
#
# NOTE(review): values are interpolated without JSON escaping — a password or
# key containing `"` or `\` produces invalid JSON; consider building this with
# jq or python3 if such values can occur.
# NOTE(review): FINAL_LOG is initialized empty in the header and only consumed
# by the EXIT trap; presumably it was assigned earlier in the script — verify
# it is non-empty by this point.
JSON_OUTPUT="{\"ctid\":${CTID},\"hostname\":\"${CT_HOSTNAME}\",\"fqdn\":\"${FQDN}\",\"ip\":\"${CT_IP}\",\"vlan\":${VLAN},\"urls\":{\"n8n_internal\":\"${N8N_INTERNAL_URL}\",\"n8n_external\":\"${N8N_EXTERNAL_URL}\",\"postgrest\":\"${POSTGREST_URL}\",\"chat_webhook\":\"${CHAT_WEBHOOK_URL}\",\"chat_internal\":\"${CHAT_INTERNAL_URL}\",\"upload_form\":\"${UPLOAD_FORM_URL}\",\"upload_form_internal\":\"${UPLOAD_FORM_INTERNAL_URL}\"},\"postgres\":{\"host\":\"postgres\",\"port\":5432,\"db\":\"${PG_DB}\",\"user\":\"${PG_USER}\",\"password\":\"${PG_PASSWORD}\"},\"supabase\":{\"url\":\"${SUPABASE_URL}\",\"url_external\":\"${SUPABASE_URL_EXTERNAL}\",\"anon_key\":\"${ANON_KEY}\",\"service_role_key\":\"${SERVICE_ROLE_KEY}\",\"jwt_secret\":\"${JWT_SECRET}\"},\"ollama\":{\"url\":\"${OLLAMA_URL}\",\"model\":\"${OLLAMA_MODEL}\",\"embedding_model\":\"${EMBEDDING_MODEL}\"},\"n8n\":{\"encryption_key\":\"${N8N_ENCRYPTION_KEY}\",\"owner_email\":\"${N8N_OWNER_EMAIL}\",\"owner_password\":\"${N8N_OWNER_PASS}\",\"secure_cookie\":${N8N_SECURE_COOKIE}},\"log_file\":\"${FINAL_LOG}\"}"

if [[ "$DEBUG" == "1" ]]; then
    # Debug mode: print the JSON pretty-printed for readability; fall back to
    # the raw line when python3 is unavailable or parsing fails.
    echo "$JSON_OUTPUT" | python3 -m json.tool 2>/dev/null || echo "$JSON_OUTPUT"
else
    # Normal mode: compact JSON to the original stdout (fd 3) so it is not
    # swallowed by the log redirection set up in the header.
    echo "$JSON_OUTPUT" >&3
fi
|
2026-01-24 22:31:26 +01:00
|
|
|
|
|
|
|
|
# ---------------------------
# Save credentials to file
# ---------------------------
# The file contains live secrets (DB password, JWT secret / API keys, n8n
# owner password), so directory and file are restricted to the owning user.
CREDENTIALS_DIR="${SCRIPT_DIR}/credentials"
mkdir -p "${CREDENTIALS_DIR}"
chmod 700 "${CREDENTIALS_DIR}"
CREDENTIALS_FILE="${CREDENTIALS_DIR}/${CT_HOSTNAME}.json"

# Create the file with owner-only permissions BEFORE writing secrets into it;
# the subsequent `>` redirections truncate but keep the existing mode.
( umask 077; : > "${CREDENTIALS_FILE}" )
chmod 600 "${CREDENTIALS_FILE}"

# Save formatted credentials (pretty-printed when python3 can parse the JSON,
# raw single-line output otherwise).
echo "$JSON_OUTPUT" | python3 -m json.tool > "${CREDENTIALS_FILE}" 2>/dev/null || echo "$JSON_OUTPUT" > "${CREDENTIALS_FILE}"

info "Credentials saved to: ${CREDENTIALS_FILE}"
info "To update credentials later, use: bash update_credentials.sh --ctid ${CTID} --credentials-file ${CREDENTIALS_FILE}"
|