diff --git a/RAGKI-BotPGVector.json b/RAGKI-BotPGVector.json new file mode 100644 index 0000000..6299c57 --- /dev/null +++ b/RAGKI-BotPGVector.json @@ -0,0 +1,323 @@ +{ + "name": "RAG KI-Bot (PGVector)", + "nodes": [ + { + "parameters": { + "public": true, + "initialMessages": "Hallo! 👋\nMein Name ist Clara (Customer Learning & Answering Reference Assistant)\nWie kann ich behilflich sein?", + "options": { + "inputPlaceholder": "Hier die Frage eingeben...", + "showWelcomeScreen": true, + "subtitle": "Die Antworten der AI können fehlerhaft sein.", + "title": "Support-Chat 👋", + "customCss": ":root {\n /* Colors */\n --chat--color-primary: #e74266;\n --chat--color-primary-shade-50: #db4061;\n --chat--color-primary-shade-100: #cf3c5c;\n --chat--color-secondary: #20b69e;\n --chat--color-secondary-shade-50: #1ca08a;\n --chat--color-white: #ffffff;\n --chat--color-light: #f2f4f8;\n --chat--color-light-shade-50: #e6e9f1;\n --chat--color-light-shade-100: #c2c5cc;\n --chat--color-medium: #d2d4d9;\n --chat--color-dark: #101330;\n --chat--color-disabled: #d2d4d9;\n --chat--color-typing: #404040;\n\n /* Base Layout */\n --chat--spacing: 1rem;\n --chat--border-radius: 0.25rem;\n --chat--transition-duration: 0.15s;\n --chat--font-family: -apple-system, BlinkMacSystemFont, 'Segoe UI', Roboto, Oxygen-Sans, Ubuntu, Cantarell, 'Helvetica Neue', sans-serif;\n\n /* Window Dimensions */\n --chat--window--width: 400px;\n --chat--window--height: 600px;\n --chat--window--bottom: var(--chat--spacing);\n --chat--window--right: var(--chat--spacing);\n --chat--window--z-index: 9999;\n --chat--window--border: 1px solid var(--chat--color-light-shade-50);\n --chat--window--border-radius: var(--chat--border-radius);\n --chat--window--margin-bottom: var(--chat--spacing);\n\n /* Header Styles */\n --chat--header-height: auto;\n --chat--header--padding: var(--chat--spacing);\n --chat--header--background: var(--chat--color-dark);\n --chat--header--color: var(--chat--color-light);\n --chat--header--border-top: none;\n --chat--header--border-bottom: none;\n --chat--header--border-left: none;\n --chat--header--border-right: none;\n --chat--heading--font-size: 2em;\n --chat--subtitle--font-size: inherit;\n --chat--subtitle--line-height: 1.8;\n\n /* Message Styles */\n --chat--message--font-size: 1rem;\n --chat--message--padding: var(--chat--spacing);\n --chat--message--border-radius: var(--chat--border-radius);\n --chat--message-line-height: 1.5;\n --chat--message--margin-bottom: calc(var(--chat--spacing) * 1);\n --chat--message--bot--background: var(--chat--color-white);\n --chat--message--bot--color: var(--chat--color-dark);\n --chat--message--bot--border: none;\n --chat--message--user--background: var(--chat--color-secondary);\n --chat--message--user--color: var(--chat--color-white);\n --chat--message--user--border: none;\n --chat--message--pre--background: rgba(0, 0, 0, 0.05);\n --chat--messages-list--padding: var(--chat--spacing);\n\n /* Toggle Button */\n --chat--toggle--size: 64px;\n --chat--toggle--width: var(--chat--toggle--size);\n --chat--toggle--height: var(--chat--toggle--size);\n --chat--toggle--border-radius: 50%;\n --chat--toggle--background: var(--chat--color-primary);\n --chat--toggle--hover--background: var(--chat--color-primary-shade-50);\n --chat--toggle--active--background: var(--chat--color-primary-shade-100);\n --chat--toggle--color: var(--chat--color-white);\n\n /* Input Area */\n --chat--textarea--height: 50px;\n --chat--textarea--max-height: 30rem;\n --chat--input--font-size: inherit;\n 
--chat--input--border: 0;\n --chat--input--border-radius: 0;\n --chat--input--padding: 0.8rem;\n --chat--input--background: var(--chat--color-white);\n --chat--input--text-color: initial;\n --chat--input--line-height: 1.5;\n --chat--input--placeholder--font-size: var(--chat--input--font-size);\n --chat--input--border-active: 0;\n --chat--input--left--panel--width: 2rem;\n\n /* Button Styles */\n --chat--button--color: var(--chat--color-light);\n --chat--button--background: var(--chat--color-primary);\n --chat--button--padding: calc(var(--chat--spacing) * 1 / 2) var(--chat--spacing);\n --chat--button--border-radius: var(--chat--border-radius);\n --chat--button--hover--color: var(--chat--color-light);\n --chat--button--hover--background: var(--chat--color-primary-shade-50);\n --chat--close--button--color-hover: var(--chat--color-primary);\n\n /* Send and File Buttons */\n --chat--input--send--button--background: var(--chat--color-white);\n --chat--input--send--button--color: var(--chat--color-secondary);\n --chat--input--send--button--background-hover: var(--chat--color-primary-shade-50);\n --chat--input--send--button--color-hover: var(--chat--color-secondary-shade-50);\n --chat--input--file--button--background: var(--chat--color-white);\n --chat--input--file--button--color: var(--chat--color-secondary);\n --chat--input--file--button--background-hover: var(--chat--input--file--button--background);\n --chat--input--file--button--color-hover: var(--chat--color-secondary-shade-50);\n --chat--files-spacing: 0.25rem;\n\n /* Body and Footer */\n --chat--body--background: var(--chat--color-light);\n --chat--footer--background: var(--chat--color-light);\n --chat--footer--color: var(--chat--color-dark);\n}\n\n\n/* You can override any class styles, too. Right-click inspect in Chat UI to find class to override. */\n.chat-message {\n\tmax-width: 50%;\n}", + "responseMode": "lastNode" + } + }, + "type": "@n8n/n8n-nodes-langchain.chatTrigger", + "typeVersion": 1.3, + "position": [ + 0, + 0 + ], + "id": "chat-trigger-001", + "name": "When chat message received", + "webhookId": "rag-chat-webhook", + "notesInFlow": true, + "notes": "Chat URL: /webhook/rag-chat-webhook/chat" + }, + { + "parameters": { + "promptType": "define", + "text": "={{ $json.chatInput }}\nAntworte ausschliesslich auf Deutsch und nutze zuerst die Wissensdatenbank.", + "options": {} + }, + "type": "@n8n/n8n-nodes-langchain.agent", + "typeVersion": 2.2, + "position": [ + 208, + 0 + ], + "id": "ai-agent-001", + "name": "AI Agent" + }, + { + "parameters": { + "model": "ministral-3:3b", + "options": {} + }, + "type": "@n8n/n8n-nodes-langchain.lmChatOllama", + "typeVersion": 1, + "position": [ + 64, + 208 + ], + "id": "ollama-chat-001", + "name": "Ollama Chat Model", + "credentials": { + "ollamaApi": { + "id": "ZmMYzkrY4zMFYJ1J", + "name": "Ollama (local)" + } + } + }, + { + "parameters": {}, + "type": "@n8n/n8n-nodes-langchain.memoryBufferWindow", + "typeVersion": 1.3, + "position": [ + 224, + 208 + ], + "id": "memory-001", + "name": "Simple Memory" + }, + { + "parameters": { + "mode": "retrieve-as-tool", + "toolName": "knowledge_base", + "toolDescription": "Verwende dieses Tool für Infos die der Benutzer fragt. 
Sucht in der Wissensdatenbank nach relevanten Dokumenten.", + "tableName": "documents", + "options": {} + }, + "type": "@n8n/n8n-nodes-langchain.vectorStorePGVector", + "typeVersion": 1, + "position": [ + 432, + 128 + ], + "id": "pgvector-retrieve-001", + "name": "PGVector Store", + "credentials": { + "postgres": { + "id": "1VVtY5ei866suQdA", + "name": "PostgreSQL (local)" + } + } + }, + { + "parameters": { + "model": "nomic-embed-text:latest" + }, + "type": "@n8n/n8n-nodes-langchain.embeddingsOllama", + "typeVersion": 1, + "position": [ + 416, + 288 + ], + "id": "embeddings-retrieve-001", + "name": "Embeddings Ollama", + "credentials": { + "ollamaApi": { + "id": "ZmMYzkrY4zMFYJ1J", + "name": "Ollama (local)" + } + } + }, + { + "parameters": { + "formTitle": "Dokument hochladen", + "formDescription": "Laden Sie ein PDF-Dokument hoch, um es in die Wissensdatenbank aufzunehmen.", + "formFields": { + "values": [ + { + "fieldLabel": "Dokument", + "fieldType": "file", + "acceptFileTypes": ".pdf" + } + ] + }, + "options": {} + }, + "type": "n8n-nodes-base.formTrigger", + "typeVersion": 2.3, + "position": [ + 768, + 0 + ], + "id": "form-trigger-001", + "name": "On form submission", + "webhookId": "rag-upload-form" + }, + { + "parameters": { + "operation": "pdf", + "binaryPropertyName": "Dokument", + "options": {} + }, + "type": "n8n-nodes-base.extractFromFile", + "typeVersion": 1, + "position": [ + 976, + 0 + ], + "id": "extract-file-001", + "name": "Extract from File" + }, + { + "parameters": { + "mode": "insert", + "tableName": "documents", + "options": {} + }, + "type": "@n8n/n8n-nodes-langchain.vectorStorePGVector", + "typeVersion": 1, + "position": [ + 1184, + 0 + ], + "id": "pgvector-insert-001", + "name": "PGVector Store Insert", + "credentials": { + "postgres": { + "id": "1VVtY5ei866suQdA", + "name": "PostgreSQL (local)" + } + } + }, + { + "parameters": { + "model": "nomic-embed-text:latest" + }, + "type": "@n8n/n8n-nodes-langchain.embeddingsOllama", + "typeVersion": 1, + "position": [ + 1168, + 240 + ], + "id": "embeddings-insert-001", + "name": "Embeddings Ollama1", + "credentials": { + "ollamaApi": { + "id": "ZmMYzkrY4zMFYJ1J", + "name": "Ollama (local)" + } + } + }, + { + "parameters": { + "options": {} + }, + "type": "@n8n/n8n-nodes-langchain.documentDefaultDataLoader", + "typeVersion": 1.1, + "position": [ + 1392, + 240 + ], + "id": "data-loader-001", + "name": "Default Data Loader" + } + ], + "pinData": {}, + "connections": { + "When chat message received": { + "main": [ + [ + { + "node": "AI Agent", + "type": "main", + "index": 0 + } + ] + ] + }, + "Ollama Chat Model": { + "ai_languageModel": [ + [ + { + "node": "AI Agent", + "type": "ai_languageModel", + "index": 0 + } + ] + ] + }, + "Simple Memory": { + "ai_memory": [ + [ + { + "node": "AI Agent", + "type": "ai_memory", + "index": 0 + } + ] + ] + }, + "PGVector Store": { + "ai_tool": [ + [ + { + "node": "AI Agent", + "type": "ai_tool", + "index": 0 + } + ] + ] + }, + "Embeddings Ollama": { + "ai_embedding": [ + [ + { + "node": "PGVector Store", + "type": "ai_embedding", + "index": 0 + } + ] + ] + }, + "On form submission": { + "main": [ + [ + { + "node": "Extract from File", + "type": "main", + "index": 0 + } + ] + ] + }, + "Extract from File": { + "main": [ + [ + { + "node": "PGVector Store Insert", + "type": "main", + "index": 0 + } + ] + ] + }, + "Embeddings Ollama1": { + "ai_embedding": [ + [ + { + "node": "PGVector Store Insert", + "type": "ai_embedding", + "index": 0 + } + ] + ] + }, + "Default Data Loader": { + 
"ai_document": [ + [ + { + "node": "PGVector Store Insert", + "type": "ai_document", + "index": 0 + } + ] + ] + } + }, + "active": true, + "settings": { + "executionOrder": "v1" + }, + "versionId": "6ebf0ac8-b8ab-49ee-b6f1-df0b606b3a33", + "meta": { + "instanceId": "a2179cec0884855b4d650fea20868c0dbbb03f0d0054c803c700fff052afc74c" + }, + "id": "Q9Bm63B9ae8rAj95", + "tags": [] +} \ No newline at end of file diff --git a/credentials/.gitignore b/credentials/.gitignore new file mode 100644 index 0000000..da92395 --- /dev/null +++ b/credentials/.gitignore @@ -0,0 +1,5 @@ +# Ignore all credential files +*.json + +# Except the example file +!example-credentials.json diff --git a/credentials/example-credentials.json b/credentials/example-credentials.json new file mode 100644 index 0000000..89145d1 --- /dev/null +++ b/credentials/example-credentials.json @@ -0,0 +1,52 @@ +{ + "container": { + "ctid": 769276659, + "hostname": "sb-1769276659", + "fqdn": "sb-1769276659.userman.de", + "ip": "192.168.45.45", + "vlan": 90 + }, + "urls": { + "n8n_internal": "http://192.168.45.45:5678/", + "n8n_external": "https://sb-1769276659.userman.de", + "postgrest": "http://192.168.45.45:3000", + "chat_webhook": "https://sb-1769276659.userman.de/webhook/rag-chat-webhook/chat", + "chat_internal": "http://192.168.45.45:5678/webhook/rag-chat-webhook/chat", + "upload_form": "https://sb-1769276659.userman.de/form/rag-upload-form", + "upload_form_internal": "http://192.168.45.45:5678/form/rag-upload-form" + }, + "postgres": { + "host": "postgres", + "port": 5432, + "db": "customer", + "user": "customer", + "password": "EXAMPLE_PASSWORD" + }, + "supabase": { + "url": "http://postgrest:3000", + "url_external": "http://192.168.45.45:3000", + "anon_key": "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9...", + "service_role_key": "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9...", + "jwt_secret": "EXAMPLE_JWT_SECRET" + }, + "ollama": { + "url": "http://192.168.45.3:11434", + "model": "ministral-3:3b", + "embedding_model": "nomic-embed-text:latest" + }, + "n8n": { + "encryption_key": "EXAMPLE_ENCRYPTION_KEY", + "owner_email": "admin@userman.de", + "owner_password": "EXAMPLE_PASSWORD", + "secure_cookie": false + }, + "log_file": "/root/customer-installer/logs/sb-1769276659.log", + "created_at": "2026-01-24T18:00:00+01:00", + "updateable_fields": { + "ollama_url": "Can be updated to use hostname instead of IP (e.g., http://ollama.local:11434)", + "ollama_model": "Can be changed to different model (e.g., llama3.2:3b)", + "embedding_model": "Can be changed to different embedding model", + "postgres_password": "Can be updated (requires container restart)", + "n8n_owner_password": "Can be updated (requires container restart)" + } +} diff --git a/delete_nginx_proxy.sh b/delete_nginx_proxy.sh new file mode 100755 index 0000000..1ecd13e --- /dev/null +++ b/delete_nginx_proxy.sh @@ -0,0 +1,389 @@ +#!/usr/bin/env bash +set -Eeuo pipefail + +# ============================================================================= +# OPNsense NGINX Reverse Proxy Delete Script +# ============================================================================= +# Dieses Script löscht einen NGINX Reverse Proxy auf OPNsense +# für eine n8n-Instanz über die OPNsense API. 
+# ============================================================================= + +SCRIPT_VERSION="1.0.2" + +# Debug mode: 0 = nur JSON, 1 = Logs auf stderr +DEBUG="${DEBUG:-0}" +export DEBUG + +# Logging functions +log_ts() { date "+[%F %T]"; } +info() { [[ "$DEBUG" == "1" ]] && echo "$(log_ts) INFO: $*" >&2; return 0; } +warn() { [[ "$DEBUG" == "1" ]] && echo "$(log_ts) WARN: $*" >&2; return 0; } +die() { + if [[ "$DEBUG" == "1" ]]; then + echo "$(log_ts) ERROR: $*" >&2 + else + echo "{\"error\": \"$*\"}" + fi + exit 1 +} + +# ============================================================================= +# Default Configuration +# ============================================================================= +OPNSENSE_HOST="${OPNSENSE_HOST:-192.168.45.1}" +OPNSENSE_PORT="${OPNSENSE_PORT:-4444}" +OPNSENSE_API_KEY="${OPNSENSE_API_KEY:-cUUs80IDkQelMJVgAVK2oUoDHrQf+cQPwXoPKNd3KDIgiCiEyEfMq38UTXeY5/VO/yWtCC7k9Y9kJ0Pn}" +OPNSENSE_API_SECRET="${OPNSENSE_API_SECRET:-2egxxFYCAUjBDp0OrgbJO3NBZmR4jpDm028jeS8Nq8OtCGu/0lAxt4YXWXbdZjcFVMS0Nrhru1I2R1si}" + +# ============================================================================= +# Usage +# ============================================================================= +usage() { + cat >&2 <<'EOF' +Usage: + bash delete_nginx_proxy.sh [options] + +Required options: + --ctid Container ID (used to find components by description) + +Optional: + --fqdn Full domain name (to find HTTP Server by servername) + --opnsense-host OPNsense IP or hostname (default: 192.168.45.1) + --opnsense-port OPNsense WebUI/API port (default: 4444) + --dry-run Show what would be deleted without actually deleting + --debug Enable debug mode + --help Show this help + +Examples: + # Delete proxy by CTID: + bash delete_nginx_proxy.sh --ctid 768736636 + + # Delete proxy with debug output: + bash delete_nginx_proxy.sh --debug --ctid 768736636 + + # Dry run (show what would be deleted): + bash delete_nginx_proxy.sh --dry-run --ctid 768736636 + + # Delete by CTID and FQDN: + bash delete_nginx_proxy.sh --ctid 768736636 --fqdn sb-1768736636.userman.de +EOF +} + +# ============================================================================= +# Default values for arguments +# ============================================================================= +CTID="" +FQDN="" +DRY_RUN="0" + +# ============================================================================= +# Argument parsing +# ============================================================================= +while [[ $# -gt 0 ]]; do + case "$1" in + --ctid) CTID="${2:-}"; shift 2 ;; + --fqdn) FQDN="${2:-}"; shift 2 ;; + --opnsense-host) OPNSENSE_HOST="${2:-}"; shift 2 ;; + --opnsense-port) OPNSENSE_PORT="${2:-}"; shift 2 ;; + --dry-run) DRY_RUN="1"; shift 1 ;; + --debug) DEBUG="1"; export DEBUG; shift 1 ;; + --help|-h) usage; exit 0 ;; + *) die "Unknown option: $1 (use --help)" ;; + esac +done + +# ============================================================================= +# API Base URL +# ============================================================================= +API_BASE="https://${OPNSENSE_HOST}:${OPNSENSE_PORT}/api" + +# ============================================================================= +# API Helper Functions +# ============================================================================= + +# Make API request to OPNsense +api_request() { + local method="$1" + local endpoint="$2" + local data="${3:-}" + + local url="${API_BASE}${endpoint}" + local auth="${OPNSENSE_API_KEY}:${OPNSENSE_API_SECRET}" + + info 
"API ${method} ${url}" + + local response + + if [[ -n "$data" ]]; then + response=$(curl -s -k -X "${method}" \ + -u "${auth}" \ + -H "Content-Type: application/json" \ + -d "${data}" \ + "${url}" 2>&1) + else + response=$(curl -s -k -X "${method}" \ + -u "${auth}" \ + "${url}" 2>&1) + fi + + echo "$response" +} + +# Search for items by description +search_by_description() { + local search_endpoint="$1" + local description="$2" + + local response + response=$(api_request "GET" "${search_endpoint}") + + info "Search response for ${search_endpoint}: ${response:0:500}..." + + # Extract all UUIDs where description matches + local uuid + uuid=$(echo "$response" | python3 -c " +import json, sys +desc = sys.argv[1] if len(sys.argv) > 1 else '' +try: + data = json.load(sys.stdin) + rows = data.get('rows', []) + for row in rows: + row_desc = row.get('description', '') + if row_desc == desc: + print(row.get('uuid', '')) + sys.exit(0) +except Exception as e: + print(f'Error: {e}', file=sys.stderr) +" "${description}" 2>/dev/null || true) + + info "Found UUID for description '${description}': ${uuid:-none}" + echo "$uuid" +} + +# Search for HTTP Server by servername +search_http_server_by_servername() { + local servername="$1" + + local response + response=$(api_request "GET" "/nginx/settings/searchHttpServer") + + info "HTTP Server search response: ${response:0:500}..." + + # Extract UUID where servername matches + local uuid + uuid=$(echo "$response" | python3 -c " +import json, sys +sname = sys.argv[1] if len(sys.argv) > 1 else '' +try: + data = json.load(sys.stdin) + rows = data.get('rows', []) + for row in rows: + row_sname = row.get('servername', '') + if row_sname == sname: + print(row.get('uuid', '')) + sys.exit(0) +except Exception as e: + print(f'Error: {e}', file=sys.stderr) +" "${servername}" 2>/dev/null || true) + + info "Found HTTP Server UUID for servername '${servername}': ${uuid:-none}" + echo "$uuid" +} + +# ============================================================================= +# Delete Functions +# ============================================================================= + +delete_item() { + local item_type="$1" + local uuid="$2" + local endpoint="$3" + + if [[ -z "$uuid" ]]; then + info "No ${item_type} found to delete" + return 0 + fi + + if [[ "$DRY_RUN" == "1" ]]; then + info "[DRY-RUN] Would delete ${item_type}: ${uuid}" + echo "dry-run" + return 0 + fi + + info "Deleting ${item_type}: ${uuid}" + local response + response=$(api_request "POST" "${endpoint}/${uuid}") + + local result + result=$(echo "$response" | python3 -c "import json,sys; print(json.load(sys.stdin).get('result','unknown'))" 2>/dev/null || echo "unknown") + + if [[ "$result" == "deleted" ]]; then + info "${item_type} deleted successfully" + echo "deleted" + else + warn "Failed to delete ${item_type}: ${response}" + echo "failed" + fi +} + +# ============================================================================= +# Validation +# ============================================================================= +[[ -n "$CTID" ]] || die "--ctid is required" + +info "Script Version: ${SCRIPT_VERSION}" +info "Configuration:" +info " CTID: ${CTID}" +info " FQDN: ${FQDN:-auto-detect}" +info " OPNsense: ${OPNSENSE_HOST}:${OPNSENSE_PORT}" +info " Dry Run: ${DRY_RUN}" + +# ============================================================================= +# Main +# ============================================================================= +main() { + info "Starting NGINX Reverse Proxy deletion for CTID 
${CTID}..." + + local description="${CTID}" + local deleted_count=0 + local failed_count=0 + + # Results tracking + local http_server_result="not_found" + local location_result="not_found" + local upstream_result="not_found" + local upstream_server_result="not_found" + + # Step 1: Find and delete HTTP Server + info "Step 1: Finding HTTP Server..." + local http_server_uuid="" + + # Try to find by FQDN first + if [[ -n "$FQDN" ]]; then + http_server_uuid=$(search_http_server_by_servername "${FQDN}") + fi + + # If not found by FQDN, try common patterns + if [[ -z "$http_server_uuid" ]]; then + # Try sb-.userman.de pattern + http_server_uuid=$(search_http_server_by_servername "sb-${CTID}.userman.de") + fi + + if [[ -z "$http_server_uuid" ]]; then + # Try sb-1.userman.de pattern (with leading 1) + http_server_uuid=$(search_http_server_by_servername "sb-1${CTID}.userman.de") + fi + + if [[ -n "$http_server_uuid" ]]; then + http_server_result=$(delete_item "HTTP Server" "$http_server_uuid" "/nginx/settings/delHttpServer") + if [[ "$http_server_result" == "deleted" || "$http_server_result" == "dry-run" ]]; then + deleted_count=$((deleted_count + 1)) + else + failed_count=$((failed_count + 1)) + fi + else + info "No HTTP Server found for CTID ${CTID}" + fi + + # Step 2: Find and delete Location + info "Step 2: Finding Location..." + local location_uuid + location_uuid=$(search_by_description "/nginx/settings/searchLocation" "${description}") + + if [[ -n "$location_uuid" ]]; then + location_result=$(delete_item "Location" "$location_uuid" "/nginx/settings/delLocation") + if [[ "$location_result" == "deleted" || "$location_result" == "dry-run" ]]; then + deleted_count=$((deleted_count + 1)) + else + failed_count=$((failed_count + 1)) + fi + else + info "No Location found for CTID ${CTID}" + fi + + # Step 3: Find and delete Upstream + info "Step 3: Finding Upstream..." + local upstream_uuid + upstream_uuid=$(search_by_description "/nginx/settings/searchUpstream" "${description}") + + if [[ -n "$upstream_uuid" ]]; then + upstream_result=$(delete_item "Upstream" "$upstream_uuid" "/nginx/settings/delUpstream") + if [[ "$upstream_result" == "deleted" || "$upstream_result" == "dry-run" ]]; then + deleted_count=$((deleted_count + 1)) + else + failed_count=$((failed_count + 1)) + fi + else + info "No Upstream found for CTID ${CTID}" + fi + + # Step 4: Find and delete Upstream Server + info "Step 4: Finding Upstream Server..." + local upstream_server_uuid + upstream_server_uuid=$(search_by_description "/nginx/settings/searchUpstreamServer" "${description}") + + if [[ -n "$upstream_server_uuid" ]]; then + upstream_server_result=$(delete_item "Upstream Server" "$upstream_server_uuid" "/nginx/settings/delUpstreamServer") + if [[ "$upstream_server_result" == "deleted" || "$upstream_server_result" == "dry-run" ]]; then + deleted_count=$((deleted_count + 1)) + else + failed_count=$((failed_count + 1)) + fi + else + info "No Upstream Server found for CTID ${CTID}" + fi + + # Step 5: Apply configuration (if not dry-run and something was deleted) + local reconfigure_result="skipped" + if [[ "$DRY_RUN" != "1" && $deleted_count -gt 0 ]]; then + info "Step 5: Applying NGINX configuration..." 
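+        # Deletions only take effect after a reconfigure. Illustrative only,
+        # api_request below performs the equivalent of:
+        #   curl -s -k -X POST -u "$OPNSENSE_API_KEY:$OPNSENSE_API_SECRET" \
+        #        -H "Content-Type: application/json" -d '{}' \
+        #        "https://$OPNSENSE_HOST:$OPNSENSE_PORT/api/nginx/service/reconfigure"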
+ local response + response=$(api_request "POST" "/nginx/service/reconfigure" "{}") + + local status + status=$(echo "$response" | python3 -c "import json,sys; print(json.load(sys.stdin).get('status',''))" 2>/dev/null || echo "unknown") + + if [[ "$status" == "ok" ]]; then + info "NGINX configuration applied successfully" + reconfigure_result="ok" + else + warn "NGINX reconfigure status: ${status}" + reconfigure_result="failed" + fi + elif [[ "$DRY_RUN" == "1" ]]; then + info "[DRY-RUN] Would apply NGINX configuration" + reconfigure_result="dry-run" + fi + + # Output result as JSON + local success="true" + [[ $failed_count -gt 0 ]] && success="false" + + local result + result=$(cat </dev/null || echo "$result" + fi +} + +main diff --git a/install.sh b/install.sh new file mode 100755 index 0000000..e086f05 --- /dev/null +++ b/install.sh @@ -0,0 +1,731 @@ +#!/usr/bin/env bash +set -Eeuo pipefail + +# Debug mode: 0 = nur JSON, 1 = Logs auf stderr +DEBUG="${DEBUG:-0}" +export DEBUG + +SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" + +# Log-Verzeichnis +LOG_DIR="${SCRIPT_DIR}/logs" +mkdir -p "${LOG_DIR}" + +# Temporäre Log-Datei (wird später umbenannt nach Container-Hostname) +TEMP_LOG="${LOG_DIR}/install_$$.log" +FINAL_LOG="" + +# Funktion zum Aufräumen bei Exit +cleanup_log() { + # Wenn FINAL_LOG gesetzt ist, umbenennen + if [[ -n "${FINAL_LOG}" && -f "${TEMP_LOG}" ]]; then + mv "${TEMP_LOG}" "${FINAL_LOG}" + fi +} +trap cleanup_log EXIT + +# Alle Ausgaben in Log-Datei umleiten +# Bei DEBUG=1: auch auf stderr ausgeben (tee) +# Bei DEBUG=0: nur in Datei +if [[ "$DEBUG" == "1" ]]; then + # Debug-Modus: Ausgabe auf stderr UND in Datei + exec > >(tee -a "${TEMP_LOG}") 2>&1 +else + # Normal-Modus: Nur in Datei, stdout bleibt für JSON frei + exec 3>&1 # stdout (fd 3) für JSON reservieren + exec > "${TEMP_LOG}" 2>&1 +fi + +source "${SCRIPT_DIR}/libsupabase.sh" +setup_traps + +usage() { + cat >&2 <<'EOF' +Usage: + bash install.sh [options] + +Core options: + --ctid Force CT ID (optional). If omitted, a customer-safe CTID is generated. + --cores (default: unlimited) + --memory (default: 4096) + --swap (default: 512) + --disk (default: 50) + --bridge (default: vmbr0) + --storage (default: local-zfs) + --ip (default: dhcp) + --vlan VLAN tag for net0 (default: 90; set 0 to disable) + --privileged Create privileged CT (default: unprivileged) + --apt-proxy Optional: APT proxy (e.g. http://192.168.45.2:3142) for Apt-Cacher NG + +Domain / n8n options: + --base-domain (default: userman.de) -> FQDN becomes sb-.domain + --n8n-owner-email (default: admin@) + --n8n-owner-pass Optional. If omitted, generated (policy compliant). + --workflow-file Path to n8n workflow JSON file (default: RAGKI-BotPGVector.json) + --ollama-model Ollama chat model (default: ministral-3:3b) + --embedding-model Ollama embedding model (default: nomic-embed-text:latest) + --debug Enable debug mode (show logs on stderr) + --help Show help + +PostgREST / Supabase options: + --postgrest-port PostgREST port (default: 3000) + +Notes: +- This script creates a Debian 12 LXC and provisions Docker + customer stack (Postgres/pgvector + n8n + PostgREST). +- PostgREST provides a REST API for PostgreSQL, compatible with Supabase Vector Store node in n8n. +- At the end it prints a JSON with credentials and URLs. 
+EOF +} + +# Defaults +#APT_PROXY="http://192.168.45.2:3142" +DOCKER_REGISTRY_MIRROR="http://192.168.45.2:5000" +APT_PROXY="" +#DOCKER_REGISTRY_MIRROR="" +CTID="" +CORES="4" +MEMORY="4096" +SWAP="512" +DISK="50" +BRIDGE="vmbr0" +STORAGE="local-zfs" +IPCFG="dhcp" +VLAN="90" +UNPRIV="1" + +BASE_DOMAIN="userman.de" +N8N_OWNER_EMAIL="" +N8N_OWNER_PASS="" +POSTGREST_PORT="3000" + +# Workflow file (default: RAGKI-BotPGVector.json in script directory) +WORKFLOW_FILE="${SCRIPT_DIR}/RAGKI-BotPGVector.json" + +# Ollama API settings (hardcoded for local setup) +OLLAMA_HOST="192.168.45.3" +OLLAMA_PORT="11434" +OLLAMA_URL="http://${OLLAMA_HOST}:${OLLAMA_PORT}" + +# Ollama models (can be overridden via CLI) +OLLAMA_MODEL="ministral-3:3b" +EMBEDDING_MODEL="nomic-embed-text:latest" + +# --------------------------- +# Arg parsing +# --------------------------- +while [[ $# -gt 0 ]]; do + case "$1" in + --ctid) CTID="${2:-}"; shift 2 ;; + --apt-proxy) APT_PROXY="${2:-}"; shift 2 ;; + --cores) CORES="${2:-}"; shift 2 ;; + --memory) MEMORY="${2:-}"; shift 2 ;; + --swap) SWAP="${2:-}"; shift 2 ;; + --disk) DISK="${2:-}"; shift 2 ;; + --bridge) BRIDGE="${2:-}"; shift 2 ;; + --storage) STORAGE="${2:-}"; shift 2 ;; + --ip) IPCFG="${2:-}"; shift 2 ;; + --vlan) VLAN="${2:-}"; shift 2 ;; + --privileged) UNPRIV="0"; shift 1 ;; + --base-domain) BASE_DOMAIN="${2:-}"; shift 2 ;; + --n8n-owner-email) N8N_OWNER_EMAIL="${2:-}"; shift 2 ;; + --n8n-owner-pass) N8N_OWNER_PASS="${2:-}"; shift 2 ;; + --workflow-file) WORKFLOW_FILE="${2:-}"; shift 2 ;; + --ollama-model) OLLAMA_MODEL="${2:-}"; shift 2 ;; + --embedding-model) EMBEDDING_MODEL="${2:-}"; shift 2 ;; + --postgrest-port) POSTGREST_PORT="${2:-}"; shift 2 ;; + --debug) DEBUG="1"; export DEBUG; shift 1 ;; + --help|-h) usage; exit 0 ;; + *) die "Unknown option: $1 (use --help)" ;; + esac +done + +# --------------------------- +# Validation +# --------------------------- +[[ "$CORES" =~ ^[0-9]+$ ]] || die "--cores must be integer" +[[ "$MEMORY" =~ ^[0-9]+$ ]] || die "--memory must be integer" +[[ "$SWAP" =~ ^[0-9]+$ ]] || die "--swap must be integer" +[[ "$DISK" =~ ^[0-9]+$ ]] || die "--disk must be integer" +[[ "$UNPRIV" == "0" || "$UNPRIV" == "1" ]] || die "internal: UNPRIV invalid" +[[ "$VLAN" =~ ^[0-9]+$ ]] || die "--vlan must be integer (0 disables tagging)" +[[ -n "$BASE_DOMAIN" ]] || die "--base-domain must not be empty" + +if [[ "$IPCFG" != "dhcp" ]]; then + [[ "$IPCFG" =~ ^([0-9]{1,3}\.){3}[0-9]{1,3}/[0-9]{1,2}$ ]] || die "--ip must be dhcp or CIDR (e.g. 192.168.45.171/24)" +fi + +if [[ -n "${APT_PROXY}" ]]; then + [[ "${APT_PROXY}" =~ ^http://[^/]+:[0-9]+$ ]] || die "--apt-proxy must look like http://IP:PORT (example: http://192.168.45.2:3142)" +fi + +# Validate workflow file exists +if [[ ! 
-f "${WORKFLOW_FILE}" ]]; then + die "Workflow file not found: ${WORKFLOW_FILE}" +fi + +info "Argument-Parsing OK" +info "Workflow file: ${WORKFLOW_FILE}" +info "Ollama model: ${OLLAMA_MODEL}" +info "Embedding model: ${EMBEDDING_MODEL}" + +if [[ -n "${APT_PROXY}" ]]; then + info "APT proxy enabled: ${APT_PROXY}" +else + info "APT proxy disabled" +fi + + +# --------------------------- +# Preflight Proxmox +# --------------------------- +need_cmd pct pvesm pveam pvesh grep date awk sed cut tr head + +pve_storage_exists "$STORAGE" || die "Storage not found: $STORAGE" +pve_bridge_exists "$BRIDGE" || die "Bridge not found: $BRIDGE" + +TEMPLATE="$(pve_template_ensure_debian12 "$STORAGE")" +info "Template OK: ${TEMPLATE}" + +# Hostname / FQDN based on unix time +UNIXTS="$(date +%s)" +CT_HOSTNAME="sb-${UNIXTS}" +FQDN="${CT_HOSTNAME}.${BASE_DOMAIN}" + +# Log-Datei nach Container-Hostname benennen +FINAL_LOG="${LOG_DIR}/${CT_HOSTNAME}.log" + +# CTID selection +if [[ -n "$CTID" ]]; then + [[ "$CTID" =~ ^[0-9]+$ ]] || die "--ctid must be integer" + if pve_vmid_exists_cluster "$CTID"; then + die "Forced CTID=${CTID} already exists in cluster" + fi +else + # Your agreed approach: unix time - 1000000000 (safe until 2038) + CTID="$(pve_ctid_from_unixtime "$UNIXTS")" + if pve_vmid_exists_cluster "$CTID"; then + die "Generated CTID=${CTID} already exists in cluster (unexpected). Try again in 1s." + fi +fi + +# n8n owner defaults +if [[ -z "$N8N_OWNER_EMAIL" ]]; then + N8N_OWNER_EMAIL="admin@${BASE_DOMAIN}" +fi +if [[ -z "$N8N_OWNER_PASS" ]]; then + N8N_OWNER_PASS="$(gen_password_policy)" +else + # enforce policy early to avoid the UI error you saw + password_policy_check "$N8N_OWNER_PASS" || die "--n8n-owner-pass does not meet policy: 8+ chars, 1 number, 1 uppercase" +fi + +info "CTID selected: ${CTID}" +info "SCRIPT_DIR=${SCRIPT_DIR}" +info "CT_HOSTNAME=${CT_HOSTNAME}" +info "FQDN=${FQDN}" +info "cores=${CORES} memory=${MEMORY}MB swap=${SWAP}MB disk=${DISK}GB" +info "bridge=${BRIDGE} storage=${STORAGE} ip=${IPCFG} vlan=${VLAN} unprivileged=${UNPRIV}" + +# --------------------------- +# Step 5: Create CT +# --------------------------- +NET0="$(pve_build_net0 "$BRIDGE" "$IPCFG" "$VLAN")" +ROOTFS="${STORAGE}:${DISK}" +FEATURES="nesting=1,keyctl=1,fuse=1" + +info "Step 5: Create CT" +info "Creating CT ${CTID} (${CT_HOSTNAME}) from ${TEMPLATE}" +pct create "${CTID}" "${TEMPLATE}" \ + --hostname "${CT_HOSTNAME}" \ + --cores "${CORES}" \ + --memory "${MEMORY}" \ + --swap "${SWAP}" \ + --net0 "${NET0}" \ + --rootfs "${ROOTFS}" \ + --unprivileged "${UNPRIV}" \ + --features "${FEATURES}" \ + --start 0 \ + --onboot yes + +info "CT created (not started). 
Next step: start CT + wait for IP" +info "Starting CT ${CTID}" +pct start "${CTID}" + +CT_IP="$(pct_wait_for_ip "${CTID}" || true)" +[[ -n "${CT_IP}" ]] || die "Could not determine CT IP after start" + +info "Step 5 OK: LXC erstellt + IP ermittelt" +info "CT_HOSTNAME=${CT_HOSTNAME}" +info "CT_IP=${CT_IP}" + +# --------------------------- +# Step 6: Provision inside CT (Docker + Locales + Base) +# --------------------------- +info "Step 6: Provisioning im CT (Docker + Locales + Base)" + +# Optional: APT proxy (Apt-Cacher NG) +if [[ -n "${APT_PROXY}" ]]; then + pct_exec "${CTID}" "cat > /etc/apt/apt.conf.d/00aptproxy <<'EOF' +Acquire::http::Proxy \"${APT_PROXY}\"; +#Acquire::https::Proxy \"DIRECT\"; +Acquire::https::Proxy \"${APT_PROXY}\"; +EOF" + pct_exec "$CTID" "apt-config dump | grep -i proxy || true" +fi + +# Minimal base packages +pct_exec "${CTID}" "export DEBIAN_FRONTEND=noninteractive; apt-get update -y" +pct_exec "${CTID}" "export DEBIAN_FRONTEND=noninteractive; apt-get install -y ca-certificates curl gnupg lsb-release" + +# Locales (avoid perl warnings + consistent system) +pct_exec "${CTID}" "export DEBIAN_FRONTEND=noninteractive; apt-get update -y" +pct_exec "${CTID}" "export DEBIAN_FRONTEND=noninteractive; apt-get install -y locales ca-certificates curl gnupg lsb-release" +pct_exec "${CTID}" "sed -i 's/^# *de_DE.UTF-8 UTF-8/de_DE.UTF-8 UTF-8/; s/^# *en_US.UTF-8 UTF-8/en_US.UTF-8 UTF-8/' /etc/locale.gen || true" +pct_exec "${CTID}" "locale-gen >/dev/null || true" +pct_exec "${CTID}" "update-locale LANG=de_DE.UTF-8 LC_ALL=de_DE.UTF-8 || true" + +# Docker official repo (Debian 12 / bookworm) +pct_exec "${CTID}" "install -m 0755 -d /etc/apt/keyrings" +pct_exec "${CTID}" "curl -fsSL https://download.docker.com/linux/debian/gpg | gpg --dearmor -o /etc/apt/keyrings/docker.gpg" +pct_exec "${CTID}" "chmod a+r /etc/apt/keyrings/docker.gpg" +pct_exec "${CTID}" "echo \"deb [arch=\$(dpkg --print-architecture) signed-by=/etc/apt/keyrings/docker.gpg] https://download.docker.com/linux/debian \$(. /etc/os-release && echo \$VERSION_CODENAME) stable\" > /etc/apt/sources.list.d/docker.list" +pct_exec "${CTID}" "export DEBIAN_FRONTEND=noninteractive; apt-get update -y" +pct_exec "${CTID}" "export DEBIAN_FRONTEND=noninteractive; apt-get install -y docker-ce docker-ce-cli containerd.io docker-buildx-plugin docker-compose-plugin" + +# Create stack directories +pct_exec "${CTID}" "mkdir -p /opt/customer-stack/volumes/postgres/data /opt/customer-stack/volumes/n8n-data /opt/customer-stack/sql" +# IMPORTANT: n8n runs as node (uid 1000) => fix permissions +pct_exec "${CTID}" "chown -R 1000:1000 /opt/customer-stack/volumes/n8n-data" + + + +info "Step 6 OK: Docker + Compose Plugin installiert, Locales gesetzt, Basis-Verzeichnisse erstellt" +info "Next: Schritt 7 (finales docker-compose + Secrets + n8n/supabase up + Healthchecks)" + +# --------------------------- +# Step 7: Stack finalisieren + Secrets + Up + Checks +# --------------------------- +info "Step 7: Stack finalisieren + Secrets + Up + Checks" + +# Secrets +PG_DB="customer" +PG_USER="customer" +PG_PASSWORD="$(gen_password_policy)" +N8N_ENCRYPTION_KEY="$(gen_hex_64)" + +# External URL is HTTPS via OPNsense reverse proxy (but container internally is http) +N8N_PORT="5678" +N8N_PROTOCOL="http" +N8N_HOST="${CT_IP}" +N8N_EDITOR_BASE_URL="https://${FQDN}/" +WEBHOOK_URL="https://${FQDN}/" + +# If you are behind HTTPS reverse proxy, secure cookies can be true. +# But until proxy is in place, false avoids login trouble. 
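+# Once the OPNsense proxy terminates TLS for ${FQDN}, this could be switched to
+# "true" in /opt/customer-stack/.env and the stack restarted (for example with
+# `docker compose up -d` in /opt/customer-stack); this script leaves that as a
+# manual follow-up step.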
+N8N_SECURE_COOKIE="false" + +# Generate JWT secret for PostgREST (32 bytes = 256 bit) +JWT_SECRET="$(openssl rand -base64 32 | tr -d '\n')" + +# For proper JWT, we need header.payload.signature format +# Let's create proper JWTs +JWT_HEADER="$(echo -n '{"alg":"HS256","typ":"JWT"}' | base64 | tr -d '\n' | tr '+/' '-_' | tr -d '=')" +ANON_PAYLOAD="$(echo -n '{"role":"anon","iss":"supabase","iat":1700000000,"exp":2000000000}' | base64 | tr -d '\n' | tr '+/' '-_' | tr -d '=')" +SERVICE_PAYLOAD="$(echo -n '{"role":"service_role","iss":"supabase","iat":1700000000,"exp":2000000000}' | base64 | tr -d '\n' | tr '+/' '-_' | tr -d '=')" + +ANON_SIGNATURE="$(echo -n "${JWT_HEADER}.${ANON_PAYLOAD}" | openssl dgst -sha256 -hmac "${JWT_SECRET}" -binary | base64 | tr -d '\n' | tr '+/' '-_' | tr -d '=')" +SERVICE_SIGNATURE="$(echo -n "${JWT_HEADER}.${SERVICE_PAYLOAD}" | openssl dgst -sha256 -hmac "${JWT_SECRET}" -binary | base64 | tr -d '\n' | tr '+/' '-_' | tr -d '=')" + +ANON_KEY="${JWT_HEADER}.${ANON_PAYLOAD}.${ANON_SIGNATURE}" +SERVICE_ROLE_KEY="${JWT_HEADER}.${SERVICE_PAYLOAD}.${SERVICE_SIGNATURE}" + +info "Generated JWT Secret and API Keys for PostgREST" + +# Write .env into CT +pct_push_text "${CTID}" "/opt/customer-stack/.env" "$(cat < query_embedding) AS similarity + FROM public.documents d + WHERE (filter = '{}' OR d.metadata @> filter) + ORDER BY d.embedding <=> query_embedding + LIMIT match_count; +END; +$$; + +-- Grant permissions for PostgREST roles +-- Create roles if they don't exist +DO $$ +BEGIN + IF NOT EXISTS (SELECT FROM pg_roles WHERE rolname = 'anon') THEN + CREATE ROLE anon NOLOGIN; + END IF; + IF NOT EXISTS (SELECT FROM pg_roles WHERE rolname = 'service_role') THEN + CREATE ROLE service_role NOLOGIN; + END IF; + IF NOT EXISTS (SELECT FROM pg_roles WHERE rolname = 'authenticator') THEN + CREATE ROLE authenticator NOINHERIT LOGIN PASSWORD 'authenticator_password'; + END IF; +END +$$; + +-- Grant permissions +GRANT USAGE ON SCHEMA public TO anon, service_role; +GRANT ALL ON ALL TABLES IN SCHEMA public TO anon, service_role; +GRANT ALL ON ALL SEQUENCES IN SCHEMA public TO anon, service_role; +GRANT EXECUTE ON ALL FUNCTIONS IN SCHEMA public TO anon, service_role; + +-- Allow authenticator to switch to these roles +GRANT anon TO authenticator; +GRANT service_role TO authenticator; + +-- Set default privileges for future tables +ALTER DEFAULT PRIVILEGES IN SCHEMA public GRANT ALL ON TABLES TO anon, service_role; +ALTER DEFAULT PRIVILEGES IN SCHEMA public GRANT ALL ON SEQUENCES TO anon, service_role; +ALTER DEFAULT PRIVILEGES IN SCHEMA public GRANT EXECUTE ON FUNCTIONS TO anon, service_role; +SQL +)" + +# docker-compose.yml +pct_push_text "${CTID}" "/opt/customer-stack/docker-compose.yml" "$(cat <<'YML' +services: + postgres: + image: pgvector/pgvector:pg16 + container_name: customer-postgres + restart: unless-stopped + environment: + POSTGRES_DB: ${PG_DB} + POSTGRES_USER: ${PG_USER} + POSTGRES_PASSWORD: ${PG_PASSWORD} + volumes: + - ./volumes/postgres/data:/var/lib/postgresql/data + - ./sql:/docker-entrypoint-initdb.d:ro + healthcheck: + test: ["CMD-SHELL", "pg_isready -U ${PG_USER} -d ${PG_DB} || exit 1"] + interval: 10s + timeout: 5s + retries: 20 + networks: + - customer-net + + postgrest: + image: postgrest/postgrest:latest + container_name: customer-postgrest + restart: unless-stopped + depends_on: + postgres: + condition: service_healthy + ports: + - "${POSTGREST_PORT}:3000" + environment: + PGRST_DB_URI: postgres://${PG_USER}:${PG_PASSWORD}@postgres:5432/${PG_DB} + 
PGRST_DB_SCHEMA: public + PGRST_DB_ANON_ROLE: anon + PGRST_JWT_SECRET: ${JWT_SECRET} + PGRST_DB_USE_LEGACY_GUCS: "false" + networks: + - customer-net + + n8n: + image: n8nio/n8n:latest + container_name: n8n + restart: unless-stopped + depends_on: + postgres: + condition: service_healthy + postgrest: + condition: service_started + ports: + - "${N8N_PORT}:5678" + environment: + # --- Web / Cookies / URL --- + N8N_PORT: 5678 + N8N_PROTOCOL: ${N8N_PROTOCOL} + N8N_HOST: ${N8N_HOST} + N8N_EDITOR_BASE_URL: ${N8N_EDITOR_BASE_URL} + WEBHOOK_URL: ${WEBHOOK_URL} + N8N_SECURE_COOKIE: ${N8N_SECURE_COOKIE} + + # --- Disable telemetry / background calls --- + N8N_DIAGNOSTICS_ENABLED: ${N8N_DIAGNOSTICS_ENABLED} + N8N_VERSION_NOTIFICATIONS_ENABLED: ${N8N_VERSION_NOTIFICATIONS_ENABLED} + N8N_TEMPLATES_ENABLED: ${N8N_TEMPLATES_ENABLED} + + # --- DB (Postgres) --- + DB_TYPE: postgresdb + DB_POSTGRESDB_HOST: postgres + DB_POSTGRESDB_PORT: 5432 + DB_POSTGRESDB_DATABASE: ${PG_DB} + DB_POSTGRESDB_USER: ${PG_USER} + DB_POSTGRESDB_PASSWORD: ${PG_PASSWORD} + + # --- Basics --- + GENERIC_TIMEZONE: Europe/Berlin + TZ: Europe/Berlin + + N8N_ENCRYPTION_KEY: ${N8N_ENCRYPTION_KEY} + + volumes: + - ./volumes/n8n-data:/home/node/.n8n + networks: + - customer-net + +networks: + customer-net: + driver: bridge +YML +)" + +# Make sure permissions are correct (again, after file writes) +pct_exec "${CTID}" "chown -R 1000:1000 /opt/customer-stack/volumes/n8n-data" + +# Proxy +if [[ -n "${APT_PROXY}" ]]; then +pct_exec "$CTID" "mkdir -p /etc/docker" + +pct_exec "$CTID" "cat > /etc/docker/daemon.json </dev/null 2>&1 || true" + +# Try modern command first (works in current n8n builds); if it fails, we leave setup screen (but you'll see it in logs). +pct_exec "${CTID}" "cd /opt/customer-stack && (docker exec -u node n8n n8n user-management:reset --email '${N8N_OWNER_EMAIL}' --password '${N8N_OWNER_PASS}' --firstName 'Admin' --lastName 'Owner' >/dev/null 2>&1 || true)" + +info "Step 7 OK: Stack deployed" + +# --------------------------- +# Step 8: Setup Owner Account via REST API (fallback) +# --------------------------- +info "Step 8: Setting up owner account via REST API..." + +# Wait for n8n to be ready +sleep 5 + +# Try REST API setup (works if user-management:reset didn't work) +pct_exec "${CTID}" "curl -sS -X POST 'http://127.0.0.1:5678/rest/owner/setup' \ + -H 'Content-Type: application/json' \ + -d '{\"email\":\"${N8N_OWNER_EMAIL}\",\"firstName\":\"Admin\",\"lastName\":\"Owner\",\"password\":\"${N8N_OWNER_PASS}\"}' || true" + +info "Step 8 OK: Owner account setup attempted" + +# --------------------------- +# Step 9: Final URLs and Output +# --------------------------- +info "Step 9: Generating final output..." + +# Final URLs +N8N_INTERNAL_URL="http://${CT_IP}:5678/" +N8N_EXTERNAL_URL="https://${FQDN}" +POSTGREST_URL="http://${CT_IP}:${POSTGREST_PORT}" +# Supabase URL format for n8n credential (PostgREST acts as Supabase API) +# IMPORTANT: n8n runs inside Docker, so it needs the Docker-internal URL! 
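+# Inside the compose network the PostgREST service is reachable by its service
+# name ("postgrest"), so that is what the Supabase-style credential in n8n has
+# to point at; the CT-IP variant below is only for clients outside the container.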
+SUPABASE_URL="http://postgrest:3000" +SUPABASE_URL_EXTERNAL="http://${CT_IP}:${POSTGREST_PORT}" + +# Chat URL (webhook URL for the chat trigger - will be available after workflow activation) +CHAT_WEBHOOK_URL="https://${FQDN}/webhook/rag-chat-webhook/chat" +CHAT_INTERNAL_URL="http://${CT_IP}:5678/webhook/rag-chat-webhook/chat" + +# Upload Form URL (for document upload) +UPLOAD_FORM_URL="https://${FQDN}/form/rag-upload-form" +UPLOAD_FORM_INTERNAL_URL="http://${CT_IP}:5678/form/rag-upload-form" + +info "n8n intern: ${N8N_INTERNAL_URL}" +info "n8n extern (geplant via OPNsense): ${N8N_EXTERNAL_URL}" +info "PostgREST API: ${POSTGREST_URL}" +info "Supabase Service Role Key: ${SERVICE_ROLE_KEY}" +info "Ollama URL: ${OLLAMA_URL}" +info "Chat Webhook URL (extern): ${CHAT_WEBHOOK_URL}" +info "Chat Webhook URL (intern): ${CHAT_INTERNAL_URL}" + +# --------------------------- +# Step 10: Setup n8n Credentials + Import Workflow + Activate +# --------------------------- +info "Step 10: Setting up n8n credentials and importing RAG workflow..." + +# Use the new robust n8n setup function from libsupabase.sh +# Parameters: ctid, email, password, pg_host, pg_port, pg_db, pg_user, pg_pass, ollama_url, ollama_model, embedding_model, workflow_file +if n8n_setup_rag_workflow "${CTID}" "${N8N_OWNER_EMAIL}" "${N8N_OWNER_PASS}" \ + "postgres" "5432" "${PG_DB}" "${PG_USER}" "${PG_PASSWORD}" \ + "${OLLAMA_URL}" "${OLLAMA_MODEL}" "${EMBEDDING_MODEL}" "${WORKFLOW_FILE}"; then + info "Step 10 OK: n8n RAG workflow setup completed successfully" +else + warn "Step 10: n8n workflow setup failed - manual setup may be required" + info "Step 10: You can manually import the workflow via n8n UI" +fi + +# --------------------------- +# Step 10a: Setup Workflow Auto-Reload on LXC Restart +# --------------------------- +info "Step 10a: Setting up workflow auto-reload on LXC restart..." + +# Copy workflow template to container for auto-reload +info "Copying workflow template to container..." +if [[ -f "${WORKFLOW_FILE}" ]]; then + # Read workflow file content + WORKFLOW_CONTENT=$(cat "${WORKFLOW_FILE}") + pct_push_text "${CTID}" "/opt/customer-stack/workflow-template.json" "${WORKFLOW_CONTENT}" + info "Workflow template saved to /opt/customer-stack/workflow-template.json" +else + warn "Workflow file not found: ${WORKFLOW_FILE}" +fi + +# Copy reload script to container +info "Installing workflow reload script..." +RELOAD_SCRIPT_CONTENT=$(cat "${SCRIPT_DIR}/templates/reload-workflow.sh") +pct_push_text "${CTID}" "/opt/customer-stack/reload-workflow.sh" "${RELOAD_SCRIPT_CONTENT}" +pct_exec "${CTID}" "chmod +x /opt/customer-stack/reload-workflow.sh" +info "Reload script installed" + +# Copy systemd service file to container +info "Installing systemd service for workflow auto-reload..." +SYSTEMD_SERVICE_CONTENT=$(cat "${SCRIPT_DIR}/templates/n8n-workflow-reload.service") +pct_push_text "${CTID}" "/etc/systemd/system/n8n-workflow-reload.service" "${SYSTEMD_SERVICE_CONTENT}" + +# Enable and start systemd service +pct_exec "${CTID}" "systemctl daemon-reload" +pct_exec "${CTID}" "systemctl enable n8n-workflow-reload.service" +info "Systemd service enabled" + +info "Step 10a OK: Workflow auto-reload configured" +info "The workflow will be automatically reloaded on every LXC restart" + +# --------------------------- +# Step 11: Setup NGINX Reverse Proxy in OPNsense +# --------------------------- +info "Step 11: Setting up NGINX Reverse Proxy in OPNsense..." 
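+# The proxy is what makes the external URLs above reachable. Once it is in
+# place, a quick smoke test of the chat endpoint might look like the call below
+# (field names other than chatInput are assumptions based on the Chat Trigger
+# node and may differ between n8n versions):
+#   curl -X POST "${CHAT_WEBHOOK_URL}" -H 'Content-Type: application/json' \
+#        -d '{"chatInput":"Hallo Clara","sessionId":"smoke-test"}'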
+ +# Check if setup_nginx_proxy.sh exists +if [[ -f "${SCRIPT_DIR}/setup_nginx_proxy.sh" ]]; then + # Run the proxy setup script + PROXY_RESULT=$(DEBUG="${DEBUG}" bash "${SCRIPT_DIR}/setup_nginx_proxy.sh" \ + --ctid "${CTID}" \ + --hostname "${CT_HOSTNAME}" \ + --fqdn "${FQDN}" \ + --backend-ip "${CT_IP}" \ + --backend-port "5678" \ + 2>&1 || echo '{"success": false, "error": "Proxy setup failed"}') + + # Check if proxy setup was successful + if echo "$PROXY_RESULT" | grep -q '"success": true'; then + info "NGINX Reverse Proxy setup successful" + else + warn "NGINX Reverse Proxy setup may have failed: ${PROXY_RESULT}" + fi +else + warn "setup_nginx_proxy.sh not found, skipping proxy setup" +fi + +info "Step 11 OK: Proxy setup completed" + +# --------------------------- +# Final JSON Output +# --------------------------- +# Machine-readable JSON output (for your downstream automation) +# Kompaktes JSON in einer Zeile für einfaches Parsing +# Bei DEBUG=0: JSON auf fd 3 (ursprüngliches stdout) ausgeben +# Bei DEBUG=1: JSON normal auf stdout (geht auch ins Log) +JSON_OUTPUT="{\"ctid\":${CTID},\"hostname\":\"${CT_HOSTNAME}\",\"fqdn\":\"${FQDN}\",\"ip\":\"${CT_IP}\",\"vlan\":${VLAN},\"urls\":{\"n8n_internal\":\"${N8N_INTERNAL_URL}\",\"n8n_external\":\"${N8N_EXTERNAL_URL}\",\"postgrest\":\"${POSTGREST_URL}\",\"chat_webhook\":\"${CHAT_WEBHOOK_URL}\",\"chat_internal\":\"${CHAT_INTERNAL_URL}\",\"upload_form\":\"${UPLOAD_FORM_URL}\",\"upload_form_internal\":\"${UPLOAD_FORM_INTERNAL_URL}\"},\"postgres\":{\"host\":\"postgres\",\"port\":5432,\"db\":\"${PG_DB}\",\"user\":\"${PG_USER}\",\"password\":\"${PG_PASSWORD}\"},\"supabase\":{\"url\":\"${SUPABASE_URL}\",\"url_external\":\"${SUPABASE_URL_EXTERNAL}\",\"anon_key\":\"${ANON_KEY}\",\"service_role_key\":\"${SERVICE_ROLE_KEY}\",\"jwt_secret\":\"${JWT_SECRET}\"},\"ollama\":{\"url\":\"${OLLAMA_URL}\",\"model\":\"${OLLAMA_MODEL}\",\"embedding_model\":\"${EMBEDDING_MODEL}\"},\"n8n\":{\"encryption_key\":\"${N8N_ENCRYPTION_KEY}\",\"owner_email\":\"${N8N_OWNER_EMAIL}\",\"owner_password\":\"${N8N_OWNER_PASS}\",\"secure_cookie\":${N8N_SECURE_COOKIE}},\"log_file\":\"${FINAL_LOG}\"}" + +if [[ "$DEBUG" == "1" ]]; then + # Debug-Modus: JSON normal ausgeben (formatiert für Lesbarkeit) + echo "$JSON_OUTPUT" | python3 -m json.tool 2>/dev/null || echo "$JSON_OUTPUT" +else + # Normal-Modus: JSON auf ursprüngliches stdout (fd 3) - kompakt + echo "$JSON_OUTPUT" >&3 +fi + +# --------------------------- +# Save credentials to file +# --------------------------- +CREDENTIALS_DIR="${SCRIPT_DIR}/credentials" +mkdir -p "${CREDENTIALS_DIR}" +CREDENTIALS_FILE="${CREDENTIALS_DIR}/${CT_HOSTNAME}.json" + +# Save formatted credentials +echo "$JSON_OUTPUT" | python3 -m json.tool > "${CREDENTIALS_FILE}" 2>/dev/null || echo "$JSON_OUTPUT" > "${CREDENTIALS_FILE}" + +info "Credentials saved to: ${CREDENTIALS_FILE}" +info "To update credentials later, use: bash update_credentials.sh --ctid ${CTID} --credentials-file ${CREDENTIALS_FILE}" diff --git a/libsupabase.sh b/libsupabase.sh new file mode 100755 index 0000000..341caf8 --- /dev/null +++ b/libsupabase.sh @@ -0,0 +1,979 @@ +#!/usr/bin/env bash +set -Eeuo pipefail + +# Debug mode: 0 = nur JSON ausgeben, 1 = Logs auf stderr +DEBUG="${DEBUG:-0}" + +log_ts() { date "+[%F %T]"; } + +info() { + [[ "$DEBUG" == "1" ]] && echo "$(log_ts) INFO: $*" >&2 + return 0 +} + +warn() { + [[ "$DEBUG" == "1" ]] && echo "$(log_ts) WARN: $*" >&2 + return 0 +} + +die() { + if [[ "$DEBUG" == "1" ]]; then + echo "$(log_ts) ERROR: $*" >&2 + else + # JSON-Fehler auf 
fd 3 ausgeben (falls verfügbar), sonst stdout + if { true >&3; } 2>/dev/null; then + echo "{\"error\": \"$*\"}" >&3 + else + echo "{\"error\": \"$*\"}" + fi + fi + exit 1 +} + +setup_traps() { + trap 'rc=$?; if [[ $rc -ne 0 ]]; then + if [[ "$DEBUG" == "1" ]]; then + echo "$(log_ts) ERROR: Failed at line ${BASH_LINENO[0]}: ${BASH_COMMAND} (exit=$rc)" >&2 + else + # JSON-Fehler auf fd 3 ausgeben (falls verfügbar), sonst stdout + if { true >&3; } 2>/dev/null; then + echo "{\"error\": \"Failed at line ${BASH_LINENO[0]}: ${BASH_COMMAND} (exit=$rc)\"}" >&3 + else + echo "{\"error\": \"Failed at line ${BASH_LINENO[0]}: ${BASH_COMMAND} (exit=$rc)\"}" + fi + fi + fi; exit $rc' ERR +} + +need_cmd() { + local c + for c in "$@"; do + command -v "$c" >/dev/null 2>&1 || die "Missing command: $c" + done +} + +# ----- Proxmox helpers ----- + +pve_storage_exists() { + local s="$1" + pvesm status | awk 'NR>1{print $1}' | grep -qx "$s" +} + +pve_bridge_exists() { + local b="$1" + ip link show "$b" >/dev/null 2>&1 +} + +# Return ONLY template path on stdout. Logs go to stderr. +pve_template_ensure_debian12() { + local storage="$1" + local tmpl="debian-12-standard_12.12-1_amd64.tar.zst" + local cache="/var/lib/vz/template/cache/${tmpl}" + + # pveam templates must be on "local" (dir storage), not on zfs + local tstore="$storage" + if ! pveam available -section system >/dev/null 2>&1; then + warn "pveam not working? continuing" + fi + + # heuristic: if storage isn't usable for templates, fallback to local + # Most Proxmox setups use 'local' for templates. + if ! pvesm status | awk 'NR>1{print $1,$2}' | grep -q "^${tstore} "; then + warn "pveam storage '${tstore}' not found; falling back to 'local'" + tstore="local" + fi + + # If storage exists but isn't a dir storage for templates, pveam will fail -> fallback + if ! pveam list "${tstore}" >/dev/null 2>&1; then + warn "pveam storage '${tstore}' not available for templates; falling back to 'local'" + tstore="local" + fi + + if [[ ! -f "$cache" ]]; then + info "Downloading CT template to ${tstore}: ${tmpl}" + pveam download "${tstore}" "${tmpl}" >&2 + fi + + echo "${tstore}:vztmpl/${tmpl}" +} + +# Build net0 string (with optional vlan tag) +pve_build_net0() { + local bridge="$1" + local ipcfg="$2" + local vlan="${3:-0}" + + local mac + mac="$(gen_mac)" + + local net="name=eth0,bridge=${bridge},hwaddr=${mac}" + if [[ "$vlan" != "0" ]]; then + net+=",tag=${vlan}" + fi + + if [[ "$ipcfg" == "dhcp" ]]; then + net+=",ip=dhcp" + else + net+=",ip=${ipcfg}" + fi + + echo "$net" +} + +# Wait for IP from pct; returns first IPv4 +pct_wait_for_ip() { + local ctid="$1" + local i ip + for i in $(seq 1 40); do + ip="$(pct exec "$ctid" -- bash -lc "ip -4 -o addr show scope global | awk '{print \$4}' | cut -d/ -f1 | head -n1" 2>/dev/null || true)" + if [[ -n "$ip" ]]; then + echo "$ip" + return 0 + fi + sleep 1 + done + return 1 +} + +pct_exec() { + local ctid="$1"; shift + pct exec "$ctid" -- bash -lc "$*" +} + +# Push a text file into CT without SCP +pct_push_text() { + local ctid="$1" + local dest="$2" + local content="$3" + pct exec "$ctid" -- bash -lc "cat > '$dest' <<'EOF' +${content} +EOF" +} + +# Cluster VMID existence check (best effort) +# Uses pvesh cluster resources. If API not available, returns false (and caller can choose another approach). 
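+# Note: this is a best-effort guard only; if the cluster resource list cannot
+# be parsed it errs on the side of "not in use", and a real collision would
+# still surface later when `pct create` rejects the duplicate CTID.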
+pve_vmid_exists_cluster() { + local vmid="$1" + pvesh get /cluster/resources --output-format json 2>/dev/null \ + | python3 - <<'PY' "$vmid" || exit 0 +import json,sys +vmid=sys.argv[1] +try: + data=json.load(sys.stdin) +except Exception: + sys.exit(0) +for r in data: + if str(r.get("vmid",""))==str(vmid): + sys.exit(1) +sys.exit(0) +PY + [[ $? -eq 1 ]] +} + +# Your agreed CTID scheme: unix time - 1,000,000,000 +pve_ctid_from_unixtime() { + local ts="$1" + echo $(( ts - 1000000000 )) +} + +# ----- Generators / policies ----- + +# Avoid "tr: Broken pipe" by not piping random through tr|head. +gen_hex_64() { + # 64 hex chars = 32 bytes + openssl rand -hex 32 +} + +gen_mac() { + # locally administered unicast: 02:xx:xx:xx:xx:xx + printf '02:%02x:%02x:%02x:%02x:%02x\n' \ + "$((RANDOM%256))" "$((RANDOM%256))" "$((RANDOM%256))" "$((RANDOM%256))" "$((RANDOM%256))" +} + +password_policy_check() { + local p="$1" + [[ ${#p} -ge 8 ]] || return 1 + [[ "$p" =~ [0-9] ]] || return 1 + [[ "$p" =~ [A-Z] ]] || return 1 + return 0 +} + +gen_password_policy() { + # generate until it matches policy (no broken pipes, deterministic enough) + local p + while true; do + # 18 chars, base64-ish but remove confusing chars + p="$(openssl rand -base64 18 | tr -d '/+=' | cut -c1-16)" + # ensure at least one uppercase and number + p="${p}A1" + password_policy_check "$p" && { echo "$p"; return 0; } + done +} + +emit_json() { + # prints to stdout only; keep logs on stderr + cat +} + +# ----- n8n API helpers ----- +# These functions interact with n8n REST API inside a container + +# Login to n8n and save session cookie +# Usage: n8n_api_login +# Returns: 0 on success, 1 on failure +# Side effect: Creates /tmp/n8n_cookies.txt in the container +n8n_api_login() { + local ctid="$1" + local email="$2" + local password="$3" + local api_url="http://127.0.0.1:5678" + + info "n8n API: Logging in as ${email}..." + + # Escape special characters in password for JSON + local escaped_password + escaped_password=$(echo "$password" | sed 's/\\/\\\\/g; s/"/\\"/g') + + local response + response=$(pct exec "$ctid" -- bash -c "curl -sS -X POST '${api_url}/rest/login' \ + -H 'Content-Type: application/json' \ + -c /tmp/n8n_cookies.txt \ + -d '{\"email\":\"${email}\",\"password\":\"${escaped_password}\"}' 2>&1" || echo "CURL_FAILED") + + if [[ "$response" == *"CURL_FAILED"* ]] || [[ "$response" == *"error"* && "$response" != *"data"* ]]; then + warn "n8n API login failed: ${response}" + return 1 + fi + + info "n8n API: Login successful" + return 0 +} + +# Create PostgreSQL credential in n8n +# Usage: n8n_api_create_postgres_credential +# Returns: Credential ID on stdout, or empty on failure +n8n_api_create_postgres_credential() { + local ctid="$1" + local name="$2" + local host="$3" + local port="$4" + local database="$5" + local user="$6" + local password="$7" + local api_url="http://127.0.0.1:5678" + + info "n8n API: Creating PostgreSQL credential '${name}'..." 
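+    # Uses n8n's internal UI REST API (/rest/credentials) with the session
+    # cookie from n8n_api_login rather than the public /api/v1 API, so no API
+    # key is needed; endpoint paths may differ between n8n versions.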
+ + # Escape special characters in password for JSON + local escaped_password + escaped_password=$(echo "$password" | sed 's/\\/\\\\/g; s/"/\\"/g') + + local response + response=$(pct exec "$ctid" -- bash -c "curl -sS -X POST '${api_url}/rest/credentials' \ + -H 'Content-Type: application/json' \ + -b /tmp/n8n_cookies.txt \ + -d '{ + \"name\": \"${name}\", + \"type\": \"postgres\", + \"data\": { + \"host\": \"${host}\", + \"port\": ${port}, + \"database\": \"${database}\", + \"user\": \"${user}\", + \"password\": \"${escaped_password}\", + \"ssl\": \"disable\" + } + }' 2>&1" || echo "") + + # Extract credential ID from response + local cred_id + cred_id=$(echo "$response" | grep -oP '"id"\s*:\s*"\K[^"]+' | head -1 || echo "") + + if [[ -n "$cred_id" ]]; then + info "n8n API: PostgreSQL credential created with ID: ${cred_id}" + echo "$cred_id" + return 0 + else + warn "n8n API: Failed to create PostgreSQL credential: ${response}" + echo "" + return 1 + fi +} + +# Create Ollama credential in n8n +# Usage: n8n_api_create_ollama_credential +# Returns: Credential ID on stdout, or empty on failure +n8n_api_create_ollama_credential() { + local ctid="$1" + local name="$2" + local base_url="$3" + local api_url="http://127.0.0.1:5678" + + info "n8n API: Creating Ollama credential '${name}'..." + + local response + response=$(pct exec "$ctid" -- bash -c "curl -sS -X POST '${api_url}/rest/credentials' \ + -H 'Content-Type: application/json' \ + -b /tmp/n8n_cookies.txt \ + -d '{ + \"name\": \"${name}\", + \"type\": \"ollamaApi\", + \"data\": { + \"baseUrl\": \"${base_url}\" + } + }' 2>&1" || echo "") + + # Extract credential ID from response + local cred_id + cred_id=$(echo "$response" | grep -oP '"id"\s*:\s*"\K[^"]+' | head -1 || echo "") + + if [[ -n "$cred_id" ]]; then + info "n8n API: Ollama credential created with ID: ${cred_id}" + echo "$cred_id" + return 0 + else + warn "n8n API: Failed to create Ollama credential: ${response}" + echo "" + return 1 + fi +} + +# Import workflow into n8n +# Usage: n8n_api_import_workflow +# Returns: Workflow ID on stdout, or empty on failure +n8n_api_import_workflow() { + local ctid="$1" + local workflow_file="$2" + local api_url="http://127.0.0.1:5678" + + info "n8n API: Importing workflow from ${workflow_file}..." + + local response + response=$(pct exec "$ctid" -- bash -c "curl -sS -X POST '${api_url}/rest/workflows' \ + -H 'Content-Type: application/json' \ + -b /tmp/n8n_cookies.txt \ + -d @${workflow_file} 2>&1" || echo "") + + # Extract workflow ID from response + local workflow_id + workflow_id=$(echo "$response" | grep -oP '"id"\s*:\s*"\K[^"]+' | head -1 || echo "") + + if [[ -n "$workflow_id" ]]; then + info "n8n API: Workflow imported with ID: ${workflow_id}" + echo "$workflow_id" + return 0 + else + warn "n8n API: Failed to import workflow: ${response}" + echo "" + return 1 + fi +} + +# Activate workflow in n8n +# Usage: n8n_api_activate_workflow +# Returns: 0 on success, 1 on failure +n8n_api_activate_workflow() { + local ctid="$1" + local workflow_id="$2" + local api_url="http://127.0.0.1:5678" + + info "n8n API: Activating workflow ${workflow_id}..." 
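+    # Activation is done by PATCHing the workflow with {"active": true} on the
+    # internal /rest API; the success check below only greps the response body,
+    # so a changed response format would be reported as a failure rather than
+    # raising an error.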
+ + local response + response=$(pct exec "$ctid" -- bash -c "curl -sS -X PATCH '${api_url}/rest/workflows/${workflow_id}' \ + -H 'Content-Type: application/json' \ + -b /tmp/n8n_cookies.txt \ + -d '{\"active\": true}' 2>&1" || echo "") + + if [[ "$response" == *"\"active\":true"* ]] || [[ "$response" == *"\"active\": true"* ]]; then + info "n8n API: Workflow ${workflow_id} activated successfully" + return 0 + else + warn "n8n API: Failed to activate workflow: ${response}" + return 1 + fi +} + +# Generate RAG workflow JSON with credential IDs +# Usage: n8n_generate_rag_workflow_json +# Returns: Workflow JSON on stdout +n8n_generate_rag_workflow_json() { + local postgres_cred_id="$1" + local postgres_cred_name="${2:-PostgreSQL (local)}" + local ollama_cred_id="$3" + local ollama_cred_name="${4:-Ollama (local)}" + local ollama_model="${5:-llama3.2:3b}" + local embedding_model="${6:-nomic-embed-text:v1.5}" + + cat < +# Returns: JSON array of workflows on stdout +n8n_api_list_workflows() { + local ctid="$1" + local api_url="http://127.0.0.1:5678" + + info "n8n API: Listing workflows..." + + local response + response=$(pct exec "$ctid" -- bash -c "curl -sS -X GET '${api_url}/rest/workflows' \ + -H 'Content-Type: application/json' \ + -b /tmp/n8n_cookies.txt 2>&1" || echo "") + + echo "$response" + return 0 +} + +# Get workflow by name +# Usage: n8n_api_get_workflow_by_name +# Returns: Workflow ID on stdout, or empty if not found +n8n_api_get_workflow_by_name() { + local ctid="$1" + local workflow_name="$2" + + info "n8n API: Searching for workflow '${workflow_name}'..." + + local workflows + workflows=$(n8n_api_list_workflows "$ctid") + + # Extract workflow ID by name using grep and awk + local workflow_id + workflow_id=$(echo "$workflows" | grep -oP "\"name\":\s*\"${workflow_name}\".*?\"id\":\s*\"\K[^\"]+|\"id\":\s*\"\K[^\"]+(?=.*?\"name\":\s*\"${workflow_name}\")" | head -1 || echo "") + + if [[ -n "$workflow_id" ]]; then + info "n8n API: Found workflow '${workflow_name}' with ID: ${workflow_id}" + echo "$workflow_id" + return 0 + else + info "n8n API: Workflow '${workflow_name}' not found" + echo "" + return 1 + fi +} + +# Delete workflow by ID +# Usage: n8n_api_delete_workflow +# Returns: 0 on success, 1 on failure +n8n_api_delete_workflow() { + local ctid="$1" + local workflow_id="$2" + local api_url="http://127.0.0.1:5678" + + info "n8n API: Deleting workflow ${workflow_id}..." + + local response + response=$(pct exec "$ctid" -- bash -c "curl -sS -X DELETE '${api_url}/rest/workflows/${workflow_id}' \ + -H 'Content-Type: application/json' \ + -b /tmp/n8n_cookies.txt 2>&1" || echo "") + + # Check if deletion was successful (empty response or success message) + if [[ -z "$response" ]] || [[ "$response" == *"\"success\":true"* ]] || [[ "$response" == "{}" ]]; then + info "n8n API: Workflow ${workflow_id} deleted successfully" + return 0 + else + warn "n8n API: Failed to delete workflow: ${response}" + return 1 + fi +} + +# Get credential by name and type +# Usage: n8n_api_get_credential_by_name +# Returns: Credential ID on stdout, or empty if not found +n8n_api_get_credential_by_name() { + local ctid="$1" + local cred_name="$2" + local cred_type="$3" + local api_url="http://127.0.0.1:5678" + + info "n8n API: Searching for credential '${cred_name}' (type: ${cred_type})..." 
+ + local response + response=$(pct exec "$ctid" -- bash -c "curl -sS -X GET '${api_url}/rest/credentials' \ + -H 'Content-Type: application/json' \ + -b /tmp/n8n_cookies.txt 2>&1" || echo "") + + # Extract credential ID by name and type + local cred_id + cred_id=$(echo "$response" | grep -oP "\"name\":\s*\"${cred_name}\".*?\"type\":\s*\"${cred_type}\".*?\"id\":\s*\"\K[^\"]+|\"id\":\s*\"\K[^\"]+(?=.*?\"name\":\s*\"${cred_name}\".*?\"type\":\s*\"${cred_type}\")" | head -1 || echo "") + + if [[ -n "$cred_id" ]]; then + info "n8n API: Found credential '${cred_name}' with ID: ${cred_id}" + echo "$cred_id" + return 0 + else + info "n8n API: Credential '${cred_name}' not found" + echo "" + return 1 + fi +} + +# Cleanup n8n API session +# Usage: n8n_api_cleanup +n8n_api_cleanup() { + local ctid="$1" + pct exec "$ctid" -- bash -c "rm -f /tmp/n8n_cookies.txt /tmp/rag_workflow.json" 2>/dev/null || true +} + +# Full n8n setup: Create credentials, import workflow from file, activate +# This version runs all API calls in a single shell session to preserve cookies +# Usage: n8n_setup_rag_workflow +# Returns: 0 on success, 1 on failure +n8n_setup_rag_workflow() { + local ctid="$1" + local email="$2" + local password="$3" + local pg_host="$4" + local pg_port="$5" + local pg_db="$6" + local pg_user="$7" + local pg_pass="$8" + local ollama_url="$9" + local ollama_model="${10:-ministral-3:3b}" + local embedding_model="${11:-nomic-embed-text:latest}" + local workflow_file="${12:-}" + + info "n8n Setup: Starting RAG workflow setup..." + + # Validate workflow file + if [[ -z "$workflow_file" ]]; then + warn "n8n Setup: No workflow file specified, using built-in template" + workflow_file="" + elif [[ ! -f "$workflow_file" ]]; then + warn "n8n Setup: Workflow file not found: $workflow_file" + return 1 + else + info "n8n Setup: Using workflow file: $workflow_file" + fi + + # Wait for n8n to be ready + info "n8n Setup: Waiting for n8n to be ready..." + local i + for i in $(seq 1 30); do + if pct exec "$ctid" -- bash -c "curl -sS -o /dev/null -w '%{http_code}' http://127.0.0.1:5678/rest/settings 2>/dev/null" | grep -q "200"; then + info "n8n Setup: n8n is ready" + break + fi + sleep 2 + done + + # Escape special characters in passwords for JSON + local escaped_password + escaped_password=$(echo "$password" | sed 's/\\/\\\\/g; s/"/\\"/g') + local escaped_pg_pass + escaped_pg_pass=$(echo "$pg_pass" | sed 's/\\/\\\\/g; s/"/\\"/g') + + # Read workflow from file or generate from template + info "n8n Setup: Preparing workflow JSON..." + local workflow_json + if [[ -n "$workflow_file" && -f "$workflow_file" ]]; then + # Read workflow from external file + workflow_json=$(cat "$workflow_file") + info "n8n Setup: Loaded workflow from file: $workflow_file" + else + # Generate workflow from built-in template + workflow_json=$(n8n_generate_rag_workflow_json "POSTGRES_CRED_ID" "PostgreSQL (local)" "OLLAMA_CRED_ID" "Ollama (local)" "$ollama_model" "$embedding_model") + info "n8n Setup: Generated workflow from built-in template" + fi + + # Push workflow JSON to container (will be processed by setup script) + pct_push_text "$ctid" "/tmp/rag_workflow_template.json" "$workflow_json" + + # Create a setup script that runs all API calls in one session + info "n8n Setup: Creating setup script..." 
+ pct_push_text "$ctid" "/tmp/n8n_setup.sh" "$(cat < /tmp/process_workflow.py << 'PYTHON_SCRIPT' +import json +import sys + +# Read the workflow template +with open('/tmp/rag_workflow_template.json', 'r') as f: + workflow = json.load(f) + +# Get credential IDs from environment/arguments +pg_cred_id = sys.argv[1] +ollama_cred_id = sys.argv[2] + +# Remove fields that should not be in the import +fields_to_remove = ['id', 'versionId', 'meta', 'tags', 'active', 'pinData'] +for field in fields_to_remove: + workflow.pop(field, None) + +# Process all nodes and replace credential IDs +for node in workflow.get('nodes', []): + credentials = node.get('credentials', {}) + + # Replace PostgreSQL credential + if 'postgres' in credentials: + credentials['postgres'] = { + 'id': pg_cred_id, + 'name': 'PostgreSQL (local)' + } + + # Replace Ollama credential + if 'ollamaApi' in credentials: + credentials['ollamaApi'] = { + 'id': ollama_cred_id, + 'name': 'Ollama (local)' + } + +# Write the processed workflow +with open('/tmp/rag_workflow.json', 'w') as f: + json.dump(workflow, f) + +print("Workflow processed successfully") +PYTHON_SCRIPT + +# Run the Python script to process the workflow +python3 /tmp/process_workflow.py "\$PG_CRED_ID" "\$OLLAMA_CRED_ID" + +# Import workflow +echo "Importing workflow..." +WORKFLOW_RESP=\$(curl -sS -X POST "\${API_URL}/rest/workflows" \\ + -H "Content-Type: application/json" \\ + -b "\${COOKIE_FILE}" \\ + -d @/tmp/rag_workflow.json) + +WORKFLOW_ID=\$(echo "\$WORKFLOW_RESP" | grep -oP '"id"\s*:\s*"\K[^"]+' | head -1) +VERSION_ID=\$(echo "\$WORKFLOW_RESP" | grep -oP '"versionId"\s*:\s*"\K[^"]+' | head -1) +if [ -z "\$WORKFLOW_ID" ]; then + echo "WORKFLOW_IMPORT_FAILED: \$WORKFLOW_RESP" + exit 1 +fi +echo "Workflow imported: \$WORKFLOW_ID (version: \$VERSION_ID)" + +# Activate workflow using POST /activate endpoint with versionId +echo "Activating workflow..." +ACTIVATE_RESP=\$(curl -sS -X POST "\${API_URL}/rest/workflows/\${WORKFLOW_ID}/activate" \\ + -H "Content-Type: application/json" \\ + -b "\${COOKIE_FILE}" \\ + -d "{\"versionId\":\"\${VERSION_ID}\"}") + +if echo "\$ACTIVATE_RESP" | grep -q '"active":true\|"active": true'; then + echo "Workflow activated successfully" +else + echo "WORKFLOW_ACTIVATION_WARNING: \$ACTIVATE_RESP" +fi + +# Cleanup +rm -f "\${COOKIE_FILE}" /tmp/rag_workflow_template.json /tmp/rag_workflow.json + +# Output results +echo "SUCCESS" +echo "POSTGRES_CRED_ID=\$PG_CRED_ID" +echo "OLLAMA_CRED_ID=\$OLLAMA_CRED_ID" +echo "WORKFLOW_ID=\$WORKFLOW_ID" +SETUP_SCRIPT +)" + + # Make script executable and run it + pct exec "$ctid" -- chmod +x /tmp/n8n_setup.sh + + info "n8n Setup: Running setup script in container..." 
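+    # On success the setup script's final echo lines (see the end of the heredoc
+    # above) are expected to look like:
+    #   SUCCESS
+    #   POSTGRES_CRED_ID=<id>
+    #   OLLAMA_CRED_ID=<id>
+    #   WORKFLOW_ID=<id>
+    # The grep-based parsing below keys on exactly these markers.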
+ local setup_output + setup_output=$(pct exec "$ctid" -- /tmp/n8n_setup.sh 2>&1 || echo "SCRIPT_FAILED") + + # Log the output + info "n8n Setup: Script output:" + echo "$setup_output" | while read -r line; do + info " $line" + done + + # Check for success + if echo "$setup_output" | grep -q "^SUCCESS$"; then + # Extract IDs from output + local pg_cred_id ollama_cred_id workflow_id + pg_cred_id=$(echo "$setup_output" | grep "^POSTGRES_CRED_ID=" | cut -d= -f2) + ollama_cred_id=$(echo "$setup_output" | grep "^OLLAMA_CRED_ID=" | cut -d= -f2) + workflow_id=$(echo "$setup_output" | grep "^WORKFLOW_ID=" | cut -d= -f2) + + info "n8n Setup: RAG workflow setup completed successfully" + info "n8n Setup: Workflow ID: ${workflow_id}" + info "n8n Setup: PostgreSQL Credential ID: ${pg_cred_id}" + info "n8n Setup: Ollama Credential ID: ${ollama_cred_id}" + + # Cleanup setup script + pct exec "$ctid" -- rm -f /tmp/n8n_setup.sh 2>/dev/null || true + + return 0 + else + warn "n8n Setup: Setup script failed" + # Cleanup + pct exec "$ctid" -- rm -f /tmp/n8n_setup.sh /tmp/n8n_cookies.txt /tmp/rag_workflow_template.json /tmp/rag_workflow.json 2>/dev/null || true + return 1 + fi +} diff --git a/logs/install_2430046.log b/logs/install_2430046.log new file mode 100644 index 0000000..e69de29 diff --git a/logs/sb-1772907798.log b/logs/sb-1772907798.log new file mode 100644 index 0000000..fc7ca6f --- /dev/null +++ b/logs/sb-1772907798.log @@ -0,0 +1,1111 @@ +extracting archive '/var/lib/vz/template/cache/debian-12-standard_12.12-1_amd64.tar.zst' +Total bytes read: 522782720 (499MiB, 196MiB/s) +Detected container architecture: amd64 +Setting up 'proxmox-regenerate-snakeoil.service' to regenerate snakeoil certificate.. +Creating SSH host key 'ssh_host_rsa_key' - this may take some time ... +done: SHA256:zz/KHCpnpGnd3xtzhnRZ5VUr1MNMctsmDaOmb6Mvczw root@sb-1772907798 +Creating SSH host key 'ssh_host_ed25519_key' - this may take some time ... +done: SHA256:T+NX0SuD0Nnd12Iz7WefjBtZIZkVPbflvYof0wKRU5k root@sb-1772907798 +Creating SSH host key 'ssh_host_ecdsa_key' - this may take some time ... +done: SHA256:ZwZB29QrxEzvJ0sKwqG1TK+Wj+glzzDqDa0yfkry0xw root@sb-1772907798 +Acquire::http::Proxy "http://192.168.45.2:3142"; +Acquire::https::Proxy "http://192.168.45.2:3142"; +Get:1 http://deb.debian.org/debian bookworm InRelease [151 kB] +Get:2 http://security.debian.org bookworm-security InRelease [48.0 kB] +Get:3 http://deb.debian.org/debian bookworm-updates InRelease [55.4 kB] +Get:4 http://deb.debian.org/debian bookworm/main amd64 Packages [8792 kB] +Get:5 http://deb.debian.org/debian bookworm/main Translation-en [6108 kB] +Get:6 http://deb.debian.org/debian bookworm/contrib amd64 Packages [53.5 kB] +Get:7 http://deb.debian.org/debian bookworm/contrib Translation-en [48.4 kB] +Get:8 http://security.debian.org bookworm-security/main amd64 Packages [293 kB] +Get:9 http://deb.debian.org/debian bookworm-updates/main Translation-en [5448 B] +Get:10 http://security.debian.org bookworm-security/main Translation-en [178 kB] +Get:11 http://security.debian.org bookworm-security/contrib Translation-en [652 B] +Fetched 15.7 MB in 2s (9522 kB/s) +Reading package lists... +Reading package lists... +Building dependency tree... +ca-certificates is already the newest version (20230311+deb12u1). 
+The following additional packages will be installed: + dirmngr gnupg-l10n gnupg-utils gpg gpg-agent gpg-wks-client gpg-wks-server + gpgconf gpgsm gpgv libassuan0 libcurl4 libksba8 libnpth0 pinentry-curses +Suggested packages: + dbus-user-session pinentry-gnome3 tor parcimonie xloadimage scdaemon + pinentry-doc +The following NEW packages will be installed: + curl dirmngr gnupg gnupg-l10n gnupg-utils gpg gpg-agent gpg-wks-client + gpg-wks-server gpgconf gpgsm libassuan0 libcurl4 libksba8 libnpth0 + lsb-release pinentry-curses +The following packages will be upgraded: + gpgv +1 upgraded, 17 newly installed, 0 to remove and 20 not upgraded. +Need to get 9247 kB of archives. +After this operation, 17.4 MB of additional disk space will be used. +Get:1 http://deb.debian.org/debian bookworm/main amd64 gpgv amd64 2.2.40-1.1+deb12u2 [649 kB] +Get:2 http://deb.debian.org/debian bookworm/main amd64 libcurl4 amd64 7.88.1-10+deb12u14 [392 kB] +Get:3 http://deb.debian.org/debian bookworm/main amd64 curl amd64 7.88.1-10+deb12u14 [316 kB] +Get:4 http://deb.debian.org/debian bookworm/main amd64 libassuan0 amd64 2.5.5-5 [48.5 kB] +Get:5 http://deb.debian.org/debian bookworm/main amd64 gpgconf amd64 2.2.40-1.1+deb12u2 [565 kB] +Get:6 http://deb.debian.org/debian bookworm/main amd64 libksba8 amd64 1.6.3-2 [128 kB] +Get:7 http://deb.debian.org/debian bookworm/main amd64 libnpth0 amd64 1.6-3 [19.0 kB] +Get:8 http://deb.debian.org/debian bookworm/main amd64 dirmngr amd64 2.2.40-1.1+deb12u2 [793 kB] +Get:9 http://deb.debian.org/debian bookworm/main amd64 gnupg-l10n all 2.2.40-1.1+deb12u2 [1093 kB] +Get:10 http://deb.debian.org/debian bookworm/main amd64 gnupg-utils amd64 2.2.40-1.1+deb12u2 [927 kB] +Get:11 http://deb.debian.org/debian bookworm/main amd64 gpg amd64 2.2.40-1.1+deb12u2 [950 kB] +Get:12 http://deb.debian.org/debian bookworm/main amd64 pinentry-curses amd64 1.2.1-1 [77.4 kB] +Get:13 http://deb.debian.org/debian bookworm/main amd64 gpg-agent amd64 2.2.40-1.1+deb12u2 [695 kB] +Get:14 http://deb.debian.org/debian bookworm/main amd64 gpg-wks-client amd64 2.2.40-1.1+deb12u2 [541 kB] +Get:15 http://deb.debian.org/debian bookworm/main amd64 gpg-wks-server amd64 2.2.40-1.1+deb12u2 [531 kB] +Get:16 http://deb.debian.org/debian bookworm/main amd64 gpgsm amd64 2.2.40-1.1+deb12u2 [671 kB] +Get:17 http://deb.debian.org/debian bookworm/main amd64 gnupg all 2.2.40-1.1+deb12u2 [846 kB] +Get:18 http://deb.debian.org/debian bookworm/main amd64 lsb-release all 12.0-1 [6416 B] +apt-listchanges: Can't set locale; make sure $LC_* and $LANG are correct! +apt-listchanges: Reading changelogs... +perl: warning: Setting locale failed. +perl: warning: Please check that your locale settings: + LANGUAGE = (unset), + LC_ALL = (unset), + LANG = "en_US.UTF-8" + are supported and installed on your system. +perl: warning: Falling back to the standard locale ("C"). +locale: Cannot set LC_CTYPE to default locale: No such file or directory +locale: Cannot set LC_MESSAGES to default locale: No such file or directory +locale: Cannot set LC_ALL to default locale: No such file or directory +Fetched 9247 kB in 0s (125 MB/s) +(Reading database ... (Reading database ... 5% (Reading database ... 10% (Reading database ... 15% (Reading database ... 20% (Reading database ... 25% (Reading database ... 30% (Reading database ... 35% (Reading database ... 40% (Reading database ... 45% (Reading database ... 50% (Reading database ... 55% (Reading database ... 60% (Reading database ... 65% (Reading database ... 70% (Reading database ... 
75% (Reading database ... 80% (Reading database ... 85% (Reading database ... 90% (Reading database ... 95% (Reading database ... 100% (Reading database ... 19144 files and directories currently installed.) +Preparing to unpack .../gpgv_2.2.40-1.1+deb12u2_amd64.deb ... +Unpacking gpgv (2.2.40-1.1+deb12u2) over (2.2.40-1.1+deb12u1) ... +Setting up gpgv (2.2.40-1.1+deb12u2) ... +Selecting previously unselected package libcurl4:amd64. +(Reading database ... (Reading database ... 5% (Reading database ... 10% (Reading database ... 15% (Reading database ... 20% (Reading database ... 25% (Reading database ... 30% (Reading database ... 35% (Reading database ... 40% (Reading database ... 45% (Reading database ... 50% (Reading database ... 55% (Reading database ... 60% (Reading database ... 65% (Reading database ... 70% (Reading database ... 75% (Reading database ... 80% (Reading database ... 85% (Reading database ... 90% (Reading database ... 95% (Reading database ... 100% (Reading database ... 19144 files and directories currently installed.) +Preparing to unpack .../00-libcurl4_7.88.1-10+deb12u14_amd64.deb ... +Unpacking libcurl4:amd64 (7.88.1-10+deb12u14) ... +Selecting previously unselected package curl. +Preparing to unpack .../01-curl_7.88.1-10+deb12u14_amd64.deb ... +Unpacking curl (7.88.1-10+deb12u14) ... +Selecting previously unselected package libassuan0:amd64. +Preparing to unpack .../02-libassuan0_2.5.5-5_amd64.deb ... +Unpacking libassuan0:amd64 (2.5.5-5) ... +Selecting previously unselected package gpgconf. +Preparing to unpack .../03-gpgconf_2.2.40-1.1+deb12u2_amd64.deb ... +Unpacking gpgconf (2.2.40-1.1+deb12u2) ... +Selecting previously unselected package libksba8:amd64. +Preparing to unpack .../04-libksba8_1.6.3-2_amd64.deb ... +Unpacking libksba8:amd64 (1.6.3-2) ... +Selecting previously unselected package libnpth0:amd64. +Preparing to unpack .../05-libnpth0_1.6-3_amd64.deb ... +Unpacking libnpth0:amd64 (1.6-3) ... +Selecting previously unselected package dirmngr. +Preparing to unpack .../06-dirmngr_2.2.40-1.1+deb12u2_amd64.deb ... +Unpacking dirmngr (2.2.40-1.1+deb12u2) ... +Selecting previously unselected package gnupg-l10n. +Preparing to unpack .../07-gnupg-l10n_2.2.40-1.1+deb12u2_all.deb ... +Unpacking gnupg-l10n (2.2.40-1.1+deb12u2) ... +Selecting previously unselected package gnupg-utils. +Preparing to unpack .../08-gnupg-utils_2.2.40-1.1+deb12u2_amd64.deb ... +Unpacking gnupg-utils (2.2.40-1.1+deb12u2) ... +Selecting previously unselected package gpg. +Preparing to unpack .../09-gpg_2.2.40-1.1+deb12u2_amd64.deb ... +Unpacking gpg (2.2.40-1.1+deb12u2) ... +Selecting previously unselected package pinentry-curses. +Preparing to unpack .../10-pinentry-curses_1.2.1-1_amd64.deb ... +Unpacking pinentry-curses (1.2.1-1) ... +Selecting previously unselected package gpg-agent. +Preparing to unpack .../11-gpg-agent_2.2.40-1.1+deb12u2_amd64.deb ... +Unpacking gpg-agent (2.2.40-1.1+deb12u2) ... +Selecting previously unselected package gpg-wks-client. +Preparing to unpack .../12-gpg-wks-client_2.2.40-1.1+deb12u2_amd64.deb ... +Unpacking gpg-wks-client (2.2.40-1.1+deb12u2) ... +Selecting previously unselected package gpg-wks-server. +Preparing to unpack .../13-gpg-wks-server_2.2.40-1.1+deb12u2_amd64.deb ... +Unpacking gpg-wks-server (2.2.40-1.1+deb12u2) ... +Selecting previously unselected package gpgsm. +Preparing to unpack .../14-gpgsm_2.2.40-1.1+deb12u2_amd64.deb ... +Unpacking gpgsm (2.2.40-1.1+deb12u2) ... +Selecting previously unselected package gnupg. 
+Preparing to unpack .../15-gnupg_2.2.40-1.1+deb12u2_all.deb ... +Unpacking gnupg (2.2.40-1.1+deb12u2) ... +Selecting previously unselected package lsb-release. +Preparing to unpack .../16-lsb-release_12.0-1_all.deb ... +Unpacking lsb-release (12.0-1) ... +Setting up libksba8:amd64 (1.6.3-2) ... +Setting up libnpth0:amd64 (1.6-3) ... +Setting up libassuan0:amd64 (2.5.5-5) ... +Setting up gnupg-l10n (2.2.40-1.1+deb12u2) ... +Setting up gpgconf (2.2.40-1.1+deb12u2) ... +Setting up libcurl4:amd64 (7.88.1-10+deb12u14) ... +Setting up curl (7.88.1-10+deb12u14) ... +Setting up lsb-release (12.0-1) ... +Setting up gpg (2.2.40-1.1+deb12u2) ... +Setting up gnupg-utils (2.2.40-1.1+deb12u2) ... +Setting up pinentry-curses (1.2.1-1) ... +Setting up gpg-agent (2.2.40-1.1+deb12u2) ... +Created symlink /etc/systemd/user/sockets.target.wants/gpg-agent-browser.socket → /usr/lib/systemd/user/gpg-agent-browser.socket. +Created symlink /etc/systemd/user/sockets.target.wants/gpg-agent-extra.socket → /usr/lib/systemd/user/gpg-agent-extra.socket. +Created symlink /etc/systemd/user/sockets.target.wants/gpg-agent-ssh.socket → /usr/lib/systemd/user/gpg-agent-ssh.socket. +Created symlink /etc/systemd/user/sockets.target.wants/gpg-agent.socket → /usr/lib/systemd/user/gpg-agent.socket. +Setting up gpgsm (2.2.40-1.1+deb12u2) ... +Setting up dirmngr (2.2.40-1.1+deb12u2) ... +Created symlink /etc/systemd/user/sockets.target.wants/dirmngr.socket → /usr/lib/systemd/user/dirmngr.socket. +Setting up gpg-wks-server (2.2.40-1.1+deb12u2) ... +Setting up gpg-wks-client (2.2.40-1.1+deb12u2) ... +Setting up gnupg (2.2.40-1.1+deb12u2) ... +Processing triggers for man-db (2.11.2-2) ... +Processing triggers for libc-bin (2.36-9+deb12u13) ... +Hit:1 http://deb.debian.org/debian bookworm InRelease +Hit:2 http://security.debian.org bookworm-security InRelease +Hit:3 http://deb.debian.org/debian bookworm-updates InRelease +Reading package lists... +Reading package lists... +Building dependency tree... +Reading state information... +locales is already the newest version (2.36-9+deb12u13). +ca-certificates is already the newest version (20230311+deb12u1). +curl is already the newest version (7.88.1-10+deb12u14). +gnupg is already the newest version (2.2.40-1.1+deb12u2). +lsb-release is already the newest version (12.0-1). +0 upgraded, 0 newly installed, 0 to remove and 20 not upgraded. +Hit:1 http://deb.debian.org/debian bookworm InRelease +Hit:2 http://security.debian.org bookworm-security InRelease +Hit:3 http://deb.debian.org/debian bookworm-updates InRelease +Get:4 https://download.docker.com/linux/debian bookworm InRelease [46.6 kB] +Get:5 https://download.docker.com/linux/debian bookworm/stable amd64 Packages [63.6 kB] +Fetched 110 kB in 0s (264 kB/s) +Reading package lists... +Reading package lists... +Building dependency tree... +Reading state information... 
+The following additional packages will be installed: + apparmor dbus-user-session docker-ce-rootless-extras git git-man iptables + liberror-perl libglib2.0-0 libglib2.0-data libip6tc2 libnetfilter-conntrack3 + libnfnetlink0 libslirp0 patch pigz shared-mime-info slirp4netns + xdg-user-dirs +Suggested packages: + apparmor-profiles-extra apparmor-utils cgroupfs-mount | cgroup-lite + docker-model-plugin git-daemon-run | git-daemon-sysvinit git-doc git-email + git-gui gitk gitweb git-cvs git-mediawiki git-svn firewalld + low-memory-monitor ed diffutils-doc +The following NEW packages will be installed: + apparmor containerd.io dbus-user-session docker-buildx-plugin docker-ce + docker-ce-cli docker-ce-rootless-extras docker-compose-plugin git git-man + iptables liberror-perl libglib2.0-0 libglib2.0-data libip6tc2 + libnetfilter-conntrack3 libnfnetlink0 libslirp0 patch pigz shared-mime-info + slirp4netns xdg-user-dirs +0 upgraded, 23 newly installed, 0 to remove and 20 not upgraded. +Need to get 111 MB of archives. +After this operation, 466 MB of additional disk space will be used. +Get:1 http://deb.debian.org/debian bookworm/main amd64 libip6tc2 amd64 1.8.9-2 [19.4 kB] +Get:2 http://deb.debian.org/debian bookworm/main amd64 libnfnetlink0 amd64 1.0.2-2 [15.1 kB] +Get:3 http://deb.debian.org/debian bookworm/main amd64 libnetfilter-conntrack3 amd64 1.0.9-3 [40.7 kB] +Get:4 http://deb.debian.org/debian bookworm/main amd64 iptables amd64 1.8.9-2 [360 kB] +Get:5 http://deb.debian.org/debian bookworm/main amd64 pigz amd64 2.6-1 [64.0 kB] +Get:6 http://deb.debian.org/debian bookworm/main amd64 apparmor amd64 3.0.8-3 [616 kB] +Get:7 http://deb.debian.org/debian bookworm/main amd64 dbus-user-session amd64 1.14.10-1~deb12u1 [78.1 kB] +Get:8 http://deb.debian.org/debian bookworm/main amd64 liberror-perl all 0.17029-2 [29.0 kB] +Get:9 http://deb.debian.org/debian bookworm/main amd64 git-man all 1:2.39.5-0+deb12u3 [2,053 kB] +Get:10 http://deb.debian.org/debian bookworm/main amd64 git amd64 1:2.39.5-0+deb12u3 [7,264 kB] +Get:11 http://deb.debian.org/debian bookworm/main amd64 libglib2.0-0 amd64 2.74.6-2+deb12u8 [1,402 kB] +Get:12 http://deb.debian.org/debian bookworm/main amd64 libglib2.0-data all 2.74.6-2+deb12u8 [1,210 kB] +Get:13 http://deb.debian.org/debian bookworm/main amd64 libslirp0 amd64 4.7.0-1 [63.0 kB] +Get:14 http://deb.debian.org/debian bookworm/main amd64 patch amd64 2.7.6-7 [128 kB] +Get:15 http://deb.debian.org/debian bookworm/main amd64 shared-mime-info amd64 2.2-1 [729 kB] +Get:16 http://deb.debian.org/debian bookworm/main amd64 slirp4netns amd64 1.2.0-1 [37.5 kB] +Get:17 http://deb.debian.org/debian bookworm/main amd64 xdg-user-dirs amd64 0.18-1 [54.4 kB] +Get:18 https://download.docker.com/linux/debian bookworm/stable amd64 containerd.io amd64 2.2.1-1~debian.12~bookworm [23.4 MB] +Get:19 https://download.docker.com/linux/debian bookworm/stable amd64 docker-ce-cli amd64 5:29.3.0-1~debian.12~bookworm [16.4 MB] +Get:20 https://download.docker.com/linux/debian bookworm/stable amd64 docker-ce amd64 5:29.3.0-1~debian.12~bookworm [22.5 MB] +Get:21 https://download.docker.com/linux/debian bookworm/stable amd64 docker-buildx-plugin amd64 0.31.1-1~debian.12~bookworm [20.2 MB] +Get:22 https://download.docker.com/linux/debian bookworm/stable amd64 docker-ce-rootless-extras amd64 5:29.3.0-1~debian.12~bookworm [6,389 kB] +Get:23 https://download.docker.com/linux/debian bookworm/stable amd64 docker-compose-plugin amd64 5.1.0-1~debian.12~bookworm [7,847 kB] +Preconfiguring packages ... 
+Fetched 111 MB in 1s (85.4 MB/s) +Selecting previously unselected package containerd.io. +(Reading database ... (Reading database ... 5% (Reading database ... 10% (Reading database ... 15% (Reading database ... 20% (Reading database ... 25% (Reading database ... 30% (Reading database ... 35% (Reading database ... 40% (Reading database ... 45% (Reading database ... 50% (Reading database ... 55% (Reading database ... 60% (Reading database ... 65% (Reading database ... 70% (Reading database ... 75% (Reading database ... 80% (Reading database ... 85% (Reading database ... 90% (Reading database ... 95% (Reading database ... 100% (Reading database ... 19417 files and directories currently installed.) +Preparing to unpack .../00-containerd.io_2.2.1-1~debian.12~bookworm_amd64.deb ... +Unpacking containerd.io (2.2.1-1~debian.12~bookworm) ... +Selecting previously unselected package docker-ce-cli. +Preparing to unpack .../01-docker-ce-cli_5%3a29.3.0-1~debian.12~bookworm_amd64.deb ... +Unpacking docker-ce-cli (5:29.3.0-1~debian.12~bookworm) ... +Selecting previously unselected package libip6tc2:amd64. +Preparing to unpack .../02-libip6tc2_1.8.9-2_amd64.deb ... +Unpacking libip6tc2:amd64 (1.8.9-2) ... +Selecting previously unselected package libnfnetlink0:amd64. +Preparing to unpack .../03-libnfnetlink0_1.0.2-2_amd64.deb ... +Unpacking libnfnetlink0:amd64 (1.0.2-2) ... +Selecting previously unselected package libnetfilter-conntrack3:amd64. +Preparing to unpack .../04-libnetfilter-conntrack3_1.0.9-3_amd64.deb ... +Unpacking libnetfilter-conntrack3:amd64 (1.0.9-3) ... +Selecting previously unselected package iptables. +Preparing to unpack .../05-iptables_1.8.9-2_amd64.deb ... +Unpacking iptables (1.8.9-2) ... +Selecting previously unselected package docker-ce. +Preparing to unpack .../06-docker-ce_5%3a29.3.0-1~debian.12~bookworm_amd64.deb ... +Unpacking docker-ce (5:29.3.0-1~debian.12~bookworm) ... +Selecting previously unselected package pigz. +Preparing to unpack .../07-pigz_2.6-1_amd64.deb ... +Unpacking pigz (2.6-1) ... +Selecting previously unselected package apparmor. +Preparing to unpack .../08-apparmor_3.0.8-3_amd64.deb ... +Unpacking apparmor (3.0.8-3) ... +Selecting previously unselected package dbus-user-session. +Preparing to unpack .../09-dbus-user-session_1.14.10-1~deb12u1_amd64.deb ... +Unpacking dbus-user-session (1.14.10-1~deb12u1) ... +Selecting previously unselected package docker-buildx-plugin. +Preparing to unpack .../10-docker-buildx-plugin_0.31.1-1~debian.12~bookworm_amd64.deb ... +Unpacking docker-buildx-plugin (0.31.1-1~debian.12~bookworm) ... +Selecting previously unselected package docker-ce-rootless-extras. +Preparing to unpack .../11-docker-ce-rootless-extras_5%3a29.3.0-1~debian.12~bookworm_amd64.deb ... +Unpacking docker-ce-rootless-extras (5:29.3.0-1~debian.12~bookworm) ... +Selecting previously unselected package docker-compose-plugin. +Preparing to unpack .../12-docker-compose-plugin_5.1.0-1~debian.12~bookworm_amd64.deb ... +Unpacking docker-compose-plugin (5.1.0-1~debian.12~bookworm) ... +Selecting previously unselected package liberror-perl. +Preparing to unpack .../13-liberror-perl_0.17029-2_all.deb ... +Unpacking liberror-perl (0.17029-2) ... +Selecting previously unselected package git-man. +Preparing to unpack .../14-git-man_1%3a2.39.5-0+deb12u3_all.deb ... +Unpacking git-man (1:2.39.5-0+deb12u3) ... +Selecting previously unselected package git. +Preparing to unpack .../15-git_1%3a2.39.5-0+deb12u3_amd64.deb ... +Unpacking git (1:2.39.5-0+deb12u3) ... 
+Selecting previously unselected package libglib2.0-0:amd64. +Preparing to unpack .../16-libglib2.0-0_2.74.6-2+deb12u8_amd64.deb ... +Unpacking libglib2.0-0:amd64 (2.74.6-2+deb12u8) ... +Selecting previously unselected package libglib2.0-data. +Preparing to unpack .../17-libglib2.0-data_2.74.6-2+deb12u8_all.deb ... +Unpacking libglib2.0-data (2.74.6-2+deb12u8) ... +Selecting previously unselected package libslirp0:amd64. +Preparing to unpack .../18-libslirp0_4.7.0-1_amd64.deb ... +Unpacking libslirp0:amd64 (4.7.0-1) ... +Selecting previously unselected package patch. +Preparing to unpack .../19-patch_2.7.6-7_amd64.deb ... +Unpacking patch (2.7.6-7) ... +Selecting previously unselected package shared-mime-info. +Preparing to unpack .../20-shared-mime-info_2.2-1_amd64.deb ... +Unpacking shared-mime-info (2.2-1) ... +Selecting previously unselected package slirp4netns. +Preparing to unpack .../21-slirp4netns_1.2.0-1_amd64.deb ... +Unpacking slirp4netns (1.2.0-1) ... +Selecting previously unselected package xdg-user-dirs. +Preparing to unpack .../22-xdg-user-dirs_0.18-1_amd64.deb ... +Unpacking xdg-user-dirs (0.18-1) ... +Setting up xdg-user-dirs (0.18-1) ... +Setting up libip6tc2:amd64 (1.8.9-2) ... +Setting up libglib2.0-0:amd64 (2.74.6-2+deb12u8) ... +No schema files found: doing nothing. +Setting up liberror-perl (0.17029-2) ... +Setting up apparmor (3.0.8-3) ... +Created symlink /etc/systemd/system/sysinit.target.wants/apparmor.service → /lib/systemd/system/apparmor.service. +Setting up dbus-user-session (1.14.10-1~deb12u1) ... +Setting up docker-buildx-plugin (0.31.1-1~debian.12~bookworm) ... +Setting up libglib2.0-data (2.74.6-2+deb12u8) ... +Setting up shared-mime-info (2.2-1) ... +Setting up containerd.io (2.2.1-1~debian.12~bookworm) ... +Created symlink /etc/systemd/system/multi-user.target.wants/containerd.service → /lib/systemd/system/containerd.service. +Setting up patch (2.7.6-7) ... +Setting up docker-compose-plugin (5.1.0-1~debian.12~bookworm) ... +Setting up docker-ce-cli (5:29.3.0-1~debian.12~bookworm) ... +Setting up libslirp0:amd64 (4.7.0-1) ... +Setting up pigz (2.6-1) ... +Setting up libnfnetlink0:amd64 (1.0.2-2) ... +Setting up git-man (1:2.39.5-0+deb12u3) ... +Setting up docker-ce-rootless-extras (5:29.3.0-1~debian.12~bookworm) ... +Setting up slirp4netns (1.2.0-1) ... +Setting up git (1:2.39.5-0+deb12u3) ... +Setting up libnetfilter-conntrack3:amd64 (1.0.9-3) ... +Setting up iptables (1.8.9-2) ... +update-alternatives: using /usr/sbin/iptables-legacy to provide /usr/sbin/iptables (iptables) in auto mode +update-alternatives: using /usr/sbin/ip6tables-legacy to provide /usr/sbin/ip6tables (ip6tables) in auto mode +update-alternatives: using /usr/sbin/iptables-nft to provide /usr/sbin/iptables (iptables) in auto mode +update-alternatives: using /usr/sbin/ip6tables-nft to provide /usr/sbin/ip6tables (ip6tables) in auto mode +update-alternatives: using /usr/sbin/arptables-nft to provide /usr/sbin/arptables (arptables) in auto mode +update-alternatives: using /usr/sbin/ebtables-nft to provide /usr/sbin/ebtables (ebtables) in auto mode +Setting up docker-ce (5:29.3.0-1~debian.12~bookworm) ... +Created symlink /etc/systemd/system/multi-user.target.wants/docker.service → /lib/systemd/system/docker.service. +Created symlink /etc/systemd/system/sockets.target.wants/docker.socket → /lib/systemd/system/docker.socket. +Processing triggers for man-db (2.11.2-2) ... +Processing triggers for libc-bin (2.36-9+deb12u13) ... 
+active
+ Registry Mirrors:
+  http://192.168.45.2:5000/
+ Live Restore Enabled: false
+ Image n8nio/n8n:latest Pulling
+ Image pgvector/pgvector:pg16 Pulling
+ Image postgrest/postgrest:latest Pulling
+ [... per-layer download/extract progress omitted ...]
+ Image postgrest/postgrest:latest Pulled
+ Image pgvector/pgvector:pg16 Pulled
+ Image n8nio/n8n:latest Pulled
+ Network customer-stack_customer-net Creating
+ Network customer-stack_customer-net Created
+ Container customer-postgres Creating
+ Container customer-postgres Created
+ Container customer-postgrest Creating
+ Container customer-postgrest Created
+ Container n8n Creating
+ Container n8n Created
+ Container customer-postgres Starting
+ Container customer-postgres Started
+ Container customer-postgres Waiting
+ Container customer-postgres Healthy
+ Container customer-postgrest Starting
+ Container customer-postgrest Started
+ Container customer-postgres Waiting
+ Container customer-postgres Healthy
+ Container n8n Starting
+ Container n8n Started
+NAME IMAGE COMMAND SERVICE CREATED STATUS PORTS
+customer-postgres pgvector/pgvector:pg16 "docker-entrypoint.s…" postgres 12 seconds ago Up 12 seconds (healthy) 5432/tcp
+customer-postgrest postgrest/postgrest:latest "/bin/postgrest" postgrest 12 seconds ago Up 1 second 0.0.0.0:3000->3000/tcp, [::]:3000->3000/tcp
+n8n n8nio/n8n:latest "tini -- /docker-ent…" n8n 12 seconds ago Up Less than a second 0.0.0.0:5678->5678/tcp, [::]:5678->5678/tcp
+{"data":{"createdAt":"2026-03-07T18:25:23.519Z","id":"dd6bb44c-a828-4266-a6a7-7dfd84bb0cf1","email":"admin@userman.de","firstName":"Admin","lastName":"Owner","personalizationAnswers":null,"settings":{"userActivated":false},"disabled":false,"mfaEnabled":false,"lastActiveAt":"2026-03-07T18:25:32.836Z","isPending":false,"role":"global:owner","signInType":"email","isOwner":true,"featureFlags":{},"globalScopes":["aiAssistant:manage","annotationTag:create","annotationTag:delete","annotationTag:list","annotationTag:read","annotationTag:update","apiKey:manage","auditLogs:manage","banner:dismiss","breakingChanges:list","chatHubAgent:create","chatHubAgent:delete","chatHubAgent:list","chatHubAgent:read","chatHubAgent:update","chatHub:manage","chatHub:message","communityPackage:install","communityPackage:list","communityPackage:uninstall","communityPackage:update","community:register","credential:create","credential:delete","credential:list","credential:move","credential:read","credentialResolver:create","credentialResolver:delete","credentialResolver:list","credentialResolver:read","credentialResolver:update","credential:share","credential:shareGlobally","credential:update","dataTable:create","dataTable:delete","dataTable:list","dataTable:listProject","dataTable:read","dataTable:readRow","dataTable:update","dataTable:writeRow","eventBusDestination:create","eventBusDestination:delete","eventBusDestination:list","eventBusDestination:read","eventBusDestination:test","eventBusDestination:update","externalSecret:list","externalSecretsProvider:create","externalSecretsProvider:delete","externalSecretsProvider:list","externalSecretsProvider:read","externalSecretsProvider:sync","externalSecretsProvider:update","folder:create","folder:delete","folder:list","folder:move","folder:read","folder:update","insights:list","ldap:manage","ldap:sync","license:manage","logStreaming:manage","mcpApiKey:create","mcpApiKey:rotate","mcp:manage","mcp:oauth","oidc:manage","orchestration:read","project:create","project:delete","project:list","project:read","project:update","projectVariable:create","projectVariable:delete","projectVariable:list","projectVariable:read","projectVariable:update","provisioning:manage","role:manage","saml:manage","securityAudit:generate","securitySettings:manage","sourceControl:manage","sourceControl:pull","sourceControl:push","tag:create","tag:delete","tag:list","tag:read","tag:update","user:changeRole","user:create","user:delete","user:enforceMfa","user:generateInviteLink","user:list","user:read","user:resetPassword","user:update","variable:create","variable:delete","variable:list","variable:read","variable:update","workersView:manage","workflow:create","workflow:delete","workflow:execute","workflow:execute-chat","workflow:list","workflow:move","workflow:publish","workflow:read","workflow:share","workflow:update"],"mfaAuthenticated":false}}cat: /root/installer/templates/reload-workflow.sh: No such file or directory diff --git a/logs/sb-1772907999.log b/logs/sb-1772907999.log new file mode 100644 index 0000000..cedd08e --- /dev/null +++ b/logs/sb-1772907999.log @@ -0,0 +1,1093 @@ +extracting archive '/var/lib/vz/template/cache/debian-12-standard_12.12-1_amd64.tar.zst' +Total bytes read: 522782720 (499MiB, 187MiB/s) +Detected container architecture: amd64 +Setting up 'proxmox-regenerate-snakeoil.service' to regenerate snakeoil certificate.. +Creating SSH host key 'ssh_host_ecdsa_key' - this may take some time ... 
+done: SHA256:aqKPuKxZWrSF+SVYSPNRTr6mMbevVL67EMS3XluvZQU root@sb-1772907999 +Creating SSH host key 'ssh_host_rsa_key' - this may take some time ... +done: SHA256:PDtuJmzGFTz0ZqmeFPoV67f4bcSVck/H4Dh/gMhg6dE root@sb-1772907999 +Creating SSH host key 'ssh_host_ed25519_key' - this may take some time ... +done: SHA256:5KTptds4Yz1Icp9eoymfTlOJqI+666FMGbf2q2KhlLA root@sb-1772907999 +Acquire::http::Proxy "http://192.168.45.2:3142"; +Acquire::https::Proxy "http://192.168.45.2:3142"; +Get:1 http://deb.debian.org/debian bookworm InRelease [151 kB] +Get:2 http://security.debian.org bookworm-security InRelease [48.0 kB] +Get:3 http://deb.debian.org/debian bookworm-updates InRelease [55.4 kB] +Get:4 http://deb.debian.org/debian bookworm/main amd64 Packages [8792 kB] +Get:5 http://deb.debian.org/debian bookworm/main Translation-en [6108 kB] +Get:6 http://deb.debian.org/debian bookworm/contrib amd64 Packages [53.5 kB] +Get:7 http://deb.debian.org/debian bookworm/contrib Translation-en [48.4 kB] +Get:8 http://security.debian.org bookworm-security/main amd64 Packages [293 kB] +Get:9 http://security.debian.org bookworm-security/main Translation-en [178 kB] +Get:10 http://security.debian.org bookworm-security/contrib Translation-en [652 B] +Get:11 http://deb.debian.org/debian bookworm-updates/main Translation-en [5448 B] +Fetched 15.7 MB in 2s (9634 kB/s) +Reading package lists... +Reading package lists... +Building dependency tree... +ca-certificates is already the newest version (20230311+deb12u1). +The following additional packages will be installed: + dirmngr gnupg-l10n gnupg-utils gpg gpg-agent gpg-wks-client gpg-wks-server + gpgconf gpgsm gpgv libassuan0 libcurl4 libksba8 libnpth0 pinentry-curses +Suggested packages: + dbus-user-session pinentry-gnome3 tor parcimonie xloadimage scdaemon + pinentry-doc +The following NEW packages will be installed: + curl dirmngr gnupg gnupg-l10n gnupg-utils gpg gpg-agent gpg-wks-client + gpg-wks-server gpgconf gpgsm libassuan0 libcurl4 libksba8 libnpth0 + lsb-release pinentry-curses +The following packages will be upgraded: + gpgv +1 upgraded, 17 newly installed, 0 to remove and 20 not upgraded. +Need to get 9247 kB of archives. +After this operation, 17.4 MB of additional disk space will be used. 
+Get:1 http://deb.debian.org/debian bookworm/main amd64 gpgv amd64 2.2.40-1.1+deb12u2 [649 kB] +Get:2 http://deb.debian.org/debian bookworm/main amd64 libcurl4 amd64 7.88.1-10+deb12u14 [392 kB] +Get:3 http://deb.debian.org/debian bookworm/main amd64 curl amd64 7.88.1-10+deb12u14 [316 kB] +Get:4 http://deb.debian.org/debian bookworm/main amd64 libassuan0 amd64 2.5.5-5 [48.5 kB] +Get:5 http://deb.debian.org/debian bookworm/main amd64 gpgconf amd64 2.2.40-1.1+deb12u2 [565 kB] +Get:6 http://deb.debian.org/debian bookworm/main amd64 libksba8 amd64 1.6.3-2 [128 kB] +Get:7 http://deb.debian.org/debian bookworm/main amd64 libnpth0 amd64 1.6-3 [19.0 kB] +Get:8 http://deb.debian.org/debian bookworm/main amd64 dirmngr amd64 2.2.40-1.1+deb12u2 [793 kB] +Get:9 http://deb.debian.org/debian bookworm/main amd64 gnupg-l10n all 2.2.40-1.1+deb12u2 [1093 kB] +Get:10 http://deb.debian.org/debian bookworm/main amd64 gnupg-utils amd64 2.2.40-1.1+deb12u2 [927 kB] +Get:11 http://deb.debian.org/debian bookworm/main amd64 gpg amd64 2.2.40-1.1+deb12u2 [950 kB] +Get:12 http://deb.debian.org/debian bookworm/main amd64 pinentry-curses amd64 1.2.1-1 [77.4 kB] +Get:13 http://deb.debian.org/debian bookworm/main amd64 gpg-agent amd64 2.2.40-1.1+deb12u2 [695 kB] +Get:14 http://deb.debian.org/debian bookworm/main amd64 gpg-wks-client amd64 2.2.40-1.1+deb12u2 [541 kB] +Get:15 http://deb.debian.org/debian bookworm/main amd64 gpg-wks-server amd64 2.2.40-1.1+deb12u2 [531 kB] +Get:16 http://deb.debian.org/debian bookworm/main amd64 gpgsm amd64 2.2.40-1.1+deb12u2 [671 kB] +Get:17 http://deb.debian.org/debian bookworm/main amd64 gnupg all 2.2.40-1.1+deb12u2 [846 kB] +Get:18 http://deb.debian.org/debian bookworm/main amd64 lsb-release all 12.0-1 [6416 B] +apt-listchanges: Can't set locale; make sure $LC_* and $LANG are correct! +apt-listchanges: Reading changelogs... +perl: warning: Setting locale failed. +perl: warning: Please check that your locale settings: + LANGUAGE = (unset), + LC_ALL = (unset), + LANG = "en_US.UTF-8" + are supported and installed on your system. +perl: warning: Falling back to the standard locale ("C"). +locale: Cannot set LC_CTYPE to default locale: No such file or directory +locale: Cannot set LC_MESSAGES to default locale: No such file or directory +locale: Cannot set LC_ALL to default locale: No such file or directory +Fetched 9247 kB in 0s (153 MB/s) +(Reading database ... (Reading database ... 5% (Reading database ... 10% (Reading database ... 15% (Reading database ... 20% (Reading database ... 25% (Reading database ... 30% (Reading database ... 35% (Reading database ... 40% (Reading database ... 45% (Reading database ... 50% (Reading database ... 55% (Reading database ... 60% (Reading database ... 65% (Reading database ... 70% (Reading database ... 75% (Reading database ... 80% (Reading database ... 85% (Reading database ... 90% (Reading database ... 95% (Reading database ... 100% (Reading database ... 19144 files and directories currently installed.) +Preparing to unpack .../gpgv_2.2.40-1.1+deb12u2_amd64.deb ... +Unpacking gpgv (2.2.40-1.1+deb12u2) over (2.2.40-1.1+deb12u1) ... +Setting up gpgv (2.2.40-1.1+deb12u2) ... +Selecting previously unselected package libcurl4:amd64. +(Reading database ... (Reading database ... 5% (Reading database ... 10% (Reading database ... 15% (Reading database ... 20% (Reading database ... 25% (Reading database ... 30% (Reading database ... 35% (Reading database ... 40% (Reading database ... 45% (Reading database ... 50% (Reading database ... 55% (Reading database ... 
60% (Reading database ... 65% (Reading database ... 70% (Reading database ... 75% (Reading database ... 80% (Reading database ... 85% (Reading database ... 90% (Reading database ... 95% (Reading database ... 100% (Reading database ... 19144 files and directories currently installed.) +Preparing to unpack .../00-libcurl4_7.88.1-10+deb12u14_amd64.deb ... +Unpacking libcurl4:amd64 (7.88.1-10+deb12u14) ... +Selecting previously unselected package curl. +Preparing to unpack .../01-curl_7.88.1-10+deb12u14_amd64.deb ... +Unpacking curl (7.88.1-10+deb12u14) ... +Selecting previously unselected package libassuan0:amd64. +Preparing to unpack .../02-libassuan0_2.5.5-5_amd64.deb ... +Unpacking libassuan0:amd64 (2.5.5-5) ... +Selecting previously unselected package gpgconf. +Preparing to unpack .../03-gpgconf_2.2.40-1.1+deb12u2_amd64.deb ... +Unpacking gpgconf (2.2.40-1.1+deb12u2) ... +Selecting previously unselected package libksba8:amd64. +Preparing to unpack .../04-libksba8_1.6.3-2_amd64.deb ... +Unpacking libksba8:amd64 (1.6.3-2) ... +Selecting previously unselected package libnpth0:amd64. +Preparing to unpack .../05-libnpth0_1.6-3_amd64.deb ... +Unpacking libnpth0:amd64 (1.6-3) ... +Selecting previously unselected package dirmngr. +Preparing to unpack .../06-dirmngr_2.2.40-1.1+deb12u2_amd64.deb ... +Unpacking dirmngr (2.2.40-1.1+deb12u2) ... +Selecting previously unselected package gnupg-l10n. +Preparing to unpack .../07-gnupg-l10n_2.2.40-1.1+deb12u2_all.deb ... +Unpacking gnupg-l10n (2.2.40-1.1+deb12u2) ... +Selecting previously unselected package gnupg-utils. +Preparing to unpack .../08-gnupg-utils_2.2.40-1.1+deb12u2_amd64.deb ... +Unpacking gnupg-utils (2.2.40-1.1+deb12u2) ... +Selecting previously unselected package gpg. +Preparing to unpack .../09-gpg_2.2.40-1.1+deb12u2_amd64.deb ... +Unpacking gpg (2.2.40-1.1+deb12u2) ... +Selecting previously unselected package pinentry-curses. +Preparing to unpack .../10-pinentry-curses_1.2.1-1_amd64.deb ... +Unpacking pinentry-curses (1.2.1-1) ... +Selecting previously unselected package gpg-agent. +Preparing to unpack .../11-gpg-agent_2.2.40-1.1+deb12u2_amd64.deb ... +Unpacking gpg-agent (2.2.40-1.1+deb12u2) ... +Selecting previously unselected package gpg-wks-client. +Preparing to unpack .../12-gpg-wks-client_2.2.40-1.1+deb12u2_amd64.deb ... +Unpacking gpg-wks-client (2.2.40-1.1+deb12u2) ... +Selecting previously unselected package gpg-wks-server. +Preparing to unpack .../13-gpg-wks-server_2.2.40-1.1+deb12u2_amd64.deb ... +Unpacking gpg-wks-server (2.2.40-1.1+deb12u2) ... +Selecting previously unselected package gpgsm. +Preparing to unpack .../14-gpgsm_2.2.40-1.1+deb12u2_amd64.deb ... +Unpacking gpgsm (2.2.40-1.1+deb12u2) ... +Selecting previously unselected package gnupg. +Preparing to unpack .../15-gnupg_2.2.40-1.1+deb12u2_all.deb ... +Unpacking gnupg (2.2.40-1.1+deb12u2) ... +Selecting previously unselected package lsb-release. +Preparing to unpack .../16-lsb-release_12.0-1_all.deb ... +Unpacking lsb-release (12.0-1) ... +Setting up libksba8:amd64 (1.6.3-2) ... +Setting up libnpth0:amd64 (1.6-3) ... +Setting up libassuan0:amd64 (2.5.5-5) ... +Setting up gnupg-l10n (2.2.40-1.1+deb12u2) ... +Setting up gpgconf (2.2.40-1.1+deb12u2) ... +Setting up libcurl4:amd64 (7.88.1-10+deb12u14) ... +Setting up curl (7.88.1-10+deb12u14) ... +Setting up lsb-release (12.0-1) ... +Setting up gpg (2.2.40-1.1+deb12u2) ... +Setting up gnupg-utils (2.2.40-1.1+deb12u2) ... +Setting up pinentry-curses (1.2.1-1) ... +Setting up gpg-agent (2.2.40-1.1+deb12u2) ... 
+Created symlink /etc/systemd/user/sockets.target.wants/gpg-agent-browser.socket → /usr/lib/systemd/user/gpg-agent-browser.socket. +Created symlink /etc/systemd/user/sockets.target.wants/gpg-agent-extra.socket → /usr/lib/systemd/user/gpg-agent-extra.socket. +Created symlink /etc/systemd/user/sockets.target.wants/gpg-agent-ssh.socket → /usr/lib/systemd/user/gpg-agent-ssh.socket. +Created symlink /etc/systemd/user/sockets.target.wants/gpg-agent.socket → /usr/lib/systemd/user/gpg-agent.socket. +Setting up gpgsm (2.2.40-1.1+deb12u2) ... +Setting up dirmngr (2.2.40-1.1+deb12u2) ... +Created symlink /etc/systemd/user/sockets.target.wants/dirmngr.socket → /usr/lib/systemd/user/dirmngr.socket. +Setting up gpg-wks-server (2.2.40-1.1+deb12u2) ... +Setting up gpg-wks-client (2.2.40-1.1+deb12u2) ... +Setting up gnupg (2.2.40-1.1+deb12u2) ... +Processing triggers for man-db (2.11.2-2) ... +Processing triggers for libc-bin (2.36-9+deb12u13) ... +Hit:1 http://deb.debian.org/debian bookworm InRelease +Hit:2 http://security.debian.org bookworm-security InRelease +Hit:3 http://deb.debian.org/debian bookworm-updates InRelease +Reading package lists... +Reading package lists... +Building dependency tree... +Reading state information... +locales is already the newest version (2.36-9+deb12u13). +ca-certificates is already the newest version (20230311+deb12u1). +curl is already the newest version (7.88.1-10+deb12u14). +gnupg is already the newest version (2.2.40-1.1+deb12u2). +lsb-release is already the newest version (12.0-1). +0 upgraded, 0 newly installed, 0 to remove and 20 not upgraded. +Hit:1 http://deb.debian.org/debian bookworm InRelease +Hit:2 http://security.debian.org bookworm-security InRelease +Hit:3 http://deb.debian.org/debian bookworm-updates InRelease +Get:4 https://download.docker.com/linux/debian bookworm InRelease [46.6 kB] +Get:5 https://download.docker.com/linux/debian bookworm/stable amd64 Packages [63.6 kB] +Fetched 110 kB in 0s (262 kB/s) +Reading package lists... +Reading package lists... +Building dependency tree... +Reading state information... +The following additional packages will be installed: + apparmor dbus-user-session docker-ce-rootless-extras git git-man iptables + liberror-perl libglib2.0-0 libglib2.0-data libip6tc2 libnetfilter-conntrack3 + libnfnetlink0 libslirp0 patch pigz shared-mime-info slirp4netns + xdg-user-dirs +Suggested packages: + apparmor-profiles-extra apparmor-utils cgroupfs-mount | cgroup-lite + docker-model-plugin git-daemon-run | git-daemon-sysvinit git-doc git-email + git-gui gitk gitweb git-cvs git-mediawiki git-svn firewalld + low-memory-monitor ed diffutils-doc +The following NEW packages will be installed: + apparmor containerd.io dbus-user-session docker-buildx-plugin docker-ce + docker-ce-cli docker-ce-rootless-extras docker-compose-plugin git git-man + iptables liberror-perl libglib2.0-0 libglib2.0-data libip6tc2 + libnetfilter-conntrack3 libnfnetlink0 libslirp0 patch pigz shared-mime-info + slirp4netns xdg-user-dirs +0 upgraded, 23 newly installed, 0 to remove and 20 not upgraded. +Need to get 111 MB of archives. +After this operation, 466 MB of additional disk space will be used. 
+Get:1 http://deb.debian.org/debian bookworm/main amd64 libip6tc2 amd64 1.8.9-2 [19.4 kB] +Get:2 http://deb.debian.org/debian bookworm/main amd64 libnfnetlink0 amd64 1.0.2-2 [15.1 kB] +Get:3 http://deb.debian.org/debian bookworm/main amd64 libnetfilter-conntrack3 amd64 1.0.9-3 [40.7 kB] +Get:4 http://deb.debian.org/debian bookworm/main amd64 iptables amd64 1.8.9-2 [360 kB] +Get:5 http://deb.debian.org/debian bookworm/main amd64 pigz amd64 2.6-1 [64.0 kB] +Get:6 http://deb.debian.org/debian bookworm/main amd64 apparmor amd64 3.0.8-3 [616 kB] +Get:7 http://deb.debian.org/debian bookworm/main amd64 dbus-user-session amd64 1.14.10-1~deb12u1 [78.1 kB] +Get:8 http://deb.debian.org/debian bookworm/main amd64 liberror-perl all 0.17029-2 [29.0 kB] +Get:9 http://deb.debian.org/debian bookworm/main amd64 git-man all 1:2.39.5-0+deb12u3 [2,053 kB] +Get:10 http://deb.debian.org/debian bookworm/main amd64 git amd64 1:2.39.5-0+deb12u3 [7,264 kB] +Get:11 http://deb.debian.org/debian bookworm/main amd64 libglib2.0-0 amd64 2.74.6-2+deb12u8 [1,402 kB] +Get:12 http://deb.debian.org/debian bookworm/main amd64 libglib2.0-data all 2.74.6-2+deb12u8 [1,210 kB] +Get:13 http://deb.debian.org/debian bookworm/main amd64 libslirp0 amd64 4.7.0-1 [63.0 kB] +Get:14 http://deb.debian.org/debian bookworm/main amd64 patch amd64 2.7.6-7 [128 kB] +Get:15 http://deb.debian.org/debian bookworm/main amd64 shared-mime-info amd64 2.2-1 [729 kB] +Get:16 http://deb.debian.org/debian bookworm/main amd64 slirp4netns amd64 1.2.0-1 [37.5 kB] +Get:17 http://deb.debian.org/debian bookworm/main amd64 xdg-user-dirs amd64 0.18-1 [54.4 kB] +Get:18 https://download.docker.com/linux/debian bookworm/stable amd64 containerd.io amd64 2.2.1-1~debian.12~bookworm [23.4 MB] +Get:19 https://download.docker.com/linux/debian bookworm/stable amd64 docker-ce-cli amd64 5:29.3.0-1~debian.12~bookworm [16.4 MB] +Get:20 https://download.docker.com/linux/debian bookworm/stable amd64 docker-ce amd64 5:29.3.0-1~debian.12~bookworm [22.5 MB] +Get:21 https://download.docker.com/linux/debian bookworm/stable amd64 docker-buildx-plugin amd64 0.31.1-1~debian.12~bookworm [20.2 MB] +Get:22 https://download.docker.com/linux/debian bookworm/stable amd64 docker-ce-rootless-extras amd64 5:29.3.0-1~debian.12~bookworm [6,389 kB] +Get:23 https://download.docker.com/linux/debian bookworm/stable amd64 docker-compose-plugin amd64 5.1.0-1~debian.12~bookworm [7,847 kB] +Preconfiguring packages ... +Fetched 111 MB in 1s (90.3 MB/s) +Selecting previously unselected package containerd.io. +(Reading database ... (Reading database ... 5% (Reading database ... 10% (Reading database ... 15% (Reading database ... 20% (Reading database ... 25% (Reading database ... 30% (Reading database ... 35% (Reading database ... 40% (Reading database ... 45% (Reading database ... 50% (Reading database ... 55% (Reading database ... 60% (Reading database ... 65% (Reading database ... 70% (Reading database ... 75% (Reading database ... 80% (Reading database ... 85% (Reading database ... 90% (Reading database ... 95% (Reading database ... 100% (Reading database ... 19417 files and directories currently installed.) +Preparing to unpack .../00-containerd.io_2.2.1-1~debian.12~bookworm_amd64.deb ... +Unpacking containerd.io (2.2.1-1~debian.12~bookworm) ... +Selecting previously unselected package docker-ce-cli. +Preparing to unpack .../01-docker-ce-cli_5%3a29.3.0-1~debian.12~bookworm_amd64.deb ... +Unpacking docker-ce-cli (5:29.3.0-1~debian.12~bookworm) ... +Selecting previously unselected package libip6tc2:amd64. 
+Preparing to unpack .../02-libip6tc2_1.8.9-2_amd64.deb ... +Unpacking libip6tc2:amd64 (1.8.9-2) ... +Selecting previously unselected package libnfnetlink0:amd64. +Preparing to unpack .../03-libnfnetlink0_1.0.2-2_amd64.deb ... +Unpacking libnfnetlink0:amd64 (1.0.2-2) ... +Selecting previously unselected package libnetfilter-conntrack3:amd64. +Preparing to unpack .../04-libnetfilter-conntrack3_1.0.9-3_amd64.deb ... +Unpacking libnetfilter-conntrack3:amd64 (1.0.9-3) ... +Selecting previously unselected package iptables. +Preparing to unpack .../05-iptables_1.8.9-2_amd64.deb ... +Unpacking iptables (1.8.9-2) ... +Selecting previously unselected package docker-ce. +Preparing to unpack .../06-docker-ce_5%3a29.3.0-1~debian.12~bookworm_amd64.deb ... +Unpacking docker-ce (5:29.3.0-1~debian.12~bookworm) ... +Selecting previously unselected package pigz. +Preparing to unpack .../07-pigz_2.6-1_amd64.deb ... +Unpacking pigz (2.6-1) ... +Selecting previously unselected package apparmor. +Preparing to unpack .../08-apparmor_3.0.8-3_amd64.deb ... +Unpacking apparmor (3.0.8-3) ... +Selecting previously unselected package dbus-user-session. +Preparing to unpack .../09-dbus-user-session_1.14.10-1~deb12u1_amd64.deb ... +Unpacking dbus-user-session (1.14.10-1~deb12u1) ... +Selecting previously unselected package docker-buildx-plugin. +Preparing to unpack .../10-docker-buildx-plugin_0.31.1-1~debian.12~bookworm_amd64.deb ... +Unpacking docker-buildx-plugin (0.31.1-1~debian.12~bookworm) ... +Selecting previously unselected package docker-ce-rootless-extras. +Preparing to unpack .../11-docker-ce-rootless-extras_5%3a29.3.0-1~debian.12~bookworm_amd64.deb ... +Unpacking docker-ce-rootless-extras (5:29.3.0-1~debian.12~bookworm) ... +Selecting previously unselected package docker-compose-plugin. +Preparing to unpack .../12-docker-compose-plugin_5.1.0-1~debian.12~bookworm_amd64.deb ... +Unpacking docker-compose-plugin (5.1.0-1~debian.12~bookworm) ... +Selecting previously unselected package liberror-perl. +Preparing to unpack .../13-liberror-perl_0.17029-2_all.deb ... +Unpacking liberror-perl (0.17029-2) ... +Selecting previously unselected package git-man. +Preparing to unpack .../14-git-man_1%3a2.39.5-0+deb12u3_all.deb ... +Unpacking git-man (1:2.39.5-0+deb12u3) ... +Selecting previously unselected package git. +Preparing to unpack .../15-git_1%3a2.39.5-0+deb12u3_amd64.deb ... +Unpacking git (1:2.39.5-0+deb12u3) ... +Selecting previously unselected package libglib2.0-0:amd64. +Preparing to unpack .../16-libglib2.0-0_2.74.6-2+deb12u8_amd64.deb ... +Unpacking libglib2.0-0:amd64 (2.74.6-2+deb12u8) ... +Selecting previously unselected package libglib2.0-data. +Preparing to unpack .../17-libglib2.0-data_2.74.6-2+deb12u8_all.deb ... +Unpacking libglib2.0-data (2.74.6-2+deb12u8) ... +Selecting previously unselected package libslirp0:amd64. +Preparing to unpack .../18-libslirp0_4.7.0-1_amd64.deb ... +Unpacking libslirp0:amd64 (4.7.0-1) ... +Selecting previously unselected package patch. +Preparing to unpack .../19-patch_2.7.6-7_amd64.deb ... +Unpacking patch (2.7.6-7) ... +Selecting previously unselected package shared-mime-info. +Preparing to unpack .../20-shared-mime-info_2.2-1_amd64.deb ... +Unpacking shared-mime-info (2.2-1) ... +Selecting previously unselected package slirp4netns. +Preparing to unpack .../21-slirp4netns_1.2.0-1_amd64.deb ... +Unpacking slirp4netns (1.2.0-1) ... +Selecting previously unselected package xdg-user-dirs. +Preparing to unpack .../22-xdg-user-dirs_0.18-1_amd64.deb ... 
+Unpacking xdg-user-dirs (0.18-1) ... +Setting up xdg-user-dirs (0.18-1) ... +Setting up libip6tc2:amd64 (1.8.9-2) ... +Setting up libglib2.0-0:amd64 (2.74.6-2+deb12u8) ... +No schema files found: doing nothing. +Setting up liberror-perl (0.17029-2) ... +Setting up apparmor (3.0.8-3) ... +Created symlink /etc/systemd/system/sysinit.target.wants/apparmor.service → /lib/systemd/system/apparmor.service. +Setting up dbus-user-session (1.14.10-1~deb12u1) ... +Setting up docker-buildx-plugin (0.31.1-1~debian.12~bookworm) ... +Setting up libglib2.0-data (2.74.6-2+deb12u8) ... +Setting up shared-mime-info (2.2-1) ... +Setting up containerd.io (2.2.1-1~debian.12~bookworm) ... +Created symlink /etc/systemd/system/multi-user.target.wants/containerd.service → /lib/systemd/system/containerd.service. +Setting up patch (2.7.6-7) ... +Setting up docker-compose-plugin (5.1.0-1~debian.12~bookworm) ... +Setting up docker-ce-cli (5:29.3.0-1~debian.12~bookworm) ... +Setting up libslirp0:amd64 (4.7.0-1) ... +Setting up pigz (2.6-1) ... +Setting up libnfnetlink0:amd64 (1.0.2-2) ... +Setting up git-man (1:2.39.5-0+deb12u3) ... +Setting up docker-ce-rootless-extras (5:29.3.0-1~debian.12~bookworm) ... +Setting up slirp4netns (1.2.0-1) ... +Setting up git (1:2.39.5-0+deb12u3) ... +Setting up libnetfilter-conntrack3:amd64 (1.0.9-3) ... +Setting up iptables (1.8.9-2) ... +update-alternatives: using /usr/sbin/iptables-legacy to provide /usr/sbin/iptables (iptables) in auto mode +update-alternatives: using /usr/sbin/ip6tables-legacy to provide /usr/sbin/ip6tables (ip6tables) in auto mode +update-alternatives: using /usr/sbin/iptables-nft to provide /usr/sbin/iptables (iptables) in auto mode +update-alternatives: using /usr/sbin/ip6tables-nft to provide /usr/sbin/ip6tables (ip6tables) in auto mode +update-alternatives: using /usr/sbin/arptables-nft to provide /usr/sbin/arptables (arptables) in auto mode +update-alternatives: using /usr/sbin/ebtables-nft to provide /usr/sbin/ebtables (ebtables) in auto mode +Setting up docker-ce (5:29.3.0-1~debian.12~bookworm) ... +Created symlink /etc/systemd/system/multi-user.target.wants/docker.service → /lib/systemd/system/docker.service. +Created symlink /etc/systemd/system/sockets.target.wants/docker.socket → /lib/systemd/system/docker.socket. +Processing triggers for man-db (2.11.2-2) ... +Processing triggers for libc-bin (2.36-9+deb12u13) ... 
+active
+ Registry Mirrors:
+  http://192.168.45.2:5000/
+ Live Restore Enabled: false
+ Image postgrest/postgrest:latest Pulling
+ Image n8nio/n8n:latest Pulling
+ Image pgvector/pgvector:pg16 Pulling
+ … (repetitive per-layer download/extraction progress trimmed)
+ Image postgrest/postgrest:latest Pulled
+ Image pgvector/pgvector:pg16 Pulled
+ Image n8nio/n8n:latest Pulled
+ Network customer-stack_customer-net Creating
+ Network customer-stack_customer-net Created
+ Container customer-postgres Creating
+ Container customer-postgres Created
+ Container customer-postgrest Creating
+ Container customer-postgrest Created
+ Container n8n Creating
+ Container n8n Created
+ Container customer-postgres Starting
+ Container customer-postgres Started
+ Container customer-postgres Waiting
+ Container customer-postgres Healthy
+ Container customer-postgrest Starting
+ Container customer-postgrest Started
+ Container customer-postgres Waiting
+ Container customer-postgres Healthy
+ Container n8n Starting
+ Container n8n Started
+NAME                 IMAGE                        COMMAND                  SERVICE     CREATED          STATUS                    PORTS
+customer-postgres    pgvector/pgvector:pg16       "docker-entrypoint.s…"   postgres    12 seconds ago   Up 12 seconds (healthy)   5432/tcp
+customer-postgrest   postgrest/postgrest:latest   "/bin/postgrest"         postgrest   12 seconds ago   Up 1 second               0.0.0.0:3000->3000/tcp, [::]:3000->3000/tcp
+n8n                  n8nio/n8n:latest             "tini -- /docker-ent…"   n8n         12 seconds ago   Up Less than a second     0.0.0.0:5678->5678/tcp, [::]:5678->5678/tcp
+{"data":{"createdAt":"2026-03-07T18:28:43.078Z","id":"803d6c8e-7285-4efd-b350-52f8a2bbda78","email":"admin@userman.de","firstName":"Admin","lastName":"Owner","personalizationAnswers":null,"settings":{"userActivated":false},"disabled":false,"mfaEnabled":false,"lastActiveAt":"2026-03-07T18:28:52.241Z","isPending":false,"role":"global:owner","signInType":"email","isOwner":true,"featureFlags":{},"globalScopes":["aiAssistant:manage","annotationTag:create","annotationTag:delete","annotationTag:list","annotationTag:read","annotationTag:update","apiKey:manage","auditLogs:manage","banner:dismiss","breakingChanges:list","chatHubAgent:create","chatHubAgent:delete","chatHubAgent:list","chatHubAgent:read","chatHubAgent:update","chatHub:manage","chatHub:message","communityPackage:install","communityPackage:list","communityPackage:uninstall","communityPackage:update","community:register","credential:create","credential:delete","credential:list","credential:move","credential:read","credentialResolver:create","credentialResolver:delete","credentialResolver:list","credentialResolver:read","credentialResolver:update","credential:share","credential:shareGlobally","credential:update","dataTable:create","dataTable:delete","dataTable:list","dataTable:listProject","dataTable:read","dataTable:readRow","dataTable:update","dataTable:writeRow","eventBusDestination:create","eventBusDestination:delete","eventBusDestination:list","eventBusDestination:read","eventBusDestination:test","eventBusDestination:update","externalSecret:list","externalSecretsProvider:create","externalSecretsProvider:delete","externalSecretsProvider:list","externalSecretsProvider:read","externalSecretsProvider:sync","externalSecretsProvider:update","folder:create","folder:delete","folder:list","folder:move","folder:read","folder:update","insights:list","ldap:manage","ldap:sync","license:manage","logStreaming:manage","mcpApiKey:create","mcpApiKey:rotate","mcp:manage","mcp:oauth","oidc:manage","orchestration:read","project:create","project:delete","project:list","project:read","project:update","projectVariable:create","projectVariable:delete","projectVariable:list","projectVariable:read","projectVariable:update","provisioning:manage","role:manage","saml:manage","securityAudit:generate","securitySettings:manage","sourceControl:manage","sourceControl:pull","sourceControl:push","tag:create","tag:delete","tag:list","tag:read","tag:update","user:changeRole","user:create","user:delete","user:enforceMfa","user:generateInviteLink","user:list","user:read","user:resetPassword","user:update","variable:create","variable:delete","variable:list","variable:read","variable:update","workersView:manage","workflow:create","workflow:delete","workflow:execute","workflow:execute-chat","workflow:list","workflow:move","workflow:publish","workflow:read","workflow:share","workflow:update"],"mfaAuthenticated":false}}Created symlink /etc/systemd/system/multi-user.target.wants/n8n-workflow-reload.service → /etc/systemd/system/n8n-workflow-reload.service. 
diff --git a/sql/add_installer_json_api.sql b/sql/add_installer_json_api.sql new file mode 100644 index 0000000..679717d --- /dev/null +++ b/sql/add_installer_json_api.sql @@ -0,0 +1,378 @@ +-- ===================================================== +-- BotKonzept - Installer JSON API Extension +-- ===================================================== +-- Extends the database schema to store and expose installer JSON data +-- safely to frontend clients (without secrets) + +-- ===================================================== +-- Step 1: Add installer_json column to instances table +-- ===================================================== + +-- Add column to store the complete installer JSON +ALTER TABLE instances +ADD COLUMN IF NOT EXISTS installer_json JSONB DEFAULT '{}'::jsonb; + +-- Create index for faster JSON queries +CREATE INDEX IF NOT EXISTS idx_instances_installer_json ON instances USING gin(installer_json); + +-- Add comment +COMMENT ON COLUMN instances.installer_json IS 'Complete installer JSON output from install.sh (includes secrets - use api.instance_config view for safe access)'; + +-- ===================================================== +-- Step 2: Create safe API view (NON-SECRET data only) +-- ===================================================== + +-- Create API schema if it doesn't exist +CREATE SCHEMA IF NOT EXISTS api; + +-- Grant usage on api schema +GRANT USAGE ON SCHEMA api TO anon, authenticated, service_role; + +-- Create view that exposes only safe (non-secret) installer data +CREATE OR REPLACE VIEW api.instance_config AS +SELECT + i.id, + i.customer_id, + i.lxc_id as ctid, + i.hostname, + i.fqdn, + i.ip, + i.vlan, + i.status, + i.created_at, + -- Extract safe URLs from installer_json + jsonb_build_object( + 'n8n_internal', i.installer_json->'urls'->>'n8n_internal', + 'n8n_external', i.installer_json->'urls'->>'n8n_external', + 'postgrest', i.installer_json->'urls'->>'postgrest', + 'chat_webhook', i.installer_json->'urls'->>'chat_webhook', + 'chat_internal', i.installer_json->'urls'->>'chat_internal', + 'upload_form', i.installer_json->'urls'->>'upload_form', + 'upload_form_internal', i.installer_json->'urls'->>'upload_form_internal' + ) as urls, + -- Extract safe Supabase data (NO service_role_key, NO jwt_secret) + jsonb_build_object( + 'url_external', i.installer_json->'supabase'->>'url_external', + 'anon_key', i.installer_json->'supabase'->>'anon_key' + ) as supabase, + -- Extract Ollama URL (safe) + jsonb_build_object( + 'url', i.installer_json->'ollama'->>'url', + 'model', i.installer_json->'ollama'->>'model', + 'embedding_model', i.installer_json->'ollama'->>'embedding_model' + ) as ollama, + -- Customer info (joined) + c.email as customer_email, + c.first_name, + c.last_name, + c.company, + c.status as customer_status +FROM instances i +JOIN customers c ON i.customer_id = c.id +WHERE i.status = 'active' AND i.deleted_at IS NULL; + +-- Add comment +COMMENT ON VIEW api.instance_config IS 'Safe API view for instance configuration - exposes only non-secret data from installer JSON'; + +-- ===================================================== +-- Step 3: Row Level Security (RLS) for API view +-- ===================================================== + +-- Enable RLS on the view (inherited from base table) +-- Customers can only see their own instance config + +-- Policy: Allow customers to see their own instance config +CREATE POLICY instance_config_select_own ON instances + FOR SELECT + USING ( + -- Allow if customer_id matches authenticated user + 
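+        -- Note: this policy only has an effect once row level security is enabled
+        -- on the instances table, which this script does not do itself. If it is
+        -- not enabled elsewhere in the schema, something along these lines is
+        -- needed as well (sketch, not executed here):
+        --   ALTER TABLE instances ENABLE ROW LEVEL SECURITY;
+        -- auth.uid() and auth.jwt() below are the Supabase auth helper functions.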
customer_id::text = auth.uid()::text + OR + -- Allow service_role to see all (for n8n workflows) + auth.jwt()->>'role' = 'service_role' + ); + +-- Grant SELECT on api.instance_config view +GRANT SELECT ON api.instance_config TO anon, authenticated, service_role; + +-- ===================================================== +-- Step 4: Create function to get config by customer email +-- ===================================================== + +-- Function to get instance config by customer email (for public access) +CREATE OR REPLACE FUNCTION api.get_instance_config_by_email(customer_email_param TEXT) +RETURNS TABLE ( + id UUID, + customer_id UUID, + ctid BIGINT, + hostname VARCHAR, + fqdn VARCHAR, + ip VARCHAR, + vlan INTEGER, + status VARCHAR, + created_at TIMESTAMPTZ, + urls JSONB, + supabase JSONB, + ollama JSONB, + customer_email VARCHAR, + first_name VARCHAR, + last_name VARCHAR, + company VARCHAR, + customer_status VARCHAR +) AS $$ +BEGIN + RETURN QUERY + SELECT + ic.id, + ic.customer_id, + ic.ctid, + ic.hostname, + ic.fqdn, + ic.ip, + ic.vlan, + ic.status, + ic.created_at, + ic.urls, + ic.supabase, + ic.ollama, + ic.customer_email, + ic.first_name, + ic.last_name, + ic.company, + ic.customer_status + FROM api.instance_config ic + WHERE ic.customer_email = customer_email_param + LIMIT 1; +END; +$$ LANGUAGE plpgsql SECURITY DEFINER; + +-- Grant execute permission +GRANT EXECUTE ON FUNCTION api.get_instance_config_by_email(TEXT) TO anon, authenticated, service_role; + +-- Add comment +COMMENT ON FUNCTION api.get_instance_config_by_email IS 'Get instance configuration by customer email - returns only non-secret data'; + +-- ===================================================== +-- Step 5: Create function to get config by CTID +-- ===================================================== + +-- Function to get instance config by CTID (for internal use) +CREATE OR REPLACE FUNCTION api.get_instance_config_by_ctid(ctid_param BIGINT) +RETURNS TABLE ( + id UUID, + customer_id UUID, + ctid BIGINT, + hostname VARCHAR, + fqdn VARCHAR, + ip VARCHAR, + vlan INTEGER, + status VARCHAR, + created_at TIMESTAMPTZ, + urls JSONB, + supabase JSONB, + ollama JSONB, + customer_email VARCHAR, + first_name VARCHAR, + last_name VARCHAR, + company VARCHAR, + customer_status VARCHAR +) AS $$ +BEGIN + RETURN QUERY + SELECT + ic.id, + ic.customer_id, + ic.ctid, + ic.hostname, + ic.fqdn, + ic.ip, + ic.vlan, + ic.status, + ic.created_at, + ic.urls, + ic.supabase, + ic.ollama, + ic.customer_email, + ic.first_name, + ic.last_name, + ic.company, + ic.customer_status + FROM api.instance_config ic + WHERE ic.ctid = ctid_param + LIMIT 1; +END; +$$ LANGUAGE plpgsql SECURITY DEFINER; + +-- Grant execute permission +GRANT EXECUTE ON FUNCTION api.get_instance_config_by_ctid(BIGINT) TO service_role; + +-- Add comment +COMMENT ON FUNCTION api.get_instance_config_by_ctid IS 'Get instance configuration by CTID - for internal use only'; + +-- ===================================================== +-- Step 6: Create public config endpoint (no auth required) +-- ===================================================== + +-- Function to get public config (for website registration form) +-- Returns only the registration webhook URL +CREATE OR REPLACE FUNCTION api.get_public_config() +RETURNS TABLE ( + registration_webhook_url TEXT, + api_base_url TEXT +) AS $$ +BEGIN + RETURN QUERY + SELECT + 'https://api.botkonzept.de/webhook/botkonzept-registration'::TEXT as registration_webhook_url, + 'https://api.botkonzept.de'::TEXT as api_base_url; +END; 
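+-- Note: these SECURITY DEFINER functions run with their owner's privileges; it is
+-- common practice to also pin them to a fixed search_path so they cannot be
+-- redirected through a caller-controlled schema, for example (sketch, adjust to
+-- the schemas actually in use):
+--   ALTER FUNCTION api.get_instance_config_by_email(TEXT) SET search_path = api, public;
+--   ALTER FUNCTION api.get_instance_config_by_ctid(BIGINT) SET search_path = api, public;
+--   ALTER FUNCTION api.get_public_config() SET search_path = api, public;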
+$$ LANGUAGE plpgsql SECURITY DEFINER; + +-- Grant execute permission to everyone +GRANT EXECUTE ON FUNCTION api.get_public_config() TO anon, authenticated, service_role; + +-- Add comment +COMMENT ON FUNCTION api.get_public_config IS 'Get public configuration for website (registration webhook URL)'; + +-- ===================================================== +-- Step 7: Update install.sh integration +-- ===================================================== + +-- This SQL will be executed after instance creation +-- The install.sh script should call this function to store the installer JSON + +CREATE OR REPLACE FUNCTION api.store_installer_json( + customer_email_param TEXT, + lxc_id_param BIGINT, + installer_json_param JSONB +) +RETURNS JSONB AS $$ +DECLARE + instance_record RECORD; + result JSONB; +BEGIN + -- Find the instance by customer email and lxc_id + SELECT i.id, i.customer_id INTO instance_record + FROM instances i + JOIN customers c ON i.customer_id = c.id + WHERE c.email = customer_email_param + AND i.lxc_id = lxc_id_param + LIMIT 1; + + IF NOT FOUND THEN + RETURN jsonb_build_object( + 'success', false, + 'error', 'Instance not found for customer email and LXC ID' + ); + END IF; + + -- Update the installer_json column + UPDATE instances + SET installer_json = installer_json_param, + updated_at = NOW() + WHERE id = instance_record.id; + + -- Return success + result := jsonb_build_object( + 'success', true, + 'instance_id', instance_record.id, + 'customer_id', instance_record.customer_id, + 'message', 'Installer JSON stored successfully' + ); + + RETURN result; +END; +$$ LANGUAGE plpgsql SECURITY DEFINER; + +-- Grant execute permission to service_role only +GRANT EXECUTE ON FUNCTION api.store_installer_json(TEXT, BIGINT, JSONB) TO service_role; + +-- Add comment +COMMENT ON FUNCTION api.store_installer_json IS 'Store installer JSON after instance creation - called by install.sh via n8n workflow'; + +-- ===================================================== +-- Step 8: Create audit log entry for API access +-- ===================================================== + +-- Function to log API access +CREATE OR REPLACE FUNCTION api.log_config_access( + customer_id_param UUID, + access_type TEXT, + ip_address_param INET DEFAULT NULL +) +RETURNS VOID AS $$ +BEGIN + INSERT INTO audit_log ( + customer_id, + action, + entity_type, + performed_by, + ip_address, + metadata + ) VALUES ( + customer_id_param, + 'api_config_access', + 'instance_config', + 'api_user', + ip_address_param, + jsonb_build_object('access_type', access_type) + ); +END; +$$ LANGUAGE plpgsql SECURITY DEFINER; + +-- Grant execute permission +GRANT EXECUTE ON FUNCTION api.log_config_access(UUID, TEXT, INET) TO anon, authenticated, service_role; + +-- ===================================================== +-- Step 9: Example queries for testing +-- ===================================================== + +-- Example 1: Get instance config by customer email +-- SELECT * FROM api.get_instance_config_by_email('max@beispiel.de'); + +-- Example 2: Get instance config by CTID +-- SELECT * FROM api.get_instance_config_by_ctid(769697636); + +-- Example 3: Get public config +-- SELECT * FROM api.get_public_config(); + +-- Example 4: Store installer JSON (called by install.sh) +-- SELECT api.store_installer_json( +-- 'max@beispiel.de', +-- 769697636, +-- '{"ctid": 769697636, "urls": {...}, ...}'::jsonb +-- ); + +-- ===================================================== +-- Step 10: PostgREST API Routes +-- 
===================================================== + +-- After running this SQL, the following PostgREST routes will be available: +-- +-- 1. GET /api/instance_config +-- - Returns all instance configs (filtered by RLS) +-- - Requires authentication +-- +-- 2. POST /rpc/get_instance_config_by_email +-- - Body: {"customer_email_param": "max@beispiel.de"} +-- - Returns instance config for specific customer +-- - No authentication required (public) +-- +-- 3. POST /rpc/get_instance_config_by_ctid +-- - Body: {"ctid_param": 769697636} +-- - Returns instance config for specific CTID +-- - Requires service_role authentication +-- +-- 4. POST /rpc/get_public_config +-- - Body: {} +-- - Returns public configuration (registration webhook URL) +-- - No authentication required (public) +-- +-- 5. POST /rpc/store_installer_json +-- - Body: {"customer_email_param": "...", "lxc_id_param": 123, "installer_json_param": {...}} +-- - Stores installer JSON after instance creation +-- - Requires service_role authentication + +-- ===================================================== +-- End of API Extension +-- ===================================================== diff --git a/sql/add_installer_json_api_supabase_auth.sql b/sql/add_installer_json_api_supabase_auth.sql new file mode 100644 index 0000000..fd1ec20 --- /dev/null +++ b/sql/add_installer_json_api_supabase_auth.sql @@ -0,0 +1,476 @@ +-- ===================================================== +-- BotKonzept - Installer JSON API (Supabase Auth) +-- ===================================================== +-- Secure API using Supabase Auth JWT tokens +-- NO Service Role Key in Frontend - EVER! + +-- ===================================================== +-- Step 1: Add installer_json column to instances table +-- ===================================================== + +ALTER TABLE instances +ADD COLUMN IF NOT EXISTS installer_json JSONB DEFAULT '{}'::jsonb; + +CREATE INDEX IF NOT EXISTS idx_instances_installer_json ON instances USING gin(installer_json); + +COMMENT ON COLUMN instances.installer_json IS 'Complete installer JSON output from install.sh (includes secrets - use api.get_my_instance_config() for safe access)'; + +-- ===================================================== +-- Step 2: Link instances to Supabase Auth users +-- ===================================================== + +-- Add owner_user_id column to link instance to Supabase Auth user +ALTER TABLE instances +ADD COLUMN IF NOT EXISTS owner_user_id UUID REFERENCES auth.users(id) ON DELETE SET NULL; + +-- Create index for faster lookups +CREATE INDEX IF NOT EXISTS idx_instances_owner_user_id ON instances(owner_user_id); + +COMMENT ON COLUMN instances.owner_user_id IS 'Supabase Auth user ID of the instance owner'; + +-- ===================================================== +-- Step 3: Create safe API view (NON-SECRET data only) +-- ===================================================== + +CREATE SCHEMA IF NOT EXISTS api; +GRANT USAGE ON SCHEMA api TO anon, authenticated, service_role; + +-- View that exposes only safe (non-secret) installer data +CREATE OR REPLACE VIEW api.instance_config AS +SELECT + i.id, + i.customer_id, + i.owner_user_id, + i.lxc_id as ctid, + i.hostname, + i.fqdn, + i.ip, + i.vlan, + i.status, + i.created_at, + -- Extract safe URLs from installer_json (NO SECRETS) + jsonb_build_object( + 'n8n_internal', i.installer_json->'urls'->>'n8n_internal', + 'n8n_external', i.installer_json->'urls'->>'n8n_external', + 'postgrest', i.installer_json->'urls'->>'postgrest', + 
'chat_webhook', i.installer_json->'urls'->>'chat_webhook', + 'chat_internal', i.installer_json->'urls'->>'chat_internal', + 'upload_form', i.installer_json->'urls'->>'upload_form', + 'upload_form_internal', i.installer_json->'urls'->>'upload_form_internal' + ) as urls, + -- Extract safe Supabase data (NO service_role_key, NO jwt_secret) + jsonb_build_object( + 'url_external', i.installer_json->'supabase'->>'url_external', + 'anon_key', i.installer_json->'supabase'->>'anon_key' + ) as supabase, + -- Extract Ollama URL (safe) + jsonb_build_object( + 'url', i.installer_json->'ollama'->>'url', + 'model', i.installer_json->'ollama'->>'model', + 'embedding_model', i.installer_json->'ollama'->>'embedding_model' + ) as ollama, + -- Customer info (joined) + c.email as customer_email, + c.first_name, + c.last_name, + c.company, + c.status as customer_status +FROM instances i +JOIN customers c ON i.customer_id = c.id +WHERE i.status = 'active' AND i.deleted_at IS NULL; + +COMMENT ON VIEW api.instance_config IS 'Safe API view - exposes only non-secret data from installer JSON'; + +-- ===================================================== +-- Step 4: Row Level Security (RLS) Policies +-- ===================================================== + +-- Enable RLS on instances table (if not already enabled) +ALTER TABLE instances ENABLE ROW LEVEL SECURITY; + +-- Drop old policy if exists +DROP POLICY IF EXISTS instance_config_select_own ON instances; + +-- Policy: Users can only see their own instances +CREATE POLICY instances_select_own ON instances + FOR SELECT + USING ( + -- Allow if owner_user_id matches authenticated user + owner_user_id = auth.uid() + OR + -- Allow service_role to see all (for n8n workflows) + auth.jwt()->>'role' = 'service_role' + ); + +-- Grant SELECT on api.instance_config view +GRANT SELECT ON api.instance_config TO authenticated, service_role; + +-- ===================================================== +-- Step 5: Function to get MY instance config (Auth required) +-- ===================================================== + +-- Function to get instance config for authenticated user +-- Uses auth.uid() - NO email parameter (more secure) +CREATE OR REPLACE FUNCTION api.get_my_instance_config() +RETURNS TABLE ( + id UUID, + customer_id UUID, + owner_user_id UUID, + ctid BIGINT, + hostname VARCHAR, + fqdn VARCHAR, + ip VARCHAR, + vlan INTEGER, + status VARCHAR, + created_at TIMESTAMPTZ, + urls JSONB, + supabase JSONB, + ollama JSONB, + customer_email VARCHAR, + first_name VARCHAR, + last_name VARCHAR, + company VARCHAR, + customer_status VARCHAR +) +SECURITY DEFINER +SET search_path = public +AS $$ +BEGIN + -- Check if user is authenticated + IF auth.uid() IS NULL THEN + RAISE EXCEPTION 'Not authenticated'; + END IF; + + -- Return instance config for authenticated user + RETURN QUERY + SELECT + ic.id, + ic.customer_id, + ic.owner_user_id, + ic.ctid, + ic.hostname, + ic.fqdn, + ic.ip, + ic.vlan, + ic.status, + ic.created_at, + ic.urls, + ic.supabase, + ic.ollama, + ic.customer_email, + ic.first_name, + ic.last_name, + ic.company, + ic.customer_status + FROM api.instance_config ic + WHERE ic.owner_user_id = auth.uid() + LIMIT 1; +END; +$$ LANGUAGE plpgsql; + +GRANT EXECUTE ON FUNCTION api.get_my_instance_config() TO authenticated; + +COMMENT ON FUNCTION api.get_my_instance_config IS 'Get instance configuration for authenticated user - uses auth.uid() for security'; + +-- ===================================================== +-- Step 6: Function to get config by CTID (Service Role ONLY) +-- 
===================================================== + +CREATE OR REPLACE FUNCTION api.get_instance_config_by_ctid(ctid_param BIGINT) +RETURNS TABLE ( + id UUID, + customer_id UUID, + owner_user_id UUID, + ctid BIGINT, + hostname VARCHAR, + fqdn VARCHAR, + ip VARCHAR, + vlan INTEGER, + status VARCHAR, + created_at TIMESTAMPTZ, + urls JSONB, + supabase JSONB, + ollama JSONB, + customer_email VARCHAR, + first_name VARCHAR, + last_name VARCHAR, + company VARCHAR, + customer_status VARCHAR +) +SECURITY DEFINER +SET search_path = public +AS $$ +BEGIN + -- Only service_role can call this + IF auth.jwt()->>'role' != 'service_role' THEN + RAISE EXCEPTION 'Forbidden: service_role required'; + END IF; + + RETURN QUERY + SELECT + ic.id, + ic.customer_id, + ic.owner_user_id, + ic.ctid, + ic.hostname, + ic.fqdn, + ic.ip, + ic.vlan, + ic.status, + ic.created_at, + ic.urls, + ic.supabase, + ic.ollama, + ic.customer_email, + ic.first_name, + ic.last_name, + ic.company, + ic.customer_status + FROM api.instance_config ic + WHERE ic.ctid = ctid_param + LIMIT 1; +END; +$$ LANGUAGE plpgsql; + +GRANT EXECUTE ON FUNCTION api.get_instance_config_by_ctid(BIGINT) TO service_role; + +COMMENT ON FUNCTION api.get_instance_config_by_ctid IS 'Get instance configuration by CTID - service_role only'; + +-- ===================================================== +-- Step 7: Public config endpoint (NO auth required) +-- ===================================================== + +CREATE OR REPLACE FUNCTION api.get_public_config() +RETURNS TABLE ( + registration_webhook_url TEXT, + api_base_url TEXT +) +SECURITY DEFINER +SET search_path = public +AS $$ +BEGIN + RETURN QUERY + SELECT + 'https://api.botkonzept.de/webhook/botkonzept-registration'::TEXT as registration_webhook_url, + 'https://api.botkonzept.de'::TEXT as api_base_url; +END; +$$ LANGUAGE plpgsql; + +GRANT EXECUTE ON FUNCTION api.get_public_config() TO anon, authenticated, service_role; + +COMMENT ON FUNCTION api.get_public_config IS 'Get public configuration for website (registration webhook URL)'; + +-- ===================================================== +-- Step 8: Store installer JSON (Service Role ONLY) +-- ===================================================== + +CREATE OR REPLACE FUNCTION api.store_installer_json( + customer_email_param TEXT, + lxc_id_param BIGINT, + installer_json_param JSONB +) +RETURNS JSONB +SECURITY DEFINER +SET search_path = public +AS $$ +DECLARE + instance_record RECORD; + result JSONB; +BEGIN + -- Only service_role can call this + IF auth.jwt()->>'role' != 'service_role' THEN + RAISE EXCEPTION 'Forbidden: service_role required'; + END IF; + + -- Find the instance by customer email and lxc_id + SELECT i.id, i.customer_id, c.id as auth_user_id INTO instance_record + FROM instances i + JOIN customers c ON i.customer_id = c.id + WHERE c.email = customer_email_param + AND i.lxc_id = lxc_id_param + LIMIT 1; + + IF NOT FOUND THEN + RETURN jsonb_build_object( + 'success', false, + 'error', 'Instance not found for customer email and LXC ID' + ); + END IF; + + -- Update the installer_json column + UPDATE instances + SET installer_json = installer_json_param, + updated_at = NOW() + WHERE id = instance_record.id; + + -- Return success + result := jsonb_build_object( + 'success', true, + 'instance_id', instance_record.id, + 'customer_id', instance_record.customer_id, + 'message', 'Installer JSON stored successfully' + ); + + RETURN result; +END; +$$ LANGUAGE plpgsql; + +GRANT EXECUTE ON FUNCTION api.store_installer_json(TEXT, BIGINT, JSONB) TO 
service_role; + +COMMENT ON FUNCTION api.store_installer_json IS 'Store installer JSON after instance creation - service_role only'; + +-- ===================================================== +-- Step 9: Link customer to Supabase Auth user +-- ===================================================== + +-- Function to link customer to Supabase Auth user (called during registration) +CREATE OR REPLACE FUNCTION api.link_customer_to_auth_user( + customer_email_param TEXT, + auth_user_id_param UUID +) +RETURNS JSONB +SECURITY DEFINER +SET search_path = public +AS $$ +DECLARE + customer_record RECORD; + instance_record RECORD; + result JSONB; +BEGIN + -- Only service_role can call this + IF auth.jwt()->>'role' != 'service_role' THEN + RAISE EXCEPTION 'Forbidden: service_role required'; + END IF; + + -- Find customer by email + SELECT id INTO customer_record + FROM customers + WHERE email = customer_email_param + LIMIT 1; + + IF NOT FOUND THEN + RETURN jsonb_build_object( + 'success', false, + 'error', 'Customer not found' + ); + END IF; + + -- Update all instances for this customer with owner_user_id + UPDATE instances + SET owner_user_id = auth_user_id_param, + updated_at = NOW() + WHERE customer_id = customer_record.id; + + -- Return success + result := jsonb_build_object( + 'success', true, + 'customer_id', customer_record.id, + 'auth_user_id', auth_user_id_param, + 'message', 'Customer linked to auth user successfully' + ); + + RETURN result; +END; +$$ LANGUAGE plpgsql; + +GRANT EXECUTE ON FUNCTION api.link_customer_to_auth_user(TEXT, UUID) TO service_role; + +COMMENT ON FUNCTION api.link_customer_to_auth_user IS 'Link customer to Supabase Auth user - service_role only'; + +-- ===================================================== +-- Step 10: Audit logging +-- ===================================================== + +CREATE OR REPLACE FUNCTION api.log_config_access( + access_type TEXT, + ip_address_param INET DEFAULT NULL +) +RETURNS VOID +SECURITY DEFINER +SET search_path = public +AS $$ +BEGIN + -- Log access for authenticated user + IF auth.uid() IS NOT NULL THEN + INSERT INTO audit_log ( + customer_id, + action, + entity_type, + performed_by, + ip_address, + metadata + ) + SELECT + i.customer_id, + 'api_config_access', + 'instance_config', + auth.uid()::text, + ip_address_param, + jsonb_build_object('access_type', access_type) + FROM instances i + WHERE i.owner_user_id = auth.uid() + LIMIT 1; + END IF; +END; +$$ LANGUAGE plpgsql; + +GRANT EXECUTE ON FUNCTION api.log_config_access(TEXT, INET) TO authenticated, service_role; + +-- ===================================================== +-- Step 11: PostgREST API Routes +-- ===================================================== + +-- Available routes: +-- +-- 1. POST /rpc/get_my_instance_config +-- - Body: {} +-- - Returns instance config for authenticated user +-- - Requires: Supabase Auth JWT token +-- - Response: Single instance config object (or empty if not found) +-- +-- 2. POST /rpc/get_public_config +-- - Body: {} +-- - Returns public configuration (registration webhook URL) +-- - Requires: No authentication +-- +-- 3. POST /rpc/get_instance_config_by_ctid +-- - Body: {"ctid_param": 769697636} +-- - Returns instance config for specific CTID +-- - Requires: Service Role Key (backend only) +-- +-- 4. POST /rpc/store_installer_json +-- - Body: {"customer_email_param": "...", "lxc_id_param": 123, "installer_json_param": {...}} +-- - Stores installer JSON after instance creation +-- - Requires: Service Role Key (backend only) +-- +-- 5. 
POST /rpc/link_customer_to_auth_user +-- - Body: {"customer_email_param": "...", "auth_user_id_param": "..."} +-- - Links customer to Supabase Auth user +-- - Requires: Service Role Key (backend only) + +-- ===================================================== +-- Example Usage +-- ===================================================== + +-- Example 1: Get my instance config (authenticated user) +-- POST /rpc/get_my_instance_config +-- Headers: Authorization: Bearer +-- Body: {} + +-- Example 2: Get public config (no auth) +-- POST /rpc/get_public_config +-- Body: {} + +-- Example 3: Store installer JSON (service role) +-- POST /rpc/store_installer_json +-- Headers: Authorization: Bearer +-- Body: {"customer_email_param": "max@beispiel.de", "lxc_id_param": 769697636, "installer_json_param": {...}} + +-- Example 4: Link customer to auth user (service role) +-- POST /rpc/link_customer_to_auth_user +-- Headers: Authorization: Bearer +-- Body: {"customer_email_param": "max@beispiel.de", "auth_user_id_param": "550e8400-e29b-41d4-a716-446655440000"} + +-- ===================================================== +-- End of Supabase Auth API +-- ===================================================== diff --git a/sql/botkonzept_schema.sql b/sql/botkonzept_schema.sql new file mode 100644 index 0000000..ea7cedd --- /dev/null +++ b/sql/botkonzept_schema.sql @@ -0,0 +1,444 @@ +-- ===================================================== +-- BotKonzept - Database Schema for Customer Management +-- ===================================================== +-- This schema manages customers, instances, emails, and payments +-- for the BotKonzept SaaS platform + +-- Enable UUID extension +CREATE EXTENSION IF NOT EXISTS "uuid-ossp"; + +-- ===================================================== +-- Table: customers +-- ===================================================== +-- Stores customer information and trial status +CREATE TABLE IF NOT EXISTS customers ( + id UUID PRIMARY KEY DEFAULT uuid_generate_v4(), + email VARCHAR(255) UNIQUE NOT NULL, + first_name VARCHAR(100) NOT NULL, + last_name VARCHAR(100) NOT NULL, + company VARCHAR(255), + phone VARCHAR(50), + + -- Status tracking + status VARCHAR(50) DEFAULT 'trial' CHECK (status IN ('trial', 'active', 'cancelled', 'suspended', 'deleted')), + + -- Timestamps + created_at TIMESTAMPTZ DEFAULT NOW(), + updated_at TIMESTAMPTZ DEFAULT NOW(), + trial_end_date TIMESTAMPTZ, + subscription_start_date TIMESTAMPTZ, + subscription_end_date TIMESTAMPTZ, + + -- Marketing tracking + utm_source VARCHAR(100), + utm_medium VARCHAR(100), + utm_campaign VARCHAR(100), + referral_code VARCHAR(50), + + -- Metadata + metadata JSONB DEFAULT '{}'::jsonb, + + -- Indexes + CONSTRAINT email_format CHECK (email ~* '^[A-Za-z0-9._%+-]+@[A-Za-z0-9.-]+\.[A-Za-z]{2,}$') +); + +-- Create indexes for customers +CREATE INDEX idx_customers_email ON customers(email); +CREATE INDEX idx_customers_status ON customers(status); +CREATE INDEX idx_customers_created_at ON customers(created_at); +CREATE INDEX idx_customers_trial_end_date ON customers(trial_end_date); + +-- ===================================================== +-- Table: instances +-- ===================================================== +-- Stores LXC instance information for each customer +CREATE TABLE IF NOT EXISTS instances ( + id UUID PRIMARY KEY DEFAULT uuid_generate_v4(), + customer_id UUID NOT NULL REFERENCES customers(id) ON DELETE CASCADE, + + -- Instance details + lxc_id BIGINT NOT NULL UNIQUE, + hostname VARCHAR(255) NOT NULL, + ip 
VARCHAR(50) NOT NULL, + fqdn VARCHAR(255) NOT NULL, + vlan INTEGER, + + -- Status + status VARCHAR(50) DEFAULT 'active' CHECK (status IN ('creating', 'active', 'suspended', 'deleted', 'error')), + + -- Credentials (encrypted JSON) + credentials JSONB NOT NULL, + + -- Timestamps + created_at TIMESTAMPTZ DEFAULT NOW(), + updated_at TIMESTAMPTZ DEFAULT NOW(), + deleted_at TIMESTAMPTZ, + trial_end_date TIMESTAMPTZ, + + -- Resource usage + disk_usage_gb DECIMAL(10,2), + memory_usage_mb INTEGER, + cpu_usage_percent DECIMAL(5,2), + + -- Metadata + metadata JSONB DEFAULT '{}'::jsonb +); + +-- Create indexes for instances +CREATE INDEX idx_instances_customer_id ON instances(customer_id); +CREATE INDEX idx_instances_lxc_id ON instances(lxc_id); +CREATE INDEX idx_instances_status ON instances(status); +CREATE INDEX idx_instances_hostname ON instances(hostname); + +-- ===================================================== +-- Table: emails_sent +-- ===================================================== +-- Tracks all emails sent to customers +CREATE TABLE IF NOT EXISTS emails_sent ( + id UUID PRIMARY KEY DEFAULT uuid_generate_v4(), + customer_id UUID NOT NULL REFERENCES customers(id) ON DELETE CASCADE, + + -- Email details + email_type VARCHAR(50) NOT NULL CHECK (email_type IN ( + 'welcome', + 'day3_upgrade', + 'day5_reminder', + 'day7_last_chance', + 'day8_goodbye', + 'payment_confirm', + 'payment_failed', + 'instance_created', + 'instance_deleted', + 'password_reset', + 'newsletter' + )), + + subject VARCHAR(255), + recipient_email VARCHAR(255) NOT NULL, + + -- Status + status VARCHAR(50) DEFAULT 'sent' CHECK (status IN ('sent', 'delivered', 'opened', 'clicked', 'bounced', 'failed')), + + -- Timestamps + sent_at TIMESTAMPTZ DEFAULT NOW(), + delivered_at TIMESTAMPTZ, + opened_at TIMESTAMPTZ, + clicked_at TIMESTAMPTZ, + + -- Metadata + metadata JSONB DEFAULT '{}'::jsonb +); + +-- Create indexes for emails_sent +CREATE INDEX idx_emails_customer_id ON emails_sent(customer_id); +CREATE INDEX idx_emails_type ON emails_sent(email_type); +CREATE INDEX idx_emails_sent_at ON emails_sent(sent_at); +CREATE INDEX idx_emails_status ON emails_sent(status); + +-- ===================================================== +-- Table: subscriptions +-- ===================================================== +-- Stores subscription and payment information +CREATE TABLE IF NOT EXISTS subscriptions ( + id UUID PRIMARY KEY DEFAULT uuid_generate_v4(), + customer_id UUID NOT NULL REFERENCES customers(id) ON DELETE CASCADE, + + -- Plan details + plan_name VARCHAR(50) NOT NULL CHECK (plan_name IN ('trial', 'starter', 'business', 'enterprise')), + plan_price DECIMAL(10,2) NOT NULL, + billing_cycle VARCHAR(20) DEFAULT 'monthly' CHECK (billing_cycle IN ('monthly', 'yearly')), + + -- Discount + discount_percent DECIMAL(5,2) DEFAULT 0, + discount_code VARCHAR(50), + discount_end_date TIMESTAMPTZ, + + -- Status + status VARCHAR(50) DEFAULT 'active' CHECK (status IN ('active', 'cancelled', 'past_due', 'suspended')), + + -- Payment provider + payment_provider VARCHAR(50) CHECK (payment_provider IN ('stripe', 'paypal', 'manual')), + payment_provider_id VARCHAR(255), + + -- Timestamps + created_at TIMESTAMPTZ DEFAULT NOW(), + updated_at TIMESTAMPTZ DEFAULT NOW(), + current_period_start TIMESTAMPTZ, + current_period_end TIMESTAMPTZ, + cancelled_at TIMESTAMPTZ, + + -- Metadata + metadata JSONB DEFAULT '{}'::jsonb +); + +-- Create indexes for subscriptions +CREATE INDEX idx_subscriptions_customer_id ON subscriptions(customer_id); +CREATE INDEX 
idx_subscriptions_status ON subscriptions(status); +CREATE INDEX idx_subscriptions_plan_name ON subscriptions(plan_name); + +-- ===================================================== +-- Table: payments +-- ===================================================== +-- Stores payment transaction history +CREATE TABLE IF NOT EXISTS payments ( + id UUID PRIMARY KEY DEFAULT uuid_generate_v4(), + customer_id UUID NOT NULL REFERENCES customers(id) ON DELETE CASCADE, + subscription_id UUID REFERENCES subscriptions(id) ON DELETE SET NULL, + + -- Payment details + amount DECIMAL(10,2) NOT NULL, + currency VARCHAR(3) DEFAULT 'EUR', + + -- Status + status VARCHAR(50) DEFAULT 'pending' CHECK (status IN ('pending', 'succeeded', 'failed', 'refunded', 'cancelled')), + + -- Payment provider + payment_provider VARCHAR(50) CHECK (payment_provider IN ('stripe', 'paypal', 'manual')), + payment_provider_id VARCHAR(255), + payment_method VARCHAR(50), + + -- Timestamps + created_at TIMESTAMPTZ DEFAULT NOW(), + paid_at TIMESTAMPTZ, + refunded_at TIMESTAMPTZ, + + -- Invoice + invoice_number VARCHAR(50), + invoice_url TEXT, + + -- Metadata + metadata JSONB DEFAULT '{}'::jsonb +); + +-- Create indexes for payments +CREATE INDEX idx_payments_customer_id ON payments(customer_id); +CREATE INDEX idx_payments_subscription_id ON payments(subscription_id); +CREATE INDEX idx_payments_status ON payments(status); +CREATE INDEX idx_payments_created_at ON payments(created_at); + +-- ===================================================== +-- Table: usage_stats +-- ===================================================== +-- Tracks usage statistics for each instance +CREATE TABLE IF NOT EXISTS usage_stats ( + id UUID PRIMARY KEY DEFAULT uuid_generate_v4(), + instance_id UUID NOT NULL REFERENCES instances(id) ON DELETE CASCADE, + + -- Usage metrics + date DATE NOT NULL, + messages_count INTEGER DEFAULT 0, + documents_count INTEGER DEFAULT 0, + api_calls_count INTEGER DEFAULT 0, + storage_used_mb DECIMAL(10,2) DEFAULT 0, + + -- Timestamps + created_at TIMESTAMPTZ DEFAULT NOW(), + + -- Unique constraint: one record per instance per day + UNIQUE(instance_id, date) +); + +-- Create indexes for usage_stats +CREATE INDEX idx_usage_instance_id ON usage_stats(instance_id); +CREATE INDEX idx_usage_date ON usage_stats(date); + +-- ===================================================== +-- Table: audit_log +-- ===================================================== +-- Audit trail for important actions +CREATE TABLE IF NOT EXISTS audit_log ( + id UUID PRIMARY KEY DEFAULT uuid_generate_v4(), + customer_id UUID REFERENCES customers(id) ON DELETE SET NULL, + instance_id UUID REFERENCES instances(id) ON DELETE SET NULL, + + -- Action details + action VARCHAR(100) NOT NULL, + entity_type VARCHAR(50), + entity_id UUID, + + -- User/system that performed the action + performed_by VARCHAR(100), + ip_address INET, + user_agent TEXT, + + -- Changes + old_values JSONB, + new_values JSONB, + + -- Timestamp + created_at TIMESTAMPTZ DEFAULT NOW(), + + -- Metadata + metadata JSONB DEFAULT '{}'::jsonb +); + +-- Create indexes for audit_log +CREATE INDEX idx_audit_customer_id ON audit_log(customer_id); +CREATE INDEX idx_audit_instance_id ON audit_log(instance_id); +CREATE INDEX idx_audit_action ON audit_log(action); +CREATE INDEX idx_audit_created_at ON audit_log(created_at); + +-- ===================================================== +-- Functions & Triggers +-- ===================================================== + +-- Function to update updated_at timestamp +CREATE 
OR REPLACE FUNCTION update_updated_at_column() +RETURNS TRIGGER AS $$ +BEGIN + NEW.updated_at = NOW(); + RETURN NEW; +END; +$$ LANGUAGE plpgsql; + +-- Triggers for updated_at +CREATE TRIGGER update_customers_updated_at BEFORE UPDATE ON customers + FOR EACH ROW EXECUTE FUNCTION update_updated_at_column(); + +CREATE TRIGGER update_instances_updated_at BEFORE UPDATE ON instances + FOR EACH ROW EXECUTE FUNCTION update_updated_at_column(); + +CREATE TRIGGER update_subscriptions_updated_at BEFORE UPDATE ON subscriptions + FOR EACH ROW EXECUTE FUNCTION update_updated_at_column(); + +-- Function to calculate trial end date +CREATE OR REPLACE FUNCTION set_trial_end_date() +RETURNS TRIGGER AS $$ +BEGIN + IF NEW.trial_end_date IS NULL THEN + NEW.trial_end_date = NEW.created_at + INTERVAL '7 days'; + END IF; + RETURN NEW; +END; +$$ LANGUAGE plpgsql; + +-- Trigger for trial end date +CREATE TRIGGER set_customer_trial_end_date BEFORE INSERT ON customers + FOR EACH ROW EXECUTE FUNCTION set_trial_end_date(); + +-- ===================================================== +-- Views +-- ===================================================== + +-- View: Active trials expiring soon +CREATE OR REPLACE VIEW trials_expiring_soon AS +SELECT + c.id, + c.email, + c.first_name, + c.last_name, + c.created_at, + c.trial_end_date, + EXTRACT(DAY FROM (c.trial_end_date - NOW())) as days_remaining, + i.lxc_id, + i.hostname, + i.fqdn +FROM customers c +JOIN instances i ON c.id = i.customer_id +WHERE c.status = 'trial' + AND i.status = 'active' + AND c.trial_end_date > NOW() + AND c.trial_end_date <= NOW() + INTERVAL '3 days'; + +-- View: Customer overview with instance info +CREATE OR REPLACE VIEW customer_overview AS +SELECT + c.id, + c.email, + c.first_name, + c.last_name, + c.company, + c.status, + c.created_at, + c.trial_end_date, + i.lxc_id, + i.hostname, + i.fqdn, + i.ip, + i.status as instance_status, + s.plan_name, + s.plan_price, + s.status as subscription_status +FROM customers c +LEFT JOIN instances i ON c.id = i.customer_id AND i.status = 'active' +LEFT JOIN subscriptions s ON c.id = s.customer_id AND s.status = 'active'; + +-- View: Revenue metrics +CREATE OR REPLACE VIEW revenue_metrics AS +SELECT + DATE_TRUNC('month', paid_at) as month, + COUNT(*) as payment_count, + SUM(amount) as total_revenue, + AVG(amount) as average_payment, + COUNT(DISTINCT customer_id) as unique_customers +FROM payments +WHERE status = 'succeeded' + AND paid_at IS NOT NULL +GROUP BY DATE_TRUNC('month', paid_at) +ORDER BY month DESC; + +-- ===================================================== +-- Row Level Security (RLS) Policies +-- ===================================================== + +-- Enable RLS on tables +ALTER TABLE customers ENABLE ROW LEVEL SECURITY; +ALTER TABLE instances ENABLE ROW LEVEL SECURITY; +ALTER TABLE subscriptions ENABLE ROW LEVEL SECURITY; +ALTER TABLE payments ENABLE ROW LEVEL SECURITY; + +-- Policy: Customers can only see their own data +CREATE POLICY customers_select_own ON customers + FOR SELECT + USING (auth.uid()::text = id::text); + +CREATE POLICY instances_select_own ON instances + FOR SELECT + USING (customer_id::text = auth.uid()::text); + +CREATE POLICY subscriptions_select_own ON subscriptions + FOR SELECT + USING (customer_id::text = auth.uid()::text); + +CREATE POLICY payments_select_own ON payments + FOR SELECT + USING (customer_id::text = auth.uid()::text); + +-- ===================================================== +-- Sample Data (for testing) +-- 
===================================================== + +-- Insert sample customer (commented out for production) +-- INSERT INTO customers (email, first_name, last_name, company, status) +-- VALUES ('test@example.com', 'Max', 'Mustermann', 'Test GmbH', 'trial'); + +-- ===================================================== +-- Grants +-- ===================================================== + +-- Grant permissions to authenticated users +GRANT SELECT, INSERT, UPDATE ON customers TO authenticated; +GRANT SELECT ON instances TO authenticated; +GRANT SELECT ON subscriptions TO authenticated; +GRANT SELECT ON payments TO authenticated; +GRANT SELECT ON usage_stats TO authenticated; + +-- Grant all permissions to service role (for n8n workflows) +GRANT ALL ON ALL TABLES IN SCHEMA public TO service_role; +GRANT ALL ON ALL SEQUENCES IN SCHEMA public TO service_role; + +-- ===================================================== +-- Comments +-- ===================================================== + +COMMENT ON TABLE customers IS 'Stores customer information and trial status'; +COMMENT ON TABLE instances IS 'Stores LXC instance information for each customer'; +COMMENT ON TABLE emails_sent IS 'Tracks all emails sent to customers'; +COMMENT ON TABLE subscriptions IS 'Stores subscription and payment information'; +COMMENT ON TABLE payments IS 'Stores payment transaction history'; +COMMENT ON TABLE usage_stats IS 'Tracks usage statistics for each instance'; +COMMENT ON TABLE audit_log IS 'Audit trail for important actions'; + +-- ===================================================== +-- End of Schema +-- ===================================================== diff --git a/sql/init_pgvector.sql b/sql/init_pgvector.sql new file mode 100644 index 0000000..bbb3ce1 --- /dev/null +++ b/sql/init_pgvector.sql @@ -0,0 +1,2 @@ +CREATE EXTENSION IF NOT EXISTS vector; +CREATE EXTENSION IF NOT EXISTS "uuid-ossp"; diff --git a/templates/docker-compose.yml b/templates/docker-compose.yml new file mode 100644 index 0000000..b99d596 --- /dev/null +++ b/templates/docker-compose.yml @@ -0,0 +1,63 @@ +services: + postgres: + image: pgvector/pgvector:pg16 + container_name: customer-postgres + restart: unless-stopped + environment: + POSTGRES_DB: ${PG_DB} + POSTGRES_USER: ${PG_USER} + POSTGRES_PASSWORD: ${PG_PASSWORD} + volumes: + - ./volumes/postgres/data:/var/lib/postgresql/data + - ./sql:/docker-entrypoint-initdb.d:ro + healthcheck: + test: ["CMD-SHELL", "pg_isready -U ${PG_USER} -d ${PG_DB} || exit 1"] + interval: 10s + timeout: 5s + retries: 20 + networks: + - customer-net + + n8n: + image: n8nio/n8n:latest + container_name: n8n + restart: unless-stopped + depends_on: + postgres: + condition: service_healthy + ports: + - "${N8N_PORT}:5678" + environment: + # --- Web / Cookies / URL --- + N8N_PORT: 5678 + N8N_PROTOCOL: ${N8N_PROTOCOL} + N8N_HOST: ${N8N_HOST} + N8N_EDITOR_BASE_URL: ${N8N_EDITOR_BASE_URL} + WEBHOOK_URL: ${WEBHOOK_URL} + + # Ohne TLS/Reverse Proxy: sonst Secure-Cookie Warning / Login-Probleme + N8N_SECURE_COOKIE: ${N8N_SECURE_COOKIE} + + # --- DB (Postgres) --- + DB_TYPE: postgresdb + DB_POSTGRESDB_HOST: postgres + DB_POSTGRESDB_PORT: 5432 + DB_POSTGRESDB_DATABASE: ${PG_DB} + DB_POSTGRESDB_USER: ${PG_USER} + DB_POSTGRESDB_PASSWORD: ${PG_PASSWORD} + + # --- Basics --- + GENERIC_TIMEZONE: Europe/Berlin + TZ: Europe/Berlin + + # optional (später hart machen) + N8N_ENCRYPTION_KEY: ${N8N_ENCRYPTION_KEY} + + volumes: + - ./volumes/n8n-data:/home/node/.n8n + networks: + - customer-net + +networks: + customer-net: + 
driver: bridge diff --git a/templates/env.template b/templates/env.template new file mode 100644 index 0000000..edd3813 --- /dev/null +++ b/templates/env.template @@ -0,0 +1,20 @@ +# Basics +TZ=Europe/Berlin + +# n8n URL-Setup (wird pro Kunde gefüllt) +N8N_HOST={{N8N_HOST}} +N8N_EDITOR_BASE_URL=https://{{N8N_HOST}}/ +WEBHOOK_URL=https://{{N8N_HOST}}/ + +# Dashboard BasicAuth (wird random generiert) +DASHBOARD_USERNAME={{DASHBOARD_USERNAME}} +DASHBOARD_PASSWORD={{DASHBOARD_PASSWORD}} + +# n8n Credential Encryption Key (wird random generiert, 64 hex chars ok) +N8N_ENCRYPTION_KEY={{N8N_ENCRYPTION_KEY}} + +# Postgres +POSTGRES_USER=postgres +POSTGRES_PASSWORD={{POSTGRES_PASSWORD}} +POSTGRES_DB=postgres + diff --git a/templates/n8n-workflow-reload.service b/templates/n8n-workflow-reload.service new file mode 100644 index 0000000..9c09724 --- /dev/null +++ b/templates/n8n-workflow-reload.service @@ -0,0 +1,32 @@ +[Unit] +Description=n8n Workflow Auto-Reload Service +Documentation=https://docs.n8n.io/ +After=docker.service +Wants=docker.service +# Warte bis n8n-Container läuft +After=docker-n8n.service +Requires=docker.service + +[Service] +Type=oneshot +RemainAfterExit=yes +User=root +WorkingDirectory=/opt/customer-stack + +# Warte kurz, damit Docker-Container vollständig gestartet sind +ExecStartPre=/bin/sleep 10 + +# Führe Reload-Script aus +ExecStart=/bin/bash /opt/customer-stack/reload-workflow.sh + +# Logging +StandardOutput=journal +StandardError=journal +SyslogIdentifier=n8n-workflow-reload + +# Restart-Policy bei Fehler +Restart=on-failure +RestartSec=30 + +[Install] +WantedBy=multi-user.target diff --git a/templates/reload-workflow.sh b/templates/reload-workflow.sh new file mode 100644 index 0000000..e4c3497 --- /dev/null +++ b/templates/reload-workflow.sh @@ -0,0 +1,379 @@ +#!/bin/bash +# +# n8n Workflow Auto-Reload Script +# Wird beim LXC-Start ausgeführt, um den Workflow neu zu laden +# + +set -euo pipefail + +# Konfiguration +SCRIPT_DIR="/opt/customer-stack" +LOG_DIR="${SCRIPT_DIR}/logs" +LOG_FILE="${LOG_DIR}/workflow-reload.log" +ENV_FILE="${SCRIPT_DIR}/.env" +WORKFLOW_TEMPLATE="${SCRIPT_DIR}/workflow-template.json" +WORKFLOW_NAME="RAG KI-Bot (PGVector)" + +# API-Konfiguration +API_URL="http://127.0.0.1:5678" +COOKIE_FILE="/tmp/n8n_reload_cookies.txt" +MAX_WAIT=60 # Maximale Wartezeit in Sekunden +# Erstelle Log-Verzeichnis sofort (vor den Logging-Funktionen) +mkdir -p "${LOG_DIR}" + + +# Logging-Funktion +log() { + echo "[$(date '+%Y-%m-%d %H:%M:%S')] $*" | tee -a "${LOG_FILE}" +} + +log_error() { + echo "[$(date '+%Y-%m-%d %H:%M:%S')] ERROR: $*" | tee -a "${LOG_FILE}" >&2 +} + +# Funktion: Warten bis n8n bereit ist +wait_for_n8n() { + log "Warte auf n8n API..." + local count=0 + + while [ $count -lt $MAX_WAIT ]; do + if curl -sS -o /dev/null -w "%{http_code}" "${API_URL}/rest/settings" 2>/dev/null | grep -q "200"; then + log "n8n API ist bereit" + return 0 + fi + sleep 1 + count=$((count + 1)) + done + + log_error "n8n API nicht erreichbar nach ${MAX_WAIT} Sekunden" + return 1 +} + +# Funktion: .env-Datei laden +load_env() { + if [ ! -f "${ENV_FILE}" ]; then + log_error ".env-Datei nicht gefunden: ${ENV_FILE}" + return 1 + fi + + # Exportiere alle Variablen aus .env + set -a + source "${ENV_FILE}" + set +a + + log "Konfiguration geladen aus ${ENV_FILE}" + return 0 +} + +# Funktion: Login bei n8n +n8n_login() { + log "Login bei n8n als ${N8N_OWNER_EMAIL}..." 
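+
+    # This talks to n8n's internal REST endpoint (/rest/login) and keeps the
+    # session cookie in ${COOKIE_FILE}; "emailOrLdapLoginId" is the field name
+    # used by recent n8n 1.x builds. Since /rest/* is not a stable public API,
+    # the payload shape may change between n8n releases.
+    # N8N_OWNER_EMAIL and N8N_OWNER_PASS are read from the .env loaded by load_env().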
+ + # Escape special characters in password for JSON + local escaped_password + escaped_password=$(echo "${N8N_OWNER_PASS}" | sed 's/\\/\\\\/g; s/"/\\"/g') + + local response + response=$(curl -sS -X POST "${API_URL}/rest/login" \ + -H "Content-Type: application/json" \ + -c "${COOKIE_FILE}" \ + -d "{\"emailOrLdapLoginId\":\"${N8N_OWNER_EMAIL}\",\"password\":\"${escaped_password}\"}" 2>&1) + + if echo "$response" | grep -q '"code":\|"status":"error"'; then + log_error "Login fehlgeschlagen: ${response}" + return 1 + fi + + log "Login erfolgreich" + return 0 +} + +# Funktion: Workflow nach Name suchen +find_workflow() { + local workflow_name="$1" + + log "Suche nach Workflow '${workflow_name}'..." + + local response + response=$(curl -sS -X GET "${API_URL}/rest/workflows" \ + -H "Content-Type: application/json" \ + -b "${COOKIE_FILE}" 2>&1) + + # Extract workflow ID by name + local workflow_id + workflow_id=$(echo "$response" | grep -oP "\"name\":\s*\"${workflow_name}\".*?\"id\":\s*\"\K[^\"]+|\"id\":\s*\"\K[^\"]+(?=.*?\"name\":\s*\"${workflow_name}\")" | head -1 || echo "") + + if [ -n "$workflow_id" ]; then + log "Workflow gefunden: ID=${workflow_id}" + echo "$workflow_id" + return 0 + else + log "Workflow '${workflow_name}' nicht gefunden" + echo "" + return 1 + fi +} + +# Funktion: Workflow löschen +delete_workflow() { + local workflow_id="$1" + + log "Lösche Workflow ${workflow_id}..." + + local response + response=$(curl -sS -X DELETE "${API_URL}/rest/workflows/${workflow_id}" \ + -H "Content-Type: application/json" \ + -b "${COOKIE_FILE}" 2>&1) + + log "Workflow ${workflow_id} gelöscht" + return 0 +} + +# Funktion: Credential nach Name und Typ suchen +find_credential() { + local cred_name="$1" + local cred_type="$2" + + log "Suche nach Credential '${cred_name}' (Typ: ${cred_type})..." + + local response + response=$(curl -sS -X GET "${API_URL}/rest/credentials" \ + -H "Content-Type: application/json" \ + -b "${COOKIE_FILE}" 2>&1) + + # Extract credential ID by name and type + local cred_id + cred_id=$(echo "$response" | grep -oP "\"name\":\s*\"${cred_name}\".*?\"type\":\s*\"${cred_type}\".*?\"id\":\s*\"\K[^\"]+|\"id\":\s*\"\K[^\"]+(?=.*?\"name\":\s*\"${cred_name}\".*?\"type\":\s*\"${cred_type}\")" | head -1 || echo "") + + if [ -n "$cred_id" ]; then + log "Credential gefunden: ID=${cred_id}" + echo "$cred_id" + return 0 + else + log_error "Credential '${cred_name}' nicht gefunden" + echo "" + return 1 + fi +} + +# Funktion: Workflow-Template verarbeiten +process_workflow_template() { + local pg_cred_id="$1" + local ollama_cred_id="$2" + local output_file="/tmp/workflow_processed.json" + + log "Verarbeite Workflow-Template..." 
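+
+    # The embedded Python below rewrites the exported workflow template so it can
+    # be re-imported cleanly: it drops read-only export fields (id, versionId,
+    # meta, tags, active, pinData) and points each node's "postgres" and
+    # "ollamaApi" credential reference at the locally created
+    # "PostgreSQL (local)" and "Ollama (local)" credentials.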
+ + # Python-Script zum Verarbeiten des Workflows + python3 - "$pg_cred_id" "$ollama_cred_id" <<'PYTHON_SCRIPT' +import json +import sys + +# Read the workflow template +with open('/opt/customer-stack/workflow-template.json', 'r') as f: + workflow = json.load(f) + +# Get credential IDs from arguments +pg_cred_id = sys.argv[1] +ollama_cred_id = sys.argv[2] + +# Remove fields that should not be in the import +fields_to_remove = ['id', 'versionId', 'meta', 'tags', 'active', 'pinData'] +for field in fields_to_remove: + workflow.pop(field, None) + +# Process all nodes and replace credential IDs +for node in workflow.get('nodes', []): + credentials = node.get('credentials', {}) + + # Replace PostgreSQL credential + if 'postgres' in credentials: + credentials['postgres'] = { + 'id': pg_cred_id, + 'name': 'PostgreSQL (local)' + } + + # Replace Ollama credential + if 'ollamaApi' in credentials: + credentials['ollamaApi'] = { + 'id': ollama_cred_id, + 'name': 'Ollama (local)' + } + +# Write the processed workflow +with open('/tmp/workflow_processed.json', 'w') as f: + json.dump(workflow, f) + +print("Workflow processed successfully") +PYTHON_SCRIPT + + if [ $? -eq 0 ]; then + log "Workflow-Template erfolgreich verarbeitet" + echo "$output_file" + return 0 + else + log_error "Fehler beim Verarbeiten des Workflow-Templates" + return 1 + fi +} + +# Funktion: Workflow importieren +import_workflow() { + local workflow_file="$1" + + log "Importiere Workflow aus ${workflow_file}..." + + local response + response=$(curl -sS -X POST "${API_URL}/rest/workflows" \ + -H "Content-Type: application/json" \ + -b "${COOKIE_FILE}" \ + -d @"${workflow_file}" 2>&1) + + # Extract workflow ID and version ID + local workflow_id + local version_id + workflow_id=$(echo "$response" | grep -oP '"id"\s*:\s*"\K[^"]+' | head -1) + version_id=$(echo "$response" | grep -oP '"versionId"\s*:\s*"\K[^"]+' | head -1) + + if [ -z "$workflow_id" ]; then + log_error "Workflow-Import fehlgeschlagen: ${response}" + return 1 + fi + + log "Workflow importiert: ID=${workflow_id}, Version=${version_id}" + echo "${workflow_id}:${version_id}" + return 0 +} + +# Funktion: Workflow aktivieren +activate_workflow() { + local workflow_id="$1" + local version_id="$2" + + log "Aktiviere Workflow ${workflow_id}..." + + local response + response=$(curl -sS -X POST "${API_URL}/rest/workflows/${workflow_id}/activate" \ + -H "Content-Type: application/json" \ + -b "${COOKIE_FILE}" \ + -d "{\"versionId\":\"${version_id}\"}" 2>&1) + + if echo "$response" | grep -q '"active":true\|"active": true'; then + log "Workflow ${workflow_id} erfolgreich aktiviert" + return 0 + else + log_error "Workflow-Aktivierung fehlgeschlagen: ${response}" + return 1 + fi +} + +# Funktion: Cleanup +cleanup() { + rm -f "${COOKIE_FILE}" /tmp/workflow_processed.json 2>/dev/null || true +} + +# Hauptfunktion +main() { + log "=========================================" + log "n8n Workflow Auto-Reload gestartet" + log "=========================================" + + # Erstelle Log-Verzeichnis falls nicht vorhanden + + # Lade Konfiguration + if ! load_env; then + log_error "Fehler beim Laden der Konfiguration" + exit 1 + fi + + # Prüfe ob Workflow-Template existiert + if [ ! -f "${WORKFLOW_TEMPLATE}" ]; then + log_error "Workflow-Template nicht gefunden: ${WORKFLOW_TEMPLATE}" + exit 1 + fi + + # Warte auf n8n + if ! wait_for_n8n; then + log_error "n8n nicht erreichbar" + exit 1 + fi + + # Login + if ! 
n8n_login; then + log_error "Login fehlgeschlagen" + cleanup + exit 1 + fi + + # Suche nach bestehendem Workflow + local existing_workflow_id + existing_workflow_id=$(find_workflow "${WORKFLOW_NAME}" || echo "") + + if [ -n "$existing_workflow_id" ]; then + log "Bestehender Workflow gefunden, wird gelöscht..." + delete_workflow "$existing_workflow_id" + fi + + # Suche nach Credentials + log "Suche nach bestehenden Credentials..." + local pg_cred_id + local ollama_cred_id + + pg_cred_id=$(find_credential "PostgreSQL (local)" "postgres" || echo "") + ollama_cred_id=$(find_credential "Ollama (local)" "ollamaApi" || echo "") + + if [ -z "$pg_cred_id" ] || [ -z "$ollama_cred_id" ]; then + log_error "Credentials nicht gefunden (PostgreSQL: ${pg_cred_id}, Ollama: ${ollama_cred_id})" + cleanup + exit 1 + fi + + # Verarbeite Workflow-Template + local processed_workflow + processed_workflow=$(process_workflow_template "$pg_cred_id" "$ollama_cred_id") + + if [ -z "$processed_workflow" ]; then + log_error "Fehler beim Verarbeiten des Workflow-Templates" + cleanup + exit 1 + fi + + # Importiere Workflow + local import_result + import_result=$(import_workflow "$processed_workflow") + + if [ -z "$import_result" ]; then + log_error "Workflow-Import fehlgeschlagen" + cleanup + exit 1 + fi + + # Extrahiere IDs + local new_workflow_id + local new_version_id + new_workflow_id=$(echo "$import_result" | cut -d: -f1) + new_version_id=$(echo "$import_result" | cut -d: -f2) + + # Aktiviere Workflow + if ! activate_workflow "$new_workflow_id" "$new_version_id"; then + log_error "Workflow-Aktivierung fehlgeschlagen" + cleanup + exit 1 + fi + + # Cleanup + cleanup + + log "=========================================" + log "Workflow-Reload erfolgreich abgeschlossen" + log "Workflow-ID: ${new_workflow_id}" + log "=========================================" + + exit 0 +} + +# Trap für Cleanup bei Fehler +trap cleanup EXIT + +# Hauptfunktion ausführen +main "$@"
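+
+# -----------------------------------------------------
+# Example usage (illustrative only)
+# -----------------------------------------------------
+# A sketch of how this script and the API extension can be exercised by hand;
+# host names and tokens below are placeholders, not values produced by install.sh.
+#
+# Re-run the workflow reload on an instance and follow the log:
+#
+#   bash /opt/customer-stack/reload-workflow.sh
+#   tail -f /opt/customer-stack/logs/workflow-reload.log
+#
+# Smoke-test the PostgREST RPC routes from sql/add_installer_json_api_supabase_auth.sql
+# (see the route list in Step 11), assuming PostgREST serves the "api" schema at the
+# placeholder host below:
+#
+#   # Public config - no authentication
+#   curl -sS -X POST "https://postgrest.example.internal/rpc/get_public_config" \
+#        -H "Content-Type: application/json" -d '{}'
+#
+#   # Own instance config - requires a Supabase Auth user JWT
+#   curl -sS -X POST "https://postgrest.example.internal/rpc/get_my_instance_config" \
+#        -H "Authorization: Bearer <SUPABASE_USER_JWT>" \
+#        -H "Content-Type: application/json" -d '{}'
+#
+#   # Store the installer JSON - service role key only (backend / n8n workflow)
+#   curl -sS -X POST "https://postgrest.example.internal/rpc/store_installer_json" \
+#        -H "Authorization: Bearer <SERVICE_ROLE_KEY>" \
+#        -H "Content-Type: application/json" \
+#        -d '{"customer_email_param":"max@beispiel.de","lxc_id_param":769697636,"installer_json_param":{}}'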