Data upload
This commit is contained in:
323
RAGKI-BotPGVector.json
Normal file
323
RAGKI-BotPGVector.json
Normal file
@@ -0,0 +1,323 @@
|
||||
{
|
||||
"name": "RAG KI-Bot (PGVector)",
|
||||
"nodes": [
|
||||
{
|
||||
"parameters": {
|
||||
"public": true,
|
||||
"initialMessages": "Hallo! 👋\nMein Name ist Clara (Customer Learning & Answering Reference Assistant)\nWie kann ich behilflich sein?",
|
||||
"options": {
|
||||
"inputPlaceholder": "Hier die Frage eingeben...",
|
||||
"showWelcomeScreen": true,
|
||||
"subtitle": "Die Antworten der AI können fehlerhaft sein.",
|
||||
"title": "Support-Chat 👋",
|
||||
"customCss": ":root {\n /* Colors */\n --chat--color-primary: #e74266;\n --chat--color-primary-shade-50: #db4061;\n --chat--color-primary-shade-100: #cf3c5c;\n --chat--color-secondary: #20b69e;\n --chat--color-secondary-shade-50: #1ca08a;\n --chat--color-white: #ffffff;\n --chat--color-light: #f2f4f8;\n --chat--color-light-shade-50: #e6e9f1;\n --chat--color-light-shade-100: #c2c5cc;\n --chat--color-medium: #d2d4d9;\n --chat--color-dark: #101330;\n --chat--color-disabled: #d2d4d9;\n --chat--color-typing: #404040;\n\n /* Base Layout */\n --chat--spacing: 1rem;\n --chat--border-radius: 0.25rem;\n --chat--transition-duration: 0.15s;\n --chat--font-family: -apple-system, BlinkMacSystemFont, 'Segoe UI', Roboto, Oxygen-Sans, Ubuntu, Cantarell, 'Helvetica Neue', sans-serif;\n\n /* Window Dimensions */\n --chat--window--width: 400px;\n --chat--window--height: 600px;\n --chat--window--bottom: var(--chat--spacing);\n --chat--window--right: var(--chat--spacing);\n --chat--window--z-index: 9999;\n --chat--window--border: 1px solid var(--chat--color-light-shade-50);\n --chat--window--border-radius: var(--chat--border-radius);\n --chat--window--margin-bottom: var(--chat--spacing);\n\n /* Header Styles */\n --chat--header-height: auto;\n --chat--header--padding: var(--chat--spacing);\n --chat--header--background: var(--chat--color-dark);\n --chat--header--color: var(--chat--color-light);\n --chat--header--border-top: none;\n --chat--header--border-bottom: none;\n --chat--header--border-left: none;\n --chat--header--border-right: none;\n --chat--heading--font-size: 2em;\n --chat--subtitle--font-size: inherit;\n --chat--subtitle--line-height: 1.8;\n\n /* Message Styles */\n --chat--message--font-size: 1rem;\n --chat--message--padding: var(--chat--spacing);\n --chat--message--border-radius: var(--chat--border-radius);\n --chat--message-line-height: 1.5;\n --chat--message--margin-bottom: calc(var(--chat--spacing) * 1);\n --chat--message--bot--background: var(--chat--color-white);\n 
--chat--message--bot--color: var(--chat--color-dark);\n --chat--message--bot--border: none;\n --chat--message--user--background: var(--chat--color-secondary);\n --chat--message--user--color: var(--chat--color-white);\n --chat--message--user--border: none;\n --chat--message--pre--background: rgba(0, 0, 0, 0.05);\n --chat--messages-list--padding: var(--chat--spacing);\n\n /* Toggle Button */\n --chat--toggle--size: 64px;\n --chat--toggle--width: var(--chat--toggle--size);\n --chat--toggle--height: var(--chat--toggle--size);\n --chat--toggle--border-radius: 50%;\n --chat--toggle--background: var(--chat--color-primary);\n --chat--toggle--hover--background: var(--chat--color-primary-shade-50);\n --chat--toggle--active--background: var(--chat--color-primary-shade-100);\n --chat--toggle--color: var(--chat--color-white);\n\n /* Input Area */\n --chat--textarea--height: 50px;\n --chat--textarea--max-height: 30rem;\n --chat--input--font-size: inherit;\n --chat--input--border: 0;\n --chat--input--border-radius: 0;\n --chat--input--padding: 0.8rem;\n --chat--input--background: var(--chat--color-white);\n --chat--input--text-color: initial;\n --chat--input--line-height: 1.5;\n --chat--input--placeholder--font-size: var(--chat--input--font-size);\n --chat--input--border-active: 0;\n --chat--input--left--panel--width: 2rem;\n\n /* Button Styles */\n --chat--button--color: var(--chat--color-light);\n --chat--button--background: var(--chat--color-primary);\n --chat--button--padding: calc(var(--chat--spacing) * 1 / 2) var(--chat--spacing);\n --chat--button--border-radius: var(--chat--border-radius);\n --chat--button--hover--color: var(--chat--color-light);\n --chat--button--hover--background: var(--chat--color-primary-shade-50);\n --chat--close--button--color-hover: var(--chat--color-primary);\n\n /* Send and File Buttons */\n --chat--input--send--button--background: var(--chat--color-white);\n --chat--input--send--button--color: var(--chat--color-secondary);\n 
--chat--input--send--button--background-hover: var(--chat--color-primary-shade-50);\n --chat--input--send--button--color-hover: var(--chat--color-secondary-shade-50);\n --chat--input--file--button--background: var(--chat--color-white);\n --chat--input--file--button--color: var(--chat--color-secondary);\n --chat--input--file--button--background-hover: var(--chat--input--file--button--background);\n --chat--input--file--button--color-hover: var(--chat--color-secondary-shade-50);\n --chat--files-spacing: 0.25rem;\n\n /* Body and Footer */\n --chat--body--background: var(--chat--color-light);\n --chat--footer--background: var(--chat--color-light);\n --chat--footer--color: var(--chat--color-dark);\n}\n\n\n/* You can override any class styles, too. Right-click inspect in Chat UI to find class to override. */\n.chat-message {\n\tmax-width: 50%;\n}",
|
||||
"responseMode": "lastNode"
|
||||
}
|
||||
},
|
||||
"type": "@n8n/n8n-nodes-langchain.chatTrigger",
|
||||
"typeVersion": 1.3,
|
||||
"position": [
|
||||
0,
|
||||
0
|
||||
],
|
||||
"id": "chat-trigger-001",
|
||||
"name": "When chat message received",
|
||||
"webhookId": "rag-chat-webhook",
|
||||
"notesInFlow": true,
|
||||
"notes": "Chat URL: /webhook/rag-chat-webhook/chat"
|
||||
},
|
||||
{
|
||||
"parameters": {
|
||||
"promptType": "define",
|
||||
"text": "={{ $json.chatInput }}\nAntworte ausschliesslich auf Deutsch und nutze zuerst die Wissensdatenbank.",
|
||||
"options": {}
|
||||
},
|
||||
"type": "@n8n/n8n-nodes-langchain.agent",
|
||||
"typeVersion": 2.2,
|
||||
"position": [
|
||||
208,
|
||||
0
|
||||
],
|
||||
"id": "ai-agent-001",
|
||||
"name": "AI Agent"
|
||||
},
|
||||
{
|
||||
"parameters": {
|
||||
"model": "ministral-3:3b",
|
||||
"options": {}
|
||||
},
|
||||
"type": "@n8n/n8n-nodes-langchain.lmChatOllama",
|
||||
"typeVersion": 1,
|
||||
"position": [
|
||||
64,
|
||||
208
|
||||
],
|
||||
"id": "ollama-chat-001",
|
||||
"name": "Ollama Chat Model",
|
||||
"credentials": {
|
||||
"ollamaApi": {
|
||||
"id": "ZmMYzkrY4zMFYJ1J",
|
||||
"name": "Ollama (local)"
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"parameters": {},
|
||||
"type": "@n8n/n8n-nodes-langchain.memoryBufferWindow",
|
||||
"typeVersion": 1.3,
|
||||
"position": [
|
||||
224,
|
||||
208
|
||||
],
|
||||
"id": "memory-001",
|
||||
"name": "Simple Memory"
|
||||
},
|
||||
{
|
||||
"parameters": {
|
||||
"mode": "retrieve-as-tool",
|
||||
"toolName": "knowledge_base",
|
||||
"toolDescription": "Verwende dieses Tool für Infos die der Benutzer fragt. Sucht in der Wissensdatenbank nach relevanten Dokumenten.",
|
||||
"tableName": "documents",
|
||||
"options": {}
|
||||
},
|
||||
"type": "@n8n/n8n-nodes-langchain.vectorStorePGVector",
|
||||
"typeVersion": 1,
|
||||
"position": [
|
||||
432,
|
||||
128
|
||||
],
|
||||
"id": "pgvector-retrieve-001",
|
||||
"name": "PGVector Store",
|
||||
"credentials": {
|
||||
"postgres": {
|
||||
"id": "1VVtY5ei866suQdA",
|
||||
"name": "PostgreSQL (local)"
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"parameters": {
|
||||
"model": "nomic-embed-text:latest"
|
||||
},
|
||||
"type": "@n8n/n8n-nodes-langchain.embeddingsOllama",
|
||||
"typeVersion": 1,
|
||||
"position": [
|
||||
416,
|
||||
288
|
||||
],
|
||||
"id": "embeddings-retrieve-001",
|
||||
"name": "Embeddings Ollama",
|
||||
"credentials": {
|
||||
"ollamaApi": {
|
||||
"id": "ZmMYzkrY4zMFYJ1J",
|
||||
"name": "Ollama (local)"
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"parameters": {
|
||||
"formTitle": "Dokument hochladen",
|
||||
"formDescription": "Laden Sie ein PDF-Dokument hoch, um es in die Wissensdatenbank aufzunehmen.",
|
||||
"formFields": {
|
||||
"values": [
|
||||
{
|
||||
"fieldLabel": "Dokument",
|
||||
"fieldType": "file",
|
||||
"acceptFileTypes": ".pdf"
|
||||
}
|
||||
]
|
||||
},
|
||||
"options": {}
|
||||
},
|
||||
"type": "n8n-nodes-base.formTrigger",
|
||||
"typeVersion": 2.3,
|
||||
"position": [
|
||||
768,
|
||||
0
|
||||
],
|
||||
"id": "form-trigger-001",
|
||||
"name": "On form submission",
|
||||
"webhookId": "rag-upload-form"
|
||||
},
|
||||
{
|
||||
"parameters": {
|
||||
"operation": "pdf",
|
||||
"binaryPropertyName": "Dokument",
|
||||
"options": {}
|
||||
},
|
||||
"type": "n8n-nodes-base.extractFromFile",
|
||||
"typeVersion": 1,
|
||||
"position": [
|
||||
976,
|
||||
0
|
||||
],
|
||||
"id": "extract-file-001",
|
||||
"name": "Extract from File"
|
||||
},
|
||||
{
|
||||
"parameters": {
|
||||
"mode": "insert",
|
||||
"tableName": "documents",
|
||||
"options": {}
|
||||
},
|
||||
"type": "@n8n/n8n-nodes-langchain.vectorStorePGVector",
|
||||
"typeVersion": 1,
|
||||
"position": [
|
||||
1184,
|
||||
0
|
||||
],
|
||||
"id": "pgvector-insert-001",
|
||||
"name": "PGVector Store Insert",
|
||||
"credentials": {
|
||||
"postgres": {
|
||||
"id": "1VVtY5ei866suQdA",
|
||||
"name": "PostgreSQL (local)"
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"parameters": {
|
||||
"model": "nomic-embed-text:latest"
|
||||
},
|
||||
"type": "@n8n/n8n-nodes-langchain.embeddingsOllama",
|
||||
"typeVersion": 1,
|
||||
"position": [
|
||||
1168,
|
||||
240
|
||||
],
|
||||
"id": "embeddings-insert-001",
|
||||
"name": "Embeddings Ollama1",
|
||||
"credentials": {
|
||||
"ollamaApi": {
|
||||
"id": "ZmMYzkrY4zMFYJ1J",
|
||||
"name": "Ollama (local)"
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"parameters": {
|
||||
"options": {}
|
||||
},
|
||||
"type": "@n8n/n8n-nodes-langchain.documentDefaultDataLoader",
|
||||
"typeVersion": 1.1,
|
||||
"position": [
|
||||
1392,
|
||||
240
|
||||
],
|
||||
"id": "data-loader-001",
|
||||
"name": "Default Data Loader"
|
||||
}
|
||||
],
|
||||
"pinData": {},
|
||||
"connections": {
|
||||
"When chat message received": {
|
||||
"main": [
|
||||
[
|
||||
{
|
||||
"node": "AI Agent",
|
||||
"type": "main",
|
||||
"index": 0
|
||||
}
|
||||
]
|
||||
]
|
||||
},
|
||||
"Ollama Chat Model": {
|
||||
"ai_languageModel": [
|
||||
[
|
||||
{
|
||||
"node": "AI Agent",
|
||||
"type": "ai_languageModel",
|
||||
"index": 0
|
||||
}
|
||||
]
|
||||
]
|
||||
},
|
||||
"Simple Memory": {
|
||||
"ai_memory": [
|
||||
[
|
||||
{
|
||||
"node": "AI Agent",
|
||||
"type": "ai_memory",
|
||||
"index": 0
|
||||
}
|
||||
]
|
||||
]
|
||||
},
|
||||
"PGVector Store": {
|
||||
"ai_tool": [
|
||||
[
|
||||
{
|
||||
"node": "AI Agent",
|
||||
"type": "ai_tool",
|
||||
"index": 0
|
||||
}
|
||||
]
|
||||
]
|
||||
},
|
||||
"Embeddings Ollama": {
|
||||
"ai_embedding": [
|
||||
[
|
||||
{
|
||||
"node": "PGVector Store",
|
||||
"type": "ai_embedding",
|
||||
"index": 0
|
||||
}
|
||||
]
|
||||
]
|
||||
},
|
||||
"On form submission": {
|
||||
"main": [
|
||||
[
|
||||
{
|
||||
"node": "Extract from File",
|
||||
"type": "main",
|
||||
"index": 0
|
||||
}
|
||||
]
|
||||
]
|
||||
},
|
||||
"Extract from File": {
|
||||
"main": [
|
||||
[
|
||||
{
|
||||
"node": "PGVector Store Insert",
|
||||
"type": "main",
|
||||
"index": 0
|
||||
}
|
||||
]
|
||||
]
|
||||
},
|
||||
"Embeddings Ollama1": {
|
||||
"ai_embedding": [
|
||||
[
|
||||
{
|
||||
"node": "PGVector Store Insert",
|
||||
"type": "ai_embedding",
|
||||
"index": 0
|
||||
}
|
||||
]
|
||||
]
|
||||
},
|
||||
"Default Data Loader": {
|
||||
"ai_document": [
|
||||
[
|
||||
{
|
||||
"node": "PGVector Store Insert",
|
||||
"type": "ai_document",
|
||||
"index": 0
|
||||
}
|
||||
]
|
||||
]
|
||||
}
|
||||
},
|
||||
"active": true,
|
||||
"settings": {
|
||||
"executionOrder": "v1"
|
||||
},
|
||||
"versionId": "6ebf0ac8-b8ab-49ee-b6f1-df0b606b3a33",
|
||||
"meta": {
|
||||
"instanceId": "a2179cec0884855b4d650fea20868c0dbbb03f0d0054c803c700fff052afc74c"
|
||||
},
|
||||
"id": "Q9Bm63B9ae8rAj95",
|
||||
"tags": []
|
||||
}
|
||||
5
credentials/.gitignore
vendored
Normal file
5
credentials/.gitignore
vendored
Normal file
@@ -0,0 +1,5 @@
|
||||
# Ignore all credential files
|
||||
*.json
|
||||
|
||||
# Except the example file
|
||||
!example-credentials.json
|
||||
52
credentials/example-credentials.json
Normal file
52
credentials/example-credentials.json
Normal file
@@ -0,0 +1,52 @@
|
||||
{
|
||||
"container": {
|
||||
"ctid": 769276659,
|
||||
"hostname": "sb-1769276659",
|
||||
"fqdn": "sb-1769276659.userman.de",
|
||||
"ip": "192.168.45.45",
|
||||
"vlan": 90
|
||||
},
|
||||
"urls": {
|
||||
"n8n_internal": "http://192.168.45.45:5678/",
|
||||
"n8n_external": "https://sb-1769276659.userman.de",
|
||||
"postgrest": "http://192.168.45.45:3000",
|
||||
"chat_webhook": "https://sb-1769276659.userman.de/webhook/rag-chat-webhook/chat",
|
||||
"chat_internal": "http://192.168.45.45:5678/webhook/rag-chat-webhook/chat",
|
||||
"upload_form": "https://sb-1769276659.userman.de/form/rag-upload-form",
|
||||
"upload_form_internal": "http://192.168.45.45:5678/form/rag-upload-form"
|
||||
},
|
||||
"postgres": {
|
||||
"host": "postgres",
|
||||
"port": 5432,
|
||||
"db": "customer",
|
||||
"user": "customer",
|
||||
"password": "EXAMPLE_PASSWORD"
|
||||
},
|
||||
"supabase": {
|
||||
"url": "http://postgrest:3000",
|
||||
"url_external": "http://192.168.45.45:3000",
|
||||
"anon_key": "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9...",
|
||||
"service_role_key": "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9...",
|
||||
"jwt_secret": "EXAMPLE_JWT_SECRET"
|
||||
},
|
||||
"ollama": {
|
||||
"url": "http://192.168.45.3:11434",
|
||||
"model": "ministral-3:3b",
|
||||
"embedding_model": "nomic-embed-text:latest"
|
||||
},
|
||||
"n8n": {
|
||||
"encryption_key": "EXAMPLE_ENCRYPTION_KEY",
|
||||
"owner_email": "admin@userman.de",
|
||||
"owner_password": "EXAMPLE_PASSWORD",
|
||||
"secure_cookie": false
|
||||
},
|
||||
"log_file": "/root/customer-installer/logs/sb-1769276659.log",
|
||||
"created_at": "2026-01-24T18:00:00+01:00",
|
||||
"updateable_fields": {
|
||||
"ollama_url": "Can be updated to use hostname instead of IP (e.g., http://ollama.local:11434)",
|
||||
"ollama_model": "Can be changed to different model (e.g., llama3.2:3b)",
|
||||
"embedding_model": "Can be changed to different embedding model",
|
||||
"postgres_password": "Can be updated (requires container restart)",
|
||||
"n8n_owner_password": "Can be updated (requires container restart)"
|
||||
}
|
||||
}
|
||||
389
delete_nginx_proxy.sh
Executable file
389
delete_nginx_proxy.sh
Executable file
@@ -0,0 +1,389 @@
|
||||
#!/usr/bin/env bash
set -Eeuo pipefail

# =============================================================================
# OPNsense NGINX Reverse Proxy Delete Script
# =============================================================================
# Deletes the NGINX reverse proxy components that were provisioned on
# OPNsense for an n8n instance, via the OPNsense API.
# =============================================================================

SCRIPT_VERSION="1.0.2"

# Debug mode: 0 = JSON output only, 1 = logs on stderr
DEBUG="${DEBUG:-0}"
export DEBUG
|
||||
|
||||
# --- Logging helpers ---------------------------------------------------------
# Diagnostics are written to stderr and only when DEBUG=1, so that stdout
# stays reserved for the script's machine-readable JSON result.

# Current timestamp in "[YYYY-MM-DD HH:MM:SS]" form.
log_ts() { date "+[%F %T]"; }

# Informational message (stderr, DEBUG=1 only). Always returns 0 so it is
# safe under `set -e` even when logging is disabled.
info() {
  if [[ "$DEBUG" == "1" ]]; then
    echo "$(log_ts) INFO: $*" >&2
  fi
  return 0
}

# Warning message (stderr, DEBUG=1 only). Always returns 0.
warn() {
  if [[ "$DEBUG" == "1" ]]; then
    echo "$(log_ts) WARN: $*" >&2
  fi
  return 0
}

# Fatal error: in debug mode log to stderr, otherwise emit a JSON error
# object on stdout (for callers parsing the output), then exit 1.
die() {
  if [[ "$DEBUG" == "1" ]]; then
    echo "$(log_ts) ERROR: $*" >&2
  else
    echo "{\"error\": \"$*\"}"
  fi
  exit 1
}
|
||||
|
||||
# =============================================================================
# Default Configuration
# =============================================================================
# Connection defaults for the OPNsense API. All values can be overridden via
# the environment (and host/port additionally via --opnsense-host/--opnsense-port).
OPNSENSE_HOST="${OPNSENSE_HOST:-192.168.45.1}"
OPNSENSE_PORT="${OPNSENSE_PORT:-4444}"

# SECURITY FIX: the API key/secret were previously hardcoded as fallback
# defaults, committing live credentials to version control. They must now be
# supplied via the OPNSENSE_API_KEY / OPNSENSE_API_SECRET environment
# variables; the old leaked key pair should be revoked on the firewall.
OPNSENSE_API_KEY="${OPNSENSE_API_KEY:-}"
OPNSENSE_API_SECRET="${OPNSENSE_API_SECRET:-}"
|
||||
|
||||
# =============================================================================
# Usage
# =============================================================================
# Print CLI help to stderr. The here-doc delimiter is quoted ('EOF') so the
# text is emitted literally, with no variable or command expansion.
usage() {
  cat >&2 <<'EOF'
Usage:
  bash delete_nginx_proxy.sh [options]

Required options:
  --ctid <id>             Container ID (used to find components by description)

Optional:
  --fqdn <domain>         Full domain name (to find HTTP Server by servername)
  --opnsense-host <ip>    OPNsense IP or hostname (default: 192.168.45.1)
  --opnsense-port <port>  OPNsense WebUI/API port (default: 4444)
  --dry-run               Show what would be deleted without actually deleting
  --debug                 Enable debug mode
  --help                  Show this help

Examples:
  # Delete proxy by CTID:
  bash delete_nginx_proxy.sh --ctid 768736636

  # Delete proxy with debug output:
  bash delete_nginx_proxy.sh --debug --ctid 768736636

  # Dry run (show what would be deleted):
  bash delete_nginx_proxy.sh --dry-run --ctid 768736636

  # Delete by CTID and FQDN:
  bash delete_nginx_proxy.sh --ctid 768736636 --fqdn sb-1768736636.userman.de
EOF
}
|
||||
|
||||
# =============================================================================
# Default values for arguments
# =============================================================================
CTID=""       # Container ID (required; validated after parsing)
FQDN=""       # Optional FQDN used to locate the HTTP Server entry
DRY_RUN="0"   # "1" = report what would be deleted without deleting anything

# =============================================================================
# Argument parsing
# =============================================================================
# Value-taking options consume two positional parameters (shift 2); boolean
# flags consume one. Any unknown option aborts via die().
while [[ $# -gt 0 ]]; do
  case "$1" in
    --ctid) CTID="${2:-}"; shift 2 ;;
    --fqdn) FQDN="${2:-}"; shift 2 ;;
    --opnsense-host) OPNSENSE_HOST="${2:-}"; shift 2 ;;
    --opnsense-port) OPNSENSE_PORT="${2:-}"; shift 2 ;;
    --dry-run) DRY_RUN="1"; shift 1 ;;
    --debug) DEBUG="1"; export DEBUG; shift 1 ;;
    --help|-h) usage; exit 0 ;;
    *) die "Unknown option: $1 (use --help)" ;;
  esac
done
|
||||
|
||||
# =============================================================================
# API Base URL
# =============================================================================
# Root of the OPNsense REST API; every endpoint path below is appended to it.
printf -v API_BASE 'https://%s:%s/api' "${OPNSENSE_HOST}" "${OPNSENSE_PORT}"
|
||||
|
||||
# =============================================================================
# API Helper Functions
# =============================================================================

# Perform an authenticated request against the OPNsense API.
# Arguments:
#   $1 - HTTP method (GET, POST, ...)
#   $2 - endpoint path, appended to API_BASE
#   $3 - optional JSON request body (adds Content-Type header and -d)
# Outputs: the raw response body (or curl's error text) on stdout.
# Note: -k skips TLS verification (self-signed firewall certificate).
api_request() {
  local verb="$1"
  local path="$2"
  local body="${3:-}"

  local target="${API_BASE}${path}"
  local credentials="${OPNSENSE_API_KEY}:${OPNSENSE_API_SECRET}"

  info "API ${verb} ${target}"

  # Build the curl invocation as an array so every argument stays one word.
  local curl_args=(-s -k -X "${verb}" -u "${credentials}")
  if [[ -n "$body" ]]; then
    curl_args+=(-H "Content-Type: application/json" -d "${body}")
  fi

  local reply
  reply=$(curl "${curl_args[@]}" "${target}" 2>&1)

  echo "$reply"
}
|
||||
|
||||
# Search a settings list endpoint and return the UUID of the first row whose
# 'description' field exactly matches the given string.
# Arguments:
#   $1 - search endpoint (e.g. /nginx/settings/searchLocation)
#   $2 - description to match (here: the CTID)
# Outputs: matching UUID on stdout, or an empty string if nothing matched.
search_by_description() {
  local search_endpoint="$1"
  local description="$2"

  local response
  response=$(api_request "GET" "${search_endpoint}")

  info "Search response for ${search_endpoint}: ${response:0:500}..."

  # Extract the UUID where the description matches. The embedded python
  # exits after the first hit, so at most one UUID is printed; JSON/parse
  # errors are suppressed (2>/dev/null, || true) and yield an empty result.
  local uuid
  uuid=$(echo "$response" | python3 -c "
import json, sys
desc = sys.argv[1] if len(sys.argv) > 1 else ''
try:
    data = json.load(sys.stdin)
    rows = data.get('rows', [])
    for row in rows:
        row_desc = row.get('description', '')
        if row_desc == desc:
            print(row.get('uuid', ''))
            sys.exit(0)
except Exception as e:
    print(f'Error: {e}', file=sys.stderr)
" "${description}" 2>/dev/null || true)

  info "Found UUID for description '${description}': ${uuid:-none}"
  echo "$uuid"
}
|
||||
|
||||
# Look up the UUID of the NGINX HTTP Server entry whose 'servername' field
# exactly matches the given FQDN.
# Arguments:
#   $1 - server name (FQDN) to match
# Outputs: matching UUID on stdout, or an empty string if nothing matched.
search_http_server_by_servername() {
  local servername="$1"

  local response
  response=$(api_request "GET" "/nginx/settings/searchHttpServer")

  info "HTTP Server search response: ${response:0:500}..."

  # Extract the UUID where the servername matches (first hit wins; parse
  # errors are suppressed and yield an empty result).
  local uuid
  uuid=$(echo "$response" | python3 -c "
import json, sys
sname = sys.argv[1] if len(sys.argv) > 1 else ''
try:
    data = json.load(sys.stdin)
    rows = data.get('rows', [])
    for row in rows:
        row_sname = row.get('servername', '')
        if row_sname == sname:
            print(row.get('uuid', ''))
            sys.exit(0)
except Exception as e:
    print(f'Error: {e}', file=sys.stderr)
" "${servername}" 2>/dev/null || true)

  info "Found HTTP Server UUID for servername '${servername}': ${uuid:-none}"
  echo "$uuid"
}
|
||||
|
||||
# =============================================================================
# Delete Functions
# =============================================================================

# Delete a single NGINX component by UUID via the given API endpoint.
# Arguments:
#   $1 - human-readable item type (logging only)
#   $2 - UUID to delete; empty means "nothing found" -> no-op
#   $3 - delete endpoint base (the UUID is appended as a path segment)
# Outputs: exactly one of "dry-run", "deleted" or "failed" on stdout; nothing
#          at all for an empty UUID (callers keep their "not_found" default).
delete_item() {
  local item_type="$1"
  local uuid="$2"
  local endpoint="$3"

  if [[ -z "$uuid" ]]; then
    info "No ${item_type} found to delete"
    return 0
  fi

  if [[ "$DRY_RUN" == "1" ]]; then
    info "[DRY-RUN] Would delete ${item_type}: ${uuid}"
    echo "dry-run"
    return 0
  fi

  info "Deleting ${item_type}: ${uuid}"
  local response
  response=$(api_request "POST" "${endpoint}/${uuid}")

  # OPNsense responds with {"result": "deleted"} on success; anything else
  # (including unparseable output) is treated as a failure.
  local result
  result=$(echo "$response" | python3 -c "import json,sys; print(json.load(sys.stdin).get('result','unknown'))" 2>/dev/null || echo "unknown")

  if [[ "$result" == "deleted" ]]; then
    info "${item_type} deleted successfully"
    echo "deleted"
  else
    warn "Failed to delete ${item_type}: ${response}"
    echo "failed"
  fi
}
|
||||
|
||||
# =============================================================================
# Validation
# =============================================================================
# --ctid is the only mandatory option; everything else has defaults.
[[ -n "$CTID" ]] || die "--ctid is required"

# Echo the effective configuration (visible only with DEBUG=1).
info "Script Version: ${SCRIPT_VERSION}"
info "Configuration:"
info "  CTID: ${CTID}"
info "  FQDN: ${FQDN:-auto-detect}"
info "  OPNsense: ${OPNSENSE_HOST}:${OPNSENSE_PORT}"
info "  Dry Run: ${DRY_RUN}"
|
||||
|
||||
# =============================================================================
# Main
# =============================================================================
# Orchestrates the teardown: locate and delete the four NGINX components
# (HTTP server, location, upstream, upstream server) that belong to the CTID,
# then apply the NGINX configuration and emit a JSON summary on stdout.
main() {
  info "Starting NGINX Reverse Proxy deletion for CTID ${CTID}..."

  # Components created by the installer carry the bare CTID as description.
  local description="${CTID}"
  local deleted_count=0
  local failed_count=0

  # Results tracking: each component ends up as one of
  # not_found / dry-run / deleted / failed (see delete_item).
  local http_server_result="not_found"
  local location_result="not_found"
  local upstream_result="not_found"
  local upstream_server_result="not_found"

  # Step 1: Find and delete HTTP Server
  info "Step 1: Finding HTTP Server..."
  local http_server_uuid=""

  # Try to find by FQDN first (most precise, if the caller supplied one).
  if [[ -n "$FQDN" ]]; then
    http_server_uuid=$(search_http_server_by_servername "${FQDN}")
  fi

  # If not found by FQDN, fall back to the installer's naming patterns.
  if [[ -z "$http_server_uuid" ]]; then
    # Try sb-<ctid>.userman.de pattern
    http_server_uuid=$(search_http_server_by_servername "sb-${CTID}.userman.de")
  fi

  if [[ -z "$http_server_uuid" ]]; then
    # Try sb-1<ctid>.userman.de pattern (with leading 1)
    http_server_uuid=$(search_http_server_by_servername "sb-1${CTID}.userman.de")
  fi

  if [[ -n "$http_server_uuid" ]]; then
    http_server_result=$(delete_item "HTTP Server" "$http_server_uuid" "/nginx/settings/delHttpServer")
    if [[ "$http_server_result" == "deleted" || "$http_server_result" == "dry-run" ]]; then
      deleted_count=$((deleted_count + 1))
    else
      failed_count=$((failed_count + 1))
    fi
  else
    info "No HTTP Server found for CTID ${CTID}"
  fi

  # Step 2: Find and delete Location
  info "Step 2: Finding Location..."
  local location_uuid
  location_uuid=$(search_by_description "/nginx/settings/searchLocation" "${description}")

  if [[ -n "$location_uuid" ]]; then
    location_result=$(delete_item "Location" "$location_uuid" "/nginx/settings/delLocation")
    if [[ "$location_result" == "deleted" || "$location_result" == "dry-run" ]]; then
      deleted_count=$((deleted_count + 1))
    else
      failed_count=$((failed_count + 1))
    fi
  else
    info "No Location found for CTID ${CTID}"
  fi

  # Step 3: Find and delete Upstream
  info "Step 3: Finding Upstream..."
  local upstream_uuid
  upstream_uuid=$(search_by_description "/nginx/settings/searchUpstream" "${description}")

  if [[ -n "$upstream_uuid" ]]; then
    upstream_result=$(delete_item "Upstream" "$upstream_uuid" "/nginx/settings/delUpstream")
    if [[ "$upstream_result" == "deleted" || "$upstream_result" == "dry-run" ]]; then
      deleted_count=$((deleted_count + 1))
    else
      failed_count=$((failed_count + 1))
    fi
  else
    info "No Upstream found for CTID ${CTID}"
  fi

  # Step 4: Find and delete Upstream Server
  info "Step 4: Finding Upstream Server..."
  local upstream_server_uuid
  upstream_server_uuid=$(search_by_description "/nginx/settings/searchUpstreamServer" "${description}")

  if [[ -n "$upstream_server_uuid" ]]; then
    upstream_server_result=$(delete_item "Upstream Server" "$upstream_server_uuid" "/nginx/settings/delUpstreamServer")
    if [[ "$upstream_server_result" == "deleted" || "$upstream_server_result" == "dry-run" ]]; then
      deleted_count=$((deleted_count + 1))
    else
      failed_count=$((failed_count + 1))
    fi
  else
    info "No Upstream Server found for CTID ${CTID}"
  fi

  # Step 5: Apply configuration (if not dry-run and something was deleted)
  local reconfigure_result="skipped"
  if [[ "$DRY_RUN" != "1" && $deleted_count -gt 0 ]]; then
    info "Step 5: Applying NGINX configuration..."
    local response
    response=$(api_request "POST" "/nginx/service/reconfigure" "{}")

    local status
    status=$(echo "$response" | python3 -c "import json,sys; print(json.load(sys.stdin).get('status',''))" 2>/dev/null || echo "unknown")

    if [[ "$status" == "ok" ]]; then
      info "NGINX configuration applied successfully"
      reconfigure_result="ok"
    else
      warn "NGINX reconfigure status: ${status}"
      reconfigure_result="failed"
    fi
  elif [[ "$DRY_RUN" == "1" ]]; then
    info "[DRY-RUN] Would apply NGINX configuration"
    reconfigure_result="dry-run"
  fi

  # Output result as JSON. The here-doc delimiter is unquoted so the
  # ${...} variables and the dry_run command substitution are expanded.
  local success="true"
  [[ $failed_count -gt 0 ]] && success="false"

  local result
  result=$(cat <<EOF
{
  "success": ${success},
  "dry_run": $([[ "$DRY_RUN" == "1" ]] && echo "true" || echo "false"),
  "ctid": "${CTID}",
  "deleted_count": ${deleted_count},
  "failed_count": ${failed_count},
  "components": {
    "http_server": "${http_server_result}",
    "location": "${location_result}",
    "upstream": "${upstream_result}",
    "upstream_server": "${upstream_server_result}"
  },
  "reconfigure": "${reconfigure_result}"
}
EOF
)

  if [[ "$DEBUG" == "1" ]]; then
    echo "$result"
  else
    # Compact JSON on stdout; fall back to the pretty form if python3 fails.
    echo "$result" | python3 -c "import json,sys; print(json.dumps(json.load(sys.stdin)))" 2>/dev/null || echo "$result"
  fi
}

main
|
||||
731
install.sh
Executable file
731
install.sh
Executable file
@@ -0,0 +1,731 @@
|
||||
#!/usr/bin/env bash
set -Eeuo pipefail

# Debug mode: 0 = JSON output only, 1 = logs on stderr
DEBUG="${DEBUG:-0}"
export DEBUG

# Absolute directory this script lives in (independent of the caller's CWD).
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"

# Log directory
LOG_DIR="${SCRIPT_DIR}/logs"
mkdir -p "${LOG_DIR}"

# Temporary log file ($$ = this shell's PID); renamed later to the
# container's hostname once it is known (see cleanup_log).
TEMP_LOG="${LOG_DIR}/install_$$.log"
FINAL_LOG=""
|
||||
|
||||
# Exit handler: once the container hostname is known, FINAL_LOG is set and
# the temporary install log gets renamed to its final, hostname-based path.
# A no-op while FINAL_LOG is still empty or the temp log no longer exists.
cleanup_log() {
  if [[ -z "${FINAL_LOG}" ]]; then
    return 0
  fi
  if [[ -f "${TEMP_LOG}" ]]; then
    mv "${TEMP_LOG}" "${FINAL_LOG}"
  fi
}
trap cleanup_log EXIT
|
||||
|
||||
# Redirect all output into the log file.
# DEBUG=1: also echo everything through tee
# DEBUG=0: file only, stdout stays reserved for the final JSON
if [[ "$DEBUG" == "1" ]]; then
  # Debug mode: mirror output via tee AND append to the log file
  exec > >(tee -a "${TEMP_LOG}") 2>&1
else
  # Normal mode: log file only; the original stdout is duplicated onto fd 3
  # so the final JSON result can still be written to the caller.
  exec 3>&1  # reserve stdout (fd 3) for JSON
  exec > "${TEMP_LOG}" 2>&1
fi

# Shared helper library (provides setup_traps and the provisioning helpers).
source "${SCRIPT_DIR}/libsupabase.sh"
setup_traps
|
||||
|
||||
# Print CLI help to stderr. The here-doc delimiter is quoted ('EOF') so the
# text is emitted literally, with no variable or command expansion.
usage() {
  cat >&2 <<'EOF'
Usage:
  bash install.sh [options]

Core options:
  --ctid <id>             Force CT ID (optional). If omitted, a customer-safe CTID is generated.
  --cores <n>             (default: unlimited)
  --memory <mb>           (default: 4096)
  --swap <mb>             (default: 512)
  --disk <gb>             (default: 50)
  --bridge <vmbrX>        (default: vmbr0)
  --storage <storage>     (default: local-zfs)
  --ip <dhcp|CIDR>        (default: dhcp)
  --vlan <id>             VLAN tag for net0 (default: 90; set 0 to disable)
  --privileged            Create privileged CT (default: unprivileged)
  --apt-proxy <url>       Optional: APT proxy (e.g. http://192.168.45.2:3142) for Apt-Cacher NG

Domain / n8n options:
  --base-domain <domain>      (default: userman.de) -> FQDN becomes sb-<unix>.domain
  --n8n-owner-email <email>   (default: admin@<base-domain>)
  --n8n-owner-pass <pass>     Optional. If omitted, generated (policy compliant).
  --workflow-file <path>      Path to n8n workflow JSON file (default: RAGKI-BotPGVector.json)
  --ollama-model <model>      Ollama chat model (default: ministral-3:3b)
  --embedding-model <model>   Ollama embedding model (default: nomic-embed-text:latest)
  --debug                     Enable debug mode (show logs on stderr)
  --help                      Show help

PostgREST / Supabase options:
  --postgrest-port <port>     PostgREST port (default: 3000)

Notes:
  - This script creates a Debian 12 LXC and provisions Docker + customer stack (Postgres/pgvector + n8n + PostgREST).
  - PostgREST provides a REST API for PostgreSQL, compatible with Supabase Vector Store node in n8n.
  - At the end it prints a JSON with credentials and URLs.
EOF
}
|
||||
|
||||
# Defaults
#APT_PROXY="http://192.168.45.2:3142"
DOCKER_REGISTRY_MIRROR="http://192.168.45.2:5000"
APT_PROXY=""
#DOCKER_REGISTRY_MIRROR=""
CTID=""              # empty -> a customer-safe CTID is generated later
CORES="4"            # NOTE(review): usage text claims default "unlimited" — values disagree; confirm intended default
MEMORY="4096"        # MB
SWAP="512"           # MB
DISK="50"            # GB
BRIDGE="vmbr0"
STORAGE="local-zfs"
IPCFG="dhcp"         # either "dhcp" or a static CIDR
VLAN="90"            # net0 VLAN tag; 0 disables tagging
UNPRIV="1"           # 1 = unprivileged CT (default), 0 = privileged (--privileged)

BASE_DOMAIN="userman.de"
N8N_OWNER_EMAIL=""   # empty -> defaults to admin@<base-domain>
N8N_OWNER_PASS=""    # empty -> generated (policy compliant)
POSTGREST_PORT="3000"

# Workflow file (default: RAGKI-BotPGVector.json in script directory)
WORKFLOW_FILE="${SCRIPT_DIR}/RAGKI-BotPGVector.json"

# Ollama API settings (hardcoded for local setup)
OLLAMA_HOST="192.168.45.3"
OLLAMA_PORT="11434"
OLLAMA_URL="http://${OLLAMA_HOST}:${OLLAMA_PORT}"

# Ollama models (can be overridden via CLI)
OLLAMA_MODEL="ministral-3:3b"
EMBEDDING_MODEL="nomic-embed-text:latest"
|
||||
|
||||
# ---------------------------
# Arg parsing
# ---------------------------
# "${2?...}" aborts with a clear message when an option's value is missing
# (a trailing option with no value used to die later on "shift 2" with a
# confusing error). An explicit empty string is still accepted, matching the
# old "${2:-}" behavior.
while [[ $# -gt 0 ]]; do
  case "$1" in
    --ctid)             CTID="${2?--ctid requires a value}"; shift 2 ;;
    --apt-proxy)        APT_PROXY="${2?--apt-proxy requires a value}"; shift 2 ;;
    --cores)            CORES="${2?--cores requires a value}"; shift 2 ;;
    --memory)           MEMORY="${2?--memory requires a value}"; shift 2 ;;
    --swap)             SWAP="${2?--swap requires a value}"; shift 2 ;;
    --disk)             DISK="${2?--disk requires a value}"; shift 2 ;;
    --bridge)           BRIDGE="${2?--bridge requires a value}"; shift 2 ;;
    --storage)          STORAGE="${2?--storage requires a value}"; shift 2 ;;
    --ip)               IPCFG="${2?--ip requires a value}"; shift 2 ;;
    --vlan)             VLAN="${2?--vlan requires a value}"; shift 2 ;;
    --privileged)       UNPRIV="0"; shift 1 ;;
    --base-domain)      BASE_DOMAIN="${2?--base-domain requires a value}"; shift 2 ;;
    --n8n-owner-email)  N8N_OWNER_EMAIL="${2?--n8n-owner-email requires a value}"; shift 2 ;;
    --n8n-owner-pass)   N8N_OWNER_PASS="${2?--n8n-owner-pass requires a value}"; shift 2 ;;
    --workflow-file)    WORKFLOW_FILE="${2?--workflow-file requires a value}"; shift 2 ;;
    --ollama-model)     OLLAMA_MODEL="${2?--ollama-model requires a value}"; shift 2 ;;
    --embedding-model)  EMBEDDING_MODEL="${2?--embedding-model requires a value}"; shift 2 ;;
    --postgrest-port)   POSTGREST_PORT="${2?--postgrest-port requires a value}"; shift 2 ;;
    --debug)            DEBUG="1"; export DEBUG; shift 1 ;;
    --help|-h)          usage; exit 0 ;;
    *)                  die "Unknown option: $1 (use --help)" ;;
  esac
done
|
||||
|
||||
# ---------------------------
# Validation
# ---------------------------
[[ "$CORES" =~ ^[0-9]+$ ]] || die "--cores must be integer"
[[ "$MEMORY" =~ ^[0-9]+$ ]] || die "--memory must be integer"
[[ "$SWAP" =~ ^[0-9]+$ ]] || die "--swap must be integer"
[[ "$DISK" =~ ^[0-9]+$ ]] || die "--disk must be integer"
[[ "$UNPRIV" == "0" || "$UNPRIV" == "1" ]] || die "internal: UNPRIV invalid"
[[ "$VLAN" =~ ^[0-9]+$ ]] || die "--vlan must be integer (0 disables tagging)"
# 802.1Q VLAN IDs end at 4094 (0 means "no tagging" here).
[[ "$VLAN" -le 4094 ]] || die "--vlan must be 0..4094"
[[ -n "$BASE_DOMAIN" ]] || die "--base-domain must not be empty"
[[ "$POSTGREST_PORT" =~ ^[0-9]+$ ]] || die "--postgrest-port must be integer"

if [[ "$IPCFG" != "dhcp" ]]; then
  [[ "$IPCFG" =~ ^([0-9]{1,3}\.){3}[0-9]{1,3}/[0-9]{1,2}$ ]] || die "--ip must be dhcp or CIDR (e.g. 192.168.45.171/24)"
  # The regex alone allows prefixes up to /99 — reject anything above /32.
  [[ "${IPCFG##*/}" -le 32 ]] || die "--ip prefix length must be 0..32"
fi

if [[ -n "${APT_PROXY}" ]]; then
  [[ "${APT_PROXY}" =~ ^http://[^/]+:[0-9]+$ ]] || die "--apt-proxy must look like http://IP:PORT (example: http://192.168.45.2:3142)"
fi

# Validate workflow file exists
if [[ ! -f "${WORKFLOW_FILE}" ]]; then
  die "Workflow file not found: ${WORKFLOW_FILE}"
fi
|
||||
|
||||
# Summarize the effective configuration (visible only with --debug).
info "Argument-Parsing OK"
info "Workflow file: ${WORKFLOW_FILE}"
info "Ollama model: ${OLLAMA_MODEL}"
info "Embedding model: ${EMBEDDING_MODEL}"

if [[ -n "${APT_PROXY}" ]]; then
  info "APT proxy enabled: ${APT_PROXY}"
else
  info "APT proxy disabled"
fi
|
||||
|
||||
|
||||
# ---------------------------
# Preflight Proxmox
# ---------------------------
# Fail fast if any required host-side tool is missing.
need_cmd pct pvesm pveam pvesh grep date awk sed cut tr head

pve_storage_exists "$STORAGE" || die "Storage not found: $STORAGE"
pve_bridge_exists "$BRIDGE" || die "Bridge not found: $BRIDGE"

# Ensures the Debian 12 CT template is downloaded; prints "<storage>:vztmpl/<file>".
TEMPLATE="$(pve_template_ensure_debian12 "$STORAGE")"
info "Template OK: ${TEMPLATE}"

# Hostname / FQDN based on unix time
UNIXTS="$(date +%s)"
CT_HOSTNAME="sb-${UNIXTS}"
FQDN="${CT_HOSTNAME}.${BASE_DOMAIN}"

# Name the log file after the container hostname.
# NOTE(review): LOG_DIR is expected to be defined earlier in the script — confirm.
FINAL_LOG="${LOG_DIR}/${CT_HOSTNAME}.log"
|
||||
|
||||
# CTID selection: honor a forced --ctid, otherwise derive one from the
# timestamp. Either way the ID must be free cluster-wide.
if [[ -z "$CTID" ]]; then
  # Your agreed approach: unix time - 1000000000 (safe until 2038)
  CTID="$(pve_ctid_from_unixtime "$UNIXTS")"
  ! pve_vmid_exists_cluster "$CTID" || die "Generated CTID=${CTID} already exists in cluster (unexpected). Try again in 1s."
else
  [[ "$CTID" =~ ^[0-9]+$ ]] || die "--ctid must be integer"
  ! pve_vmid_exists_cluster "$CTID" || die "Forced CTID=${CTID} already exists in cluster"
fi
|
||||
|
||||
# n8n owner defaults: derive the email from the base domain when not given,
# and generate or validate the owner password.
N8N_OWNER_EMAIL="${N8N_OWNER_EMAIL:-admin@${BASE_DOMAIN}}"

if [[ -z "$N8N_OWNER_PASS" ]]; then
  N8N_OWNER_PASS="$(gen_password_policy)"
else
  # enforce policy early to avoid the UI error you saw
  password_policy_check "$N8N_OWNER_PASS" || die "--n8n-owner-pass does not meet policy: 8+ chars, 1 number, 1 uppercase"
fi
|
||||
|
||||
# Log the final parameters used for "pct create" (debug only).
info "CTID selected: ${CTID}"
info "SCRIPT_DIR=${SCRIPT_DIR}"
info "CT_HOSTNAME=${CT_HOSTNAME}"
info "FQDN=${FQDN}"
info "cores=${CORES} memory=${MEMORY}MB swap=${SWAP}MB disk=${DISK}GB"
info "bridge=${BRIDGE} storage=${STORAGE} ip=${IPCFG} vlan=${VLAN} unprivileged=${UNPRIV}"
|
||||
|
||||
# ---------------------------
# Step 5: Create CT
# ---------------------------
NET0="$(pve_build_net0 "$BRIDGE" "$IPCFG" "$VLAN")"
ROOTFS="${STORAGE}:${DISK}"
# nesting+keyctl+fuse are required to run Docker inside an unprivileged LXC.
FEATURES="nesting=1,keyctl=1,fuse=1"

info "Step 5: Create CT"
info "Creating CT ${CTID} (${CT_HOSTNAME}) from ${TEMPLATE}"
pct create "${CTID}" "${TEMPLATE}" \
  --hostname "${CT_HOSTNAME}" \
  --cores "${CORES}" \
  --memory "${MEMORY}" \
  --swap "${SWAP}" \
  --net0 "${NET0}" \
  --rootfs "${ROOTFS}" \
  --unprivileged "${UNPRIV}" \
  --features "${FEATURES}" \
  --start 0 \
  --onboot yes  # NOTE(review): pct boolean options usually take 1/0 — confirm "yes" is accepted by this PVE version

info "CT created (not started). Next step: start CT + wait for IP"
info "Starting CT ${CTID}"
pct start "${CTID}"

# Poll for a global-scope IPv4 address (up to ~40s); an empty result is fatal.
CT_IP="$(pct_wait_for_ip "${CTID}" || true)"
[[ -n "${CT_IP}" ]] || die "Could not determine CT IP after start"

info "Step 5 OK: LXC erstellt + IP ermittelt"
info "CT_HOSTNAME=${CT_HOSTNAME}"
info "CT_IP=${CT_IP}"
|
||||
|
||||
# ---------------------------
# Step 6: Provision inside CT (Docker + Locales + Base)
# ---------------------------
info "Step 6: Provisioning im CT (Docker + Locales + Base)"

# Optional: APT proxy (Apt-Cacher NG)
# ${APT_PROXY} expands on the HOST (outer double quotes) before the command
# string is sent into the CT; the quoted 'EOF' only keeps the CT-side shell
# from re-expanding anything.
if [[ -n "${APT_PROXY}" ]]; then
  pct_exec "${CTID}" "cat > /etc/apt/apt.conf.d/00aptproxy <<'EOF'
Acquire::http::Proxy \"${APT_PROXY}\";
#Acquire::https::Proxy \"DIRECT\";
Acquire::https::Proxy \"${APT_PROXY}\";
EOF"
  # Show the effective proxy config in the debug log (best effort).
  pct_exec "$CTID" "apt-config dump | grep -i proxy || true"
fi
|
||||
|
||||
# Minimal base packages + locales.
# Fixed: this used to run "apt-get update" twice and install
# ca-certificates/curl/gnupg/lsb-release twice; one pass is enough.
pct_exec "${CTID}" "export DEBIAN_FRONTEND=noninteractive; apt-get update -y"
pct_exec "${CTID}" "export DEBIAN_FRONTEND=noninteractive; apt-get install -y locales ca-certificates curl gnupg lsb-release"

# Locales (avoid perl warnings + consistent system)
pct_exec "${CTID}" "sed -i 's/^# *de_DE.UTF-8 UTF-8/de_DE.UTF-8 UTF-8/; s/^# *en_US.UTF-8 UTF-8/en_US.UTF-8 UTF-8/' /etc/locale.gen || true"
pct_exec "${CTID}" "locale-gen >/dev/null || true"
pct_exec "${CTID}" "update-locale LANG=de_DE.UTF-8 LC_ALL=de_DE.UTF-8 || true"
|
||||
|
||||
# Docker official repo (Debian 12 / bookworm)
pct_exec "${CTID}" "install -m 0755 -d /etc/apt/keyrings"
# Fetch and dearmor Docker's signing key.
pct_exec "${CTID}" "curl -fsSL https://download.docker.com/linux/debian/gpg | gpg --dearmor -o /etc/apt/keyrings/docker.gpg"
pct_exec "${CTID}" "chmod a+r /etc/apt/keyrings/docker.gpg"
# The \$(...) substitutions are escaped so dpkg/os-release run INSIDE the CT.
pct_exec "${CTID}" "echo \"deb [arch=\$(dpkg --print-architecture) signed-by=/etc/apt/keyrings/docker.gpg] https://download.docker.com/linux/debian \$(. /etc/os-release && echo \$VERSION_CODENAME) stable\" > /etc/apt/sources.list.d/docker.list"
pct_exec "${CTID}" "export DEBIAN_FRONTEND=noninteractive; apt-get update -y"
pct_exec "${CTID}" "export DEBIAN_FRONTEND=noninteractive; apt-get install -y docker-ce docker-ce-cli containerd.io docker-buildx-plugin docker-compose-plugin"
|
||||
|
||||
# Create stack directories
pct_exec "${CTID}" "mkdir -p /opt/customer-stack/volumes/postgres/data /opt/customer-stack/volumes/n8n-data /opt/customer-stack/sql"
# IMPORTANT: n8n runs as node (uid 1000) => fix permissions
pct_exec "${CTID}" "chown -R 1000:1000 /opt/customer-stack/volumes/n8n-data"

info "Step 6 OK: Docker + Compose Plugin installiert, Locales gesetzt, Basis-Verzeichnisse erstellt"
info "Next: Schritt 7 (finales docker-compose + Secrets + n8n/supabase up + Healthchecks)"
|
||||
|
||||
# ---------------------------
# Step 7: Stack finalisieren + Secrets + Up + Checks
# ---------------------------
info "Step 7: Stack finalisieren + Secrets + Up + Checks"

# Secrets (generated fresh per container).
PG_DB="customer"
PG_USER="customer"
PG_PASSWORD="$(gen_password_policy)"
N8N_ENCRYPTION_KEY="$(gen_hex_64)"

# External URL is HTTPS via OPNsense reverse proxy (but container internally is http)
N8N_PORT="5678"
N8N_PROTOCOL="http"
N8N_HOST="${CT_IP}"
N8N_EDITOR_BASE_URL="https://${FQDN}/"
WEBHOOK_URL="https://${FQDN}/"

# If you are behind HTTPS reverse proxy, secure cookies can be true.
# But until proxy is in place, false avoids login trouble.
N8N_SECURE_COOKIE="false"

# Generate JWT secret for PostgREST (32 bytes = 256 bit)
JWT_SECRET="$(openssl rand -base64 32 | tr -d '\n')"
|
||||
|
||||
# Build proper HS256 JWTs (header.payload.signature) for PostgREST.
# Every segment is base64url (RFC 7515): '+'->'-', '/'->'_', padding stripped.
b64url_encode() {
  base64 | tr -d '\n' | tr '+/' '-_' | tr -d '='
}

JWT_HEADER="$(printf '%s' '{"alg":"HS256","typ":"JWT"}' | b64url_encode)"
ANON_PAYLOAD="$(printf '%s' '{"role":"anon","iss":"supabase","iat":1700000000,"exp":2000000000}' | b64url_encode)"
SERVICE_PAYLOAD="$(printf '%s' '{"role":"service_role","iss":"supabase","iat":1700000000,"exp":2000000000}' | b64url_encode)"

# Sign "header.payload" with HMAC-SHA256 using the PostgREST JWT secret.
ANON_SIGNATURE="$(printf '%s' "${JWT_HEADER}.${ANON_PAYLOAD}" | openssl dgst -sha256 -hmac "${JWT_SECRET}" -binary | b64url_encode)"
SERVICE_SIGNATURE="$(printf '%s' "${JWT_HEADER}.${SERVICE_PAYLOAD}" | openssl dgst -sha256 -hmac "${JWT_SECRET}" -binary | b64url_encode)"

ANON_KEY="${JWT_HEADER}.${ANON_PAYLOAD}.${ANON_SIGNATURE}"
SERVICE_ROLE_KEY="${JWT_HEADER}.${SERVICE_PAYLOAD}.${SERVICE_SIGNATURE}"

info "Generated JWT Secret and API Keys for PostgREST"
|
||||
|
||||
# Write .env into CT.
# The inner heredoc delimiter is UNquoted, so every ${...} below is expanded
# on the Proxmox host before the rendered file content is pushed into the CT.
pct_push_text "${CTID}" "/opt/customer-stack/.env" "$(cat <<EOF
PG_DB=${PG_DB}
PG_USER=${PG_USER}
PG_PASSWORD=${PG_PASSWORD}

N8N_PORT=${N8N_PORT}
N8N_PROTOCOL=${N8N_PROTOCOL}
N8N_HOST=${N8N_HOST}
N8N_EDITOR_BASE_URL=${N8N_EDITOR_BASE_URL}
WEBHOOK_URL=${WEBHOOK_URL}
N8N_SECURE_COOKIE=${N8N_SECURE_COOKIE}

N8N_ENCRYPTION_KEY=${N8N_ENCRYPTION_KEY}

# Telemetrie/Background Calls aus
N8N_DIAGNOSTICS_ENABLED=false
N8N_VERSION_NOTIFICATIONS_ENABLED=false
N8N_TEMPLATES_ENABLED=false

# PostgREST / Supabase API
POSTGREST_PORT=${POSTGREST_PORT}
JWT_SECRET=${JWT_SECRET}
ANON_KEY=${ANON_KEY}
SERVICE_ROLE_KEY=${SERVICE_ROLE_KEY}
EOF
)"
|
||||
|
||||
# init sql for pgvector + Supabase Vector Store schema.
# The 'SQL' heredoc delimiter is quoted, so the $$ function bodies and all
# SQL text are pushed verbatim (no host-side expansion).
# Fixed: match_documents selected d.content, but the documents table declares
# the column as "text" — the RPC failed at runtime with
# 'column d.content does not exist'. The SELECT now reads d.text; the output
# column is still named "content" via RETURNS TABLE, which is what the
# Supabase/LangChain vector-store client expects.
pct_push_text "${CTID}" "/opt/customer-stack/sql/init_pgvector.sql" "$(cat <<'SQL'
-- Enable extensions
CREATE EXTENSION IF NOT EXISTS vector;
CREATE EXTENSION IF NOT EXISTS pg_trgm;

-- Create schema for API
CREATE SCHEMA IF NOT EXISTS api;

-- Create documents table for Vector Store (n8n PGVector Store compatible)
CREATE TABLE IF NOT EXISTS public.documents (
id BIGSERIAL PRIMARY KEY,
text TEXT,
metadata JSONB,
embedding VECTOR(768) -- nomic-embed-text uses 768 dimensions
);

-- Create index for vector similarity search
CREATE INDEX IF NOT EXISTS documents_embedding_idx ON public.documents
USING ivfflat (embedding vector_cosine_ops)
WITH (lists = 100);

-- Create the match_documents function for similarity search (Supabase/LangChain compatible)
CREATE OR REPLACE FUNCTION public.match_documents(
query_embedding VECTOR(768),
match_count INT DEFAULT 5,
filter JSONB DEFAULT '{}'
)
RETURNS TABLE (
id BIGINT,
content TEXT,
metadata JSONB,
similarity FLOAT
)
LANGUAGE plpgsql
AS $$
BEGIN
RETURN QUERY
SELECT
d.id,
d.text,
d.metadata,
1 - (d.embedding <=> query_embedding) AS similarity
FROM public.documents d
WHERE (filter = '{}' OR d.metadata @> filter)
ORDER BY d.embedding <=> query_embedding
LIMIT match_count;
END;
$$;

-- Grant permissions for PostgREST roles
-- Create roles if they don't exist
DO $$
BEGIN
IF NOT EXISTS (SELECT FROM pg_roles WHERE rolname = 'anon') THEN
CREATE ROLE anon NOLOGIN;
END IF;
IF NOT EXISTS (SELECT FROM pg_roles WHERE rolname = 'service_role') THEN
CREATE ROLE service_role NOLOGIN;
END IF;
IF NOT EXISTS (SELECT FROM pg_roles WHERE rolname = 'authenticator') THEN
CREATE ROLE authenticator NOINHERIT LOGIN PASSWORD 'authenticator_password';
END IF;
END
$$;

-- Grant permissions
GRANT USAGE ON SCHEMA public TO anon, service_role;
GRANT ALL ON ALL TABLES IN SCHEMA public TO anon, service_role;
GRANT ALL ON ALL SEQUENCES IN SCHEMA public TO anon, service_role;
GRANT EXECUTE ON ALL FUNCTIONS IN SCHEMA public TO anon, service_role;

-- Allow authenticator to switch to these roles
GRANT anon TO authenticator;
GRANT service_role TO authenticator;

-- Set default privileges for future tables
ALTER DEFAULT PRIVILEGES IN SCHEMA public GRANT ALL ON TABLES TO anon, service_role;
ALTER DEFAULT PRIVILEGES IN SCHEMA public GRANT ALL ON SEQUENCES TO anon, service_role;
ALTER DEFAULT PRIVILEGES IN SCHEMA public GRANT EXECUTE ON FUNCTIONS TO anon, service_role;
SQL
)"
|
||||
|
||||
# docker-compose.yml
# The 'YML' delimiter is quoted, so the ${...} placeholders below are NOT
# expanded by this script — docker compose resolves them from the .env file
# written above. Three services: postgres (pgvector), postgrest, n8n.
pct_push_text "${CTID}" "/opt/customer-stack/docker-compose.yml" "$(cat <<'YML'
services:
  postgres:
    image: pgvector/pgvector:pg16
    container_name: customer-postgres
    restart: unless-stopped
    environment:
      POSTGRES_DB: ${PG_DB}
      POSTGRES_USER: ${PG_USER}
      POSTGRES_PASSWORD: ${PG_PASSWORD}
    volumes:
      - ./volumes/postgres/data:/var/lib/postgresql/data
      - ./sql:/docker-entrypoint-initdb.d:ro
    healthcheck:
      test: ["CMD-SHELL", "pg_isready -U ${PG_USER} -d ${PG_DB} || exit 1"]
      interval: 10s
      timeout: 5s
      retries: 20
    networks:
      - customer-net

  postgrest:
    image: postgrest/postgrest:latest
    container_name: customer-postgrest
    restart: unless-stopped
    depends_on:
      postgres:
        condition: service_healthy
    ports:
      - "${POSTGREST_PORT}:3000"
    environment:
      PGRST_DB_URI: postgres://${PG_USER}:${PG_PASSWORD}@postgres:5432/${PG_DB}
      PGRST_DB_SCHEMA: public
      PGRST_DB_ANON_ROLE: anon
      PGRST_JWT_SECRET: ${JWT_SECRET}
      PGRST_DB_USE_LEGACY_GUCS: "false"
    networks:
      - customer-net

  n8n:
    image: n8nio/n8n:latest
    container_name: n8n
    restart: unless-stopped
    depends_on:
      postgres:
        condition: service_healthy
      postgrest:
        condition: service_started
    ports:
      - "${N8N_PORT}:5678"
    environment:
      # --- Web / Cookies / URL ---
      N8N_PORT: 5678
      N8N_PROTOCOL: ${N8N_PROTOCOL}
      N8N_HOST: ${N8N_HOST}
      N8N_EDITOR_BASE_URL: ${N8N_EDITOR_BASE_URL}
      WEBHOOK_URL: ${WEBHOOK_URL}
      N8N_SECURE_COOKIE: ${N8N_SECURE_COOKIE}

      # --- Disable telemetry / background calls ---
      N8N_DIAGNOSTICS_ENABLED: ${N8N_DIAGNOSTICS_ENABLED}
      N8N_VERSION_NOTIFICATIONS_ENABLED: ${N8N_VERSION_NOTIFICATIONS_ENABLED}
      N8N_TEMPLATES_ENABLED: ${N8N_TEMPLATES_ENABLED}

      # --- DB (Postgres) ---
      DB_TYPE: postgresdb
      DB_POSTGRESDB_HOST: postgres
      DB_POSTGRESDB_PORT: 5432
      DB_POSTGRESDB_DATABASE: ${PG_DB}
      DB_POSTGRESDB_USER: ${PG_USER}
      DB_POSTGRESDB_PASSWORD: ${PG_PASSWORD}

      # --- Basics ---
      GENERIC_TIMEZONE: Europe/Berlin
      TZ: Europe/Berlin

      N8N_ENCRYPTION_KEY: ${N8N_ENCRYPTION_KEY}

    volumes:
      - ./volumes/n8n-data:/home/node/.n8n
    networks:
      - customer-net

networks:
  customer-net:
    driver: bridge
YML
)"
|
||||
|
||||
# Make sure permissions are correct (again, after file writes)
pct_exec "${CTID}" "chown -R 1000:1000 /opt/customer-stack/volumes/n8n-data"

# Docker registry mirror (local pull-through cache).
# Fixed: this block was gated on APT_PROXY, so with the shipped defaults
# (APT_PROXY empty, DOCKER_REGISTRY_MIRROR set) the mirror was never applied.
# It now keys off the variable it actually configures.
if [[ -n "${DOCKER_REGISTRY_MIRROR}" ]]; then
  pct_exec "$CTID" "mkdir -p /etc/docker"

  # Unquoted EOF: ${DOCKER_REGISTRY_MIRROR} expands on the host.
  pct_exec "$CTID" "cat > /etc/docker/daemon.json <<EOF
{
  \"registry-mirrors\": [\"${DOCKER_REGISTRY_MIRROR}\"]
}
EOF"

  pct_exec "$CTID" "systemctl restart docker"
  pct_exec "$CTID" "systemctl is-active docker"
  pct_exec "$CTID" "docker info | grep -A2 -i 'Registry Mirrors'"
fi
|
||||
|
||||
# Pull + up
pct_exec "${CTID}" "cd /opt/customer-stack && docker compose pull"
pct_exec "${CTID}" "cd /opt/customer-stack && docker compose up -d"
pct_exec "${CTID}" "cd /opt/customer-stack && docker compose ps"

# --- Owner account creation (robust way) ---
# n8n shows the setup screen if no user exists.
# We create the owner via CLI inside the container.
# Warm-up/probe call — also verifies the n8n CLI is reachable (best effort).
pct_exec "${CTID}" "cd /opt/customer-stack && docker exec -u node n8n n8n --help >/dev/null 2>&1 || true"

# Try modern command first (works in current n8n builds); if it fails, we leave setup screen (but you'll see it in logs).
# NOTE(review): the owner password is interpolated into the command line and
# may be visible in process listings inside the CT — confirm acceptable.
pct_exec "${CTID}" "cd /opt/customer-stack && (docker exec -u node n8n n8n user-management:reset --email '${N8N_OWNER_EMAIL}' --password '${N8N_OWNER_PASS}' --firstName 'Admin' --lastName 'Owner' >/dev/null 2>&1 || true)"

info "Step 7 OK: Stack deployed"
|
||||
|
||||
# ---------------------------
# Step 8: Setup Owner Account via REST API (fallback)
# ---------------------------
info "Step 8: Setting up owner account via REST API..."

# Wait for n8n to be ready
# NOTE(review): a fixed sleep is racy — polling the HTTP port until it
# answers would be more reliable.
sleep 5

# Try REST API setup (works if user-management:reset didn't work)
# The endpoint is a no-op once an owner already exists; "|| true" keeps the
# step best-effort either way.
pct_exec "${CTID}" "curl -sS -X POST 'http://127.0.0.1:5678/rest/owner/setup' \
  -H 'Content-Type: application/json' \
  -d '{\"email\":\"${N8N_OWNER_EMAIL}\",\"firstName\":\"Admin\",\"lastName\":\"Owner\",\"password\":\"${N8N_OWNER_PASS}\"}' || true"

info "Step 8 OK: Owner account setup attempted"
|
||||
|
||||
# ---------------------------
# Step 9: Final URLs and Output
# ---------------------------
info "Step 9: Generating final output..."

# Final URLs
N8N_INTERNAL_URL="http://${CT_IP}:5678/"
N8N_EXTERNAL_URL="https://${FQDN}"
POSTGREST_URL="http://${CT_IP}:${POSTGREST_PORT}"
# Supabase URL format for n8n credential (PostgREST acts as Supabase API)
# IMPORTANT: n8n runs inside Docker, so it needs the Docker-internal URL!
SUPABASE_URL="http://postgrest:3000"
SUPABASE_URL_EXTERNAL="http://${CT_IP}:${POSTGREST_PORT}"

# Chat URL (webhook URL for the chat trigger - will be available after workflow activation)
CHAT_WEBHOOK_URL="https://${FQDN}/webhook/rag-chat-webhook/chat"
CHAT_INTERNAL_URL="http://${CT_IP}:5678/webhook/rag-chat-webhook/chat"

# Upload Form URL (for document upload)
UPLOAD_FORM_URL="https://${FQDN}/form/rag-upload-form"
UPLOAD_FORM_INTERNAL_URL="http://${CT_IP}:5678/form/rag-upload-form"

info "n8n intern: ${N8N_INTERNAL_URL}"
info "n8n extern (geplant via OPNsense): ${N8N_EXTERNAL_URL}"
info "PostgREST API: ${POSTGREST_URL}"
# NOTE(review): this writes the service-role key into the debug log file —
# confirm that leaking the secret into the log is acceptable.
info "Supabase Service Role Key: ${SERVICE_ROLE_KEY}"
info "Ollama URL: ${OLLAMA_URL}"
info "Chat Webhook URL (extern): ${CHAT_WEBHOOK_URL}"
info "Chat Webhook URL (intern): ${CHAT_INTERNAL_URL}"
|
||||
|
||||
# ---------------------------
# Step 10: Setup n8n Credentials + Import Workflow + Activate
# ---------------------------
info "Step 10: Setting up n8n credentials and importing RAG workflow..."

# Use the new robust n8n setup function from libsupabase.sh
# Parameters: ctid, email, password, pg_host, pg_port, pg_db, pg_user, pg_pass, ollama_url, ollama_model, embedding_model, workflow_file
# Failure here is deliberately non-fatal: the operator can import manually.
if n8n_setup_rag_workflow "${CTID}" "${N8N_OWNER_EMAIL}" "${N8N_OWNER_PASS}" \
  "postgres" "5432" "${PG_DB}" "${PG_USER}" "${PG_PASSWORD}" \
  "${OLLAMA_URL}" "${OLLAMA_MODEL}" "${EMBEDDING_MODEL}" "${WORKFLOW_FILE}"; then
  info "Step 10 OK: n8n RAG workflow setup completed successfully"
else
  warn "Step 10: n8n workflow setup failed - manual setup may be required"
  info "Step 10: You can manually import the workflow via n8n UI"
fi
|
||||
|
||||
# ---------------------------
# Step 10a: Setup Workflow Auto-Reload on LXC Restart
# ---------------------------
info "Step 10a: Setting up workflow auto-reload on LXC restart..."

# Copy workflow template to container for auto-reload
info "Copying workflow template to container..."
# (Already validated at startup; re-checking keeps this step non-fatal if
# the file disappeared in the meantime.)
if [[ -f "${WORKFLOW_FILE}" ]]; then
  # Read workflow file content
  WORKFLOW_CONTENT=$(cat "${WORKFLOW_FILE}")
  pct_push_text "${CTID}" "/opt/customer-stack/workflow-template.json" "${WORKFLOW_CONTENT}"
  info "Workflow template saved to /opt/customer-stack/workflow-template.json"
else
  warn "Workflow file not found: ${WORKFLOW_FILE}"
fi

# Copy reload script to container
info "Installing workflow reload script..."
# NOTE(review): unlike the workflow file above, a missing template here would
# abort the whole script (errexit) — confirm templates/ always ships both files.
RELOAD_SCRIPT_CONTENT=$(cat "${SCRIPT_DIR}/templates/reload-workflow.sh")
pct_push_text "${CTID}" "/opt/customer-stack/reload-workflow.sh" "${RELOAD_SCRIPT_CONTENT}"
pct_exec "${CTID}" "chmod +x /opt/customer-stack/reload-workflow.sh"
info "Reload script installed"

# Copy systemd service file to container
info "Installing systemd service for workflow auto-reload..."
SYSTEMD_SERVICE_CONTENT=$(cat "${SCRIPT_DIR}/templates/n8n-workflow-reload.service")
pct_push_text "${CTID}" "/etc/systemd/system/n8n-workflow-reload.service" "${SYSTEMD_SERVICE_CONTENT}"

# Enable and start systemd service
# (Only enabled — it runs on the next boot; step 10 already imported the
# workflow for the current session.)
pct_exec "${CTID}" "systemctl daemon-reload"
pct_exec "${CTID}" "systemctl enable n8n-workflow-reload.service"
info "Systemd service enabled"

info "Step 10a OK: Workflow auto-reload configured"
info "The workflow will be automatically reloaded on every LXC restart"
|
||||
|
||||
# ---------------------------
# Step 11: Setup NGINX Reverse Proxy in OPNsense
# ---------------------------
info "Step 11: Setting up NGINX Reverse Proxy in OPNsense..."

# Check if setup_nginx_proxy.sh exists
if [[ -f "${SCRIPT_DIR}/setup_nginx_proxy.sh" ]]; then
  # Run the proxy setup script.
  # stderr is folded into the captured output; a non-zero exit is mapped to
  # a JSON error object so the grep below always has something to match.
  PROXY_RESULT=$(DEBUG="${DEBUG}" bash "${SCRIPT_DIR}/setup_nginx_proxy.sh" \
    --ctid "${CTID}" \
    --hostname "${CT_HOSTNAME}" \
    --fqdn "${FQDN}" \
    --backend-ip "${CT_IP}" \
    --backend-port "5678" \
    2>&1 || echo '{"success": false, "error": "Proxy setup failed"}')

  # Check if proxy setup was successful
  if echo "$PROXY_RESULT" | grep -q '"success": true'; then
    info "NGINX Reverse Proxy setup successful"
  else
    warn "NGINX Reverse Proxy setup may have failed: ${PROXY_RESULT}"
  fi
else
  warn "setup_nginx_proxy.sh not found, skipping proxy setup"
fi

info "Step 11 OK: Proxy setup completed"
|
||||
|
||||
# ---------------------------
# Final JSON Output
# ---------------------------
# Machine-readable JSON output (for your downstream automation)
# Compact single-line JSON for simple parsing.
# DEBUG=0: emit the JSON on fd 3 (the preserved original stdout).
# DEBUG=1: emit the JSON on normal stdout (it also lands in the log).
JSON_OUTPUT="{\"ctid\":${CTID},\"hostname\":\"${CT_HOSTNAME}\",\"fqdn\":\"${FQDN}\",\"ip\":\"${CT_IP}\",\"vlan\":${VLAN},\"urls\":{\"n8n_internal\":\"${N8N_INTERNAL_URL}\",\"n8n_external\":\"${N8N_EXTERNAL_URL}\",\"postgrest\":\"${POSTGREST_URL}\",\"chat_webhook\":\"${CHAT_WEBHOOK_URL}\",\"chat_internal\":\"${CHAT_INTERNAL_URL}\",\"upload_form\":\"${UPLOAD_FORM_URL}\",\"upload_form_internal\":\"${UPLOAD_FORM_INTERNAL_URL}\"},\"postgres\":{\"host\":\"postgres\",\"port\":5432,\"db\":\"${PG_DB}\",\"user\":\"${PG_USER}\",\"password\":\"${PG_PASSWORD}\"},\"supabase\":{\"url\":\"${SUPABASE_URL}\",\"url_external\":\"${SUPABASE_URL_EXTERNAL}\",\"anon_key\":\"${ANON_KEY}\",\"service_role_key\":\"${SERVICE_ROLE_KEY}\",\"jwt_secret\":\"${JWT_SECRET}\"},\"ollama\":{\"url\":\"${OLLAMA_URL}\",\"model\":\"${OLLAMA_MODEL}\",\"embedding_model\":\"${EMBEDDING_MODEL}\"},\"n8n\":{\"encryption_key\":\"${N8N_ENCRYPTION_KEY}\",\"owner_email\":\"${N8N_OWNER_EMAIL}\",\"owner_password\":\"${N8N_OWNER_PASS}\",\"secure_cookie\":${N8N_SECURE_COOKIE}},\"log_file\":\"${FINAL_LOG}\"}"

if [[ "$DEBUG" == "1" ]]; then
  # Debug mode: pretty-print when python3 is available, otherwise raw.
  echo "$JSON_OUTPUT" | python3 -m json.tool 2>/dev/null || echo "$JSON_OUTPUT"
else
  # Normal mode: compact JSON to the preserved stdout (fd 3).
  echo "$JSON_OUTPUT" >&3
fi
|
||||
|
||||
# ---------------------------
# Save credentials to file
# ---------------------------
CREDENTIALS_DIR="${SCRIPT_DIR}/credentials"
mkdir -p "${CREDENTIALS_DIR}"
CREDENTIALS_FILE="${CREDENTIALS_DIR}/${CT_HOSTNAME}.json"

# Save formatted credentials (pretty-printed when python3 is available).
echo "$JSON_OUTPUT" | python3 -m json.tool > "${CREDENTIALS_FILE}" 2>/dev/null || echo "$JSON_OUTPUT" > "${CREDENTIALS_FILE}"
# The file contains passwords and JWT secrets — restrict it to the owner.
chmod 600 "${CREDENTIALS_FILE}"

info "Credentials saved to: ${CREDENTIALS_FILE}"
info "To update credentials later, use: bash update_credentials.sh --ctid ${CTID} --credentials-file ${CREDENTIALS_FILE}"
|
||||
979
libsupabase.sh
Executable file
979
libsupabase.sh
Executable file
@@ -0,0 +1,979 @@
|
||||
#!/usr/bin/env bash
# Shared helper library for the Supabase/n8n LXC installer: logging,
# JSON error reporting, Proxmox (pct/pvesm/pveam/pvesh) helpers and
# secret/password generators. Meant to be sourced, not executed.
set -Eeuo pipefail

# Debug mode: 0 = emit JSON only, 1 = logs on stderr
DEBUG="${DEBUG:-0}"
|
||||
|
||||
# Timestamp prefix for log lines, e.g. "[2024-01-01 12:00:00]".
log_ts() { date "+[%F %T]"; }

# info/warn write to stderr only when DEBUG=1 and always return 0, so they
# are safe to call under "set -e".
info() {
  if [[ "$DEBUG" == "1" ]]; then
    printf '%s INFO: %s\n' "$(log_ts)" "$*" >&2
  fi
  return 0
}

warn() {
  if [[ "$DEBUG" == "1" ]]; then
    printf '%s WARN: %s\n' "$(log_ts)" "$*" >&2
  fi
  return 0
}
|
||||
|
||||
# Fatal error helper.
# DEBUG=1: human-readable message on stderr.
# DEBUG=0: machine-readable JSON error, preferring fd 3 (the preserved
#          original stdout) when it is open, falling back to plain stdout.
die() {
  if [[ "$DEBUG" == "1" ]]; then
    echo "$(log_ts) ERROR: $*" >&2
  elif { true >&3; } 2>/dev/null; then
    echo "{\"error\": \"$*\"}" >&3
  else
    echo "{\"error\": \"$*\"}"
  fi
  exit 1
}
|
||||
|
||||
# Install an ERR trap that reports the failing command and line number.
# DEBUG=1 -> human-readable message on stderr; otherwise a JSON error object
# on fd 3 when it is open (the reserved stdout), else on plain stdout.
# The trap body is a single-quoted string, so everything inside it is
# evaluated at trap time, not at installation time.
setup_traps() {
  trap 'rc=$?; if [[ $rc -ne 0 ]]; then
  if [[ "$DEBUG" == "1" ]]; then
  echo "$(log_ts) ERROR: Failed at line ${BASH_LINENO[0]}: ${BASH_COMMAND} (exit=$rc)" >&2
  else
  # JSON-Fehler auf fd 3 ausgeben (falls verfügbar), sonst stdout
  if { true >&3; } 2>/dev/null; then
  echo "{\"error\": \"Failed at line ${BASH_LINENO[0]}: ${BASH_COMMAND} (exit=$rc)\"}" >&3
  else
  echo "{\"error\": \"Failed at line ${BASH_LINENO[0]}: ${BASH_COMMAND} (exit=$rc)\"}"
  fi
  fi
  fi; exit $rc' ERR
}
|
||||
|
||||
# Verify that every named command is on PATH; dies on the first missing one.
need_cmd() {
  local cmd
  for cmd in "$@"; do
    if ! command -v "$cmd" >/dev/null 2>&1; then
      die "Missing command: $cmd"
    fi
  done
}
|
||||
|
||||
# ----- Proxmox helpers -----
|
||||
|
||||
# True if "pvesm status" lists a storage whose name matches exactly
# (first column, header row skipped).
pve_storage_exists() {
  local storage_name="$1"
  pvesm status | awk -v want="$storage_name" 'NR>1 && $1 == want {found=1} END {exit found ? 0 : 1}'
}
|
||||
|
||||
# True if a network interface with this name exists on the host.
pve_bridge_exists() {
  local bridge_name="$1"
  if ip link show "$bridge_name" >/dev/null 2>&1; then
    return 0
  fi
  return 1
}
|
||||
|
||||
# Return ONLY template path on stdout. Logs go to stderr.
# Ensures the pinned Debian 12 CT template is available, falling back to the
# 'local' storage when the requested storage cannot hold templates.
pve_template_ensure_debian12() {
  local storage="$1"
  # Pinned template version; bump deliberately when upgrading.
  local tmpl="debian-12-standard_12.12-1_amd64.tar.zst"
  # NOTE(review): this cache path assumes the template ends up on the 'local'
  # dir storage — if tstore resolves elsewhere, the -f check below may not
  # reflect reality. Confirm on multi-storage hosts.
  local cache="/var/lib/vz/template/cache/${tmpl}"

  # pveam templates must be on "local" (dir storage), not on zfs
  local tstore="$storage"
  if ! pveam available -section system >/dev/null 2>&1; then
    warn "pveam not working? continuing"
  fi

  # heuristic: if storage isn't usable for templates, fallback to local
  # Most Proxmox setups use 'local' for templates.
  if ! pvesm status | awk 'NR>1{print $1,$2}' | grep -q "^${tstore} "; then
    warn "pveam storage '${tstore}' not found; falling back to 'local'"
    tstore="local"
  fi

  # If storage exists but isn't a dir storage for templates, pveam will fail -> fallback
  if ! pveam list "${tstore}" >/dev/null 2>&1; then
    warn "pveam storage '${tstore}' not available for templates; falling back to 'local'"
    tstore="local"
  fi

  if [[ ! -f "$cache" ]]; then
    info "Downloading CT template to ${tstore}: ${tmpl}"
    # Download progress goes to stderr so stdout stays clean for the path.
    pveam download "${tstore}" "${tmpl}" >&2
  fi

  echo "${tstore}:vztmpl/${tmpl}"
}
|
||||
|
||||
# Assemble the pct net0 configuration string: fixed eth0 name, bridge,
# random locally-administered MAC, optional VLAN tag, and either DHCP or a
# static CIDR address.
pve_build_net0() {
  local bridge="$1"
  local ipcfg="$2"
  local vlan="${3:-0}"

  local cfg="name=eth0,bridge=${bridge},hwaddr=$(gen_mac)"

  # vlan=0 means "no tagging".
  if [[ "$vlan" != "0" ]]; then
    cfg="${cfg},tag=${vlan}"
  fi

  # "dhcp" and a literal CIDR are passed through the same way.
  cfg="${cfg},ip=${ipcfg}"

  echo "$cfg"
}
|
||||
|
||||
# Wait for IP from pct; returns first IPv4
# Polls the CT up to 40 times (1s apart) for a global-scope IPv4 address.
# Prints the address on stdout and returns 0; returns 1 on timeout.
pct_wait_for_ip() {
  local ctid="$1"
  local i ip
  for i in $(seq 1 40); do
    # \$4 is escaped so awk (inside the CT) sees its own field variable.
    ip="$(pct exec "$ctid" -- bash -lc "ip -4 -o addr show scope global | awk '{print \$4}' | cut -d/ -f1 | head -n1" 2>/dev/null || true)"
    if [[ -n "$ip" ]]; then
      echo "$ip"
      return 0
    fi
    sleep 1
  done
  return 1
}
|
||||
|
||||
# Run a shell command line inside a container.
# NOTE: "$*" joins all remaining args with spaces into ONE string that is
# evaluated by bash inside the CT — callers must pre-quote anything with
# shell metacharacters (input is treated as trusted).
pct_exec() {
  local ctid="$1"; shift
  pct exec "$ctid" -- bash -lc "$*"
}
|
||||
|
||||
# Push a text file into CT without SCP
# Writes $content to $dest inside the CT via a quoted heredoc, so the CT-side
# shell does not expand anything in the content.
# NOTE(review): content containing a line that is exactly "EOF" would
# terminate the heredoc early and truncate the file — confirm callers never
# push such content (the .env/SQL/compose payloads here do not).
pct_push_text() {
  local ctid="$1"
  local dest="$2"
  local content="$3"
  pct exec "$ctid" -- bash -lc "cat > '$dest' <<'EOF'
${content}
EOF"
}
|
||||
|
||||
# Cluster VMID existence check (best effort).
# Uses pvesh cluster resources. Returns 0 only when the VMID is present;
# returns 1 when it is not, or when pvesh/python3 are unavailable.
#
# Fixed two defects in the previous version:
#  1. "python3 - <<'PY'" made the heredoc the program's stdin, so the piped
#     pvesh JSON was never read — the check could never find anything.
#     The program now goes in via -c and the JSON arrives on stdin.
#  2. "|| exit 0" terminated the WHOLE script with status 0 whenever python
#     signalled "found" via exit code 1. The status is now captured in rc.
pve_vmid_exists_cluster() {
  local vmid="$1"
  local rc=0
  pvesh get /cluster/resources --output-format json 2>/dev/null \
    | python3 -c '
import json,sys
vmid=sys.argv[1]
try:
    data=json.load(sys.stdin)
except Exception:
    sys.exit(0)
for r in data:
    if str(r.get("vmid",""))==str(vmid):
        sys.exit(1)
sys.exit(0)
' "$vmid" || rc=$?
  # python exit code 1 == "vmid found".
  [[ $rc -eq 1 ]]
}
|
||||
|
||||
# Project CTID scheme: CTID = unix timestamp (seconds) minus 1,000,000,000.
# Arguments: $1 - unix timestamp
# Outputs:   the derived CTID on stdout
pve_ctid_from_unixtime() {
  local stamp="$1"
  printf '%s\n' "$(( stamp - 1000000000 ))"
}
|
||||
|
||||
# ----- Generators / policies -----
|
||||
|
||||
# Avoid "tr: Broken pipe" by not piping random through tr|head.
|
||||
gen_hex_64() {
  # Emit 64 hex characters (= 32 random bytes) on stdout.
  local hex
  hex="$(openssl rand -hex 32)"
  printf '%s\n' "$hex"
}
|
||||
|
||||
gen_mac() {
  # Random locally administered unicast MAC: fixed 02: prefix plus five
  # random octets (bash RANDOM, so not cryptographically strong — fine
  # for MAC assignment).
  local octets=() idx
  for idx in 1 2 3 4 5; do
    octets+=("$(printf '%02x' $(( RANDOM % 256 )))")
  done
  local IFS=':'
  printf '02:%s\n' "${octets[*]}"
}
|
||||
|
||||
# True iff the candidate password satisfies the local policy:
# at least 8 characters, at least one digit, at least one uppercase letter.
password_policy_check() {
  local candidate="$1"
  (( ${#candidate} >= 8 )) || return 1
  [[ "$candidate" == *[0-9]* ]] || return 1
  [[ "$candidate" == *[A-Z]* ]]
}
|
||||
|
||||
gen_password_policy() {
  # Draw random candidates until one satisfies password_policy_check.
  # No tr|head-on-/dev/urandom pipelines, so no "Broken pipe" noise.
  # The fixed "A1" suffix guarantees the digit/uppercase requirements,
  # making the loop terminate almost immediately.
  local candidate
  while :; do
    candidate="$(openssl rand -base64 18 | tr -d '/+=' | cut -c1-16)A1"
    if password_policy_check "$candidate"; then
      printf '%s\n' "$candidate"
      return 0
    fi
  done
}
|
||||
|
||||
emit_json() {
  # Pass JSON from stdin straight through to stdout.
  # Convention in this file: stdout carries machine-readable output only;
  # all human-readable logging goes to stderr (see info/warn usage).
  cat
}
|
||||
|
||||
# ----- n8n API helpers -----
|
||||
# These functions interact with n8n REST API inside a container
|
||||
|
||||
# Login to n8n and save session cookie
# Usage: n8n_api_login <ctid> <email> <password>
# Returns: 0 on success, 1 on failure
# Side effect: Creates /tmp/n8n_cookies.txt in the container
# NOTE(review): this posts the field "email", while the batch script in
# n8n_setup_rag_workflow posts "emailOrLdapLoginId" — confirm which field
# the deployed n8n version expects; one of the two is likely stale.
n8n_api_login() {
  local ctid="$1"
  local email="$2"
  local password="$3"
  # n8n listens on localhost inside the container
  local api_url="http://127.0.0.1:5678"

  info "n8n API: Logging in as ${email}..."

  # Escape special characters in password for JSON
  # NOTE(review): only backslash and double-quote are escaped; a password
  # containing a single quote would break the single-quoted -d payload below.
  local escaped_password
  escaped_password=$(echo "$password" | sed 's/\\/\\\\/g; s/"/\\"/g')

  local response
  # -c writes the cookie jar reused (-b) by the other n8n_api_* helpers
  response=$(pct exec "$ctid" -- bash -c "curl -sS -X POST '${api_url}/rest/login' \
    -H 'Content-Type: application/json' \
    -c /tmp/n8n_cookies.txt \
    -d '{\"email\":\"${email}\",\"password\":\"${escaped_password}\"}' 2>&1" || echo "CURL_FAILED")

  # Heuristic failure check: curl sentinel, or an "error" marker in the
  # response without any "data" payload.
  if [[ "$response" == *"CURL_FAILED"* ]] || [[ "$response" == *"error"* && "$response" != *"data"* ]]; then
    warn "n8n API login failed: ${response}"
    return 1
  fi

  info "n8n API: Login successful"
  return 0
}
|
||||
|
||||
# Create PostgreSQL credential in n8n
# Usage: n8n_api_create_postgres_credential <ctid> <name> <host> <port> <database> <user> <password>
# Returns: Credential ID on stdout, or empty on failure
# Requires: a valid session cookie from n8n_api_login (/tmp/n8n_cookies.txt)
n8n_api_create_postgres_credential() {
  local ctid="$1"
  local name="$2"
  local host="$3"
  local port="$4"
  local database="$5"
  local user="$6"
  local password="$7"
  local api_url="http://127.0.0.1:5678"

  info "n8n API: Creating PostgreSQL credential '${name}'..."

  # Escape special characters in password for JSON
  # NOTE(review): only backslash and double-quote are handled; single quotes
  # in the password would break the single-quoted -d payload below.
  local escaped_password
  escaped_password=$(echo "$password" | sed 's/\\/\\\\/g; s/"/\\"/g')

  local response
  # -b reuses the session cookie created by n8n_api_login
  response=$(pct exec "$ctid" -- bash -c "curl -sS -X POST '${api_url}/rest/credentials' \
    -H 'Content-Type: application/json' \
    -b /tmp/n8n_cookies.txt \
    -d '{
      \"name\": \"${name}\",
      \"type\": \"postgres\",
      \"data\": {
        \"host\": \"${host}\",
        \"port\": ${port},
        \"database\": \"${database}\",
        \"user\": \"${user}\",
        \"password\": \"${escaped_password}\",
        \"ssl\": \"disable\"
      }
    }' 2>&1" || echo "")

  # Extract credential ID from response
  # NOTE(review): grep -P picks the first "id" anywhere in the raw JSON —
  # fragile; consider parsing with python3/jq instead.
  local cred_id
  cred_id=$(echo "$response" | grep -oP '"id"\s*:\s*"\K[^"]+' | head -1 || echo "")

  if [[ -n "$cred_id" ]]; then
    info "n8n API: PostgreSQL credential created with ID: ${cred_id}"
    echo "$cred_id"
    return 0
  else
    warn "n8n API: Failed to create PostgreSQL credential: ${response}"
    echo ""
    return 1
  fi
}
|
||||
|
||||
# Create Ollama credential in n8n
# Usage: n8n_api_create_ollama_credential <ctid> <name> <base_url>
# Returns: Credential ID on stdout, or empty on failure
# Requires: a valid session cookie from n8n_api_login (/tmp/n8n_cookies.txt)
n8n_api_create_ollama_credential() {
  local ctid="$1"
  local name="$2"
  local base_url="$3"
  local api_url="http://127.0.0.1:5678"

  info "n8n API: Creating Ollama credential '${name}'..."

  local response
  # -b reuses the session cookie created by n8n_api_login
  response=$(pct exec "$ctid" -- bash -c "curl -sS -X POST '${api_url}/rest/credentials' \
    -H 'Content-Type: application/json' \
    -b /tmp/n8n_cookies.txt \
    -d '{
      \"name\": \"${name}\",
      \"type\": \"ollamaApi\",
      \"data\": {
        \"baseUrl\": \"${base_url}\"
      }
    }' 2>&1" || echo "")

  # Extract credential ID from response
  # NOTE(review): grep -P picks the first "id" anywhere in the raw JSON —
  # fragile; consider parsing with python3/jq instead.
  local cred_id
  cred_id=$(echo "$response" | grep -oP '"id"\s*:\s*"\K[^"]+' | head -1 || echo "")

  if [[ -n "$cred_id" ]]; then
    info "n8n API: Ollama credential created with ID: ${cred_id}"
    echo "$cred_id"
    return 0
  else
    warn "n8n API: Failed to create Ollama credential: ${response}"
    echo ""
    return 1
  fi
}
|
||||
|
||||
# Import workflow into n8n
# Usage: n8n_api_import_workflow <ctid> <workflow_json_file_in_container>
# Returns: Workflow ID on stdout, or empty on failure
# Requires: a valid session cookie from n8n_api_login (/tmp/n8n_cookies.txt)
n8n_api_import_workflow() {
  local ctid="$1"
  local workflow_file="$2"
  local api_url="http://127.0.0.1:5678"

  info "n8n API: Importing workflow from ${workflow_file}..."

  local response
  # -d @file: curl reads the JSON body from the file inside the container,
  # so the workflow never passes through shell quoting.
  response=$(pct exec "$ctid" -- bash -c "curl -sS -X POST '${api_url}/rest/workflows' \
    -H 'Content-Type: application/json' \
    -b /tmp/n8n_cookies.txt \
    -d @${workflow_file} 2>&1" || echo "")

  # Extract workflow ID from response
  # NOTE(review): grep -P picks the first "id" anywhere in the raw JSON —
  # fragile if the response embeds node IDs before the workflow ID; verify
  # against the actual n8n response shape.
  local workflow_id
  workflow_id=$(echo "$response" | grep -oP '"id"\s*:\s*"\K[^"]+' | head -1 || echo "")

  if [[ -n "$workflow_id" ]]; then
    info "n8n API: Workflow imported with ID: ${workflow_id}"
    echo "$workflow_id"
    return 0
  else
    warn "n8n API: Failed to import workflow: ${response}"
    echo ""
    return 1
  fi
}
|
||||
|
||||
# Activate workflow in n8n
# Usage: n8n_api_activate_workflow <ctid> <workflow_id>
# Returns: 0 on success, 1 on failure
# NOTE(review): this PATCHes {"active": true}, while the batch script in
# n8n_setup_rag_workflow uses POST /rest/workflows/<id>/activate with a
# versionId — confirm which endpoint the deployed n8n version supports;
# one of the two approaches is likely stale.
n8n_api_activate_workflow() {
  local ctid="$1"
  local workflow_id="$2"
  local api_url="http://127.0.0.1:5678"

  info "n8n API: Activating workflow ${workflow_id}..."

  local response
  response=$(pct exec "$ctid" -- bash -c "curl -sS -X PATCH '${api_url}/rest/workflows/${workflow_id}' \
    -H 'Content-Type: application/json' \
    -b /tmp/n8n_cookies.txt \
    -d '{\"active\": true}' 2>&1" || echo "")

  # Accept both compact and pretty-printed response formatting.
  if [[ "$response" == *"\"active\":true"* ]] || [[ "$response" == *"\"active\": true"* ]]; then
    info "n8n API: Workflow ${workflow_id} activated successfully"
    return 0
  else
    warn "n8n API: Failed to activate workflow: ${response}"
    return 1
  fi
}
|
||||
|
||||
# Generate RAG workflow JSON with credential IDs
|
||||
# Usage: n8n_generate_rag_workflow_json <postgres_cred_id> [postgres_cred_name] <ollama_cred_id> [ollama_cred_name] [ollama_model] [embedding_model]
|
||||
# Returns: Workflow JSON on stdout
|
||||
n8n_generate_rag_workflow_json() {
  # Positional parameters (2, 4, 5, 6 have defaults):
  local postgres_cred_id="$1"
  local postgres_cred_name="${2:-PostgreSQL (local)}"
  local ollama_cred_id="$3"
  local ollama_cred_name="${4:-Ollama (local)}"
  local ollama_model="${5:-llama3.2:3b}"
  local embedding_model="${6:-nomic-embed-text:v1.5}"

  # Unquoted here-doc delimiter: the shell expands the ${...} placeholders
  # below, everything else is emitted verbatim. The literal \$json in the
  # agent prompt is escaped so it survives as an n8n expression.
  cat <<WORKFLOW_JSON
{
  "name": "RAG KI-Bot (PGVector)",
  "nodes": [
    {
      "parameters": {
        "public": true,
        "options": {}
      },
      "type": "@n8n/n8n-nodes-langchain.chatTrigger",
      "typeVersion": 1.3,
      "position": [0, 0],
      "id": "chat-trigger-001",
      "name": "When chat message received",
      "webhookId": "rag-chat-webhook",
      "notesInFlow": true,
      "notes": "Chat URL: /webhook/rag-chat-webhook/chat"
    },
    {
      "parameters": {
        "promptType": "define",
        "text": "={{ \$json.chatInput }}\nAntworte ausschliesslich auf Deutsch",
        "options": {}
      },
      "type": "@n8n/n8n-nodes-langchain.agent",
      "typeVersion": 2.2,
      "position": [208, 0],
      "id": "ai-agent-001",
      "name": "AI Agent"
    },
    {
      "parameters": {
        "model": "${ollama_model}",
        "options": {}
      },
      "type": "@n8n/n8n-nodes-langchain.lmChatOllama",
      "typeVersion": 1,
      "position": [64, 208],
      "id": "ollama-chat-001",
      "name": "Ollama Chat Model",
      "credentials": {
        "ollamaApi": {
          "id": "${ollama_cred_id}",
          "name": "${ollama_cred_name}"
        }
      }
    },
    {
      "parameters": {},
      "type": "@n8n/n8n-nodes-langchain.memoryBufferWindow",
      "typeVersion": 1.3,
      "position": [224, 208],
      "id": "memory-001",
      "name": "Simple Memory"
    },
    {
      "parameters": {
        "mode": "retrieve-as-tool",
        "toolName": "knowledge_base",
        "toolDescription": "Verwende dieses Tool für Infos die der Benutzer fragt. Sucht in der Wissensdatenbank nach relevanten Dokumenten.",
        "tableName": "documents",
        "options": {}
      },
      "type": "@n8n/n8n-nodes-langchain.vectorStorePGVector",
      "typeVersion": 1,
      "position": [432, 128],
      "id": "pgvector-retrieve-001",
      "name": "PGVector Store",
      "credentials": {
        "postgres": {
          "id": "${postgres_cred_id}",
          "name": "${postgres_cred_name}"
        }
      }
    },
    {
      "parameters": {
        "model": "${embedding_model}"
      },
      "type": "@n8n/n8n-nodes-langchain.embeddingsOllama",
      "typeVersion": 1,
      "position": [384, 320],
      "id": "embeddings-retrieve-001",
      "name": "Embeddings Ollama",
      "credentials": {
        "ollamaApi": {
          "id": "${ollama_cred_id}",
          "name": "${ollama_cred_name}"
        }
      }
    },
    {
      "parameters": {
        "formTitle": "Dokument hochladen",
        "formDescription": "Laden Sie ein PDF-Dokument hoch, um es in die Wissensdatenbank aufzunehmen.",
        "formFields": {
          "values": [
            {
              "fieldLabel": "Dokument",
              "fieldType": "file",
              "acceptFileTypes": ".pdf"
            }
          ]
        },
        "options": {}
      },
      "type": "n8n-nodes-base.formTrigger",
      "typeVersion": 2.3,
      "position": [768, 0],
      "id": "form-trigger-001",
      "name": "On form submission",
      "webhookId": "rag-upload-form"
    },
    {
      "parameters": {
        "operation": "pdf",
        "binaryPropertyName": "Dokument",
        "options": {}
      },
      "type": "n8n-nodes-base.extractFromFile",
      "typeVersion": 1,
      "position": [976, 0],
      "id": "extract-file-001",
      "name": "Extract from File"
    },
    {
      "parameters": {
        "mode": "insert",
        "tableName": "documents",
        "options": {}
      },
      "type": "@n8n/n8n-nodes-langchain.vectorStorePGVector",
      "typeVersion": 1,
      "position": [1184, 0],
      "id": "pgvector-insert-001",
      "name": "PGVector Store Insert",
      "credentials": {
        "postgres": {
          "id": "${postgres_cred_id}",
          "name": "${postgres_cred_name}"
        }
      }
    },
    {
      "parameters": {
        "model": "${embedding_model}"
      },
      "type": "@n8n/n8n-nodes-langchain.embeddingsOllama",
      "typeVersion": 1,
      "position": [1168, 240],
      "id": "embeddings-insert-001",
      "name": "Embeddings Ollama1",
      "credentials": {
        "ollamaApi": {
          "id": "${ollama_cred_id}",
          "name": "${ollama_cred_name}"
        }
      }
    },
    {
      "parameters": {
        "options": {}
      },
      "type": "@n8n/n8n-nodes-langchain.documentDefaultDataLoader",
      "typeVersion": 1.1,
      "position": [1392, 240],
      "id": "data-loader-001",
      "name": "Default Data Loader"
    }
  ],
  "connections": {
    "When chat message received": {
      "main": [[{"node": "AI Agent", "type": "main", "index": 0}]]
    },
    "Ollama Chat Model": {
      "ai_languageModel": [[{"node": "AI Agent", "type": "ai_languageModel", "index": 0}]]
    },
    "Simple Memory": {
      "ai_memory": [[{"node": "AI Agent", "type": "ai_memory", "index": 0}]]
    },
    "PGVector Store": {
      "ai_tool": [[{"node": "AI Agent", "type": "ai_tool", "index": 0}]]
    },
    "Embeddings Ollama": {
      "ai_embedding": [[{"node": "PGVector Store", "type": "ai_embedding", "index": 0}]]
    },
    "On form submission": {
      "main": [[{"node": "Extract from File", "type": "main", "index": 0}]]
    },
    "Extract from File": {
      "main": [[{"node": "PGVector Store Insert", "type": "main", "index": 0}]]
    },
    "Embeddings Ollama1": {
      "ai_embedding": [[{"node": "PGVector Store Insert", "type": "ai_embedding", "index": 0}]]
    },
    "Default Data Loader": {
      "ai_document": [[{"node": "PGVector Store Insert", "type": "ai_document", "index": 0}]]
    }
  },
  "settings": {
    "executionOrder": "v1"
  }
}
WORKFLOW_JSON
}
|
||||
|
||||
# List all workflows in n8n
# Usage: n8n_api_list_workflows <ctid>
# Returns: JSON array of workflows on stdout (raw API response)
# Requires: a valid session cookie from n8n_api_login (/tmp/n8n_cookies.txt)
# NOTE(review): always returns 0, even when curl fails (response is then
# empty) — callers must handle an empty/invalid body.
n8n_api_list_workflows() {
  local ctid="$1"
  local api_url="http://127.0.0.1:5678"

  info "n8n API: Listing workflows..."

  local response
  response=$(pct exec "$ctid" -- bash -c "curl -sS -X GET '${api_url}/rest/workflows' \
    -H 'Content-Type: application/json' \
    -b /tmp/n8n_cookies.txt 2>&1" || echo "")

  echo "$response"
  return 0
}
|
||||
|
||||
# Get workflow by name
# Usage: n8n_api_get_workflow_by_name <ctid> <workflow_name>
# Returns: Workflow ID on stdout, or empty if not found
n8n_api_get_workflow_by_name() {
  local ctid="$1"
  local workflow_name="$2"

  info "n8n API: Searching for workflow '${workflow_name}'..."

  local workflows
  workflows=$(n8n_api_list_workflows "$ctid")

  # Parse the JSON with python3 instead of grep -P: a regex cannot reliably
  # pair a "name" with the "id" of the same object, and names containing
  # regex metacharacters (e.g. "RAG KI-Bot (PGVector)") broke or spoofed
  # the old match.
  local workflow_id
  workflow_id=$(printf '%s' "$workflows" | python3 -c '
import json, sys

wanted = sys.argv[1]
try:
    payload = json.load(sys.stdin)
except Exception:
    sys.exit(0)
items = payload.get("data", []) if isinstance(payload, dict) else payload
for wf in items or []:
    if isinstance(wf, dict) and wf.get("name") == wanted:
        print(wf.get("id", ""))
        break
' "$workflow_name" 2>/dev/null || echo "")

  if [[ -n "$workflow_id" ]]; then
    info "n8n API: Found workflow '${workflow_name}' with ID: ${workflow_id}"
    echo "$workflow_id"
    return 0
  else
    info "n8n API: Workflow '${workflow_name}' not found"
    echo ""
    return 1
  fi
}
|
||||
|
||||
# Delete workflow by ID
# Usage: n8n_api_delete_workflow <ctid> <workflow_id>
# Returns: 0 on success, 1 on failure
# Requires: a valid session cookie from n8n_api_login (/tmp/n8n_cookies.txt)
n8n_api_delete_workflow() {
  local ctid="$1"
  local workflow_id="$2"
  local api_url="http://127.0.0.1:5678"

  info "n8n API: Deleting workflow ${workflow_id}..."

  local response
  response=$(pct exec "$ctid" -- bash -c "curl -sS -X DELETE '${api_url}/rest/workflows/${workflow_id}' \
    -H 'Content-Type: application/json' \
    -b /tmp/n8n_cookies.txt 2>&1" || echo "")

  # Check if deletion was successful (empty response or success message)
  # NOTE(review): an empty response also occurs when curl itself failed
  # (the || echo "" fallback above), so a dead n8n reports "deleted".
  if [[ -z "$response" ]] || [[ "$response" == *"\"success\":true"* ]] || [[ "$response" == "{}" ]]; then
    info "n8n API: Workflow ${workflow_id} deleted successfully"
    return 0
  else
    warn "n8n API: Failed to delete workflow: ${response}"
    return 1
  fi
}
|
||||
|
||||
# Get credential by name and type
# Usage: n8n_api_get_credential_by_name <ctid> <credential_name> <credential_type>
# Returns: Credential ID on stdout, or empty if not found
n8n_api_get_credential_by_name() {
  local ctid="$1"
  local cred_name="$2"
  local cred_type="$3"
  local api_url="http://127.0.0.1:5678"

  info "n8n API: Searching for credential '${cred_name}' (type: ${cred_type})..."

  local response
  response=$(pct exec "$ctid" -- bash -c "curl -sS -X GET '${api_url}/rest/credentials' \
    -H 'Content-Type: application/json' \
    -b /tmp/n8n_cookies.txt 2>&1" || echo "")

  # Parse the JSON with python3 instead of grep -P: the old regex could not
  # reliably associate the name/type/id fields of the same object, and
  # names containing regex metacharacters (e.g. "PostgreSQL (local)")
  # broke the lookup.
  local cred_id
  cred_id=$(printf '%s' "$response" | python3 -c '
import json, sys

wanted_name, wanted_type = sys.argv[1], sys.argv[2]
try:
    payload = json.load(sys.stdin)
except Exception:
    sys.exit(0)
items = payload.get("data", []) if isinstance(payload, dict) else payload
for cred in items or []:
    if isinstance(cred, dict) and cred.get("name") == wanted_name and cred.get("type") == wanted_type:
        print(cred.get("id", ""))
        break
' "$cred_name" "$cred_type" 2>/dev/null || echo "")

  if [[ -n "$cred_id" ]]; then
    info "n8n API: Found credential '${cred_name}' with ID: ${cred_id}"
    echo "$cred_id"
    return 0
  else
    info "n8n API: Credential '${cred_name}' not found"
    echo ""
    return 1
  fi
}
|
||||
|
||||
# Cleanup n8n API session
# Usage: n8n_api_cleanup <ctid>
# Best effort: removes the session cookie jar and staged workflow JSON
# from the container; never fails, even if the container is unreachable.
n8n_api_cleanup() {
  local ctid="$1"
  local leftovers="/tmp/n8n_cookies.txt /tmp/rag_workflow.json"
  pct exec "$ctid" -- bash -c "rm -f ${leftovers}" 2>/dev/null || true
}
|
||||
|
||||
# Full n8n setup: Create credentials, import workflow from file, activate
# This version runs all API calls in a single shell session to preserve cookies
# Usage: n8n_setup_rag_workflow <ctid> <email> <password> <pg_host> <pg_port> <pg_db> <pg_user> <pg_pass> <ollama_url> <ollama_model> <embedding_model> <workflow_file>
# Returns: 0 on success, 1 on failure
# Strategy: a self-contained batch script is pushed into the container and
# executed there, so the login cookie, both credential creations, the
# workflow import and the activation all share one curl cookie jar.
n8n_setup_rag_workflow() {
  local ctid="$1"
  local email="$2"
  local password="$3"
  local pg_host="$4"
  local pg_port="$5"
  local pg_db="$6"
  local pg_user="$7"
  local pg_pass="$8"
  local ollama_url="$9"
  local ollama_model="${10:-ministral-3:3b}"
  local embedding_model="${11:-nomic-embed-text:latest}"
  local workflow_file="${12:-}"

  info "n8n Setup: Starting RAG workflow setup..."

  # Validate workflow file
  if [[ -z "$workflow_file" ]]; then
    warn "n8n Setup: No workflow file specified, using built-in template"
    workflow_file=""
  elif [[ ! -f "$workflow_file" ]]; then
    warn "n8n Setup: Workflow file not found: $workflow_file"
    return 1
  else
    info "n8n Setup: Using workflow file: $workflow_file"
  fi

  # Wait for n8n to be ready
  # NOTE(review): if n8n never answers 200 within ~60s the loop falls
  # through silently and the login step below fails instead — consider an
  # explicit timeout error here.
  info "n8n Setup: Waiting for n8n to be ready..."
  local i
  for i in $(seq 1 30); do
    if pct exec "$ctid" -- bash -c "curl -sS -o /dev/null -w '%{http_code}' http://127.0.0.1:5678/rest/settings 2>/dev/null" | grep -q "200"; then
      info "n8n Setup: n8n is ready"
      break
    fi
    sleep 2
  done

  # Escape special characters in passwords for JSON
  # NOTE(review): only backslash and double-quote are escaped; other JSON
  # control characters in a password would still corrupt the payloads.
  local escaped_password
  escaped_password=$(echo "$password" | sed 's/\\/\\\\/g; s/"/\\"/g')
  local escaped_pg_pass
  escaped_pg_pass=$(echo "$pg_pass" | sed 's/\\/\\\\/g; s/"/\\"/g')

  # Read workflow from file or generate from template
  info "n8n Setup: Preparing workflow JSON..."
  local workflow_json
  if [[ -n "$workflow_file" && -f "$workflow_file" ]]; then
    # Read workflow from external file
    workflow_json=$(cat "$workflow_file")
    info "n8n Setup: Loaded workflow from file: $workflow_file"
  else
    # Generate workflow from built-in template; the placeholder credential
    # IDs are substituted in-container by process_workflow.py below.
    workflow_json=$(n8n_generate_rag_workflow_json "POSTGRES_CRED_ID" "PostgreSQL (local)" "OLLAMA_CRED_ID" "Ollama (local)" "$ollama_model" "$embedding_model")
    info "n8n Setup: Generated workflow from built-in template"
  fi

  # Push workflow JSON to container (will be processed by setup script)
  pct_push_text "$ctid" "/tmp/rag_workflow_template.json" "$workflow_json"

  # Create a setup script that runs all API calls in one session.
  # The here-doc delimiter is UNQUOTED: host-side ${...} values are expanded
  # now, while \$ / \\ sequences survive to run inside the container.
  info "n8n Setup: Creating setup script..."
  pct_push_text "$ctid" "/tmp/n8n_setup.sh" "$(cat <<SETUP_SCRIPT
#!/bin/bash
set -e

API_URL="http://127.0.0.1:5678"
COOKIE_FILE="/tmp/n8n_cookies.txt"
EMAIL="${email}"
PASSWORD="${escaped_password}"

# Login (n8n API uses emailOrLdapLoginId instead of email)
echo "Logging in..."
LOGIN_RESP=\$(curl -sS -X POST "\${API_URL}/rest/login" \\
  -H "Content-Type: application/json" \\
  -c "\${COOKIE_FILE}" \\
  -d "{\"emailOrLdapLoginId\":\"\${EMAIL}\",\"password\":\"\${PASSWORD}\"}")

if echo "\$LOGIN_RESP" | grep -q '"code":\|"status":"error"'; then
  echo "LOGIN_FAILED: \$LOGIN_RESP"
  exit 1
fi
echo "Login successful"

# Create PostgreSQL credential
echo "Creating PostgreSQL credential..."
PG_CRED_RESP=\$(curl -sS -X POST "\${API_URL}/rest/credentials" \\
  -H "Content-Type: application/json" \\
  -b "\${COOKIE_FILE}" \\
  -d '{
    "name": "PostgreSQL (local)",
    "type": "postgres",
    "data": {
      "host": "${pg_host}",
      "port": ${pg_port},
      "database": "${pg_db}",
      "user": "${pg_user}",
      "password": "${escaped_pg_pass}",
      "ssl": "disable"
    }
  }')

PG_CRED_ID=\$(echo "\$PG_CRED_RESP" | grep -oP '"id"\s*:\s*"\K[^"]+' | head -1)
if [ -z "\$PG_CRED_ID" ]; then
  echo "POSTGRES_CRED_FAILED: \$PG_CRED_RESP"
  exit 1
fi
echo "PostgreSQL credential created: \$PG_CRED_ID"

# Create Ollama credential
echo "Creating Ollama credential..."
OLLAMA_CRED_RESP=\$(curl -sS -X POST "\${API_URL}/rest/credentials" \\
  -H "Content-Type: application/json" \\
  -b "\${COOKIE_FILE}" \\
  -d '{
    "name": "Ollama (local)",
    "type": "ollamaApi",
    "data": {
      "baseUrl": "${ollama_url}"
    }
  }')

OLLAMA_CRED_ID=\$(echo "\$OLLAMA_CRED_RESP" | grep -oP '"id"\s*:\s*"\K[^"]+' | head -1)
if [ -z "\$OLLAMA_CRED_ID" ]; then
  echo "OLLAMA_CRED_FAILED: \$OLLAMA_CRED_RESP"
  exit 1
fi
echo "Ollama credential created: \$OLLAMA_CRED_ID"

# Process workflow JSON: replace credential IDs and clean up
echo "Preparing workflow JSON..."

# Create a Python script to process the workflow JSON
cat > /tmp/process_workflow.py << 'PYTHON_SCRIPT'
import json
import sys

# Read the workflow template
with open('/tmp/rag_workflow_template.json', 'r') as f:
    workflow = json.load(f)

# Get credential IDs from environment/arguments
pg_cred_id = sys.argv[1]
ollama_cred_id = sys.argv[2]

# Remove fields that should not be in the import
fields_to_remove = ['id', 'versionId', 'meta', 'tags', 'active', 'pinData']
for field in fields_to_remove:
    workflow.pop(field, None)

# Process all nodes and replace credential IDs
for node in workflow.get('nodes', []):
    credentials = node.get('credentials', {})

    # Replace PostgreSQL credential
    if 'postgres' in credentials:
        credentials['postgres'] = {
            'id': pg_cred_id,
            'name': 'PostgreSQL (local)'
        }

    # Replace Ollama credential
    if 'ollamaApi' in credentials:
        credentials['ollamaApi'] = {
            'id': ollama_cred_id,
            'name': 'Ollama (local)'
        }

# Write the processed workflow
with open('/tmp/rag_workflow.json', 'w') as f:
    json.dump(workflow, f)

print("Workflow processed successfully")
PYTHON_SCRIPT

# Run the Python script to process the workflow
python3 /tmp/process_workflow.py "\$PG_CRED_ID" "\$OLLAMA_CRED_ID"

# Import workflow
echo "Importing workflow..."
WORKFLOW_RESP=\$(curl -sS -X POST "\${API_URL}/rest/workflows" \\
  -H "Content-Type: application/json" \\
  -b "\${COOKIE_FILE}" \\
  -d @/tmp/rag_workflow.json)

WORKFLOW_ID=\$(echo "\$WORKFLOW_RESP" | grep -oP '"id"\s*:\s*"\K[^"]+' | head -1)
VERSION_ID=\$(echo "\$WORKFLOW_RESP" | grep -oP '"versionId"\s*:\s*"\K[^"]+' | head -1)
if [ -z "\$WORKFLOW_ID" ]; then
  echo "WORKFLOW_IMPORT_FAILED: \$WORKFLOW_RESP"
  exit 1
fi
echo "Workflow imported: \$WORKFLOW_ID (version: \$VERSION_ID)"

# Activate workflow using POST /activate endpoint with versionId
echo "Activating workflow..."
ACTIVATE_RESP=\$(curl -sS -X POST "\${API_URL}/rest/workflows/\${WORKFLOW_ID}/activate" \\
  -H "Content-Type: application/json" \\
  -b "\${COOKIE_FILE}" \\
  -d "{\"versionId\":\"\${VERSION_ID}\"}")

if echo "\$ACTIVATE_RESP" | grep -q '"active":true\|"active": true'; then
  echo "Workflow activated successfully"
else
  echo "WORKFLOW_ACTIVATION_WARNING: \$ACTIVATE_RESP"
fi

# Cleanup
rm -f "\${COOKIE_FILE}" /tmp/rag_workflow_template.json /tmp/rag_workflow.json

# Output results
echo "SUCCESS"
echo "POSTGRES_CRED_ID=\$PG_CRED_ID"
echo "OLLAMA_CRED_ID=\$OLLAMA_CRED_ID"
echo "WORKFLOW_ID=\$WORKFLOW_ID"
SETUP_SCRIPT
)"

  # Make script executable and run it
  pct exec "$ctid" -- chmod +x /tmp/n8n_setup.sh

  info "n8n Setup: Running setup script in container..."
  local setup_output
  setup_output=$(pct exec "$ctid" -- /tmp/n8n_setup.sh 2>&1 || echo "SCRIPT_FAILED")

  # Log the output (the while loop runs in a pipeline subshell, which is
  # fine here because nothing is assigned inside it)
  info "n8n Setup: Script output:"
  echo "$setup_output" | while read -r line; do
    info "  $line"
  done

  # Check for success
  if echo "$setup_output" | grep -q "^SUCCESS$"; then
    # Extract IDs from output
    local pg_cred_id ollama_cred_id workflow_id
    pg_cred_id=$(echo "$setup_output" | grep "^POSTGRES_CRED_ID=" | cut -d= -f2)
    ollama_cred_id=$(echo "$setup_output" | grep "^OLLAMA_CRED_ID=" | cut -d= -f2)
    workflow_id=$(echo "$setup_output" | grep "^WORKFLOW_ID=" | cut -d= -f2)

    info "n8n Setup: RAG workflow setup completed successfully"
    info "n8n Setup: Workflow ID: ${workflow_id}"
    info "n8n Setup: PostgreSQL Credential ID: ${pg_cred_id}"
    info "n8n Setup: Ollama Credential ID: ${ollama_cred_id}"

    # Cleanup setup script
    pct exec "$ctid" -- rm -f /tmp/n8n_setup.sh 2>/dev/null || true

    return 0
  else
    warn "n8n Setup: Setup script failed"
    # Cleanup
    pct exec "$ctid" -- rm -f /tmp/n8n_setup.sh /tmp/n8n_cookies.txt /tmp/rag_workflow_template.json /tmp/rag_workflow.json 2>/dev/null || true
    return 1
  fi
}
|
||||
0
logs/install_2430046.log
Normal file
0
logs/install_2430046.log
Normal file
1111
logs/sb-1772907798.log
Normal file
1111
logs/sb-1772907798.log
Normal file
File diff suppressed because it is too large
Load Diff
1093
logs/sb-1772907999.log
Normal file
1093
logs/sb-1772907999.log
Normal file
File diff suppressed because it is too large
Load Diff
378
sql/add_installer_json_api.sql
Normal file
378
sql/add_installer_json_api.sql
Normal file
@@ -0,0 +1,378 @@
|
||||
-- =====================================================
|
||||
-- BotKonzept - Installer JSON API Extension
|
||||
-- =====================================================
|
||||
-- Extends the database schema to store and expose installer JSON data
|
||||
-- safely to frontend clients (without secrets)
|
||||
|
||||
-- =====================================================
|
||||
-- Step 1: Add installer_json column to instances table
|
||||
-- =====================================================
|
||||
|
||||
-- Add column to store the complete installer JSON
|
||||
ALTER TABLE instances
|
||||
ADD COLUMN IF NOT EXISTS installer_json JSONB DEFAULT '{}'::jsonb;
|
||||
|
||||
-- Create index for faster JSON queries
|
||||
CREATE INDEX IF NOT EXISTS idx_instances_installer_json ON instances USING gin(installer_json);
|
||||
|
||||
-- Add comment
|
||||
COMMENT ON COLUMN instances.installer_json IS 'Complete installer JSON output from install.sh (includes secrets - use api.instance_config view for safe access)';
|
||||
|
||||
-- =====================================================
|
||||
-- Step 2: Create safe API view (NON-SECRET data only)
|
||||
-- =====================================================
|
||||
|
||||
-- Create API schema if it doesn't exist
|
||||
CREATE SCHEMA IF NOT EXISTS api;
|
||||
|
||||
-- Grant usage on api schema
|
||||
GRANT USAGE ON SCHEMA api TO anon, authenticated, service_role;
|
||||
|
||||
-- Create view that exposes only safe (non-secret) installer data
|
||||
CREATE OR REPLACE VIEW api.instance_config AS
|
||||
SELECT
|
||||
i.id,
|
||||
i.customer_id,
|
||||
i.lxc_id as ctid,
|
||||
i.hostname,
|
||||
i.fqdn,
|
||||
i.ip,
|
||||
i.vlan,
|
||||
i.status,
|
||||
i.created_at,
|
||||
-- Extract safe URLs from installer_json
|
||||
jsonb_build_object(
|
||||
'n8n_internal', i.installer_json->'urls'->>'n8n_internal',
|
||||
'n8n_external', i.installer_json->'urls'->>'n8n_external',
|
||||
'postgrest', i.installer_json->'urls'->>'postgrest',
|
||||
'chat_webhook', i.installer_json->'urls'->>'chat_webhook',
|
||||
'chat_internal', i.installer_json->'urls'->>'chat_internal',
|
||||
'upload_form', i.installer_json->'urls'->>'upload_form',
|
||||
'upload_form_internal', i.installer_json->'urls'->>'upload_form_internal'
|
||||
) as urls,
|
||||
-- Extract safe Supabase data (NO service_role_key, NO jwt_secret)
|
||||
jsonb_build_object(
|
||||
'url_external', i.installer_json->'supabase'->>'url_external',
|
||||
'anon_key', i.installer_json->'supabase'->>'anon_key'
|
||||
) as supabase,
|
||||
-- Extract Ollama URL (safe)
|
||||
jsonb_build_object(
|
||||
'url', i.installer_json->'ollama'->>'url',
|
||||
'model', i.installer_json->'ollama'->>'model',
|
||||
'embedding_model', i.installer_json->'ollama'->>'embedding_model'
|
||||
) as ollama,
|
||||
-- Customer info (joined)
|
||||
c.email as customer_email,
|
||||
c.first_name,
|
||||
c.last_name,
|
||||
c.company,
|
||||
c.status as customer_status
|
||||
FROM instances i
|
||||
JOIN customers c ON i.customer_id = c.id
|
||||
WHERE i.status = 'active' AND i.deleted_at IS NULL;
|
||||
|
||||
-- Add comment
|
||||
COMMENT ON VIEW api.instance_config IS 'Safe API view for instance configuration - exposes only non-secret data from installer JSON';
|
||||
|
||||
-- =====================================================
|
||||
-- Step 3: Row Level Security (RLS) for API view
|
||||
-- =====================================================
|
||||
|
||||
-- Enable RLS on the view (inherited from base table)
|
||||
-- Customers can only see their own instance config
|
||||
|
||||
-- Policy: Allow customers to see their own instance config
|
||||
CREATE POLICY instance_config_select_own ON instances
|
||||
FOR SELECT
|
||||
USING (
|
||||
-- Allow if customer_id matches authenticated user
|
||||
customer_id::text = auth.uid()::text
|
||||
OR
|
||||
-- Allow service_role to see all (for n8n workflows)
|
||||
auth.jwt()->>'role' = 'service_role'
|
||||
);
|
||||
|
||||
-- Grant SELECT on api.instance_config view
|
||||
GRANT SELECT ON api.instance_config TO anon, authenticated, service_role;
|
||||
|
||||
-- =====================================================
|
||||
-- Step 4: Create function to get config by customer email
|
||||
-- =====================================================
|
||||
|
||||
-- Function to get instance config by customer email (for public access)
|
||||
CREATE OR REPLACE FUNCTION api.get_instance_config_by_email(customer_email_param TEXT)
|
||||
RETURNS TABLE (
|
||||
id UUID,
|
||||
customer_id UUID,
|
||||
ctid BIGINT,
|
||||
hostname VARCHAR,
|
||||
fqdn VARCHAR,
|
||||
ip VARCHAR,
|
||||
vlan INTEGER,
|
||||
status VARCHAR,
|
||||
created_at TIMESTAMPTZ,
|
||||
urls JSONB,
|
||||
supabase JSONB,
|
||||
ollama JSONB,
|
||||
customer_email VARCHAR,
|
||||
first_name VARCHAR,
|
||||
last_name VARCHAR,
|
||||
company VARCHAR,
|
||||
customer_status VARCHAR
|
||||
) AS $$
|
||||
BEGIN
|
||||
RETURN QUERY
|
||||
SELECT
|
||||
ic.id,
|
||||
ic.customer_id,
|
||||
ic.ctid,
|
||||
ic.hostname,
|
||||
ic.fqdn,
|
||||
ic.ip,
|
||||
ic.vlan,
|
||||
ic.status,
|
||||
ic.created_at,
|
||||
ic.urls,
|
||||
ic.supabase,
|
||||
ic.ollama,
|
||||
ic.customer_email,
|
||||
ic.first_name,
|
||||
ic.last_name,
|
||||
ic.company,
|
||||
ic.customer_status
|
||||
FROM api.instance_config ic
|
||||
WHERE ic.customer_email = customer_email_param
|
||||
LIMIT 1;
|
||||
END;
|
||||
$$ LANGUAGE plpgsql SECURITY DEFINER;
|
||||
|
||||
-- Grant execute permission
|
||||
GRANT EXECUTE ON FUNCTION api.get_instance_config_by_email(TEXT) TO anon, authenticated, service_role;
|
||||
|
||||
-- Add comment
|
||||
COMMENT ON FUNCTION api.get_instance_config_by_email IS 'Get instance configuration by customer email - returns only non-secret data';
|
||||
|
||||
-- =====================================================
|
||||
-- Step 5: Create function to get config by CTID
|
||||
-- =====================================================
|
||||
|
||||
-- Function to get instance config by CTID (for internal use)
|
||||
CREATE OR REPLACE FUNCTION api.get_instance_config_by_ctid(ctid_param BIGINT)
|
||||
RETURNS TABLE (
|
||||
id UUID,
|
||||
customer_id UUID,
|
||||
ctid BIGINT,
|
||||
hostname VARCHAR,
|
||||
fqdn VARCHAR,
|
||||
ip VARCHAR,
|
||||
vlan INTEGER,
|
||||
status VARCHAR,
|
||||
created_at TIMESTAMPTZ,
|
||||
urls JSONB,
|
||||
supabase JSONB,
|
||||
ollama JSONB,
|
||||
customer_email VARCHAR,
|
||||
first_name VARCHAR,
|
||||
last_name VARCHAR,
|
||||
company VARCHAR,
|
||||
customer_status VARCHAR
|
||||
) AS $$
|
||||
BEGIN
|
||||
RETURN QUERY
|
||||
SELECT
|
||||
ic.id,
|
||||
ic.customer_id,
|
||||
ic.ctid,
|
||||
ic.hostname,
|
||||
ic.fqdn,
|
||||
ic.ip,
|
||||
ic.vlan,
|
||||
ic.status,
|
||||
ic.created_at,
|
||||
ic.urls,
|
||||
ic.supabase,
|
||||
ic.ollama,
|
||||
ic.customer_email,
|
||||
ic.first_name,
|
||||
ic.last_name,
|
||||
ic.company,
|
||||
ic.customer_status
|
||||
FROM api.instance_config ic
|
||||
WHERE ic.ctid = ctid_param
|
||||
LIMIT 1;
|
||||
END;
|
||||
$$ LANGUAGE plpgsql SECURITY DEFINER;
|
||||
|
||||
-- Grant execute permission
|
||||
GRANT EXECUTE ON FUNCTION api.get_instance_config_by_ctid(BIGINT) TO service_role;
|
||||
|
||||
-- Add comment
|
||||
COMMENT ON FUNCTION api.get_instance_config_by_ctid IS 'Get instance configuration by CTID - for internal use only';
|
||||
|
||||
-- =====================================================
|
||||
-- Step 6: Create public config endpoint (no auth required)
|
||||
-- =====================================================
|
||||
|
||||
-- Function to get public config (for website registration form)
|
||||
-- Returns only the registration webhook URL
|
||||
CREATE OR REPLACE FUNCTION api.get_public_config()
|
||||
RETURNS TABLE (
|
||||
registration_webhook_url TEXT,
|
||||
api_base_url TEXT
|
||||
) AS $$
|
||||
BEGIN
|
||||
RETURN QUERY
|
||||
SELECT
|
||||
'https://api.botkonzept.de/webhook/botkonzept-registration'::TEXT as registration_webhook_url,
|
||||
'https://api.botkonzept.de'::TEXT as api_base_url;
|
||||
END;
|
||||
$$ LANGUAGE plpgsql SECURITY DEFINER;
|
||||
|
||||
-- Grant execute permission to everyone
|
||||
GRANT EXECUTE ON FUNCTION api.get_public_config() TO anon, authenticated, service_role;
|
||||
|
||||
-- Add comment
|
||||
COMMENT ON FUNCTION api.get_public_config IS 'Get public configuration for website (registration webhook URL)';
|
||||
|
||||
-- =====================================================
|
||||
-- Step 7: Update install.sh integration
|
||||
-- =====================================================
|
||||
|
||||
-- This SQL will be executed after instance creation
|
||||
-- The install.sh script should call this function to store the installer JSON
|
||||
|
||||
CREATE OR REPLACE FUNCTION api.store_installer_json(
|
||||
customer_email_param TEXT,
|
||||
lxc_id_param BIGINT,
|
||||
installer_json_param JSONB
|
||||
)
|
||||
RETURNS JSONB AS $$
|
||||
DECLARE
|
||||
instance_record RECORD;
|
||||
result JSONB;
|
||||
BEGIN
|
||||
-- Find the instance by customer email and lxc_id
|
||||
SELECT i.id, i.customer_id INTO instance_record
|
||||
FROM instances i
|
||||
JOIN customers c ON i.customer_id = c.id
|
||||
WHERE c.email = customer_email_param
|
||||
AND i.lxc_id = lxc_id_param
|
||||
LIMIT 1;
|
||||
|
||||
IF NOT FOUND THEN
|
||||
RETURN jsonb_build_object(
|
||||
'success', false,
|
||||
'error', 'Instance not found for customer email and LXC ID'
|
||||
);
|
||||
END IF;
|
||||
|
||||
-- Update the installer_json column
|
||||
UPDATE instances
|
||||
SET installer_json = installer_json_param,
|
||||
updated_at = NOW()
|
||||
WHERE id = instance_record.id;
|
||||
|
||||
-- Return success
|
||||
result := jsonb_build_object(
|
||||
'success', true,
|
||||
'instance_id', instance_record.id,
|
||||
'customer_id', instance_record.customer_id,
|
||||
'message', 'Installer JSON stored successfully'
|
||||
);
|
||||
|
||||
RETURN result;
|
||||
END;
|
||||
$$ LANGUAGE plpgsql SECURITY DEFINER;
|
||||
|
||||
-- Grant execute permission to service_role only
|
||||
GRANT EXECUTE ON FUNCTION api.store_installer_json(TEXT, BIGINT, JSONB) TO service_role;
|
||||
|
||||
-- Add comment
|
||||
COMMENT ON FUNCTION api.store_installer_json IS 'Store installer JSON after instance creation - called by install.sh via n8n workflow';
|
||||
|
||||
-- =====================================================
|
||||
-- Step 8: Create audit log entry for API access
|
||||
-- =====================================================
|
||||
|
||||
-- Function to log API access
|
||||
CREATE OR REPLACE FUNCTION api.log_config_access(
|
||||
customer_id_param UUID,
|
||||
access_type TEXT,
|
||||
ip_address_param INET DEFAULT NULL
|
||||
)
|
||||
RETURNS VOID AS $$
|
||||
BEGIN
|
||||
INSERT INTO audit_log (
|
||||
customer_id,
|
||||
action,
|
||||
entity_type,
|
||||
performed_by,
|
||||
ip_address,
|
||||
metadata
|
||||
) VALUES (
|
||||
customer_id_param,
|
||||
'api_config_access',
|
||||
'instance_config',
|
||||
'api_user',
|
||||
ip_address_param,
|
||||
jsonb_build_object('access_type', access_type)
|
||||
);
|
||||
END;
|
||||
$$ LANGUAGE plpgsql SECURITY DEFINER;
|
||||
|
||||
-- Grant execute permission
|
||||
GRANT EXECUTE ON FUNCTION api.log_config_access(UUID, TEXT, INET) TO anon, authenticated, service_role;
|
||||
|
||||
-- =====================================================
|
||||
-- Step 9: Example queries for testing
|
||||
-- =====================================================
|
||||
|
||||
-- Example 1: Get instance config by customer email
|
||||
-- SELECT * FROM api.get_instance_config_by_email('max@beispiel.de');
|
||||
|
||||
-- Example 2: Get instance config by CTID
|
||||
-- SELECT * FROM api.get_instance_config_by_ctid(769697636);
|
||||
|
||||
-- Example 3: Get public config
|
||||
-- SELECT * FROM api.get_public_config();
|
||||
|
||||
-- Example 4: Store installer JSON (called by install.sh)
|
||||
-- SELECT api.store_installer_json(
|
||||
-- 'max@beispiel.de',
|
||||
-- 769697636,
|
||||
-- '{"ctid": 769697636, "urls": {...}, ...}'::jsonb
|
||||
-- );
|
||||
|
||||
-- =====================================================
|
||||
-- Step 10: PostgREST API Routes
|
||||
-- =====================================================
|
||||
|
||||
-- After running this SQL, the following PostgREST routes will be available:
|
||||
--
|
||||
-- 1. GET /api/instance_config
|
||||
-- - Returns all instance configs (filtered by RLS)
|
||||
-- - Requires authentication
|
||||
--
|
||||
-- 2. POST /rpc/get_instance_config_by_email
|
||||
-- - Body: {"customer_email_param": "max@beispiel.de"}
|
||||
-- - Returns instance config for specific customer
|
||||
-- - No authentication required (public)
|
||||
--
|
||||
-- 3. POST /rpc/get_instance_config_by_ctid
|
||||
-- - Body: {"ctid_param": 769697636}
|
||||
-- - Returns instance config for specific CTID
|
||||
-- - Requires service_role authentication
|
||||
--
|
||||
-- 4. POST /rpc/get_public_config
|
||||
-- - Body: {}
|
||||
-- - Returns public configuration (registration webhook URL)
|
||||
-- - No authentication required (public)
|
||||
--
|
||||
-- 5. POST /rpc/store_installer_json
|
||||
-- - Body: {"customer_email_param": "...", "lxc_id_param": 123, "installer_json_param": {...}}
|
||||
-- - Stores installer JSON after instance creation
|
||||
-- - Requires service_role authentication
|
||||
|
||||
-- =====================================================
|
||||
-- End of API Extension
|
||||
-- =====================================================
|
||||
476
sql/add_installer_json_api_supabase_auth.sql
Normal file
476
sql/add_installer_json_api_supabase_auth.sql
Normal file
@@ -0,0 +1,476 @@
|
||||
-- =====================================================
|
||||
-- BotKonzept - Installer JSON API (Supabase Auth)
|
||||
-- =====================================================
|
||||
-- Secure API using Supabase Auth JWT tokens
|
||||
-- NO Service Role Key in Frontend - EVER!
|
||||
|
||||
-- =====================================================
|
||||
-- Step 1: Add installer_json column to instances table
|
||||
-- =====================================================
|
||||
|
||||
ALTER TABLE instances
|
||||
ADD COLUMN IF NOT EXISTS installer_json JSONB DEFAULT '{}'::jsonb;
|
||||
|
||||
CREATE INDEX IF NOT EXISTS idx_instances_installer_json ON instances USING gin(installer_json);
|
||||
|
||||
COMMENT ON COLUMN instances.installer_json IS 'Complete installer JSON output from install.sh (includes secrets - use api.get_my_instance_config() for safe access)';
|
||||
|
||||
-- =====================================================
|
||||
-- Step 2: Link instances to Supabase Auth users
|
||||
-- =====================================================
|
||||
|
||||
-- Add owner_user_id column to link instance to Supabase Auth user
|
||||
ALTER TABLE instances
|
||||
ADD COLUMN IF NOT EXISTS owner_user_id UUID REFERENCES auth.users(id) ON DELETE SET NULL;
|
||||
|
||||
-- Create index for faster lookups
|
||||
CREATE INDEX IF NOT EXISTS idx_instances_owner_user_id ON instances(owner_user_id);
|
||||
|
||||
COMMENT ON COLUMN instances.owner_user_id IS 'Supabase Auth user ID of the instance owner';
|
||||
|
||||
-- =====================================================
|
||||
-- Step 3: Create safe API view (NON-SECRET data only)
|
||||
-- =====================================================
|
||||
|
||||
CREATE SCHEMA IF NOT EXISTS api;
|
||||
GRANT USAGE ON SCHEMA api TO anon, authenticated, service_role;
|
||||
|
||||
-- View that exposes only safe (non-secret) installer data
|
||||
CREATE OR REPLACE VIEW api.instance_config AS
|
||||
SELECT
|
||||
i.id,
|
||||
i.customer_id,
|
||||
i.owner_user_id,
|
||||
i.lxc_id as ctid,
|
||||
i.hostname,
|
||||
i.fqdn,
|
||||
i.ip,
|
||||
i.vlan,
|
||||
i.status,
|
||||
i.created_at,
|
||||
-- Extract safe URLs from installer_json (NO SECRETS)
|
||||
jsonb_build_object(
|
||||
'n8n_internal', i.installer_json->'urls'->>'n8n_internal',
|
||||
'n8n_external', i.installer_json->'urls'->>'n8n_external',
|
||||
'postgrest', i.installer_json->'urls'->>'postgrest',
|
||||
'chat_webhook', i.installer_json->'urls'->>'chat_webhook',
|
||||
'chat_internal', i.installer_json->'urls'->>'chat_internal',
|
||||
'upload_form', i.installer_json->'urls'->>'upload_form',
|
||||
'upload_form_internal', i.installer_json->'urls'->>'upload_form_internal'
|
||||
) as urls,
|
||||
-- Extract safe Supabase data (NO service_role_key, NO jwt_secret)
|
||||
jsonb_build_object(
|
||||
'url_external', i.installer_json->'supabase'->>'url_external',
|
||||
'anon_key', i.installer_json->'supabase'->>'anon_key'
|
||||
) as supabase,
|
||||
-- Extract Ollama URL (safe)
|
||||
jsonb_build_object(
|
||||
'url', i.installer_json->'ollama'->>'url',
|
||||
'model', i.installer_json->'ollama'->>'model',
|
||||
'embedding_model', i.installer_json->'ollama'->>'embedding_model'
|
||||
) as ollama,
|
||||
-- Customer info (joined)
|
||||
c.email as customer_email,
|
||||
c.first_name,
|
||||
c.last_name,
|
||||
c.company,
|
||||
c.status as customer_status
|
||||
FROM instances i
|
||||
JOIN customers c ON i.customer_id = c.id
|
||||
WHERE i.status = 'active' AND i.deleted_at IS NULL;
|
||||
|
||||
COMMENT ON VIEW api.instance_config IS 'Safe API view - exposes only non-secret data from installer JSON';
|
||||
|
||||
-- =====================================================
|
||||
-- Step 4: Row Level Security (RLS) Policies
|
||||
-- =====================================================
|
||||
|
||||
-- Enable RLS on instances table (if not already enabled)
|
||||
ALTER TABLE instances ENABLE ROW LEVEL SECURITY;
|
||||
|
||||
-- Drop old policy if exists
|
||||
DROP POLICY IF EXISTS instance_config_select_own ON instances;
|
||||
|
||||
-- Policy: Users can only see their own instances
|
||||
CREATE POLICY instances_select_own ON instances
|
||||
FOR SELECT
|
||||
USING (
|
||||
-- Allow if owner_user_id matches authenticated user
|
||||
owner_user_id = auth.uid()
|
||||
OR
|
||||
-- Allow service_role to see all (for n8n workflows)
|
||||
auth.jwt()->>'role' = 'service_role'
|
||||
);
|
||||
|
||||
-- Grant SELECT on api.instance_config view
|
||||
GRANT SELECT ON api.instance_config TO authenticated, service_role;
|
||||
|
||||
-- =====================================================
|
||||
-- Step 5: Function to get MY instance config (Auth required)
|
||||
-- =====================================================
|
||||
|
||||
-- Function to get instance config for authenticated user
|
||||
-- Uses auth.uid() - NO email parameter (more secure)
|
||||
CREATE OR REPLACE FUNCTION api.get_my_instance_config()
|
||||
RETURNS TABLE (
|
||||
id UUID,
|
||||
customer_id UUID,
|
||||
owner_user_id UUID,
|
||||
ctid BIGINT,
|
||||
hostname VARCHAR,
|
||||
fqdn VARCHAR,
|
||||
ip VARCHAR,
|
||||
vlan INTEGER,
|
||||
status VARCHAR,
|
||||
created_at TIMESTAMPTZ,
|
||||
urls JSONB,
|
||||
supabase JSONB,
|
||||
ollama JSONB,
|
||||
customer_email VARCHAR,
|
||||
first_name VARCHAR,
|
||||
last_name VARCHAR,
|
||||
company VARCHAR,
|
||||
customer_status VARCHAR
|
||||
)
|
||||
SECURITY DEFINER
|
||||
SET search_path = public
|
||||
AS $$
|
||||
BEGIN
|
||||
-- Check if user is authenticated
|
||||
IF auth.uid() IS NULL THEN
|
||||
RAISE EXCEPTION 'Not authenticated';
|
||||
END IF;
|
||||
|
||||
-- Return instance config for authenticated user
|
||||
RETURN QUERY
|
||||
SELECT
|
||||
ic.id,
|
||||
ic.customer_id,
|
||||
ic.owner_user_id,
|
||||
ic.ctid,
|
||||
ic.hostname,
|
||||
ic.fqdn,
|
||||
ic.ip,
|
||||
ic.vlan,
|
||||
ic.status,
|
||||
ic.created_at,
|
||||
ic.urls,
|
||||
ic.supabase,
|
||||
ic.ollama,
|
||||
ic.customer_email,
|
||||
ic.first_name,
|
||||
ic.last_name,
|
||||
ic.company,
|
||||
ic.customer_status
|
||||
FROM api.instance_config ic
|
||||
WHERE ic.owner_user_id = auth.uid()
|
||||
LIMIT 1;
|
||||
END;
|
||||
$$ LANGUAGE plpgsql;
|
||||
|
||||
GRANT EXECUTE ON FUNCTION api.get_my_instance_config() TO authenticated;
|
||||
|
||||
COMMENT ON FUNCTION api.get_my_instance_config IS 'Get instance configuration for authenticated user - uses auth.uid() for security';
|
||||
|
||||
-- =====================================================
|
||||
-- Step 6: Function to get config by CTID (Service Role ONLY)
|
||||
-- =====================================================
|
||||
|
||||
CREATE OR REPLACE FUNCTION api.get_instance_config_by_ctid(ctid_param BIGINT)
|
||||
RETURNS TABLE (
|
||||
id UUID,
|
||||
customer_id UUID,
|
||||
owner_user_id UUID,
|
||||
ctid BIGINT,
|
||||
hostname VARCHAR,
|
||||
fqdn VARCHAR,
|
||||
ip VARCHAR,
|
||||
vlan INTEGER,
|
||||
status VARCHAR,
|
||||
created_at TIMESTAMPTZ,
|
||||
urls JSONB,
|
||||
supabase JSONB,
|
||||
ollama JSONB,
|
||||
customer_email VARCHAR,
|
||||
first_name VARCHAR,
|
||||
last_name VARCHAR,
|
||||
company VARCHAR,
|
||||
customer_status VARCHAR
|
||||
)
|
||||
SECURITY DEFINER
|
||||
SET search_path = public
|
||||
AS $$
|
||||
BEGIN
|
||||
-- Only service_role can call this
|
||||
IF auth.jwt()->>'role' != 'service_role' THEN
|
||||
RAISE EXCEPTION 'Forbidden: service_role required';
|
||||
END IF;
|
||||
|
||||
RETURN QUERY
|
||||
SELECT
|
||||
ic.id,
|
||||
ic.customer_id,
|
||||
ic.owner_user_id,
|
||||
ic.ctid,
|
||||
ic.hostname,
|
||||
ic.fqdn,
|
||||
ic.ip,
|
||||
ic.vlan,
|
||||
ic.status,
|
||||
ic.created_at,
|
||||
ic.urls,
|
||||
ic.supabase,
|
||||
ic.ollama,
|
||||
ic.customer_email,
|
||||
ic.first_name,
|
||||
ic.last_name,
|
||||
ic.company,
|
||||
ic.customer_status
|
||||
FROM api.instance_config ic
|
||||
WHERE ic.ctid = ctid_param
|
||||
LIMIT 1;
|
||||
END;
|
||||
$$ LANGUAGE plpgsql;
|
||||
|
||||
GRANT EXECUTE ON FUNCTION api.get_instance_config_by_ctid(BIGINT) TO service_role;
|
||||
|
||||
COMMENT ON FUNCTION api.get_instance_config_by_ctid IS 'Get instance configuration by CTID - service_role only';
|
||||
|
||||
-- =====================================================
|
||||
-- Step 7: Public config endpoint (NO auth required)
|
||||
-- =====================================================
|
||||
|
||||
CREATE OR REPLACE FUNCTION api.get_public_config()
|
||||
RETURNS TABLE (
|
||||
registration_webhook_url TEXT,
|
||||
api_base_url TEXT
|
||||
)
|
||||
SECURITY DEFINER
|
||||
SET search_path = public
|
||||
AS $$
|
||||
BEGIN
|
||||
RETURN QUERY
|
||||
SELECT
|
||||
'https://api.botkonzept.de/webhook/botkonzept-registration'::TEXT as registration_webhook_url,
|
||||
'https://api.botkonzept.de'::TEXT as api_base_url;
|
||||
END;
|
||||
$$ LANGUAGE plpgsql;
|
||||
|
||||
GRANT EXECUTE ON FUNCTION api.get_public_config() TO anon, authenticated, service_role;
|
||||
|
||||
COMMENT ON FUNCTION api.get_public_config IS 'Get public configuration for website (registration webhook URL)';
|
||||
|
||||
-- =====================================================
|
||||
-- Step 8: Store installer JSON (Service Role ONLY)
|
||||
-- =====================================================
|
||||
|
||||
CREATE OR REPLACE FUNCTION api.store_installer_json(
|
||||
customer_email_param TEXT,
|
||||
lxc_id_param BIGINT,
|
||||
installer_json_param JSONB
|
||||
)
|
||||
RETURNS JSONB
|
||||
SECURITY DEFINER
|
||||
SET search_path = public
|
||||
AS $$
|
||||
DECLARE
|
||||
instance_record RECORD;
|
||||
result JSONB;
|
||||
BEGIN
|
||||
-- Only service_role can call this
|
||||
IF auth.jwt()->>'role' != 'service_role' THEN
|
||||
RAISE EXCEPTION 'Forbidden: service_role required';
|
||||
END IF;
|
||||
|
||||
-- Find the instance by customer email and lxc_id
|
||||
SELECT i.id, i.customer_id, c.id as auth_user_id INTO instance_record
|
||||
FROM instances i
|
||||
JOIN customers c ON i.customer_id = c.id
|
||||
WHERE c.email = customer_email_param
|
||||
AND i.lxc_id = lxc_id_param
|
||||
LIMIT 1;
|
||||
|
||||
IF NOT FOUND THEN
|
||||
RETURN jsonb_build_object(
|
||||
'success', false,
|
||||
'error', 'Instance not found for customer email and LXC ID'
|
||||
);
|
||||
END IF;
|
||||
|
||||
-- Update the installer_json column
|
||||
UPDATE instances
|
||||
SET installer_json = installer_json_param,
|
||||
updated_at = NOW()
|
||||
WHERE id = instance_record.id;
|
||||
|
||||
-- Return success
|
||||
result := jsonb_build_object(
|
||||
'success', true,
|
||||
'instance_id', instance_record.id,
|
||||
'customer_id', instance_record.customer_id,
|
||||
'message', 'Installer JSON stored successfully'
|
||||
);
|
||||
|
||||
RETURN result;
|
||||
END;
|
||||
$$ LANGUAGE plpgsql;
|
||||
|
||||
GRANT EXECUTE ON FUNCTION api.store_installer_json(TEXT, BIGINT, JSONB) TO service_role;
|
||||
|
||||
COMMENT ON FUNCTION api.store_installer_json IS 'Store installer JSON after instance creation - service_role only';
|
||||
|
||||
-- =====================================================
|
||||
-- Step 9: Link customer to Supabase Auth user
|
||||
-- =====================================================
|
||||
|
||||
-- Function to link customer to Supabase Auth user (called during registration)
|
||||
CREATE OR REPLACE FUNCTION api.link_customer_to_auth_user(
|
||||
customer_email_param TEXT,
|
||||
auth_user_id_param UUID
|
||||
)
|
||||
RETURNS JSONB
|
||||
SECURITY DEFINER
|
||||
SET search_path = public
|
||||
AS $$
|
||||
DECLARE
|
||||
customer_record RECORD;
|
||||
instance_record RECORD;
|
||||
result JSONB;
|
||||
BEGIN
|
||||
-- Only service_role can call this
|
||||
IF auth.jwt()->>'role' != 'service_role' THEN
|
||||
RAISE EXCEPTION 'Forbidden: service_role required';
|
||||
END IF;
|
||||
|
||||
-- Find customer by email
|
||||
SELECT id INTO customer_record
|
||||
FROM customers
|
||||
WHERE email = customer_email_param
|
||||
LIMIT 1;
|
||||
|
||||
IF NOT FOUND THEN
|
||||
RETURN jsonb_build_object(
|
||||
'success', false,
|
||||
'error', 'Customer not found'
|
||||
);
|
||||
END IF;
|
||||
|
||||
-- Update all instances for this customer with owner_user_id
|
||||
UPDATE instances
|
||||
SET owner_user_id = auth_user_id_param,
|
||||
updated_at = NOW()
|
||||
WHERE customer_id = customer_record.id;
|
||||
|
||||
-- Return success
|
||||
result := jsonb_build_object(
|
||||
'success', true,
|
||||
'customer_id', customer_record.id,
|
||||
'auth_user_id', auth_user_id_param,
|
||||
'message', 'Customer linked to auth user successfully'
|
||||
);
|
||||
|
||||
RETURN result;
|
||||
END;
|
||||
$$ LANGUAGE plpgsql;
|
||||
|
||||
GRANT EXECUTE ON FUNCTION api.link_customer_to_auth_user(TEXT, UUID) TO service_role;
|
||||
|
||||
COMMENT ON FUNCTION api.link_customer_to_auth_user IS 'Link customer to Supabase Auth user - service_role only';
|
||||
|
||||
-- =====================================================
|
||||
-- Step 10: Audit logging
|
||||
-- =====================================================
|
||||
|
||||
CREATE OR REPLACE FUNCTION api.log_config_access(
|
||||
access_type TEXT,
|
||||
ip_address_param INET DEFAULT NULL
|
||||
)
|
||||
RETURNS VOID
|
||||
SECURITY DEFINER
|
||||
SET search_path = public
|
||||
AS $$
|
||||
BEGIN
|
||||
-- Log access for authenticated user
|
||||
IF auth.uid() IS NOT NULL THEN
|
||||
INSERT INTO audit_log (
|
||||
customer_id,
|
||||
action,
|
||||
entity_type,
|
||||
performed_by,
|
||||
ip_address,
|
||||
metadata
|
||||
)
|
||||
SELECT
|
||||
i.customer_id,
|
||||
'api_config_access',
|
||||
'instance_config',
|
||||
auth.uid()::text,
|
||||
ip_address_param,
|
||||
jsonb_build_object('access_type', access_type)
|
||||
FROM instances i
|
||||
WHERE i.owner_user_id = auth.uid()
|
||||
LIMIT 1;
|
||||
END IF;
|
||||
END;
|
||||
$$ LANGUAGE plpgsql;
|
||||
|
||||
GRANT EXECUTE ON FUNCTION api.log_config_access(TEXT, INET) TO authenticated, service_role;
|
||||
|
||||
-- =====================================================
|
||||
-- Step 11: PostgREST API Routes
|
||||
-- =====================================================
|
||||
|
||||
-- Available routes:
|
||||
--
|
||||
-- 1. POST /rpc/get_my_instance_config
|
||||
-- - Body: {}
|
||||
-- - Returns instance config for authenticated user
|
||||
-- - Requires: Supabase Auth JWT token
|
||||
-- - Response: Single instance config object (or empty if not found)
|
||||
--
|
||||
-- 2. POST /rpc/get_public_config
|
||||
-- - Body: {}
|
||||
-- - Returns public configuration (registration webhook URL)
|
||||
-- - Requires: No authentication
|
||||
--
|
||||
-- 3. POST /rpc/get_instance_config_by_ctid
|
||||
-- - Body: {"ctid_param": 769697636}
|
||||
-- - Returns instance config for specific CTID
|
||||
-- - Requires: Service Role Key (backend only)
|
||||
--
|
||||
-- 4. POST /rpc/store_installer_json
|
||||
-- - Body: {"customer_email_param": "...", "lxc_id_param": 123, "installer_json_param": {...}}
|
||||
-- - Stores installer JSON after instance creation
|
||||
-- - Requires: Service Role Key (backend only)
|
||||
--
|
||||
-- 5. POST /rpc/link_customer_to_auth_user
|
||||
-- - Body: {"customer_email_param": "...", "auth_user_id_param": "..."}
|
||||
-- - Links customer to Supabase Auth user
|
||||
-- - Requires: Service Role Key (backend only)
|
||||
|
||||
-- =====================================================
|
||||
-- Example Usage
|
||||
-- =====================================================
|
||||
|
||||
-- Example 1: Get my instance config (authenticated user)
|
||||
-- POST /rpc/get_my_instance_config
|
||||
-- Headers: Authorization: Bearer <USER_JWT_TOKEN>
|
||||
-- Body: {}
|
||||
|
||||
-- Example 2: Get public config (no auth)
|
||||
-- POST /rpc/get_public_config
|
||||
-- Body: {}
|
||||
|
||||
-- Example 3: Store installer JSON (service role)
|
||||
-- POST /rpc/store_installer_json
|
||||
-- Headers: Authorization: Bearer <SERVICE_ROLE_KEY>
|
||||
-- Body: {"customer_email_param": "max@beispiel.de", "lxc_id_param": 769697636, "installer_json_param": {...}}
|
||||
|
||||
-- Example 4: Link customer to auth user (service role)
|
||||
-- POST /rpc/link_customer_to_auth_user
|
||||
-- Headers: Authorization: Bearer <SERVICE_ROLE_KEY>
|
||||
-- Body: {"customer_email_param": "max@beispiel.de", "auth_user_id_param": "550e8400-e29b-41d4-a716-446655440000"}
|
||||
|
||||
-- =====================================================
|
||||
-- End of Supabase Auth API
|
||||
-- =====================================================
|
||||
444
sql/botkonzept_schema.sql
Normal file
444
sql/botkonzept_schema.sql
Normal file
@@ -0,0 +1,444 @@
|
||||
-- =====================================================
|
||||
-- BotKonzept - Database Schema for Customer Management
|
||||
-- =====================================================
|
||||
-- This schema manages customers, instances, emails, and payments
|
||||
-- for the BotKonzept SaaS platform
|
||||
|
||||
-- Enable UUID extension
|
||||
CREATE EXTENSION IF NOT EXISTS "uuid-ossp";
|
||||
|
||||
-- =====================================================
|
||||
-- Table: customers
|
||||
-- =====================================================
|
||||
-- Stores customer information and trial status
|
||||
CREATE TABLE IF NOT EXISTS customers (
|
||||
id UUID PRIMARY KEY DEFAULT uuid_generate_v4(),
|
||||
email VARCHAR(255) UNIQUE NOT NULL,
|
||||
first_name VARCHAR(100) NOT NULL,
|
||||
last_name VARCHAR(100) NOT NULL,
|
||||
company VARCHAR(255),
|
||||
phone VARCHAR(50),
|
||||
|
||||
-- Status tracking
|
||||
status VARCHAR(50) DEFAULT 'trial' CHECK (status IN ('trial', 'active', 'cancelled', 'suspended', 'deleted')),
|
||||
|
||||
-- Timestamps
|
||||
created_at TIMESTAMPTZ DEFAULT NOW(),
|
||||
updated_at TIMESTAMPTZ DEFAULT NOW(),
|
||||
trial_end_date TIMESTAMPTZ,
|
||||
subscription_start_date TIMESTAMPTZ,
|
||||
subscription_end_date TIMESTAMPTZ,
|
||||
|
||||
-- Marketing tracking
|
||||
utm_source VARCHAR(100),
|
||||
utm_medium VARCHAR(100),
|
||||
utm_campaign VARCHAR(100),
|
||||
referral_code VARCHAR(50),
|
||||
|
||||
-- Metadata
|
||||
metadata JSONB DEFAULT '{}'::jsonb,
|
||||
|
||||
-- Indexes
|
||||
CONSTRAINT email_format CHECK (email ~* '^[A-Za-z0-9._%+-]+@[A-Za-z0-9.-]+\.[A-Za-z]{2,}$')
|
||||
);
|
||||
|
||||
-- Create indexes for customers
|
||||
CREATE INDEX idx_customers_email ON customers(email);
|
||||
CREATE INDEX idx_customers_status ON customers(status);
|
||||
CREATE INDEX idx_customers_created_at ON customers(created_at);
|
||||
CREATE INDEX idx_customers_trial_end_date ON customers(trial_end_date);
|
||||
|
||||
-- =====================================================
|
||||
-- Table: instances
|
||||
-- =====================================================
|
||||
-- Stores LXC instance information for each customer
|
||||
CREATE TABLE IF NOT EXISTS instances (
|
||||
id UUID PRIMARY KEY DEFAULT uuid_generate_v4(),
|
||||
customer_id UUID NOT NULL REFERENCES customers(id) ON DELETE CASCADE,
|
||||
|
||||
-- Instance details
|
||||
lxc_id BIGINT NOT NULL UNIQUE,
|
||||
hostname VARCHAR(255) NOT NULL,
|
||||
ip VARCHAR(50) NOT NULL,
|
||||
fqdn VARCHAR(255) NOT NULL,
|
||||
vlan INTEGER,
|
||||
|
||||
-- Status
|
||||
status VARCHAR(50) DEFAULT 'active' CHECK (status IN ('creating', 'active', 'suspended', 'deleted', 'error')),
|
||||
|
||||
-- Credentials (encrypted JSON)
|
||||
credentials JSONB NOT NULL,
|
||||
|
||||
-- Timestamps
|
||||
created_at TIMESTAMPTZ DEFAULT NOW(),
|
||||
updated_at TIMESTAMPTZ DEFAULT NOW(),
|
||||
deleted_at TIMESTAMPTZ,
|
||||
trial_end_date TIMESTAMPTZ,
|
||||
|
||||
-- Resource usage
|
||||
disk_usage_gb DECIMAL(10,2),
|
||||
memory_usage_mb INTEGER,
|
||||
cpu_usage_percent DECIMAL(5,2),
|
||||
|
||||
-- Metadata
|
||||
metadata JSONB DEFAULT '{}'::jsonb
|
||||
);
|
||||
|
||||
-- Create indexes for instances
|
||||
CREATE INDEX idx_instances_customer_id ON instances(customer_id);
|
||||
CREATE INDEX idx_instances_lxc_id ON instances(lxc_id);
|
||||
CREATE INDEX idx_instances_status ON instances(status);
|
||||
CREATE INDEX idx_instances_hostname ON instances(hostname);
|
||||
|
||||
-- =====================================================
|
||||
-- Table: emails_sent
|
||||
-- =====================================================
|
||||
-- Tracks all emails sent to customers
|
||||
CREATE TABLE IF NOT EXISTS emails_sent (
|
||||
id UUID PRIMARY KEY DEFAULT uuid_generate_v4(),
|
||||
customer_id UUID NOT NULL REFERENCES customers(id) ON DELETE CASCADE,
|
||||
|
||||
-- Email details
|
||||
email_type VARCHAR(50) NOT NULL CHECK (email_type IN (
|
||||
'welcome',
|
||||
'day3_upgrade',
|
||||
'day5_reminder',
|
||||
'day7_last_chance',
|
||||
'day8_goodbye',
|
||||
'payment_confirm',
|
||||
'payment_failed',
|
||||
'instance_created',
|
||||
'instance_deleted',
|
||||
'password_reset',
|
||||
'newsletter'
|
||||
)),
|
||||
|
||||
subject VARCHAR(255),
|
||||
recipient_email VARCHAR(255) NOT NULL,
|
||||
|
||||
-- Status
|
||||
status VARCHAR(50) DEFAULT 'sent' CHECK (status IN ('sent', 'delivered', 'opened', 'clicked', 'bounced', 'failed')),
|
||||
|
||||
-- Timestamps
|
||||
sent_at TIMESTAMPTZ DEFAULT NOW(),
|
||||
delivered_at TIMESTAMPTZ,
|
||||
opened_at TIMESTAMPTZ,
|
||||
clicked_at TIMESTAMPTZ,
|
||||
|
||||
-- Metadata
|
||||
metadata JSONB DEFAULT '{}'::jsonb
|
||||
);
|
||||
|
||||
-- Create indexes for emails_sent
|
||||
CREATE INDEX idx_emails_customer_id ON emails_sent(customer_id);
|
||||
CREATE INDEX idx_emails_type ON emails_sent(email_type);
|
||||
CREATE INDEX idx_emails_sent_at ON emails_sent(sent_at);
|
||||
CREATE INDEX idx_emails_status ON emails_sent(status);
|
||||
|
||||
-- =====================================================
|
||||
-- Table: subscriptions
|
||||
-- =====================================================
|
||||
-- Stores subscription and payment information
|
||||
CREATE TABLE IF NOT EXISTS subscriptions (
|
||||
id UUID PRIMARY KEY DEFAULT uuid_generate_v4(),
|
||||
customer_id UUID NOT NULL REFERENCES customers(id) ON DELETE CASCADE,
|
||||
|
||||
-- Plan details
|
||||
plan_name VARCHAR(50) NOT NULL CHECK (plan_name IN ('trial', 'starter', 'business', 'enterprise')),
|
||||
plan_price DECIMAL(10,2) NOT NULL,
|
||||
billing_cycle VARCHAR(20) DEFAULT 'monthly' CHECK (billing_cycle IN ('monthly', 'yearly')),
|
||||
|
||||
-- Discount
|
||||
discount_percent DECIMAL(5,2) DEFAULT 0,
|
||||
discount_code VARCHAR(50),
|
||||
discount_end_date TIMESTAMPTZ,
|
||||
|
||||
-- Status
|
||||
status VARCHAR(50) DEFAULT 'active' CHECK (status IN ('active', 'cancelled', 'past_due', 'suspended')),
|
||||
|
||||
-- Payment provider
|
||||
payment_provider VARCHAR(50) CHECK (payment_provider IN ('stripe', 'paypal', 'manual')),
|
||||
payment_provider_id VARCHAR(255),
|
||||
|
||||
-- Timestamps
|
||||
created_at TIMESTAMPTZ DEFAULT NOW(),
|
||||
updated_at TIMESTAMPTZ DEFAULT NOW(),
|
||||
current_period_start TIMESTAMPTZ,
|
||||
current_period_end TIMESTAMPTZ,
|
||||
cancelled_at TIMESTAMPTZ,
|
||||
|
||||
-- Metadata
|
||||
metadata JSONB DEFAULT '{}'::jsonb
|
||||
);
|
||||
|
||||
-- Create indexes for subscriptions
|
||||
CREATE INDEX idx_subscriptions_customer_id ON subscriptions(customer_id);
|
||||
CREATE INDEX idx_subscriptions_status ON subscriptions(status);
|
||||
CREATE INDEX idx_subscriptions_plan_name ON subscriptions(plan_name);
|
||||
|
||||
-- =====================================================
|
||||
-- Table: payments
|
||||
-- =====================================================
|
||||
-- Stores payment transaction history
|
||||
CREATE TABLE IF NOT EXISTS payments (
|
||||
id UUID PRIMARY KEY DEFAULT uuid_generate_v4(),
|
||||
customer_id UUID NOT NULL REFERENCES customers(id) ON DELETE CASCADE,
|
||||
subscription_id UUID REFERENCES subscriptions(id) ON DELETE SET NULL,
|
||||
|
||||
-- Payment details
|
||||
amount DECIMAL(10,2) NOT NULL,
|
||||
currency VARCHAR(3) DEFAULT 'EUR',
|
||||
|
||||
-- Status
|
||||
status VARCHAR(50) DEFAULT 'pending' CHECK (status IN ('pending', 'succeeded', 'failed', 'refunded', 'cancelled')),
|
||||
|
||||
-- Payment provider
|
||||
payment_provider VARCHAR(50) CHECK (payment_provider IN ('stripe', 'paypal', 'manual')),
|
||||
payment_provider_id VARCHAR(255),
|
||||
payment_method VARCHAR(50),
|
||||
|
||||
-- Timestamps
|
||||
created_at TIMESTAMPTZ DEFAULT NOW(),
|
||||
paid_at TIMESTAMPTZ,
|
||||
refunded_at TIMESTAMPTZ,
|
||||
|
||||
-- Invoice
|
||||
invoice_number VARCHAR(50),
|
||||
invoice_url TEXT,
|
||||
|
||||
-- Metadata
|
||||
metadata JSONB DEFAULT '{}'::jsonb
|
||||
);
|
||||
|
||||
-- Create indexes for payments
|
||||
CREATE INDEX idx_payments_customer_id ON payments(customer_id);
|
||||
CREATE INDEX idx_payments_subscription_id ON payments(subscription_id);
|
||||
CREATE INDEX idx_payments_status ON payments(status);
|
||||
CREATE INDEX idx_payments_created_at ON payments(created_at);
|
||||
|
||||
-- =====================================================
|
||||
-- Table: usage_stats
|
||||
-- =====================================================
|
||||
-- Tracks usage statistics for each instance
|
||||
CREATE TABLE IF NOT EXISTS usage_stats (
|
||||
id UUID PRIMARY KEY DEFAULT uuid_generate_v4(),
|
||||
instance_id UUID NOT NULL REFERENCES instances(id) ON DELETE CASCADE,
|
||||
|
||||
-- Usage metrics
|
||||
date DATE NOT NULL,
|
||||
messages_count INTEGER DEFAULT 0,
|
||||
documents_count INTEGER DEFAULT 0,
|
||||
api_calls_count INTEGER DEFAULT 0,
|
||||
storage_used_mb DECIMAL(10,2) DEFAULT 0,
|
||||
|
||||
-- Timestamps
|
||||
created_at TIMESTAMPTZ DEFAULT NOW(),
|
||||
|
||||
-- Unique constraint: one record per instance per day
|
||||
UNIQUE(instance_id, date)
|
||||
);
|
||||
|
||||
-- Create indexes for usage_stats
|
||||
CREATE INDEX idx_usage_instance_id ON usage_stats(instance_id);
|
||||
CREATE INDEX idx_usage_date ON usage_stats(date);
|
||||
|
||||
-- =====================================================
|
||||
-- Table: audit_log
|
||||
-- =====================================================
|
||||
-- Audit trail for important actions
|
||||
CREATE TABLE IF NOT EXISTS audit_log (
|
||||
id UUID PRIMARY KEY DEFAULT uuid_generate_v4(),
|
||||
customer_id UUID REFERENCES customers(id) ON DELETE SET NULL,
|
||||
instance_id UUID REFERENCES instances(id) ON DELETE SET NULL,
|
||||
|
||||
-- Action details
|
||||
action VARCHAR(100) NOT NULL,
|
||||
entity_type VARCHAR(50),
|
||||
entity_id UUID,
|
||||
|
||||
-- User/system that performed the action
|
||||
performed_by VARCHAR(100),
|
||||
ip_address INET,
|
||||
user_agent TEXT,
|
||||
|
||||
-- Changes
|
||||
old_values JSONB,
|
||||
new_values JSONB,
|
||||
|
||||
-- Timestamp
|
||||
created_at TIMESTAMPTZ DEFAULT NOW(),
|
||||
|
||||
-- Metadata
|
||||
metadata JSONB DEFAULT '{}'::jsonb
|
||||
);
|
||||
|
||||
-- Create indexes for audit_log
|
||||
CREATE INDEX idx_audit_customer_id ON audit_log(customer_id);
|
||||
CREATE INDEX idx_audit_instance_id ON audit_log(instance_id);
|
||||
CREATE INDEX idx_audit_action ON audit_log(action);
|
||||
CREATE INDEX idx_audit_created_at ON audit_log(created_at);
|
||||
|
||||
-- =====================================================
|
||||
-- Functions & Triggers
|
||||
-- =====================================================
|
||||
|
||||
-- Function to update updated_at timestamp
|
||||
CREATE OR REPLACE FUNCTION update_updated_at_column()
|
||||
RETURNS TRIGGER AS $$
|
||||
BEGIN
|
||||
NEW.updated_at = NOW();
|
||||
RETURN NEW;
|
||||
END;
|
||||
$$ LANGUAGE plpgsql;
|
||||
|
||||
-- Triggers for updated_at
|
||||
CREATE TRIGGER update_customers_updated_at BEFORE UPDATE ON customers
|
||||
FOR EACH ROW EXECUTE FUNCTION update_updated_at_column();
|
||||
|
||||
CREATE TRIGGER update_instances_updated_at BEFORE UPDATE ON instances
|
||||
FOR EACH ROW EXECUTE FUNCTION update_updated_at_column();
|
||||
|
||||
CREATE TRIGGER update_subscriptions_updated_at BEFORE UPDATE ON subscriptions
|
||||
FOR EACH ROW EXECUTE FUNCTION update_updated_at_column();
|
||||
|
||||
-- Function to calculate trial end date
|
||||
CREATE OR REPLACE FUNCTION set_trial_end_date()
|
||||
RETURNS TRIGGER AS $$
|
||||
BEGIN
|
||||
IF NEW.trial_end_date IS NULL THEN
|
||||
NEW.trial_end_date = NEW.created_at + INTERVAL '7 days';
|
||||
END IF;
|
||||
RETURN NEW;
|
||||
END;
|
||||
$$ LANGUAGE plpgsql;
|
||||
|
||||
-- Trigger for trial end date
|
||||
CREATE TRIGGER set_customer_trial_end_date BEFORE INSERT ON customers
|
||||
FOR EACH ROW EXECUTE FUNCTION set_trial_end_date();
|
||||
|
||||
-- =====================================================
|
||||
-- Views
|
||||
-- =====================================================
|
||||
|
||||
-- View: Active trials expiring soon
|
||||
CREATE OR REPLACE VIEW trials_expiring_soon AS
|
||||
SELECT
|
||||
c.id,
|
||||
c.email,
|
||||
c.first_name,
|
||||
c.last_name,
|
||||
c.created_at,
|
||||
c.trial_end_date,
|
||||
EXTRACT(DAY FROM (c.trial_end_date - NOW())) as days_remaining,
|
||||
i.lxc_id,
|
||||
i.hostname,
|
||||
i.fqdn
|
||||
FROM customers c
|
||||
JOIN instances i ON c.id = i.customer_id
|
||||
WHERE c.status = 'trial'
|
||||
AND i.status = 'active'
|
||||
AND c.trial_end_date > NOW()
|
||||
AND c.trial_end_date <= NOW() + INTERVAL '3 days';
|
||||
|
||||
-- View: Customer overview with instance info
|
||||
CREATE OR REPLACE VIEW customer_overview AS
|
||||
SELECT
|
||||
c.id,
|
||||
c.email,
|
||||
c.first_name,
|
||||
c.last_name,
|
||||
c.company,
|
||||
c.status,
|
||||
c.created_at,
|
||||
c.trial_end_date,
|
||||
i.lxc_id,
|
||||
i.hostname,
|
||||
i.fqdn,
|
||||
i.ip,
|
||||
i.status as instance_status,
|
||||
s.plan_name,
|
||||
s.plan_price,
|
||||
s.status as subscription_status
|
||||
FROM customers c
|
||||
LEFT JOIN instances i ON c.id = i.customer_id AND i.status = 'active'
|
||||
LEFT JOIN subscriptions s ON c.id = s.customer_id AND s.status = 'active';
|
||||
|
||||
-- View: Revenue metrics
|
||||
CREATE OR REPLACE VIEW revenue_metrics AS
|
||||
SELECT
|
||||
DATE_TRUNC('month', paid_at) as month,
|
||||
COUNT(*) as payment_count,
|
||||
SUM(amount) as total_revenue,
|
||||
AVG(amount) as average_payment,
|
||||
COUNT(DISTINCT customer_id) as unique_customers
|
||||
FROM payments
|
||||
WHERE status = 'succeeded'
|
||||
AND paid_at IS NOT NULL
|
||||
GROUP BY DATE_TRUNC('month', paid_at)
|
||||
ORDER BY month DESC;
|
||||
|
||||
-- =====================================================
|
||||
-- Row Level Security (RLS) Policies
|
||||
-- =====================================================
|
||||
|
||||
-- Enable RLS on tables
|
||||
ALTER TABLE customers ENABLE ROW LEVEL SECURITY;
|
||||
ALTER TABLE instances ENABLE ROW LEVEL SECURITY;
|
||||
ALTER TABLE subscriptions ENABLE ROW LEVEL SECURITY;
|
||||
ALTER TABLE payments ENABLE ROW LEVEL SECURITY;
|
||||
|
||||
-- Policy: Customers can only see their own data
|
||||
CREATE POLICY customers_select_own ON customers
|
||||
FOR SELECT
|
||||
USING (auth.uid()::text = id::text);
|
||||
|
||||
CREATE POLICY instances_select_own ON instances
|
||||
FOR SELECT
|
||||
USING (customer_id::text = auth.uid()::text);
|
||||
|
||||
CREATE POLICY subscriptions_select_own ON subscriptions
|
||||
FOR SELECT
|
||||
USING (customer_id::text = auth.uid()::text);
|
||||
|
||||
CREATE POLICY payments_select_own ON payments
|
||||
FOR SELECT
|
||||
USING (customer_id::text = auth.uid()::text);
|
||||
|
||||
-- =====================================================
|
||||
-- Sample Data (for testing)
|
||||
-- =====================================================
|
||||
|
||||
-- Insert sample customer (commented out for production)
|
||||
-- INSERT INTO customers (email, first_name, last_name, company, status)
|
||||
-- VALUES ('test@example.com', 'Max', 'Mustermann', 'Test GmbH', 'trial');
|
||||
|
||||
-- =====================================================
|
||||
-- Grants
|
||||
-- =====================================================
|
||||
|
||||
-- Grant permissions to authenticated users
|
||||
GRANT SELECT, INSERT, UPDATE ON customers TO authenticated;
|
||||
GRANT SELECT ON instances TO authenticated;
|
||||
GRANT SELECT ON subscriptions TO authenticated;
|
||||
GRANT SELECT ON payments TO authenticated;
|
||||
GRANT SELECT ON usage_stats TO authenticated;
|
||||
|
||||
-- Grant all permissions to service role (for n8n workflows)
|
||||
GRANT ALL ON ALL TABLES IN SCHEMA public TO service_role;
|
||||
GRANT ALL ON ALL SEQUENCES IN SCHEMA public TO service_role;
|
||||
|
||||
-- =====================================================
|
||||
-- Comments
|
||||
-- =====================================================
|
||||
|
||||
COMMENT ON TABLE customers IS 'Stores customer information and trial status';
|
||||
COMMENT ON TABLE instances IS 'Stores LXC instance information for each customer';
|
||||
COMMENT ON TABLE emails_sent IS 'Tracks all emails sent to customers';
|
||||
COMMENT ON TABLE subscriptions IS 'Stores subscription and payment information';
|
||||
COMMENT ON TABLE payments IS 'Stores payment transaction history';
|
||||
COMMENT ON TABLE usage_stats IS 'Tracks usage statistics for each instance';
|
||||
COMMENT ON TABLE audit_log IS 'Audit trail for important actions';
|
||||
|
||||
-- =====================================================
|
||||
-- End of Schema
|
||||
-- =====================================================
|
||||
2
sql/init_pgvector.sql
Normal file
2
sql/init_pgvector.sql
Normal file
@@ -0,0 +1,2 @@
|
||||
CREATE EXTENSION IF NOT EXISTS vector;
|
||||
CREATE EXTENSION IF NOT EXISTS "uuid-ossp";
|
||||
63
templates/docker-compose.yml
Normal file
63
templates/docker-compose.yml
Normal file
@@ -0,0 +1,63 @@
|
||||
services:
|
||||
postgres:
|
||||
image: pgvector/pgvector:pg16
|
||||
container_name: customer-postgres
|
||||
restart: unless-stopped
|
||||
environment:
|
||||
POSTGRES_DB: ${PG_DB}
|
||||
POSTGRES_USER: ${PG_USER}
|
||||
POSTGRES_PASSWORD: ${PG_PASSWORD}
|
||||
volumes:
|
||||
- ./volumes/postgres/data:/var/lib/postgresql/data
|
||||
- ./sql:/docker-entrypoint-initdb.d:ro
|
||||
healthcheck:
|
||||
test: ["CMD-SHELL", "pg_isready -U ${PG_USER} -d ${PG_DB} || exit 1"]
|
||||
interval: 10s
|
||||
timeout: 5s
|
||||
retries: 20
|
||||
networks:
|
||||
- customer-net
|
||||
|
||||
n8n:
|
||||
image: n8nio/n8n:latest
|
||||
container_name: n8n
|
||||
restart: unless-stopped
|
||||
depends_on:
|
||||
postgres:
|
||||
condition: service_healthy
|
||||
ports:
|
||||
- "${N8N_PORT}:5678"
|
||||
environment:
|
||||
# --- Web / Cookies / URL ---
|
||||
N8N_PORT: 5678
|
||||
N8N_PROTOCOL: ${N8N_PROTOCOL}
|
||||
N8N_HOST: ${N8N_HOST}
|
||||
N8N_EDITOR_BASE_URL: ${N8N_EDITOR_BASE_URL}
|
||||
WEBHOOK_URL: ${WEBHOOK_URL}
|
||||
|
||||
# Ohne TLS/Reverse Proxy: sonst Secure-Cookie Warning / Login-Probleme
|
||||
N8N_SECURE_COOKIE: ${N8N_SECURE_COOKIE}
|
||||
|
||||
# --- DB (Postgres) ---
|
||||
DB_TYPE: postgresdb
|
||||
DB_POSTGRESDB_HOST: postgres
|
||||
DB_POSTGRESDB_PORT: 5432
|
||||
DB_POSTGRESDB_DATABASE: ${PG_DB}
|
||||
DB_POSTGRESDB_USER: ${PG_USER}
|
||||
DB_POSTGRESDB_PASSWORD: ${PG_PASSWORD}
|
||||
|
||||
# --- Basics ---
|
||||
GENERIC_TIMEZONE: Europe/Berlin
|
||||
TZ: Europe/Berlin
|
||||
|
||||
# optional (später hart machen)
|
||||
N8N_ENCRYPTION_KEY: ${N8N_ENCRYPTION_KEY}
|
||||
|
||||
volumes:
|
||||
- ./volumes/n8n-data:/home/node/.n8n
|
||||
networks:
|
||||
- customer-net
|
||||
|
||||
networks:
|
||||
customer-net:
|
||||
driver: bridge
|
||||
20
templates/env.template
Normal file
20
templates/env.template
Normal file
@@ -0,0 +1,20 @@
|
||||
# Basics
|
||||
TZ=Europe/Berlin
|
||||
|
||||
# n8n URL-Setup (wird pro Kunde gefüllt)
|
||||
N8N_HOST={{N8N_HOST}}
|
||||
N8N_EDITOR_BASE_URL=https://{{N8N_HOST}}/
|
||||
WEBHOOK_URL=https://{{N8N_HOST}}/
|
||||
|
||||
# Dashboard BasicAuth (wird random generiert)
|
||||
DASHBOARD_USERNAME={{DASHBOARD_USERNAME}}
|
||||
DASHBOARD_PASSWORD={{DASHBOARD_PASSWORD}}
|
||||
|
||||
# n8n Credential Encryption Key (wird random generiert, 64 hex chars ok)
|
||||
N8N_ENCRYPTION_KEY={{N8N_ENCRYPTION_KEY}}
|
||||
|
||||
# Postgres
|
||||
POSTGRES_USER=postgres
|
||||
POSTGRES_PASSWORD={{POSTGRES_PASSWORD}}
|
||||
POSTGRES_DB=postgres
|
||||
|
||||
32
templates/n8n-workflow-reload.service
Normal file
32
templates/n8n-workflow-reload.service
Normal file
@@ -0,0 +1,32 @@
|
||||
[Unit]
|
||||
Description=n8n Workflow Auto-Reload Service
|
||||
Documentation=https://docs.n8n.io/
|
||||
After=docker.service
|
||||
Wants=docker.service
|
||||
# Warte bis n8n-Container läuft
|
||||
After=docker-n8n.service
|
||||
Requires=docker.service
|
||||
|
||||
[Service]
|
||||
Type=oneshot
|
||||
RemainAfterExit=yes
|
||||
User=root
|
||||
WorkingDirectory=/opt/customer-stack
|
||||
|
||||
# Warte kurz, damit Docker-Container vollständig gestartet sind
|
||||
ExecStartPre=/bin/sleep 10
|
||||
|
||||
# Führe Reload-Script aus
|
||||
ExecStart=/bin/bash /opt/customer-stack/reload-workflow.sh
|
||||
|
||||
# Logging
|
||||
StandardOutput=journal
|
||||
StandardError=journal
|
||||
SyslogIdentifier=n8n-workflow-reload
|
||||
|
||||
# Restart-Policy bei Fehler
|
||||
Restart=on-failure
|
||||
RestartSec=30
|
||||
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
||||
379
templates/reload-workflow.sh
Normal file
379
templates/reload-workflow.sh
Normal file
@@ -0,0 +1,379 @@
|
||||
#!/bin/bash
|
||||
#
|
||||
# n8n Workflow Auto-Reload Script
|
||||
# Wird beim LXC-Start ausgeführt, um den Workflow neu zu laden
|
||||
#
|
||||
|
||||
set -euo pipefail
|
||||
|
||||
# Konfiguration
|
||||
SCRIPT_DIR="/opt/customer-stack"
|
||||
LOG_DIR="${SCRIPT_DIR}/logs"
|
||||
LOG_FILE="${LOG_DIR}/workflow-reload.log"
|
||||
ENV_FILE="${SCRIPT_DIR}/.env"
|
||||
WORKFLOW_TEMPLATE="${SCRIPT_DIR}/workflow-template.json"
|
||||
WORKFLOW_NAME="RAG KI-Bot (PGVector)"
|
||||
|
||||
# API-Konfiguration
|
||||
API_URL="http://127.0.0.1:5678"
|
||||
COOKIE_FILE="/tmp/n8n_reload_cookies.txt"
|
||||
MAX_WAIT=60 # Maximale Wartezeit in Sekunden
|
||||
# Erstelle Log-Verzeichnis sofort (vor den Logging-Funktionen)
|
||||
mkdir -p "${LOG_DIR}"
|
||||
|
||||
|
||||
# Logging-Funktion
|
||||
log() {
|
||||
echo "[$(date '+%Y-%m-%d %H:%M:%S')] $*" | tee -a "${LOG_FILE}"
|
||||
}
|
||||
|
||||
log_error() {
|
||||
echo "[$(date '+%Y-%m-%d %H:%M:%S')] ERROR: $*" | tee -a "${LOG_FILE}" >&2
|
||||
}
|
||||
|
||||
# Funktion: Warten bis n8n bereit ist
|
||||
wait_for_n8n() {
|
||||
log "Warte auf n8n API..."
|
||||
local count=0
|
||||
|
||||
while [ $count -lt $MAX_WAIT ]; do
|
||||
if curl -sS -o /dev/null -w "%{http_code}" "${API_URL}/rest/settings" 2>/dev/null | grep -q "200"; then
|
||||
log "n8n API ist bereit"
|
||||
return 0
|
||||
fi
|
||||
sleep 1
|
||||
count=$((count + 1))
|
||||
done
|
||||
|
||||
log_error "n8n API nicht erreichbar nach ${MAX_WAIT} Sekunden"
|
||||
return 1
|
||||
}
|
||||
|
||||
# Funktion: .env-Datei laden
|
||||
load_env() {
|
||||
if [ ! -f "${ENV_FILE}" ]; then
|
||||
log_error ".env-Datei nicht gefunden: ${ENV_FILE}"
|
||||
return 1
|
||||
fi
|
||||
|
||||
# Exportiere alle Variablen aus .env
|
||||
set -a
|
||||
source "${ENV_FILE}"
|
||||
set +a
|
||||
|
||||
log "Konfiguration geladen aus ${ENV_FILE}"
|
||||
return 0
|
||||
}
|
||||
|
||||
# Funktion: Login bei n8n
|
||||
n8n_login() {
|
||||
log "Login bei n8n als ${N8N_OWNER_EMAIL}..."
|
||||
|
||||
# Escape special characters in password for JSON
|
||||
local escaped_password
|
||||
escaped_password=$(echo "${N8N_OWNER_PASS}" | sed 's/\\/\\\\/g; s/"/\\"/g')
|
||||
|
||||
local response
|
||||
response=$(curl -sS -X POST "${API_URL}/rest/login" \
|
||||
-H "Content-Type: application/json" \
|
||||
-c "${COOKIE_FILE}" \
|
||||
-d "{\"emailOrLdapLoginId\":\"${N8N_OWNER_EMAIL}\",\"password\":\"${escaped_password}\"}" 2>&1)
|
||||
|
||||
if echo "$response" | grep -q '"code":\|"status":"error"'; then
|
||||
log_error "Login fehlgeschlagen: ${response}"
|
||||
return 1
|
||||
fi
|
||||
|
||||
log "Login erfolgreich"
|
||||
return 0
|
||||
}
|
||||
|
||||
# Funktion: Workflow nach Name suchen
|
||||
find_workflow() {
|
||||
local workflow_name="$1"
|
||||
|
||||
log "Suche nach Workflow '${workflow_name}'..."
|
||||
|
||||
local response
|
||||
response=$(curl -sS -X GET "${API_URL}/rest/workflows" \
|
||||
-H "Content-Type: application/json" \
|
||||
-b "${COOKIE_FILE}" 2>&1)
|
||||
|
||||
# Extract workflow ID by name
|
||||
local workflow_id
|
||||
workflow_id=$(echo "$response" | grep -oP "\"name\":\s*\"${workflow_name}\".*?\"id\":\s*\"\K[^\"]+|\"id\":\s*\"\K[^\"]+(?=.*?\"name\":\s*\"${workflow_name}\")" | head -1 || echo "")
|
||||
|
||||
if [ -n "$workflow_id" ]; then
|
||||
log "Workflow gefunden: ID=${workflow_id}"
|
||||
echo "$workflow_id"
|
||||
return 0
|
||||
else
|
||||
log "Workflow '${workflow_name}' nicht gefunden"
|
||||
echo ""
|
||||
return 1
|
||||
fi
|
||||
}
|
||||
|
||||
# Funktion: Workflow löschen
|
||||
delete_workflow() {
|
||||
local workflow_id="$1"
|
||||
|
||||
log "Lösche Workflow ${workflow_id}..."
|
||||
|
||||
local response
|
||||
response=$(curl -sS -X DELETE "${API_URL}/rest/workflows/${workflow_id}" \
|
||||
-H "Content-Type: application/json" \
|
||||
-b "${COOKIE_FILE}" 2>&1)
|
||||
|
||||
log "Workflow ${workflow_id} gelöscht"
|
||||
return 0
|
||||
}
|
||||
|
||||
# Funktion: Credential nach Name und Typ suchen
|
||||
find_credential() {
|
||||
local cred_name="$1"
|
||||
local cred_type="$2"
|
||||
|
||||
log "Suche nach Credential '${cred_name}' (Typ: ${cred_type})..."
|
||||
|
||||
local response
|
||||
response=$(curl -sS -X GET "${API_URL}/rest/credentials" \
|
||||
-H "Content-Type: application/json" \
|
||||
-b "${COOKIE_FILE}" 2>&1)
|
||||
|
||||
# Extract credential ID by name and type
|
||||
local cred_id
|
||||
cred_id=$(echo "$response" | grep -oP "\"name\":\s*\"${cred_name}\".*?\"type\":\s*\"${cred_type}\".*?\"id\":\s*\"\K[^\"]+|\"id\":\s*\"\K[^\"]+(?=.*?\"name\":\s*\"${cred_name}\".*?\"type\":\s*\"${cred_type}\")" | head -1 || echo "")
|
||||
|
||||
if [ -n "$cred_id" ]; then
|
||||
log "Credential gefunden: ID=${cred_id}"
|
||||
echo "$cred_id"
|
||||
return 0
|
||||
else
|
||||
log_error "Credential '${cred_name}' nicht gefunden"
|
||||
echo ""
|
||||
return 1
|
||||
fi
|
||||
}
|
||||
|
||||
#######################################
# Process the workflow template: strip fields the import endpoint rejects
# and inject the locally created credential IDs into every node.
# Globals:   WORKFLOW_TEMPLATE (read, with fallback)
# Arguments: $1 - PostgreSQL credential ID
#            $2 - Ollama credential ID
# Outputs:   prints the path of the processed workflow JSON on stdout
# Returns:   0 on success, 1 on failure
#######################################
process_workflow_template() {
  local pg_cred_id="$1"
  local ollama_cred_id="$2"
  # Use the configured template path (main validates it); fall back to the
  # historical hard-coded location if WORKFLOW_TEMPLATE is unset.
  local template_file="${WORKFLOW_TEMPLATE:-/opt/customer-stack/workflow-template.json}"
  local output_file="/tmp/workflow_processed.json"

  log "Verarbeite Workflow-Template..."

  # Redirect the Python script's stdout to stderr: callers capture this
  # function with $(...), and previously the script's status print leaked
  # into the captured "file path".
  # NOTE(review): assumes log() writes to a log file or stderr, not
  # stdout — otherwise the $() capture in main would also contain log
  # lines; verify against the log() definition.
  if python3 - "$pg_cred_id" "$ollama_cred_id" "$template_file" "$output_file" 1>&2 <<'PYTHON_SCRIPT'
import json
import sys

pg_cred_id, ollama_cred_id, template_path, output_path = sys.argv[1:5]

# Read the workflow template.
with open(template_path, 'r') as f:
    workflow = json.load(f)

# Remove fields the workflow-import endpoint does not accept.
for field in ('id', 'versionId', 'meta', 'tags', 'active', 'pinData'):
    workflow.pop(field, None)

# Rewire every node's credentials to the locally created ones.
for node in workflow.get('nodes', []):
    credentials = node.get('credentials', {})

    # Replace the PostgreSQL credential reference.
    if 'postgres' in credentials:
        credentials['postgres'] = {
            'id': pg_cred_id,
            'name': 'PostgreSQL (local)',
        }

    # Replace the Ollama credential reference.
    if 'ollamaApi' in credentials:
        credentials['ollamaApi'] = {
            'id': ollama_cred_id,
            'name': 'Ollama (local)',
        }

# Write the processed workflow.
with open(output_path, 'w') as f:
    json.dump(workflow, f)

print("Workflow processed successfully")
PYTHON_SCRIPT
  then
    log "Workflow-Template erfolgreich verarbeitet"
    echo "$output_file"
    return 0
  else
    log_error "Fehler beim Verarbeiten des Workflow-Templates"
    return 1
  fi
}
|
||||
|
||||
#######################################
# Import a workflow into n8n from a JSON file.
# Globals:   API_URL, COOKIE_FILE (read)
# Arguments: $1 - path to the processed workflow JSON
# Outputs:   prints "<workflow_id>:<version_id>" on stdout
# Returns:   0 on success, 1 on failure
#######################################
import_workflow() {
  local workflow_file="$1"

  log "Importiere Workflow aus ${workflow_file}..."

  local response
  response=$(curl -sS -X POST "${API_URL}/rest/workflows" \
    -H "Content-Type: application/json" \
    -b "${COOKIE_FILE}" \
    -d @"${workflow_file}" 2>&1)

  # Parse the top-level id/versionId with a real JSON parser. The previous
  # grep-based extraction returned the *first* "id" in the payload, which
  # can be a node ID rather than the workflow ID.
  local ids
  ids=$(printf '%s' "$response" | python3 -c '
import json, sys

try:
    payload = json.load(sys.stdin)
except ValueError:
    sys.exit(0)
# n8n wraps responses in {"data": {...}} on some versions.
if isinstance(payload, dict) and isinstance(payload.get("data"), dict):
    payload = payload["data"]
if isinstance(payload, dict) and payload.get("id"):
    print("{}:{}".format(payload["id"], payload.get("versionId", "")))
' 2>/dev/null || echo "")

  if [ -z "$ids" ]; then
    log_error "Workflow-Import fehlgeschlagen: ${response}"
    return 1
  fi

  log "Workflow importiert: ID=${ids%%:*}, Version=${ids#*:}"
  echo "$ids"
  return 0
}
|
||||
|
||||
#######################################
# Activate an imported workflow.
# Globals:   API_URL, COOKIE_FILE (read)
# Arguments: $1 - workflow ID
#            $2 - version ID returned by the import
# Returns:   0 when the API confirms activation, 1 otherwise
#######################################
activate_workflow() {
  local workflow_id="$1"
  local version_id="$2"

  log "Aktiviere Workflow ${workflow_id}..."

  local response
  response=$(curl -sS -X POST "${API_URL}/rest/workflows/${workflow_id}/activate" \
    -H "Content-Type: application/json" \
    -b "${COOKIE_FILE}" \
    -d "{\"versionId\":\"${version_id}\"}" 2>&1)

  # Accept any amount of whitespace around the colon; the old pattern
  # only matched zero or exactly one space after it.
  if printf '%s' "$response" | grep -Eq '"active"[[:space:]]*:[[:space:]]*true'; then
    log "Workflow ${workflow_id} erfolgreich aktiviert"
    return 0
  else
    log_error "Workflow-Aktivierung fehlgeschlagen: ${response}"
    return 1
  fi
}
|
||||
|
||||
#######################################
# Remove the session cookie and the processed workflow artifact.
# Globals:   COOKIE_FILE (read)
# Returns:   always 0 (best-effort removal, errors suppressed)
#######################################
cleanup() {
  local leftover
  for leftover in "${COOKIE_FILE}" /tmp/workflow_processed.json; do
    rm -f "$leftover" 2>/dev/null || true
  done
}
|
||||
|
||||
#######################################
# Main entry point: load configuration, wait for n8n, log in, replace any
# existing workflow of the same name with a freshly processed template,
# import and activate it.
# Globals:   WORKFLOW_TEMPLATE, WORKFLOW_NAME (read)
# Arguments: none used directly (invoked as main "$@")
# Returns:   exits 0 on success, 1 on any failure
#######################################
main() {
  log "========================================="
  log "n8n Workflow Auto-Reload gestartet"
  log "========================================="

  # NOTE(review): a "create log directory if missing" step was commented
  # here but carries no code — confirm whether it was removed on purpose.

  # Load configuration; abort if it cannot be read.
  if ! load_env; then
    log_error "Fehler beim Laden der Konfiguration"
    exit 1
  fi

  # Verify the workflow template exists before doing anything else.
  if [ ! -f "${WORKFLOW_TEMPLATE}" ]; then
    log_error "Workflow-Template nicht gefunden: ${WORKFLOW_TEMPLATE}"
    exit 1
  fi

  # Wait until the n8n API answers.
  if ! wait_for_n8n; then
    log_error "n8n nicht erreichbar"
    exit 1
  fi

  # Authenticate; subsequent calls reuse the stored session cookie.
  if ! n8n_login; then
    log_error "Login fehlgeschlagen"
    cleanup
    exit 1
  fi

  # If a workflow with the same name already exists, delete it so the
  # import below does not create a duplicate.
  local existing_workflow_id
  existing_workflow_id=$(find_workflow "${WORKFLOW_NAME}" || echo "")

  if [ -n "$existing_workflow_id" ]; then
    log "Bestehender Workflow gefunden, wird gelöscht..."
    delete_workflow "$existing_workflow_id"
  fi

  # Look up the credentials the template nodes must be wired to.
  log "Suche nach bestehenden Credentials..."
  local pg_cred_id
  local ollama_cred_id

  pg_cred_id=$(find_credential "PostgreSQL (local)" "postgres" || echo "")
  ollama_cred_id=$(find_credential "Ollama (local)" "ollamaApi" || echo "")

  if [ -z "$pg_cred_id" ] || [ -z "$ollama_cred_id" ]; then
    log_error "Credentials nicht gefunden (PostgreSQL: ${pg_cred_id}, Ollama: ${ollama_cred_id})"
    cleanup
    exit 1
  fi

  # Inject the credential IDs into the template; on success the function
  # prints the path of the processed JSON file.
  local processed_workflow
  processed_workflow=$(process_workflow_template "$pg_cred_id" "$ollama_cred_id")

  if [ -z "$processed_workflow" ]; then
    log_error "Fehler beim Verarbeiten des Workflow-Templates"
    cleanup
    exit 1
  fi

  # Import the processed workflow; prints "<workflow_id>:<version_id>".
  local import_result
  import_result=$(import_workflow "$processed_workflow")

  if [ -z "$import_result" ]; then
    log_error "Workflow-Import fehlgeschlagen"
    cleanup
    exit 1
  fi

  # Split the "<workflow_id>:<version_id>" pair.
  local new_workflow_id
  local new_version_id
  new_workflow_id=$(echo "$import_result" | cut -d: -f1)
  new_version_id=$(echo "$import_result" | cut -d: -f2)

  # Activate the freshly imported workflow.
  if ! activate_workflow "$new_workflow_id" "$new_version_id"; then
    log_error "Workflow-Aktivierung fehlgeschlagen"
    cleanup
    exit 1
  fi

  # Remove temporary files (cleanup is also registered as an EXIT trap,
  # so running it here is harmless but makes success paths explicit).
  cleanup

  log "========================================="
  log "Workflow-Reload erfolgreich abgeschlossen"
  log "Workflow-ID: ${new_workflow_id}"
  log "========================================="

  exit 0
}
|
||||
|
||||
# Ensure temporary files are removed on every exit path (success or error).
trap cleanup EXIT

# Run the main routine with all CLI arguments.
main "$@"
|
||||
Reference in New Issue
Block a user