---
services:
  # flynn (the main agent) is currently run outside compose; kept here,
  # commented out, as a template for containerized deployment.
  # flynn:
  #   build: .
  #   container_name: flynn
  #   restart: unless-stopped
  #   ports:
  #     - "18800:18800"
  #   volumes:
  #     # Persistent data (sessions DB, memory store)
  #     - flynn-data:/data
  #     # Mount your config file
  #     - ./config/default.yaml:/config/config.yaml:ro
  #   environment:
  #     # Required: at least one model provider API key
  #     - ANTHROPIC_API_KEY=${ANTHROPIC_API_KEY:-}
  #     # Optional: additional provider keys
  #     - OPENAI_API_KEY=${OPENAI_API_KEY:-}
  #     - OPENROUTER_API_KEY=${OPENROUTER_API_KEY:-}
  #     - GOOGLE_API_KEY=${GOOGLE_API_KEY:-}
  #     # Optional: Telegram integration
  #     - FLYNN_TELEGRAM_TOKEN=${FLYNN_TELEGRAM_TOKEN:-}
  #     # Optional: Discord integration
  #     - DISCORD_BOT_TOKEN=${DISCORD_BOT_TOKEN:-}
  #     # Optional: Gateway auth token
  #     - FLYNN_SERVER_TOKEN=${FLYNN_SERVER_TOKEN:-}
  #   healthcheck:
  #     test: ["CMD", "wget", "-qO-", "http://localhost:18800/"]
  #     interval: 30s
  #     timeout: 5s
  #     start_period: 15s
  #     retries: 3
# Optional local dependency: whisper.cpp server for audio transcription.
|
|
# Start with: docker compose --profile voice up -d whisper-server
|
|
whisper-server:
|
|
image: ghcr.io/ggml-org/whisper.cpp@sha256:3a39e86d5a0e911086b5cbebc9029cac71b02fbd08e217b775857de1490f55bf
|
|
container_name: whisper-server
|
|
restart: unless-stopped
|
|
profiles: ["voice"]
|
|
ports:
|
|
- "18801:8080"
|
|
volumes:
|
|
- whisper-models:/app/models
|
|
# Override image entrypoint so args are passed directly to whisper-server.
|
|
entrypoint: ["whisper-server"]
|
|
command:
|
|
- --model
|
|
- /app/models/ggml-base.en.bin
|
|
- --host
|
|
- 0.0.0.0
|
|
- --port
|
|
- "8080"
|
|
- --convert
|
|
- --language
|
|
- en
|
|
- --inference-path
|
|
- /v1/audio/transcriptions
|
|
healthcheck:
|
|
test:
|
|
[
|
|
"CMD-SHELL",
|
|
"curl -f http://localhost:8080/ >/dev/null 2>&1 || exit 1",
|
|
]
|
|
interval: 30s
|
|
timeout: 5s
|
|
start_period: 15s
|
|
retries: 3
|
|
labels:
|
|
agentmon.monitor: "true"
|
|
agentmon.role: "voice"
|
|
agentmon.port: "18801"
|
|
|
|
# kokoro TTS
|
|
kokoro-tts:
|
|
image: ghcr.io/remsky/kokoro-fastapi-cpu@sha256:c8812546d358cbfd6a5c4087a28795b2b001d8e32d7a322eedd246e6bc13cb55
|
|
container_name: kokoro-tts
|
|
profiles: ["voice"]
|
|
ports:
|
|
- "18805:8880"
|
|
environment:
|
|
- USE_GPU=false
|
|
# - PYTHONUNBUFFERED=1
|
|
#deploy:
|
|
# resources:
|
|
# reservations:
|
|
# devices:
|
|
# - driver: nvidia
|
|
# count: all
|
|
# capabilities: [gpu]
|
|
restart: unless-stopped
|
|
labels:
|
|
agentmon.monitor: "true"
|
|
agentmon.role: "voice"
|
|
agentmon.port: "18805"
|
|
|
|
# Optional local dependency: Brave Search MCP server (HTTP mode).
|
|
# Start with: docker compose --profile search up -d brave-search
|
|
brave-search:
|
|
image: mcp/brave-search@sha256:aea0b091da7029c35f0ed0964367328af0f2824477d79d2e85e43e7cd5a757d4
|
|
container_name: brave-search
|
|
restart: unless-stopped
|
|
profiles: ["search"]
|
|
ports:
|
|
- "18802:8000"
|
|
environment:
|
|
- BRAVE_API_KEY=${BRAVE_API_KEY:?BRAVE_API_KEY is required}
|
|
- BRAVE_MCP_TRANSPORT=http
|
|
- BRAVE_MCP_HOST=0.0.0.0
|
|
- BRAVE_MCP_PORT=8000
|
|
labels:
|
|
agentmon.monitor: "true"
|
|
agentmon.role: "mcp"
|
|
agentmon.port: "18802"
|
|
|
|
# Optional local dependency: SearXNG metasearch instance.
|
|
# Start with: docker compose --profile search up -d searxng
|
|
searxng:
|
|
image: searxng/searxng@sha256:c5e5aedb70ec4b7ff5b1e8f8ea69253dceb85a98b8757360c1b65fe8652d6066
|
|
container_name: searxng
|
|
restart: unless-stopped
|
|
profiles: ["search"]
|
|
ports:
|
|
- "18803:8080"
|
|
environment:
|
|
- BASE_URL=http://localhost:18803/
|
|
- INSTANCE_NAME=Flynn Local SearXNG
|
|
volumes:
|
|
- ./searxng/settings.yml:/etc/searxng/settings.yml:ro
|
|
labels:
|
|
agentmon.monitor: "true"
|
|
agentmon.role: "search"
|
|
agentmon.port: "18803"
|
|
|
|
# Optional local dependency: liteLLM proxy for unified LLM API.
|
|
# Start with: docker compose --profile api up -d litellm
|
|
litellm:
|
|
image: litellm/litellm:v1.77.7
|
|
container_name: litellm
|
|
restart: unless-stopped
|
|
profiles: ["api"]
|
|
ports:
|
|
- "18804:4000"
|
|
volumes:
|
|
- ./litellm-config.yaml:/app/config.yaml:ro
|
|
- ./litellm-copilot-tokens:/root/.config/litellm/github_copilot
|
|
environment:
|
|
- LITELLM_PORT=4000
|
|
- LITELLM_DROP_PARAMS=true
|
|
- ANTHROPIC_API_KEY=${ANTHROPIC_API_KEY:-}
|
|
- OPENAI_API_KEY=${OPENAI_API_KEY:-}
|
|
- OPENROUTER_API_KEY=${OPENROUTER_API_KEY:-}
|
|
- GEMINI_API_KEY=${GEMINI_API_KEY:-}
|
|
- ZAI_API_KEY=${ZAI_API_KEY:-}
|
|
- GITHUB_COPILOT_TOKEN_DIR=/root/.config/litellm/github_copilot
|
|
- DATABASE_URL=postgresql://litellm:litellm_password@litellm-db:5432/litellm
|
|
- LITELLM_MASTER_KEY=${LITELLM_MASTER_KEY:-sk-1234}
|
|
- LITELLM_SALT_KEY=${LITELLM_SALT_KEY:-}
|
|
- STORE_MODEL_IN_DB=True
|
|
command:
|
|
[
|
|
"--config",
|
|
"/app/config.yaml",
|
|
"--port",
|
|
"4000",
|
|
]
|
|
depends_on:
|
|
litellm-db:
|
|
condition: service_healthy
|
|
healthcheck:
|
|
test: ["CMD-SHELL", "python3 -c \"import urllib.request; urllib.request.urlopen('http://localhost:4000/health/liveliness')\""]
|
|
interval: 30s
|
|
timeout: 5s
|
|
start_period: 15s
|
|
retries: 3
|
|
labels:
|
|
agentmon.monitor: "true"
|
|
agentmon.role: "llm-proxy"
|
|
agentmon.port: "18804"
|
|
|
|
litellm-init:
|
|
image: curlimages/curl@sha256:d94d07ba9e7d6de898b6d96c1a072f6f8266c687af78a74f380087a0addf5d17
|
|
container_name: litellm-init
|
|
profiles: ["api"]
|
|
restart: "no"
|
|
volumes:
|
|
- ./litellm-init-credentials.sh:/init.sh:ro
|
|
- ./litellm-init-models.sh:/litellm-init-models.sh:ro
|
|
environment:
|
|
- LITELLM_URL=http://litellm:4000
|
|
- LITELLM_MASTER_KEY=${LITELLM_MASTER_KEY:-sk-1234}
|
|
- ANTHROPIC_API_KEY=${ANTHROPIC_API_KEY:-}
|
|
- OPENAI_API_KEY=${OPENAI_API_KEY:-}
|
|
- GEMINI_API_KEY=${GEMINI_API_KEY:-}
|
|
- ZAI_API_KEY=${ZAI_API_KEY:-}
|
|
entrypoint: ["sh", "/init.sh"]
|
|
depends_on:
|
|
litellm:
|
|
condition: service_healthy
|
|
|
|
litellm-db:
|
|
image: postgres:15-alpine
|
|
container_name: litellm-db
|
|
restart: unless-stopped
|
|
profiles: ["api"]
|
|
volumes:
|
|
- litellm-db-data:/var/lib/postgresql/data
|
|
environment:
|
|
- POSTGRES_USER=litellm
|
|
- POSTGRES_PASSWORD=litellm_password
|
|
- POSTGRES_DB=litellm
|
|
healthcheck:
|
|
test: ["CMD-SHELL", "pg_isready -U litellm"]
|
|
interval: 10s
|
|
timeout: 5s
|
|
start_period: 5s
|
|
retries: 5
|
|
labels:
|
|
agentmon.monitor: "true"
|
|
agentmon.role: "db"
|
|
|
|
# Dedicated local n8n instance for agent-oriented workflows.
|
|
# Start with: docker compose --profile automation up -d n8n-agent
|
|
n8n-agent:
|
|
image: docker.n8n.io/n8nio/n8n:2.11.3
|
|
container_name: n8n-agent
|
|
restart: unless-stopped
|
|
profiles: ["automation"]
|
|
ports:
|
|
- "18808:5678"
|
|
environment:
|
|
- N8N_HOST=0.0.0.0
|
|
- N8N_PORT=5678
|
|
- N8N_PROTOCOL=http
|
|
- N8N_EDITOR_BASE_URL=http://localhost:18808
|
|
- WEBHOOK_URL=http://localhost:18808/
|
|
- TZ=UTC
|
|
- GENERIC_TIMEZONE=UTC
|
|
- N8N_SECURE_COOKIE=false
|
|
volumes:
|
|
- n8n-agent-data:/home/node/.n8n
|
|
healthcheck:
|
|
test: ["CMD-SHELL", "wget -qO- http://localhost:5678/healthz >/dev/null 2>&1 || exit 1"]
|
|
interval: 30s
|
|
timeout: 5s
|
|
start_period: 30s
|
|
retries: 5
|
|
labels:
|
|
agentmon.monitor: "true"
|
|
agentmon.role: "automation"
|
|
agentmon.port: "18808"
|
|
|
|
volumes:
|
|
# flynn-data:
|
|
whisper-models:
|
|
litellm-db-data:
|
|
n8n-agent-data:
|