Files
swarm-master/docker-compose.yaml
William Valentin aceeb7b542 Initial commit — OpenClaw VM infrastructure
- ansible/: VM provisioning playbooks and roles
  - provision-vm.yml: create KVM VM from Ubuntu cloud image
  - install.yml: install OpenClaw on guest (upstream)
  - customize.yml: swappiness, virtiofs fstab, linger
  - roles/vm/: libvirt domain XML, cloud-init templates
  - inventory.yml + host_vars/zap.yml: zap instance config
- backup-openclaw-vm.sh: daily rsync + MinIO upload
- restore-openclaw-vm.sh: full redeploy from scratch
- README.md: full operational documentation

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
2026-03-12 12:18:31 -07:00

230 lines
6.6 KiB
YAML

services:
  # Flynn gateway (disabled — uncomment to run the main service).
  # flynn:
  #   build: .
  #   container_name: flynn
  #   restart: unless-stopped
  #   ports:
  #     - "18800:18800"
  #   volumes:
  #     # Persistent data (sessions DB, memory store)
  #     - flynn-data:/data
  #     # Mount your config file
  #     - ./config/default.yaml:/config/config.yaml:ro
  #   environment:
  #     # Required: at least one model provider API key
  #     - ANTHROPIC_API_KEY=${ANTHROPIC_API_KEY:-}
  #     # Optional: additional provider keys
  #     - OPENAI_API_KEY=${OPENAI_API_KEY:-}
  #     - OPENROUTER_API_KEY=${OPENROUTER_API_KEY:-}
  #     - GOOGLE_API_KEY=${GOOGLE_API_KEY:-}
  #     # Optional: Telegram integration
  #     - FLYNN_TELEGRAM_TOKEN=${FLYNN_TELEGRAM_TOKEN:-}
  #     # Optional: Discord integration
  #     - DISCORD_BOT_TOKEN=${DISCORD_BOT_TOKEN:-}
  #     # Optional: Gateway auth token
  #     - FLYNN_SERVER_TOKEN=${FLYNN_SERVER_TOKEN:-}
  #   healthcheck:
  #     test: ["CMD", "wget", "-qO-", "http://localhost:18800/"]
  #     interval: 30s
  #     timeout: 5s
  #     start_period: 15s
  #     retries: 3

  # Optional local dependency: whisper.cpp server for audio transcription.
  # Start with: docker compose --profile voice up -d whisper-server
  whisper-server:
    image: ghcr.io/ggml-org/whisper.cpp:main
    container_name: whisper-server
    restart: unless-stopped
    profiles: ["voice"]
    ports:
      - "18801:8080"
    volumes:
      - whisper-models:/app/models
    # Override image entrypoint so args are passed directly to whisper-server.
    entrypoint: ["whisper-server"]
    command:
      - --model
      - /app/models/ggml-base.en.bin
      - --host
      - 0.0.0.0
      - --port
      - "8080"
      - --convert
      - --language
      - en
      - --inference-path
      - /v1/audio/transcriptions
    healthcheck:
      test: ["CMD-SHELL", "curl -f http://localhost:8080/ >/dev/null 2>&1 || exit 1"]
      interval: 30s
      timeout: 5s
      start_period: 15s
      retries: 3

  # Kokoro TTS (CPU image; uncomment the deploy block and switch image for GPU).
  kokoro-tts:
    image: ghcr.io/remsky/kokoro-fastapi-cpu:latest
    container_name: kokoro-tts
    restart: unless-stopped
    profiles: ["voice"]
    ports:
      - "18805:8880"
    environment:
      - USE_GPU=false
      # - PYTHONUNBUFFERED=1
    # deploy:
    #   resources:
    #     reservations:
    #       devices:
    #         - driver: nvidia
    #           count: all
    #           capabilities: [gpu]

  # Optional local dependency: Brave Search MCP server (HTTP mode).
  # Start with: docker compose --profile search up -d brave-search
  brave-search:
    image: mcp/brave-search:latest
    container_name: brave-search
    restart: unless-stopped
    profiles: ["search"]
    ports:
      - "18802:8000"
    environment:
      - BRAVE_API_KEY=${BRAVE_API_KEY:?BRAVE_API_KEY is required}
      - BRAVE_MCP_TRANSPORT=http
      - BRAVE_MCP_HOST=0.0.0.0
      - BRAVE_MCP_PORT=8000

  # Optional local dependency: SearXNG metasearch instance.
  # Start with: docker compose --profile search up -d searxng
  searxng:
    image: searxng/searxng:latest
    container_name: searxng
    restart: unless-stopped
    profiles: ["search"]
    ports:
      - "18803:8080"
    environment:
      - BASE_URL=http://localhost:18803/
      - INSTANCE_NAME=Flynn Local SearXNG
    volumes:
      - ./searxng/settings.yml:/etc/searxng/settings.yml:ro

  # Optional local dependency: liteLLM proxy for unified LLM API.
  # Start with: docker compose --profile api up -d litellm
  litellm:
    image: litellm/litellm:latest
    container_name: litellm
    restart: unless-stopped
    profiles: ["api"]
    ports:
      - "18804:4000"
    volumes:
      - ./litellm-config.yaml:/app/config.yaml:ro
      - ./litellm-copilot-tokens:/root/.config/litellm/github_copilot
    environment:
      - LITELLM_PORT=4000
      - LITELLM_DROP_PARAMS=true
      - ANTHROPIC_API_KEY=${ANTHROPIC_API_KEY:-}
      - OPENAI_API_KEY=${OPENAI_API_KEY:-}
      - OPENROUTER_API_KEY=${OPENROUTER_API_KEY:-}
      - GEMINI_API_KEY=${GEMINI_API_KEY:-}
      - ZAI_API_KEY=${ZAI_API_KEY:-}
      - GITHUB_COPILOT_TOKEN_DIR=/root/.config/litellm/github_copilot
      - DATABASE_URL=postgresql://litellm:litellm_password@litellm-db:5432/litellm
      - LITELLM_MASTER_KEY=${LITELLM_MASTER_KEY:-sk-1234}
      - LITELLM_SALT_KEY=${LITELLM_SALT_KEY:-}
      - STORE_MODEL_IN_DB=True
    command: ["--config", "/app/config.yaml", "--port", "4000"]
    depends_on:
      litellm-db:
        condition: service_healthy
    healthcheck:
      test: ["CMD-SHELL", "python3 -c \"import urllib.request; urllib.request.urlopen('http://localhost:4000/health/liveliness')\""]
      interval: 30s
      timeout: 5s
      start_period: 15s
      retries: 3

  # One-shot job: seeds liteLLM credentials/models once the proxy is healthy.
  litellm-init:
    image: curlimages/curl:latest
    container_name: litellm-init
    profiles: ["api"]
    restart: "no"
    volumes:
      - ./litellm-init-credentials.sh:/init.sh:ro
      - ./litellm-init-models.sh:/litellm-init-models.sh:ro
    environment:
      - LITELLM_URL=http://litellm:4000
      - LITELLM_MASTER_KEY=${LITELLM_MASTER_KEY:-sk-1234}
      - ANTHROPIC_API_KEY=${ANTHROPIC_API_KEY:-}
      - OPENAI_API_KEY=${OPENAI_API_KEY:-}
      - GEMINI_API_KEY=${GEMINI_API_KEY:-}
      - ZAI_API_KEY=${ZAI_API_KEY:-}
    entrypoint: ["sh", "/init.sh"]
    depends_on:
      litellm:
        condition: service_healthy

  # Postgres backing store for the liteLLM proxy.
  litellm-db:
    image: postgres:15-alpine
    container_name: litellm-db
    restart: unless-stopped
    profiles: ["api"]
    volumes:
      - litellm-db-data:/var/lib/postgresql/data
    environment:
      - POSTGRES_USER=litellm
      - POSTGRES_PASSWORD=litellm_password
      - POSTGRES_DB=litellm
    healthcheck:
      test: ["CMD-SHELL", "pg_isready -U litellm"]
      interval: 10s
      timeout: 5s
      start_period: 5s
      retries: 5

  # Dedicated local n8n instance for agent-oriented workflows.
  # Start with: docker compose --profile automation up -d n8n-agent
  n8n-agent:
    image: docker.n8n.io/n8nio/n8n:latest
    container_name: n8n-agent
    restart: unless-stopped
    profiles: ["automation"]
    ports:
      - "18808:5678"
    environment:
      - N8N_HOST=0.0.0.0
      - N8N_PORT=5678
      - N8N_PROTOCOL=http
      - N8N_EDITOR_BASE_URL=http://localhost:18808
      - WEBHOOK_URL=http://localhost:18808/
      - TZ=UTC
      - GENERIC_TIMEZONE=UTC
      - N8N_SECURE_COOKIE=false
    volumes:
      - n8n-agent-data:/home/node/.n8n
    healthcheck:
      test: ["CMD-SHELL", "wget -qO- http://localhost:5678/healthz >/dev/null 2>&1 || exit 1"]
      interval: 30s
      timeout: 5s
      start_period: 30s
      retries: 5

volumes:
  # flynn-data:
  whisper-models:
  litellm-db-data:
  n8n-agent-data: