Fix: add init container functionality

Harshith Mullapudi 2025-08-01 10:32:37 +05:30
parent 94ab4b9f4f
commit 2a9aee29b4
8 changed files with 27 additions and 711 deletions

View File

@@ -55,6 +55,3 @@ MODEL=gpt-4.1-2025-04-14
TRIGGER_PROJECT_ID=proj_core
TRIGGER_SECRET_KEY=tr_prod_1yvnRh3pA1M2E67GBY7m
TRIGGER_API_URL=http://host.docker.internal:8030
TRIGGER_IMAGE=redplanethq/proj_core:latest
# this should be the same as in the trigger folder's .env
TRIGGER_DB=trigger

View File

@@ -1,21 +1,6 @@
version: "3.8"
services:
  init:
    container_name: core-init
    image: redplanethq/init:${VERSION}
    environment:
      - VERSION=${VERSION}
      - DB_HOST=${DB_HOST}
      - DB_PORT=${DB_PORT}
      - TRIGGER_DB=${TRIGGER_DB}
      - POSTGRES_USER=${POSTGRES_USER}
      - POSTGRES_PASSWORD=${POSTGRES_PASSWORD}
      - TRIGGER_TASKS_IMAGE=${TRIGGER_TASKS_IMAGE}
      - NODE_ENV=${NODE_ENV}
    networks:
      - core
  core:
    container_name: core-app
    image: redplanethq/core:${VERSION}
@@ -91,6 +76,8 @@ services:
    image: neo4j:5
    environment:
      - NEO4J_AUTH=${NEO4J_AUTH}
      - NEO4J_dbms_security_procedures_unrestricted=gds.*
      - NEO4J_dbms_security_procedures_allowlist=gds.*
    ports:
      - "7474:7474"
      - "7687:7687"

View File

@@ -17,6 +17,7 @@ MANAGED_WORKER_SECRET=447c29678f9eaf289e9c4b70d3dd8a7f
# - For the split setup, you will have to set this manually. The token is available in the webapp logs but will only be shown once.
# - See the docs for more information: https://trigger.dev/docs/self-hosting/docker
TRIGGER_WORKER_TOKEN=tr_wgt_MwNm1OkMP7nZs5EaknV4LxayPfUKAieQrwh7k5Ln
TRIGGER_TASKS_IMAGE=redplanethq/proj_core:latest
# Worker URLs
# - In split setups, uncomment and set to the public URL of your webapp
@@ -133,4 +134,6 @@ OBJECT_STORE_SECRET_ACCESS_KEY=very-safe-password
# TRAEFIK_ENTRYPOINT=websecure
# TRAEFIK_HTTP_PUBLISH_IP=0.0.0.0
# TRAEFIK_HTTPS_PUBLISH_IP=0.0.0.0
# TRAEFIK_DASHBOARD_PUBLISH_IP=127.0.0.1
# TRAEFIK_DASHBOARD_PUBLISH_IP=127.0.0.1
CORE_VERSION=0.1.13
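
CORE_VERSION added here is what Compose interpolates into the init image tag in the compose file below. A minimal sketch of the resolution, assuming this .env sits in the compose project directory (or is passed via --env-file):

  init:
    # ${CORE_VERSION} is read from this .env by Compose variable interpolation
    image: redplanethq/init:${CORE_VERSION} # resolves to redplanethq/init:0.1.13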

View File

@@ -6,6 +6,22 @@ x-logging: &logging-config
    compress: ${LOGGING_COMPRESS:-true}
services:
  init:
    container_name: trigger-init
    image: redplanethq/init:${CORE_VERSION}
    restart: "no" # prevent retries
    environment:
      - VERSION=${CORE_VERSION}
      - DB_HOST=${DB_HOST}
      - DB_PORT=${DB_PORT}
      - TRIGGER_DB=${TRIGGER_DB}
      - POSTGRES_USER=${POSTGRES_USER}
      - POSTGRES_PASSWORD=${POSTGRES_PASSWORD}
      - TRIGGER_TASKS_IMAGE=${TRIGGER_TASKS_IMAGE}
      - NODE_ENV=production
    networks:
      - webapp
  webapp:
    container_name: trigger-webapp
    image: ghcr.io/triggerdotdev/trigger.dev:${TRIGGER_IMAGE_TAG:-v4-beta}
@@ -14,7 +30,11 @@ services:
    ports:
      - ${WEBAPP_PUBLISH_IP:-0.0.0.0}:8030:3000
    depends_on:
      - clickhouse
      clickhouse:
        condition: service_started
      init:
        condition: service_started
    networks:
      - webapp
      - supervisor
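
The hunk above switches depends_on from the short list form (the removed "- clickhouse" line) to the map form, which accepts per-dependency conditions. service_started only waits for the container to be created and started; for a one-shot init container (note the restart: "no" above), Compose also supports service_completed_successfully, which blocks until the dependency exits with code 0. A stricter variant of the same block, as a sketch rather than what this commit ships:

  webapp:
    depends_on:
      clickhouse:
        condition: service_started
      init:
        # wait for the init container to finish, not merely to start
        condition: service_completed_successfully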

View File

@@ -1,139 +0,0 @@
# Trigger.dev self-hosting environment variables
# - These are the default values for the self-hosting stack
# - You should change them to suit your needs, especially the secrets
# - See the docs for more information: https://trigger.dev/docs/self-hosting/overview
# Secrets
# - Do NOT use these defaults in production
# - Generate your own by running `openssl rand -hex 16` for each secret
SESSION_SECRET=2818143646516f6fffd707b36f334bbb
MAGIC_LINK_SECRET=44da78b7bbb0dfe709cf38931d25dcdd
ENCRYPTION_KEY=f686147ab967943ebbe9ed3b496e465a
MANAGED_WORKER_SECRET=447c29678f9eaf289e9c4b70d3dd8a7f
# Worker token
# - This is the token for the worker to connect to the webapp
# - When running the combined stack, this is set automatically during bootstrap
# - For the split setup, you will have to set this manually. The token is available in the webapp logs but will only be shown once.
# - See the docs for more information: https://trigger.dev/docs/self-hosting/docker
# TRIGGER_WORKER_TOKEN=
# Worker URLs
# - In split setups, uncomment and set to the public URL of your webapp
# TRIGGER_API_URL=https://trigger.example.com
# OTEL_EXPORTER_OTLP_ENDPOINT=https://trigger.example.com/otel
# Postgres
# - Do NOT use these defaults in production
# - Especially if you decide to expose the database to the internet
# POSTGRES_USER=postgres
POSTGRES_USER=docker
POSTGRES_PASSWORD=docker
TRIGGER_DB=trigger
DB_HOST=host.docker.internal
DB_PORT=5432
# POSTGRES_DB=postgres
DATABASE_URL=postgresql://${POSTGRES_USER}:${POSTGRES_PASSWORD}@${DB_HOST}:${DB_PORT}/${TRIGGER_DB}?schema=public&sslmode=disable
DIRECT_URL=postgresql://${POSTGRES_USER}:${POSTGRES_PASSWORD}@${DB_HOST}:${DB_PORT}/${TRIGGER_DB}?schema=public&sslmode=disable
ELECTRIC_DATABASE_URL=postgresql://${POSTGRES_USER}:${POSTGRES_PASSWORD}@${DB_HOST}/${TRIGGER_DB}
# Trigger image tag
# - This is the version of the webapp and worker images to use, they should be locked to a specific version in production
# - For example: TRIGGER_IMAGE_TAG=v4.0.0-v4-beta.21
TRIGGER_IMAGE_TAG=v4-beta
# Webapp
# - These should generally be set to the same value
# - In production, these should be set to the public URL of your webapp, e.g. https://trigger.example.com
APP_ORIGIN=http://localhost:8030
LOGIN_ORIGIN=http://localhost:8030
API_ORIGIN=http://localhost:8030
DEV_OTEL_EXPORTER_OTLP_ENDPOINT=http://localhost:8030/otel
# You may need to set this when testing locally or when using the combined setup
# API_ORIGIN=http://webapp:3000
# Webapp - memory management
# - This sets the maximum memory allocation for Node.js heap in MiB (e.g. "4096" for 4GB)
# - It should be set according to your total webapp machine's memory or any container limits you have set
# - Setting this too high or low WILL cause crashes, inefficient memory utilization and high CPU usage
# - You should allow for some memory overhead, we suggest at least 20%, for example:
# - 2GB machine: NODE_MAX_OLD_SPACE_SIZE=1600
# - 4GB machine: NODE_MAX_OLD_SPACE_SIZE=3200
# - 6GB machine: NODE_MAX_OLD_SPACE_SIZE=4800
# - 8GB machine: NODE_MAX_OLD_SPACE_SIZE=6400
# NODE_MAX_OLD_SPACE_SIZE=8192
# ClickHouse
# - Do NOT use these defaults in production
CLICKHOUSE_USER=default
CLICKHOUSE_PASSWORD=password
CLICKHOUSE_URL=http://default:password@clickhouse:8123?secure=false
RUN_REPLICATION_CLICKHOUSE_URL=http://default:password@clickhouse:8123
# Docker Registry
# - When testing locally, the default values should be fine
# - When deploying to production, you will have to change these, especially the password and URL
# - See the docs for more information: https://trigger.dev/docs/self-hosting/docker#registry-setup
DOCKER_REGISTRY_URL=localhost:5000
DOCKER_REGISTRY_NAMESPACE=
DOCKER_REGISTRY_USERNAME=registry-user
DOCKER_REGISTRY_PASSWORD=very-secure-indeed
# Object store
# - You need to log into the Minio dashboard and create a bucket called "packets"
# - See the docs for more information: https://trigger.dev/docs/self-hosting/docker#object-storage
OBJECT_STORE_ACCESS_KEY_ID=admin
OBJECT_STORE_SECRET_ACCESS_KEY=very-safe-password
# You will have to uncomment and configure this for production
# OBJECT_STORE_BASE_URL=http://localhost:9000
# Credentials to access the Minio dashboard at http://localhost:9001
# - You should change these credentials and not use them for the `OBJECT_STORE_` env vars above
# - Instead, set up a non-root user with access to the "packets" bucket
# MINIO_ROOT_USER=admin
# MINIO_ROOT_PASSWORD=very-safe-password
# Other image tags
# - These are the versions of the other images to use
# - You should lock these to a specific version in production
# POSTGRES_IMAGE_TAG=14
# REDIS_IMAGE_TAG=7
# ELECTRIC_IMAGE_TAG=1.0.13
# CLICKHOUSE_IMAGE_TAG=latest
# REGISTRY_IMAGE_TAG=2
# MINIO_IMAGE_TAG=latest
# DOCKER_PROXY_IMAGE_TAG=latest
# TRAEFIK_IMAGE_TAG=v3.4
# Publish IPs
# - These are the IPs to publish the services to
# - Setting to 127.0.0.1 makes the service only accessible locally
# - When deploying to production, you will have to change these, depending on your setup
# WEBAPP_PUBLISH_IP=0.0.0.0
# POSTGRES_PUBLISH_IP=127.0.0.1
# REDIS_PUBLISH_IP=127.0.0.1
# ELECTRIC_PUBLISH_IP=127.0.0.1
# CLICKHOUSE_PUBLISH_IP=127.0.0.1
# REGISTRY_PUBLISH_IP=127.0.0.1
# MINIO_PUBLISH_IP=127.0.0.1
# Restart policy
# - Applies to all services, adjust as needed
# RESTART_POLICY=unless-stopped
# Docker logging
# - See the official docs: https://docs.docker.com/engine/logging/configure/
# LOGGING_DRIVER=local
# LOGGING_MAX_SIZE=20m
# LOGGING_MAX_FILES=5
# LOGGING_COMPRESS=true
# Traefik
# - Reverse proxy settings only serve as an example and require further configuration
# - See the partial overrides in docker-compose.traefik.yml for more details
# TRAEFIK_ENTRYPOINT=websecure
# TRAEFIK_HTTP_PUBLISH_IP=0.0.0.0
# TRAEFIK_HTTPS_PUBLISH_IP=0.0.0.0
# TRAEFIK_DASHBOARD_PUBLISH_IP=127.0.0.1

View File

@@ -1 +0,0 @@
registry-user:$2y$05$6ingYqw0.3j13dxHY4w3neMSvKhF3pvRmc0AFifScWsVA9JpuLwNK

View File

@@ -1,281 +0,0 @@
x-logging: &logging-config
  driver: ${LOGGING_DRIVER:-local}
  options:
    max-size: ${LOGGING_MAX_SIZE:-20m}
    max-file: ${LOGGING_MAX_FILES:-5}
    compress: ${LOGGING_COMPRESS:-true}
services:
  webapp:
    container_name: trigger-webapp
    image: ghcr.io/triggerdotdev/trigger.dev:${TRIGGER_IMAGE_TAG:-v4-beta}
    restart: ${RESTART_POLICY:-unless-stopped}
    logging: *logging-config
    ports:
      - ${WEBAPP_PUBLISH_IP:-0.0.0.0}:8030:3000
    depends_on:
      - clickhouse
    networks:
      - webapp
      - supervisor
    volumes:
      - type: bind
        source: /efs/trigger/shared
        target: /home/node/shared
    # Only needed for bootstrap
    user: root
    # Only needed for bootstrap
    command: sh -c "chown -R node:node /home/node/shared && exec ./scripts/entrypoint.sh"
    healthcheck:
      test:
        [
          "CMD",
          "node",
          "-e",
          "http.get('http://localhost:3000/healthcheck', res => process.exit(res.statusCode === 200 ? 0 : 1)).on('error', () => process.exit(1))",
        ]
      interval: 30s
      timeout: 10s
      retries: 5
      start_period: 10s
    environment:
      APP_ORIGIN: ${APP_ORIGIN:-http://localhost:8030}
      LOGIN_ORIGIN: ${LOGIN_ORIGIN:-http://localhost:8030}
      API_ORIGIN: ${API_ORIGIN:-http://localhost:8030}
      ELECTRIC_ORIGIN: http://electric:3000
      DATABASE_URL: ${DATABASE_URL:-postgresql://postgres:postgres@postgres:5432/main?schema=public&sslmode=disable}
      DIRECT_URL: ${DIRECT_URL:-postgresql://postgres:postgres@postgres:5432/main?schema=public&sslmode=disable}
      SESSION_SECRET: ${SESSION_SECRET}
      MAGIC_LINK_SECRET: ${MAGIC_LINK_SECRET}
      ENCRYPTION_KEY: ${ENCRYPTION_KEY}
      MANAGED_WORKER_SECRET: ${MANAGED_WORKER_SECRET}
      REDIS_HOST: trigger-redis
      REDIS_PORT: 6379
      REDIS_TLS_DISABLED: true
      APP_LOG_LEVEL: info
      DEV_OTEL_EXPORTER_OTLP_ENDPOINT: ${DEV_OTEL_EXPORTER_OTLP_ENDPOINT:-http://localhost:8030/otel}
      DEPLOY_REGISTRY_HOST: ${DOCKER_REGISTRY_URL:-localhost:5000}
      OBJECT_STORE_BASE_URL: ${OBJECT_STORE_BASE_URL:-http://minio:9000}
      OBJECT_STORE_ACCESS_KEY_ID: ${OBJECT_STORE_ACCESS_KEY_ID}
      OBJECT_STORE_SECRET_ACCESS_KEY: ${OBJECT_STORE_SECRET_ACCESS_KEY}
      GRACEFUL_SHUTDOWN_TIMEOUT: 1000
      # Bootstrap - this will automatically set up a worker group for you
      # This will NOT work for split deployments
      TRIGGER_BOOTSTRAP_ENABLED: 1
      TRIGGER_BOOTSTRAP_WORKER_GROUP_NAME: bootstrap
      TRIGGER_BOOTSTRAP_WORKER_TOKEN_PATH: /home/node/shared/worker_token
      # ClickHouse configuration
      CLICKHOUSE_URL: ${CLICKHOUSE_URL:-http://default:password@clickhouse:8123?secure=false}
      CLICKHOUSE_LOG_LEVEL: ${CLICKHOUSE_LOG_LEVEL:-info}
      # Run replication
      RUN_REPLICATION_ENABLED: ${RUN_REPLICATION_ENABLED:-1}
      RUN_REPLICATION_CLICKHOUSE_URL: ${RUN_REPLICATION_CLICKHOUSE_URL:-http://default:password@clickhouse:8123}
      RUN_REPLICATION_LOG_LEVEL: ${RUN_REPLICATION_LOG_LEVEL:-info}
      # Limits
      # TASK_PAYLOAD_OFFLOAD_THRESHOLD: 524288 # 512KB
      # TASK_PAYLOAD_MAXIMUM_SIZE: 3145728 # 3MB
      # BATCH_TASK_PAYLOAD_MAXIMUM_SIZE: 1000000 # 1MB
      # TASK_RUN_METADATA_MAXIMUM_SIZE: 262144 # 256KB
      # DEFAULT_ENV_EXECUTION_CONCURRENCY_LIMIT: 100
      # DEFAULT_ORG_EXECUTION_CONCURRENCY_LIMIT: 100
      # Internal OTEL configuration
      INTERNAL_OTEL_TRACE_LOGGING_ENABLED: ${INTERNAL_OTEL_TRACE_LOGGING_ENABLED:-0}
  electric:
    container_name: trigger-electric
    image: electricsql/electric:${ELECTRIC_IMAGE_TAG:-1.0.10}
    restart: ${RESTART_POLICY:-unless-stopped}
    logging: *logging-config
    networks:
      - webapp
    environment:
      DATABASE_URL: ${ELECTRIC_DATABASE_URL:-postgresql://postgres:postgres@postgres:5432/main?schema=public&sslmode=disable}
      ELECTRIC_INSECURE: true
      ELECTRIC_USAGE_REPORTING: false
    healthcheck:
      test: ["CMD", "curl", "-f", "http://localhost:3000/v1/health"]
      interval: 10s
      timeout: 5s
      retries: 5
      start_period: 10s
  clickhouse:
    container_name: trigger-clickhouse
    image: bitnami/clickhouse:${CLICKHOUSE_IMAGE_TAG:-latest}
    restart: ${RESTART_POLICY:-unless-stopped}
    logging: *logging-config
    ports:
      - ${CLICKHOUSE_PUBLISH_IP:-127.0.0.1}:9123:8123
      - ${CLICKHOUSE_PUBLISH_IP:-127.0.0.1}:9090:9000
    environment:
      CLICKHOUSE_ADMIN_USER: ${CLICKHOUSE_USER:-default}
      CLICKHOUSE_ADMIN_PASSWORD: ${CLICKHOUSE_PASSWORD:-password}
    volumes:
      - type: bind
        source: /efs/trigger/clickhouse
        target: /bitnami/clickhouse
      - ../clickhouse/override.xml:/bitnami/clickhouse/etc/config.d/override.xml:ro
    networks:
      - webapp
    healthcheck:
      test:
        [
          "CMD",
          "clickhouse-client",
          "--host",
          "localhost",
          "--port",
          "9000",
          "--user",
          "default",
          "--password",
          "password",
          "--query",
          "SELECT 1",
        ]
      interval: 5s
      timeout: 5s
      retries: 5
      start_period: 10s
  registry:
    container_name: trigger-registry
    image: registry:${REGISTRY_IMAGE_TAG:-2}
    restart: ${RESTART_POLICY:-unless-stopped}
    logging: *logging-config
    ports:
      - ${REGISTRY_PUBLISH_IP:-127.0.0.1}:5000:5000
    networks:
      - webapp
    volumes:
      # registry-user:very-secure-indeed
      - ../registry/auth.htpasswd:/auth/htpasswd:ro
    environment:
      REGISTRY_AUTH: htpasswd
      REGISTRY_AUTH_HTPASSWD_REALM: Registry Realm
      REGISTRY_AUTH_HTPASSWD_PATH: /auth/htpasswd
    healthcheck:
      test: ["CMD", "wget", "--spider", "-q", "http://localhost:5000/"]
      interval: 10s
      timeout: 5s
      retries: 5
      start_period: 10s
  minio:
    container_name: trigger-minio
    image: bitnami/minio:${MINIO_IMAGE_TAG:-latest}
    restart: ${RESTART_POLICY:-unless-stopped}
    logging: *logging-config
    ports:
      - ${MINIO_PUBLISH_IP:-127.0.0.1}:9000:9000
      - ${MINIO_PUBLISH_IP:-127.0.0.1}:9001:9001
    networks:
      - webapp
    volumes:
      - type: bind
        source: /efs/trigger/minio
        target: /bitnami/minio/data
    environment:
      MINIO_ROOT_USER: ${MINIO_ROOT_USER:-admin}
      MINIO_ROOT_PASSWORD: ${MINIO_ROOT_PASSWORD:-very-safe-password}
      MINIO_DEFAULT_BUCKETS: packets
      MINIO_BROWSER: "on"
    healthcheck:
      test: ["CMD", "curl", "-f", "http://localhost:9000/minio/health/live"]
      interval: 5s
      timeout: 10s
      retries: 5
      start_period: 10s
  # Worker related
  supervisor:
    container_name: trigger-supervisor
    image: ghcr.io/triggerdotdev/supervisor:${TRIGGER_IMAGE_TAG:-v4-beta}
    restart: ${RESTART_POLICY:-unless-stopped}
    logging: *logging-config
    depends_on:
      - docker-proxy
    networks:
      - supervisor
      - docker-proxy
      - webapp
    volumes:
      - type: bind
        source: /efs/trigger/shared
        target: /home/node/shared
    # Only needed for bootstrap
    user: root
    # Only needed for bootstrap
    command: sh -c "chown -R node:node /home/node/shared && exec /usr/bin/dumb-init -- pnpm run --filter supervisor start"
    environment:
      # This needs to match the token of the worker group you want to connect to
      # TRIGGER_WORKER_TOKEN: ${TRIGGER_WORKER_TOKEN}
      # Use the bootstrap token created by the webapp
      TRIGGER_WORKER_TOKEN: file:///home/node/shared/worker_token
      MANAGED_WORKER_SECRET: ${MANAGED_WORKER_SECRET}
      TRIGGER_API_URL: ${TRIGGER_API_URL:-http://webapp:3000}
      OTEL_EXPORTER_OTLP_ENDPOINT: ${OTEL_EXPORTER_OTLP_ENDPOINT:-http://webapp:3000/otel}
      TRIGGER_WORKLOAD_API_DOMAIN: supervisor
      TRIGGER_WORKLOAD_API_PORT_EXTERNAL: 8020
      # Optional settings
      DEBUG: 1
      ENFORCE_MACHINE_PRESETS: 1
      TRIGGER_DEQUEUE_INTERVAL_MS: 1000
      DOCKER_HOST: tcp://docker-proxy:2375
      DOCKER_RUNNER_NETWORKS: webapp,supervisor
      DOCKER_REGISTRY_URL: ${DOCKER_REGISTRY_URL:-localhost:5000}
      DEPLOY_REGISTRY_NAMESPACE: ${DOCKER_REGISTRY_NAMESPACE:-redplanethq}
      DOCKER_REGISTRY_USERNAME: ${DOCKER_REGISTRY_USERNAME:-}
      DOCKER_REGISTRY_PASSWORD: ${DOCKER_REGISTRY_PASSWORD:-}
      DOCKER_AUTOREMOVE_EXITED_CONTAINERS: 0
    healthcheck:
      test:
        [
          "CMD",
          "node",
          "-e",
          "http.get('http://localhost:8020/health', res => process.exit(res.statusCode === 200 ? 0 : 1)).on('error', () => process.exit(1))",
        ]
      interval: 30s
      timeout: 10s
      retries: 5
      start_period: 10s
  docker-proxy:
    container_name: trigger-docker-proxy
    image: tecnativa/docker-socket-proxy:${DOCKER_PROXY_IMAGE_TAG:-latest}
    restart: ${RESTART_POLICY:-unless-stopped}
    logging: *logging-config
    volumes:
      - /var/run/docker.sock:/var/run/docker.sock:ro
    networks:
      - docker-proxy
    environment:
      - LOG_LEVEL=info
      - POST=1
      - CONTAINERS=1
      - IMAGES=1
      - INFO=1
      - NETWORKS=1
    healthcheck:
      test: ["CMD", "nc", "-z", "127.0.0.1", "2375"]
      interval: 30s
      timeout: 5s
      retries: 5
      start_period: 5s
  redis:
    container_name: trigger-redis
    image: redis:7
    ports:
      - "6379:6379"
    networks:
      - webapp
networks:
  docker-proxy:
    name: docker-proxy
  supervisor:
    name: supervisor
  webapp:
    name: webapp
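
This deleted variant pinned all state to host paths under /efs via long-syntax bind mounts. Worth noting when resurrecting it: with the long syntax, Compose fails at startup if the host directory does not exist, unless you opt in to creating it. A minimal sketch of that opt-in (create_host_path is a Compose bind option, not something this file used):

    volumes:
      - type: bind
        source: /efs/trigger/shared
        target: /home/node/shared
        bind:
          create_host_path: true # assumption: create /efs/trigger/shared on first run instead of erroring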

View File

@@ -1,270 +0,0 @@
x-logging: &logging-config
  driver: ${LOGGING_DRIVER:-local}
  options:
    max-size: ${LOGGING_MAX_SIZE:-20m}
    max-file: ${LOGGING_MAX_FILES:-5}
    compress: ${LOGGING_COMPRESS:-true}
services:
  webapp:
    container_name: trigger-webapp
    image: ghcr.io/triggerdotdev/trigger.dev:${TRIGGER_IMAGE_TAG:-v4-beta}
    restart: ${RESTART_POLICY:-unless-stopped}
    logging: *logging-config
    ports:
      - ${WEBAPP_PUBLISH_IP:-0.0.0.0}:8030:3000
    depends_on:
      - clickhouse
    networks:
      - webapp
      - supervisor
    volumes:
      - shared:/home/node/shared
    # Only needed for bootstrap
    user: root
    # Only needed for bootstrap
    command: sh -c "chown -R node:node /home/node/shared && exec ./scripts/entrypoint.sh"
    healthcheck:
      test:
        [
          "CMD",
          "node",
          "-e",
          "http.get('http://localhost:3000/healthcheck', res => process.exit(res.statusCode === 200 ? 0 : 1)).on('error', () => process.exit(1))",
        ]
      interval: 30s
      timeout: 10s
      retries: 5
      start_period: 10s
    environment:
      APP_ORIGIN: ${APP_ORIGIN:-http://localhost:8030}
      LOGIN_ORIGIN: ${LOGIN_ORIGIN:-http://localhost:8030}
      API_ORIGIN: ${API_ORIGIN:-http://localhost:8030}
      ELECTRIC_ORIGIN: http://electric:3000
      DATABASE_URL: ${DATABASE_URL:-postgresql://postgres:postgres@postgres:5432/main?schema=public&sslmode=disable}
      DIRECT_URL: ${DIRECT_URL:-postgresql://postgres:postgres@postgres:5432/main?schema=public&sslmode=disable}
      SESSION_SECRET: ${SESSION_SECRET}
      MAGIC_LINK_SECRET: ${MAGIC_LINK_SECRET}
      ENCRYPTION_KEY: ${ENCRYPTION_KEY}
      MANAGED_WORKER_SECRET: ${MANAGED_WORKER_SECRET}
      REDIS_HOST: host.docker.internal
      REDIS_PORT: 6379
      REDIS_TLS_DISABLED: true
      APP_LOG_LEVEL: info
      DEV_OTEL_EXPORTER_OTLP_ENDPOINT: ${DEV_OTEL_EXPORTER_OTLP_ENDPOINT:-http://localhost:8030/otel}
      DEPLOY_REGISTRY_HOST: ${DOCKER_REGISTRY_URL:-localhost:5000}
      DEPLOY_REGISTRY_NAMESPACE: ${DOCKER_REGISTRY_NAMESPACE:-trigger}
      OBJECT_STORE_BASE_URL: ${OBJECT_STORE_BASE_URL:-http://minio:9000}
      OBJECT_STORE_ACCESS_KEY_ID: ${OBJECT_STORE_ACCESS_KEY_ID}
      OBJECT_STORE_SECRET_ACCESS_KEY: ${OBJECT_STORE_SECRET_ACCESS_KEY}
      GRACEFUL_SHUTDOWN_TIMEOUT: 1000
      # Bootstrap - this will automatically set up a worker group for you
      # This will NOT work for split deployments
      TRIGGER_BOOTSTRAP_ENABLED: 1
      TRIGGER_BOOTSTRAP_WORKER_GROUP_NAME: bootstrap
      TRIGGER_BOOTSTRAP_WORKER_TOKEN_PATH: /home/node/shared/worker_token
      # ClickHouse configuration
      CLICKHOUSE_URL: ${CLICKHOUSE_URL:-http://default:password@clickhouse:8123?secure=false}
      CLICKHOUSE_LOG_LEVEL: ${CLICKHOUSE_LOG_LEVEL:-info}
      # Run replication
      RUN_REPLICATION_ENABLED: ${RUN_REPLICATION_ENABLED:-1}
      RUN_REPLICATION_CLICKHOUSE_URL: ${RUN_REPLICATION_CLICKHOUSE_URL:-http://default:password@clickhouse:8123}
      RUN_REPLICATION_LOG_LEVEL: ${RUN_REPLICATION_LOG_LEVEL:-info}
      # Limits
      # TASK_PAYLOAD_OFFLOAD_THRESHOLD: 524288 # 512KB
      # TASK_PAYLOAD_MAXIMUM_SIZE: 3145728 # 3MB
      # BATCH_TASK_PAYLOAD_MAXIMUM_SIZE: 1000000 # 1MB
      # TASK_RUN_METADATA_MAXIMUM_SIZE: 262144 # 256KB
      # DEFAULT_ENV_EXECUTION_CONCURRENCY_LIMIT: 100
      # DEFAULT_ORG_EXECUTION_CONCURRENCY_LIMIT: 100
      # Internal OTEL configuration
      INTERNAL_OTEL_TRACE_LOGGING_ENABLED: ${INTERNAL_OTEL_TRACE_LOGGING_ENABLED:-0}
  electric:
    container_name: trigger-electric
    image: electricsql/electric:${ELECTRIC_IMAGE_TAG:-1.0.10}
    restart: ${RESTART_POLICY:-unless-stopped}
    logging: *logging-config
    networks:
      - webapp
    environment:
      DATABASE_URL: ${ELECTRIC_DATABASE_URL:-postgresql://postgres:postgres@postgres:5432/main?schema=public&sslmode=disable}
      ELECTRIC_INSECURE: true
      ELECTRIC_USAGE_REPORTING: false
    healthcheck:
      test: ["CMD", "curl", "-f", "http://localhost:3000/v1/health"]
      interval: 10s
      timeout: 5s
      retries: 5
      start_period: 10s
  clickhouse:
    container_name: trigger-clickhouse
    image: bitnami/clickhouse:${CLICKHOUSE_IMAGE_TAG:-latest}
    restart: ${RESTART_POLICY:-unless-stopped}
    logging: *logging-config
    ports:
      - ${CLICKHOUSE_PUBLISH_IP:-127.0.0.1}:9123:8123
      - ${CLICKHOUSE_PUBLISH_IP:-127.0.0.1}:9090:9000
    environment:
      CLICKHOUSE_ADMIN_USER: ${CLICKHOUSE_USER:-default}
      CLICKHOUSE_ADMIN_PASSWORD: ${CLICKHOUSE_PASSWORD:-password}
    volumes:
      - clickhouse:/bitnami/clickhouse
      - ../clickhouse/override.xml:/bitnami/clickhouse/etc/config.d/override.xml:ro
    networks:
      - webapp
    healthcheck:
      test:
        [
          "CMD",
          "clickhouse-client",
          "--host",
          "localhost",
          "--port",
          "9000",
          "--user",
          "default",
          "--password",
          "password",
          "--query",
          "SELECT 1",
        ]
      interval: 5s
      timeout: 5s
      retries: 5
      start_period: 10s
  registry:
    container_name: trigger-registry
    image: registry:${REGISTRY_IMAGE_TAG:-2}
    restart: ${RESTART_POLICY:-unless-stopped}
    logging: *logging-config
    ports:
      - ${REGISTRY_PUBLISH_IP:-127.0.0.1}:5000:5000
    networks:
      - webapp
    volumes:
      # registry-user:very-secure-indeed
      - ./auth.htpasswd:/auth/htpasswd:ro
    environment:
      REGISTRY_AUTH: htpasswd
      REGISTRY_AUTH_HTPASSWD_REALM: Registry Realm
      REGISTRY_AUTH_HTPASSWD_PATH: /auth/htpasswd
    healthcheck:
      test: ["CMD", "wget", "--spider", "-q", "http://localhost:5000/"]
      interval: 10s
      timeout: 5s
      retries: 5
      start_period: 10s
  minio:
    container_name: trigger-minio
    image: bitnami/minio:${MINIO_IMAGE_TAG:-latest}
    restart: ${RESTART_POLICY:-unless-stopped}
    logging: *logging-config
    ports:
      - ${MINIO_PUBLISH_IP:-127.0.0.1}:9000:9000
      - ${MINIO_PUBLISH_IP:-127.0.0.1}:9001:9001
    networks:
      - webapp
    volumes:
      - minio:/bitnami/minio/data
    environment:
      MINIO_ROOT_USER: ${MINIO_ROOT_USER:-admin}
      MINIO_ROOT_PASSWORD: ${MINIO_ROOT_PASSWORD:-very-safe-password}
      MINIO_DEFAULT_BUCKETS: packets
      MINIO_BROWSER: "on"
    healthcheck:
      test: ["CMD", "curl", "-f", "http://localhost:9000/minio/health/live"]
      interval: 5s
      timeout: 10s
      retries: 5
      start_period: 10s
  # Worker related
  supervisor:
    container_name: trigger-supervisor
    image: ghcr.io/triggerdotdev/supervisor:${TRIGGER_IMAGE_TAG:-v4-beta}
    restart: ${RESTART_POLICY:-unless-stopped}
    logging: *logging-config
    depends_on:
      - docker-proxy
    networks:
      - supervisor
      - docker-proxy
      - webapp
    volumes:
      - shared:/home/node/shared
    # Only needed for bootstrap
    user: root
    # Only needed for bootstrap
    command: sh -c "chown -R node:node /home/node/shared && exec /usr/bin/dumb-init -- pnpm run --filter supervisor start"
    environment:
      # This needs to match the token of the worker group you want to connect to
      # TRIGGER_WORKER_TOKEN: ${TRIGGER_WORKER_TOKEN}
      # Use the bootstrap token created by the webapp
      TRIGGER_WORKER_TOKEN: file:///home/node/shared/worker_token
      MANAGED_WORKER_SECRET: ${MANAGED_WORKER_SECRET}
      TRIGGER_API_URL: ${TRIGGER_API_URL:-http://webapp:3000}
      OTEL_EXPORTER_OTLP_ENDPOINT: ${OTEL_EXPORTER_OTLP_ENDPOINT:-http://webapp:3000/otel}
      TRIGGER_WORKLOAD_API_DOMAIN: supervisor
      TRIGGER_WORKLOAD_API_PORT_EXTERNAL: 8020
      # Optional settings
      DEBUG: 1
      ENFORCE_MACHINE_PRESETS: 1
      TRIGGER_DEQUEUE_INTERVAL_MS: 1000
      DOCKER_HOST: tcp://docker-proxy:2375
      DOCKER_RUNNER_NETWORKS: webapp,supervisor
      DOCKER_REGISTRY_URL: ${DOCKER_REGISTRY_URL:-localhost:5000}
      DOCKER_REGISTRY_USERNAME: ${DOCKER_REGISTRY_USERNAME:-}
      DOCKER_REGISTRY_PASSWORD: ${DOCKER_REGISTRY_PASSWORD:-}
      DOCKER_AUTOREMOVE_EXITED_CONTAINERS: 0
    healthcheck:
      test:
        [
          "CMD",
          "node",
          "-e",
          "http.get('http://localhost:8020/health', res => process.exit(res.statusCode === 200 ? 0 : 1)).on('error', () => process.exit(1))",
        ]
      interval: 30s
      timeout: 10s
      retries: 5
      start_period: 10s
  docker-proxy:
    container_name: trigger-docker-proxy
    image: tecnativa/docker-socket-proxy:${DOCKER_PROXY_IMAGE_TAG:-latest}
    restart: ${RESTART_POLICY:-unless-stopped}
    logging: *logging-config
    volumes:
      - /var/run/docker.sock:/var/run/docker.sock:ro
    networks:
      - docker-proxy
    environment:
      - LOG_LEVEL=info
      - POST=1
      - CONTAINERS=1
      - IMAGES=1
      - INFO=1
      - NETWORKS=1
    healthcheck:
      test: ["CMD", "nc", "-z", "127.0.0.1", "2375"]
      interval: 30s
      timeout: 5s
      retries: 5
      start_period: 5s
volumes:
  shared:
  clickhouse:
  minio:
networks:
  docker-proxy:
    name: docker-proxy
  supervisor:
    name: supervisor
  webapp:
    name: webapp
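
This second deleted file is the named-volume twin of the /efs variant above: shared, clickhouse and minio are declared under the top-level volumes: key, created on the first docker compose up, and kept across docker compose down (only down -v removes them). If you wanted named volumes that still live on a fixed host path, the local driver can bind them; a sketch, with the /efs path reused from the other file as an assumption:

volumes:
  shared:
    driver: local
    driver_opts:
      type: none
      o: bind
      device: /efs/trigger/shared # assumed host path; must exist before docker compose up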