# Docker Compose definition for the Onyx stack: API + background workers,
# web frontend, optional MCP server, Postgres, Vespa, Redis, MinIO,
# model servers, and nginx/certbot for TLS termination.
name: onyx
services:
  api_server:
    image: onyxdotapp/onyx-backend:${IMAGE_TAG:-latest}
    build:
      context: ../../backend
      dockerfile: Dockerfile.cloud
    # Run schema migrations before starting the API server.
    command: >
      /bin/sh -c "alembic -n schema_private upgrade head &&
      echo \"Starting Onyx Api Server\" &&
      uvicorn onyx.main:app --host 0.0.0.0 --port 8080"
    depends_on:
      - relational_db
      - index
      - cache
      - inference_model_server
      - minio
    restart: unless-stopped
    environment:
      - AUTH_TYPE=${AUTH_TYPE:-oidc}
      - POSTGRES_HOST=relational_db
      - VESPA_HOST=index
      - REDIS_HOST=cache
      - MODEL_SERVER_HOST=${MODEL_SERVER_HOST:-inference_model_server}
      # MinIO configuration
      - S3_ENDPOINT_URL=${S3_ENDPOINT_URL:-http://minio:9000}
      - S3_AWS_ACCESS_KEY_ID=${S3_AWS_ACCESS_KEY_ID:-minioadmin}
      - S3_AWS_SECRET_ACCESS_KEY=${S3_AWS_SECRET_ACCESS_KEY:-minioadmin}
    env_file:
      - path: .env
        required: false
    extra_hosts:
      - "host.docker.internal:host-gateway"
    logging:
      driver: json-file
      options:
        max-size: "50m"
        max-file: "6"

  background:
    image: onyxdotapp/onyx-backend:${IMAGE_TAG:-latest}
    build:
      context: ../../backend
      dockerfile: Dockerfile
    command: /app/scripts/supervisord_entrypoint.sh
    depends_on:
      - relational_db
      - index
      - cache
      - inference_model_server
      - indexing_model_server
    restart: unless-stopped
    environment:
      - USE_LIGHTWEIGHT_BACKGROUND_WORKER=${USE_LIGHTWEIGHT_BACKGROUND_WORKER:-true}
      - AUTH_TYPE=${AUTH_TYPE:-oidc}
      - POSTGRES_HOST=relational_db
      - VESPA_HOST=index
      - REDIS_HOST=cache
      - MODEL_SERVER_HOST=${MODEL_SERVER_HOST:-inference_model_server}
      - INDEXING_MODEL_SERVER_HOST=${INDEXING_MODEL_SERVER_HOST:-indexing_model_server}
      # MinIO configuration
      - S3_ENDPOINT_URL=${S3_ENDPOINT_URL:-http://minio:9000}
      - S3_AWS_ACCESS_KEY_ID=${S3_AWS_ACCESS_KEY_ID:-minioadmin}
      - S3_AWS_SECRET_ACCESS_KEY=${S3_AWS_SECRET_ACCESS_KEY:-minioadmin}
    env_file:
      - path: .env
        required: false
    extra_hosts:
      - "host.docker.internal:host-gateway"
    logging:
      driver: json-file
      options:
        max-size: "50m"
        max-file: "6"

  web_server:
    image: onyxdotapp/onyx-web-server:${IMAGE_TAG:-latest}
    build:
      context: ../../web
      dockerfile: Dockerfile
      args:
        - NEXT_PUBLIC_POSITIVE_PREDEFINED_FEEDBACK_OPTIONS=${NEXT_PUBLIC_POSITIVE_PREDEFINED_FEEDBACK_OPTIONS:-}
        - NEXT_PUBLIC_NEGATIVE_PREDEFINED_FEEDBACK_OPTIONS=${NEXT_PUBLIC_NEGATIVE_PREDEFINED_FEEDBACK_OPTIONS:-}
        - NEXT_PUBLIC_DISABLE_LOGOUT=${NEXT_PUBLIC_DISABLE_LOGOUT:-}
        - NEXT_PUBLIC_THEME=${NEXT_PUBLIC_THEME:-}
        - NEXT_PUBLIC_FORGOT_PASSWORD_ENABLED=${NEXT_PUBLIC_FORGOT_PASSWORD_ENABLED:-}
    depends_on:
      - api_server
    restart: unless-stopped
    environment:
      - INTERNAL_URL=http://api_server:8080
    env_file:
      - path: .env
        required: false
    logging:
      driver: json-file
      options:
        max-size: "50m"
        max-file: "6"

  mcp_server:
    image: onyxdotapp/onyx-backend:${IMAGE_TAG:-latest}
    build:
      context: ../../backend
      dockerfile: Dockerfile
    # Exits immediately (successfully) unless MCP_SERVER_ENABLED is set to
    # True/true; combined with restart: "no" this makes the service opt-in.
    command: >
      /bin/sh -c "if [ \"${MCP_SERVER_ENABLED:-}\" != \"True\" ] &&
      [ \"${MCP_SERVER_ENABLED:-}\" != \"true\" ]; then
      echo 'MCP server is disabled (MCP_SERVER_ENABLED=false), skipping...';
      exit 0;
      else
      exec python -m onyx.mcp_server_main;
      fi"
    env_file:
      - path: .env
        required: false
    depends_on:
      - relational_db
      - cache
    restart: "no"
    environment:
      - POSTGRES_HOST=relational_db
      - REDIS_HOST=cache
      # MCP Server Configuration
      - MCP_SERVER_ENABLED=${MCP_SERVER_ENABLED:-false}
      - MCP_SERVER_PORT=${MCP_SERVER_PORT:-8090}
      - MCP_SERVER_CORS_ORIGINS=${MCP_SERVER_CORS_ORIGINS:-}
      - API_SERVER_PROTOCOL=${API_SERVER_PROTOCOL:-http}
      - API_SERVER_HOST=${API_SERVER_HOST:-api_server}
      - API_SERVER_PORT=${API_SERVER_PORT:-8080}
    extra_hosts:
      - "host.docker.internal:host-gateway"
    logging:
      driver: json-file
      options:
        max-size: "50m"
        max-file: "6"
    volumes:
      - mcp_server_logs:/var/log/onyx

  relational_db:
    image: postgres:15.2-alpine
    shm_size: 1g
    command: -c 'max_connections=250'
    restart: unless-stopped
    # POSTGRES_USER and POSTGRES_PASSWORD should be set in .env file
    env_file:
      - path: .env
        required: false
    volumes:
      - db_volume:/var/lib/postgresql/data
    logging:
      driver: json-file
      options:
        max-size: "50m"
        max-file: "6"

  inference_model_server:
    image: onyxdotapp/onyx-model-server:${IMAGE_TAG:-latest}
    build:
      context: ../../backend
      dockerfile: Dockerfile.model_server
    command: >
      /bin/sh -c "if [ \"${DISABLE_MODEL_SERVER:-}\" = \"True\" ] ||
      [ \"${DISABLE_MODEL_SERVER:-}\" = \"true\" ]; then
      echo 'Skipping service...';
      exit 0;
      else
      exec uvicorn model_server.main:app --host 0.0.0.0 --port 9000;
      fi"
    restart: on-failure
    environment:
      - MIN_THREADS_ML_MODELS=${MIN_THREADS_ML_MODELS:-}
      # Set to debug to get more fine-grained logs
      - LOG_LEVEL=${LOG_LEVEL:-info}
    volumes:
      # Not necessary, this is just to reduce download time during startup
      - model_cache_huggingface:/app/.cache/huggingface/
    logging:
      driver: json-file
      options:
        max-size: "50m"
        max-file: "6"

  indexing_model_server:
    image: onyxdotapp/onyx-model-server:${IMAGE_TAG:-latest}
    build:
      context: ../../backend
      dockerfile: Dockerfile.model_server
    command: >
      /bin/sh -c "if [ \"${DISABLE_MODEL_SERVER:-}\" = \"True\" ] ||
      [ \"${DISABLE_MODEL_SERVER:-}\" = \"true\" ]; then
      echo 'Skipping service...';
      exit 0;
      else
      exec uvicorn model_server.main:app --host 0.0.0.0 --port 9000;
      fi"
    restart: on-failure
    environment:
      - MIN_THREADS_ML_MODELS=${MIN_THREADS_ML_MODELS:-}
      - INDEXING_ONLY=True
      # Set to debug to get more fine-grained logs
      - LOG_LEVEL=${LOG_LEVEL:-info}
      - VESPA_SEARCHER_THREADS=${VESPA_SEARCHER_THREADS:-1}
    volumes:
      # Not necessary, this is just to reduce download time during startup
      - indexing_huggingface_model_cache:/app/.cache/huggingface/
    logging:
      driver: json-file
      options:
        max-size: "50m"
        max-file: "6"

  # This container name cannot have an underscore in it due to Vespa
  # expectations of the URL
  index:
    image: vespaengine/vespa:8.609.39
    restart: unless-stopped
    environment:
      - VESPA_SKIP_UPGRADE_CHECK=true
    ports:
      - "19071:19071"
      - "8081:8081"
    volumes:
      - vespa_volume:/opt/vespa/var
    logging:
      driver: json-file
      options:
        max-size: "50m"
        max-file: "6"

  nginx:
    image: nginx:1.25.5-alpine
    restart: unless-stopped
    # nginx will immediately crash with `nginx: [emerg] host not found in
    # upstream` if api_server / web_server are not up
    depends_on:
      - api_server
      - web_server
    ports:
      - "80:80"
      - "443:443"
    volumes:
      - ../data/nginx:/etc/nginx/conf.d
      - ../data/certbot/conf:/etc/letsencrypt
      - ../data/certbot/www:/var/www/certbot
    logging:
      driver: json-file
      options:
        max-size: "50m"
        max-file: "6"
    # The specified script waits for the api_server to start up.
    # Without this we've seen issues where nginx shows no error logs but
    # does not receive any traffic
    # NOTE: we have to use dos2unix to remove Carriage Return chars from the
    # file in order to make this work on both Unix-like systems and windows
    command: >
      /bin/sh -c "dos2unix /etc/nginx/conf.d/run-nginx.sh
      && /etc/nginx/conf.d/run-nginx.sh app.conf.template.prod"
    env_file:
      - .env.nginx
    environment:
      # Nginx proxy timeout settings (in seconds)
      - NGINX_PROXY_CONNECT_TIMEOUT=${NGINX_PROXY_CONNECT_TIMEOUT:-300}
      - NGINX_PROXY_SEND_TIMEOUT=${NGINX_PROXY_SEND_TIMEOUT:-300}
      - NGINX_PROXY_READ_TIMEOUT=${NGINX_PROXY_READ_TIMEOUT:-300}

  # follows https://pentacent.medium.com/nginx-and-lets-encrypt-with-docker-in-less-than-5-minutes-b4b8a60d3a71
  certbot:
    image: certbot/certbot
    restart: unless-stopped
    volumes:
      - ../data/certbot/conf:/etc/letsencrypt
      - ../data/certbot/www:/var/www/certbot
    logging:
      driver: json-file
      options:
        max-size: "50m"
        max-file: "6"
    # Renew certificates every 12 hours; $$ escapes Compose interpolation so
    # the shell sees $!.
    entrypoint: "/bin/sh -c 'trap exit TERM; while :; do certbot renew; sleep 12h & wait $${!}; done;'"

  minio:
    image: minio/minio:RELEASE.2025-07-23T15-54-02Z-cpuv1
    restart: unless-stopped
    environment:
      MINIO_ROOT_USER: ${MINIO_ROOT_USER:-minioadmin}
      MINIO_ROOT_PASSWORD: ${MINIO_ROOT_PASSWORD:-minioadmin}
      MINIO_DEFAULT_BUCKETS: ${S3_FILE_STORE_BUCKET_NAME:-onyx-file-store-bucket}
    volumes:
      - minio_data:/data
    command: server /data --console-address ":9001"
    healthcheck:
      test: ["CMD", "curl", "-f", "http://localhost:9000/minio/health/live"]
      interval: 30s
      timeout: 20s
      retries: 3

  cache:
    image: redis:7.4-alpine
    restart: unless-stopped
    ports:
      - "6379:6379"
    # docker silently mounts /data even without an explicit volume mount,
    # which enables persistence. explicitly setting save and appendonly
    # forces ephemeral behavior.
    command: redis-server --save "" --appendonly no
    # Use tmpfs to prevent creation of anonymous volumes for /data
    tmpfs:
      - /data

volumes:
  db_volume:
  vespa_volume:
  minio_data:  # Created by the container itself
  model_cache_huggingface:
  indexing_huggingface_model_cache:
  mcp_server_logs: