initial commit

This commit is contained in:
KARMACOMA 2025-12-13 18:15:26 +01:00
commit 964e1219bc
18 changed files with 3873 additions and 0 deletions

47
README.md Normal file
View file

@ -0,0 +1,47 @@
# Welcome to Onyx
To set up Onyx there are several options, Onyx supports the following for deployment:
1. Quick guided install via the install.sh script
2. Pulling the repo and running `docker compose up -d` from the deployment/docker_compose directory
- Note, it is recommended to copy over the env.template file to .env and edit the necessary values
3. For large scale deployments leveraging Kubernetes, there are two options, Helm or Terraform.
This README focuses on the easiest guided deployment which is via install.sh.
**For more detailed guides, please refer to the documentation: https://docs.onyx.app/deployment/overview**
## install.sh script
```
curl -fsSL https://raw.githubusercontent.com/onyx-dot-app/onyx/main/deployment/docker_compose/install.sh > install.sh && chmod +x install.sh && ./install.sh
```
This provides a guided installation of Onyx via Docker Compose. It will deploy the latest version of Onyx
and set up the volumes to ensure data is persisted across deployments or upgrades.
The script will create an onyx_data directory, all necessary files for the deployment will be stored in
there. Note that no application critical data is stored in that directory so even if you delete it, the
data needed to restore the app will not be destroyed.
The data about chats, users, etc. are instead stored as named Docker Volumes. This is managed by Docker
and where it is stored will depend on your Docker setup. You can always delete these as well by running
the install.sh script with --delete-data.
To shut down the deployment without deleting, use install.sh --shutdown.
### Upgrading the deployment
Onyx maintains backwards compatibility across all minor versions following SemVer. If following the install.sh script (or through Docker Compose), you can
upgrade it by first bringing down the containers. To do this, use `install.sh --shutdown`
(or `docker compose down` from the directory with the docker-compose.yml file).
After the containers are stopped, you can safely upgrade by re-running the `install.sh` script (if you left the values as default, which is `latest`,
it will automatically update to the latest version each time the script is run). If you are more comfortable running docker compose commands, you can instead run
them directly from the directory with the docker-compose.yml file: first verify the version you want in the environment file (see below),
pull new images if needed (if using the `latest` tag, be sure to run `docker compose pull`), and run `docker compose up` to restart the services on the new version.
### Environment variables
The Docker Compose files try to look for a .env file in the same directory. The `install.sh` script sets it up from a file called env.template which is
downloaded during the initial setup. Feel free to edit the .env file to customize your deployment. The most important / common changed values are
located near the top of the file.
IMAGE_TAG is the version of Onyx to run. It is recommended to leave it as latest to get all updates with each redeployment.

41
docker-compose.dev.yml Normal file
View file

@ -0,0 +1,41 @@
# Docker Compose Override for Development/Testing
# This file exposes service ports for development and testing purposes
#
# Usage:
# docker compose -f docker-compose.yml -f docker-compose.dev.yml up -d
#
# Or set COMPOSE_FILE environment variable:
# export COMPOSE_FILE=docker-compose.yml:docker-compose.dev.yml
# docker compose up -d
services:
api_server:
ports:
- "8080:8080"
mcp_server:
ports:
- "8090:8090"
relational_db:
ports:
- "5432:5432"
index:
ports:
- "19071:19071"
- "8081:8081"
cache:
ports:
- "6379:6379"
minio:
# use different ports to avoid conflicts with model servers
ports:
- "9004:9000"
- "9005:9001"
code-interpreter:
ports:
- "8000:8000"

View file

@ -0,0 +1,21 @@
name: onyx
services:
mcp_api_key_server:
image: ${ONYX_BACKEND_IMAGE:-onyxdotapp/onyx-backend:latest}
restart: on-failure
working_dir: /workspace
environment:
- MCP_API_KEY_TEST_PORT=${MCP_API_KEY_TEST_PORT:-8005}
- MCP_API_KEY=${MCP_API_KEY:-test-api-key-12345}
- MCP_SERVER_HOST=${MCP_API_KEY_SERVER_HOST:-0.0.0.0}
- MCP_SERVER_PUBLIC_HOST=${MCP_API_KEY_SERVER_PUBLIC_HOST:-host.docker.internal}
command: >
/bin/sh -c "
python backend/tests/integration/mock_services/mcp_test_server/run_mcp_server_api_key.py ${MCP_API_KEY:-test-api-key-12345} ${MCP_API_KEY_TEST_PORT:-8005}
"
ports:
- "${MCP_API_KEY_TEST_PORT:-8005}:${MCP_API_KEY_TEST_PORT:-8005}"
volumes:
- ../..:/workspace:ro

View file

@ -0,0 +1,29 @@
name: onyx
services:
mcp_oauth_server:
image: onyxdotapp/onyx-backend:${IMAGE_TAG:-latest}
restart: on-failure
working_dir: /workspace
environment:
- MCP_OAUTH_CLIENT_ID=${MCP_OAUTH_CLIENT_ID:-}
- MCP_OAUTH_CLIENT_SECRET=${MCP_OAUTH_CLIENT_SECRET:-}
- MCP_OAUTH_ISSUER=${MCP_OAUTH_ISSUER:-}
- MCP_OAUTH_JWKS_URI=${MCP_OAUTH_JWKS_URI:-}
- MCP_OAUTH_USERNAME=${MCP_OAUTH_USERNAME:-}
- MCP_OAUTH_PASSWORD=${MCP_OAUTH_PASSWORD:-}
- MCP_OAUTH_REQUIRED_SCOPES=${MCP_OAUTH_REQUIRED_SCOPES:-mcp:use}
- MCP_TEST_SERVER_PORT=${MCP_TEST_SERVER_PORT:-8004}
- MCP_SERVER_PORT=${MCP_TEST_SERVER_PORT:-8004}
- MCP_SERVER_HOST=${MCP_SERVER_HOST:-0.0.0.0}
- MCP_SERVER_PUBLIC_HOST=${MCP_SERVER_PUBLIC_HOST:-host.docker.internal}
- MCP_SERVER_PUBLIC_URL=${MCP_SERVER_PUBLIC_URL:-}
command: >
/bin/sh -c "
python backend/tests/integration/mock_services/mcp_test_server/run_mcp_server_oauth.py ${MCP_TEST_SERVER_PORT:-8004}
"
ports:
- "${MCP_TEST_SERVER_PORT:-8004}:${MCP_TEST_SERVER_PORT:-8004}"
volumes:
- ../..:/workspace:ro

View file

@ -0,0 +1,39 @@
name: onyx
services:
indexing_model_server:
image: onyxdotapp/onyx-model-server:${IMAGE_TAG:-latest}
build:
context: ../../backend
dockerfile: Dockerfile.model_server
command: >
/bin/sh -c "if [ \"${DISABLE_MODEL_SERVER:-}\" = \"True\" ] || [ \"${DISABLE_MODEL_SERVER:-}\" = \"true\" ]; then
echo 'Skipping service...';
exit 0;
else
exec uvicorn model_server.main:app --host 0.0.0.0 --port 9000;
fi"
restart: on-failure
environment:
- INDEX_BATCH_SIZE=${INDEX_BATCH_SIZE:-}
- MIN_THREADS_ML_MODELS=${MIN_THREADS_ML_MODELS:-}
- INDEXING_ONLY=True
# Set to debug to get more fine-grained logs
- LOG_LEVEL=${LOG_LEVEL:-info}
- CLIENT_EMBEDDING_TIMEOUT=${CLIENT_EMBEDDING_TIMEOUT:-}
# Analytics Configs
- SENTRY_DSN=${SENTRY_DSN:-}
volumes:
# Not necessary, this is just to reduce download time during startup
- indexing_huggingface_model_cache:/app/.cache/huggingface/
logging:
driver: json-file
options:
max-size: "50m"
max-file: "6"
ports:
      - "9000:9000" # expose the model server port to the host
volumes:
indexing_huggingface_model_cache:

View file

@ -0,0 +1,530 @@
name: onyx
services:
api_server:
image: ${ONYX_BACKEND_IMAGE:-onyxdotapp/onyx-backend:${IMAGE_TAG:-latest}}
build:
context: ../../backend
dockerfile: Dockerfile
command: >
/bin/sh -c "
alembic -n schema_private upgrade head &&
echo \"Starting Onyx Api Server\" &&
uvicorn onyx.main:app --host 0.0.0.0 --port 8080"
depends_on:
- relational_db
- index
- cache
- inference_model_server
- minio
restart: unless-stopped
ports:
- "8080:8080"
environment:
- ENABLE_PAID_ENTERPRISE_EDITION_FEATURES=true
- MULTI_TENANT=true
- LOG_LEVEL=DEBUG
- AUTH_TYPE=cloud
- REQUIRE_EMAIL_VERIFICATION=false
- DISABLE_TELEMETRY=true
- IMAGE_TAG=test
- DEV_MODE=true
# Auth Settings
- SESSION_EXPIRE_TIME_SECONDS=${SESSION_EXPIRE_TIME_SECONDS:-}
- ENCRYPTION_KEY_SECRET=${ENCRYPTION_KEY_SECRET:-}
- VALID_EMAIL_DOMAINS=${VALID_EMAIL_DOMAINS:-}
- GOOGLE_OAUTH_CLIENT_ID=${GOOGLE_OAUTH_CLIENT_ID:-}
- GOOGLE_OAUTH_CLIENT_SECRET=${GOOGLE_OAUTH_CLIENT_SECRET:-}
- SMTP_SERVER=${SMTP_SERVER:-}
- SMTP_PORT=${SMTP_PORT:-587}
- SMTP_USER=${SMTP_USER:-}
- SMTP_PASS=${SMTP_PASS:-}
- ENABLE_EMAIL_INVITES=${ENABLE_EMAIL_INVITES:-}
- EMAIL_FROM=${EMAIL_FROM:-}
- OAUTH_CLIENT_ID=${OAUTH_CLIENT_ID:-}
- OAUTH_CLIENT_SECRET=${OAUTH_CLIENT_SECRET:-}
- OPENID_CONFIG_URL=${OPENID_CONFIG_URL:-}
- TRACK_EXTERNAL_IDP_EXPIRY=${TRACK_EXTERNAL_IDP_EXPIRY:-}
- CORS_ALLOWED_ORIGIN=${CORS_ALLOWED_ORIGIN:-}
# Gen AI Settings
- GEN_AI_MAX_TOKENS=${GEN_AI_MAX_TOKENS:-}
- QA_TIMEOUT=${QA_TIMEOUT:-}
- MAX_CHUNKS_FED_TO_CHAT=${MAX_CHUNKS_FED_TO_CHAT:-}
- DISABLE_LLM_QUERY_REPHRASE=${DISABLE_LLM_QUERY_REPHRASE:-}
- DISABLE_LITELLM_STREAMING=${DISABLE_LITELLM_STREAMING:-}
- LITELLM_EXTRA_HEADERS=${LITELLM_EXTRA_HEADERS:-}
- DISABLE_LLM_DOC_RELEVANCE=${DISABLE_LLM_DOC_RELEVANCE:-}
- GEN_AI_API_KEY=${GEN_AI_API_KEY:-}
- TOKEN_BUDGET_GLOBALLY_ENABLED=${TOKEN_BUDGET_GLOBALLY_ENABLED:-}
# Query Options
- DOC_TIME_DECAY=${DOC_TIME_DECAY:-}
- HYBRID_ALPHA=${HYBRID_ALPHA:-}
- EDIT_KEYWORD_QUERY=${EDIT_KEYWORD_QUERY:-}
- MULTILINGUAL_QUERY_EXPANSION=${MULTILINGUAL_QUERY_EXPANSION:-}
- LANGUAGE_HINT=${LANGUAGE_HINT:-}
- LANGUAGE_CHAT_NAMING_HINT=${LANGUAGE_CHAT_NAMING_HINT:-}
# Other services
- POSTGRES_HOST=relational_db
- POSTGRES_DEFAULT_SCHEMA=${POSTGRES_DEFAULT_SCHEMA:-}
- VESPA_HOST=index
- REDIS_HOST=cache
- WEB_DOMAIN=${WEB_DOMAIN:-}
# MinIO configuration
- S3_ENDPOINT_URL=${S3_ENDPOINT_URL:-http://minio:9000}
- S3_AWS_ACCESS_KEY_ID=${S3_AWS_ACCESS_KEY_ID:-minioadmin}
- S3_AWS_SECRET_ACCESS_KEY=${S3_AWS_SECRET_ACCESS_KEY:-minioadmin}
- S3_FILE_STORE_BUCKET_NAME=${S3_FILE_STORE_BUCKET_NAME:-}
# Don't change the NLP model configs unless you know what you're doing
- EMBEDDING_BATCH_SIZE=${EMBEDDING_BATCH_SIZE:-}
- DOCUMENT_ENCODER_MODEL=${DOCUMENT_ENCODER_MODEL:-}
- DOC_EMBEDDING_DIM=${DOC_EMBEDDING_DIM:-}
- NORMALIZE_EMBEDDINGS=${NORMALIZE_EMBEDDINGS:-}
- ASYM_QUERY_PREFIX=${ASYM_QUERY_PREFIX:-}
- DISABLE_RERANK_FOR_STREAMING=${DISABLE_RERANK_FOR_STREAMING:-}
- MODEL_SERVER_HOST=${MODEL_SERVER_HOST:-inference_model_server}
- MODEL_SERVER_PORT=${MODEL_SERVER_PORT:-}
- LOG_ALL_MODEL_INTERACTIONS=${LOG_ALL_MODEL_INTERACTIONS:-}
- LOG_ONYX_MODEL_INTERACTIONS=${LOG_ONYX_MODEL_INTERACTIONS:-}
- LOG_INDIVIDUAL_MODEL_TOKENS=${LOG_INDIVIDUAL_MODEL_TOKENS:-}
- LOG_VESPA_TIMING_INFORMATION=${LOG_VESPA_TIMING_INFORMATION:-}
- LOG_ENDPOINT_LATENCY=${LOG_ENDPOINT_LATENCY:-}
- LOG_POSTGRES_LATENCY=${LOG_POSTGRES_LATENCY:-}
- LOG_POSTGRES_CONN_COUNTS=${LOG_POSTGRES_CONN_COUNTS:-}
- CELERY_BROKER_POOL_LIMIT=${CELERY_BROKER_POOL_LIMIT:-}
- LITELLM_CUSTOM_ERROR_MESSAGE_MAPPINGS=${LITELLM_CUSTOM_ERROR_MESSAGE_MAPPINGS:-}
# Egnyte OAuth Configs
- EGNYTE_CLIENT_ID=${EGNYTE_CLIENT_ID:-}
- EGNYTE_CLIENT_SECRET=${EGNYTE_CLIENT_SECRET:-}
- EGNYTE_LOCALHOST_OVERRIDE=${EGNYTE_LOCALHOST_OVERRIDE:-}
# Linear OAuth Configs
- LINEAR_CLIENT_ID=${LINEAR_CLIENT_ID:-}
- LINEAR_CLIENT_SECRET=${LINEAR_CLIENT_SECRET:-}
# Analytics Configs
- SENTRY_DSN=${SENTRY_DSN:-}
# Chat Configs
- HARD_DELETE_CHATS=${HARD_DELETE_CHATS:-}
# Show extra/uncommon connectors
- SHOW_EXTRA_CONNECTORS=${SHOW_EXTRA_CONNECTORS:-true}
# Enables the use of bedrock models or IAM Auth
- AWS_ACCESS_KEY_ID=${AWS_ACCESS_KEY_ID:-}
- AWS_SECRET_ACCESS_KEY=${AWS_SECRET_ACCESS_KEY:-}
- AWS_REGION_NAME=${AWS_REGION_NAME:-}
- API_KEY_HASH_ROUNDS=${API_KEY_HASH_ROUNDS:-}
# Seeding configuration
- USE_IAM_AUTH=${USE_IAM_AUTH:-}
# Vespa Language Forcing
# See: https://docs.vespa.ai/en/linguistics.html
- VESPA_LANGUAGE_OVERRIDE=${VESPA_LANGUAGE_OVERRIDE:-}
extra_hosts:
- "host.docker.internal:host-gateway"
logging:
driver: json-file
options:
max-size: "50m"
max-file: "6"
background:
image: ${ONYX_BACKEND_IMAGE:-onyxdotapp/onyx-backend:${IMAGE_TAG:-latest}}
build:
context: ../../backend
dockerfile: Dockerfile
command: >
/bin/sh -c "
if [ -f /etc/ssl/certs/custom-ca.crt ]; then
update-ca-certificates;
fi &&
/app/scripts/supervisord_entrypoint.sh"
depends_on:
- relational_db
- index
- cache
- inference_model_server
- indexing_model_server
restart: unless-stopped
environment:
- USE_LIGHTWEIGHT_BACKGROUND_WORKER=${USE_LIGHTWEIGHT_BACKGROUND_WORKER:-true}
- ENABLE_PAID_ENTERPRISE_EDITION_FEATURES=true
- MULTI_TENANT=true
- LOG_LEVEL=DEBUG
- AUTH_TYPE=cloud
- REQUIRE_EMAIL_VERIFICATION=false
- DISABLE_TELEMETRY=true
- IMAGE_TAG=test
- ENCRYPTION_KEY_SECRET=${ENCRYPTION_KEY_SECRET:-}
- JWT_PUBLIC_KEY_URL=${JWT_PUBLIC_KEY_URL:-}
# Gen AI Settings (Needed by OnyxBot)
- GEN_AI_MAX_TOKENS=${GEN_AI_MAX_TOKENS:-}
- QA_TIMEOUT=${QA_TIMEOUT:-}
- MAX_CHUNKS_FED_TO_CHAT=${MAX_CHUNKS_FED_TO_CHAT:-}
- DISABLE_LLM_QUERY_REPHRASE=${DISABLE_LLM_QUERY_REPHRASE:-}
- GENERATIVE_MODEL_ACCESS_CHECK_FREQ=${GENERATIVE_MODEL_ACCESS_CHECK_FREQ:-}
- DISABLE_LITELLM_STREAMING=${DISABLE_LITELLM_STREAMING:-}
- LITELLM_EXTRA_HEADERS=${LITELLM_EXTRA_HEADERS:-}
- GEN_AI_API_KEY=${GEN_AI_API_KEY:-}
# Query Options
- DOC_TIME_DECAY=${DOC_TIME_DECAY:-}
- HYBRID_ALPHA=${HYBRID_ALPHA:-}
- EDIT_KEYWORD_QUERY=${EDIT_KEYWORD_QUERY:-}
- MULTILINGUAL_QUERY_EXPANSION=${MULTILINGUAL_QUERY_EXPANSION:-}
- LANGUAGE_HINT=${LANGUAGE_HINT:-}
- LANGUAGE_CHAT_NAMING_HINT=${LANGUAGE_CHAT_NAMING_HINT:-}
# Other Services
- POSTGRES_HOST=relational_db
- POSTGRES_USER=${POSTGRES_USER:-}
- POSTGRES_PASSWORD=${POSTGRES_PASSWORD:-}
- DB_READONLY_USER=${DB_READONLY_USER:-}
- DB_READONLY_PASSWORD=${DB_READONLY_PASSWORD:-}
- POSTGRES_DB=${POSTGRES_DB:-}
- POSTGRES_DEFAULT_SCHEMA=${POSTGRES_DEFAULT_SCHEMA:-}
- VESPA_HOST=index
- REDIS_HOST=cache
- WEB_DOMAIN=${WEB_DOMAIN:-}
# MinIO configuration
- S3_ENDPOINT_URL=${S3_ENDPOINT_URL:-http://minio:9000}
- S3_AWS_ACCESS_KEY_ID=${S3_AWS_ACCESS_KEY_ID:-minioadmin}
- S3_AWS_SECRET_ACCESS_KEY=${S3_AWS_SECRET_ACCESS_KEY:-minioadmin}
- S3_FILE_STORE_BUCKET_NAME=${S3_FILE_STORE_BUCKET_NAME:-}
# Don't change the NLP model configs unless you know what you're doing
- DOCUMENT_ENCODER_MODEL=${DOCUMENT_ENCODER_MODEL:-}
- DOC_EMBEDDING_DIM=${DOC_EMBEDDING_DIM:-}
- NORMALIZE_EMBEDDINGS=${NORMALIZE_EMBEDDINGS:-}
- ASYM_QUERY_PREFIX=${ASYM_QUERY_PREFIX:-}
- ASYM_PASSAGE_PREFIX=${ASYM_PASSAGE_PREFIX:-}
- MODEL_SERVER_HOST=${MODEL_SERVER_HOST:-inference_model_server}
- MODEL_SERVER_PORT=${MODEL_SERVER_PORT:-}
- INDEXING_MODEL_SERVER_HOST=${INDEXING_MODEL_SERVER_HOST:-indexing_model_server}
# Indexing Configs
- VESPA_SEARCHER_THREADS=${VESPA_SEARCHER_THREADS:-}
- ENABLED_CONNECTOR_TYPES=${ENABLED_CONNECTOR_TYPES:-}
- DISABLE_INDEX_UPDATE_ON_SWAP=${DISABLE_INDEX_UPDATE_ON_SWAP:-}
- DASK_JOB_CLIENT_ENABLED=${DASK_JOB_CLIENT_ENABLED:-}
- CONTINUE_ON_CONNECTOR_FAILURE=${CONTINUE_ON_CONNECTOR_FAILURE:-}
- EXPERIMENTAL_CHECKPOINTING_ENABLED=${EXPERIMENTAL_CHECKPOINTING_ENABLED:-}
- CONFLUENCE_CONNECTOR_LABELS_TO_SKIP=${CONFLUENCE_CONNECTOR_LABELS_TO_SKIP:-}
- JIRA_CONNECTOR_LABELS_TO_SKIP=${JIRA_CONNECTOR_LABELS_TO_SKIP:-}
- WEB_CONNECTOR_VALIDATE_URLS=${WEB_CONNECTOR_VALIDATE_URLS:-}
- JIRA_SERVER_API_VERSION=${JIRA_SERVER_API_VERSION:-}
- JIRA_CLOUD_API_VERSION=${JIRA_CLOUD_API_VERSION:-}
- GONG_CONNECTOR_START_TIME=${GONG_CONNECTOR_START_TIME:-}
- NOTION_CONNECTOR_ENABLE_RECURSIVE_PAGE_LOOKUP=${NOTION_CONNECTOR_ENABLE_RECURSIVE_PAGE_LOOKUP:-}
- GITHUB_CONNECTOR_BASE_URL=${GITHUB_CONNECTOR_BASE_URL:-}
- MAX_DOCUMENT_CHARS=${MAX_DOCUMENT_CHARS:-}
- MAX_FILE_SIZE_BYTES=${MAX_FILE_SIZE_BYTES:-}
# Egnyte OAuth Configs
- EGNYTE_CLIENT_ID=${EGNYTE_CLIENT_ID:-}
- EGNYTE_CLIENT_SECRET=${EGNYTE_CLIENT_SECRET:-}
- EGNYTE_LOCALHOST_OVERRIDE=${EGNYTE_LOCALHOST_OVERRIDE:-}
      # Linear OAuth Configs
- LINEAR_CLIENT_ID=${LINEAR_CLIENT_ID:-}
- LINEAR_CLIENT_SECRET=${LINEAR_CLIENT_SECRET:-}
# Celery Configs (defaults are set in the supervisord.conf file.
# prefer doing that to have one source of defaults)
- CELERY_WORKER_DOCFETCHING_CONCURRENCY=${CELERY_WORKER_DOCFETCHING_CONCURRENCY:-}
- CELERY_WORKER_DOCPROCESSING_CONCURRENCY=${CELERY_WORKER_DOCPROCESSING_CONCURRENCY:-}
- CELERY_WORKER_LIGHT_CONCURRENCY=${CELERY_WORKER_LIGHT_CONCURRENCY:-}
- CELERY_WORKER_LIGHT_PREFETCH_MULTIPLIER=${CELERY_WORKER_LIGHT_PREFETCH_MULTIPLIER:-}
# Onyx SlackBot Configs
- ONYX_BOT_DISABLE_DOCS_ONLY_ANSWER=${ONYX_BOT_DISABLE_DOCS_ONLY_ANSWER:-}
- ONYX_BOT_FEEDBACK_VISIBILITY=${ONYX_BOT_FEEDBACK_VISIBILITY:-}
- ONYX_BOT_DISPLAY_ERROR_MSGS=${ONYX_BOT_DISPLAY_ERROR_MSGS:-}
- NOTIFY_SLACKBOT_NO_ANSWER=${NOTIFY_SLACKBOT_NO_ANSWER:-}
- ONYX_BOT_MAX_QPM=${ONYX_BOT_MAX_QPM:-}
- ONYX_BOT_MAX_WAIT_TIME=${ONYX_BOT_MAX_WAIT_TIME:-}
# Logging
# Leave this on pretty please? Nothing sensitive is collected!
- DISABLE_TELEMETRY=${DISABLE_TELEMETRY:-}
- LOG_LEVEL=${LOG_LEVEL:-info} # Set to debug to get more fine-grained logs
- LOG_ALL_MODEL_INTERACTIONS=${LOG_ALL_MODEL_INTERACTIONS:-} # LiteLLM Verbose Logging
# Log all of Onyx prompts and interactions with the LLM
- LOG_ONYX_MODEL_INTERACTIONS=${LOG_ONYX_MODEL_INTERACTIONS:-}
- LOG_INDIVIDUAL_MODEL_TOKENS=${LOG_INDIVIDUAL_MODEL_TOKENS:-}
- LOG_VESPA_TIMING_INFORMATION=${LOG_VESPA_TIMING_INFORMATION:-}
# Analytics Configs
- SENTRY_DSN=${SENTRY_DSN:-}
# Enterprise Edition stuff
- ENABLE_PAID_ENTERPRISE_EDITION_FEATURES=${ENABLE_PAID_ENTERPRISE_EDITION_FEATURES:-false}
- USE_IAM_AUTH=${USE_IAM_AUTH:-}
- AWS_REGION_NAME=${AWS_REGION_NAME:-}
- AWS_ACCESS_KEY_ID=${AWS_ACCESS_KEY_ID-}
- AWS_SECRET_ACCESS_KEY=${AWS_SECRET_ACCESS_KEY-}
# Uncomment the line below to use if IAM_AUTH is true and you are using iam auth for postgres
# volumes:
# - ./bundle.pem:/app/bundle.pem:ro
extra_hosts:
- "host.docker.internal:host-gateway"
logging:
driver: json-file
options:
max-size: "50m"
max-file: "6"
# Uncomment the following lines if you need to include a custom CA certificate
# This section enables the use of a custom CA certificate
# If present, the custom CA certificate is mounted as a volume
# The container checks for its existence and updates the system's CA certificates
# This allows for secure communication with services using custom SSL certificates
# Optional volume mount for CA certificate
# volumes:
# # Maps to the CA_CERT_PATH environment variable in the Dockerfile
# - ${CA_CERT_PATH:-./custom-ca.crt}:/etc/ssl/certs/custom-ca.crt:ro
web_server:
image: ${ONYX_WEB_SERVER_IMAGE:-onyxdotapp/onyx-web-server:${IMAGE_TAG:-latest}}
build:
context: ../../web
dockerfile: Dockerfile
args:
- NEXT_PUBLIC_POSITIVE_PREDEFINED_FEEDBACK_OPTIONS=${NEXT_PUBLIC_POSITIVE_PREDEFINED_FEEDBACK_OPTIONS:-}
- NEXT_PUBLIC_NEGATIVE_PREDEFINED_FEEDBACK_OPTIONS=${NEXT_PUBLIC_NEGATIVE_PREDEFINED_FEEDBACK_OPTIONS:-}
- NEXT_PUBLIC_DISABLE_LOGOUT=${NEXT_PUBLIC_DISABLE_LOGOUT:-}
- NEXT_PUBLIC_DEFAULT_SIDEBAR_OPEN=${NEXT_PUBLIC_DEFAULT_SIDEBAR_OPEN:-}
- NEXT_PUBLIC_FORGOT_PASSWORD_ENABLED=${NEXT_PUBLIC_FORGOT_PASSWORD_ENABLED:-}
# Enterprise Edition only
- NEXT_PUBLIC_THEME=${NEXT_PUBLIC_THEME:-}
# DO NOT TURN ON unless you have EXPLICIT PERMISSION from Onyx.
- NEXT_PUBLIC_DO_NOT_USE_TOGGLE_OFF_DANSWER_POWERED=${NEXT_PUBLIC_DO_NOT_USE_TOGGLE_OFF_DANSWER_POWERED:-false}
depends_on:
- api_server
restart: unless-stopped
environment:
- INTERNAL_URL=http://api_server:8080
- WEB_DOMAIN=${WEB_DOMAIN:-}
- THEME_IS_DARK=${THEME_IS_DARK:-}
- DISABLE_LLM_DOC_RELEVANCE=${DISABLE_LLM_DOC_RELEVANCE:-}
# Enterprise Edition only
- ENABLE_PAID_ENTERPRISE_EDITION_FEATURES=${ENABLE_PAID_ENTERPRISE_EDITION_FEATURES:-false}
- NEXT_PUBLIC_CUSTOM_REFRESH_URL=${NEXT_PUBLIC_CUSTOM_REFRESH_URL:-}
mcp_server:
image: ${ONYX_BACKEND_IMAGE:-onyxdotapp/onyx-backend:${IMAGE_TAG:-latest}}
build:
context: ../../backend
dockerfile: Dockerfile
command: >
/bin/sh -c "if [ \"${MCP_SERVER_ENABLED:-}\" != \"True\" ] && [ \"${MCP_SERVER_ENABLED:-}\" != \"true\" ]; then
echo 'MCP server is disabled (MCP_SERVER_ENABLED=false), skipping...';
exit 0;
else
exec python -m onyx.mcp_server_main;
fi"
ports:
- "8090:8090"
env_file:
- path: .env
required: false
depends_on:
- relational_db
- cache
restart: "no"
environment:
- POSTGRES_HOST=relational_db
- REDIS_HOST=cache
# MCP Server Configuration
- MCP_SERVER_ENABLED=${MCP_SERVER_ENABLED:-false}
- MCP_SERVER_PORT=${MCP_SERVER_PORT:-8090}
- MCP_SERVER_CORS_ORIGINS=${MCP_SERVER_CORS_ORIGINS:-}
- API_SERVER_PROTOCOL=${API_SERVER_PROTOCOL:-http}
- API_SERVER_HOST=api_server
- API_SERVER_PORT=8080
extra_hosts:
- "host.docker.internal:host-gateway"
logging:
driver: json-file
options:
max-size: "50m"
max-file: "6"
volumes:
- mcp_server_logs:/var/log/onyx
inference_model_server:
image: ${ONYX_MODEL_SERVER_IMAGE:-onyxdotapp/onyx-model-server:${IMAGE_TAG:-latest}}
build:
context: ../../backend
dockerfile: Dockerfile.model_server
command: >
/bin/sh -c "if [ \"${DISABLE_MODEL_SERVER:-}\" = \"True\" ] || [ \"${DISABLE_MODEL_SERVER:-}\" = \"true\" ]; then
echo 'Skipping service...';
exit 0;
else
exec uvicorn model_server.main:app --host 0.0.0.0 --port 9000;
fi"
restart: on-failure
environment:
- MIN_THREADS_ML_MODELS=${MIN_THREADS_ML_MODELS:-}
# Set to debug to get more fine-grained logs
- LOG_LEVEL=${LOG_LEVEL:-info}
# Analytics Configs
- SENTRY_DSN=${SENTRY_DSN:-}
volumes:
# Not necessary, this is just to reduce download time during startup
- model_cache_huggingface:/app/.cache/huggingface/
logging:
driver: json-file
options:
max-size: "50m"
max-file: "6"
indexing_model_server:
image: ${ONYX_MODEL_SERVER_IMAGE:-onyxdotapp/onyx-model-server:${IMAGE_TAG:-latest}}
build:
context: ../../backend
dockerfile: Dockerfile.model_server
command: >
/bin/sh -c "if [ \"${DISABLE_MODEL_SERVER:-}\" = \"True\" ] || [ \"${DISABLE_MODEL_SERVER:-}\" = \"true\" ]; then
echo 'Skipping service...';
exit 0;
else
exec uvicorn model_server.main:app --host 0.0.0.0 --port 9000;
fi"
restart: on-failure
environment:
- INDEX_BATCH_SIZE=${INDEX_BATCH_SIZE:-}
- MIN_THREADS_ML_MODELS=${MIN_THREADS_ML_MODELS:-}
- INDEXING_ONLY=True
# Set to debug to get more fine-grained logs
- LOG_LEVEL=${LOG_LEVEL:-info}
- CLIENT_EMBEDDING_TIMEOUT=${CLIENT_EMBEDDING_TIMEOUT:-}
# Analytics Configs
- SENTRY_DSN=${SENTRY_DSN:-}
volumes:
# Not necessary, this is just to reduce download time during startup
- indexing_huggingface_model_cache:/app/.cache/huggingface/
logging:
driver: json-file
options:
max-size: "50m"
max-file: "6"
relational_db:
image: postgres:15.2-alpine
shm_size: 1g
command: -c 'max_connections=250'
restart: unless-stopped
environment:
- POSTGRES_USER=${POSTGRES_USER:-postgres}
- POSTGRES_PASSWORD=${POSTGRES_PASSWORD:-password}
- DB_READONLY_USER=${DB_READONLY_USER:-}
- DB_READONLY_PASSWORD=${DB_READONLY_PASSWORD:-}
ports:
- "5432:5432"
volumes:
- db_volume:/var/lib/postgresql/data
# This container name cannot have an underscore in it due to Vespa expectations of the URL
index:
image: vespaengine/vespa:8.609.39
restart: unless-stopped
environment:
- VESPA_SKIP_UPGRADE_CHECK=true
ports:
- "19071:19071"
- "8081:8081"
volumes:
- vespa_volume:/opt/vespa/var
logging:
driver: json-file
options:
max-size: "50m"
max-file: "6"
nginx:
image: nginx:1.25.5-alpine
restart: unless-stopped
# nginx will immediately crash with `nginx: [emerg] host not found in upstream`
# if api_server / web_server are not up
depends_on:
- api_server
- web_server
environment:
- DOMAIN=localhost
ports:
- "${HOST_PORT_80:-80}:80"
- "${HOST_PORT:-3000}:80" # allow for localhost:3000 usage, since that is the norm
volumes:
- ../data/nginx:/etc/nginx/conf.d
logging:
driver: json-file
options:
max-size: "50m"
max-file: "6"
# The specified script waits for the api_server to start up.
# Without this we've seen issues where nginx shows no error logs but
    # does not receive any traffic
# NOTE: we have to use dos2unix to remove Carriage Return chars from the file
# in order to make this work on both Unix-like systems and windows
command: >
/bin/sh -c "dos2unix /etc/nginx/conf.d/run-nginx.sh
&& /etc/nginx/conf.d/run-nginx.sh app.conf.template"
minio:
image: minio/minio:RELEASE.2025-07-23T15-54-02Z-cpuv1
restart: unless-stopped
ports:
- "9004:9000"
- "9005:9001"
environment:
MINIO_ROOT_USER: ${MINIO_ROOT_USER:-minioadmin}
MINIO_ROOT_PASSWORD: ${MINIO_ROOT_PASSWORD:-minioadmin}
MINIO_DEFAULT_BUCKETS: ${S3_FILE_STORE_BUCKET_NAME:-onyx-file-store-bucket}
volumes:
- minio_data:/data
command: server /data --console-address ":9001"
healthcheck:
test: ["CMD", "curl", "-f", "http://localhost:9000/minio/health/live"]
interval: 30s
timeout: 20s
retries: 3
cache:
image: redis:7.4-alpine
restart: unless-stopped
ports:
- "6379:6379"
# docker silently mounts /data even without an explicit volume mount, which enables
# persistence. explicitly setting save and appendonly forces ephemeral behavior.
command: redis-server --save "" --appendonly no
# Use tmpfs to prevent creation of anonymous volumes for /data
tmpfs:
- /data
code-interpreter:
image: onyxdotapp/code-interpreter:${CODE_INTERPRETER_IMAGE_TAG:-latest}
entrypoint: ["/bin/bash", "-c"]
command: >
"
if [ \"$${CODE_INTERPRETER_BETA_ENABLED}\" = \"True\" ] || [ \"$${CODE_INTERPRETER_BETA_ENABLED}\" = \"true\" ]; then
exec bash ./entrypoint.sh code-interpreter-api;
else
echo 'Skipping code interpreter';
exec tail -f /dev/null;
fi
"
restart: unless-stopped
env_file:
- path: .env
required: false
# Below is needed for the `docker-out-of-docker` execution mode
user: root
volumes:
- /var/run/docker.sock:/var/run/docker.sock
# uncomment below + comment out the above to use the `docker-in-docker` execution mode
# privileged: true
volumes:
db_volume:
vespa_volume: # Created by the container itself
minio_data:
model_cache_huggingface:
indexing_huggingface_model_cache:
mcp_server_logs:

View file

@ -0,0 +1,317 @@
name: onyx
services:
api_server:
image: onyxdotapp/onyx-backend:${IMAGE_TAG:-latest}
build:
context: ../../backend
dockerfile: Dockerfile.cloud
command: >
/bin/sh -c "alembic -n schema_private upgrade head &&
echo \"Starting Onyx Api Server\" &&
uvicorn onyx.main:app --host 0.0.0.0 --port 8080"
depends_on:
- relational_db
- index
- cache
- inference_model_server
- minio
restart: unless-stopped
environment:
- AUTH_TYPE=${AUTH_TYPE:-oidc}
- POSTGRES_HOST=relational_db
- VESPA_HOST=index
- REDIS_HOST=cache
- MODEL_SERVER_HOST=${MODEL_SERVER_HOST:-inference_model_server}
# MinIO configuration
- S3_ENDPOINT_URL=${S3_ENDPOINT_URL:-http://minio:9000}
- S3_AWS_ACCESS_KEY_ID=${S3_AWS_ACCESS_KEY_ID:-minioadmin}
- S3_AWS_SECRET_ACCESS_KEY=${S3_AWS_SECRET_ACCESS_KEY:-minioadmin}
env_file:
- path: .env
required: false
extra_hosts:
- "host.docker.internal:host-gateway"
logging:
driver: json-file
options:
max-size: "50m"
max-file: "6"
background:
image: onyxdotapp/onyx-backend:${IMAGE_TAG:-latest}
build:
context: ../../backend
dockerfile: Dockerfile
command: /app/scripts/supervisord_entrypoint.sh
depends_on:
- relational_db
- index
- cache
- inference_model_server
- indexing_model_server
restart: unless-stopped
environment:
- USE_LIGHTWEIGHT_BACKGROUND_WORKER=${USE_LIGHTWEIGHT_BACKGROUND_WORKER:-true}
- AUTH_TYPE=${AUTH_TYPE:-oidc}
- POSTGRES_HOST=relational_db
- VESPA_HOST=index
- REDIS_HOST=cache
- MODEL_SERVER_HOST=${MODEL_SERVER_HOST:-inference_model_server}
- INDEXING_MODEL_SERVER_HOST=${INDEXING_MODEL_SERVER_HOST:-indexing_model_server}
# MinIO configuration
- S3_ENDPOINT_URL=${S3_ENDPOINT_URL:-http://minio:9000}
- S3_AWS_ACCESS_KEY_ID=${S3_AWS_ACCESS_KEY_ID:-minioadmin}
- S3_AWS_SECRET_ACCESS_KEY=${S3_AWS_SECRET_ACCESS_KEY:-minioadmin}
env_file:
- path: .env
required: false
extra_hosts:
- "host.docker.internal:host-gateway"
logging:
driver: json-file
options:
max-size: "50m"
max-file: "6"
web_server:
image: onyxdotapp/onyx-web-server:${IMAGE_TAG:-latest}
build:
context: ../../web
dockerfile: Dockerfile
args:
- NEXT_PUBLIC_POSITIVE_PREDEFINED_FEEDBACK_OPTIONS=${NEXT_PUBLIC_POSITIVE_PREDEFINED_FEEDBACK_OPTIONS:-}
- NEXT_PUBLIC_NEGATIVE_PREDEFINED_FEEDBACK_OPTIONS=${NEXT_PUBLIC_NEGATIVE_PREDEFINED_FEEDBACK_OPTIONS:-}
- NEXT_PUBLIC_DISABLE_LOGOUT=${NEXT_PUBLIC_DISABLE_LOGOUT:-}
- NEXT_PUBLIC_THEME=${NEXT_PUBLIC_THEME:-}
- NEXT_PUBLIC_FORGOT_PASSWORD_ENABLED=${NEXT_PUBLIC_FORGOT_PASSWORD_ENABLED:-}
depends_on:
- api_server
restart: unless-stopped
environment:
- INTERNAL_URL=http://api_server:8080
env_file:
- path: .env
required: false
logging:
driver: json-file
options:
max-size: "50m"
max-file: "6"
mcp_server:
image: onyxdotapp/onyx-backend:${IMAGE_TAG:-latest}
build:
context: ../../backend
dockerfile: Dockerfile
command: >
/bin/sh -c "if [ \"${MCP_SERVER_ENABLED:-}\" != \"True\" ] && [ \"${MCP_SERVER_ENABLED:-}\" != \"true\" ]; then
echo 'MCP server is disabled (MCP_SERVER_ENABLED=false), skipping...';
exit 0;
else
exec python -m onyx.mcp_server_main;
fi"
env_file:
- path: .env
required: false
depends_on:
- relational_db
- cache
restart: "no"
environment:
- POSTGRES_HOST=relational_db
- REDIS_HOST=cache
# MCP Server Configuration
- MCP_SERVER_ENABLED=${MCP_SERVER_ENABLED:-false}
- MCP_SERVER_PORT=${MCP_SERVER_PORT:-8090}
- MCP_SERVER_CORS_ORIGINS=${MCP_SERVER_CORS_ORIGINS:-}
- API_SERVER_PROTOCOL=${API_SERVER_PROTOCOL:-http}
- API_SERVER_HOST=${API_SERVER_HOST:-api_server}
- API_SERVER_PORT=${API_SERVER_PORT:-8080}
extra_hosts:
- "host.docker.internal:host-gateway"
logging:
driver: json-file
options:
max-size: "50m"
max-file: "6"
volumes:
- mcp_server_logs:/var/log/onyx
relational_db:
image: postgres:15.2-alpine
shm_size: 1g
command: -c 'max_connections=250'
restart: unless-stopped
# POSTGRES_USER and POSTGRES_PASSWORD should be set in .env file
env_file:
- path: .env
required: false
volumes:
- db_volume:/var/lib/postgresql/data
logging:
driver: json-file
options:
max-size: "50m"
max-file: "6"
inference_model_server:
image: onyxdotapp/onyx-model-server:${IMAGE_TAG:-latest}
build:
context: ../../backend
dockerfile: Dockerfile.model_server
command: >
/bin/sh -c "if [ \"${DISABLE_MODEL_SERVER:-}\" = \"True\" ] || [ \"${DISABLE_MODEL_SERVER:-}\" = \"true\" ]; then
echo 'Skipping service...';
exit 0;
else
exec uvicorn model_server.main:app --host 0.0.0.0 --port 9000;
fi"
restart: on-failure
environment:
- MIN_THREADS_ML_MODELS=${MIN_THREADS_ML_MODELS:-}
# Set to debug to get more fine-grained logs
- LOG_LEVEL=${LOG_LEVEL:-info}
volumes:
# Not necessary, this is just to reduce download time during startup
- model_cache_huggingface:/app/.cache/huggingface/
logging:
driver: json-file
options:
max-size: "50m"
max-file: "6"
indexing_model_server:
image: onyxdotapp/onyx-model-server:${IMAGE_TAG:-latest}
build:
context: ../../backend
dockerfile: Dockerfile.model_server
command: >
/bin/sh -c "if [ \"${DISABLE_MODEL_SERVER:-}\" = \"True\" ] || [ \"${DISABLE_MODEL_SERVER:-}\" = \"true\" ]; then
echo 'Skipping service...';
exit 0;
else
exec uvicorn model_server.main:app --host 0.0.0.0 --port 9000;
fi"
restart: on-failure
environment:
- MIN_THREADS_ML_MODELS=${MIN_THREADS_ML_MODELS:-}
- INDEXING_ONLY=True
# Set to debug to get more fine-grained logs
- LOG_LEVEL=${LOG_LEVEL:-info}
- VESPA_SEARCHER_THREADS=${VESPA_SEARCHER_THREADS:-1}
volumes:
# Not necessary, this is just to reduce download time during startup
- indexing_huggingface_model_cache:/app/.cache/huggingface/
logging:
driver: json-file
options:
max-size: "50m"
max-file: "6"
# This container name cannot have an underscore in it due to Vespa expectations of the URL
index:
image: vespaengine/vespa:8.609.39
restart: unless-stopped
environment:
- VESPA_SKIP_UPGRADE_CHECK=true
ports:
- "19071:19071"
- "8081:8081"
volumes:
- vespa_volume:/opt/vespa/var
logging:
driver: json-file
options:
max-size: "50m"
max-file: "6"
nginx:
image: nginx:1.25.5-alpine
restart: unless-stopped
# nginx will immediately crash with `nginx: [emerg] host not found in upstream`
# if api_server / web_server are not up
depends_on:
- api_server
- web_server
ports:
- "80:80"
- "443:443"
volumes:
- ../data/nginx:/etc/nginx/conf.d
- ../data/certbot/conf:/etc/letsencrypt
- ../data/certbot/www:/var/www/certbot
# sleep a little bit to allow the web_server / api_server to start up.
# Without this we've seen issues where nginx shows no error logs but
    # does not receive any traffic
logging:
driver: json-file
options:
max-size: "50m"
max-file: "6"
# The specified script waits for the api_server to start up.
# Without this we've seen issues where nginx shows no error logs but
    # does not receive any traffic
# NOTE: we have to use dos2unix to remove Carriage Return chars from the file
# in order to make this work on both Unix-like systems and windows
command: >
/bin/sh -c "dos2unix /etc/nginx/conf.d/run-nginx.sh
&& /etc/nginx/conf.d/run-nginx.sh app.conf.template.prod"
env_file:
- .env.nginx
environment:
# Nginx proxy timeout settings (in seconds)
- NGINX_PROXY_CONNECT_TIMEOUT=${NGINX_PROXY_CONNECT_TIMEOUT:-300}
- NGINX_PROXY_SEND_TIMEOUT=${NGINX_PROXY_SEND_TIMEOUT:-300}
- NGINX_PROXY_READ_TIMEOUT=${NGINX_PROXY_READ_TIMEOUT:-300}
# follows https://pentacent.medium.com/nginx-and-lets-encrypt-with-docker-in-less-than-5-minutes-b4b8a60d3a71
certbot:
image: certbot/certbot
restart: unless-stopped
volumes:
- ../data/certbot/conf:/etc/letsencrypt
- ../data/certbot/www:/var/www/certbot
logging:
driver: json-file
options:
max-size: "50m"
max-file: "6"
entrypoint: "/bin/sh -c 'trap exit TERM; while :; do certbot renew; sleep 12h & wait $${!}; done;'"
minio:
image: minio/minio:RELEASE.2025-07-23T15-54-02Z-cpuv1
restart: unless-stopped
environment:
MINIO_ROOT_USER: ${MINIO_ROOT_USER:-minioadmin}
MINIO_ROOT_PASSWORD: ${MINIO_ROOT_PASSWORD:-minioadmin}
MINIO_DEFAULT_BUCKETS: ${S3_FILE_STORE_BUCKET_NAME:-onyx-file-store-bucket}
volumes:
- minio_data:/data
command: server /data --console-address ":9001"
healthcheck:
test: ["CMD", "curl", "-f", "http://localhost:9000/minio/health/live"]
interval: 30s
timeout: 20s
retries: 3
cache:
image: redis:7.4-alpine
restart: unless-stopped
ports:
- "6379:6379"
# docker silently mounts /data even without an explicit volume mount, which enables
# persistence. explicitly setting save and appendonly forces ephemeral behavior.
command: redis-server --save "" --appendonly no
# Use tmpfs to prevent creation of anonymous volumes for /data
tmpfs:
- /data
volumes:
db_volume:
vespa_volume:
minio_data:
# Created by the container itself
model_cache_huggingface:
indexing_huggingface_model_cache:
mcp_server_logs:

View file

@ -0,0 +1,353 @@
name: onyx
services:
api_server:
image: onyxdotapp/onyx-backend:${IMAGE_TAG:-latest}
build:
context: ../../backend
dockerfile: Dockerfile
command: >
/bin/sh -c "alembic upgrade head &&
echo \"Starting Onyx Api Server\" &&
uvicorn onyx.main:app --host 0.0.0.0 --port 8080"
depends_on:
- relational_db
- index
- cache
- inference_model_server
- minio
restart: unless-stopped
environment:
- AUTH_TYPE=${AUTH_TYPE:-oidc}
- POSTGRES_HOST=relational_db
- VESPA_HOST=index
- REDIS_HOST=cache
- MODEL_SERVER_HOST=${MODEL_SERVER_HOST:-inference_model_server}
- USE_IAM_AUTH=${USE_IAM_AUTH}
- AWS_REGION_NAME=${AWS_REGION_NAME-}
- AWS_ACCESS_KEY_ID=${AWS_ACCESS_KEY_ID-}
- AWS_SECRET_ACCESS_KEY=${AWS_SECRET_ACCESS_KEY-}
# MinIO configuration
- S3_ENDPOINT_URL=${S3_ENDPOINT_URL:-http://minio:9000}
- S3_AWS_ACCESS_KEY_ID=${S3_AWS_ACCESS_KEY_ID:-minioadmin}
- S3_AWS_SECRET_ACCESS_KEY=${S3_AWS_SECRET_ACCESS_KEY:-minioadmin}
env_file:
- path: .env
required: false
# Uncomment the line below to use if IAM_AUTH is true and you are using iam auth for postgres
# volumes:
# - ./bundle.pem:/app/bundle.pem:ro
extra_hosts:
- "host.docker.internal:host-gateway"
logging:
driver: json-file
options:
max-size: "50m"
max-file: "6"
volumes:
# optional, only for debugging purposes
- api_server_logs:/var/log/onyx
background:
image: onyxdotapp/onyx-backend:${IMAGE_TAG:-latest}
build:
context: ../../backend
dockerfile: Dockerfile
command: /app/scripts/supervisord_entrypoint.sh
depends_on:
- relational_db
- index
- cache
- inference_model_server
- indexing_model_server
restart: unless-stopped
environment:
- USE_LIGHTWEIGHT_BACKGROUND_WORKER=${USE_LIGHTWEIGHT_BACKGROUND_WORKER:-true}
- AUTH_TYPE=${AUTH_TYPE:-oidc}
- POSTGRES_HOST=relational_db
- VESPA_HOST=index
- REDIS_HOST=cache
- MODEL_SERVER_HOST=${MODEL_SERVER_HOST:-inference_model_server}
- INDEXING_MODEL_SERVER_HOST=${INDEXING_MODEL_SERVER_HOST:-indexing_model_server}
- USE_IAM_AUTH=${USE_IAM_AUTH}
- AWS_REGION_NAME=${AWS_REGION_NAME-}
- AWS_ACCESS_KEY_ID=${AWS_ACCESS_KEY_ID-}
- AWS_SECRET_ACCESS_KEY=${AWS_SECRET_ACCESS_KEY-}
# MinIO configuration
- S3_ENDPOINT_URL=${S3_ENDPOINT_URL:-http://minio:9000}
- S3_AWS_ACCESS_KEY_ID=${S3_AWS_ACCESS_KEY_ID:-minioadmin}
- S3_AWS_SECRET_ACCESS_KEY=${S3_AWS_SECRET_ACCESS_KEY:-minioadmin}
env_file:
- path: .env
required: false
# Uncomment the line below to use if IAM_AUTH is true and you are using iam auth for postgres
# volumes:
# - ./bundle.pem:/app/bundle.pem:ro
extra_hosts:
- "host.docker.internal:host-gateway"
volumes:
- background_logs:/var/log/onyx
logging:
driver: json-file
options:
max-size: "50m"
max-file: "6"
web_server:
image: onyxdotapp/onyx-web-server:${IMAGE_TAG:-latest}
build:
context: ../../web
dockerfile: Dockerfile
args:
- NEXT_PUBLIC_POSITIVE_PREDEFINED_FEEDBACK_OPTIONS=${NEXT_PUBLIC_POSITIVE_PREDEFINED_FEEDBACK_OPTIONS:-}
- NEXT_PUBLIC_NEGATIVE_PREDEFINED_FEEDBACK_OPTIONS=${NEXT_PUBLIC_NEGATIVE_PREDEFINED_FEEDBACK_OPTIONS:-}
- NEXT_PUBLIC_DISABLE_LOGOUT=${NEXT_PUBLIC_DISABLE_LOGOUT:-}
- NEXT_PUBLIC_DEFAULT_SIDEBAR_OPEN=${NEXT_PUBLIC_DEFAULT_SIDEBAR_OPEN:-}
- NEXT_PUBLIC_THEME=${NEXT_PUBLIC_THEME:-}
depends_on:
- api_server
restart: unless-stopped
environment:
- INTERNAL_URL=http://api_server:8080
env_file:
- path: .env
required: false
logging:
driver: json-file
options:
max-size: "50m"
max-file: "6"
mcp_server:
image: onyxdotapp/onyx-backend:${IMAGE_TAG:-latest}
build:
context: ../../backend
dockerfile: Dockerfile
command: >
/bin/sh -c "if [ \"${MCP_SERVER_ENABLED:-}\" != \"True\" ] && [ \"${MCP_SERVER_ENABLED:-}\" != \"true\" ]; then
echo 'MCP server is disabled (MCP_SERVER_ENABLED=false), skipping...';
exit 0;
else
exec python -m onyx.mcp_server_main;
fi"
env_file:
- path: .env
required: false
depends_on:
- relational_db
- cache
restart: "no"
environment:
- POSTGRES_HOST=relational_db
- REDIS_HOST=cache
# MCP Server Configuration
- MCP_SERVER_ENABLED=${MCP_SERVER_ENABLED:-false}
- MCP_SERVER_PORT=${MCP_SERVER_PORT:-8090}
- MCP_SERVER_CORS_ORIGINS=${MCP_SERVER_CORS_ORIGINS:-}
- API_SERVER_PROTOCOL=${API_SERVER_PROTOCOL:-http}
- API_SERVER_HOST=${API_SERVER_HOST:-api_server}
- API_SERVER_PORT=${API_SERVER_PORT:-8080}
extra_hosts:
- "host.docker.internal:host-gateway"
logging:
driver: json-file
options:
max-size: "50m"
max-file: "6"
volumes:
- mcp_server_logs:/var/log/onyx
inference_model_server:
image: onyxdotapp/onyx-model-server:${IMAGE_TAG:-latest}
build:
context: ../../backend
dockerfile: Dockerfile.model_server
command: >
/bin/sh -c "if [ \"${DISABLE_MODEL_SERVER:-}\" = \"True\" ] || [ \"${DISABLE_MODEL_SERVER:-}\" = \"true\" ]; then
echo 'Skipping service...';
exit 0;
else
exec uvicorn model_server.main:app --host 0.0.0.0 --port 9000;
fi"
restart: on-failure
environment:
- MIN_THREADS_ML_MODELS=${MIN_THREADS_ML_MODELS:-}
# Set to debug to get more fine-grained logs
- LOG_LEVEL=${LOG_LEVEL:-info}
volumes:
# Not necessary, this is just to reduce download time during startup
- model_cache_huggingface:/app/.cache/huggingface/
# optional, only for debugging purposes
- inference_model_server_logs:/var/log/onyx
logging:
driver: json-file
options:
max-size: "50m"
max-file: "6"
indexing_model_server:
image: onyxdotapp/onyx-model-server:${IMAGE_TAG:-latest}
build:
context: ../../backend
dockerfile: Dockerfile.model_server
command: >
/bin/sh -c "if [ \"${DISABLE_MODEL_SERVER:-}\" = \"True\" ] || [ \"${DISABLE_MODEL_SERVER:-}\" = \"true\" ]; then
echo 'Skipping service...';
exit 0;
else
exec uvicorn model_server.main:app --host 0.0.0.0 --port 9000;
fi"
restart: on-failure
environment:
- MIN_THREADS_ML_MODELS=${MIN_THREADS_ML_MODELS:-}
- INDEXING_ONLY=True
# Set to debug to get more fine-grained logs
- LOG_LEVEL=${LOG_LEVEL:-info}
- VESPA_SEARCHER_THREADS=${VESPA_SEARCHER_THREADS:-1}
volumes:
# Not necessary, this is just to reduce download time during startup
- indexing_huggingface_model_cache:/app/.cache/huggingface/
# optional, only for debugging purposes
- indexing_model_server_logs:/var/log/onyx
logging:
driver: json-file
options:
max-size: "50m"
max-file: "6"
relational_db:
image: postgres:15.2-alpine
shm_size: 1g
command: -c 'max_connections=250'
restart: unless-stopped
# POSTGRES_USER and POSTGRES_PASSWORD should be set in .env file
env_file:
- path: .env
required: false
volumes:
- db_volume:/var/lib/postgresql/data
logging:
driver: json-file
options:
max-size: "50m"
max-file: "6"
# This container name cannot have an underscore in it due to Vespa expectations of the URL
index:
image: vespaengine/vespa:8.609.39
restart: unless-stopped
environment:
- VESPA_SKIP_UPGRADE_CHECK=true
ports:
- "19071:19071"
- "8081:8081"
volumes:
- vespa_volume:/opt/vespa/var
logging:
driver: json-file
options:
max-size: "50m"
max-file: "6"
nginx:
image: nginx:1.25.5-alpine
restart: unless-stopped
# nginx will immediately crash with `nginx: [emerg] host not found in upstream`
# if api_server / web_server are not up
depends_on:
- api_server
- web_server
ports:
- "80:80"
- "443:443"
volumes:
- ../data/nginx:/etc/nginx/conf.d
- ../data/sslcerts:/etc/nginx/sslcerts
logging:
driver: json-file
options:
max-size: "50m"
max-file: "6"
# The specified script waits for the api_server to start up.
# Without this we've seen issues where nginx shows no error logs but
# does not receive any traffic
# NOTE: we have to use dos2unix to remove Carriage Return chars from the file
# in order to make this work on both Unix-like systems and Windows
command: >
/bin/sh -c "dos2unix /etc/nginx/conf.d/run-nginx.sh
&& /etc/nginx/conf.d/run-nginx.sh app.conf.template.prod.no-letsencrypt"
env_file:
- .env.nginx
environment:
# Nginx proxy timeout settings (in seconds)
- NGINX_PROXY_CONNECT_TIMEOUT=${NGINX_PROXY_CONNECT_TIMEOUT:-300}
- NGINX_PROXY_SEND_TIMEOUT=${NGINX_PROXY_SEND_TIMEOUT:-300}
- NGINX_PROXY_READ_TIMEOUT=${NGINX_PROXY_READ_TIMEOUT:-300}
minio:
image: minio/minio:RELEASE.2025-07-23T15-54-02Z-cpuv1
restart: unless-stopped
environment:
MINIO_ROOT_USER: ${MINIO_ROOT_USER:-minioadmin}
MINIO_ROOT_PASSWORD: ${MINIO_ROOT_PASSWORD:-minioadmin}
MINIO_DEFAULT_BUCKETS: ${S3_FILE_STORE_BUCKET_NAME:-onyx-file-store-bucket}
volumes:
- minio_data:/data
command: server /data --console-address ":9001"
healthcheck:
test: ["CMD", "curl", "-f", "http://localhost:9000/minio/health/live"]
interval: 30s
timeout: 20s
retries: 3
cache:
image: redis:7.4-alpine
restart: unless-stopped
ports:
- "6379:6379"
# docker silently mounts /data even without an explicit volume mount, which enables
# persistence. explicitly setting save and appendonly forces ephemeral behavior.
command: redis-server --save "" --appendonly no
# Use tmpfs to prevent creation of anonymous volumes for /data
tmpfs:
- /data
code-interpreter:
image: onyxdotapp/code-interpreter:${CODE_INTERPRETER_IMAGE_TAG:-latest}
entrypoint: ["/bin/bash", "-c"]
command: >
"
if [ \"$${CODE_INTERPRETER_BETA_ENABLED}\" = \"True\" ] || [ \"$${CODE_INTERPRETER_BETA_ENABLED}\" = \"true\" ]; then
exec bash ./entrypoint.sh code-interpreter-api;
else
echo 'Skipping code interpreter';
exec tail -f /dev/null;
fi
"
restart: unless-stopped
env_file:
- path: .env
required: false
# Below is needed for the `docker-out-of-docker` execution mode
user: root
volumes:
- /var/run/docker.sock:/var/run/docker.sock
# uncomment below + comment out the above to use the `docker-in-docker` execution mode
# privileged: true
volumes:
db_volume:
vespa_volume:
minio_data:
# Created by the container itself
model_cache_huggingface:
indexing_huggingface_model_cache:
# for logs that we don't want to lose on container restarts
api_server_logs:
background_logs:
inference_model_server_logs:
indexing_model_server_logs:
mcp_server_logs:

383
docker-compose.prod.yml Normal file
View file

@ -0,0 +1,383 @@
name: onyx
services:
api_server:
image: onyxdotapp/onyx-backend:${IMAGE_TAG:-latest}
build:
context: ../../backend
dockerfile: Dockerfile
command: >
/bin/sh -c "
alembic upgrade head &&
echo \"Starting Onyx Api Server\" &&
uvicorn onyx.main:app --host 0.0.0.0 --port 8080"
depends_on:
- relational_db
- index
- cache
- minio
- inference_model_server
restart: unless-stopped
environment:
- AUTH_TYPE=${AUTH_TYPE:-oidc}
- POSTGRES_HOST=relational_db
- VESPA_HOST=index
- REDIS_HOST=cache
- MODEL_SERVER_HOST=${MODEL_SERVER_HOST:-inference_model_server}
- USE_IAM_AUTH=${USE_IAM_AUTH}
- AWS_REGION_NAME=${AWS_REGION_NAME-}
- AWS_ACCESS_KEY_ID=${AWS_ACCESS_KEY_ID-}
- AWS_SECRET_ACCESS_KEY=${AWS_SECRET_ACCESS_KEY-}
# MinIO configuration
- S3_ENDPOINT_URL=${S3_ENDPOINT_URL:-http://minio:9000}
- S3_AWS_ACCESS_KEY_ID=${S3_AWS_ACCESS_KEY_ID:-minioadmin}
- S3_AWS_SECRET_ACCESS_KEY=${S3_AWS_SECRET_ACCESS_KEY:-minioadmin}
env_file:
- path: .env
required: false
# Uncomment the line below to use if IAM_AUTH is true and you are using iam auth for postgres
# volumes:
# - ./bundle.pem:/app/bundle.pem:ro
extra_hosts:
- "host.docker.internal:host-gateway"
logging:
driver: json-file
options:
max-size: "50m"
max-file: "6"
volumes:
- api_server_logs:/var/log/onyx
background:
image: onyxdotapp/onyx-backend:${IMAGE_TAG:-latest}
build:
context: ../../backend
dockerfile: Dockerfile
command: >
/bin/sh -c "
if [ -f /etc/ssl/certs/custom-ca.crt ]; then
update-ca-certificates;
fi &&
/app/scripts/supervisord_entrypoint.sh"
depends_on:
- relational_db
- index
- cache
- inference_model_server
- indexing_model_server
restart: unless-stopped
environment:
- USE_LIGHTWEIGHT_BACKGROUND_WORKER=${USE_LIGHTWEIGHT_BACKGROUND_WORKER:-true}
- AUTH_TYPE=${AUTH_TYPE:-oidc}
- POSTGRES_HOST=relational_db
- VESPA_HOST=index
- REDIS_HOST=cache
- MODEL_SERVER_HOST=${MODEL_SERVER_HOST:-inference_model_server}
- INDEXING_MODEL_SERVER_HOST=${INDEXING_MODEL_SERVER_HOST:-indexing_model_server}
- USE_IAM_AUTH=${USE_IAM_AUTH}
- AWS_REGION_NAME=${AWS_REGION_NAME-}
- AWS_ACCESS_KEY_ID=${AWS_ACCESS_KEY_ID-}
- AWS_SECRET_ACCESS_KEY=${AWS_SECRET_ACCESS_KEY-}
# MinIO configuration
- S3_ENDPOINT_URL=${S3_ENDPOINT_URL:-http://minio:9000}
- S3_AWS_ACCESS_KEY_ID=${S3_AWS_ACCESS_KEY_ID:-minioadmin}
- S3_AWS_SECRET_ACCESS_KEY=${S3_AWS_SECRET_ACCESS_KEY:-minioadmin}
env_file:
- path: .env
required: false
# Uncomment the line below to use if IAM_AUTH is true and you are using iam auth for postgres
# volumes:
# - ./bundle.pem:/app/bundle.pem:ro
extra_hosts:
- "host.docker.internal:host-gateway"
volumes:
- background_logs:/var/log/onyx
logging:
driver: json-file
options:
max-size: "50m"
max-file: "6"
# Uncomment the following lines if you need to include a custom CA certificate
# This section enables the use of a custom CA certificate
# If present, the custom CA certificate is mounted as a volume
# The container checks for its existence and updates the system's CA certificates
# This allows for secure communication with services using custom SSL certificates
# volumes:
# # Maps to the CA_CERT_PATH environment variable in the Dockerfile
# - ${CA_CERT_PATH:-./custom-ca.crt}:/etc/ssl/certs/custom-ca.crt:ro
web_server:
image: onyxdotapp/onyx-web-server:${IMAGE_TAG:-latest}
build:
context: ../../web
dockerfile: Dockerfile
args:
- NEXT_PUBLIC_POSITIVE_PREDEFINED_FEEDBACK_OPTIONS=${NEXT_PUBLIC_POSITIVE_PREDEFINED_FEEDBACK_OPTIONS:-}
- NEXT_PUBLIC_NEGATIVE_PREDEFINED_FEEDBACK_OPTIONS=${NEXT_PUBLIC_NEGATIVE_PREDEFINED_FEEDBACK_OPTIONS:-}
- NEXT_PUBLIC_DISABLE_LOGOUT=${NEXT_PUBLIC_DISABLE_LOGOUT:-}
- NEXT_PUBLIC_THEME=${NEXT_PUBLIC_THEME:-}
- NEXT_PUBLIC_FORGOT_PASSWORD_ENABLED=${NEXT_PUBLIC_FORGOT_PASSWORD_ENABLED:-}
depends_on:
- api_server
restart: unless-stopped
environment:
- INTERNAL_URL=http://api_server:8080
env_file:
- path: .env
required: false
logging:
driver: json-file
options:
max-size: "50m"
max-file: "6"
mcp_server:
image: onyxdotapp/onyx-backend:${IMAGE_TAG:-latest}
build:
context: ../../backend
dockerfile: Dockerfile
command: >
/bin/sh -c "if [ \"${MCP_SERVER_ENABLED:-}\" != \"True\" ] && [ \"${MCP_SERVER_ENABLED:-}\" != \"true\" ]; then
echo 'MCP server is disabled (MCP_SERVER_ENABLED=false), skipping...';
exit 0;
else
exec python -m onyx.mcp_server_main;
fi"
env_file:
- path: .env
required: false
depends_on:
- relational_db
- cache
restart: "no"
environment:
- POSTGRES_HOST=relational_db
- REDIS_HOST=cache
# MCP Server Configuration
- MCP_SERVER_ENABLED=${MCP_SERVER_ENABLED:-false}
- MCP_SERVER_PORT=${MCP_SERVER_PORT:-8090}
- MCP_SERVER_CORS_ORIGINS=${MCP_SERVER_CORS_ORIGINS:-}
- API_SERVER_PROTOCOL=${API_SERVER_PROTOCOL:-http}
- API_SERVER_HOST=${API_SERVER_HOST:-api_server}
- API_SERVER_PORT=${API_SERVER_PORT:-8080}
extra_hosts:
- "host.docker.internal:host-gateway"
logging:
driver: json-file
options:
max-size: "50m"
max-file: "6"
volumes:
- mcp_server_logs:/var/log/onyx
relational_db:
image: postgres:15.2-alpine
shm_size: 1g
command: -c 'max_connections=250'
restart: unless-stopped
# POSTGRES_USER and POSTGRES_PASSWORD should be set in .env file
env_file:
- path: .env
required: false
volumes:
- db_volume:/var/lib/postgresql/data
logging:
driver: json-file
options:
max-size: "50m"
max-file: "6"
inference_model_server:
image: onyxdotapp/onyx-model-server:${IMAGE_TAG:-latest}
build:
context: ../../backend
dockerfile: Dockerfile.model_server
command: >
/bin/sh -c "if [ \"${DISABLE_MODEL_SERVER:-}\" = \"True\" ] || [ \"${DISABLE_MODEL_SERVER:-}\" = \"true\" ]; then
echo 'Skipping service...';
exit 0;
else
exec uvicorn model_server.main:app --host 0.0.0.0 --port 9000;
fi"
restart: unless-stopped
environment:
- MIN_THREADS_ML_MODELS=${MIN_THREADS_ML_MODELS:-}
# Set to debug to get more fine-grained logs
- LOG_LEVEL=${LOG_LEVEL:-info}
volumes:
# Not necessary, this is just to reduce download time during startup
- model_cache_huggingface:/app/.cache/huggingface/
# optional, only for debugging purposes
- inference_model_server_logs:/var/log/onyx
logging:
driver: json-file
options:
max-size: "50m"
max-file: "6"
indexing_model_server:
image: onyxdotapp/onyx-model-server:${IMAGE_TAG:-latest}
build:
context: ../../backend
dockerfile: Dockerfile.model_server
command: >
/bin/sh -c "if [ \"${DISABLE_MODEL_SERVER:-}\" = \"True\" ] || [ \"${DISABLE_MODEL_SERVER:-}\" = \"true\" ]; then
echo 'Skipping service...';
exit 0;
else
exec uvicorn model_server.main:app --host 0.0.0.0 --port 9000;
fi"
restart: unless-stopped
environment:
- MIN_THREADS_ML_MODELS=${MIN_THREADS_ML_MODELS:-}
- INDEXING_ONLY=True
# Set to debug to get more fine-grained logs
- LOG_LEVEL=${LOG_LEVEL:-info}
- VESPA_SEARCHER_THREADS=${VESPA_SEARCHER_THREADS:-1}
volumes:
# Not necessary, this is just to reduce download time during startup
- indexing_huggingface_model_cache:/app/.cache/huggingface/
# optional, only for debugging purposes
- indexing_model_server_logs:/var/log/onyx
logging:
driver: json-file
options:
max-size: "50m"
max-file: "6"
# This container name cannot have an underscore in it due to Vespa expectations of the URL
index:
image: vespaengine/vespa:8.609.39
restart: unless-stopped
environment:
- VESPA_SKIP_UPGRADE_CHECK=true
ports:
- "19071:19071"
- "8081:8081"
volumes:
- vespa_volume:/opt/vespa/var
logging:
driver: json-file
options:
max-size: "50m"
max-file: "6"
nginx:
image: nginx:1.25.5-alpine
restart: unless-stopped
# nginx will immediately crash with `nginx: [emerg] host not found in upstream`
# if api_server / web_server are not up
depends_on:
- api_server
- web_server
ports:
- "80:80"
- "443:443"
volumes:
- ../data/nginx:/etc/nginx/conf.d
- ../data/certbot/conf:/etc/letsencrypt
- ../data/certbot/www:/var/www/certbot
# The run-nginx.sh script (see the command below) waits for the
# web_server / api_server to start up. Without this we've seen issues
# where nginx shows no error logs but does not receive any traffic
logging:
driver: json-file
options:
max-size: "50m"
max-file: "6"
# The specified script waits for the api_server to start up.
# Without this we've seen issues where nginx shows no error logs but
# does not receive any traffic
# NOTE: we have to use dos2unix to remove Carriage Return chars from the file
# in order to make this work on both Unix-like systems and Windows
command: >
/bin/sh -c "dos2unix /etc/nginx/conf.d/run-nginx.sh
&& /etc/nginx/conf.d/run-nginx.sh app.conf.template.prod"
env_file:
- .env.nginx
environment:
# Nginx proxy timeout settings (in seconds)
- NGINX_PROXY_CONNECT_TIMEOUT=${NGINX_PROXY_CONNECT_TIMEOUT:-300}
- NGINX_PROXY_SEND_TIMEOUT=${NGINX_PROXY_SEND_TIMEOUT:-300}
- NGINX_PROXY_READ_TIMEOUT=${NGINX_PROXY_READ_TIMEOUT:-300}
# follows https://pentacent.medium.com/nginx-and-lets-encrypt-with-docker-in-less-than-5-minutes-b4b8a60d3a71
certbot:
image: certbot/certbot
restart: unless-stopped
volumes:
- ../data/certbot/conf:/etc/letsencrypt
- ../data/certbot/www:/var/www/certbot
logging:
driver: json-file
options:
max-size: "50m"
max-file: "6"
entrypoint: "/bin/sh -c 'trap exit TERM; while :; do certbot renew; sleep 12h & wait $${!}; done;'"
minio:
image: minio/minio:RELEASE.2025-07-23T15-54-02Z-cpuv1
restart: unless-stopped
environment:
MINIO_ROOT_USER: ${MINIO_ROOT_USER:-minioadmin}
MINIO_ROOT_PASSWORD: ${MINIO_ROOT_PASSWORD:-minioadmin}
MINIO_DEFAULT_BUCKETS: ${S3_FILE_STORE_BUCKET_NAME:-onyx-file-store-bucket}
volumes:
- minio_data:/data
command: server /data --console-address ":9001"
healthcheck:
test: ["CMD", "curl", "-f", "http://localhost:9000/minio/health/live"]
interval: 30s
timeout: 20s
retries: 3
cache:
image: redis:7.4-alpine
restart: unless-stopped
ports:
- "6379:6379"
# docker silently mounts /data even without an explicit volume mount, which enables
# persistence. explicitly setting save and appendonly forces ephemeral behavior.
command: redis-server --save "" --appendonly no
# Use tmpfs to prevent creation of anonymous volumes for /data
tmpfs:
- /data
code-interpreter:
image: onyxdotapp/code-interpreter:${CODE_INTERPRETER_IMAGE_TAG:-latest}
entrypoint: ["/bin/bash", "-c"]
command: >
"
if [ \"$${CODE_INTERPRETER_BETA_ENABLED}\" = \"True\" ] || [ \"$${CODE_INTERPRETER_BETA_ENABLED}\" = \"true\" ]; then
exec bash ./entrypoint.sh code-interpreter-api;
else
echo 'Skipping code interpreter';
exec tail -f /dev/null;
fi
"
restart: unless-stopped
env_file:
- path: .env
required: false
# Below is needed for the `docker-out-of-docker` execution mode
user: root
volumes:
- /var/run/docker.sock:/var/run/docker.sock
# uncomment below + comment out the above to use the `docker-in-docker` execution mode
# privileged: true
volumes:
db_volume:
vespa_volume:
minio_data:
# Created by the container itself
model_cache_huggingface:
indexing_huggingface_model_cache:
# for logs that we don't want to lose on container restarts
api_server_logs:
background_logs:
inference_model_server_logs:
indexing_model_server_logs:
mcp_server_logs:

View file

@ -0,0 +1,86 @@
# Docker service resource limits. Most are commented out by default.
# 'background' service has preset (override-able) limits due to variable resource needs.
# Uncomment and set env vars for specific service limits.
# See: https://docs.danswer.dev/deployment/resource-sizing for details.
services:
background:
deploy:
resources:
limits:
# 6 CPUs, 10GB of memory. Very generous, primarily to prevent OOM crashing the host machine.
cpus: ${BACKGROUND_CPU_LIMIT:-6}
memory: ${BACKGROUND_MEM_LIMIT:-10g}
# reservations:
# cpus: ${BACKGROUND_CPU_RESERVATION}
# memory: ${BACKGROUND_MEM_RESERVATION}
#
nginx:
deploy:
resources:
limits:
cpus: ${NGINX_CPU_LIMIT:-1}
memory: ${NGINX_MEM_LIMIT:-1g}
# reservations:
# cpus: ${NGINX_CPU_RESERVATION:}
# memory: ${NGINX_MEM_RESERVATION}
#
api_server:
deploy:
resources:
limits:
cpus: ${API_SERVER_CPU_LIMIT:-2}
memory: ${API_SERVER_MEM_LIMIT:-4g}
# reservations:
# cpus: ${API_SERVER_CPU_RESERVATION}
# memory: ${API_SERVER_MEM_RESERVATION}
#
# index:
# deploy:
# resources:
# limits:
# cpus: ${VESPA_CPU_LIMIT}
# memory: ${VESPA_MEM_LIMIT}
# reservations:
# cpus: ${VESPA_CPU_RESERVATION}
# memory: ${VESPA_MEM_RESERVATION}
#
inference_model_server:
deploy:
resources:
limits:
# cpus: ${INFERENCE_CPU_LIMIT}
memory: ${INFERENCE_MEM_LIMIT:-5g}
# reservations:
# cpus: ${INFERENCE_CPU_RESERVATION}
# memory: ${INFERENCE_MEM_RESERVATION}
#
indexing_model_server:
deploy:
resources:
limits:
# cpus: ${INDEXING_CPU_LIMIT}
memory: ${INDEXING_MEM_LIMIT:-5g}
# reservations:
# cpus: ${INDEXING_CPU_RESERVATION}
# memory: ${INDEXING_MEM_RESERVATION}
#
relational_db:
deploy:
resources:
limits:
cpus: ${POSTGRES_CPU_LIMIT:-2}
memory: ${POSTGRES_MEM_LIMIT:-4g}
# reservations:
# cpus: ${POSTGRES_CPU_RESERVATION}
# memory: ${POSTGRES_MEM_RESERVATION}
# minio:
# deploy:
# resources:
# limits:
# cpus: ${MINIO_CPU_LIMIT:-1}
# memory: ${MINIO_MEM_LIMIT:-1g}
# reservations:
# cpus: ${MINIO_CPU_RESERVATION}
# memory: ${MINIO_MEM_RESERVATION}

View file

@ -0,0 +1,266 @@
name: onyx
services:
api_server:
image: onyxdotapp/onyx-backend:${IMAGE_TAG:-latest}
build:
context: ../../backend
dockerfile: Dockerfile
command: >
/bin/sh -c "alembic upgrade head &&
echo \"Starting Onyx Api Server\" &&
uvicorn onyx.main:app --host 0.0.0.0 --port 8080"
depends_on:
- relational_db
- index
- cache
- minio
restart: unless-stopped
ports:
- "8080"
env_file:
- .env_eval
environment:
- AUTH_TYPE=disabled
- POSTGRES_HOST=relational_db
- VESPA_HOST=index
- REDIS_HOST=cache
- MODEL_SERVER_HOST=${MODEL_SERVER_HOST:-inference_model_server}
- MODEL_SERVER_PORT=${MODEL_SERVER_PORT:-}
- ENV_SEED_CONFIGURATION=${ENV_SEED_CONFIGURATION:-}
- ENABLE_PAID_ENTERPRISE_EDITION_FEATURES=True
# MinIO configuration
- S3_ENDPOINT_URL=${S3_ENDPOINT_URL:-http://minio:9000}
- S3_AWS_ACCESS_KEY_ID=${S3_AWS_ACCESS_KEY_ID:-minioadmin}
- S3_AWS_SECRET_ACCESS_KEY=${S3_AWS_SECRET_ACCESS_KEY:-minioadmin}
- S3_FILE_STORE_BUCKET_NAME=${S3_FILE_STORE_BUCKET_NAME:-}
extra_hosts:
- "host.docker.internal:host-gateway"
logging:
driver: json-file
options:
max-size: "50m"
max-file: "6"
background:
image: onyxdotapp/onyx-backend:${IMAGE_TAG:-latest}
build:
context: ../../backend
dockerfile: Dockerfile
command: /app/scripts/supervisord_entrypoint.sh
depends_on:
- relational_db
- index
- cache
restart: unless-stopped
env_file:
- .env_eval
environment:
- USE_LIGHTWEIGHT_BACKGROUND_WORKER=${USE_LIGHTWEIGHT_BACKGROUND_WORKER:-true}
- AUTH_TYPE=disabled
- POSTGRES_HOST=relational_db
- VESPA_HOST=index
- REDIS_HOST=cache
- MODEL_SERVER_HOST=${MODEL_SERVER_HOST:-inference_model_server}
- MODEL_SERVER_PORT=${MODEL_SERVER_PORT:-}
- INDEXING_MODEL_SERVER_HOST=${INDEXING_MODEL_SERVER_HOST:-indexing_model_server}
- ENV_SEED_CONFIGURATION=${ENV_SEED_CONFIGURATION:-}
- ENABLE_PAID_ENTERPRISE_EDITION_FEATURES=True
# MinIO configuration
- S3_ENDPOINT_URL=${S3_ENDPOINT_URL:-http://minio:9000}
- S3_AWS_ACCESS_KEY_ID=${S3_AWS_ACCESS_KEY_ID:-minioadmin}
- S3_AWS_SECRET_ACCESS_KEY=${S3_AWS_SECRET_ACCESS_KEY:-minioadmin}
- S3_FILE_STORE_BUCKET_NAME=${S3_FILE_STORE_BUCKET_NAME:-}
extra_hosts:
- "host.docker.internal:host-gateway"
volumes:
- log_store:/var/log/onyx
logging:
driver: json-file
options:
max-size: "50m"
max-file: "6"
web_server:
image: onyxdotapp/onyx-web-server:${IMAGE_TAG:-latest}
build:
context: ../../web
dockerfile: Dockerfile
args:
- NEXT_PUBLIC_POSITIVE_PREDEFINED_FEEDBACK_OPTIONS=${NEXT_PUBLIC_POSITIVE_PREDEFINED_FEEDBACK_OPTIONS:-}
- NEXT_PUBLIC_NEGATIVE_PREDEFINED_FEEDBACK_OPTIONS=${NEXT_PUBLIC_NEGATIVE_PREDEFINED_FEEDBACK_OPTIONS:-}
- NEXT_PUBLIC_DISABLE_LOGOUT=${NEXT_PUBLIC_DISABLE_LOGOUT:-}
- NEXT_PUBLIC_DEFAULT_SIDEBAR_OPEN=${NEXT_PUBLIC_DEFAULT_SIDEBAR_OPEN:-}
# Enterprise Edition only
- NEXT_PUBLIC_THEME=${NEXT_PUBLIC_THEME:-}
# DO NOT TURN ON unless you have EXPLICIT PERMISSION from Onyx.
- NEXT_PUBLIC_DO_NOT_USE_TOGGLE_OFF_DANSWER_POWERED=${NEXT_PUBLIC_DO_NOT_USE_TOGGLE_OFF_DANSWER_POWERED:-false}
depends_on:
- api_server
restart: unless-stopped
environment:
- INTERNAL_URL=http://api_server:8080
- WEB_DOMAIN=${WEB_DOMAIN:-}
- THEME_IS_DARK=${THEME_IS_DARK:-}
# Enterprise Edition only
- ENABLE_PAID_ENTERPRISE_EDITION_FEATURES=${ENABLE_PAID_ENTERPRISE_EDITION_FEATURES:-false}
inference_model_server:
image: onyxdotapp/onyx-model-server:${IMAGE_TAG:-latest}
build:
context: ../../backend
dockerfile: Dockerfile.model_server
command: >
/bin/sh -c "if [ \"${DISABLE_MODEL_SERVER:-}\" = \"True\" ] || [ \"${DISABLE_MODEL_SERVER:-}\" = \"true\" ]; then
echo 'Skipping service...';
exit 0;
else
exec uvicorn model_server.main:app --host 0.0.0.0 --port 9000;
fi"
restart: on-failure
environment:
- MIN_THREADS_ML_MODELS=${MIN_THREADS_ML_MODELS:-}
- LOG_LEVEL=${LOG_LEVEL:-debug}
volumes:
- inference_model_cache_huggingface:/app/.cache/huggingface/
logging:
driver: json-file
options:
max-size: "50m"
max-file: "6"
indexing_model_server:
image: onyxdotapp/onyx-model-server:${IMAGE_TAG:-latest}
build:
context: ../../backend
dockerfile: Dockerfile.model_server
command: >
/bin/sh -c "if [ \"${DISABLE_MODEL_SERVER:-}\" = \"True\" ] || [ \"${DISABLE_MODEL_SERVER:-}\" = \"true\" ]; then
echo 'Skipping service...';
exit 0;
else
exec uvicorn model_server.main:app --host 0.0.0.0 --port 9000;
fi"
restart: on-failure
environment:
- MIN_THREADS_ML_MODELS=${MIN_THREADS_ML_MODELS:-}
- INDEXING_ONLY=True
- LOG_LEVEL=${LOG_LEVEL:-debug}
- VESPA_SEARCHER_THREADS=${VESPA_SEARCHER_THREADS:-1}
volumes:
- inference_model_cache_huggingface:/app/.cache/huggingface/
logging:
driver: json-file
options:
max-size: "50m"
max-file: "6"
relational_db:
image: postgres:15.2-alpine
shm_size: 1g
command: -c 'max_connections=250'
restart: unless-stopped
environment:
- POSTGRES_USER=${POSTGRES_USER:-postgres}
- POSTGRES_PASSWORD=${POSTGRES_PASSWORD:-password}
- DB_READONLY_USER=${DB_READONLY_USER:-}
- DB_READONLY_PASSWORD=${DB_READONLY_PASSWORD:-}
ports:
- "5432"
volumes:
- db_volume:/var/lib/postgresql/data
# This container name cannot have an underscore in it due to Vespa expectations of the URL
index:
image: vespaengine/vespa:8.609.39
restart: unless-stopped
environment:
- VESPA_SKIP_UPGRADE_CHECK=true
ports:
- "19071:19071"
- "8081:8081"
volumes:
- vespa_volume:/opt/vespa/var
logging:
driver: json-file
options:
max-size: "50m"
max-file: "6"
nginx:
image: nginx:1.25.5-alpine
restart: unless-stopped
# nginx will immediately crash with `nginx: [emerg] host not found in upstream`
# if api_server / web_server are not up
depends_on:
- api_server
- web_server
environment:
- DOMAIN=localhost
ports:
- "${NGINX_PORT:-3000}:80" # allow for localhost:3000 usage, since that is the norm
volumes:
- ../data/nginx:/etc/nginx/conf.d
logging:
driver: json-file
options:
max-size: "50m"
max-file: "6"
# The specified script waits for the api_server to start up.
# Without this we've seen issues where nginx shows no error logs but
# does not receive any traffic
# NOTE: we have to use dos2unix to remove Carriage Return chars from the file
# in order to make this work on both Unix-like systems and Windows
command: >
/bin/sh -c "dos2unix /etc/nginx/conf.d/run-nginx.sh
&& /etc/nginx/conf.d/run-nginx.sh app.conf.template"
minio:
image: minio/minio:RELEASE.2025-07-23T15-54-02Z-cpuv1
restart: unless-stopped
ports:
- "9004:9000"
- "9005:9001"
environment:
MINIO_ROOT_USER: ${MINIO_ROOT_USER:-minioadmin}
MINIO_ROOT_PASSWORD: ${MINIO_ROOT_PASSWORD:-minioadmin}
MINIO_DEFAULT_BUCKETS: ${S3_FILE_STORE_BUCKET_NAME:-onyx-file-store-bucket}
volumes:
- minio_data:/data
command: server /data --console-address ":9001"
healthcheck:
test: ["CMD", "curl", "-f", "http://localhost:9000/minio/health/live"]
interval: 30s
timeout: 20s
retries: 3
cache:
image: redis:7.4-alpine
restart: unless-stopped
ports:
- "6379:6379"
# docker silently mounts /data even without an explicit volume mount, which enables
# persistence. explicitly setting save and appendonly forces ephemeral behavior.
command: redis-server --save "" --appendonly no
# Use tmpfs to prevent creation of anonymous volumes for /data
tmpfs:
- /data
volumes:
inference_model_cache_huggingface:
db_volume:
driver: local
driver_opts:
type: none
o: bind
device: ${DANSWER_POSTGRES_DATA_DIR:-./postgres_data}
vespa_volume:
driver: local
driver_opts:
type: none
o: bind
device: ${DANSWER_VESPA_DATA_DIR:-./vespa_data}
log_store: # for logs that we don't want to lose on container restarts
minio_data:

461
docker-compose.yml Normal file
View file

@ -0,0 +1,461 @@
# =============================================================================
# ONYX DOCKER COMPOSE
# =============================================================================
# This is the default configuration for Onyx. This file is fairly configurable,
# also see env.template for possible settings.
#
# PRODUCTION DEPLOYMENT CHECKLIST:
# To convert this setup to a production deployment following best practices,
# follow the checklist below. Note that there are other ways to secure the Onyx
# deployment so these are not strictly necessary for all teams.
#
# 1. SECURITY HARDENING:
# - Remove all port exposures except nginx (80/443)
# - Comment out ports for: api_server, relational_db, index, cache, minio
#
# 2. SSL/TLS SETUP:
# - Uncomment the certbot service (see below)
# - Add SSL certificate volumes to nginx service
# - Change nginx command from app.conf.template to app.conf.template.prod
#
# 3. ENVIRONMENT CONFIGURATION:
# - Replace env_file with explicit environment variables
#
# 4. AUTHENTICATION:
# - Select an authentication method like Basic, Google OAuth, OIDC, or SAML
#
# 5. CA CERTIFICATES:
# - Uncomment custom CA certificate volumes if needed
#
# 6. DOMAIN CONFIGURATION:
# - Set proper DOMAIN environment variable for nginx
# - Configure DNS and SSL certificates
#
# For a complete production setup, refer to docker-compose.prod.yml
# =============================================================================
name: onyx
services:
api_server:
image: ${ONYX_BACKEND_IMAGE:-onyxdotapp/onyx-backend:${IMAGE_TAG:-latest}}
build:
context: ../../backend
dockerfile: Dockerfile
command: >
/bin/sh -c "alembic upgrade head &&
echo \"Starting Onyx Api Server\" &&
uvicorn onyx.main:app --host 0.0.0.0 --port 8080"
# Check env.template and copy to .env for env vars
env_file:
- path: .env
required: false
depends_on:
- relational_db
- index
- cache
- inference_model_server
- minio
restart: unless-stopped
# DEV: To expose ports, either:
# 1. Use docker-compose.dev.yml: docker compose -f docker-compose.yml -f docker-compose.dev.yml up -d
# 2. Uncomment the ports below
# ports:
# - "8080:8080"
environment:
# Auth Settings
- AUTH_TYPE=${AUTH_TYPE:-basic}
- POSTGRES_HOST=${POSTGRES_HOST:-relational_db}
- VESPA_HOST=${VESPA_HOST:-index}
- REDIS_HOST=${REDIS_HOST:-cache}
- MODEL_SERVER_HOST=${MODEL_SERVER_HOST:-inference_model_server}
- S3_ENDPOINT_URL=${S3_ENDPOINT_URL:-http://minio:9000}
- S3_AWS_ACCESS_KEY_ID=${S3_AWS_ACCESS_KEY_ID:-minioadmin}
- S3_AWS_SECRET_ACCESS_KEY=${S3_AWS_SECRET_ACCESS_KEY:-minioadmin}
# PRODUCTION: Uncomment the line below to use if IAM_AUTH is true and you are using iam auth for postgres
# volumes:
# - ./bundle.pem:/app/bundle.pem:ro
extra_hosts:
- "host.docker.internal:host-gateway"
logging:
driver: json-file
options:
max-size: "50m"
max-file: "6"
# Optional, only for debugging purposes
volumes:
- api_server_logs:/var/log/onyx
background:
image: ${ONYX_BACKEND_IMAGE:-onyxdotapp/onyx-backend:${IMAGE_TAG:-latest}}
build:
context: ../../backend
dockerfile: Dockerfile
command: >
/bin/sh -c "
if [ -f /etc/ssl/certs/custom-ca.crt ]; then
update-ca-certificates;
fi &&
/app/scripts/supervisord_entrypoint.sh"
env_file:
- path: .env
required: false
depends_on:
- relational_db
- index
- cache
- inference_model_server
- indexing_model_server
restart: unless-stopped
environment:
- USE_LIGHTWEIGHT_BACKGROUND_WORKER=${USE_LIGHTWEIGHT_BACKGROUND_WORKER:-true}
- POSTGRES_HOST=${POSTGRES_HOST:-relational_db}
- VESPA_HOST=${VESPA_HOST:-index}
- REDIS_HOST=${REDIS_HOST:-cache}
- MODEL_SERVER_HOST=${MODEL_SERVER_HOST:-inference_model_server}
- INDEXING_MODEL_SERVER_HOST=${INDEXING_MODEL_SERVER_HOST:-indexing_model_server}
- S3_ENDPOINT_URL=${S3_ENDPOINT_URL:-http://minio:9000}
- S3_AWS_ACCESS_KEY_ID=${S3_AWS_ACCESS_KEY_ID:-minioadmin}
- S3_AWS_SECRET_ACCESS_KEY=${S3_AWS_SECRET_ACCESS_KEY:-minioadmin}
# PRODUCTION: Uncomment the line below to use if IAM_AUTH is true and you are using iam auth for postgres
# volumes:
# - ./bundle.pem:/app/bundle.pem:ro
extra_hosts:
- "host.docker.internal:host-gateway"
# Optional, only for debugging purposes
volumes:
- background_logs:/var/log/onyx
logging:
driver: json-file
options:
max-size: "50m"
max-file: "6"
# PRODUCTION: Uncomment the following lines if you need to include a custom CA certificate
# This section enables the use of a custom CA certificate
# If present, the custom CA certificate is mounted as a volume
# The container checks for its existence and updates the system's CA certificates
# This allows for secure communication with services using custom SSL certificates
# Optional volume mount for CA certificate
# volumes:
# # Maps to the CA_CERT_PATH environment variable in the Dockerfile
# - ${CA_CERT_PATH:-./custom-ca.crt}:/etc/ssl/certs/custom-ca.crt:ro
web_server:
image: ${ONYX_WEB_SERVER_IMAGE:-onyxdotapp/onyx-web-server:${IMAGE_TAG:-latest}}
build:
context: ../../web
dockerfile: Dockerfile
args:
- NEXT_PUBLIC_POSITIVE_PREDEFINED_FEEDBACK_OPTIONS=${NEXT_PUBLIC_POSITIVE_PREDEFINED_FEEDBACK_OPTIONS:-}
- NEXT_PUBLIC_NEGATIVE_PREDEFINED_FEEDBACK_OPTIONS=${NEXT_PUBLIC_NEGATIVE_PREDEFINED_FEEDBACK_OPTIONS:-}
- NEXT_PUBLIC_DISABLE_LOGOUT=${NEXT_PUBLIC_DISABLE_LOGOUT:-}
- NEXT_PUBLIC_DEFAULT_SIDEBAR_OPEN=${NEXT_PUBLIC_DEFAULT_SIDEBAR_OPEN:-}
- NEXT_PUBLIC_FORGOT_PASSWORD_ENABLED=${NEXT_PUBLIC_FORGOT_PASSWORD_ENABLED:-}
# Enterprise Edition only
- NEXT_PUBLIC_THEME=${NEXT_PUBLIC_THEME:-}
# DO NOT TURN ON unless you have EXPLICIT PERMISSION from Onyx.
- NEXT_PUBLIC_DO_NOT_USE_TOGGLE_OFF_DANSWER_POWERED=${NEXT_PUBLIC_DO_NOT_USE_TOGGLE_OFF_DANSWER_POWERED:-false}
- NODE_OPTIONS=${NODE_OPTIONS:-"--max-old-space-size=4096"}
env_file:
- path: .env
required: false
depends_on:
- api_server
restart: unless-stopped
environment:
- INTERNAL_URL=${INTERNAL_URL:-http://api_server:8080}
mcp_server:
image: ${ONYX_BACKEND_IMAGE:-onyxdotapp/onyx-backend:${IMAGE_TAG:-latest}}
build:
context: ../../backend
dockerfile: Dockerfile
command: >
/bin/sh -c "if [ \"${MCP_SERVER_ENABLED:-}\" != \"True\" ] && [ \"${MCP_SERVER_ENABLED:-}\" != \"true\" ]; then
echo 'MCP server is disabled (MCP_SERVER_ENABLED=false), skipping...';
exit 0;
else
exec python -m onyx.mcp_server_main;
fi"
env_file:
- path: .env
required: false
depends_on:
- relational_db
- cache
restart: "no"
environment:
- POSTGRES_HOST=${POSTGRES_HOST:-relational_db}
- REDIS_HOST=${REDIS_HOST:-cache}
# MCP Server Configuration
- MCP_SERVER_ENABLED=${MCP_SERVER_ENABLED:-false}
- MCP_SERVER_PORT=${MCP_SERVER_PORT:-8090}
- MCP_SERVER_CORS_ORIGINS=${MCP_SERVER_CORS_ORIGINS:-}
- API_SERVER_PROTOCOL=${API_SERVER_PROTOCOL:-http}
- API_SERVER_HOST=${API_SERVER_HOST:-api_server}
- API_SERVER_PORT=${API_SERVER_PORT:-8080}
extra_hosts:
- "host.docker.internal:host-gateway"
logging:
driver: json-file
options:
max-size: "50m"
max-file: "6"
# Optional, only for debugging purposes
volumes:
- mcp_server_logs:/var/log/onyx
inference_model_server:
image: ${ONYX_MODEL_SERVER_IMAGE:-onyxdotapp/onyx-model-server:${IMAGE_TAG:-latest}}
build:
context: ../../backend
dockerfile: Dockerfile.model_server
# GPU Support: Uncomment the following lines to enable GPU support
# Requires nvidia-container-toolkit to be installed on the host
# deploy:
# resources:
# reservations:
# devices:
# - driver: nvidia
# count: all
# capabilities: [gpu]
command: >
/bin/sh -c "if [ \"${DISABLE_MODEL_SERVER:-}\" = \"True\" ] || [ \"${DISABLE_MODEL_SERVER:-}\" = \"true\" ]; then
echo 'Skipping service...';
exit 0;
else
exec uvicorn model_server.main:app --host 0.0.0.0 --port 9000;
fi"
env_file:
- path: .env
required: false
restart: unless-stopped
volumes:
# Not necessary, this is just to reduce download time during startup
- model_cache_huggingface:/app/.cache/huggingface/
# Optional, only for debugging purposes
- inference_model_server_logs:/var/log/onyx
logging:
driver: json-file
options:
max-size: "50m"
max-file: "6"
indexing_model_server:
image: ${ONYX_MODEL_SERVER_IMAGE:-onyxdotapp/onyx-model-server:${IMAGE_TAG:-latest}}
build:
context: ../../backend
dockerfile: Dockerfile.model_server
# GPU Support: Uncomment the following lines to enable GPU support
# Requires nvidia-container-toolkit to be installed on the host
# deploy:
# resources:
# reservations:
# devices:
# - driver: nvidia
# count: all
# capabilities: [gpu]
command: >
/bin/sh -c "if [ \"${DISABLE_MODEL_SERVER:-}\" = \"True\" ] || [ \"${DISABLE_MODEL_SERVER:-}\" = \"true\" ]; then
echo 'Skipping service...';
exit 0;
else
exec uvicorn model_server.main:app --host 0.0.0.0 --port 9000;
fi"
env_file:
- path: .env
required: false
restart: unless-stopped
environment:
- INDEXING_ONLY=True
volumes:
# Not necessary, this is just to reduce download time during startup
- indexing_huggingface_model_cache:/app/.cache/huggingface/
# Optional, only for debugging purposes
- indexing_model_server_logs:/var/log/onyx
logging:
driver: json-file
options:
max-size: "50m"
max-file: "6"
relational_db:
image: postgres:15.2-alpine
shm_size: 1g
command: -c 'max_connections=250'
env_file:
- path: .env
required: false
restart: unless-stopped
# PRODUCTION: Override the defaults by passing in the environment variables
environment:
- POSTGRES_USER=${POSTGRES_USER:-postgres}
- POSTGRES_PASSWORD=${POSTGRES_PASSWORD:-password}
# DEV: To expose ports, either:
# 1. Use docker-compose.dev.yml: docker compose -f docker-compose.yml -f docker-compose.dev.yml up -d
# 2. Uncomment the ports below
# ports:
# - "5432:5432"
volumes:
- db_volume:/var/lib/postgresql/data
# This container name cannot have an underscore in it due to Vespa expectations of the URL
index:
image: vespaengine/vespa:8.609.39
restart: unless-stopped
env_file:
- path: .env
required: false
environment:
- VESPA_SKIP_UPGRADE_CHECK=${VESPA_SKIP_UPGRADE_CHECK:-true}
# DEV: To expose ports, either:
# 1. Use docker-compose.dev.yml: docker compose -f docker-compose.yml -f docker-compose.dev.yml up -d
# 2. Uncomment the ports below
# ports:
# - "19071:19071"
# - "8081:8081"
volumes:
- vespa_volume:/opt/vespa/var
logging:
driver: json-file
options:
max-size: "50m"
max-file: "6"
nginx:
image: nginx:1.25.5-alpine
restart: unless-stopped
# nginx will immediately crash with `nginx: [emerg] host not found in upstream`
# if api_server / web_server are not up
depends_on:
- api_server
- web_server
env_file:
- path: .env
required: false
environment:
- DOMAIN=localhost
# Nginx proxy timeout settings (in seconds)
- NGINX_PROXY_CONNECT_TIMEOUT=${NGINX_PROXY_CONNECT_TIMEOUT:-300}
- NGINX_PROXY_SEND_TIMEOUT=${NGINX_PROXY_SEND_TIMEOUT:-300}
- NGINX_PROXY_READ_TIMEOUT=${NGINX_PROXY_READ_TIMEOUT:-300}
ports:
- "${HOST_PORT_80:-80}:80"
- "${HOST_PORT:-3000}:80" # allow for localhost:3000 usage, since that is the norm
volumes:
- ../data/nginx:/etc/nginx/conf.d
# PRODUCTION: Add SSL certificate volumes for HTTPS support:
# - ../data/certbot/conf:/etc/letsencrypt
# - ../data/certbot/www:/var/www/certbot
logging:
driver: json-file
options:
max-size: "50m"
max-file: "6"
# The specified script waits for the api_server to start up.
# Without this we've seen issues where nginx shows no error logs but
# does not receive any traffic
# NOTE: we have to use dos2unix to remove Carriage Return chars from the file
# in order to make this work on both Unix-like systems and windows
# PRODUCTION: Change to app.conf.template.prod for production nginx config
command: >
/bin/sh -c "dos2unix /etc/nginx/conf.d/run-nginx.sh
&& /etc/nginx/conf.d/run-nginx.sh app.conf.template"
cache:
image: redis:7.4-alpine
restart: unless-stopped
# DEV: To expose ports, either:
# 1. Use docker-compose.dev.yml: docker compose -f docker-compose.yml -f docker-compose.dev.yml up -d
# 2. Uncomment the ports below
# ports:
# - "6379:6379"
# docker silently mounts /data even without an explicit volume mount, which enables
# persistence. explicitly setting save and appendonly forces ephemeral behavior.
command: redis-server --save "" --appendonly no
env_file:
- path: .env
required: false
# Use tmpfs to prevent creation of anonymous volumes for /data
tmpfs:
- /data
minio:
image: minio/minio:RELEASE.2025-07-23T15-54-02Z-cpuv1
restart: unless-stopped
# DEV: To expose ports, either:
# 1. Use docker-compose.dev.yml: docker compose -f docker-compose.yml -f docker-compose.dev.yml up -d
# 2. Uncomment the ports below
# ports:
# - "9004:9000"
# - "9005:9001"
env_file:
- path: .env
required: false
environment:
MINIO_ROOT_USER: ${MINIO_ROOT_USER:-minioadmin}
MINIO_ROOT_PASSWORD: ${MINIO_ROOT_PASSWORD:-minioadmin}
# Note: we've seen the default bucket creation logic not work in some cases
MINIO_DEFAULT_BUCKETS: ${S3_FILE_STORE_BUCKET_NAME:-onyx-file-store-bucket}
volumes:
- minio_data:/data
command: server /data --console-address ":9001"
healthcheck:
test: ["CMD", "curl", "-f", "http://localhost:9000/minio/health/live"]
interval: 30s
timeout: 20s
retries: 3
code-interpreter:
image: onyxdotapp/code-interpreter:${CODE_INTERPRETER_IMAGE_TAG:-latest}
entrypoint: ["/bin/bash", "-c"]
command: >
"
if [ \"$${CODE_INTERPRETER_BETA_ENABLED}\" = \"True\" ] || [ \"$${CODE_INTERPRETER_BETA_ENABLED}\" = \"true\" ]; then
exec bash ./entrypoint.sh code-interpreter-api;
else
echo 'Skipping code interpreter';
exec tail -f /dev/null;
fi
"
restart: unless-stopped
env_file:
- path: .env
required: false
# Below is needed for the `docker-out-of-docker` execution mode
user: root
volumes:
- /var/run/docker.sock:/var/run/docker.sock
# uncomment below + comment out the above to use the `docker-in-docker` execution mode
# privileged: true
# PRODUCTION: Uncomment the following certbot service for SSL certificate management
# certbot:
# image: certbot/certbot
# restart: unless-stopped
# volumes:
# - ../data/certbot/conf:/etc/letsencrypt
# - ../data/certbot/www:/var/www/certbot
# logging:
# driver: json-file
# options:
# max-size: "50m"
# max-file: "6"
# entrypoint: "/bin/sh -c 'trap exit TERM; while :; do certbot renew; sleep 12h & wait $${!}; done;'"
volumes:
# Necessary for persisting data for use
db_volume:
vespa_volume: # Created by the container itself
minio_data:
# Caches to prevent re-downloading models, not strictly necessary
model_cache_huggingface:
indexing_huggingface_model_cache:
# Logs preserved across container restarts
api_server_logs:
background_logs:
mcp_server_logs:
inference_model_server_logs:
indexing_model_server_logs:

View file

@ -0,0 +1,8 @@
# This env template shows how to configure Onyx for custom multilingual use
# Note that for most use cases it will be enough to configure Onyx multilingual purely through the UI
# See "Search Settings" -> "Advanced" for UI options.
# To use it, copy it to .env in the docker_compose directory (or the equivalent environment settings file for your deployment)
# The following is included with the user prompt. Here's one example but feel free to customize it to your needs:
LANGUAGE_HINT="IMPORTANT: ALWAYS RESPOND IN FRENCH! Even if the documents and the user query are in English, your response must be in French."
LANGUAGE_CHAT_NAMING_HINT="The name of the conversation must be in the same language as the user query."

11
env.nginx.template Normal file
View file

@ -0,0 +1,11 @@
# DOMAIN is necessary for https setup, EMAIL is optional
DOMAIN=
EMAIL=
# If using the `no-letsencrypt` setup, the below are required.
# They specify the path within /onyx/deployment/data/sslcerts directory
# where the certificate / certificate key can be found. You can either
# name your certificate / certificate key files to follow the convention
# below or adjust these to match your naming conventions.
SSL_CERT_FILE_NAME=ssl.cert
SSL_CERT_KEY_FILE_NAME=ssl.key

74
env.prod.template Normal file
View file

@ -0,0 +1,74 @@
# Fill in the values and copy the contents of this file to .env in the deployment directory.
# Some valid default values are provided where applicable, delete the variables which you don't set values for.
# This is only necessary when using the docker-compose.prod.yml compose file.
# Could be something like onyx.companyname.com
WEB_DOMAIN=http://localhost:3000
# The following are for configuring User Authentication, supported flows are:
# disabled
# basic (standard username / password)
# google_oauth (login with google/gmail account)
# oidc
# saml
AUTH_TYPE=google_oauth
# Set the values below to use with Google OAuth
GOOGLE_OAUTH_CLIENT_ID=
GOOGLE_OAUTH_CLIENT_SECRET=
SECRET=
# if using basic auth and you want to require email verification,
# then uncomment / set the following
#REQUIRE_EMAIL_VERIFICATION=true
#SMTP_USER=your-email@company.com
#SMTP_PASS=your-gmail-password
# The below are only needed if you aren't using gmail as your SMTP
#SMTP_SERVER=
#SMTP_PORT=
# When missing SMTP_USER, this is used instead
#EMAIL_FROM=
# OpenID Connect (OIDC)
#OPENID_CONFIG_URL=
# SAML config directory for OneLogin compatible setups
#SAML_CONF_DIR=
# How long before user needs to reauthenticate, default to 7 days. (cookie expiration time)
SESSION_EXPIRE_TIME_SECONDS=604800
# Use the below to specify a list of allowed user domains, only checked if user Auth is turned on
# e.g. `VALID_EMAIL_DOMAINS=example.com,example.org` will only allow users
# with an @example.com or an @example.org email
#VALID_EMAIL_DOMAINS=
# Default values here are what Postgres uses by default, feel free to change.
POSTGRES_USER=postgres
POSTGRES_PASSWORD=password
# Default values here for the read-only user for the knowledge graph and other future read-only purposes.
# Please change password!
DB_READONLY_USER=db_readonly_user
DB_READONLY_PASSWORD=password
# If setting the vespa language is required, set this ('en', 'de', etc.).
# See: https://docs.vespa.ai/en/linguistics.html
#VESPA_LANGUAGE_OVERRIDE=
# Show extra/uncommon connectors
# See https://docs.onyx.app/admins/connectors/overview for a full list of connectors
SHOW_EXTRA_CONNECTORS=False
# User File Upload Configuration
# Skip the token count threshold check (100,000 tokens) for uploaded files
# For self-hosted: set to true to skip for all users
#SKIP_USERFILE_THRESHOLD=false
# For multi-tenant: comma-separated list of tenant IDs to skip threshold
#SKIP_USERFILE_THRESHOLD_TENANT_IDS=

231
env.template Normal file
View file

@ -0,0 +1,231 @@
# Copy this file to .env so it's picked up by the docker compose yaml files
# Uncomment the values you would like to set
# No edits necessary, works out of the box
################################################################################
## COMMONLY MODIFIED CONFIGURATIONS
################################################################################
## Version of Onyx to deploy, default is latest (main built nightly)
IMAGE_TAG=latest
## Auth Settings
### https://docs.onyx.app/deployment/authentication
AUTH_TYPE=disabled
# SESSION_EXPIRE_TIME_SECONDS=
### Recommend to set this for security
# ENCRYPTION_KEY_SECRET=
### Optional
# API_KEY_HASH_ROUNDS=
### You can add a comma separated list of domains like onyx.app, only those domains will be allowed to signup/log in
# VALID_EMAIL_DOMAINS=
## Chat Configuration
# HARD_DELETE_CHATS=
## Base URL for redirects
# WEB_DOMAIN=
## Enterprise Features, requires a paid plan and licenses
ENABLE_PAID_ENTERPRISE_EDITION_FEATURES=false
## User File Upload Configuration
# Skip the token count threshold check (100,000 tokens) for uploaded files
# For self-hosted: set to true to skip for all users
# SKIP_USERFILE_THRESHOLD=false
# For multi-tenant: comma-separated list of tenant IDs to skip threshold
# SKIP_USERFILE_THRESHOLD_TENANT_IDS=
################################################################################
## SERVICES CONFIGURATIONS
################################################################################
## Database Configuration
POSTGRES_USER=postgres
POSTGRES_PASSWORD=password
# POSTGRES_DB=
# POSTGRES_DEFAULT_SCHEMA=
# POSTGRES_USE_NULL_POOL=
# POSTGRES_API_SERVER_POOL_SIZE=
# POSTGRES_API_SERVER_POOL_OVERFLOW=
# POSTGRES_IDLE_SESSIONS_TIMEOUT=
# POSTGRES_POOL_RECYCLE=
# DB_READONLY_USER=
# DB_READONLY_PASSWORD=
## MinIO/S3 Configuration
S3_ENDPOINT_URL=http://minio:9000
S3_AWS_ACCESS_KEY_ID=minioadmin
S3_AWS_SECRET_ACCESS_KEY=minioadmin
S3_FILE_STORE_BUCKET_NAME=onyx-file-store-bucket
MINIO_ROOT_USER=minioadmin
MINIO_ROOT_PASSWORD=minioadmin
## Nginx Proxy Timeout Configuration (in seconds)
## These settings control how long nginx waits for upstream servers (api_server/web_server)
## Increase these values if you experience timeout errors with long-running requests
# NGINX_PROXY_CONNECT_TIMEOUT=300
# NGINX_PROXY_SEND_TIMEOUT=300
# NGINX_PROXY_READ_TIMEOUT=300
## MCP Server Configuration
## The MCP (Model Context Protocol) server allows external MCP clients to interact with Onyx
## Set to true to enable the MCP server (disabled by default)
# MCP_SERVER_ENABLED=false
## Port for the MCP server (defaults to 8090)
# MCP_SERVER_PORT=8090
## CORS origins for MCP clients (comma-separated list)
# MCP_SERVER_CORS_ORIGINS=
## Celery Configuration
# CELERY_BROKER_POOL_LIMIT=
# CELERY_WORKER_DOCFETCHING_CONCURRENCY=
# CELERY_WORKER_DOCPROCESSING_CONCURRENCY=
# CELERY_WORKER_LIGHT_CONCURRENCY=
# CELERY_WORKER_LIGHT_PREFETCH_MULTIPLIER=
## AWS Configuration
# AWS_ACCESS_KEY_ID=
# AWS_SECRET_ACCESS_KEY=
# AWS_REGION_NAME=
# Set to true when using IAM authentication for Postgres connections.
USE_IAM_AUTH=false
################################################################################
## DEVELOPER, DEBUGGING, AND LOGGING
################################################################################
## Logging and Telemetry
LOG_LEVEL=info
LOG_ALL_MODEL_INTERACTIONS=False
LOG_ONYX_MODEL_INTERACTIONS=False
LOG_INDIVIDUAL_MODEL_TOKENS=False
# LOG_VESPA_TIMING_INFORMATION=
# LOG_ENDPOINT_LATENCY=
# LOG_POSTGRES_LATENCY=
# LOG_POSTGRES_CONN_COUNTS=
# DISABLE_TELEMETRY=
## Feature Flags
# SHOW_EXTRA_CONNECTORS=true
# DISABLE_MODEL_SERVER=false
## Analytics
# SENTRY_DSN=
## Demo/Testing
# MOCK_CONNECTOR_FILE_PATH=
################################################################################
## ADVANCED CONFIGURATIONS
################################################################################
## SlackBot Configuration
# ONYX_BOT_DISABLE_DOCS_ONLY_ANSWER=
# ONYX_BOT_FEEDBACK_VISIBILITY=
# ONYX_BOT_DISPLAY_ERROR_MSGS=
# NOTIFY_SLACKBOT_NO_ANSWER=
# ONYX_BOT_MAX_QPM=
# ONYX_BOT_MAX_WAIT_TIME=
## Advanced Auth Settings
# GOOGLE_OAUTH_CLIENT_ID=
# GOOGLE_OAUTH_CLIENT_SECRET=
# REQUIRE_EMAIL_VERIFICATION=
# SMTP_SERVER=
# SMTP_PORT=
# SMTP_USER=
# SMTP_PASS=
# ENABLE_EMAIL_INVITES=
# EMAIL_FROM=
# OAUTH_CLIENT_ID=
# OAUTH_CLIENT_SECRET=
# OPENID_CONFIG_URL=
# TRACK_EXTERNAL_IDP_EXPIRY=
# CORS_ALLOWED_ORIGIN=
# INTEGRATION_TESTS_MODE=
# JWT_PUBLIC_KEY_URL=
## Gen AI Settings
# GEN_AI_MAX_TOKENS=
# QA_TIMEOUT=
# MAX_CHUNKS_FED_TO_CHAT=
# DISABLE_LLM_QUERY_REPHRASE=
# DISABLE_LITELLM_STREAMING=
# LITELLM_EXTRA_HEADERS=
# DISABLE_LLM_DOC_RELEVANCE=
# GEN_AI_API_KEY=
# TOKEN_BUDGET_GLOBALLY_ENABLED=
# GENERATIVE_MODEL_ACCESS_CHECK_FREQ=
# LITELLM_CUSTOM_ERROR_MESSAGE_MAPPINGS=
## Query Options
# DOC_TIME_DECAY=
# HYBRID_ALPHA=
# EDIT_KEYWORD_QUERY=
# MULTILINGUAL_QUERY_EXPANSION=
# LANGUAGE_HINT=
# LANGUAGE_CHAT_NAMING_HINT=
# USE_SEMANTIC_KEYWORD_EXPANSIONS_BASIC_SEARCH=
## Model Configuration
# EMBEDDING_BATCH_SIZE=
# DOCUMENT_ENCODER_MODEL=
# DOC_EMBEDDING_DIM=
# NORMALIZE_EMBEDDINGS=
# ASYM_QUERY_PREFIX=
# ASYM_PASSAGE_PREFIX=
# DISABLE_RERANK_FOR_STREAMING=
# MODEL_SERVER_PORT=
# INDEX_BATCH_SIZE=
# MIN_THREADS_ML_MODELS=
# CLIENT_EMBEDDING_TIMEOUT=
## Indexing Configuration
# VESPA_SEARCHER_THREADS=
# ENABLED_CONNECTOR_TYPES=
# DISABLE_INDEX_UPDATE_ON_SWAP=
# DASK_JOB_CLIENT_ENABLED=
# CONTINUE_ON_CONNECTOR_FAILURE=
# EXPERIMENTAL_CHECKPOINTING_ENABLED=
# CONFLUENCE_CONNECTOR_LABELS_TO_SKIP=
# JIRA_CONNECTOR_LABELS_TO_SKIP=
# WEB_CONNECTOR_VALIDATE_URLS=
# JIRA_SERVER_API_VERSION=
# JIRA_CLOUD_API_VERSION=
# GONG_CONNECTOR_START_TIME=
# NOTION_CONNECTOR_ENABLE_RECURSIVE_PAGE_LOOKUP=
# GITHUB_CONNECTOR_BASE_URL=
# MAX_DOCUMENT_CHARS=
# MAX_FILE_SIZE_BYTES=
## OAuth Connector Configs
# EGNYTE_CLIENT_ID=
# EGNYTE_CLIENT_SECRET=
# EGNYTE_LOCALHOST_OVERRIDE=
# LINEAR_CLIENT_ID=
# LINEAR_CLIENT_SECRET=
## Miscellaneous
# ONYX_QUERY_HISTORY_TYPE=
# CHECK_TTL_MANAGEMENT_TASK_FREQUENCY_IN_HOURS=
# VESPA_LANGUAGE_OVERRIDE=
## Frontend Configs
# THEME_IS_DARK=
# NEXT_PUBLIC_POSITIVE_PREDEFINED_FEEDBACK_OPTIONS=
# NEXT_PUBLIC_NEGATIVE_PREDEFINED_FEEDBACK_OPTIONS=
# NEXT_PUBLIC_DISABLE_LOGOUT=
# NEXT_PUBLIC_DEFAULT_SIDEBAR_OPEN=
# NEXT_PUBLIC_FORGOT_PASSWORD_ENABLED=
# NEXT_PUBLIC_THEME=
# NEXT_PUBLIC_DO_NOT_USE_TOGGLE_OFF_DANSWER_POWERED=
# NEXT_PUBLIC_CUSTOM_REFRESH_URL=
## Pointer to services
POSTGRES_HOST=relational_db
VESPA_HOST=index
REDIS_HOST=cache
MODEL_SERVER_HOST=inference_model_server
INDEXING_MODEL_SERVER_HOST=indexing_model_server
INTERNAL_URL=http://api_server:8080

125
init-letsencrypt.sh Executable file
View file

@ -0,0 +1,125 @@
#!/bin/bash
# Bootstraps Let's Encrypt certificates for the dockerized nginx deployment.
# .env.nginx file must be present in the same directory as this script and
# must set DOMAIN (and optionally EMAIL)
# allexport: every variable assigned while sourcing .env.nginx is exported,
# so the docker compose invocations below can see DOMAIN / EMAIL.
set -o allexport
source .env.nginx
set +o allexport
# Function to determine correct docker compose command.
# Outputs: "docker-compose" (v1 standalone binary) or "docker compose"
#          (v2 CLI plugin) on stdout; caller captures via $(...).
# Returns: exits with 1 (from the calling subshell) if neither is available.
docker_compose_cmd() {
  if command -v docker-compose >/dev/null 2>&1; then
    echo "docker-compose"
  elif docker compose version >/dev/null 2>&1; then
    # BUGFIX: `command -v docker compose` looks for a binary literally named
    # `compose`, so it never detects the v2 plugin. Probe the subcommand
    # itself instead.
    echo "docker compose"
  else
    echo 'Error: docker-compose or docker compose is not installed.' >&2
    exit 1
  fi
}
# Assign appropriate Docker Compose command
COMPOSE_CMD=$(docker_compose_cmd)
# NOTE(review): if docker_compose_cmd exits 1 inside this command substitution,
# only the subshell dies; COMPOSE_CMD ends up empty and the script continues —
# confirm whether an explicit check is wanted here.
# Only add www to domain list if domain wasn't explicitly set as a subdomain
if [[ ! $DOMAIN == www.* ]]; then
  domains=("$DOMAIN" "www.$DOMAIN")
else
  domains=("$DOMAIN")
fi
rsa_key_size=4096
data_path="../data/certbot"
email="$EMAIL" # Adding a valid address is strongly recommended
staging=0 # Set to 1 if you're testing your setup to avoid hitting request limits
# Confirm before clobbering an existing certificate tree.
# NOTE(review): unsubscripted "$domains" expands to the FIRST array element
# only — presumably intended to show the primary domain; confirm. The same
# first-element expansion is relied upon everywhere "$domains" appears below.
if [ -d "$data_path" ]; then
  read -p "Existing data found for $domains. Continue and replace existing certificate? (y/N) " decision
  if [ "$decision" != "Y" ] && [ "$decision" != "y" ]; then
    exit
  fi
fi
# Download certbot's recommended nginx TLS options and DH parameters once.
if [ ! -e "$data_path/conf/options-ssl-nginx.conf" ] || [ ! -e "$data_path/conf/ssl-dhparams.pem" ]; then
  echo "### Downloading recommended TLS parameters ..."
  mkdir -p "$data_path/conf"
  curl -s https://raw.githubusercontent.com/certbot/certbot/master/certbot-nginx/certbot_nginx/_internal/tls_configs/options-ssl-nginx.conf > "$data_path/conf/options-ssl-nginx.conf"
  curl -s https://raw.githubusercontent.com/certbot/certbot/master/certbot/certbot/ssl-dhparams.pem > "$data_path/conf/ssl-dhparams.pem"
  echo
fi
echo "### Creating dummy certificate for $domains ..."
# A short-lived self-signed cert lets nginx start before the real one exists.
path="/etc/letsencrypt/live/$domains"
mkdir -p "$data_path/conf/live/$domains"
$COMPOSE_CMD -f docker-compose.prod.yml run --name onyx --rm --entrypoint "\
  openssl req -x509 -nodes -newkey rsa:$rsa_key_size -days 1\
    -keyout '$path/privkey.pem' \
    -out '$path/fullchain.pem' \
    -subj '/CN=localhost'" certbot
echo
echo "### Starting nginx ..."
$COMPOSE_CMD -f docker-compose.prod.yml up --force-recreate -d nginx
echo
echo "Waiting for nginx to be ready, this may take a minute..."
# Poll the health endpoint until nginx proxies a 200 from the API server.
while true; do
  # Use curl to send a request and capture the HTTP status code
  status_code=$(curl -o /dev/null -s -w "%{http_code}\n" "http://localhost/api/health")
  # Check if the status code is 200
  if [ "$status_code" -eq 200 ]; then
    break # Exit the loop
  else
    echo "Nginx is not ready yet, retrying in 5 seconds..."
    sleep 5 # Sleep for 5 seconds before retrying
  fi
done
echo "### Deleting dummy certificate for $domains ..."
$COMPOSE_CMD -f docker-compose.prod.yml run --name onyx --rm --entrypoint "\
  rm -Rf /etc/letsencrypt/live/$domains && \
  rm -Rf /etc/letsencrypt/archive/$domains && \
  rm -Rf /etc/letsencrypt/renewal/$domains.conf" certbot
echo
echo "### Requesting Let's Encrypt certificate for $domains ..."
# Join each domain in $domains into repeated -d arguments for certbot
domain_args=""
for domain in "${domains[@]}"; do
  domain_args="$domain_args -d $domain"
done
# Select appropriate email arg
case "$email" in
  "") email_arg="--register-unsafely-without-email" ;;
  *) email_arg="--email $email" ;;
esac
# Enable staging mode if needed
# NOTE(review): $staging is unquoted here; harmless for 0/1 but quote if this
# ever becomes user-supplied.
if [ $staging != "0" ]; then staging_arg="--staging"; fi
$COMPOSE_CMD -f docker-compose.prod.yml run --name onyx --rm --entrypoint "\
  certbot certonly --webroot -w /var/www/certbot \
    $staging_arg \
    $email_arg \
    $domain_args \
    --rsa-key-size $rsa_key_size \
    --agree-tos \
    --force-renewal" certbot
echo
echo "### Renaming certificate directory if needed ..."
# certbot appends -0001 style suffixes when re-issuing; move the newest
# numbered dir back to the canonical live/<domain> path nginx expects.
$COMPOSE_CMD -f docker-compose.prod.yml run --name onyx --rm --entrypoint "\
  sh -c 'for domain in $domains; do \
    numbered_dir=\$(find /etc/letsencrypt/live -maxdepth 1 -type d -name \"\$domain-00*\" | sort -r | head -n1); \
    if [ -n \"\$numbered_dir\" ]; then \
      mv \"\$numbered_dir\" /etc/letsencrypt/live/\$domain; \
    fi; \
  done'" certbot
echo "### Reloading nginx ..."
$COMPOSE_CMD -f docker-compose.prod.yml up --force-recreate -d
851
install.sh Executable file
View file

@ -0,0 +1,851 @@
#!/bin/bash
# install.sh -- guided Docker Compose installation for Onyx.
# Modes: default (install/restart), --shutdown (stop containers without
# removing them), --delete-data (remove containers, volumes, and local files).
set -e
# Expected resource requirements
EXPECTED_DOCKER_RAM_GB=10
EXPECTED_DISK_GB=32
# Parse command line arguments
SHUTDOWN_MODE=false
DELETE_DATA_MODE=false
while [[ $# -gt 0 ]]; do
case $1 in
--shutdown)
SHUTDOWN_MODE=true
shift
;;
--delete-data)
DELETE_DATA_MODE=true
shift
;;
--help|-h)
echo "Onyx Installation Script"
echo ""
echo "Usage: $0 [OPTIONS]"
echo ""
echo "Options:"
echo " --shutdown Stop (pause) Onyx containers"
echo " --delete-data Remove all Onyx data (containers, volumes, and files)"
echo " --help, -h Show this help message"
echo ""
echo "Examples:"
echo " $0 # Install Onyx"
echo " $0 --shutdown # Pause Onyx services"
echo " $0 --delete-data # Completely remove Onyx and all data"
exit 0
;;
*)
# Any unrecognized flag is a hard error rather than being ignored.
echo "Unknown option: $1"
echo "Use --help for usage information"
exit 1
;;
esac
done
# Colors for output
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
BOLD='\033[1m'
NC='\033[0m' # No Color
# Step counter variables
# CURRENT_STEP is advanced by print_step; TOTAL_STEPS only feeds the
# "Step X/Y" banner text.
CURRENT_STEP=0
TOTAL_STEPS=8
# Print colored output
# Emit a success message prefixed by the green marker/reset escape pair.
print_success() {
  printf '%b\n' "${GREEN}${NC} $1"
}
# Emit an error message prefixed by the red marker/reset escape pair.
print_error() {
  printf '%b\n' "${RED}${NC} $1"
}
# Emit an informational message prefixed by the yellow marker/reset pair.
print_info() {
  printf '%b\n' "${YELLOW}${NC} $1"
}
# Print a blank-line-padded section banner ("=== title - Step X/Y ===") and
# advance the global CURRENT_STEP counter as a side effect.
print_step() {
  CURRENT_STEP=$((CURRENT_STEP + 1))
  printf '\n'
  printf '%b\n' "${BLUE}${BOLD}=== $1 - Step ${CURRENT_STEP}/${TOTAL_STEPS} ===${NC}"
  printf '\n'
}
# Emit a warning message prefixed by the yellow marker/reset escape pair.
print_warning() {
  printf '%b\n' "${YELLOW}${NC} $1"
}
# Handle shutdown mode
# Stops (but does not remove) the compose stack created by a previous install,
# then exits. Data and containers survive for a later restart.
if [ "$SHUTDOWN_MODE" = true ]; then
echo ""
echo -e "${BLUE}${BOLD}=== Shutting down Onyx ===${NC}"
echo ""
if [ -d "onyx_data/deployment" ]; then
print_info "Stopping Onyx containers..."
# Check if docker-compose.yml exists
if [ -f "onyx_data/deployment/docker-compose.yml" ]; then
# Determine compose command
if docker compose version &> /dev/null; then
COMPOSE_CMD="docker compose"
elif command -v docker-compose &> /dev/null; then
COMPOSE_CMD="docker-compose"
else
print_error "Docker Compose not found. Cannot stop containers."
exit 1
fi
# Stop containers (without removing them)
(cd onyx_data/deployment && $COMPOSE_CMD -f docker-compose.yml stop)
# NOTE(review): under "set -e" a failing stop aborts the script before this
# check, so the else branch below is effectively dead code.
if [ $? -eq 0 ]; then
print_success "Onyx containers stopped (paused)"
else
print_error "Failed to stop containers"
exit 1
fi
else
print_warning "docker-compose.yml not found in onyx_data/deployment"
fi
else
print_warning "Onyx data directory not found. Nothing to shutdown."
fi
echo ""
print_success "Onyx shutdown complete!"
exit 0
fi
# Handle delete data mode
# Destructive path: requires the user to type the literal word DELETE, then
# removes containers + named volumes (compose down -v) and the onyx_data dir.
if [ "$DELETE_DATA_MODE" = true ]; then
echo ""
echo -e "${RED}${BOLD}=== WARNING: This will permanently delete all Onyx data ===${NC}"
echo ""
print_warning "This action will remove:"
echo " • All Onyx containers and volumes"
echo " • All downloaded files and configurations"
echo " • All user data and documents"
echo ""
read -p "Are you sure you want to continue? Type 'DELETE' to confirm: " -r
echo ""
if [ "$REPLY" != "DELETE" ]; then
print_info "Operation cancelled."
exit 0
fi
print_info "Removing Onyx containers and volumes..."
if [ -d "onyx_data/deployment" ]; then
# Check if docker-compose.yml exists
if [ -f "onyx_data/deployment/docker-compose.yml" ]; then
# Determine compose command
if docker compose version &> /dev/null; then
COMPOSE_CMD="docker compose"
elif command -v docker-compose &> /dev/null; then
COMPOSE_CMD="docker-compose"
else
print_error "Docker Compose not found. Cannot remove containers."
exit 1
fi
# Stop and remove containers with volumes
(cd onyx_data/deployment && $COMPOSE_CMD -f docker-compose.yml down -v)
# NOTE(review): under "set -e" a failing "down -v" aborts before this check;
# the else branch is unreachable in practice.
if [ $? -eq 0 ]; then
print_success "Onyx containers and volumes removed"
else
print_error "Failed to remove containers and volumes"
fi
fi
fi
print_info "Removing data directories..."
if [ -d "onyx_data" ]; then
rm -rf onyx_data
print_success "Data directories removed"
else
print_warning "No onyx_data directory found"
fi
echo ""
print_success "All Onyx data has been permanently deleted!"
exit 0
fi
# ASCII Art Banner
echo ""
echo -e "${BLUE}${BOLD}"
echo " ____ "
echo " / __ \ "
echo "| | | |_ __ _ ___ __ "
echo "| | | | '_ \| | | \ \/ / "
echo "| |__| | | | | |_| |> < "
echo " \____/|_| |_|\__, /_/\_\ "
echo " __/ | "
echo " |___/ "
echo -e "${NC}"
echo "Welcome to Onyx Installation Script"
echo "===================================="
echo ""
# User acknowledgment section
echo -e "${YELLOW}${BOLD}This script will:${NC}"
echo "1. Download deployment files for Onyx into a new 'onyx_data' directory"
echo "2. Check your system resources (Docker, memory, disk space)"
echo "3. Guide you through deployment options (version, authentication)"
echo ""
# Only prompt for acknowledgment if running interactively
# [ -t 0 ] is false when stdin is a pipe (e.g. "curl ... | bash"), in which
# case we skip the prompt instead of blocking on read.
if [ -t 0 ]; then
echo -e "${YELLOW}${BOLD}Please acknowledge and press Enter to continue...${NC}"
read -r
echo ""
else
echo -e "${YELLOW}${BOLD}Running in non-interactive mode - proceeding automatically...${NC}"
echo ""
fi
# GitHub repo base URL for the deployment files (main branch).
GITHUB_RAW_URL="https://raw.githubusercontent.com/onyx-dot-app/onyx/main/deployment/docker_compose"
# Check system requirements
print_step "Verifying Docker installation"
# Check Docker
if ! command -v docker &> /dev/null; then
print_error "Docker is not installed. Please install Docker first."
echo "Visit: https://docs.docker.com/get-docker/"
exit 1
fi
# Extract the first x.y.z version triple from "docker --version" output.
DOCKER_VERSION=$(docker --version | grep -oE '[0-9]+\.[0-9]+\.[0-9]+' | head -1)
print_success "Docker $DOCKER_VERSION is installed"
# Check Docker Compose
# Prefer the "docker compose" plugin; fall back to standalone docker-compose.
# COMPOSE_CMD is used by every compose invocation for the rest of the script.
if docker compose version &> /dev/null; then
COMPOSE_VERSION=$(docker compose version | grep -oE '[0-9]+\.[0-9]+\.[0-9]+' | head -1)
COMPOSE_CMD="docker compose"
print_success "Docker Compose $COMPOSE_VERSION is installed (plugin)"
elif command -v docker-compose &> /dev/null; then
COMPOSE_VERSION=$(docker-compose --version | grep -oE '[0-9]+\.[0-9]+\.[0-9]+' | head -1)
COMPOSE_CMD="docker-compose"
print_success "Docker Compose $COMPOSE_VERSION is installed (standalone)"
else
print_error "Docker Compose is not installed. Please install Docker Compose first."
echo "Visit: https://docs.docker.com/compose/install/"
exit 1
fi
#######################################
# Compare two dotted version numbers.
# Arguments: $1 - version A (e.g. "2.23.1"), $2 - version B
# Returns:   0 if A <= B, 1 if A > B
# Missing components default to 0 (so "2.24" == "2.24.0"). The previous
# implementation passed empty strings to "[ -lt ]" for two-part versions,
# which made the test error out; it also spawned six echo|cut pipelines.
#######################################
version_compare() {
  local version1=$1
  local version2=$2
  local v1_major v1_minor v1_patch v2_major v2_minor v2_patch
  # Split on dots with the read builtin; trailing "_" absorbs any extra parts.
  IFS=. read -r v1_major v1_minor v1_patch _ <<< "$version1"
  IFS=. read -r v2_major v2_minor v2_patch _ <<< "$version2"
  # Treat absent components as 0 so short versions compare sanely.
  v1_major=${v1_major:-0}; v1_minor=${v1_minor:-0}; v1_patch=${v1_patch:-0}
  v2_major=${v2_major:-0}; v2_minor=${v2_minor:-0}; v2_patch=${v2_patch:-0}
  # Compare major version
  if [ "$v1_major" -lt "$v2_major" ]; then
    return 0
  elif [ "$v1_major" -gt "$v2_major" ]; then
    return 1
  fi
  # Compare minor version
  if [ "$v1_minor" -lt "$v2_minor" ]; then
    return 0
  elif [ "$v1_minor" -gt "$v2_minor" ]; then
    return 1
  fi
  # Compare patch version
  if [ "$v1_patch" -le "$v2_patch" ]; then
    return 0
  else
    return 1
  fi
}
# Check Docker daemon
if ! docker info &> /dev/null; then
print_error "Docker daemon is not running. Please start Docker."
exit 1
fi
print_success "Docker daemon is running"
# Check Docker resources
print_step "Verifying Docker resources"
# Get Docker system info
# NOTE(review): DOCKER_INFO is captured but never referenced afterwards --
# candidate for removal.
DOCKER_INFO=$(docker system info 2>/dev/null)
# Try to get memory allocation (method varies by platform)
if [[ "$OSTYPE" == "darwin"* ]]; then
# macOS - Docker Desktop
# Preferred path reads Docker Desktop's settings.json via jq; otherwise fall
# back to parsing "docker system info" output.
if command -v jq &> /dev/null && [ -f ~/Library/Group\ Containers/group.com.docker/settings.json ]; then
MEMORY_MB=$(cat ~/Library/Group\ Containers/group.com.docker/settings.json 2>/dev/null | jq '.memoryMiB // 0' 2>/dev/null || echo "0")
else
# Try to get from docker system info
MEMORY_BYTES=$(docker system info 2>/dev/null | grep -i "total memory" | grep -oE '[0-9]+\.[0-9]+' | head -1)
if [ -n "$MEMORY_BYTES" ]; then
# Convert from GiB to MB (multiply by 1024)
# NOTE(review): relies on "bc" being installed; if it is missing the
# 2>/dev/null swallows the error and MEMORY_MB falls back to 0 below.
MEMORY_MB=$(echo "$MEMORY_BYTES * 1024" | bc 2>/dev/null | cut -d. -f1)
if [ -z "$MEMORY_MB" ]; then
MEMORY_MB="0"
fi
else
MEMORY_MB="0"
fi
fi
else
# Linux - Native Docker
MEMORY_KB=$(grep MemTotal /proc/meminfo | grep -oE '[0-9]+' || echo "0")
MEMORY_MB=$((MEMORY_KB / 1024))
fi
# Convert to GB for display
# MEMORY_MB=0 is the sentinel for "unknown"; it suppresses the RAM warning.
if [ "$MEMORY_MB" -gt 0 ]; then
MEMORY_GB=$((MEMORY_MB / 1024))
print_info "Docker memory allocation: ~${MEMORY_GB}GB"
else
print_warning "Could not determine Docker memory allocation"
MEMORY_MB=0
fi
# Check disk space (different commands for macOS vs Linux)
if [[ "$OSTYPE" == "darwin"* ]]; then
# macOS uses -g for GB
DISK_AVAILABLE=$(df -g . | awk 'NR==2 {print $4}')
else
# Linux uses -BG for GB
DISK_AVAILABLE=$(df -BG . | awk 'NR==2 {print $4}' | sed 's/G//')
fi
print_info "Available disk space: ${DISK_AVAILABLE}GB"
# Resource requirements check
# Warn (but allow override) when below the recommended RAM/disk minimums.
RESOURCE_WARNING=false
EXPECTED_RAM_MB=$((EXPECTED_DOCKER_RAM_GB * 1024))
if [ "$MEMORY_MB" -gt 0 ] && [ "$MEMORY_MB" -lt "$EXPECTED_RAM_MB" ]; then
print_warning "Docker has less than ${EXPECTED_DOCKER_RAM_GB}GB RAM allocated (found: ~${MEMORY_GB}GB)"
RESOURCE_WARNING=true
fi
if [ "$DISK_AVAILABLE" -lt "$EXPECTED_DISK_GB" ]; then
print_warning "Less than ${EXPECTED_DISK_GB}GB disk space available (found: ${DISK_AVAILABLE}GB)"
RESOURCE_WARNING=true
fi
if [ "$RESOURCE_WARNING" = true ]; then
echo ""
print_warning "Onyx recommends at least ${EXPECTED_DOCKER_RAM_GB}GB RAM and ${EXPECTED_DISK_GB}GB disk space for optimal performance."
echo ""
read -p "Do you want to continue anyway? (y/N): " -n 1 -r
echo ""
if [[ ! $REPLY =~ ^[Yy]$ ]]; then
print_info "Installation cancelled. Please allocate more resources and try again."
exit 1
fi
print_info "Proceeding with installation despite resource limitations..."
fi
# Create directory structure
# onyx_data/deployment holds compose + .env; onyx_data/data/nginx holds the
# nginx config mounted into the container. Idempotent: reuses existing dirs.
print_step "Creating directory structure"
if [ -d "onyx_data" ]; then
print_info "Directory structure already exists"
print_success "Using existing onyx_data directory"
else
mkdir -p onyx_data/deployment
mkdir -p onyx_data/data/nginx/local
print_success "Directory structure created"
fi
# Download all required files
# Fetches compose file, env template, nginx config/script and README from
# GitHub. Every download is fatal on failure (curl -f + exit 1).
print_step "Downloading Onyx configuration files"
print_info "This step downloads all necessary configuration files from GitHub..."
echo ""
print_info "Downloading the following files:"
echo " • docker-compose.yml - Main Docker Compose configuration"
echo " • env.template - Environment variables template"
echo " • nginx/app.conf.template - Nginx web server configuration"
echo " • nginx/run-nginx.sh - Nginx startup script"
echo " • README.md - Documentation and setup instructions"
echo ""
# Download Docker Compose file
COMPOSE_FILE="onyx_data/deployment/docker-compose.yml"
print_info "Downloading docker-compose.yml..."
if curl -fsSL -o "$COMPOSE_FILE" "${GITHUB_RAW_URL}/docker-compose.yml" 2>/dev/null; then
print_success "Docker Compose file downloaded successfully"
# Warn when the installed version is STRICTLY older than 2.24.0.
# version_compare A B means "A <= B", so the previous call
# (version_compare "$COMPOSE_VERSION" "2.24.0") also warned on exactly
# 2.24.0; inverting the arguments and negating gives "< 2.24.0".
if ! version_compare "2.24.0" "$COMPOSE_VERSION"; then
print_warning "Docker Compose version $COMPOSE_VERSION is older than 2.24.0"
echo ""
print_warning "The docker-compose.yml file uses the newer env_file format that requires Docker Compose 2.24.0 or later."
echo ""
print_info "To use this configuration with your current Docker Compose version, you have two options:"
echo ""
echo "1. Upgrade Docker Compose to version 2.24.0 or later (recommended)"
echo " Visit: https://docs.docker.com/compose/install/"
echo ""
echo "2. Manually replace all env_file sections in docker-compose.yml"
echo " Change from:"
echo " env_file:"
echo " - path: .env"
echo " required: false"
echo " To:"
echo " env_file: .env"
echo ""
print_warning "The installation will continue, but may fail if Docker Compose cannot parse the file."
echo ""
read -p "Do you want to continue anyway? (y/N): " -n 1 -r
echo ""
if [[ ! $REPLY =~ ^[Yy]$ ]]; then
print_info "Installation cancelled. Please upgrade Docker Compose or manually edit the docker-compose.yml file."
exit 1
fi
print_info "Proceeding with installation despite Docker Compose version compatibility issues..."
fi
else
print_error "Failed to download Docker Compose file"
print_info "Please ensure you have internet connection and try again"
exit 1
fi
# Download env.template file
ENV_TEMPLATE="onyx_data/deployment/env.template"
print_info "Downloading env.template..."
if curl -fsSL -o "$ENV_TEMPLATE" "${GITHUB_RAW_URL}/env.template" 2>/dev/null; then
print_success "Environment template downloaded successfully"
else
print_error "Failed to download env.template"
print_info "Please ensure you have internet connection and try again"
exit 1
fi
# Download nginx config files
NGINX_BASE_URL="https://raw.githubusercontent.com/onyx-dot-app/onyx/main/deployment/data/nginx"
# Download app.conf.template
NGINX_CONFIG="onyx_data/data/nginx/app.conf.template"
print_info "Downloading nginx configuration template..."
if curl -fsSL -o "$NGINX_CONFIG" "$NGINX_BASE_URL/app.conf.template" 2>/dev/null; then
print_success "Nginx configuration template downloaded"
else
print_error "Failed to download nginx configuration template"
print_info "Please ensure you have internet connection and try again"
exit 1
fi
# Download run-nginx.sh script
NGINX_RUN_SCRIPT="onyx_data/data/nginx/run-nginx.sh"
print_info "Downloading nginx startup script..."
if curl -fsSL -o "$NGINX_RUN_SCRIPT" "$NGINX_BASE_URL/run-nginx.sh" 2>/dev/null; then
chmod +x "$NGINX_RUN_SCRIPT"
print_success "Nginx startup script downloaded and made executable"
else
print_error "Failed to download nginx startup script"
print_info "Please ensure you have internet connection and try again"
exit 1
fi
# Download README file
README_FILE="onyx_data/README.md"
print_info "Downloading README.md..."
if curl -fsSL -o "$README_FILE" "${GITHUB_RAW_URL}/README.md" 2>/dev/null; then
print_success "README.md downloaded successfully"
else
print_error "Failed to download README.md"
print_info "Please ensure you have internet connection and try again"
exit 1
fi
# Create empty local directory marker (if needed)
touch "onyx_data/data/nginx/local/.gitkeep"
print_success "All configuration files downloaded successfully"
# Set up deployment configuration
print_step "Setting up deployment configs"
ENV_FILE="onyx_data/deployment/.env"
# Check if services are already running
# Refuse to reconfigure while containers are up -- the user must run
# "./install.sh --shutdown" first.
if [ -d "onyx_data/deployment" ] && [ -f "onyx_data/deployment/docker-compose.yml" ]; then
# Determine compose command
if docker compose version &> /dev/null; then
COMPOSE_CMD="docker compose"
elif command -v docker-compose &> /dev/null; then
COMPOSE_CMD="docker-compose"
else
COMPOSE_CMD=""
fi
if [ -n "$COMPOSE_CMD" ]; then
# Check if any containers are running
# "ps -q" prints one container ID per line; wc -l counts them.
RUNNING_CONTAINERS=$(cd onyx_data/deployment && $COMPOSE_CMD -f docker-compose.yml ps -q 2>/dev/null | wc -l)
if [ "$RUNNING_CONTAINERS" -gt 0 ]; then
print_error "Onyx services are currently running!"
echo ""
print_info "To make configuration changes, you must first shut down the services."
echo ""
print_info "Please run the following command to shut down Onyx:"
echo -e " ${BOLD}./install.sh --shutdown${NC}"
echo ""
print_info "Then run this script again to make your changes."
exit 1
fi
fi
fi
# Interactive .env setup: reuse/update an existing file, or create a new one
# from the downloaded template with version + auth choices.
if [ -f "$ENV_FILE" ]; then
print_info "Existing .env file found. What would you like to do?"
echo ""
echo "• Press Enter to restart with current configuration"
echo "• Type 'update' to update to a newer version"
echo ""
read -p "Choose an option [default: restart]: " -r
echo ""
if [ "$REPLY" = "update" ]; then
print_info "Update selected. Which tag would you like to deploy?"
echo ""
echo "• Press Enter for latest (recommended)"
echo "• Type a specific tag (e.g., v0.1.0)"
echo ""
read -p "Enter tag [default: latest]: " -r VERSION
echo ""
if [ -z "$VERSION" ]; then
VERSION="latest"
print_info "Selected: Latest version"
else
print_info "Selected: $VERSION"
fi
# Update .env file with new version
# NOTE(review): $VERSION is interpolated unescaped into the sed replacement;
# a tag containing "/" or "&" would corrupt the command -- confirm tags are
# always plain x.y.z / "latest". Also, -i.bak leaves .env.bak files behind.
print_info "Updating configuration for version $VERSION..."
if grep -q "^IMAGE_TAG=" "$ENV_FILE"; then
# Update existing IMAGE_TAG line
sed -i.bak "s/^IMAGE_TAG=.*/IMAGE_TAG=$VERSION/" "$ENV_FILE"
else
# Add IMAGE_TAG line if it doesn't exist
echo "IMAGE_TAG=$VERSION" >> "$ENV_FILE"
fi
print_success "Updated IMAGE_TAG to $VERSION in .env file"
print_success "Configuration updated for upgrade"
else
print_info "Keeping existing configuration..."
print_success "Will restart with current settings"
fi
else
print_info "No existing .env file found. Setting up new deployment..."
echo ""
# Ask for version
print_info "Which tag would you like to deploy?"
echo ""
echo "• Press Enter for latest (recommended)"
echo "• Type a specific tag (e.g., v0.1.0)"
echo ""
read -p "Enter tag [default: latest]: " -r VERSION
echo ""
if [ -z "$VERSION" ]; then
VERSION="latest"
print_info "Selected: Latest tag"
else
print_info "Selected: $VERSION"
fi
# Ask for authentication schema
echo ""
print_info "Which authentication schema would you like to set up?"
echo ""
echo "1) Basic - Username/password authentication"
echo "2) No Auth - Open access (development/testing)"
echo ""
read -p "Choose an option (1-2) [default 1]: " -r AUTH_CHOICE
echo ""
case "${AUTH_CHOICE:-1}" in
1)
AUTH_SCHEMA="basic"
print_info "Selected: Basic authentication"
;;
2)
AUTH_SCHEMA="disabled"
print_info "Selected: No authentication"
;;
*)
AUTH_SCHEMA="basic"
print_info "Invalid choice, using basic authentication"
;;
esac
# Create .env file from template
print_info "Creating .env file with your selections..."
cp "$ENV_TEMPLATE" "$ENV_FILE"
# Update IMAGE_TAG with selected version
print_info "Setting IMAGE_TAG to $VERSION..."
sed -i.bak "s/^IMAGE_TAG=.*/IMAGE_TAG=$VERSION/" "$ENV_FILE"
print_success "IMAGE_TAG set to $VERSION"
# Configure authentication settings based on selection
# The "|| true" keeps "set -e" from aborting if the template has no
# AUTH_TYPE line to rewrite.
if [ "$AUTH_SCHEMA" = "disabled" ]; then
# Disable authentication in .env file
sed -i.bak 's/^AUTH_TYPE=.*/AUTH_TYPE=disabled/' "$ENV_FILE" 2>/dev/null || true
print_success "Authentication disabled in configuration"
else
# Enable basic authentication
sed -i.bak 's/^AUTH_TYPE=.*/AUTH_TYPE=basic/' "$ENV_FILE" 2>/dev/null || true
print_success "Basic authentication enabled in configuration"
fi
print_success ".env file created with your preferences"
echo ""
print_info "IMPORTANT: The .env file has been configured with your selections."
print_info "You can customize it later for:"
echo " • Advanced authentication (OAuth, SAML, etc.)"
echo " • AI model configuration"
echo " • Domain settings (for production)"
echo ""
fi
# Return 0 when nothing appears to be listening on the given local port.
# Probes with the first available tool, preferring nc, then curl, then lsof;
# with no tool installed it warns and optimistically reports the port free.
is_port_available() {
  local port=$1
  if command -v nc &> /dev/null; then
    # A successful nc connection means the port is taken; invert the result.
    ! nc -z localhost "$port" 2>/dev/null
    return
  fi
  if command -v curl &> /dev/null; then
    # Quick HTTP probe: any response implies something is listening.
    ! curl -s --max-time 1 --connect-timeout 1 "http://localhost:$port" >/dev/null 2>&1
    return
  fi
  if command -v lsof &> /dev/null; then
    # lsof succeeds when a process holds the port; no match means it is free.
    ! lsof -i ":$port" >/dev/null 2>&1
    return
  fi
  print_warning "No port checking tools available (nc, curl, lsof). Assuming port $port is available."
  return 0
}
# Print the first free port scanning upward from $1 (default 3000).
# Falls back to echoing the starting port with status 1 if the scan
# exhausts port 65535 without finding a free one.
find_available_port() {
  local start_port=${1:-3000}
  local candidate
  for (( candidate = start_port; candidate <= 65535; candidate++ )); do
    if is_port_available "$candidate"; then
      echo "$candidate"
      return 0
    fi
  done
  # Nothing free in range: report the original port as a best-effort fallback.
  echo "$start_port"
  return 1
}
# Check for port checking tools availability
PORT_CHECK_AVAILABLE=false
if command -v nc &> /dev/null || command -v curl &> /dev/null || command -v lsof &> /dev/null; then
PORT_CHECK_AVAILABLE=true
fi
if [ "$PORT_CHECK_AVAILABLE" = false ]; then
print_warning "No port checking tools found (nc, curl, lsof). Port detection may not work properly."
print_info "Consider installing one of these tools for reliable automatic port detection."
fi
# Find available port for nginx
# NOTE(review): if no probing tool exists, is_port_available prints its
# warning to stdout inside this command substitution, so the warning text
# would be captured into AVAILABLE_PORT -- verify that path.
print_step "Checking for available ports"
AVAILABLE_PORT=$(find_available_port 3000)
if [ "$AVAILABLE_PORT" != "3000" ]; then
print_info "Port 3000 is in use, found available port: $AVAILABLE_PORT"
else
print_info "Port 3000 is available"
fi
# Export HOST_PORT for docker-compose
export HOST_PORT=$AVAILABLE_PORT
print_success "Using port $AVAILABLE_PORT for nginx"
# Determine if we're using the latest tag
# Read IMAGE_TAG from .env file and remove any quotes or whitespace
CURRENT_IMAGE_TAG=$(grep "^IMAGE_TAG=" "$ENV_FILE" | head -1 | cut -d'=' -f2 | tr -d ' "'"'"'')
if [ "$CURRENT_IMAGE_TAG" = "latest" ]; then
USE_LATEST=true
print_info "Using 'latest' tag - will force pull and recreate containers"
else
USE_LATEST=false
fi
# Pull Docker images with reduced output
print_step "Pulling Docker images"
print_info "This may take several minutes depending on your internet connection..."
echo ""
print_info "Downloading Docker images (this may take a while)..."
(cd onyx_data/deployment && $COMPOSE_CMD -f docker-compose.yml pull --quiet)
# NOTE(review): under "set -e" a failing pull aborts before this check, so
# the else branch is effectively dead code.
if [ $? -eq 0 ]; then
print_success "Docker images downloaded successfully"
else
print_error "Failed to download Docker images"
exit 1
fi
# Start services
print_step "Starting Onyx services"
print_info "Launching containers..."
echo ""
# "latest" deployments force a fresh pull + recreate so a restart actually
# picks up newly published images; pinned tags just start normally.
if [ "$USE_LATEST" = true ]; then
print_info "Force pulling latest images and recreating containers..."
(cd onyx_data/deployment && $COMPOSE_CMD -f docker-compose.yml up -d --pull always --force-recreate)
else
(cd onyx_data/deployment && $COMPOSE_CMD -f docker-compose.yml up -d)
fi
if [ $? -ne 0 ]; then
print_error "Failed to start Onyx services"
exit 1
fi
# Monitor container startup
# Waits 10 seconds with a progress bar, then inspects every container for
# restart loops / stuck states and aborts with log instructions on trouble.
print_step "Verifying container health"
print_info "Waiting for containers to initialize (10 seconds)..."
# Progress bar for waiting
for i in {1..10}; do
printf "\r[%-10s] %d%%" $(printf '#%.0s' $(seq 1 $((i*10/10)))) $((i*100/10))
sleep 1
done
echo ""
echo ""
# Check for restart loops
print_info "Checking container health status..."
RESTART_ISSUES=false
CONTAINERS=$(cd onyx_data/deployment && $COMPOSE_CMD -f docker-compose.yml ps -q 2>/dev/null)
for CONTAINER in $CONTAINERS; do
# Strip the leading "/" and the compose project prefix from the inspected
# name. The previous one-liner, sed 's/^\/\|^onyx_data_deployment_//g',
# was broken twice over: \| alternation is a GNU extension (BSD/macOS sed
# fails on it), and with /g the ^ anchor cannot match again after the first
# substitution, so the project prefix was never removed even on GNU sed.
# Two chained expressions are portable and apply both strips in order.
CONTAINER_NAME=$(docker inspect --format '{{.Name}}' "$CONTAINER" | sed -e 's|^/||' -e 's/^onyx_data_deployment_//')
RESTART_COUNT=$(docker inspect --format '{{.RestartCount}}' "$CONTAINER")
STATUS=$(docker inspect --format '{{.State.Status}}' "$CONTAINER")
if [ "$STATUS" = "running" ]; then
# More than 2 restarts in the first ~10s is treated as a crash loop.
if [ "$RESTART_COUNT" -gt 2 ]; then
print_error "$CONTAINER_NAME is in a restart loop (restarted $RESTART_COUNT times)"
RESTART_ISSUES=true
else
print_success "$CONTAINER_NAME is healthy"
fi
elif [ "$STATUS" = "restarting" ]; then
print_error "$CONTAINER_NAME is stuck restarting"
RESTART_ISSUES=true
else
print_warning "$CONTAINER_NAME status: $STATUS"
fi
done
echo ""
if [ "$RESTART_ISSUES" = true ]; then
print_error "Some containers are experiencing issues!"
echo ""
print_info "Please check the logs for more information:"
echo " (cd onyx_data/deployment && $COMPOSE_CMD -f docker-compose.yml logs)"
echo ""
print_info "If the issue persists, please contact: founders@onyx.app"
echo "Include the output of the logs command in your message."
exit 1
fi
# Health check function
# Poll the web frontend once per second until it answers with a success or
# redirect HTTP status, for at most 10 minutes.
# Globals:  HOST_PORT (read; defaults to 3000)
# Outputs:  an animated single-line progress indicator while waiting
# Returns:  0 once the service responds, 1 after the timeout
check_onyx_health() {
  local port=${HOST_PORT:-3000}
  local max_attempts=600 # one probe per second for 10 minutes
  local attempt=1
  local http_code dots minutes seconds
  print_info "Checking Onyx service health..."
  echo "Containers are healthy, waiting for database migrations and service initialization to finish."
  echo ""
  while [ "$attempt" -le "$max_attempts" ]; do
    http_code=$(curl -s -o /dev/null -w "%{http_code}" "http://localhost:$port")
    # Any 2xx-success or redirect code we care about means the app is up.
    case "$http_code" in
      200|301|302|303|307|308) return 0 ;;
    esac
    # Animated 1-2-3 dot cycle, space-padded so the line width stays fixed.
    if [ $((attempt % 3)) -eq 0 ]; then
      dots=". "
    elif [ $((attempt % 3)) -eq 1 ]; then
      dots=".. "
    else
      dots="..."
    fi
    minutes=$((attempt / 60))
    seconds=$((attempt % 60))
    # \033[K clears to end-of-line so shrinking text leaves no residue.
    printf "\r\033[KChecking Onyx service%s (%dm %ds elapsed)" "$dots" "$minutes" "$seconds"
    sleep 1
    attempt=$((attempt + 1))
  done
  echo "" # terminate the progress line before reporting timeout
  return 1
}
# Success message
# Containers are up at this point; the banner below distinguishes "service
# verified responding" from "still initializing / health check timed out".
print_step "Installation Complete!"
print_success "All containers are running successfully!"
echo ""
# Run health check
if check_onyx_health; then
echo ""
echo -e "${GREEN}${BOLD}━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━${NC}"
echo -e "${GREEN}${BOLD} 🎉 Onyx service is ready! 🎉${NC}"
echo -e "${GREEN}${BOLD}━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━${NC}"
else
print_warning "Health check timed out after 10 minutes"
print_info "Containers are running, but the web service may still be initializing (or something went wrong)"
echo ""
echo -e "${YELLOW}${BOLD}━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━${NC}"
echo -e "${YELLOW}${BOLD} ⚠️ Onyx containers are running ⚠️${NC}"
echo -e "${YELLOW}${BOLD}━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━${NC}"
fi
echo ""
print_info "Access Onyx at:"
echo -e " ${BOLD}http://localhost:${HOST_PORT}${NC}"
echo ""
print_info "If authentication is enabled, you can create your admin account here:"
echo " • Visit http://localhost:${HOST_PORT}/auth/signup to create your admin account"
echo " • The first user created will automatically have admin privileges"
echo ""
print_info "Refer to the README in the onyx_data directory for more information."
echo ""
print_info "For help or issues, contact: founders@onyx.app"
echo ""