x-shared-env: &shared-api-worker-env

services:
  # API service
  api:
    image: langgenius/dify-api:0.14.1
    restart: always
    environment:
      # Use the shared environment variables.
      <<: *shared-api-worker-env
      # Startup mode, 'api' starts the API server.
      MODE: api
      SENTRY_DSN: ${API_SENTRY_DSN:-}
      SENTRY_TRACES_SAMPLE_RATE: ${API_SENTRY_TRACES_SAMPLE_RATE:-1.0}
      SENTRY_PROFILES_SAMPLE_RATE: ${API_SENTRY_PROFILES_SAMPLE_RATE:-1.0}
    depends_on:
      - db
      - redis
    volumes:
      # Mount the storage directory to the container, for storing user files.
      - ./volumes/app/storage:/app/api/storage
    networks:
      - ssrf_proxy_network
      - default
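  # Note: `<<: *shared-api-worker-env` is a YAML merge key. It copies every
  # entry of the mapping anchored as `x-shared-env` at the top of this file
  # into the service's `environment` block, so api and worker share one set
  # of variables without duplicating them.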
 
  # worker service
  # The Celery worker for processing the queue.
  worker:
    image: langgenius/dify-api:0.14.1
    restart: always
    environment:
      # Use the shared environment variables.
      <<: *shared-api-worker-env
      # Startup mode, 'worker' starts the Celery worker for processing the queue.
      MODE: worker
      SENTRY_DSN: ${API_SENTRY_DSN:-}
      SENTRY_TRACES_SAMPLE_RATE: ${API_SENTRY_TRACES_SAMPLE_RATE:-1.0}
      SENTRY_PROFILES_SAMPLE_RATE: ${API_SENTRY_PROFILES_SAMPLE_RATE:-1.0}
    depends_on:
      - db
      - redis
    volumes:
      # Mount the storage directory to the container, for storing user files.
      - ./volumes/app/storage:/app/api/storage
    networks:
      - ssrf_proxy_network
      - default
 
  # Frontend web application.
  web:
    image: langgenius/dify-web:0.14.1
    restart: always
    environment:
      CONSOLE_API_URL: ${CONSOLE_API_URL:-}
      APP_API_URL: ${APP_API_URL:-}
      SENTRY_DSN: ${WEB_SENTRY_DSN:-}
      NEXT_TELEMETRY_DISABLED: ${NEXT_TELEMETRY_DISABLED:-0}
      TEXT_GENERATION_TIMEOUT_MS: ${TEXT_GENERATION_TIMEOUT_MS:-60000}
      CSP_WHITELIST: ${CSP_WHITELIST:-}
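  # Note: `${VAR:-default}` is standard Compose variable substitution; each
  # value is read from the shell environment or the .env file next to this
  # compose file, falling back to the default after `:-` when unset. For
  # example, putting `CONSOLE_API_URL=https://console.example.com` in .env
  # (a hypothetical value) overrides the empty default above. A default may
  # itself begin with a dash, e.g. `${OPENSEARCH_MEMLOCK_SOFT:--1}` further
  # down defaults to -1.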
 
  # The postgres database.
  db:
    image: postgres:15-alpine
    restart: always
    environment:
      PGUSER: ${PGUSER:-postgres}
      POSTGRES_PASSWORD: ${POSTGRES_PASSWORD:-difyai123456}
      POSTGRES_DB: ${POSTGRES_DB:-dify}
      PGDATA: ${PGDATA:-/var/lib/postgresql/data/pgdata}
    command: >
      postgres -c 'max_connections=${POSTGRES_MAX_CONNECTIONS:-100}'
               -c 'shared_buffers=${POSTGRES_SHARED_BUFFERS:-128MB}'
               -c 'work_mem=${POSTGRES_WORK_MEM:-4MB}'
               -c 'maintenance_work_mem=${POSTGRES_MAINTENANCE_WORK_MEM:-64MB}'
               -c 'effective_cache_size=${POSTGRES_EFFECTIVE_CACHE_SIZE:-4096MB}'
    volumes:
      - ./volumes/db/data:/var/lib/postgresql/data
    healthcheck:
      test: ['CMD', 'pg_isready']
      interval: 1s
      timeout: 3s
      retries: 30
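  # The postgres flags above can be tuned from .env without editing this
  # file; a minimal sketch for a host with more RAM (illustrative values,
  # not a recommendation):
  #   POSTGRES_MAX_CONNECTIONS=200
  #   POSTGRES_SHARED_BUFFERS=1GB
  #   POSTGRES_EFFECTIVE_CACHE_SIZE=8192MB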
 
  # The redis cache.
  redis:
    image: redis:6-alpine
    restart: always
    environment:
      REDISCLI_AUTH: ${REDIS_PASSWORD:-difyai123456}
    volumes:
      # Mount the redis data directory to the container.
      - ./volumes/redis/data:/data
    # Set the redis password when starting the redis server.
    command: redis-server --requirepass ${REDIS_PASSWORD:-difyai123456}
    healthcheck:
      test: ['CMD', 'redis-cli', 'ping']
 
  # The DifySandbox
  sandbox:
    image: langgenius/dify-sandbox:0.2.10
    restart: always
    environment:
      # The DifySandbox configurations
      # Make sure to change this key to a strong one for your deployment.
      # You can generate a strong key using `openssl rand -base64 42`.
      API_KEY: ${SANDBOX_API_KEY:-dify-sandbox}
      GIN_MODE: ${SANDBOX_GIN_MODE:-release}
      WORKER_TIMEOUT: ${SANDBOX_WORKER_TIMEOUT:-15}
      ENABLE_NETWORK: ${SANDBOX_ENABLE_NETWORK:-true}
      HTTP_PROXY: ${SANDBOX_HTTP_PROXY:-http://ssrf_proxy:3128}
      HTTPS_PROXY: ${SANDBOX_HTTPS_PROXY:-http://ssrf_proxy:3128}
      SANDBOX_PORT: ${SANDBOX_PORT:-8194}
    volumes:
      - ./volumes/sandbox/dependencies:/dependencies
    healthcheck:
      test: ['CMD', 'curl', '-f', 'http://localhost:8194/health']
    networks:
      - ssrf_proxy_network
 
  # ssrf_proxy server
  # for more information, please refer to
  # https://docs.dify.ai/learn-more/faq/install-faq#id-18.-why-is-ssrf_proxy-needed
  ssrf_proxy:
    image: ubuntu/squid:latest
    restart: always
    volumes:
      - ./ssrf_proxy/squid.conf.template:/etc/squid/squid.conf.template
      - ./ssrf_proxy/docker-entrypoint.sh:/docker-entrypoint-mount.sh
    entrypoint:
      [
        'sh',
        '-c',
        "cp /docker-entrypoint-mount.sh /docker-entrypoint.sh && sed -i 's/\r$$//' /docker-entrypoint.sh && chmod +x /docker-entrypoint.sh && /docker-entrypoint.sh",
      ]
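    # Note: `$$` is how Compose escapes a literal `$`, so the shell actually
    # runs `sed -i 's/\r$//'`, stripping Windows CR line endings from the
    # mounted entrypoint script before executing it. The nginx service below
    # uses the same trick.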
 
    environment:
      # Please modify these squid env vars to fit your network environment.
      HTTP_PORT: ${SSRF_HTTP_PORT:-3128}
      COREDUMP_DIR: ${SSRF_COREDUMP_DIR:-/var/spool/squid}
      REVERSE_PROXY_PORT: ${SSRF_REVERSE_PROXY_PORT:-8194}
      SANDBOX_HOST: ${SSRF_SANDBOX_HOST:-sandbox}
      SANDBOX_PORT: ${SANDBOX_PORT:-8194}
    networks:
      - ssrf_proxy_network
      - default
 
  # Certbot service
  # use `docker-compose --profile certbot up` to start the certbot service.
  certbot:
    image: certbot/certbot
    profiles:
      - certbot
    volumes:
      - ./volumes/certbot/conf:/etc/letsencrypt
      - ./volumes/certbot/www:/var/www/html
      - ./volumes/certbot/logs:/var/log/letsencrypt
      - ./volumes/certbot/conf/live:/etc/letsencrypt/live
      - ./certbot/update-cert.template.txt:/update-cert.template.txt
      - ./certbot/docker-entrypoint.sh:/docker-entrypoint.sh
    environment:
      - CERTBOT_EMAIL=${CERTBOT_EMAIL}
      - CERTBOT_DOMAIN=${CERTBOT_DOMAIN}
      - CERTBOT_OPTIONS=${CERTBOT_OPTIONS:-}
    entrypoint: ['/docker-entrypoint.sh']
    command: ['tail', '-f', '/dev/null']
 
  # The nginx reverse proxy.
  # used for reverse proxying the API service and Web service.
  nginx:
    image: nginx:latest
    restart: always
    volumes:
      - ./nginx/nginx.conf.template:/etc/nginx/nginx.conf.template
      - ./nginx/proxy.conf.template:/etc/nginx/proxy.conf.template
      - ./nginx/https.conf.template:/etc/nginx/https.conf.template
      - ./nginx/conf.d:/etc/nginx/conf.d
      - ./nginx/docker-entrypoint.sh:/docker-entrypoint-mount.sh
      - ./nginx/ssl:/etc/ssl # cert dir (legacy)
      - ./volumes/certbot/conf/live:/etc/letsencrypt/live # cert dir (with certbot container)
      - ./volumes/certbot/conf:/etc/letsencrypt
      - ./volumes/certbot/www:/var/www/html
    entrypoint:
      [
        'sh',
        '-c',
        "cp /docker-entrypoint-mount.sh /docker-entrypoint.sh && sed -i 's/\r$$//' /docker-entrypoint.sh && chmod +x /docker-entrypoint.sh && /docker-entrypoint.sh",
      ]
    environment:
      NGINX_SERVER_NAME: ${NGINX_SERVER_NAME:-_}
      NGINX_HTTPS_ENABLED: ${NGINX_HTTPS_ENABLED:-false}
      NGINX_SSL_PORT: ${NGINX_SSL_PORT:-443}
      NGINX_PORT: ${NGINX_PORT:-80}
      # You're required to add your own SSL certificates/keys to the `./nginx/ssl` directory
      # and modify the env vars below in .env if NGINX_HTTPS_ENABLED is true.
      NGINX_SSL_CERT_FILENAME: ${NGINX_SSL_CERT_FILENAME:-dify.crt}
      NGINX_SSL_CERT_KEY_FILENAME: ${NGINX_SSL_CERT_KEY_FILENAME:-dify.key}
      NGINX_SSL_PROTOCOLS: ${NGINX_SSL_PROTOCOLS:-TLSv1.1 TLSv1.2 TLSv1.3}
      NGINX_WORKER_PROCESSES: ${NGINX_WORKER_PROCESSES:-auto}
      NGINX_CLIENT_MAX_BODY_SIZE: ${NGINX_CLIENT_MAX_BODY_SIZE:-15M}
      NGINX_KEEPALIVE_TIMEOUT: ${NGINX_KEEPALIVE_TIMEOUT:-65}
      NGINX_PROXY_READ_TIMEOUT: ${NGINX_PROXY_READ_TIMEOUT:-3600s}
      NGINX_PROXY_SEND_TIMEOUT: ${NGINX_PROXY_SEND_TIMEOUT:-3600s}
      NGINX_ENABLE_CERTBOT_CHALLENGE: ${NGINX_ENABLE_CERTBOT_CHALLENGE:-false}
      CERTBOT_DOMAIN: ${CERTBOT_DOMAIN:-}
    depends_on:
      - api
      - web
    ports:
      - '${EXPOSE_NGINX_PORT:-80}:${NGINX_PORT:-80}'
      - '${EXPOSE_NGINX_SSL_PORT:-443}:${NGINX_SSL_PORT:-443}'
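  # A minimal HTTPS setup via .env, assuming certificate files are already
  # placed in ./nginx/ssl (filenames below are the defaults from above):
  #   NGINX_HTTPS_ENABLED=true
  #   NGINX_SSL_CERT_FILENAME=dify.crt
  #   NGINX_SSL_CERT_KEY_FILENAME=dify.key
  #   EXPOSE_NGINX_SSL_PORT=443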
 
  # The TiDB vector store.
  # For production use, please refer to https://github.com/pingcap/tidb-docker-compose
  tidb:
    image: pingcap/tidb:v8.4.0
    profiles:
      - tidb
    command:
      - --store=unistore
    restart: always
 
  # The Weaviate vector store.
  weaviate:
    image: semitechnologies/weaviate:1.19.0
    profiles:
      - ''
      - weaviate
    restart: always
    volumes:
      # Mount the Weaviate data directory to the container.
      - ./volumes/weaviate:/var/lib/weaviate
    environment:
      # The Weaviate configurations
      # You can refer to the [Weaviate](https://weaviate.io/developers/weaviate/config-refs/env-vars) documentation for more information.
      PERSISTENCE_DATA_PATH: ${WEAVIATE_PERSISTENCE_DATA_PATH:-/var/lib/weaviate}
      QUERY_DEFAULTS_LIMIT: ${WEAVIATE_QUERY_DEFAULTS_LIMIT:-25}
      AUTHENTICATION_ANONYMOUS_ACCESS_ENABLED: ${WEAVIATE_AUTHENTICATION_ANONYMOUS_ACCESS_ENABLED:-false}
      DEFAULT_VECTORIZER_MODULE: ${WEAVIATE_DEFAULT_VECTORIZER_MODULE:-none}
      CLUSTER_HOSTNAME: ${WEAVIATE_CLUSTER_HOSTNAME:-node1}
      AUTHENTICATION_APIKEY_ENABLED: ${WEAVIATE_AUTHENTICATION_APIKEY_ENABLED:-true}
      AUTHENTICATION_APIKEY_ALLOWED_KEYS: ${WEAVIATE_AUTHENTICATION_APIKEY_ALLOWED_KEYS:-WVF5YThaHlkYwhGUSmCRgsX3tD5ngdN8pkih}
      AUTHENTICATION_APIKEY_USERS: ${WEAVIATE_AUTHENTICATION_APIKEY_USERS:-hello@dify.ai}
      AUTHORIZATION_ADMINLIST_ENABLED: ${WEAVIATE_AUTHORIZATION_ADMINLIST_ENABLED:-true}
      AUTHORIZATION_ADMINLIST_USERS: ${WEAVIATE_AUTHORIZATION_ADMINLIST_USERS:-hello@dify.ai}
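  # Note: the extra empty '' profile above marks weaviate as the stack's
  # default vector store (typically selected when VECTOR_STORE is left
  # unset in .env). The other stores below each sit behind a single named
  # profile and start only when that profile is activated, e.g.
  # `docker compose --profile qdrant up -d`.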
 
  # Qdrant vector store.
  # (if used, you need to set VECTOR_STORE to qdrant in the api & worker service.)
  qdrant:
    image: langgenius/qdrant:v1.7.3
    profiles:
      - qdrant
    restart: always
    volumes:
      - ./volumes/qdrant:/qdrant/storage
    environment:
      QDRANT_API_KEY: ${QDRANT_API_KEY:-difyai123456}
 
  # The Couchbase vector store.
  couchbase-server:
    build: ./couchbase-server
    profiles:
      - couchbase
    restart: always
    environment:
      - CLUSTER_NAME=dify_search
      - COUCHBASE_ADMINISTRATOR_USERNAME=${COUCHBASE_USER:-Administrator}
      - COUCHBASE_ADMINISTRATOR_PASSWORD=${COUCHBASE_PASSWORD:-password}
      - COUCHBASE_BUCKET=${COUCHBASE_BUCKET_NAME:-Embeddings}
      - COUCHBASE_BUCKET_RAMSIZE=512
      - COUCHBASE_RAM_SIZE=2048
      - COUCHBASE_EVENTING_RAM_SIZE=512
      - COUCHBASE_INDEX_RAM_SIZE=512
      - COUCHBASE_FTS_RAM_SIZE=1024
    hostname: couchbase-server
    container_name: couchbase-server
    working_dir: /opt/couchbase
    stdin_open: true
    tty: true
    entrypoint: [""]
    command: sh -c "/opt/couchbase/init/init-cbserver.sh"
    volumes:
      - ./volumes/couchbase/data:/opt/couchbase/var/lib/couchbase/data
    healthcheck:
      # ensure bucket was created before proceeding
      test: [ "CMD-SHELL", "curl -s -f -u Administrator:password http://localhost:8091/pools/default/buckets | grep -q '\\[{' || exit 1" ]
      interval: 10s
      retries: 10
      start_period: 30s
      timeout: 10s
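  # Caveat: the healthcheck above hardcodes `Administrator:password`; if you
  # override COUCHBASE_USER / COUCHBASE_PASSWORD in .env, adjust the curl
  # credentials to match, or the check will keep failing.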
 
  # The pgvector vector database.
  pgvector:
    image: pgvector/pgvector:pg16
    profiles:
      - pgvector
    restart: always
    environment:
      PGUSER: ${PGVECTOR_PGUSER:-postgres}
      # The password for the default postgres user.
      POSTGRES_PASSWORD: ${PGVECTOR_POSTGRES_PASSWORD:-difyai123456}
      # The name of the default postgres database.
      POSTGRES_DB: ${PGVECTOR_POSTGRES_DB:-dify}
      # postgres data directory
      PGDATA: ${PGVECTOR_PGDATA:-/var/lib/postgresql/data/pgdata}
    volumes:
      - ./volumes/pgvector/data:/var/lib/postgresql/data
    healthcheck:
      test: ['CMD', 'pg_isready']
      interval: 1s
      timeout: 3s
      retries: 30
 
  # pgvecto-rs vector store
  pgvecto-rs:
    image: tensorchord/pgvecto-rs:pg16-v0.3.0
    profiles:
      - pgvecto-rs
    restart: always
    environment:
      PGUSER: ${PGVECTOR_PGUSER:-postgres}
      # The password for the default postgres user.
      POSTGRES_PASSWORD: ${PGVECTOR_POSTGRES_PASSWORD:-difyai123456}
      # The name of the default postgres database.
      POSTGRES_DB: ${PGVECTOR_POSTGRES_DB:-dify}
      # postgres data directory
      PGDATA: ${PGVECTOR_PGDATA:-/var/lib/postgresql/data/pgdata}
    volumes:
      - ./volumes/pgvecto_rs/data:/var/lib/postgresql/data
    healthcheck:
      test: ['CMD', 'pg_isready']
      interval: 1s
      timeout: 3s
      retries: 30
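  # Note: pgvector and pgvecto-rs read the same PGVECTOR_* variables, so a
  # single set of credentials in .env covers whichever of the two profiles
  # you activate; their data lives in separate host directories.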
 
  # Chroma vector database
  chroma:
    image: ghcr.io/chroma-core/chroma:0.5.20
    profiles:
      - chroma
    restart: always
    volumes:
      - ./volumes/chroma:/chroma/chroma
    environment:
      CHROMA_SERVER_AUTHN_CREDENTIALS: ${CHROMA_SERVER_AUTHN_CREDENTIALS:-difyai123456}
      CHROMA_SERVER_AUTHN_PROVIDER: ${CHROMA_SERVER_AUTHN_PROVIDER:-chromadb.auth.token_authn.TokenAuthenticationServerProvider}
      IS_PERSISTENT: ${CHROMA_IS_PERSISTENT:-TRUE}
 
  # OceanBase vector database
  oceanbase:
    image: quay.io/oceanbase/oceanbase-ce:4.3.3.0-100000142024101215
    profiles:
      - oceanbase
    restart: always
    volumes:
      - ./volumes/oceanbase/data:/root/ob
      - ./volumes/oceanbase/conf:/root/.obd/cluster
      - ./volumes/oceanbase/init.d:/root/boot/init.d
    environment:
      OB_MEMORY_LIMIT: ${OCEANBASE_MEMORY_LIMIT:-6G}
      OB_SYS_PASSWORD: ${OCEANBASE_VECTOR_PASSWORD:-difyai123456}
      OB_TENANT_PASSWORD: ${OCEANBASE_VECTOR_PASSWORD:-difyai123456}
      OB_CLUSTER_NAME: ${OCEANBASE_CLUSTER_NAME:-difyai}
      OB_SERVER_IP: '127.0.0.1'
 
  # Oracle vector database
  oracle:
    image: container-registry.oracle.com/database/free:latest
    profiles:
      - oracle
    restart: always
    volumes:
      - source: oradata
        type: volume
        target: /opt/oracle/oradata
      - ./startupscripts:/opt/oracle/scripts/startup
    environment:
      ORACLE_PWD: ${ORACLE_PWD:-Dify123456}
      ORACLE_CHARACTERSET: ${ORACLE_CHARACTERSET:-AL32UTF8}
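  # Note: the first mount uses the long volume syntax to attach the named
  # volume `oradata` (declared under the top-level `volumes:` key at the
  # bottom of this file) rather than a bind mount, presumably to avoid
  # host-filesystem permission issues with Oracle's data files.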
 
  # Milvus vector database services
  etcd:
    container_name: milvus-etcd
    image: quay.io/coreos/etcd:v3.5.5
    profiles:
      - milvus
    environment:
      ETCD_AUTO_COMPACTION_MODE: ${ETCD_AUTO_COMPACTION_MODE:-revision}
      ETCD_AUTO_COMPACTION_RETENTION: ${ETCD_AUTO_COMPACTION_RETENTION:-1000}
      ETCD_QUOTA_BACKEND_BYTES: ${ETCD_QUOTA_BACKEND_BYTES:-4294967296}
      ETCD_SNAPSHOT_COUNT: ${ETCD_SNAPSHOT_COUNT:-50000}
    volumes:
      - ./volumes/milvus/etcd:/etcd
    command: etcd -advertise-client-urls=http://127.0.0.1:2379 -listen-client-urls http://0.0.0.0:2379 --data-dir /etcd
    healthcheck:
      test: ['CMD', 'etcdctl', 'endpoint', 'health']
      interval: 30s
      timeout: 20s
      retries: 3
    networks:
      - milvus
 
  minio:
    container_name: milvus-minio
    image: minio/minio:RELEASE.2023-03-20T20-16-18Z
    profiles:
      - milvus
    environment:
      MINIO_ACCESS_KEY: ${MINIO_ACCESS_KEY:-minioadmin}
      MINIO_SECRET_KEY: ${MINIO_SECRET_KEY:-minioadmin}
    volumes:
      - ./volumes/milvus/minio:/minio_data
    command: minio server /minio_data --console-address ":9001"
    healthcheck:
      test: ['CMD', 'curl', '-f', 'http://localhost:9000/minio/health/live']
      interval: 30s
      timeout: 20s
      retries: 3
    networks:
      - milvus
 
  milvus-standalone:
    container_name: milvus-standalone
    image: milvusdb/milvus:v2.3.1
    profiles:
      - milvus
    command: ['milvus', 'run', 'standalone']
    environment:
      ETCD_ENDPOINTS: ${ETCD_ENDPOINTS:-etcd:2379}
      MINIO_ADDRESS: ${MINIO_ADDRESS:-minio:9000}
      common.security.authorizationEnabled: ${MILVUS_AUTHORIZATION_ENABLED:-true}
    volumes:
      - ./volumes/milvus/milvus:/var/lib/milvus
    healthcheck:
      test: ['CMD', 'curl', '-f', 'http://localhost:9091/healthz']
      interval: 30s
      start_period: 90s
      timeout: 20s
      retries: 3
    depends_on:
      - etcd
      - minio
    ports:
      - 19530:19530
      - 9091:9091
    networks:
      - milvus
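  # All three Milvus services share the `milvus` profile and the private
  # `milvus` network, so one command brings up the whole trio:
  #   docker compose --profile milvus up -d
  # (remember to also point VECTOR_STORE at milvus in the api & worker
  # environment, as with qdrant above.)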
 
  # Opensearch vector database
  opensearch:
    container_name: opensearch
    image: opensearchproject/opensearch:latest
    profiles:
      - opensearch
    environment:
      discovery.type: ${OPENSEARCH_DISCOVERY_TYPE:-single-node}
      bootstrap.memory_lock: ${OPENSEARCH_BOOTSTRAP_MEMORY_LOCK:-true}
      OPENSEARCH_JAVA_OPTS: -Xms${OPENSEARCH_JAVA_OPTS_MIN:-512m} -Xmx${OPENSEARCH_JAVA_OPTS_MAX:-1024m}
      OPENSEARCH_INITIAL_ADMIN_PASSWORD: ${OPENSEARCH_INITIAL_ADMIN_PASSWORD:-Qazwsxedc!@#123}
    ulimits:
      memlock:
        soft: ${OPENSEARCH_MEMLOCK_SOFT:--1}
        hard: ${OPENSEARCH_MEMLOCK_HARD:--1}
      nofile:
        soft: ${OPENSEARCH_NOFILE_SOFT:-65536}
        hard: ${OPENSEARCH_NOFILE_HARD:-65536}
    volumes:
      - ./volumes/opensearch/data:/usr/share/opensearch/data
    networks:
      - opensearch-net
 
  opensearch-dashboards:
    container_name: opensearch-dashboards
    image: opensearchproject/opensearch-dashboards:latest
    profiles:
      - opensearch
    environment:
      OPENSEARCH_HOSTS: '["https://opensearch:9200"]'
    volumes:
      - ./volumes/opensearch/opensearch_dashboards.yml:/usr/share/opensearch-dashboards/config/opensearch_dashboards.yml
    networks:
      - opensearch-net
    depends_on:
      - opensearch
 
  # MyScale vector database
  myscale:
    container_name: myscale
    image: myscale/myscaledb:1.6.4
    profiles:
      - myscale
    restart: always
    tty: true
    volumes:
      - ./volumes/myscale/data:/var/lib/clickhouse
      - ./volumes/myscale/log:/var/log/clickhouse-server
      - ./volumes/myscale/config/users.d/custom_users_config.xml:/etc/clickhouse-server/users.d/custom_users_config.xml
    ports:
      - ${MYSCALE_PORT:-8123}:${MYSCALE_PORT:-8123}
 
  # https://www.elastic.co/guide/en/elasticsearch/reference/current/settings.html
  # https://www.elastic.co/guide/en/elasticsearch/reference/current/docker.html#docker-prod-prerequisites
  elasticsearch:
    image: docker.elastic.co/elasticsearch/elasticsearch:8.14.3
    container_name: elasticsearch
    profiles:
      - elasticsearch
    restart: always
    volumes:
      - dify_es01_data:/usr/share/elasticsearch/data
    environment:
      ELASTIC_PASSWORD: ${ELASTICSEARCH_PASSWORD:-elastic}
      cluster.name: dify-es-cluster
      node.name: dify-es0
      discovery.type: single-node
      xpack.license.self_generated.type: trial
      xpack.security.enabled: 'true'
      xpack.security.enrollment.enabled: 'false'
      xpack.security.http.ssl.enabled: 'false'
    ports:
      - ${ELASTICSEARCH_PORT:-9200}:9200
    healthcheck:
      test: ['CMD', 'curl', '-s', 'http://localhost:9200/_cluster/health?pretty']
      interval: 30s
      timeout: 10s
      retries: 50
 
  # https://www.elastic.co/guide/en/kibana/current/docker.html
  # https://www.elastic.co/guide/en/kibana/current/settings.html
  kibana:
    image: docker.elastic.co/kibana/kibana:8.14.3
    container_name: kibana
    profiles:
      - elasticsearch
    depends_on:
      - elasticsearch
    restart: always
    environment:
      XPACK_ENCRYPTEDSAVEDOBJECTS_ENCRYPTIONKEY: d1a66dfd-c4d3-4a0a-8290-2abcb83ab3aa
      NO_PROXY: localhost,127.0.0.1,elasticsearch,kibana
      XPACK_SECURITY_ENABLED: 'true'
      XPACK_SECURITY_ENROLLMENT_ENABLED: 'false'
      XPACK_SECURITY_HTTP_SSL_ENABLED: 'false'
      XPACK_FLEET_ISAIRGAPPED: 'true'
      I18N_LOCALE: zh-CN
      SERVER_PORT: '5601'
      ELASTICSEARCH_HOSTS: http://elasticsearch:9200
    ports:
      - ${KIBANA_PORT:-5601}:5601
    healthcheck:
      test: ['CMD-SHELL', 'curl -s http://localhost:5601 >/dev/null || exit 1']
      interval: 30s
      timeout: 10s
      retries: 3
 
  # The unstructured ETL service.
  # (if used, you need to set ETL_TYPE to Unstructured in the api & worker service.)
  unstructured:
    image: downloads.unstructured.io/unstructured-io/unstructured-api:latest
    profiles:
      - unstructured
    restart: always
    volumes:
      - ./volumes/unstructured:/app/data
 
networks:
  # Private network shared by sandbox, api and ssrf_proxy; it cannot reach the outside.
  ssrf_proxy_network:
    driver: bridge
    internal: true
  milvus:
    driver: bridge
  opensearch-net:
    driver: bridge
    internal: true
 
volumes:
  oradata:
  dify_es01_data:
 
 