# docker-compose-template.yaml

x-shared-env: &shared-api-worker-env
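# The &shared-api-worker-env anchor above is merged into the api, worker and
# plugin_daemon services below via `<<: *shared-api-worker-env`. In this template
# the anchor body is intentionally empty; it is expected to be filled with the
# shared environment variables (typically generated from .env) before deployment.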
services:
  # API service
  api:
    image: langgenius/dify-api:1.0.0-beta.1
    restart: always
    environment:
      # Use the shared environment variables.
      <<: *shared-api-worker-env
      # Startup mode, 'api' starts the API server.
      MODE: api
      CONSOLE_API_URL: ${CONSOLE_API_URL:-http://localhost:5001}
      CONSOLE_WEB_URL: ${CONSOLE_WEB_URL:-http://localhost:3000}
      SENTRY_DSN: ${API_SENTRY_DSN:-}
      SENTRY_TRACES_SAMPLE_RATE: ${API_SENTRY_TRACES_SAMPLE_RATE:-1.0}
      SENTRY_PROFILES_SAMPLE_RATE: ${API_SENTRY_PROFILES_SAMPLE_RATE:-1.0}
      PLUGIN_API_KEY: ${PLUGIN_DAEMON_KEY:-lYkiYYT6owG+71oLerGzA7GXCgOT++6ovaezWAjpCjf+Sjc3ZtU+qUEi}
      PLUGIN_API_URL: ${PLUGIN_DAEMON_URL:-http://plugin_daemon:5002}
      PLUGIN_MAX_PACKAGE_SIZE: ${PLUGIN_MAX_PACKAGE_SIZE:-52428800}
      INNER_API_KEY_FOR_PLUGIN: ${PLUGIN_DIFY_INNER_API_KEY:-QaHbTe77CtuXmsfyhR7+vRjI/+XbV1AaFy691iy+kGDv2Jvy0/eAh8Y1}
      MARKETPLACE_ENABLED: ${MARKETPLACE_ENABLED:-true}
      MARKETPLACE_API_URL: ${MARKETPLACE_API_URL:-https://marketplace.dify.ai}
      PLUGIN_REMOTE_INSTALL_PORT: ${EXPOSE_PLUGIN_DEBUGGING_PORT:-5003}
      PLUGIN_REMOTE_INSTALL_HOST: ${EXPOSE_PLUGIN_DEBUGGING_HOST:-localhost}
      ENDPOINT_URL_TEMPLATE: ${ENDPOINT_URL_TEMPLATE:-http://localhost/e/{hook_id}}
    depends_on:
      - db
      - redis
    volumes:
      # Mount the storage directory to the container, for storing user files.
      - ./volumes/app/storage:/app/api/storage
    networks:
      - ssrf_proxy_network
      - default
  # worker service
  # The Celery worker for processing the queue.
  worker:
    image: langgenius/dify-api:1.0.0-beta.1
    restart: always
    environment:
      # Use the shared environment variables.
      <<: *shared-api-worker-env
      # Startup mode, 'worker' starts the Celery worker for processing the queue.
      MODE: worker
      SENTRY_DSN: ${API_SENTRY_DSN:-}
      SENTRY_TRACES_SAMPLE_RATE: ${API_SENTRY_TRACES_SAMPLE_RATE:-1.0}
      SENTRY_PROFILES_SAMPLE_RATE: ${API_SENTRY_PROFILES_SAMPLE_RATE:-1.0}
      PLUGIN_API_KEY: ${PLUGIN_DAEMON_KEY:-lYkiYYT6owG+71oLerGzA7GXCgOT++6ovaezWAjpCjf+Sjc3ZtU+qUEi}
      PLUGIN_API_URL: ${PLUGIN_DAEMON_URL:-http://plugin_daemon:5002}
      PLUGIN_MAX_PACKAGE_SIZE: ${PLUGIN_MAX_PACKAGE_SIZE:-52428800}
      INNER_API_KEY_FOR_PLUGIN: ${PLUGIN_DIFY_INNER_API_KEY:-QaHbTe77CtuXmsfyhR7+vRjI/+XbV1AaFy691iy+kGDv2Jvy0/eAh8Y1}
      MARKETPLACE_ENABLED: ${MARKETPLACE_ENABLED:-false}
      MARKETPLACE_API_URL: ${MARKETPLACE_API_URL:-https://marketplace.dify.ai}
    depends_on:
      - db
      - redis
    volumes:
      # Mount the storage directory to the container, for storing user files.
      - ./volumes/app/storage:/app/api/storage
    networks:
      - ssrf_proxy_network
      - default
  # Frontend web application.
  web:
    image: langgenius/dify-web:1.0.0-beta.1
    restart: always
    environment:
      CONSOLE_API_URL: ${CONSOLE_API_URL:-}
      APP_API_URL: ${APP_API_URL:-}
      SENTRY_DSN: ${WEB_SENTRY_DSN:-}
      NEXT_TELEMETRY_DISABLED: ${NEXT_TELEMETRY_DISABLED:-0}
      TEXT_GENERATION_TIMEOUT_MS: ${TEXT_GENERATION_TIMEOUT_MS:-60000}
      CSP_WHITELIST: ${CSP_WHITELIST:-}
      MARKETPLACE_API_URL: ${MARKETPLACE_API_URL:-https://marketplace.dify.ai}
      MARKETPLACE_URL: ${MARKETPLACE_URL:-https://marketplace.dify.ai}
      TOP_K_MAX_VALUE: ${TOP_K_MAX_VALUE:-}
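      # Note: CONSOLE_API_URL and APP_API_URL default to empty here; in that case
      # the web app is typically reached through the nginx reverse proxy defined
      # below and calls the API on the same origin. Set them explicitly in .env
      # if the API is exposed on a different host.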
  # The postgres database.
  db:
    image: postgres:15-alpine
    restart: always
    environment:
      PGUSER: ${PGUSER:-postgres}
      POSTGRES_PASSWORD: ${POSTGRES_PASSWORD:-difyai123456}
      POSTGRES_DB: ${POSTGRES_DB:-dify}
      PGDATA: ${PGDATA:-/var/lib/postgresql/data/pgdata}
    command: >
      postgres -c 'max_connections=${POSTGRES_MAX_CONNECTIONS:-100}'
      -c 'shared_buffers=${POSTGRES_SHARED_BUFFERS:-128MB}'
      -c 'work_mem=${POSTGRES_WORK_MEM:-4MB}'
      -c 'maintenance_work_mem=${POSTGRES_MAINTENANCE_WORK_MEM:-64MB}'
      -c 'effective_cache_size=${POSTGRES_EFFECTIVE_CACHE_SIZE:-4096MB}'
    volumes:
      - ./volumes/db/data:/var/lib/postgresql/data
    healthcheck:
      test: [ 'CMD', 'pg_isready' ]
      interval: 1s
      timeout: 3s
      retries: 30
    ports:
      - '${EXPOSE_DB_PORT:-5432}:5432'
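    # The tuning flags in the command above can be overridden from .env, for example:
    #   POSTGRES_MAX_CONNECTIONS=200
    #   POSTGRES_SHARED_BUFFERS=256MB
    # (illustrative values only; size them to the host's available memory.)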
  # The redis cache.
  redis:
    image: redis:6-alpine
    restart: always
    environment:
      REDISCLI_AUTH: ${REDIS_PASSWORD:-difyai123456}
    volumes:
      # Mount the redis data directory to the container.
      - ./volumes/redis/data:/data
    # Set the redis password when starting the redis server.
    command: redis-server --requirepass ${REDIS_PASSWORD:-difyai123456}
    healthcheck:
      test: [ 'CMD', 'redis-cli', 'ping' ]
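      # REDISCLI_AUTH (set above) lets the redis-cli health check authenticate
      # against the --requirepass password without embedding it in the command.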
  # The DifySandbox
  sandbox:
    image: langgenius/dify-sandbox:0.2.10
    restart: always
    environment:
      # The DifySandbox configurations
      # Make sure to change this key to a strong one for your deployment.
      # You can generate a strong key using `openssl rand -base64 42`.
      API_KEY: ${SANDBOX_API_KEY:-dify-sandbox}
      GIN_MODE: ${SANDBOX_GIN_MODE:-release}
      WORKER_TIMEOUT: ${SANDBOX_WORKER_TIMEOUT:-15}
      ENABLE_NETWORK: ${SANDBOX_ENABLE_NETWORK:-true}
      HTTP_PROXY: ${SANDBOX_HTTP_PROXY:-http://ssrf_proxy:3128}
      HTTPS_PROXY: ${SANDBOX_HTTPS_PROXY:-http://ssrf_proxy:3128}
      SANDBOX_PORT: ${SANDBOX_PORT:-8194}
    volumes:
      - ./volumes/sandbox/dependencies:/dependencies
    healthcheck:
      test: [ 'CMD', 'curl', '-f', 'http://localhost:8194/health' ]
    networks:
      - ssrf_proxy_network
  # plugin daemon
  plugin_daemon:
    image: langgenius/dify-plugin-daemon:0.0.1-local
    restart: always
    environment:
      # Use the shared environment variables.
      <<: *shared-api-worker-env
      DB_DATABASE: ${DB_PLUGIN_DATABASE:-dify_plugin}
      SERVER_PORT: ${PLUGIN_DAEMON_PORT:-5002}
      SERVER_KEY: ${PLUGIN_DAEMON_KEY:-lYkiYYT6owG+71oLerGzA7GXCgOT++6ovaezWAjpCjf+Sjc3ZtU+qUEi}
      MAX_PLUGIN_PACKAGE_SIZE: ${PLUGIN_MAX_PACKAGE_SIZE:-52428800}
      PPROF_ENABLED: ${PLUGIN_PPROF_ENABLED:-false}
      DIFY_INNER_API_URL: ${PLUGIN_DIFY_INNER_API_URL:-http://api:5001}
      DIFY_INNER_API_KEY: ${PLUGIN_DIFY_INNER_API_KEY:-QaHbTe77CtuXmsfyhR7+vRjI/+XbV1AaFy691iy+kGDv2Jvy0/eAh8Y1}
      PLUGIN_REMOTE_INSTALLING_HOST: ${PLUGIN_DEBUGGING_HOST:-0.0.0.0}
      PLUGIN_REMOTE_INSTALLING_PORT: ${PLUGIN_DEBUGGING_PORT:-5003}
      PLUGIN_WORKING_PATH: ${PLUGIN_WORKING_PATH:-/app/storage/cwd}
    ports:
      - "${EXPOSE_PLUGIN_DEBUGGING_PORT:-5003}:${PLUGIN_DEBUGGING_PORT:-5003}"
    volumes:
      - ./volumes/plugin_daemon:/app/storage
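    # SERVER_KEY and DIFY_INNER_API_KEY are expected to match PLUGIN_API_KEY and
    # INNER_API_KEY_FOR_PLUGIN in the api/worker services above; they share the
    # same PLUGIN_DAEMON_KEY and PLUGIN_DIFY_INNER_API_KEY variables, so change
    # them once in .env rather than editing the defaults here.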
  # ssrf_proxy server
  # for more information, please refer to
  # https://docs.dify.ai/learn-more/faq/install-faq#id-18.-why-is-ssrf_proxy-needed
  ssrf_proxy:
    image: ubuntu/squid:latest
    restart: always
    volumes:
      - ./ssrf_proxy/squid.conf.template:/etc/squid/squid.conf.template
      - ./ssrf_proxy/docker-entrypoint.sh:/docker-entrypoint-mount.sh
    entrypoint: [ 'sh', '-c', "cp /docker-entrypoint-mount.sh /docker-entrypoint.sh && sed -i 's/\r$$//' /docker-entrypoint.sh && chmod +x /docker-entrypoint.sh && /docker-entrypoint.sh" ]
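    # Note: `$$` is how a literal `$` is written in a Compose file, so the sed
    # expression strips trailing carriage returns (CRLF line endings) from the
    # mounted entrypoint script before it is executed.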
    environment:
      # Please modify the Squid environment variables below to fit your network environment.
      HTTP_PORT: ${SSRF_HTTP_PORT:-3128}
      COREDUMP_DIR: ${SSRF_COREDUMP_DIR:-/var/spool/squid}
      REVERSE_PROXY_PORT: ${SSRF_REVERSE_PROXY_PORT:-8194}
      SANDBOX_HOST: ${SSRF_SANDBOX_HOST:-sandbox}
      SANDBOX_PORT: ${SANDBOX_PORT:-8194}
    networks:
      - ssrf_proxy_network
      - default
  # Certbot service
  # use `docker-compose --profile certbot up` to start the certbot service.
  certbot:
    image: certbot/certbot
    profiles:
      - certbot
    volumes:
      - ./volumes/certbot/conf:/etc/letsencrypt
      - ./volumes/certbot/www:/var/www/html
      - ./volumes/certbot/logs:/var/log/letsencrypt
      - ./volumes/certbot/conf/live:/etc/letsencrypt/live
      - ./certbot/update-cert.template.txt:/update-cert.template.txt
      - ./certbot/docker-entrypoint.sh:/docker-entrypoint.sh
    environment:
      - CERTBOT_EMAIL=${CERTBOT_EMAIL}
      - CERTBOT_DOMAIN=${CERTBOT_DOMAIN}
      - CERTBOT_OPTIONS=${CERTBOT_OPTIONS:-}
    entrypoint: [ '/docker-entrypoint.sh' ]
    command: [ 'tail', '-f', '/dev/null' ]
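  # Typical certbot flow (a sketch; adjust to your deployment):
  #   1. Set CERTBOT_EMAIL and CERTBOT_DOMAIN in .env.
  #   2. Start the profile: docker compose --profile certbot up -d
  #   3. Once certificates are issued, set NGINX_HTTPS_ENABLED=true and
  #      NGINX_ENABLE_CERTBOT_CHALLENGE=true in .env and restart nginx.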
  # The nginx reverse proxy.
  # used for reverse proxying the API service and Web service.
  nginx:
    image: nginx:latest
    restart: always
    volumes:
      - ./nginx/nginx.conf.template:/etc/nginx/nginx.conf.template
      - ./nginx/proxy.conf.template:/etc/nginx/proxy.conf.template
      - ./nginx/https.conf.template:/etc/nginx/https.conf.template
      - ./nginx/conf.d:/etc/nginx/conf.d
      - ./nginx/docker-entrypoint.sh:/docker-entrypoint-mount.sh
      - ./nginx/ssl:/etc/ssl # cert dir (legacy)
      - ./volumes/certbot/conf/live:/etc/letsencrypt/live # cert dir (with certbot container)
      - ./volumes/certbot/conf:/etc/letsencrypt
      - ./volumes/certbot/www:/var/www/html
    entrypoint: [ 'sh', '-c', "cp /docker-entrypoint-mount.sh /docker-entrypoint.sh && sed -i 's/\r$$//' /docker-entrypoint.sh && chmod +x /docker-entrypoint.sh && /docker-entrypoint.sh" ]
    environment:
      NGINX_SERVER_NAME: ${NGINX_SERVER_NAME:-_}
      NGINX_HTTPS_ENABLED: ${NGINX_HTTPS_ENABLED:-false}
      NGINX_SSL_PORT: ${NGINX_SSL_PORT:-443}
      NGINX_PORT: ${NGINX_PORT:-80}
      # If NGINX_HTTPS_ENABLED is true, add your own SSL certificate and key to
      # the `./nginx/ssl` directory and set the env vars below in .env accordingly.
      NGINX_SSL_CERT_FILENAME: ${NGINX_SSL_CERT_FILENAME:-dify.crt}
      NGINX_SSL_CERT_KEY_FILENAME: ${NGINX_SSL_CERT_KEY_FILENAME:-dify.key}
      NGINX_SSL_PROTOCOLS: ${NGINX_SSL_PROTOCOLS:-TLSv1.1 TLSv1.2 TLSv1.3}
      NGINX_WORKER_PROCESSES: ${NGINX_WORKER_PROCESSES:-auto}
      NGINX_CLIENT_MAX_BODY_SIZE: ${NGINX_CLIENT_MAX_BODY_SIZE:-15M}
      NGINX_KEEPALIVE_TIMEOUT: ${NGINX_KEEPALIVE_TIMEOUT:-65}
      NGINX_PROXY_READ_TIMEOUT: ${NGINX_PROXY_READ_TIMEOUT:-3600s}
      NGINX_PROXY_SEND_TIMEOUT: ${NGINX_PROXY_SEND_TIMEOUT:-3600s}
      NGINX_ENABLE_CERTBOT_CHALLENGE: ${NGINX_ENABLE_CERTBOT_CHALLENGE:-false}
      CERTBOT_DOMAIN: ${CERTBOT_DOMAIN:-}
    depends_on:
      - api
      - web
    ports:
      - '${EXPOSE_NGINX_PORT:-80}:${NGINX_PORT:-80}'
      - '${EXPOSE_NGINX_SSL_PORT:-443}:${NGINX_SSL_PORT:-443}'
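    # EXPOSE_NGINX_PORT / EXPOSE_NGINX_SSL_PORT control the host-side ports, so
    # e.g. EXPOSE_NGINX_PORT=8080 in .env serves Dify on host port 8080 without
    # touching the container-side NGINX_PORT.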
  # The TiDB vector store.
  # For production use, please refer to https://github.com/pingcap/tidb-docker-compose
  tidb:
    image: pingcap/tidb:v8.4.0
    profiles:
      - tidb
    command:
      - --store=unistore
    restart: always
  # The Weaviate vector store.
  weaviate:
    image: semitechnologies/weaviate:1.19.0
    profiles:
      - ''
      - weaviate
    restart: always
    volumes:
      # Mount the Weaviate data directory to the container.
      - ./volumes/weaviate:/var/lib/weaviate
    environment:
      # The Weaviate configurations
      # You can refer to the [Weaviate](https://weaviate.io/developers/weaviate/config-refs/env-vars) documentation for more information.
      PERSISTENCE_DATA_PATH: ${WEAVIATE_PERSISTENCE_DATA_PATH:-/var/lib/weaviate}
      QUERY_DEFAULTS_LIMIT: ${WEAVIATE_QUERY_DEFAULTS_LIMIT:-25}
      AUTHENTICATION_ANONYMOUS_ACCESS_ENABLED: ${WEAVIATE_AUTHENTICATION_ANONYMOUS_ACCESS_ENABLED:-false}
      DEFAULT_VECTORIZER_MODULE: ${WEAVIATE_DEFAULT_VECTORIZER_MODULE:-none}
      CLUSTER_HOSTNAME: ${WEAVIATE_CLUSTER_HOSTNAME:-node1}
      AUTHENTICATION_APIKEY_ENABLED: ${WEAVIATE_AUTHENTICATION_APIKEY_ENABLED:-true}
      AUTHENTICATION_APIKEY_ALLOWED_KEYS: ${WEAVIATE_AUTHENTICATION_APIKEY_ALLOWED_KEYS:-WVF5YThaHlkYwhGUSmCRgsX3tD5ngdN8pkih}
      AUTHENTICATION_APIKEY_USERS: ${WEAVIATE_AUTHENTICATION_APIKEY_USERS:-hello@dify.ai}
      AUTHORIZATION_ADMINLIST_ENABLED: ${WEAVIATE_AUTHORIZATION_ADMINLIST_ENABLED:-true}
      AUTHORIZATION_ADMINLIST_USERS: ${WEAVIATE_AUTHORIZATION_ADMINLIST_USERS:-hello@dify.ai}
  # Qdrant vector store.
  # (if used, you need to set VECTOR_STORE to qdrant in the api & worker service.)
  qdrant:
    image: langgenius/qdrant:v1.7.3
    profiles:
      - qdrant
    restart: always
    volumes:
      - ./volumes/qdrant:/qdrant/storage
    environment:
      QDRANT_API_KEY: ${QDRANT_API_KEY:-difyai123456}
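  # Example (a sketch): to use Qdrant, set VECTOR_STORE to qdrant for the api &
  # worker services (e.g. via .env) and start the service with its profile enabled:
  #   docker compose --profile qdrant up -d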
  # The Couchbase vector store.
  couchbase-server:
    build: ./couchbase-server
    profiles:
      - couchbase
    restart: always
    environment:
      - CLUSTER_NAME=dify_search
      - COUCHBASE_ADMINISTRATOR_USERNAME=${COUCHBASE_USER:-Administrator}
      - COUCHBASE_ADMINISTRATOR_PASSWORD=${COUCHBASE_PASSWORD:-password}
      - COUCHBASE_BUCKET=${COUCHBASE_BUCKET_NAME:-Embeddings}
      - COUCHBASE_BUCKET_RAMSIZE=512
      - COUCHBASE_RAM_SIZE=2048
      - COUCHBASE_EVENTING_RAM_SIZE=512
      - COUCHBASE_INDEX_RAM_SIZE=512
      - COUCHBASE_FTS_RAM_SIZE=1024
    hostname: couchbase-server
    container_name: couchbase-server
    working_dir: /opt/couchbase
    stdin_open: true
    tty: true
    entrypoint: [ "" ]
    command: sh -c "/opt/couchbase/init/init-cbserver.sh"
    volumes:
      - ./volumes/couchbase/data:/opt/couchbase/var/lib/couchbase/data
    healthcheck:
      # ensure the bucket was created before proceeding
      test: [ "CMD-SHELL", "curl -s -f -u Administrator:password http://localhost:8091/pools/default/buckets | grep -q '\\[{' || exit 1" ]
      interval: 10s
      retries: 10
      start_period: 30s
      timeout: 10s
  # The pgvector vector database.
  pgvector:
    image: pgvector/pgvector:pg16
    profiles:
      - pgvector
    restart: always
    environment:
      PGUSER: ${PGVECTOR_PGUSER:-postgres}
      # The password for the default postgres user.
      POSTGRES_PASSWORD: ${PGVECTOR_POSTGRES_PASSWORD:-difyai123456}
      # The name of the default postgres database.
      POSTGRES_DB: ${PGVECTOR_POSTGRES_DB:-dify}
      # postgres data directory
      PGDATA: ${PGVECTOR_PGDATA:-/var/lib/postgresql/data/pgdata}
    volumes:
      - ./volumes/pgvector/data:/var/lib/postgresql/data
    healthcheck:
      test: [ 'CMD', 'pg_isready' ]
      interval: 1s
      timeout: 3s
      retries: 30
  # pgvecto-rs vector store
  pgvecto-rs:
    image: tensorchord/pgvecto-rs:pg16-v0.3.0
    profiles:
      - pgvecto-rs
    restart: always
    environment:
      PGUSER: ${PGVECTOR_PGUSER:-postgres}
      # The password for the default postgres user.
      POSTGRES_PASSWORD: ${PGVECTOR_POSTGRES_PASSWORD:-difyai123456}
      # The name of the default postgres database.
      POSTGRES_DB: ${PGVECTOR_POSTGRES_DB:-dify}
      # postgres data directory
      PGDATA: ${PGVECTOR_PGDATA:-/var/lib/postgresql/data/pgdata}
    volumes:
      - ./volumes/pgvecto_rs/data:/var/lib/postgresql/data
    healthcheck:
      test: [ 'CMD', 'pg_isready' ]
      interval: 1s
      timeout: 3s
      retries: 30
  # Chroma vector database
  chroma:
    image: ghcr.io/chroma-core/chroma:0.5.20
    profiles:
      - chroma
    restart: always
    volumes:
      - ./volumes/chroma:/chroma/chroma
    environment:
      CHROMA_SERVER_AUTHN_CREDENTIALS: ${CHROMA_SERVER_AUTHN_CREDENTIALS:-difyai123456}
      CHROMA_SERVER_AUTHN_PROVIDER: ${CHROMA_SERVER_AUTHN_PROVIDER:-chromadb.auth.token_authn.TokenAuthenticationServerProvider}
      IS_PERSISTENT: ${CHROMA_IS_PERSISTENT:-TRUE}
  # OceanBase vector database
  oceanbase:
    image: quay.io/oceanbase/oceanbase-ce:4.3.3.0-100000142024101215
    profiles:
      - oceanbase
    restart: always
    volumes:
      - ./volumes/oceanbase/data:/root/ob
      - ./volumes/oceanbase/conf:/root/.obd/cluster
      - ./volumes/oceanbase/init.d:/root/boot/init.d
    environment:
      OB_MEMORY_LIMIT: ${OCEANBASE_MEMORY_LIMIT:-6G}
      OB_SYS_PASSWORD: ${OCEANBASE_VECTOR_PASSWORD:-difyai123456}
      OB_TENANT_PASSWORD: ${OCEANBASE_VECTOR_PASSWORD:-difyai123456}
      OB_CLUSTER_NAME: ${OCEANBASE_CLUSTER_NAME:-difyai}
      OB_SERVER_IP: '127.0.0.1'
  # Oracle vector database
  oracle:
    image: container-registry.oracle.com/database/free:latest
    profiles:
      - oracle
    restart: always
    volumes:
      - source: oradata
        type: volume
        target: /opt/oracle/oradata
      - ./startupscripts:/opt/oracle/scripts/startup
    environment:
      ORACLE_PWD: ${ORACLE_PWD:-Dify123456}
      ORACLE_CHARACTERSET: ${ORACLE_CHARACTERSET:-AL32UTF8}
  # Milvus vector database services
  etcd:
    container_name: milvus-etcd
    image: quay.io/coreos/etcd:v3.5.5
    profiles:
      - milvus
    environment:
      ETCD_AUTO_COMPACTION_MODE: ${ETCD_AUTO_COMPACTION_MODE:-revision}
      ETCD_AUTO_COMPACTION_RETENTION: ${ETCD_AUTO_COMPACTION_RETENTION:-1000}
      ETCD_QUOTA_BACKEND_BYTES: ${ETCD_QUOTA_BACKEND_BYTES:-4294967296}
      ETCD_SNAPSHOT_COUNT: ${ETCD_SNAPSHOT_COUNT:-50000}
    volumes:
      - ./volumes/milvus/etcd:/etcd
    command: etcd -advertise-client-urls=http://127.0.0.1:2379 -listen-client-urls http://0.0.0.0:2379 --data-dir /etcd
    healthcheck:
      test: [ 'CMD', 'etcdctl', 'endpoint', 'health' ]
      interval: 30s
      timeout: 20s
      retries: 3
    networks:
      - milvus
  minio:
    container_name: milvus-minio
    image: minio/minio:RELEASE.2023-03-20T20-16-18Z
    profiles:
      - milvus
    environment:
      MINIO_ACCESS_KEY: ${MINIO_ACCESS_KEY:-minioadmin}
      MINIO_SECRET_KEY: ${MINIO_SECRET_KEY:-minioadmin}
    volumes:
      - ./volumes/milvus/minio:/minio_data
    command: minio server /minio_data --console-address ":9001"
    healthcheck:
      test: [ 'CMD', 'curl', '-f', 'http://localhost:9000/minio/health/live' ]
      interval: 30s
      timeout: 20s
      retries: 3
    networks:
      - milvus
  milvus-standalone:
    container_name: milvus-standalone
    image: milvusdb/milvus:v2.5.0-beta
    profiles:
      - milvus
    command: [ 'milvus', 'run', 'standalone' ]
    environment:
      ETCD_ENDPOINTS: ${ETCD_ENDPOINTS:-etcd:2379}
      MINIO_ADDRESS: ${MINIO_ADDRESS:-minio:9000}
      common.security.authorizationEnabled: ${MILVUS_AUTHORIZATION_ENABLED:-true}
    volumes:
      - ./volumes/milvus/milvus:/var/lib/milvus
    healthcheck:
      test: [ 'CMD', 'curl', '-f', 'http://localhost:9091/healthz' ]
      interval: 30s
      start_period: 90s
      timeout: 20s
      retries: 3
    depends_on:
      - etcd
      - minio
    ports:
      - 19530:19530
      - 9091:9091
    networks:
      - milvus
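  # The three services above (etcd, minio, milvus-standalone) all belong to the
  # `milvus` profile and reach each other over the dedicated `milvus` network,
  # so they start together with: docker compose --profile milvus up -d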
  # Opensearch vector database
  opensearch:
    container_name: opensearch
    image: opensearchproject/opensearch:latest
    profiles:
      - opensearch
    environment:
      discovery.type: ${OPENSEARCH_DISCOVERY_TYPE:-single-node}
      bootstrap.memory_lock: ${OPENSEARCH_BOOTSTRAP_MEMORY_LOCK:-true}
      OPENSEARCH_JAVA_OPTS: -Xms${OPENSEARCH_JAVA_OPTS_MIN:-512m} -Xmx${OPENSEARCH_JAVA_OPTS_MAX:-1024m}
      OPENSEARCH_INITIAL_ADMIN_PASSWORD: ${OPENSEARCH_INITIAL_ADMIN_PASSWORD:-Qazwsxedc!@#123}
    ulimits:
      memlock:
        soft: ${OPENSEARCH_MEMLOCK_SOFT:--1}
        hard: ${OPENSEARCH_MEMLOCK_HARD:--1}
      nofile:
        soft: ${OPENSEARCH_NOFILE_SOFT:-65536}
        hard: ${OPENSEARCH_NOFILE_HARD:-65536}
    volumes:
      - ./volumes/opensearch/data:/usr/share/opensearch/data
    networks:
      - opensearch-net
  opensearch-dashboards:
    container_name: opensearch-dashboards
    image: opensearchproject/opensearch-dashboards:latest
    profiles:
      - opensearch
    environment:
      OPENSEARCH_HOSTS: '["https://opensearch:9200"]'
    volumes:
      - ./volumes/opensearch/opensearch_dashboards.yml:/usr/share/opensearch-dashboards/config/opensearch_dashboards.yml
    networks:
      - opensearch-net
    depends_on:
      - opensearch
  # MyScale vector database
  myscale:
    container_name: myscale
    image: myscale/myscaledb:1.6.4
    profiles:
      - myscale
    restart: always
    tty: true
    volumes:
      - ./volumes/myscale/data:/var/lib/clickhouse
      - ./volumes/myscale/log:/var/log/clickhouse-server
      - ./volumes/myscale/config/users.d/custom_users_config.xml:/etc/clickhouse-server/users.d/custom_users_config.xml
    ports:
      - ${MYSCALE_PORT:-8123}:${MYSCALE_PORT:-8123}
  # https://www.elastic.co/guide/en/elasticsearch/reference/current/settings.html
  # https://www.elastic.co/guide/en/elasticsearch/reference/current/docker.html#docker-prod-prerequisites
  elasticsearch:
    image: docker.elastic.co/elasticsearch/elasticsearch:8.14.3
    container_name: elasticsearch
    profiles:
      - elasticsearch
      - elasticsearch-ja
    restart: always
    volumes:
      - ./elasticsearch/docker-entrypoint.sh:/docker-entrypoint-mount.sh
      - dify_es01_data:/usr/share/elasticsearch/data
    environment:
      ELASTIC_PASSWORD: ${ELASTICSEARCH_PASSWORD:-elastic}
      VECTOR_STORE: ${VECTOR_STORE:-}
      cluster.name: dify-es-cluster
      node.name: dify-es0
      discovery.type: single-node
      xpack.license.self_generated.type: basic
      xpack.security.enabled: 'true'
      xpack.security.enrollment.enabled: 'false'
      xpack.security.http.ssl.enabled: 'false'
    ports:
      - ${ELASTICSEARCH_PORT:-9200}:9200
    deploy:
      resources:
        limits:
          memory: 2g
    entrypoint: [ 'sh', '-c', "sh /docker-entrypoint-mount.sh" ]
    healthcheck:
      test: [ 'CMD', 'curl', '-s', 'http://localhost:9200/_cluster/health?pretty' ]
      interval: 30s
      timeout: 10s
      retries: 50
  # https://www.elastic.co/guide/en/kibana/current/docker.html
  # https://www.elastic.co/guide/en/kibana/current/settings.html
  kibana:
    image: docker.elastic.co/kibana/kibana:8.14.3
    container_name: kibana
    profiles:
      - elasticsearch
    depends_on:
      - elasticsearch
    restart: always
    environment:
      XPACK_ENCRYPTEDSAVEDOBJECTS_ENCRYPTIONKEY: d1a66dfd-c4d3-4a0a-8290-2abcb83ab3aa
      NO_PROXY: localhost,127.0.0.1,elasticsearch,kibana
      XPACK_SECURITY_ENABLED: 'true'
      XPACK_SECURITY_ENROLLMENT_ENABLED: 'false'
      XPACK_SECURITY_HTTP_SSL_ENABLED: 'false'
      XPACK_FLEET_ISAIRGAPPED: 'true'
      I18N_LOCALE: zh-CN
      SERVER_PORT: '5601'
      ELASTICSEARCH_HOSTS: http://elasticsearch:9200
    ports:
      - ${KIBANA_PORT:-5601}:5601
    healthcheck:
      test: [ 'CMD-SHELL', 'curl -s http://localhost:5601 >/dev/null || exit 1' ]
      interval: 30s
      timeout: 10s
      retries: 3
  # unstructured
  # (if used, you need to set ETL_TYPE to Unstructured in the api & worker service.)
  unstructured:
    image: downloads.unstructured.io/unstructured-io/unstructured-api:latest
    profiles:
      - unstructured
    restart: always
    volumes:
      - ./volumes/unstructured:/app/data
networks:
  # Create a network shared by sandbox, api and ssrf_proxy; it is internal and cannot reach the outside.
  ssrf_proxy_network:
    driver: bridge
    internal: true
  milvus:
    driver: bridge
  opensearch-net:
    driver: bridge
    internal: true
volumes:
  oradata:
  dify_es01_data: