feat: full support for opendal and sync configurations between .env and docker-compose (#11754)

Signed-off-by: -LAN- <laipz8200@outlook.com>
-LAN- 2024-12-18 09:05:54 +08:00, committed by GitHub
parent e86756cb39
commit 99f40a9682
11 changed files with 1051 additions and 436 deletions


@@ -60,17 +60,8 @@ DB_DATABASE=dify
 STORAGE_TYPE=opendal
 # Apache OpenDAL storage configuration, refer to https://github.com/apache/opendal
-STORAGE_OPENDAL_SCHEME=fs
-# OpenDAL FS
+OPENDAL_SCHEME=fs
 OPENDAL_FS_ROOT=storage
-# OpenDAL S3
-OPENDAL_S3_ROOT=/
-OPENDAL_S3_BUCKET=your-bucket-name
-OPENDAL_S3_ENDPOINT=https://s3.amazonaws.com
-OPENDAL_S3_ACCESS_KEY_ID=your-access-key
-OPENDAL_S3_SECRET_ACCESS_KEY=your-secret-key
-OPENDAL_S3_REGION=your-region
-OPENDAL_S3_SERVER_SIDE_ENCRYPTION=
 # S3 Storage configuration
 S3_USE_AWS_MANAGED_IAM=false
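The per-scheme `OPENDAL_S3_*` keys disappear from this file because, after this commit, any variable named `OPENDAL_<SCHEME>_<KEY>` is forwarded to the OpenDAL operator generically. A minimal sketch of that naming convention, mirroring the `_get_opendal_kwargs` helper introduced later in this diff (the `env` dict here is illustrative):

scheme = "s3"
env = {"OPENDAL_S3_BUCKET": "your-bucket-name", "OPENDAL_S3_REGION": "your-region"}
prefix = "OPENDAL_" + scheme.upper() + "_"
# strip the prefix and lowercase the remainder to get operator kwargs
kwargs = {key[len(prefix):].lower(): value for key, value in env.items() if key.startswith(prefix)}
print(kwargs)  # {'bucket': 'your-bucket-name', 'region': 'your-region'}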


@@ -1,5 +1,4 @@
 import logging
-import os
 import time

 from configs import dify_config
@@ -17,15 +16,6 @@ def create_flask_app_with_configs() -> DifyApp:
     dify_app = DifyApp(__name__)
     dify_app.config.from_mapping(dify_config.model_dump())

-    # populate configs into system environment variables
-    for key, value in dify_app.config.items():
-        if isinstance(value, str):
-            os.environ[key] = value
-        elif isinstance(value, int | float | bool):
-            os.environ[key] = str(value)
-        elif value is None:
-            os.environ[key] = ""
-
     return dify_app
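With this loop gone, configuration values are no longer mirrored into `os.environ`; code reads them from `dify_config` (or the Flask config populated by `from_mapping`) instead. A minimal sketch, assuming the `OPENDAL_SCHEME` field defined elsewhere in this commit:

from configs import dify_config

# read settings from the typed config object rather than the process environment
scheme = dify_config.OPENDAL_SCHEME  # "fs" unless overridden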


@@ -1,51 +1,9 @@
-from enum import StrEnum
-from typing import Literal
-
 from pydantic import Field
 from pydantic_settings import BaseSettings


-class OpenDALScheme(StrEnum):
-    FS = "fs"
-    S3 = "s3"
-
-
 class OpenDALStorageConfig(BaseSettings):
-    STORAGE_OPENDAL_SCHEME: str = Field(
-        default=OpenDALScheme.FS.value,
+    OPENDAL_SCHEME: str = Field(
+        default="fs",
         description="OpenDAL scheme.",
     )
-
-    # FS
-    OPENDAL_FS_ROOT: str = Field(
-        default="storage",
-        description="Root path for local storage.",
-    )
-
-    # S3
-    OPENDAL_S3_ROOT: str = Field(
-        default="/",
-        description="Root path for S3 storage.",
-    )
-    OPENDAL_S3_BUCKET: str = Field(
-        default="",
-        description="S3 bucket name.",
-    )
-    OPENDAL_S3_ENDPOINT: str = Field(
-        default="https://s3.amazonaws.com",
-        description="S3 endpoint URL.",
-    )
-    OPENDAL_S3_ACCESS_KEY_ID: str = Field(
-        default="",
-        description="S3 access key ID.",
-    )
-    OPENDAL_S3_SECRET_ACCESS_KEY: str = Field(
-        default="",
-        description="S3 secret access key.",
-    )
-    OPENDAL_S3_REGION: str = Field(
-        default="",
-        description="S3 region.",
-    )
-    OPENDAL_S3_SERVER_SIDE_ENCRYPTION: Literal["aws:kms", ""] = Field(
-        default="",
-        description="S3 server-side encryption.",
-    )
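Since `OpenDALStorageConfig` is a pydantic-settings `BaseSettings`, the remaining `OPENDAL_SCHEME` field is still overridable from the environment; only the scheme-specific keys are resolved elsewhere now. A small self-contained sketch of that behavior:

import os

from pydantic import Field
from pydantic_settings import BaseSettings


class OpenDALStorageConfig(BaseSettings):
    OPENDAL_SCHEME: str = Field(default="fs", description="OpenDAL scheme.")


print(OpenDALStorageConfig().OPENDAL_SCHEME)  # fs (the default)
os.environ["OPENDAL_SCHEME"] = "s3"
print(OpenDALStorageConfig().OPENDAL_SCHEME)  # s3 (picked up from the environment)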


@@ -1,11 +1,10 @@
 import logging
-from collections.abc import Callable, Generator, Mapping
+from collections.abc import Callable, Generator
 from typing import Union

 from flask import Flask

 from configs import dify_config
-from configs.middleware.storage.opendal_storage_config import OpenDALScheme
 from dify_app import DifyApp
 from extensions.storage.base_storage import BaseStorage
 from extensions.storage.storage_type import StorageType

@@ -23,21 +22,17 @@ class Storage:
     def get_storage_factory(storage_type: str) -> Callable[[], BaseStorage]:
         match storage_type:
             case StorageType.S3:
-                from extensions.storage.opendal_storage import OpenDALStorage
+                from extensions.storage.aws_s3_storage import AwsS3Storage

-                kwargs = _load_s3_storage_kwargs()
-                return lambda: OpenDALStorage(scheme=OpenDALScheme.S3, **kwargs)
+                return AwsS3Storage
             case StorageType.OPENDAL:
                 from extensions.storage.opendal_storage import OpenDALStorage

-                scheme = OpenDALScheme(dify_config.STORAGE_OPENDAL_SCHEME)
-                kwargs = _load_opendal_storage_kwargs(scheme)
-                return lambda: OpenDALStorage(scheme=scheme, **kwargs)
+                return lambda: OpenDALStorage(dify_config.OPENDAL_SCHEME)
             case StorageType.LOCAL:
                 from extensions.storage.opendal_storage import OpenDALStorage

-                kwargs = _load_local_storage_kwargs()
-                return lambda: OpenDALStorage(scheme=OpenDALScheme.FS, **kwargs)
+                return lambda: OpenDALStorage(scheme="fs", root=dify_config.STORAGE_LOCAL_PATH)
             case StorageType.AZURE_BLOB:
                 from extensions.storage.azure_blob_storage import AzureBlobStorage

@@ -75,7 +70,7 @@ class Storage:
                 return SupabaseStorage
             case _:
-                raise ValueError(f"Unsupported storage type {storage_type}")
+                raise ValueError(f"unsupported storage type {storage_type}")

     def save(self, filename, data):
         try:

@@ -130,81 +125,6 @@ class Storage:
             raise e

-
-def _load_s3_storage_kwargs() -> Mapping[str, str]:
-    """
-    Load the kwargs for S3 storage based on dify_config.
-    Handles special cases like AWS managed IAM and R2.
-    """
-    kwargs = {
-        "root": "/",
-        "bucket": dify_config.S3_BUCKET_NAME,
-        "endpoint": dify_config.S3_ENDPOINT,
-        "access_key_id": dify_config.S3_ACCESS_KEY,
-        "secret_access_key": dify_config.S3_SECRET_KEY,
-        "region": dify_config.S3_REGION,
-    }
-    kwargs = {k: v for k, v in kwargs.items() if isinstance(v, str)}
-
-    # For AWS managed IAM
-    if dify_config.S3_USE_AWS_MANAGED_IAM:
-        from extensions.storage.opendal_storage import S3_SSE_WITH_AWS_MANAGED_IAM_KWARGS
-
-        logger.debug("Using AWS managed IAM role for S3")
-        kwargs = {**kwargs, **{k: v for k, v in S3_SSE_WITH_AWS_MANAGED_IAM_KWARGS.items() if k not in kwargs}}
-
-    # For Cloudflare R2
-    if kwargs.get("endpoint"):
-        from extensions.storage.opendal_storage import S3_R2_COMPATIBLE_KWARGS, is_r2_endpoint
-
-        if is_r2_endpoint(kwargs["endpoint"]):
-            logger.debug("Using R2 for OpenDAL S3")
-            kwargs = {**kwargs, **{k: v for k, v in S3_R2_COMPATIBLE_KWARGS.items() if k not in kwargs}}
-
-    return kwargs
-
-
-def _load_local_storage_kwargs() -> Mapping[str, str]:
-    """
-    Load the kwargs for local storage based on dify_config.
-    """
-    return {
-        "root": dify_config.STORAGE_LOCAL_PATH,
-    }
-
-
-def _load_opendal_storage_kwargs(scheme: OpenDALScheme) -> Mapping[str, str]:
-    """
-    Load the kwargs for OpenDAL storage based on the given scheme.
-    """
-    match scheme:
-        case OpenDALScheme.FS:
-            kwargs = {
-                "root": dify_config.OPENDAL_FS_ROOT,
-            }
-        case OpenDALScheme.S3:
-            # Load OpenDAL S3-related configs
-            kwargs = {
-                "root": dify_config.OPENDAL_S3_ROOT,
-                "bucket": dify_config.OPENDAL_S3_BUCKET,
-                "endpoint": dify_config.OPENDAL_S3_ENDPOINT,
-                "access_key_id": dify_config.OPENDAL_S3_ACCESS_KEY_ID,
-                "secret_access_key": dify_config.OPENDAL_S3_SECRET_ACCESS_KEY,
-                "region": dify_config.OPENDAL_S3_REGION,
-            }
-
-            # For Cloudflare R2
-            if kwargs.get("endpoint"):
-                from extensions.storage.opendal_storage import S3_R2_COMPATIBLE_KWARGS, is_r2_endpoint
-
-                if is_r2_endpoint(kwargs["endpoint"]):
-                    logger.debug("Using R2 for OpenDAL S3")
-                    kwargs = {**kwargs, **{k: v for k, v in S3_R2_COMPATIBLE_KWARGS.items() if k not in kwargs}}
-        case _:
-            logger.warning(f"Unrecognized OpenDAL scheme: {scheme}, will fall back to default.")
-            kwargs = {}
-
-    return kwargs
-

 storage = Storage()
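Usage is unchanged from the caller's side: the factory returns a zero-argument callable that builds the backend. A hedged sketch, assuming `get_storage_factory` is exposed as a static method on `Storage` and that the class lives in `extensions.ext_storage` as in the Dify source tree:

from extensions.ext_storage import Storage
from extensions.storage.storage_type import StorageType

factory = Storage.get_storage_factory(StorageType.OPENDAL)
store = factory()  # equivalent to OpenDALStorage(dify_config.OPENDAL_SCHEME)
store.save("example.txt", b"hello")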


@@ -1,46 +1,57 @@
+import logging
+import os
 from collections.abc import Generator
 from pathlib import Path
-from urllib.parse import urlparse

 import opendal
+from dotenv import dotenv_values

-from configs.middleware.storage.opendal_storage_config import OpenDALScheme
 from extensions.storage.base_storage import BaseStorage

-S3_R2_HOSTNAME = "r2.cloudflarestorage.com"
-S3_R2_COMPATIBLE_KWARGS = {
-    "delete_max_size": "700",
-    "disable_stat_with_override": "true",
-    "region": "auto",
-}
-S3_SSE_WITH_AWS_MANAGED_IAM_KWARGS = {
-    "server_side_encryption": "aws:kms",
-}
+logger = logging.getLogger(__name__)


-def is_r2_endpoint(endpoint: str) -> bool:
-    if not endpoint:
-        return False
+def _get_opendal_kwargs(*, scheme: str, env_file_path: str = ".env", prefix: str = "OPENDAL_"):
+    kwargs = {}
+    config_prefix = prefix + scheme.upper() + "_"
+    for key, value in os.environ.items():
+        if key.startswith(config_prefix):
+            kwargs[key[len(config_prefix) :].lower()] = value

-    parsed_url = urlparse(endpoint)
-    return bool(parsed_url.hostname and parsed_url.hostname.endswith(S3_R2_HOSTNAME))
+    file_env_vars = dotenv_values(env_file_path)
+    for key, value in file_env_vars.items():
+        if key.startswith(config_prefix) and key[len(config_prefix) :].lower() not in kwargs and value:
+            kwargs[key[len(config_prefix) :].lower()] = value
+
+    return kwargs


 class OpenDALStorage(BaseStorage):
-    def __init__(self, scheme: OpenDALScheme, **kwargs):
-        if scheme == OpenDALScheme.FS:
-            Path(kwargs["root"]).mkdir(parents=True, exist_ok=True)
+    def __init__(self, scheme: str, **kwargs):
+        kwargs = kwargs or _get_opendal_kwargs(scheme=scheme)
+
+        if scheme == "fs":
+            root = kwargs.get("root", "storage")
+            Path(root).mkdir(parents=True, exist_ok=True)

+        # self.op = opendal.Operator(scheme=scheme, **kwargs)
         self.op = opendal.Operator(scheme=scheme, **kwargs)
+        logger.debug(f"opendal operator created with scheme {scheme}")
+        retry_layer = opendal.layers.RetryLayer(max_times=3, factor=2.0, jitter=True)
+        self.op = self.op.layer(retry_layer)
+        logger.debug("added retry layer to opendal operator")

     def save(self, filename: str, data: bytes) -> None:
         self.op.write(path=filename, bs=data)
+        logger.debug(f"file {filename} saved")

     def load_once(self, filename: str) -> bytes:
         if not self.exists(filename):
             raise FileNotFoundError("File not found")

-        return self.op.read(path=filename)
+        content = self.op.read(path=filename)
+        logger.debug(f"file {filename} loaded")
+        return content

     def load_stream(self, filename: str) -> Generator:
         if not self.exists(filename):
@@ -50,6 +61,7 @@ class OpenDALStorage(BaseStorage):
         file = self.op.open(path=filename, mode="rb")
         while chunk := file.read(batch_size):
             yield chunk
+        logger.debug(f"file {filename} loaded as stream")

     def download(self, filename: str, target_filepath: str):
         if not self.exists(filename):
@@ -57,16 +69,22 @@ class OpenDALStorage(BaseStorage):

         with Path(target_filepath).open("wb") as f:
             f.write(self.op.read(path=filename))
+        logger.debug(f"file {filename} downloaded to {target_filepath}")

     def exists(self, filename: str) -> bool:
         # FIXME this is a workaround for opendal python-binding do not have a exists method and no better
         # error handler here when opendal python-binding has a exists method, we should use it
         # more https://github.com/apache/opendal/blob/main/bindings/python/src/operator.rs
         try:
-            return self.op.stat(path=filename).mode.is_file()
-        except Exception as e:
+            res = self.op.stat(path=filename).mode.is_file()
+            logger.debug(f"file {filename} checked")
+            return res
+        except Exception:
             return False

     def delete(self, filename: str):
         if self.exists(filename):
             self.op.delete(path=filename)
+            logger.debug(f"file {filename} deleted")
+            return
+        logger.debug(f"file {filename} not found, skip delete")


@ -1,20 +0,0 @@
import pytest
from extensions.storage.opendal_storage import is_r2_endpoint
@pytest.mark.parametrize(
("endpoint", "expected"),
[
("https://bucket.r2.cloudflarestorage.com", True),
("https://custom-domain.r2.cloudflarestorage.com/", True),
("https://bucket.r2.cloudflarestorage.com/path", True),
("https://s3.amazonaws.com", False),
("https://storage.googleapis.com", False),
("http://localhost:9000", False),
("invalid-url", False),
("", False),
],
)
def test_is_r2_endpoint(endpoint: str, expected: bool):
assert is_r2_endpoint(endpoint) == expected


@@ -1,15 +1,12 @@
-import os
 from collections.abc import Generator
 from pathlib import Path

 import pytest

-from configs.middleware.storage.opendal_storage_config import OpenDALScheme
 from extensions.storage.opendal_storage import OpenDALStorage
 from tests.unit_tests.oss.__mock.base import (
     get_example_data,
     get_example_filename,
-    get_example_filepath,
     get_opendal_bucket,
 )
@@ -19,7 +16,7 @@ class TestOpenDAL:
     def setup_method(self, *args, **kwargs):
         """Executed before each test method."""
         self.storage = OpenDALStorage(
-            scheme=OpenDALScheme.FS,
+            scheme="fs",
             root=get_opendal_bucket(),
         )
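The same construction works outside the test harness; a minimal round trip with the `fs` scheme (the `storage-test` directory here is illustrative):

from extensions.storage.opendal_storage import OpenDALStorage

storage = OpenDALStorage(scheme="fs", root="storage-test")  # creates the directory if needed
storage.save("hello.txt", b"hi")
assert storage.load_once("hello.txt") == b"hi"
storage.delete("hello.txt")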


@@ -127,7 +127,7 @@ SERVER_WORKER_AMOUNT=
 # Defaults to gevent. If using windows, it can be switched to sync or solo.
 SERVER_WORKER_CLASS=

-# Similar to SERVER_WORKER_CLASS. Default is gevent.
+# Similar to SERVER_WORKER_CLASS.
 # If using windows, it can be switched to sync or solo.
 CELERY_WORKER_CLASS=

@@ -227,6 +227,7 @@ REDIS_PORT=6379
 REDIS_USERNAME=
 REDIS_PASSWORD=difyai123456
 REDIS_USE_SSL=false
+REDIS_DB=0

 # Whether to use Redis Sentinel mode.
 # If set to true, the application will automatically discover and connect to the master node through Sentinel.
@@ -281,57 +282,39 @@ CONSOLE_CORS_ALLOW_ORIGINS=*
 # ------------------------------

 # The type of storage to use for storing user files.
-# Supported values are `opendal` , `s3` , `azure-blob` , `google-storage`, `tencent-cos`, `huawei-obs`, `volcengine-tos`, `baidu-obs`, `supabase`
-# Default: `opendal`
 STORAGE_TYPE=opendal

 # Apache OpenDAL Configuration, refer to https://github.com/apache/opendal
 # The scheme for the OpenDAL storage.
-STORAGE_OPENDAL_SCHEME=fs
-# OpenDAL FS
+OPENDAL_SCHEME=fs
+# Configurations for OpenDAL Local File System.
 OPENDAL_FS_ROOT=storage
-# OpenDAL S3
-OPENDAL_S3_ROOT=/
-OPENDAL_S3_BUCKET=your-bucket-name
-OPENDAL_S3_ENDPOINT=https://s3.amazonaws.com
-OPENDAL_S3_ACCESS_KEY_ID=your-access-key
-OPENDAL_S3_SECRET_ACCESS_KEY=your-secret-key
-OPENDAL_S3_REGION=your-region
-OPENDAL_S3_SERVER_SIDE_ENCRYPTION=

 # S3 Configuration
+#
+S3_ENDPOINT=
+S3_REGION=us-east-1
+S3_BUCKET_NAME=difyai
+S3_ACCESS_KEY=
+S3_SECRET_KEY=
 # Whether to use AWS managed IAM roles for authenticating with the S3 service.
 # If set to false, the access key and secret key must be provided.
 S3_USE_AWS_MANAGED_IAM=false
-# The endpoint of the S3 service.
-S3_ENDPOINT=
-# The region of the S3 service.
-S3_REGION=us-east-1
-# The name of the S3 bucket to use for storing files.
-S3_BUCKET_NAME=difyai
-# The access key to use for authenticating with the S3 service.
-S3_ACCESS_KEY=
-# The secret key to use for authenticating with the S3 service.
-S3_SECRET_KEY=

 # Azure Blob Configuration
-# The name of the Azure Blob Storage account to use for storing files.
+#
 AZURE_BLOB_ACCOUNT_NAME=difyai
-# The access key to use for authenticating with the Azure Blob Storage account.
 AZURE_BLOB_ACCOUNT_KEY=difyai
-# The name of the Azure Blob Storage container to use for storing files.
 AZURE_BLOB_CONTAINER_NAME=difyai-container
-# The URL of the Azure Blob Storage account.
 AZURE_BLOB_ACCOUNT_URL=https://<your_account_name>.blob.core.windows.net

 # Google Storage Configuration
-# The name of the Google Storage bucket to use for storing files.
+#
 GOOGLE_STORAGE_BUCKET_NAME=your-bucket-name
-# The service account JSON key to use for authenticating with the Google Storage service.
 GOOGLE_STORAGE_SERVICE_ACCOUNT_JSON_BASE64=your-google-service-account-json-base64-string

 # The Alibaba Cloud OSS configurations,
-# only available when STORAGE_TYPE is `aliyun-oss`
+#
 ALIYUN_OSS_BUCKET_NAME=your-bucket-name
 ALIYUN_OSS_ACCESS_KEY=your-access-key
 ALIYUN_OSS_SECRET_KEY=your-secret-key
@@ -342,55 +325,47 @@ ALIYUN_OSS_AUTH_VERSION=v4
 ALIYUN_OSS_PATH=your-path

 # Tencent COS Configuration
-# The name of the Tencent COS bucket to use for storing files.
+#
 TENCENT_COS_BUCKET_NAME=your-bucket-name
-# The secret key to use for authenticating with the Tencent COS service.
 TENCENT_COS_SECRET_KEY=your-secret-key
-# The secret id to use for authenticating with the Tencent COS service.
 TENCENT_COS_SECRET_ID=your-secret-id
-# The region of the Tencent COS service.
 TENCENT_COS_REGION=your-region
-# The scheme of the Tencent COS service.
 TENCENT_COS_SCHEME=your-scheme

+# Oracle Storage Configuration
+#
+OCI_ENDPOINT=https://objectstorage.us-ashburn-1.oraclecloud.com
+OCI_BUCKET_NAME=your-bucket-name
+OCI_ACCESS_KEY=your-access-key
+OCI_SECRET_KEY=your-secret-key
+OCI_REGION=us-ashburn-1
+
 # Huawei OBS Configuration
-# The name of the Huawei OBS bucket to use for storing files.
+#
 HUAWEI_OBS_BUCKET_NAME=your-bucket-name
-# The secret key to use for authenticating with the Huawei OBS service.
 HUAWEI_OBS_SECRET_KEY=your-secret-key
-# The access key to use for authenticating with the Huawei OBS service.
 HUAWEI_OBS_ACCESS_KEY=your-access-key
-# The server url of the HUAWEI OBS service.
 HUAWEI_OBS_SERVER=your-server-url

 # Volcengine TOS Configuration
-# The name of the Volcengine TOS bucket to use for storing files.
+#
 VOLCENGINE_TOS_BUCKET_NAME=your-bucket-name
-# The secret key to use for authenticating with the Volcengine TOS service.
 VOLCENGINE_TOS_SECRET_KEY=your-secret-key
-# The access key to use for authenticating with the Volcengine TOS service.
 VOLCENGINE_TOS_ACCESS_KEY=your-access-key
-# The endpoint of the Volcengine TOS service.
 VOLCENGINE_TOS_ENDPOINT=your-server-url
-# The region of the Volcengine TOS service.
 VOLCENGINE_TOS_REGION=your-region

 # Baidu OBS Storage Configuration
-# The name of the Baidu OBS bucket to use for storing files.
+#
 BAIDU_OBS_BUCKET_NAME=your-bucket-name
-# The secret key to use for authenticating with the Baidu OBS service.
 BAIDU_OBS_SECRET_KEY=your-secret-key
-# The access key to use for authenticating with the Baidu OBS service.
 BAIDU_OBS_ACCESS_KEY=your-access-key
-# The endpoint of the Baidu OBS service.
 BAIDU_OBS_ENDPOINT=your-server-url

 # Supabase Storage Configuration
-# The name of the Supabase bucket to use for storing files.
+#
 SUPABASE_BUCKET_NAME=your-bucket-name
-# The api key to use for authenticating with the Supabase service.
 SUPABASE_API_KEY=your-access-key
-# The project endpoint url of the Supabase service.
 SUPABASE_URL=your-server-url

 # ------------------------------
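Because every key in this file is supposed to have a matching `${KEY:-default}` entry in the generated `docker-compose.yaml` (the "sync" half of this PR's title), drift between the two can be checked mechanically. A hedged sketch, assuming the repository layout used in this commit:

import re

from dotenv import dotenv_values

env_keys = set(dotenv_values("docker/.env.example"))
compose_text = open("docker/docker-compose.yaml").read()
referenced = set(re.findall(r"\$\{(\w+)", compose_text))
print(sorted(env_keys - referenced))  # .env keys with no compose counterpart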
@@ -403,28 +378,20 @@ VECTOR_STORE=weaviate

 # The Weaviate endpoint URL. Only available when VECTOR_STORE is `weaviate`.
 WEAVIATE_ENDPOINT=http://weaviate:8080
-# The Weaviate API key.
 WEAVIATE_API_KEY=WVF5YThaHlkYwhGUSmCRgsX3tD5ngdN8pkih

 # The Qdrant endpoint URL. Only available when VECTOR_STORE is `qdrant`.
 QDRANT_URL=http://qdrant:6333
-# The Qdrant API key.
 QDRANT_API_KEY=difyai123456
-# The Qdrant client timeout setting.
 QDRANT_CLIENT_TIMEOUT=20
-# The Qdrant client enable gRPC mode.
 QDRANT_GRPC_ENABLED=false
-# The Qdrant server gRPC mode PORT.
 QDRANT_GRPC_PORT=6334

 # Milvus configuration Only available when VECTOR_STORE is `milvus`.
 # The milvus uri.
 MILVUS_URI=http://127.0.0.1:19530
-# The milvus token.
 MILVUS_TOKEN=
-# The milvus username.
 MILVUS_USER=root
-# The milvus password.
 MILVUS_PASSWORD=Milvus

 # MyScale configuration, only available when VECTOR_STORE is `myscale`

@@ -478,8 +445,8 @@ ANALYTICDB_MAX_CONNECTION=5
 # TiDB vector configurations, only available when VECTOR_STORE is `tidb`
 TIDB_VECTOR_HOST=tidb
 TIDB_VECTOR_PORT=4000
-TIDB_VECTOR_USER=xxx.root
-TIDB_VECTOR_PASSWORD=xxxxxx
+TIDB_VECTOR_USER=
+TIDB_VECTOR_PASSWORD=
 TIDB_VECTOR_DATABASE=dify

 # Tidb on qdrant configuration, only available when VECTOR_STORE is `tidb_on_qdrant`

@@ -502,7 +469,7 @@ CHROMA_PORT=8000
 CHROMA_TENANT=default_tenant
 CHROMA_DATABASE=default_database
 CHROMA_AUTH_PROVIDER=chromadb.auth.token_authn.TokenAuthClientProvider
-CHROMA_AUTH_CREDENTIALS=xxxxxx
+CHROMA_AUTH_CREDENTIALS=

 # Oracle configuration, only available when VECTOR_STORE is `oracle`
 ORACLE_HOST=oracle

@@ -539,6 +506,7 @@ ELASTICSEARCH_HOST=0.0.0.0
 ELASTICSEARCH_PORT=9200
 ELASTICSEARCH_USERNAME=elastic
 ELASTICSEARCH_PASSWORD=elastic
+KIBANA_PORT=5601

 # baidu vector configurations, only available when VECTOR_STORE is `baidu`
 BAIDU_VECTOR_DB_ENDPOINT=http://127.0.0.1:5287

@@ -558,11 +526,10 @@ VIKINGDB_SCHEMA=http
 VIKINGDB_CONNECTION_TIMEOUT=30
 VIKINGDB_SOCKET_TIMEOUT=30

 # Lindorm configuration, only available when VECTOR_STORE is `lindorm`
-LINDORM_URL=http://ld-***************-proxy-search-pub.lindorm.aliyuncs.com:30070
-LINDORM_USERNAME=username
-LINDORM_PASSWORD=password
+LINDORM_URL=http://lindorm:30070
+LINDORM_USERNAME=lindorm
+LINDORM_PASSWORD=lindorm

 # OceanBase Vector configuration, only available when VECTOR_STORE is `oceanbase`
 OCEANBASE_VECTOR_HOST=oceanbase

@@ -570,8 +537,13 @@ OCEANBASE_VECTOR_PORT=2881
 OCEANBASE_VECTOR_USER=root@test
 OCEANBASE_VECTOR_PASSWORD=difyai123456
 OCEANBASE_VECTOR_DATABASE=test
+OCEANBASE_CLUSTER_NAME=difyai
 OCEANBASE_MEMORY_LIMIT=6G

+# Upstash Vector configuration, only available when VECTOR_STORE is `upstash`
+UPSTASH_VECTOR_URL=https://xxx-vector.upstash.io
+UPSTASH_VECTOR_TOKEN=dify
+
 # ------------------------------
 # Knowledge Configuration
 # ------------------------------
@@ -620,13 +592,10 @@ CODE_GENERATION_MAX_TOKENS=1024
 # It is generally recommended to use the more compatible base64 mode.
 # If configured as url, you need to configure FILES_URL as an externally accessible address so that the multi-modal model can access the image/video/audio/document.
 MULTIMODAL_SEND_FORMAT=base64
-
 # Upload image file size limit, default 10M.
 UPLOAD_IMAGE_FILE_SIZE_LIMIT=10
-
 # Upload video file size limit, default 100M.
 UPLOAD_VIDEO_FILE_SIZE_LIMIT=100
-
 # Upload audio file size limit, default 50M.
 UPLOAD_AUDIO_FILE_SIZE_LIMIT=50

@@ -639,10 +608,8 @@ UPLOAD_AUDIO_FILE_SIZE_LIMIT=50
 # all monitoring information is not reported to Sentry.
 # If not set, Sentry error reporting will be disabled.
 API_SENTRY_DSN=
-
 # API Service The reporting ratio of Sentry events, if it is 0.01, it is 1%.
 API_SENTRY_TRACES_SAMPLE_RATE=1.0
-
 # API Service The reporting ratio of Sentry profiles, if it is 0.01, it is 1%.
 API_SENTRY_PROFILES_SAMPLE_RATE=1.0

@@ -680,8 +647,10 @@ MAIL_TYPE=resend
 MAIL_DEFAULT_SEND_FROM=

 # API-Key for the Resend email provider, used when MAIL_TYPE is `resend`.
+RESEND_API_URL=https://api.resend.com
 RESEND_API_KEY=your-resend-api-key

 # SMTP server configuration, used when MAIL_TYPE is `smtp`
 SMTP_SERVER=
 SMTP_PORT=465

@@ -706,18 +675,19 @@ RESET_PASSWORD_TOKEN_EXPIRY_MINUTES=5
 # The sandbox service endpoint.
 CODE_EXECUTION_ENDPOINT=http://sandbox:8194
+CODE_EXECUTION_API_KEY=dify-sandbox
 CODE_MAX_NUMBER=9223372036854775807
 CODE_MIN_NUMBER=-9223372036854775808
 CODE_MAX_DEPTH=5
 CODE_MAX_PRECISION=20
 CODE_MAX_STRING_LENGTH=80000
+TEMPLATE_TRANSFORM_MAX_LENGTH=80000
 CODE_MAX_STRING_ARRAY_LENGTH=30
 CODE_MAX_OBJECT_ARRAY_LENGTH=30
 CODE_MAX_NUMBER_ARRAY_LENGTH=1000
 CODE_EXECUTION_CONNECT_TIMEOUT=10
 CODE_EXECUTION_READ_TIMEOUT=60
 CODE_EXECUTION_WRITE_TIMEOUT=10
-TEMPLATE_TRANSFORM_MAX_LENGTH=80000

 # Workflow runtime configuration
 WORKFLOW_MAX_EXECUTION_STEPS=500

@@ -946,3 +916,7 @@ CREATE_TIDB_SERVICE_JOB_ENABLED=false
 # Maximum number of submitted thread count in a ThreadPool for parallel node execution
 MAX_SUBMIT_COUNT=100
+
+# Proxy
+HTTP_PROXY=
+HTTPS_PROXY=


@@ -0,0 +1,576 @@
x-shared-env: &shared-api-worker-env
services:
# API service
api:
image: langgenius/dify-api:0.14.0
restart: always
environment:
# Use the shared environment variables.
<<: *shared-api-worker-env
# Startup mode, 'api' starts the API server.
MODE: api
SENTRY_DSN: ${API_SENTRY_DSN:-}
SENTRY_TRACES_SAMPLE_RATE: ${API_SENTRY_TRACES_SAMPLE_RATE:-1.0}
SENTRY_PROFILES_SAMPLE_RATE: ${API_SENTRY_PROFILES_SAMPLE_RATE:-1.0}
depends_on:
- db
- redis
volumes:
# Mount the storage directory to the container, for storing user files.
- ./volumes/app/storage:/app/api/storage
networks:
- ssrf_proxy_network
- default
# worker service
# The Celery worker for processing the queue.
worker:
image: langgenius/dify-api:0.14.0
restart: always
environment:
# Use the shared environment variables.
<<: *shared-api-worker-env
# Startup mode, 'worker' starts the Celery worker for processing the queue.
MODE: worker
SENTRY_DSN: ${API_SENTRY_DSN:-}
SENTRY_TRACES_SAMPLE_RATE: ${API_SENTRY_TRACES_SAMPLE_RATE:-1.0}
SENTRY_PROFILES_SAMPLE_RATE: ${API_SENTRY_PROFILES_SAMPLE_RATE:-1.0}
depends_on:
- db
- redis
volumes:
# Mount the storage directory to the container, for storing user files.
- ./volumes/app/storage:/app/api/storage
networks:
- ssrf_proxy_network
- default
# Frontend web application.
web:
image: langgenius/dify-web:0.14.0
restart: always
environment:
CONSOLE_API_URL: ${CONSOLE_API_URL:-}
APP_API_URL: ${APP_API_URL:-}
SENTRY_DSN: ${WEB_SENTRY_DSN:-}
NEXT_TELEMETRY_DISABLED: ${NEXT_TELEMETRY_DISABLED:-0}
TEXT_GENERATION_TIMEOUT_MS: ${TEXT_GENERATION_TIMEOUT_MS:-60000}
CSP_WHITELIST: ${CSP_WHITELIST:-}
# The postgres database.
db:
image: postgres:15-alpine
restart: always
environment:
PGUSER: ${PGUSER:-postgres}
POSTGRES_PASSWORD: ${POSTGRES_PASSWORD:-difyai123456}
POSTGRES_DB: ${POSTGRES_DB:-dify}
PGDATA: ${PGDATA:-/var/lib/postgresql/data/pgdata}
command: >
postgres -c 'max_connections=${POSTGRES_MAX_CONNECTIONS:-100}'
-c 'shared_buffers=${POSTGRES_SHARED_BUFFERS:-128MB}'
-c 'work_mem=${POSTGRES_WORK_MEM:-4MB}'
-c 'maintenance_work_mem=${POSTGRES_MAINTENANCE_WORK_MEM:-64MB}'
-c 'effective_cache_size=${POSTGRES_EFFECTIVE_CACHE_SIZE:-4096MB}'
volumes:
- ./volumes/db/data:/var/lib/postgresql/data
healthcheck:
test: ['CMD', 'pg_isready']
interval: 1s
timeout: 3s
retries: 30
# The redis cache.
redis:
image: redis:6-alpine
restart: always
environment:
REDISCLI_AUTH: ${REDIS_PASSWORD:-difyai123456}
volumes:
# Mount the redis data directory to the container.
- ./volumes/redis/data:/data
# Set the redis password when startup redis server.
command: redis-server --requirepass ${REDIS_PASSWORD:-difyai123456}
healthcheck:
test: ['CMD', 'redis-cli', 'ping']
# The DifySandbox
sandbox:
image: langgenius/dify-sandbox:0.2.10
restart: always
environment:
# The DifySandbox configurations
# Make sure you are changing this key for your deployment with a strong key.
# You can generate a strong key using `openssl rand -base64 42`.
API_KEY: ${SANDBOX_API_KEY:-dify-sandbox}
GIN_MODE: ${SANDBOX_GIN_MODE:-release}
WORKER_TIMEOUT: ${SANDBOX_WORKER_TIMEOUT:-15}
ENABLE_NETWORK: ${SANDBOX_ENABLE_NETWORK:-true}
HTTP_PROXY: ${SANDBOX_HTTP_PROXY:-http://ssrf_proxy:3128}
HTTPS_PROXY: ${SANDBOX_HTTPS_PROXY:-http://ssrf_proxy:3128}
SANDBOX_PORT: ${SANDBOX_PORT:-8194}
volumes:
- ./volumes/sandbox/dependencies:/dependencies
healthcheck:
test: ['CMD', 'curl', '-f', 'http://localhost:8194/health']
networks:
- ssrf_proxy_network
# ssrf_proxy server
# for more information, please refer to
# https://docs.dify.ai/learn-more/faq/install-faq#id-18.-why-is-ssrf_proxy-needed
ssrf_proxy:
image: ubuntu/squid:latest
restart: always
volumes:
- ./ssrf_proxy/squid.conf.template:/etc/squid/squid.conf.template
- ./ssrf_proxy/docker-entrypoint.sh:/docker-entrypoint-mount.sh
entrypoint:
[
'sh',
'-c',
"cp /docker-entrypoint-mount.sh /docker-entrypoint.sh && sed -i 's/\r$$//' /docker-entrypoint.sh && chmod +x /docker-entrypoint.sh && /docker-entrypoint.sh",
]
environment:
# pls clearly modify the squid env vars to fit your network environment.
HTTP_PORT: ${SSRF_HTTP_PORT:-3128}
COREDUMP_DIR: ${SSRF_COREDUMP_DIR:-/var/spool/squid}
REVERSE_PROXY_PORT: ${SSRF_REVERSE_PROXY_PORT:-8194}
SANDBOX_HOST: ${SSRF_SANDBOX_HOST:-sandbox}
SANDBOX_PORT: ${SANDBOX_PORT:-8194}
networks:
- ssrf_proxy_network
- default
# Certbot service
# use `docker-compose --profile certbot up` to start the certbot service.
certbot:
image: certbot/certbot
profiles:
- certbot
volumes:
- ./volumes/certbot/conf:/etc/letsencrypt
- ./volumes/certbot/www:/var/www/html
- ./volumes/certbot/logs:/var/log/letsencrypt
- ./volumes/certbot/conf/live:/etc/letsencrypt/live
- ./certbot/update-cert.template.txt:/update-cert.template.txt
- ./certbot/docker-entrypoint.sh:/docker-entrypoint.sh
environment:
- CERTBOT_EMAIL=${CERTBOT_EMAIL}
- CERTBOT_DOMAIN=${CERTBOT_DOMAIN}
- CERTBOT_OPTIONS=${CERTBOT_OPTIONS:-}
entrypoint: ['/docker-entrypoint.sh']
command: ['tail', '-f', '/dev/null']
# The nginx reverse proxy.
# used for reverse proxying the API service and Web service.
nginx:
image: nginx:latest
restart: always
volumes:
- ./nginx/nginx.conf.template:/etc/nginx/nginx.conf.template
- ./nginx/proxy.conf.template:/etc/nginx/proxy.conf.template
- ./nginx/https.conf.template:/etc/nginx/https.conf.template
- ./nginx/conf.d:/etc/nginx/conf.d
- ./nginx/docker-entrypoint.sh:/docker-entrypoint-mount.sh
- ./nginx/ssl:/etc/ssl # cert dir (legacy)
- ./volumes/certbot/conf/live:/etc/letsencrypt/live # cert dir (with certbot container)
- ./volumes/certbot/conf:/etc/letsencrypt
- ./volumes/certbot/www:/var/www/html
entrypoint:
[
'sh',
'-c',
"cp /docker-entrypoint-mount.sh /docker-entrypoint.sh && sed -i 's/\r$$//' /docker-entrypoint.sh && chmod +x /docker-entrypoint.sh && /docker-entrypoint.sh",
]
environment:
NGINX_SERVER_NAME: ${NGINX_SERVER_NAME:-_}
NGINX_HTTPS_ENABLED: ${NGINX_HTTPS_ENABLED:-false}
NGINX_SSL_PORT: ${NGINX_SSL_PORT:-443}
NGINX_PORT: ${NGINX_PORT:-80}
# You're required to add your own SSL certificates/keys to the `./nginx/ssl` directory
# and modify the env vars below in .env if HTTPS_ENABLED is true.
NGINX_SSL_CERT_FILENAME: ${NGINX_SSL_CERT_FILENAME:-dify.crt}
NGINX_SSL_CERT_KEY_FILENAME: ${NGINX_SSL_CERT_KEY_FILENAME:-dify.key}
NGINX_SSL_PROTOCOLS: ${NGINX_SSL_PROTOCOLS:-TLSv1.1 TLSv1.2 TLSv1.3}
NGINX_WORKER_PROCESSES: ${NGINX_WORKER_PROCESSES:-auto}
NGINX_CLIENT_MAX_BODY_SIZE: ${NGINX_CLIENT_MAX_BODY_SIZE:-15M}
NGINX_KEEPALIVE_TIMEOUT: ${NGINX_KEEPALIVE_TIMEOUT:-65}
NGINX_PROXY_READ_TIMEOUT: ${NGINX_PROXY_READ_TIMEOUT:-3600s}
NGINX_PROXY_SEND_TIMEOUT: ${NGINX_PROXY_SEND_TIMEOUT:-3600s}
NGINX_ENABLE_CERTBOT_CHALLENGE: ${NGINX_ENABLE_CERTBOT_CHALLENGE:-false}
CERTBOT_DOMAIN: ${CERTBOT_DOMAIN:-}
depends_on:
- api
- web
ports:
- '${EXPOSE_NGINX_PORT:-80}:${NGINX_PORT:-80}'
- '${EXPOSE_NGINX_SSL_PORT:-443}:${NGINX_SSL_PORT:-443}'
# The TiDB vector store.
# For production use, please refer to https://github.com/pingcap/tidb-docker-compose
tidb:
image: pingcap/tidb:v8.4.0
profiles:
- tidb
command:
- --store=unistore
restart: always
# The Weaviate vector store.
weaviate:
image: semitechnologies/weaviate:1.19.0
profiles:
- ''
- weaviate
restart: always
volumes:
# Mount the Weaviate data directory to the container.
- ./volumes/weaviate:/var/lib/weaviate
environment:
# The Weaviate configurations
# You can refer to the [Weaviate](https://weaviate.io/developers/weaviate/config-refs/env-vars) documentation for more information.
PERSISTENCE_DATA_PATH: ${WEAVIATE_PERSISTENCE_DATA_PATH:-/var/lib/weaviate}
QUERY_DEFAULTS_LIMIT: ${WEAVIATE_QUERY_DEFAULTS_LIMIT:-25}
AUTHENTICATION_ANONYMOUS_ACCESS_ENABLED: ${WEAVIATE_AUTHENTICATION_ANONYMOUS_ACCESS_ENABLED:-false}
DEFAULT_VECTORIZER_MODULE: ${WEAVIATE_DEFAULT_VECTORIZER_MODULE:-none}
CLUSTER_HOSTNAME: ${WEAVIATE_CLUSTER_HOSTNAME:-node1}
AUTHENTICATION_APIKEY_ENABLED: ${WEAVIATE_AUTHENTICATION_APIKEY_ENABLED:-true}
AUTHENTICATION_APIKEY_ALLOWED_KEYS: ${WEAVIATE_AUTHENTICATION_APIKEY_ALLOWED_KEYS:-WVF5YThaHlkYwhGUSmCRgsX3tD5ngdN8pkih}
AUTHENTICATION_APIKEY_USERS: ${WEAVIATE_AUTHENTICATION_APIKEY_USERS:-hello@dify.ai}
AUTHORIZATION_ADMINLIST_ENABLED: ${WEAVIATE_AUTHORIZATION_ADMINLIST_ENABLED:-true}
AUTHORIZATION_ADMINLIST_USERS: ${WEAVIATE_AUTHORIZATION_ADMINLIST_USERS:-hello@dify.ai}
# Qdrant vector store.
# (if used, you need to set VECTOR_STORE to qdrant in the api & worker service.)
qdrant:
image: langgenius/qdrant:v1.7.3
profiles:
- qdrant
restart: always
volumes:
- ./volumes/qdrant:/qdrant/storage
environment:
QDRANT_API_KEY: ${QDRANT_API_KEY:-difyai123456}
# The Couchbase vector store.
couchbase-server:
build: ./couchbase-server
profiles:
- couchbase
restart: always
environment:
- CLUSTER_NAME=dify_search
- COUCHBASE_ADMINISTRATOR_USERNAME=${COUCHBASE_USER:-Administrator}
- COUCHBASE_ADMINISTRATOR_PASSWORD=${COUCHBASE_PASSWORD:-password}
- COUCHBASE_BUCKET=${COUCHBASE_BUCKET_NAME:-Embeddings}
- COUCHBASE_BUCKET_RAMSIZE=512
- COUCHBASE_RAM_SIZE=2048
- COUCHBASE_EVENTING_RAM_SIZE=512
- COUCHBASE_INDEX_RAM_SIZE=512
- COUCHBASE_FTS_RAM_SIZE=1024
hostname: couchbase-server
container_name: couchbase-server
working_dir: /opt/couchbase
stdin_open: true
tty: true
entrypoint: [""]
command: sh -c "/opt/couchbase/init/init-cbserver.sh"
volumes:
- ./volumes/couchbase/data:/opt/couchbase/var/lib/couchbase/data
healthcheck:
# ensure bucket was created before proceeding
test: [ "CMD-SHELL", "curl -s -f -u Administrator:password http://localhost:8091/pools/default/buckets | grep -q '\\[{' || exit 1" ]
interval: 10s
retries: 10
start_period: 30s
timeout: 10s
# The pgvector vector database.
pgvector:
image: pgvector/pgvector:pg16
profiles:
- pgvector
restart: always
environment:
PGUSER: ${PGVECTOR_PGUSER:-postgres}
# The password for the default postgres user.
POSTGRES_PASSWORD: ${PGVECTOR_POSTGRES_PASSWORD:-difyai123456}
# The name of the default postgres database.
POSTGRES_DB: ${PGVECTOR_POSTGRES_DB:-dify}
# postgres data directory
PGDATA: ${PGVECTOR_PGDATA:-/var/lib/postgresql/data/pgdata}
volumes:
- ./volumes/pgvector/data:/var/lib/postgresql/data
healthcheck:
test: ['CMD', 'pg_isready']
interval: 1s
timeout: 3s
retries: 30
# pgvecto-rs vector store
pgvecto-rs:
image: tensorchord/pgvecto-rs:pg16-v0.3.0
profiles:
- pgvecto-rs
restart: always
environment:
PGUSER: ${PGVECTOR_PGUSER:-postgres}
# The password for the default postgres user.
POSTGRES_PASSWORD: ${PGVECTOR_POSTGRES_PASSWORD:-difyai123456}
# The name of the default postgres database.
POSTGRES_DB: ${PGVECTOR_POSTGRES_DB:-dify}
# postgres data directory
PGDATA: ${PGVECTOR_PGDATA:-/var/lib/postgresql/data/pgdata}
volumes:
- ./volumes/pgvecto_rs/data:/var/lib/postgresql/data
healthcheck:
test: ['CMD', 'pg_isready']
interval: 1s
timeout: 3s
retries: 30
# Chroma vector database
chroma:
image: ghcr.io/chroma-core/chroma:0.5.20
profiles:
- chroma
restart: always
volumes:
- ./volumes/chroma:/chroma/chroma
environment:
CHROMA_SERVER_AUTHN_CREDENTIALS: ${CHROMA_SERVER_AUTHN_CREDENTIALS:-difyai123456}
CHROMA_SERVER_AUTHN_PROVIDER: ${CHROMA_SERVER_AUTHN_PROVIDER:-chromadb.auth.token_authn.TokenAuthenticationServerProvider}
IS_PERSISTENT: ${CHROMA_IS_PERSISTENT:-TRUE}
# OceanBase vector database
oceanbase:
image: quay.io/oceanbase/oceanbase-ce:4.3.3.0-100000142024101215
profiles:
- oceanbase
restart: always
volumes:
- ./volumes/oceanbase/data:/root/ob
- ./volumes/oceanbase/conf:/root/.obd/cluster
- ./volumes/oceanbase/init.d:/root/boot/init.d
environment:
OB_MEMORY_LIMIT: ${OCEANBASE_MEMORY_LIMIT:-6G}
OB_SYS_PASSWORD: ${OCEANBASE_VECTOR_PASSWORD:-difyai123456}
OB_TENANT_PASSWORD: ${OCEANBASE_VECTOR_PASSWORD:-difyai123456}
OB_CLUSTER_NAME: ${OCEANBASE_CLUSTER_NAME:-difyai}
OB_SERVER_IP: '127.0.0.1'
# Oracle vector database
oracle:
image: container-registry.oracle.com/database/free:latest
profiles:
- oracle
restart: always
volumes:
- source: oradata
type: volume
target: /opt/oracle/oradata
- ./startupscripts:/opt/oracle/scripts/startup
environment:
ORACLE_PWD: ${ORACLE_PWD:-Dify123456}
ORACLE_CHARACTERSET: ${ORACLE_CHARACTERSET:-AL32UTF8}
# Milvus vector database services
etcd:
container_name: milvus-etcd
image: quay.io/coreos/etcd:v3.5.5
profiles:
- milvus
environment:
ETCD_AUTO_COMPACTION_MODE: ${ETCD_AUTO_COMPACTION_MODE:-revision}
ETCD_AUTO_COMPACTION_RETENTION: ${ETCD_AUTO_COMPACTION_RETENTION:-1000}
ETCD_QUOTA_BACKEND_BYTES: ${ETCD_QUOTA_BACKEND_BYTES:-4294967296}
ETCD_SNAPSHOT_COUNT: ${ETCD_SNAPSHOT_COUNT:-50000}
volumes:
- ./volumes/milvus/etcd:/etcd
command: etcd -advertise-client-urls=http://127.0.0.1:2379 -listen-client-urls http://0.0.0.0:2379 --data-dir /etcd
healthcheck:
test: ['CMD', 'etcdctl', 'endpoint', 'health']
interval: 30s
timeout: 20s
retries: 3
networks:
- milvus
minio:
container_name: milvus-minio
image: minio/minio:RELEASE.2023-03-20T20-16-18Z
profiles:
- milvus
environment:
MINIO_ACCESS_KEY: ${MINIO_ACCESS_KEY:-minioadmin}
MINIO_SECRET_KEY: ${MINIO_SECRET_KEY:-minioadmin}
volumes:
- ./volumes/milvus/minio:/minio_data
command: minio server /minio_data --console-address ":9001"
healthcheck:
test: ['CMD', 'curl', '-f', 'http://localhost:9000/minio/health/live']
interval: 30s
timeout: 20s
retries: 3
networks:
- milvus
milvus-standalone:
container_name: milvus-standalone
image: milvusdb/milvus:v2.3.1
profiles:
- milvus
command: ['milvus', 'run', 'standalone']
environment:
ETCD_ENDPOINTS: ${ETCD_ENDPOINTS:-etcd:2379}
MINIO_ADDRESS: ${MINIO_ADDRESS:-minio:9000}
common.security.authorizationEnabled: ${MILVUS_AUTHORIZATION_ENABLED:-true}
volumes:
- ./volumes/milvus/milvus:/var/lib/milvus
healthcheck:
test: ['CMD', 'curl', '-f', 'http://localhost:9091/healthz']
interval: 30s
start_period: 90s
timeout: 20s
retries: 3
depends_on:
- etcd
- minio
ports:
- 19530:19530
- 9091:9091
networks:
- milvus
# Opensearch vector database
opensearch:
container_name: opensearch
image: opensearchproject/opensearch:latest
profiles:
- opensearch
environment:
discovery.type: ${OPENSEARCH_DISCOVERY_TYPE:-single-node}
bootstrap.memory_lock: ${OPENSEARCH_BOOTSTRAP_MEMORY_LOCK:-true}
OPENSEARCH_JAVA_OPTS: -Xms${OPENSEARCH_JAVA_OPTS_MIN:-512m} -Xmx${OPENSEARCH_JAVA_OPTS_MAX:-1024m}
OPENSEARCH_INITIAL_ADMIN_PASSWORD: ${OPENSEARCH_INITIAL_ADMIN_PASSWORD:-Qazwsxedc!@#123}
ulimits:
memlock:
soft: ${OPENSEARCH_MEMLOCK_SOFT:--1}
hard: ${OPENSEARCH_MEMLOCK_HARD:--1}
nofile:
soft: ${OPENSEARCH_NOFILE_SOFT:-65536}
hard: ${OPENSEARCH_NOFILE_HARD:-65536}
volumes:
- ./volumes/opensearch/data:/usr/share/opensearch/data
networks:
- opensearch-net
opensearch-dashboards:
container_name: opensearch-dashboards
image: opensearchproject/opensearch-dashboards:latest
profiles:
- opensearch
environment:
OPENSEARCH_HOSTS: '["https://opensearch:9200"]'
volumes:
- ./volumes/opensearch/opensearch_dashboards.yml:/usr/share/opensearch-dashboards/config/opensearch_dashboards.yml
networks:
- opensearch-net
depends_on:
- opensearch
# MyScale vector database
myscale:
container_name: myscale
image: myscale/myscaledb:1.6.4
profiles:
- myscale
restart: always
tty: true
volumes:
- ./volumes/myscale/data:/var/lib/clickhouse
- ./volumes/myscale/log:/var/log/clickhouse-server
- ./volumes/myscale/config/users.d/custom_users_config.xml:/etc/clickhouse-server/users.d/custom_users_config.xml
ports:
- ${MYSCALE_PORT:-8123}:${MYSCALE_PORT:-8123}
# https://www.elastic.co/guide/en/elasticsearch/reference/current/settings.html
# https://www.elastic.co/guide/en/elasticsearch/reference/current/docker.html#docker-prod-prerequisites
elasticsearch:
image: docker.elastic.co/elasticsearch/elasticsearch:8.14.3
container_name: elasticsearch
profiles:
- elasticsearch
restart: always
volumes:
- dify_es01_data:/usr/share/elasticsearch/data
environment:
ELASTIC_PASSWORD: ${ELASTICSEARCH_PASSWORD:-elastic}
cluster.name: dify-es-cluster
node.name: dify-es0
discovery.type: single-node
xpack.license.self_generated.type: trial
xpack.security.enabled: 'true'
xpack.security.enrollment.enabled: 'false'
xpack.security.http.ssl.enabled: 'false'
ports:
- ${ELASTICSEARCH_PORT:-9200}:9200
healthcheck:
test: ['CMD', 'curl', '-s', 'http://localhost:9200/_cluster/health?pretty']
interval: 30s
timeout: 10s
retries: 50
# https://www.elastic.co/guide/en/kibana/current/docker.html
# https://www.elastic.co/guide/en/kibana/current/settings.html
kibana:
image: docker.elastic.co/kibana/kibana:8.14.3
container_name: kibana
profiles:
- elasticsearch
depends_on:
- elasticsearch
restart: always
environment:
XPACK_ENCRYPTEDSAVEDOBJECTS_ENCRYPTIONKEY: d1a66dfd-c4d3-4a0a-8290-2abcb83ab3aa
NO_PROXY: localhost,127.0.0.1,elasticsearch,kibana
XPACK_SECURITY_ENABLED: 'true'
XPACK_SECURITY_ENROLLMENT_ENABLED: 'false'
XPACK_SECURITY_HTTP_SSL_ENABLED: 'false'
XPACK_FLEET_ISAIRGAPPED: 'true'
I18N_LOCALE: zh-CN
SERVER_PORT: '5601'
ELASTICSEARCH_HOSTS: http://elasticsearch:9200
ports:
- ${KIBANA_PORT:-5601}:5601
healthcheck:
test: ['CMD-SHELL', 'curl -s http://localhost:5601 >/dev/null || exit 1']
interval: 30s
timeout: 10s
retries: 3
# unstructured.
# (if used, you need to set ETL_TYPE to Unstructured in the api & worker service.)
unstructured:
image: downloads.unstructured.io/unstructured-io/unstructured-api:latest
profiles:
- unstructured
restart: always
volumes:
- ./volumes/unstructured:/app/data
networks:
# create a network between sandbox, api and ssrf_proxy, and can not access outside.
ssrf_proxy_network:
driver: bridge
internal: true
milvus:
driver: bridge
opensearch-net:
driver: bridge
internal: true
volumes:
oradata:
dify_es01_data:
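The `x-shared-env: &shared-api-worker-env` block at the top of this file works through standard YAML anchors: each service's `environment` pulls the whole map in with the `<<: *shared-api-worker-env` merge key and then overrides individual entries such as `MODE`. A small PyYAML demonstration of the mechanism:

import yaml

doc = """
x-shared-env: &shared-api-worker-env
  LOG_LEVEL: INFO
services:
  api:
    environment:
      <<: *shared-api-worker-env
      MODE: api
"""
# the merge key expands the anchored map; explicit keys override merged ones
print(yaml.safe_load(doc)["services"]["api"]["environment"])
# {'LOG_LEVEL': 'INFO', 'MODE': 'api'}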


@@ -1,28 +1,33 @@
+# ==================================================================
+# WARNING: This file is auto-generated by generate_docker_compose
+# Do not modify this file directly. Instead, update the .env.example
+# or docker-compose-template.yaml and regenerate this file.
+# ==================================================================
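The regeneration step the warning refers to can be pictured as reading the defaults out of docker/.env.example and emitting one `${KEY:-default}` interpolation per key under `x-shared-env`. A hypothetical sketch; the real generate_docker_compose script may well differ:

from dotenv import dotenv_values


def shared_env_block(env_example_path: str = "docker/.env.example") -> str:
    # one interpolated entry per key in .env.example, keeping its default
    lines = ["x-shared-env: &shared-api-worker-env"]
    for key, default in dotenv_values(env_example_path).items():
        lines.append(f"  {key}: ${{{key}:-{default or ''}}}")
    return "\n".join(lines)


print(shared_env_block())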
 x-shared-env: &shared-api-worker-env
-  WORKFLOW_FILE_UPLOAD_LIMIT: ${WORKFLOW_FILE_UPLOAD_LIMIT:-10}
+  CONSOLE_API_URL: ${CONSOLE_API_URL:-}
+  CONSOLE_WEB_URL: ${CONSOLE_WEB_URL:-}
+  SERVICE_API_URL: ${SERVICE_API_URL:-}
+  APP_API_URL: ${APP_API_URL:-}
+  APP_WEB_URL: ${APP_WEB_URL:-}
+  FILES_URL: ${FILES_URL:-}
   LOG_LEVEL: ${LOG_LEVEL:-INFO}
-  LOG_FILE: ${LOG_FILE:-}
+  LOG_FILE: ${LOG_FILE:-/app/logs/server.log}
   LOG_FILE_MAX_SIZE: ${LOG_FILE_MAX_SIZE:-20}
   LOG_FILE_BACKUP_COUNT: ${LOG_FILE_BACKUP_COUNT:-5}
-  # Log dateformat
-  LOG_DATEFORMAT: ${LOG_DATEFORMAT:-%Y-%m-%d %H:%M:%S}
-  # Log Timezone
+  LOG_DATEFORMAT: ${LOG_DATEFORMAT:-"%Y-%m-%d %H:%M:%S"}
   LOG_TZ: ${LOG_TZ:-UTC}
   DEBUG: ${DEBUG:-false}
   FLASK_DEBUG: ${FLASK_DEBUG:-false}
   SECRET_KEY: ${SECRET_KEY:-sk-9f73s3ljTXVcMT3Blb3ljTqtsKiGHXVcMT3BlbkFJLK7U}
   INIT_PASSWORD: ${INIT_PASSWORD:-}
-  CONSOLE_WEB_URL: ${CONSOLE_WEB_URL:-}
-  CONSOLE_API_URL: ${CONSOLE_API_URL:-}
-  SERVICE_API_URL: ${SERVICE_API_URL:-}
-  APP_WEB_URL: ${APP_WEB_URL:-}
-  CHECK_UPDATE_URL: ${CHECK_UPDATE_URL:-https://updates.dify.ai}
-  OPENAI_API_BASE: ${OPENAI_API_BASE:-https://api.openai.com/v1}
-  FILES_URL: ${FILES_URL:-}
-  FILES_ACCESS_TIMEOUT: ${FILES_ACCESS_TIMEOUT:-300}
-  APP_MAX_ACTIVE_REQUESTS: ${APP_MAX_ACTIVE_REQUESTS:-0}
-  MIGRATION_ENABLED: ${MIGRATION_ENABLED:-true}
   DEPLOY_ENV: ${DEPLOY_ENV:-PRODUCTION}
+  CHECK_UPDATE_URL: ${CHECK_UPDATE_URL:-"https://updates.dify.ai"}
+  OPENAI_API_BASE: ${OPENAI_API_BASE:-"https://api.openai.com/v1"}
+  MIGRATION_ENABLED: ${MIGRATION_ENABLED:-true}
+  FILES_ACCESS_TIMEOUT: ${FILES_ACCESS_TIMEOUT:-300}
+  ACCESS_TOKEN_EXPIRE_MINUTES: ${ACCESS_TOKEN_EXPIRE_MINUTES:-60}
+  APP_MAX_ACTIVE_REQUESTS: ${APP_MAX_ACTIVE_REQUESTS:-0}
   DIFY_BIND_ADDRESS: ${DIFY_BIND_ADDRESS:-0.0.0.0}
   DIFY_PORT: ${DIFY_PORT:-5001}
   SERVER_WORKER_AMOUNT: ${SERVER_WORKER_AMOUNT:-}

@@ -43,6 +48,11 @@ x-shared-env: &shared-api-worker-env
   SQLALCHEMY_POOL_SIZE: ${SQLALCHEMY_POOL_SIZE:-30}
   SQLALCHEMY_POOL_RECYCLE: ${SQLALCHEMY_POOL_RECYCLE:-3600}
   SQLALCHEMY_ECHO: ${SQLALCHEMY_ECHO:-false}
+  POSTGRES_MAX_CONNECTIONS: ${POSTGRES_MAX_CONNECTIONS:-100}
+  POSTGRES_SHARED_BUFFERS: ${POSTGRES_SHARED_BUFFERS:-128MB}
+  POSTGRES_WORK_MEM: ${POSTGRES_WORK_MEM:-4MB}
+  POSTGRES_MAINTENANCE_WORK_MEM: ${POSTGRES_MAINTENANCE_WORK_MEM:-64MB}
+  POSTGRES_EFFECTIVE_CACHE_SIZE: ${POSTGRES_EFFECTIVE_CACHE_SIZE:-4096MB}
   REDIS_HOST: ${REDIS_HOST:-redis}
   REDIS_PORT: ${REDIS_PORT:-6379}
   REDIS_USERNAME: ${REDIS_USERNAME:-}

@@ -55,75 +65,73 @@ x-shared-env: &shared-api-worker-env
   REDIS_SENTINEL_USERNAME: ${REDIS_SENTINEL_USERNAME:-}
   REDIS_SENTINEL_PASSWORD: ${REDIS_SENTINEL_PASSWORD:-}
   REDIS_SENTINEL_SOCKET_TIMEOUT: ${REDIS_SENTINEL_SOCKET_TIMEOUT:-0.1}
-  REDIS_CLUSTERS: ${REDIS_CLUSTERS:-}
   REDIS_USE_CLUSTERS: ${REDIS_USE_CLUSTERS:-false}
+  REDIS_CLUSTERS: ${REDIS_CLUSTERS:-}
   REDIS_CLUSTERS_PASSWORD: ${REDIS_CLUSTERS_PASSWORD:-}
-  ACCESS_TOKEN_EXPIRE_MINUTES: ${ACCESS_TOKEN_EXPIRE_MINUTES:-60}
-  CELERY_BROKER_URL: ${CELERY_BROKER_URL:-redis://:difyai123456@redis:6379/1}
+  CELERY_BROKER_URL: ${CELERY_BROKER_URL:-"redis://:difyai123456@redis:6379/1"}
   BROKER_USE_SSL: ${BROKER_USE_SSL:-false}
   CELERY_USE_SENTINEL: ${CELERY_USE_SENTINEL:-false}
   CELERY_SENTINEL_MASTER_NAME: ${CELERY_SENTINEL_MASTER_NAME:-}
   CELERY_SENTINEL_SOCKET_TIMEOUT: ${CELERY_SENTINEL_SOCKET_TIMEOUT:-0.1}
   WEB_API_CORS_ALLOW_ORIGINS: ${WEB_API_CORS_ALLOW_ORIGINS:-*}
   CONSOLE_CORS_ALLOW_ORIGINS: ${CONSOLE_CORS_ALLOW_ORIGINS:-*}
-  STORAGE_TYPE: ${STORAGE_TYPE:-local}
-  STORAGE_LOCAL_PATH: ${STORAGE_LOCAL_PATH:-storage}
-  S3_USE_AWS_MANAGED_IAM: ${S3_USE_AWS_MANAGED_IAM:-false}
+  STORAGE_TYPE: ${STORAGE_TYPE:-opendal}
+  OPENDAL_SCHEME: ${OPENDAL_SCHEME:-fs}
+  OPENDAL_FS_ROOT: ${OPENDAL_FS_ROOT:-storage}
   S3_ENDPOINT: ${S3_ENDPOINT:-}
-  S3_BUCKET_NAME: ${S3_BUCKET_NAME:-}
+  S3_REGION: ${S3_REGION:-us-east-1}
+  S3_BUCKET_NAME: ${S3_BUCKET_NAME:-difyai}
   S3_ACCESS_KEY: ${S3_ACCESS_KEY:-}
   S3_SECRET_KEY: ${S3_SECRET_KEY:-}
-  S3_REGION: ${S3_REGION:-us-east-1}
+  S3_USE_AWS_MANAGED_IAM: ${S3_USE_AWS_MANAGED_IAM:-false}
-  AZURE_BLOB_ACCOUNT_NAME: ${AZURE_BLOB_ACCOUNT_NAME:-}
-  AZURE_BLOB_ACCOUNT_KEY: ${AZURE_BLOB_ACCOUNT_KEY:-}
-  AZURE_BLOB_CONTAINER_NAME: ${AZURE_BLOB_CONTAINER_NAME:-}
-  AZURE_BLOB_ACCOUNT_URL: ${AZURE_BLOB_ACCOUNT_URL:-}
+  AZURE_BLOB_ACCOUNT_NAME: ${AZURE_BLOB_ACCOUNT_NAME:-difyai}
+  AZURE_BLOB_ACCOUNT_KEY: ${AZURE_BLOB_ACCOUNT_KEY:-difyai}
+  AZURE_BLOB_CONTAINER_NAME: ${AZURE_BLOB_CONTAINER_NAME:-difyai-container}
+  AZURE_BLOB_ACCOUNT_URL: ${AZURE_BLOB_ACCOUNT_URL:-"https://<your_account_name>.blob.core.windows.net"}
-  GOOGLE_STORAGE_BUCKET_NAME: ${GOOGLE_STORAGE_BUCKET_NAME:-}
-  GOOGLE_STORAGE_SERVICE_ACCOUNT_JSON_BASE64: ${GOOGLE_STORAGE_SERVICE_ACCOUNT_JSON_BASE64:-}
+  GOOGLE_STORAGE_BUCKET_NAME: ${GOOGLE_STORAGE_BUCKET_NAME:-your-bucket-name}
+  GOOGLE_STORAGE_SERVICE_ACCOUNT_JSON_BASE64: ${GOOGLE_STORAGE_SERVICE_ACCOUNT_JSON_BASE64:-your-google-service-account-json-base64-string}
-  ALIYUN_OSS_BUCKET_NAME: ${ALIYUN_OSS_BUCKET_NAME:-}
-  ALIYUN_OSS_ACCESS_KEY: ${ALIYUN_OSS_ACCESS_KEY:-}
-  ALIYUN_OSS_SECRET_KEY: ${ALIYUN_OSS_SECRET_KEY:-}
-  ALIYUN_OSS_ENDPOINT: ${ALIYUN_OSS_ENDPOINT:-}
-  ALIYUN_OSS_REGION: ${ALIYUN_OSS_REGION:-}
+  ALIYUN_OSS_BUCKET_NAME: ${ALIYUN_OSS_BUCKET_NAME:-your-bucket-name}
+  ALIYUN_OSS_ACCESS_KEY: ${ALIYUN_OSS_ACCESS_KEY:-your-access-key}
+  ALIYUN_OSS_SECRET_KEY: ${ALIYUN_OSS_SECRET_KEY:-your-secret-key}
+  ALIYUN_OSS_ENDPOINT: ${ALIYUN_OSS_ENDPOINT:-"https://oss-ap-southeast-1-internal.aliyuncs.com"}
+  ALIYUN_OSS_REGION: ${ALIYUN_OSS_REGION:-ap-southeast-1}
   ALIYUN_OSS_AUTH_VERSION: ${ALIYUN_OSS_AUTH_VERSION:-v4}
-  ALIYUN_OSS_PATH: ${ALIYUN_OSS_PATH:-}
+  ALIYUN_OSS_PATH: ${ALIYUN_OSS_PATH:-your-path}
-  TENCENT_COS_BUCKET_NAME: ${TENCENT_COS_BUCKET_NAME:-}
-  TENCENT_COS_SECRET_KEY: ${TENCENT_COS_SECRET_KEY:-}
-  TENCENT_COS_SECRET_ID: ${TENCENT_COS_SECRET_ID:-}
-  TENCENT_COS_REGION: ${TENCENT_COS_REGION:-}
-  TENCENT_COS_SCHEME: ${TENCENT_COS_SCHEME:-}
+  TENCENT_COS_BUCKET_NAME: ${TENCENT_COS_BUCKET_NAME:-your-bucket-name}
+  TENCENT_COS_SECRET_KEY: ${TENCENT_COS_SECRET_KEY:-your-secret-key}
+  TENCENT_COS_SECRET_ID: ${TENCENT_COS_SECRET_ID:-your-secret-id}
+  TENCENT_COS_REGION: ${TENCENT_COS_REGION:-your-region}
+  TENCENT_COS_SCHEME: ${TENCENT_COS_SCHEME:-your-scheme}
-  HUAWEI_OBS_BUCKET_NAME: ${HUAWEI_OBS_BUCKET_NAME:-}
-  HUAWEI_OBS_SECRET_KEY: ${HUAWEI_OBS_SECRET_KEY:-}
-  HUAWEI_OBS_ACCESS_KEY: ${HUAWEI_OBS_ACCESS_KEY:-}
-  HUAWEI_OBS_SERVER: ${HUAWEI_OBS_SERVER:-}
-  OCI_ENDPOINT: ${OCI_ENDPOINT:-}
-  OCI_BUCKET_NAME: ${OCI_BUCKET_NAME:-}
-  OCI_ACCESS_KEY: ${OCI_ACCESS_KEY:-}
-  OCI_SECRET_KEY: ${OCI_SECRET_KEY:-}
-  OCI_REGION: ${OCI_REGION:-}
+  OCI_ENDPOINT: ${OCI_ENDPOINT:-"https://objectstorage.us-ashburn-1.oraclecloud.com"}
+  OCI_BUCKET_NAME: ${OCI_BUCKET_NAME:-your-bucket-name}
+  OCI_ACCESS_KEY: ${OCI_ACCESS_KEY:-your-access-key}
+  OCI_SECRET_KEY: ${OCI_SECRET_KEY:-your-secret-key}
+  OCI_REGION: ${OCI_REGION:-us-ashburn-1}
+  HUAWEI_OBS_BUCKET_NAME: ${HUAWEI_OBS_BUCKET_NAME:-your-bucket-name}
+  HUAWEI_OBS_SECRET_KEY: ${HUAWEI_OBS_SECRET_KEY:-your-secret-key}
+  HUAWEI_OBS_ACCESS_KEY: ${HUAWEI_OBS_ACCESS_KEY:-your-access-key}
+  HUAWEI_OBS_SERVER: ${HUAWEI_OBS_SERVER:-your-server-url}
-  VOLCENGINE_TOS_BUCKET_NAME: ${VOLCENGINE_TOS_BUCKET_NAME:-}
-  VOLCENGINE_TOS_SECRET_KEY: ${VOLCENGINE_TOS_SECRET_KEY:-}
-  VOLCENGINE_TOS_ACCESS_KEY: ${VOLCENGINE_TOS_ACCESS_KEY:-}
-  VOLCENGINE_TOS_ENDPOINT: ${VOLCENGINE_TOS_ENDPOINT:-}
-  VOLCENGINE_TOS_REGION: ${VOLCENGINE_TOS_REGION:-}
+  VOLCENGINE_TOS_BUCKET_NAME: ${VOLCENGINE_TOS_BUCKET_NAME:-your-bucket-name}
+  VOLCENGINE_TOS_SECRET_KEY: ${VOLCENGINE_TOS_SECRET_KEY:-your-secret-key}
+  VOLCENGINE_TOS_ACCESS_KEY: ${VOLCENGINE_TOS_ACCESS_KEY:-your-access-key}
+  VOLCENGINE_TOS_ENDPOINT: ${VOLCENGINE_TOS_ENDPOINT:-your-server-url}
+  VOLCENGINE_TOS_REGION: ${VOLCENGINE_TOS_REGION:-your-region}
-  BAIDU_OBS_BUCKET_NAME: ${BAIDU_OBS_BUCKET_NAME:-}
-  BAIDU_OBS_SECRET_KEY: ${BAIDU_OBS_SECRET_KEY:-}
-  BAIDU_OBS_ACCESS_KEY: ${BAIDU_OBS_ACCESS_KEY:-}
-  BAIDU_OBS_ENDPOINT: ${BAIDU_OBS_ENDPOINT:-}
+  BAIDU_OBS_BUCKET_NAME: ${BAIDU_OBS_BUCKET_NAME:-your-bucket-name}
+  BAIDU_OBS_SECRET_KEY: ${BAIDU_OBS_SECRET_KEY:-your-secret-key}
+  BAIDU_OBS_ACCESS_KEY: ${BAIDU_OBS_ACCESS_KEY:-your-access-key}
+  BAIDU_OBS_ENDPOINT: ${BAIDU_OBS_ENDPOINT:-your-server-url}
+  SUPABASE_BUCKET_NAME: ${SUPABASE_BUCKET_NAME:-your-bucket-name}
+  SUPABASE_API_KEY: ${SUPABASE_API_KEY:-your-access-key}
+  SUPABASE_URL: ${SUPABASE_URL:-your-server-url}
   VECTOR_STORE: ${VECTOR_STORE:-weaviate}
-  WEAVIATE_ENDPOINT: ${WEAVIATE_ENDPOINT:-http://weaviate:8080}
+  WEAVIATE_ENDPOINT: ${WEAVIATE_ENDPOINT:-"http://weaviate:8080"}
   WEAVIATE_API_KEY: ${WEAVIATE_API_KEY:-WVF5YThaHlkYwhGUSmCRgsX3tD5ngdN8pkih}
-  QDRANT_URL: ${QDRANT_URL:-http://qdrant:6333}
+  QDRANT_URL: ${QDRANT_URL:-"http://qdrant:6333"}
   QDRANT_API_KEY: ${QDRANT_API_KEY:-difyai123456}
   QDRANT_CLIENT_TIMEOUT: ${QDRANT_CLIENT_TIMEOUT:-20}
   QDRANT_GRPC_ENABLED: ${QDRANT_GRPC_ENABLED:-false}
   QDRANT_GRPC_PORT: ${QDRANT_GRPC_PORT:-6334}
-  COUCHBASE_CONNECTION_STRING: ${COUCHBASE_CONNECTION_STRING:-'couchbase-server'}
-  COUCHBASE_USER: ${COUCHBASE_USER:-Administrator}
-  COUCHBASE_PASSWORD: ${COUCHBASE_PASSWORD:-password}
-  COUCHBASE_BUCKET_NAME: ${COUCHBASE_BUCKET_NAME:-Embeddings}
-  COUCHBASE_SCOPE_NAME: ${COUCHBASE_SCOPE_NAME:-_default}
-  MILVUS_URI: ${MILVUS_URI:-http://127.0.0.1:19530}
+  MILVUS_URI: ${MILVUS_URI:-"http://127.0.0.1:19530"}
   MILVUS_TOKEN: ${MILVUS_TOKEN:-}
   MILVUS_USER: ${MILVUS_USER:-root}
   MILVUS_PASSWORD: ${MILVUS_PASSWORD:-Milvus}

@@ -133,161 +141,248 @@ x-shared-env: &shared-api-worker-env
   MYSCALE_PASSWORD: ${MYSCALE_PASSWORD:-}
   MYSCALE_DATABASE: ${MYSCALE_DATABASE:-dify}
   MYSCALE_FTS_PARAMS: ${MYSCALE_FTS_PARAMS:-}
-  RELYT_HOST: ${RELYT_HOST:-db}
-  RELYT_PORT: ${RELYT_PORT:-5432}
-  RELYT_USER: ${RELYT_USER:-postgres}
-  RELYT_PASSWORD: ${RELYT_PASSWORD:-difyai123456}
-  RELYT_DATABASE: ${RELYT_DATABASE:-postgres}
+  COUCHBASE_CONNECTION_STRING: ${COUCHBASE_CONNECTION_STRING:-"couchbase://couchbase-server"}
+  COUCHBASE_USER: ${COUCHBASE_USER:-Administrator}
+  COUCHBASE_PASSWORD: ${COUCHBASE_PASSWORD:-password}
+  COUCHBASE_BUCKET_NAME: ${COUCHBASE_BUCKET_NAME:-Embeddings}
+  COUCHBASE_SCOPE_NAME: ${COUCHBASE_SCOPE_NAME:-_default}
   PGVECTOR_HOST: ${PGVECTOR_HOST:-pgvector}
   PGVECTOR_PORT: ${PGVECTOR_PORT:-5432}
   PGVECTOR_USER: ${PGVECTOR_USER:-postgres}
PGVECTOR_PASSWORD: ${PGVECTOR_PASSWORD:-difyai123456} PGVECTOR_PASSWORD: ${PGVECTOR_PASSWORD:-difyai123456}
PGVECTOR_DATABASE: ${PGVECTOR_DATABASE:-dify} PGVECTOR_DATABASE: ${PGVECTOR_DATABASE:-dify}
PGVECTOR_MIN_CONNECTION: ${PGVECTOR_MIN_CONNECTION:-1}
PGVECTOR_MAX_CONNECTION: ${PGVECTOR_MAX_CONNECTION:-5}
PGVECTO_RS_HOST: ${PGVECTO_RS_HOST:-pgvecto-rs}
PGVECTO_RS_PORT: ${PGVECTO_RS_PORT:-5432}
PGVECTO_RS_USER: ${PGVECTO_RS_USER:-postgres}
PGVECTO_RS_PASSWORD: ${PGVECTO_RS_PASSWORD:-difyai123456}
PGVECTO_RS_DATABASE: ${PGVECTO_RS_DATABASE:-dify}
ANALYTICDB_KEY_ID: ${ANALYTICDB_KEY_ID:-your-ak}
ANALYTICDB_KEY_SECRET: ${ANALYTICDB_KEY_SECRET:-your-sk}
ANALYTICDB_REGION_ID: ${ANALYTICDB_REGION_ID:-cn-hangzhou}
ANALYTICDB_INSTANCE_ID: ${ANALYTICDB_INSTANCE_ID:-gp-ab123456}
ANALYTICDB_ACCOUNT: ${ANALYTICDB_ACCOUNT:-testaccount}
ANALYTICDB_PASSWORD: ${ANALYTICDB_PASSWORD:-testpassword}
ANALYTICDB_NAMESPACE: ${ANALYTICDB_NAMESPACE:-dify}
ANALYTICDB_NAMESPACE_PASSWORD: ${ANALYTICDB_NAMESPACE_PASSWORD:-difypassword}
ANALYTICDB_HOST: ${ANALYTICDB_HOST:-gp-test.aliyuncs.com}
ANALYTICDB_PORT: ${ANALYTICDB_PORT:-5432}
ANALYTICDB_MIN_CONNECTION: ${ANALYTICDB_MIN_CONNECTION:-1}
ANALYTICDB_MAX_CONNECTION: ${ANALYTICDB_MAX_CONNECTION:-5}
TIDB_VECTOR_HOST: ${TIDB_VECTOR_HOST:-tidb} TIDB_VECTOR_HOST: ${TIDB_VECTOR_HOST:-tidb}
TIDB_VECTOR_PORT: ${TIDB_VECTOR_PORT:-4000} TIDB_VECTOR_PORT: ${TIDB_VECTOR_PORT:-4000}
TIDB_VECTOR_USER: ${TIDB_VECTOR_USER:-} TIDB_VECTOR_USER: ${TIDB_VECTOR_USER:-}
TIDB_VECTOR_PASSWORD: ${TIDB_VECTOR_PASSWORD:-} TIDB_VECTOR_PASSWORD: ${TIDB_VECTOR_PASSWORD:-}
TIDB_VECTOR_DATABASE: ${TIDB_VECTOR_DATABASE:-dify} TIDB_VECTOR_DATABASE: ${TIDB_VECTOR_DATABASE:-dify}
TIDB_ON_QDRANT_URL: ${TIDB_ON_QDRANT_URL:-http://127.0.0.1} TIDB_ON_QDRANT_URL: ${TIDB_ON_QDRANT_URL:-"http://127.0.0.1"}
TIDB_ON_QDRANT_API_KEY: ${TIDB_ON_QDRANT_API_KEY:-dify} TIDB_ON_QDRANT_API_KEY: ${TIDB_ON_QDRANT_API_KEY:-dify}
TIDB_ON_QDRANT_CLIENT_TIMEOUT: ${TIDB_ON_QDRANT_CLIENT_TIMEOUT:-20} TIDB_ON_QDRANT_CLIENT_TIMEOUT: ${TIDB_ON_QDRANT_CLIENT_TIMEOUT:-20}
TIDB_ON_QDRANT_GRPC_ENABLED: ${TIDB_ON_QDRANT_GRPC_ENABLED:-false} TIDB_ON_QDRANT_GRPC_ENABLED: ${TIDB_ON_QDRANT_GRPC_ENABLED:-false}
TIDB_ON_QDRANT_GRPC_PORT: ${TIDB_ON_QDRANT_GRPC_PORT:-6334} TIDB_ON_QDRANT_GRPC_PORT: ${TIDB_ON_QDRANT_GRPC_PORT:-6334}
TIDB_PUBLIC_KEY: ${TIDB_PUBLIC_KEY:-dify} TIDB_PUBLIC_KEY: ${TIDB_PUBLIC_KEY:-dify}
TIDB_PRIVATE_KEY: ${TIDB_PRIVATE_KEY:-dify} TIDB_PRIVATE_KEY: ${TIDB_PRIVATE_KEY:-dify}
TIDB_API_URL: ${TIDB_API_URL:-http://127.0.0.1} TIDB_API_URL: ${TIDB_API_URL:-"http://127.0.0.1"}
TIDB_IAM_API_URL: ${TIDB_IAM_API_URL:-http://127.0.0.1} TIDB_IAM_API_URL: ${TIDB_IAM_API_URL:-"http://127.0.0.1"}
TIDB_REGION: ${TIDB_REGION:-regions/aws-us-east-1} TIDB_REGION: ${TIDB_REGION:-regions/aws-us-east-1}
TIDB_PROJECT_ID: ${TIDB_PROJECT_ID:-dify} TIDB_PROJECT_ID: ${TIDB_PROJECT_ID:-dify}
TIDB_SPEND_LIMIT: ${TIDB_SPEND_LIMIT:-100} TIDB_SPEND_LIMIT: ${TIDB_SPEND_LIMIT:-100}
ORACLE_HOST: ${ORACLE_HOST:-oracle}
ORACLE_PORT: ${ORACLE_PORT:-1521}
ORACLE_USER: ${ORACLE_USER:-dify}
ORACLE_PASSWORD: ${ORACLE_PASSWORD:-dify}
ORACLE_DATABASE: ${ORACLE_DATABASE:-FREEPDB1}
CHROMA_HOST: ${CHROMA_HOST:-127.0.0.1} CHROMA_HOST: ${CHROMA_HOST:-127.0.0.1}
CHROMA_PORT: ${CHROMA_PORT:-8000} CHROMA_PORT: ${CHROMA_PORT:-8000}
CHROMA_TENANT: ${CHROMA_TENANT:-default_tenant} CHROMA_TENANT: ${CHROMA_TENANT:-default_tenant}
CHROMA_DATABASE: ${CHROMA_DATABASE:-default_database} CHROMA_DATABASE: ${CHROMA_DATABASE:-default_database}
CHROMA_AUTH_PROVIDER: ${CHROMA_AUTH_PROVIDER:-chromadb.auth.token_authn.TokenAuthClientProvider} CHROMA_AUTH_PROVIDER: ${CHROMA_AUTH_PROVIDER:-chromadb.auth.token_authn.TokenAuthClientProvider}
CHROMA_AUTH_CREDENTIALS: ${CHROMA_AUTH_CREDENTIALS:-} CHROMA_AUTH_CREDENTIALS: ${CHROMA_AUTH_CREDENTIALS:-}
ELASTICSEARCH_HOST: ${ELASTICSEARCH_HOST:-0.0.0.0} ORACLE_HOST: ${ORACLE_HOST:-oracle}
ELASTICSEARCH_PORT: ${ELASTICSEARCH_PORT:-9200} ORACLE_PORT: ${ORACLE_PORT:-1521}
ELASTICSEARCH_USERNAME: ${ELASTICSEARCH_USERNAME:-elastic} ORACLE_USER: ${ORACLE_USER:-dify}
ELASTICSEARCH_PASSWORD: ${ELASTICSEARCH_PASSWORD:-elastic} ORACLE_PASSWORD: ${ORACLE_PASSWORD:-dify}
LINDORM_URL: ${LINDORM_URL:-http://lindorm:30070} ORACLE_DATABASE: ${ORACLE_DATABASE:-FREEPDB1}
LINDORM_USERNAME: ${LINDORM_USERNAME:-lindorm} RELYT_HOST: ${RELYT_HOST:-db}
LINDORM_PASSWORD: ${LINDORM_PASSWORD:-lindorm } RELYT_PORT: ${RELYT_PORT:-5432}
KIBANA_PORT: ${KIBANA_PORT:-5601} RELYT_USER: ${RELYT_USER:-postgres}
# AnalyticDB configuration RELYT_PASSWORD: ${RELYT_PASSWORD:-difyai123456}
ANALYTICDB_KEY_ID: ${ANALYTICDB_KEY_ID:-} RELYT_DATABASE: ${RELYT_DATABASE:-postgres}
ANALYTICDB_KEY_SECRET: ${ANALYTICDB_KEY_SECRET:-}
ANALYTICDB_REGION_ID: ${ANALYTICDB_REGION_ID:-}
ANALYTICDB_INSTANCE_ID: ${ANALYTICDB_INSTANCE_ID:-}
ANALYTICDB_ACCOUNT: ${ANALYTICDB_ACCOUNT:-}
ANALYTICDB_PASSWORD: ${ANALYTICDB_PASSWORD:-}
ANALYTICDB_NAMESPACE: ${ANALYTICDB_NAMESPACE:-dify}
ANALYTICDB_NAMESPACE_PASSWORD: ${ANALYTICDB_NAMESPACE_PASSWORD:-}
ANALYTICDB_HOST: ${ANALYTICDB_HOST:-}
ANALYTICDB_PORT: ${ANALYTICDB_PORT:-5432}
ANALYTICDB_MIN_CONNECTION: ${ANALYTICDB_MIN_CONNECTION:-1}
ANALYTICDB_MAX_CONNECTION: ${ANALYTICDB_MAX_CONNECTION:-5}
OPENSEARCH_HOST: ${OPENSEARCH_HOST:-opensearch} OPENSEARCH_HOST: ${OPENSEARCH_HOST:-opensearch}
OPENSEARCH_PORT: ${OPENSEARCH_PORT:-9200} OPENSEARCH_PORT: ${OPENSEARCH_PORT:-9200}
OPENSEARCH_USER: ${OPENSEARCH_USER:-admin} OPENSEARCH_USER: ${OPENSEARCH_USER:-admin}
OPENSEARCH_PASSWORD: ${OPENSEARCH_PASSWORD:-admin} OPENSEARCH_PASSWORD: ${OPENSEARCH_PASSWORD:-admin}
OPENSEARCH_SECURE: ${OPENSEARCH_SECURE:-true} OPENSEARCH_SECURE: ${OPENSEARCH_SECURE:-true}
TENCENT_VECTOR_DB_URL: ${TENCENT_VECTOR_DB_URL:-http://127.0.0.1} TENCENT_VECTOR_DB_URL: ${TENCENT_VECTOR_DB_URL:-"http://127.0.0.1"}
TENCENT_VECTOR_DB_API_KEY: ${TENCENT_VECTOR_DB_API_KEY:-dify} TENCENT_VECTOR_DB_API_KEY: ${TENCENT_VECTOR_DB_API_KEY:-dify}
TENCENT_VECTOR_DB_TIMEOUT: ${TENCENT_VECTOR_DB_TIMEOUT:-30} TENCENT_VECTOR_DB_TIMEOUT: ${TENCENT_VECTOR_DB_TIMEOUT:-30}
TENCENT_VECTOR_DB_USERNAME: ${TENCENT_VECTOR_DB_USERNAME:-dify} TENCENT_VECTOR_DB_USERNAME: ${TENCENT_VECTOR_DB_USERNAME:-dify}
TENCENT_VECTOR_DB_DATABASE: ${TENCENT_VECTOR_DB_DATABASE:-dify} TENCENT_VECTOR_DB_DATABASE: ${TENCENT_VECTOR_DB_DATABASE:-dify}
TENCENT_VECTOR_DB_SHARD: ${TENCENT_VECTOR_DB_SHARD:-1} TENCENT_VECTOR_DB_SHARD: ${TENCENT_VECTOR_DB_SHARD:-1}
TENCENT_VECTOR_DB_REPLICAS: ${TENCENT_VECTOR_DB_REPLICAS:-2} TENCENT_VECTOR_DB_REPLICAS: ${TENCENT_VECTOR_DB_REPLICAS:-2}
BAIDU_VECTOR_DB_ENDPOINT: ${BAIDU_VECTOR_DB_ENDPOINT:-http://127.0.0.1:5287} ELASTICSEARCH_HOST: ${ELASTICSEARCH_HOST:-0.0.0.0}
ELASTICSEARCH_PORT: ${ELASTICSEARCH_PORT:-9200}
ELASTICSEARCH_USERNAME: ${ELASTICSEARCH_USERNAME:-elastic}
ELASTICSEARCH_PASSWORD: ${ELASTICSEARCH_PASSWORD:-elastic}
KIBANA_PORT: ${KIBANA_PORT:-5601}
BAIDU_VECTOR_DB_ENDPOINT: ${BAIDU_VECTOR_DB_ENDPOINT:-"http://127.0.0.1:5287"}
BAIDU_VECTOR_DB_CONNECTION_TIMEOUT_MS: ${BAIDU_VECTOR_DB_CONNECTION_TIMEOUT_MS:-30000} BAIDU_VECTOR_DB_CONNECTION_TIMEOUT_MS: ${BAIDU_VECTOR_DB_CONNECTION_TIMEOUT_MS:-30000}
BAIDU_VECTOR_DB_ACCOUNT: ${BAIDU_VECTOR_DB_ACCOUNT:-root} BAIDU_VECTOR_DB_ACCOUNT: ${BAIDU_VECTOR_DB_ACCOUNT:-root}
BAIDU_VECTOR_DB_API_KEY: ${BAIDU_VECTOR_DB_API_KEY:-dify} BAIDU_VECTOR_DB_API_KEY: ${BAIDU_VECTOR_DB_API_KEY:-dify}
BAIDU_VECTOR_DB_DATABASE: ${BAIDU_VECTOR_DB_DATABASE:-dify} BAIDU_VECTOR_DB_DATABASE: ${BAIDU_VECTOR_DB_DATABASE:-dify}
BAIDU_VECTOR_DB_SHARD: ${BAIDU_VECTOR_DB_SHARD:-1} BAIDU_VECTOR_DB_SHARD: ${BAIDU_VECTOR_DB_SHARD:-1}
BAIDU_VECTOR_DB_REPLICAS: ${BAIDU_VECTOR_DB_REPLICAS:-3} BAIDU_VECTOR_DB_REPLICAS: ${BAIDU_VECTOR_DB_REPLICAS:-3}
VIKINGDB_ACCESS_KEY: ${VIKINGDB_ACCESS_KEY:-dify} VIKINGDB_ACCESS_KEY: ${VIKINGDB_ACCESS_KEY:-your-ak}
VIKINGDB_SECRET_KEY: ${VIKINGDB_SECRET_KEY:-dify} VIKINGDB_SECRET_KEY: ${VIKINGDB_SECRET_KEY:-your-sk}
VIKINGDB_REGION: ${VIKINGDB_REGION:-cn-shanghai} VIKINGDB_REGION: ${VIKINGDB_REGION:-cn-shanghai}
VIKINGDB_HOST: ${VIKINGDB_HOST:-api-vikingdb.xxx.volces.com} VIKINGDB_HOST: ${VIKINGDB_HOST:-api-vikingdb.xxx.volces.com}
VIKINGDB_SCHEMA: ${VIKINGDB_SCHEMA:-http} VIKINGDB_SCHEMA: ${VIKINGDB_SCHEMA:-http}
UPSTASH_VECTOR_URL: ${UPSTASH_VECTOR_URL:-https://xxx-vector.upstash.io} VIKINGDB_CONNECTION_TIMEOUT: ${VIKINGDB_CONNECTION_TIMEOUT:-30}
UPSTASH_VECTOR_TOKEN: ${UPSTASH_VECTOR_TOKEN:-dify} VIKINGDB_SOCKET_TIMEOUT: ${VIKINGDB_SOCKET_TIMEOUT:-30}
UPLOAD_FILE_SIZE_LIMIT: ${UPLOAD_FILE_SIZE_LIMIT:-15} LINDORM_URL: ${LINDORM_URL:-"http://lindorm:30070"}
UPLOAD_FILE_BATCH_LIMIT: ${UPLOAD_FILE_BATCH_LIMIT:-5} LINDORM_USERNAME: ${LINDORM_USERNAME:-lindorm}
ETL_TYPE: ${ETL_TYPE:-dify} LINDORM_PASSWORD: ${LINDORM_PASSWORD:-lindorm}
UNSTRUCTURED_API_URL: ${UNSTRUCTURED_API_URL:-} OCEANBASE_VECTOR_HOST: ${OCEANBASE_VECTOR_HOST:-oceanbase}
UNSTRUCTURED_API_KEY: ${UNSTRUCTURED_API_KEY:-}
PROMPT_GENERATION_MAX_TOKENS: ${PROMPT_GENERATION_MAX_TOKENS:-512}
CODE_GENERATION_MAX_TOKENS: ${CODE_GENERATION_MAX_TOKENS:-1024}
MULTIMODAL_SEND_FORMAT: ${MULTIMODAL_SEND_FORMAT:-base64}
UPLOAD_IMAGE_FILE_SIZE_LIMIT: ${UPLOAD_IMAGE_FILE_SIZE_LIMIT:-10}
UPLOAD_VIDEO_FILE_SIZE_LIMIT: ${UPLOAD_VIDEO_FILE_SIZE_LIMIT:-100}
UPLOAD_AUDIO_FILE_SIZE_LIMIT: ${UPLOAD_AUDIO_FILE_SIZE_LIMIT:-50}
SENTRY_DSN: ${API_SENTRY_DSN:-}
SENTRY_TRACES_SAMPLE_RATE: ${API_SENTRY_TRACES_SAMPLE_RATE:-1.0}
SENTRY_PROFILES_SAMPLE_RATE: ${API_SENTRY_PROFILES_SAMPLE_RATE:-1.0}
NOTION_INTEGRATION_TYPE: ${NOTION_INTEGRATION_TYPE:-public}
NOTION_CLIENT_SECRET: ${NOTION_CLIENT_SECRET:-}
NOTION_CLIENT_ID: ${NOTION_CLIENT_ID:-}
NOTION_INTERNAL_SECRET: ${NOTION_INTERNAL_SECRET:-}
MAIL_TYPE: ${MAIL_TYPE:-resend}
MAIL_DEFAULT_SEND_FROM: ${MAIL_DEFAULT_SEND_FROM:-}
SMTP_SERVER: ${SMTP_SERVER:-}
SMTP_PORT: ${SMTP_PORT:-465}
SMTP_USERNAME: ${SMTP_USERNAME:-}
SMTP_PASSWORD: ${SMTP_PASSWORD:-}
SMTP_USE_TLS: ${SMTP_USE_TLS:-true}
SMTP_OPPORTUNISTIC_TLS: ${SMTP_OPPORTUNISTIC_TLS:-false}
RESEND_API_KEY: ${RESEND_API_KEY:-your-resend-api-key}
RESEND_API_URL: ${RESEND_API_URL:-https://api.resend.com}
INDEXING_MAX_SEGMENTATION_TOKENS_LENGTH: ${INDEXING_MAX_SEGMENTATION_TOKENS_LENGTH:-4000}
INVITE_EXPIRY_HOURS: ${INVITE_EXPIRY_HOURS:-72}
RESET_PASSWORD_TOKEN_EXPIRY_MINUTES: ${RESET_PASSWORD_TOKEN_EXPIRY_MINUTES:-5}
CODE_EXECUTION_ENDPOINT: ${CODE_EXECUTION_ENDPOINT:-http://sandbox:8194}
CODE_EXECUTION_API_KEY: ${SANDBOX_API_KEY:-dify-sandbox}
CODE_EXECUTION_CONNECT_TIMEOUT: ${CODE_EXECUTION_CONNECT_TIMEOUT:-10}
CODE_EXECUTION_READ_TIMEOUT: ${CODE_EXECUTION_READ_TIMEOUT:-60}
CODE_EXECUTION_WRITE_TIMEOUT: ${CODE_EXECUTION_WRITE_TIMEOUT:-10}
CODE_MAX_NUMBER: ${CODE_MAX_NUMBER:-9223372036854775807}
CODE_MIN_NUMBER: ${CODE_MIN_NUMBER:--9223372036854775808}
CODE_MAX_DEPTH: ${CODE_MAX_DEPTH:-5}
CODE_MAX_PRECISION: ${CODE_MAX_PRECISION:-20}
CODE_MAX_STRING_LENGTH: ${CODE_MAX_STRING_LENGTH:-80000}
TEMPLATE_TRANSFORM_MAX_LENGTH: ${TEMPLATE_TRANSFORM_MAX_LENGTH:-80000}
CODE_MAX_STRING_ARRAY_LENGTH: ${CODE_MAX_STRING_ARRAY_LENGTH:-30}
CODE_MAX_OBJECT_ARRAY_LENGTH: ${CODE_MAX_OBJECT_ARRAY_LENGTH:-30}
CODE_MAX_NUMBER_ARRAY_LENGTH: ${CODE_MAX_NUMBER_ARRAY_LENGTH:-1000}
WORKFLOW_MAX_EXECUTION_STEPS: ${WORKFLOW_MAX_EXECUTION_STEPS:-500}
WORKFLOW_MAX_EXECUTION_TIME: ${WORKFLOW_MAX_EXECUTION_TIME:-1200}
WORKFLOW_CALL_MAX_DEPTH: ${WORKFLOW_CALL_MAX_DEPTH:-5}
SSRF_PROXY_HTTP_URL: ${SSRF_PROXY_HTTP_URL:-http://ssrf_proxy:3128}
SSRF_PROXY_HTTPS_URL: ${SSRF_PROXY_HTTPS_URL:-http://ssrf_proxy:3128}
HTTP_REQUEST_NODE_MAX_BINARY_SIZE: ${HTTP_REQUEST_NODE_MAX_BINARY_SIZE:-10485760}
HTTP_REQUEST_NODE_MAX_TEXT_SIZE: ${HTTP_REQUEST_NODE_MAX_TEXT_SIZE:-1048576}
APP_MAX_EXECUTION_TIME: ${APP_MAX_EXECUTION_TIME:-12000}
POSITION_TOOL_PINS: ${POSITION_TOOL_PINS:-}
POSITION_TOOL_INCLUDES: ${POSITION_TOOL_INCLUDES:-}
POSITION_TOOL_EXCLUDES: ${POSITION_TOOL_EXCLUDES:-}
POSITION_PROVIDER_PINS: ${POSITION_PROVIDER_PINS:-}
POSITION_PROVIDER_INCLUDES: ${POSITION_PROVIDER_INCLUDES:-}
POSITION_PROVIDER_EXCLUDES: ${POSITION_PROVIDER_EXCLUDES:-}
MAX_VARIABLE_SIZE: ${MAX_VARIABLE_SIZE:-204800}
OCEANBASE_VECTOR_HOST: ${OCEANBASE_VECTOR_HOST:-http://oceanbase-vector}
OCEANBASE_VECTOR_PORT: ${OCEANBASE_VECTOR_PORT:-2881} OCEANBASE_VECTOR_PORT: ${OCEANBASE_VECTOR_PORT:-2881}
OCEANBASE_VECTOR_USER: ${OCEANBASE_VECTOR_USER:-root@test} OCEANBASE_VECTOR_USER: ${OCEANBASE_VECTOR_USER:-root@test}
OCEANBASE_VECTOR_PASSWORD: ${OCEANBASE_VECTOR_PASSWORD:-difyai123456} OCEANBASE_VECTOR_PASSWORD: ${OCEANBASE_VECTOR_PASSWORD:-difyai123456}
OCEANBASE_VECTOR_DATABASE: ${OCEANBASE_VECTOR_DATABASE:-test} OCEANBASE_VECTOR_DATABASE: ${OCEANBASE_VECTOR_DATABASE:-test}
OCEANBASE_CLUSTER_NAME: ${OCEANBASE_CLUSTER_NAME:-difyai} OCEANBASE_CLUSTER_NAME: ${OCEANBASE_CLUSTER_NAME:-difyai}
OCEANBASE_MEMORY_LIMIT: ${OCEANBASE_MEMORY_LIMIT:-6G} OCEANBASE_MEMORY_LIMIT: ${OCEANBASE_MEMORY_LIMIT:-6G}
UPSTASH_VECTOR_URL: ${UPSTASH_VECTOR_URL:-"https://xxx-vector.upstash.io"}
UPSTASH_VECTOR_TOKEN: ${UPSTASH_VECTOR_TOKEN:-dify}
UPLOAD_FILE_SIZE_LIMIT: ${UPLOAD_FILE_SIZE_LIMIT:-15}
UPLOAD_FILE_BATCH_LIMIT: ${UPLOAD_FILE_BATCH_LIMIT:-5}
ETL_TYPE: ${ETL_TYPE:-dify}
UNSTRUCTURED_API_URL: ${UNSTRUCTURED_API_URL:-}
UNSTRUCTURED_API_KEY: ${UNSTRUCTURED_API_KEY:-}
SCARF_NO_ANALYTICS: ${SCARF_NO_ANALYTICS:-true}
PROMPT_GENERATION_MAX_TOKENS: ${PROMPT_GENERATION_MAX_TOKENS:-512}
CODE_GENERATION_MAX_TOKENS: ${CODE_GENERATION_MAX_TOKENS:-1024}
MULTIMODAL_SEND_FORMAT: ${MULTIMODAL_SEND_FORMAT:-base64}
UPLOAD_IMAGE_FILE_SIZE_LIMIT: ${UPLOAD_IMAGE_FILE_SIZE_LIMIT:-10}
UPLOAD_VIDEO_FILE_SIZE_LIMIT: ${UPLOAD_VIDEO_FILE_SIZE_LIMIT:-100}
UPLOAD_AUDIO_FILE_SIZE_LIMIT: ${UPLOAD_AUDIO_FILE_SIZE_LIMIT:-50}
API_SENTRY_DSN: ${API_SENTRY_DSN:-}
API_SENTRY_TRACES_SAMPLE_RATE: ${API_SENTRY_TRACES_SAMPLE_RATE:-1.0}
API_SENTRY_PROFILES_SAMPLE_RATE: ${API_SENTRY_PROFILES_SAMPLE_RATE:-1.0}
WEB_SENTRY_DSN: ${WEB_SENTRY_DSN:-}
NOTION_INTEGRATION_TYPE: ${NOTION_INTEGRATION_TYPE:-public}
NOTION_CLIENT_SECRET: ${NOTION_CLIENT_SECRET:-}
NOTION_CLIENT_ID: ${NOTION_CLIENT_ID:-}
NOTION_INTERNAL_SECRET: ${NOTION_INTERNAL_SECRET:-}
MAIL_TYPE: ${MAIL_TYPE:-resend}
MAIL_DEFAULT_SEND_FROM: ${MAIL_DEFAULT_SEND_FROM:-}
RESEND_API_URL: ${RESEND_API_URL:-"https://api.resend.com"}
RESEND_API_KEY: ${RESEND_API_KEY:-your-resend-api-key}
SMTP_SERVER: ${SMTP_SERVER:-}
SMTP_PORT: ${SMTP_PORT:-465}
SMTP_USERNAME: ${SMTP_USERNAME:-}
SMTP_PASSWORD: ${SMTP_PASSWORD:-}
SMTP_USE_TLS: ${SMTP_USE_TLS:-true}
SMTP_OPPORTUNISTIC_TLS: ${SMTP_OPPORTUNISTIC_TLS:-false}
INDEXING_MAX_SEGMENTATION_TOKENS_LENGTH: ${INDEXING_MAX_SEGMENTATION_TOKENS_LENGTH:-4000}
INVITE_EXPIRY_HOURS: ${INVITE_EXPIRY_HOURS:-72}
RESET_PASSWORD_TOKEN_EXPIRY_MINUTES: ${RESET_PASSWORD_TOKEN_EXPIRY_MINUTES:-5}
CODE_EXECUTION_ENDPOINT: ${CODE_EXECUTION_ENDPOINT:-"http://sandbox:8194"}
CODE_EXECUTION_API_KEY: ${CODE_EXECUTION_API_KEY:-dify-sandbox}
CODE_MAX_NUMBER: ${CODE_MAX_NUMBER:-9223372036854775807}
CODE_MIN_NUMBER: ${CODE_MIN_NUMBER:--9223372036854775808}
CODE_MAX_DEPTH: ${CODE_MAX_DEPTH:-5}
CODE_MAX_PRECISION: ${CODE_MAX_PRECISION:-20}
CODE_MAX_STRING_LENGTH: ${CODE_MAX_STRING_LENGTH:-80000}
CODE_MAX_STRING_ARRAY_LENGTH: ${CODE_MAX_STRING_ARRAY_LENGTH:-30}
CODE_MAX_OBJECT_ARRAY_LENGTH: ${CODE_MAX_OBJECT_ARRAY_LENGTH:-30}
CODE_MAX_NUMBER_ARRAY_LENGTH: ${CODE_MAX_NUMBER_ARRAY_LENGTH:-1000}
CODE_EXECUTION_CONNECT_TIMEOUT: ${CODE_EXECUTION_CONNECT_TIMEOUT:-10}
CODE_EXECUTION_READ_TIMEOUT: ${CODE_EXECUTION_READ_TIMEOUT:-60}
CODE_EXECUTION_WRITE_TIMEOUT: ${CODE_EXECUTION_WRITE_TIMEOUT:-10}
TEMPLATE_TRANSFORM_MAX_LENGTH: ${TEMPLATE_TRANSFORM_MAX_LENGTH:-80000}
WORKFLOW_MAX_EXECUTION_STEPS: ${WORKFLOW_MAX_EXECUTION_STEPS:-500}
WORKFLOW_MAX_EXECUTION_TIME: ${WORKFLOW_MAX_EXECUTION_TIME:-1200}
WORKFLOW_CALL_MAX_DEPTH: ${WORKFLOW_CALL_MAX_DEPTH:-5}
MAX_VARIABLE_SIZE: ${MAX_VARIABLE_SIZE:-204800}
WORKFLOW_FILE_UPLOAD_LIMIT: ${WORKFLOW_FILE_UPLOAD_LIMIT:-10}
HTTP_REQUEST_NODE_MAX_BINARY_SIZE: ${HTTP_REQUEST_NODE_MAX_BINARY_SIZE:-10485760}
HTTP_REQUEST_NODE_MAX_TEXT_SIZE: ${HTTP_REQUEST_NODE_MAX_TEXT_SIZE:-1048576}
SSRF_PROXY_HTTP_URL: ${SSRF_PROXY_HTTP_URL:-"http://ssrf_proxy:3128"}
SSRF_PROXY_HTTPS_URL: ${SSRF_PROXY_HTTPS_URL:-"http://ssrf_proxy:3128"}
TEXT_GENERATION_TIMEOUT_MS: ${TEXT_GENERATION_TIMEOUT_MS:-60000}
PGUSER: ${PGUSER:-${DB_USERNAME}}
POSTGRES_PASSWORD: ${POSTGRES_PASSWORD:-${DB_PASSWORD}}
POSTGRES_DB: ${POSTGRES_DB:-${DB_DATABASE}}
PGDATA: ${PGDATA:-/var/lib/postgresql/data/pgdata}
SANDBOX_API_KEY: ${SANDBOX_API_KEY:-dify-sandbox}
SANDBOX_GIN_MODE: ${SANDBOX_GIN_MODE:-release}
SANDBOX_WORKER_TIMEOUT: ${SANDBOX_WORKER_TIMEOUT:-15}
SANDBOX_ENABLE_NETWORK: ${SANDBOX_ENABLE_NETWORK:-true}
SANDBOX_HTTP_PROXY: ${SANDBOX_HTTP_PROXY:-"http://ssrf_proxy:3128"}
SANDBOX_HTTPS_PROXY: ${SANDBOX_HTTPS_PROXY:-"http://ssrf_proxy:3128"}
SANDBOX_PORT: ${SANDBOX_PORT:-8194}
WEAVIATE_PERSISTENCE_DATA_PATH: ${WEAVIATE_PERSISTENCE_DATA_PATH:-/var/lib/weaviate}
WEAVIATE_QUERY_DEFAULTS_LIMIT: ${WEAVIATE_QUERY_DEFAULTS_LIMIT:-25}
WEAVIATE_AUTHENTICATION_ANONYMOUS_ACCESS_ENABLED: ${WEAVIATE_AUTHENTICATION_ANONYMOUS_ACCESS_ENABLED:-true}
WEAVIATE_DEFAULT_VECTORIZER_MODULE: ${WEAVIATE_DEFAULT_VECTORIZER_MODULE:-none}
WEAVIATE_CLUSTER_HOSTNAME: ${WEAVIATE_CLUSTER_HOSTNAME:-node1}
WEAVIATE_AUTHENTICATION_APIKEY_ENABLED: ${WEAVIATE_AUTHENTICATION_APIKEY_ENABLED:-true}
WEAVIATE_AUTHENTICATION_APIKEY_ALLOWED_KEYS: ${WEAVIATE_AUTHENTICATION_APIKEY_ALLOWED_KEYS:-WVF5YThaHlkYwhGUSmCRgsX3tD5ngdN8pkih}
WEAVIATE_AUTHENTICATION_APIKEY_USERS: ${WEAVIATE_AUTHENTICATION_APIKEY_USERS:-hello@dify.ai}
WEAVIATE_AUTHORIZATION_ADMINLIST_ENABLED: ${WEAVIATE_AUTHORIZATION_ADMINLIST_ENABLED:-true}
WEAVIATE_AUTHORIZATION_ADMINLIST_USERS: ${WEAVIATE_AUTHORIZATION_ADMINLIST_USERS:-hello@dify.ai}
CHROMA_SERVER_AUTHN_CREDENTIALS: ${CHROMA_SERVER_AUTHN_CREDENTIALS:-difyai123456}
CHROMA_SERVER_AUTHN_PROVIDER: ${CHROMA_SERVER_AUTHN_PROVIDER:-chromadb.auth.token_authn.TokenAuthenticationServerProvider}
CHROMA_IS_PERSISTENT: ${CHROMA_IS_PERSISTENT:-TRUE}
ORACLE_PWD: ${ORACLE_PWD:-Dify123456}
ORACLE_CHARACTERSET: ${ORACLE_CHARACTERSET:-AL32UTF8}
ETCD_AUTO_COMPACTION_MODE: ${ETCD_AUTO_COMPACTION_MODE:-revision}
ETCD_AUTO_COMPACTION_RETENTION: ${ETCD_AUTO_COMPACTION_RETENTION:-1000}
ETCD_QUOTA_BACKEND_BYTES: ${ETCD_QUOTA_BACKEND_BYTES:-4294967296}
ETCD_SNAPSHOT_COUNT: ${ETCD_SNAPSHOT_COUNT:-50000}
MINIO_ACCESS_KEY: ${MINIO_ACCESS_KEY:-minioadmin}
MINIO_SECRET_KEY: ${MINIO_SECRET_KEY:-minioadmin}
ETCD_ENDPOINTS: ${ETCD_ENDPOINTS:-"etcd:2379"}
MINIO_ADDRESS: ${MINIO_ADDRESS:-"minio:9000"}
MILVUS_AUTHORIZATION_ENABLED: ${MILVUS_AUTHORIZATION_ENABLED:-true}
PGVECTOR_PGUSER: ${PGVECTOR_PGUSER:-postgres}
PGVECTOR_POSTGRES_PASSWORD: ${PGVECTOR_POSTGRES_PASSWORD:-difyai123456}
PGVECTOR_POSTGRES_DB: ${PGVECTOR_POSTGRES_DB:-dify}
PGVECTOR_PGDATA: ${PGVECTOR_PGDATA:-/var/lib/postgresql/data/pgdata}
OPENSEARCH_DISCOVERY_TYPE: ${OPENSEARCH_DISCOVERY_TYPE:-single-node}
OPENSEARCH_BOOTSTRAP_MEMORY_LOCK: ${OPENSEARCH_BOOTSTRAP_MEMORY_LOCK:-true}
OPENSEARCH_JAVA_OPTS_MIN: ${OPENSEARCH_JAVA_OPTS_MIN:-512m}
OPENSEARCH_JAVA_OPTS_MAX: ${OPENSEARCH_JAVA_OPTS_MAX:-1024m}
OPENSEARCH_INITIAL_ADMIN_PASSWORD: ${OPENSEARCH_INITIAL_ADMIN_PASSWORD:-Qazwsxedc!@#123}
OPENSEARCH_MEMLOCK_SOFT: ${OPENSEARCH_MEMLOCK_SOFT:--1}
OPENSEARCH_MEMLOCK_HARD: ${OPENSEARCH_MEMLOCK_HARD:--1}
OPENSEARCH_NOFILE_SOFT: ${OPENSEARCH_NOFILE_SOFT:-65536}
OPENSEARCH_NOFILE_HARD: ${OPENSEARCH_NOFILE_HARD:-65536}
NGINX_SERVER_NAME: ${NGINX_SERVER_NAME:-_}
NGINX_HTTPS_ENABLED: ${NGINX_HTTPS_ENABLED:-false}
NGINX_PORT: ${NGINX_PORT:-80}
NGINX_SSL_PORT: ${NGINX_SSL_PORT:-443}
NGINX_SSL_CERT_FILENAME: ${NGINX_SSL_CERT_FILENAME:-dify.crt}
NGINX_SSL_CERT_KEY_FILENAME: ${NGINX_SSL_CERT_KEY_FILENAME:-dify.key}
NGINX_SSL_PROTOCOLS: ${NGINX_SSL_PROTOCOLS:-"TLSv1.1 TLSv1.2 TLSv1.3"}
NGINX_WORKER_PROCESSES: ${NGINX_WORKER_PROCESSES:-auto}
NGINX_CLIENT_MAX_BODY_SIZE: ${NGINX_CLIENT_MAX_BODY_SIZE:-15M}
NGINX_KEEPALIVE_TIMEOUT: ${NGINX_KEEPALIVE_TIMEOUT:-65}
NGINX_PROXY_READ_TIMEOUT: ${NGINX_PROXY_READ_TIMEOUT:-3600s}
NGINX_PROXY_SEND_TIMEOUT: ${NGINX_PROXY_SEND_TIMEOUT:-3600s}
NGINX_ENABLE_CERTBOT_CHALLENGE: ${NGINX_ENABLE_CERTBOT_CHALLENGE:-false}
CERTBOT_EMAIL: ${CERTBOT_EMAIL:-your_email@example.com}
CERTBOT_DOMAIN: ${CERTBOT_DOMAIN:-your_domain.com}
CERTBOT_OPTIONS: ${CERTBOT_OPTIONS:-}
SSRF_HTTP_PORT: ${SSRF_HTTP_PORT:-3128}
SSRF_COREDUMP_DIR: ${SSRF_COREDUMP_DIR:-/var/spool/squid}
SSRF_REVERSE_PROXY_PORT: ${SSRF_REVERSE_PROXY_PORT:-8194}
SSRF_SANDBOX_HOST: ${SSRF_SANDBOX_HOST:-sandbox}
COMPOSE_PROFILES: ${COMPOSE_PROFILES:-"${VECTOR_STORE:-weaviate}"}
EXPOSE_NGINX_PORT: ${EXPOSE_NGINX_PORT:-80}
EXPOSE_NGINX_SSL_PORT: ${EXPOSE_NGINX_SSL_PORT:-443}
POSITION_TOOL_PINS: ${POSITION_TOOL_PINS:-}
POSITION_TOOL_INCLUDES: ${POSITION_TOOL_INCLUDES:-}
POSITION_TOOL_EXCLUDES: ${POSITION_TOOL_EXCLUDES:-}
POSITION_PROVIDER_PINS: ${POSITION_PROVIDER_PINS:-}
POSITION_PROVIDER_INCLUDES: ${POSITION_PROVIDER_INCLUDES:-}
POSITION_PROVIDER_EXCLUDES: ${POSITION_PROVIDER_EXCLUDES:-}
CSP_WHITELIST: ${CSP_WHITELIST:-}
CREATE_TIDB_SERVICE_JOB_ENABLED: ${CREATE_TIDB_SERVICE_JOB_ENABLED:-false} CREATE_TIDB_SERVICE_JOB_ENABLED: ${CREATE_TIDB_SERVICE_JOB_ENABLED:-false}
MAX_SUBMIT_COUNT: ${MAX_SUBMIT_COUNT:-100} MAX_SUBMIT_COUNT: ${MAX_SUBMIT_COUNT:-100}
RETRIEVAL_TOP_N: ${RETRIEVAL_TOP_N:-0}
HTTP_PROXY: ${HTTP_PROXY:-} HTTP_PROXY: ${HTTP_PROXY:-}
HTTPS_PROXY: ${HTTPS_PROXY:-} HTTPS_PROXY: ${HTTPS_PROXY:-}
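Every entry in the regenerated x-shared-env block relies on compose's ${VAR:-default} interpolation: a value set (and non-empty) in .env wins, otherwise the literal default applies; note the nested default on COMPOSE_PROFILES, which falls back to the selected VECTOR_STORE. A minimal Python sketch of that fallback rule (the helper name is illustrative):

import os

def resolve(key: str, default: str = "") -> str:
    # Mirrors ${KEY:-default}: the environment value wins only when it is
    # set and non-empty; otherwise the default applies.
    value = os.environ.get(key, "")
    return value if value else default

print(resolve("S3_REGION", "us-east-1"))  # "us-east-1" unless S3_REGION is set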
@ -301,6 +396,9 @@ services:
<<: *shared-api-worker-env <<: *shared-api-worker-env
# Startup mode, 'api' starts the API server. # Startup mode, 'api' starts the API server.
MODE: api MODE: api
SENTRY_DSN: ${API_SENTRY_DSN:-}
SENTRY_TRACES_SAMPLE_RATE: ${API_SENTRY_TRACES_SAMPLE_RATE:-1.0}
SENTRY_PROFILES_SAMPLE_RATE: ${API_SENTRY_PROFILES_SAMPLE_RATE:-1.0}
depends_on: depends_on:
- db - db
- redis - redis
@ -321,6 +419,9 @@ services:
<<: *shared-api-worker-env <<: *shared-api-worker-env
# Startup mode, 'worker' starts the Celery worker for processing the queue. # Startup mode, 'worker' starts the Celery worker for processing the queue.
MODE: worker MODE: worker
SENTRY_DSN: ${API_SENTRY_DSN:-}
SENTRY_TRACES_SAMPLE_RATE: ${API_SENTRY_TRACES_SAMPLE_RATE:-1.0}
SENTRY_PROFILES_SAMPLE_RATE: ${API_SENTRY_PROFILES_SAMPLE_RATE:-1.0}
depends_on: depends_on:
- db - db
- redis - redis
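SENTRY_DSN and the two sample rates are intentionally absent from the shared block now: both the api and worker services set them next to MODE, deriving them from the API_SENTRY_* variables so Sentry stays configured in one place in .env (the web container consumes WEB_SENTRY_DSN separately). The <<: *shared-api-worker-env merge key works like a dict update: merged keys come first, and keys written directly on the service are added on top, winning on any name clash. A small sketch over an illustrative subset of the environment:

shared_env = {"LOG_LEVEL": "INFO"}  # stands in for *shared-api-worker-env
api_env = {**shared_env, "MODE": "api", "SENTRY_DSN": "https://example.invalid/42"}
# api_env carries everything from the shared map plus the service-only keys.
print(sorted(api_env))  # ['LOG_LEVEL', 'MODE', 'SENTRY_DSN']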

docker/generate_docker_compose Executable file

@ -0,0 +1,110 @@
#!/usr/bin/env python3
import os
import re
import sys


def parse_env_example(file_path):
"""
Parses the .env.example file and returns a dictionary with variable names as keys and default values as values.
"""
env_vars = {}
with open(file_path, "r") as f:
for line_number, line in enumerate(f, 1):
line = line.strip()
# Ignore empty lines and comments
if not line or line.startswith("#"):
continue
# Use regex to parse KEY=VALUE
match = re.match(r"^([^=]+)=(.*)$", line)
if match:
key = match.group(1).strip()
value = match.group(2).strip()
# Remove possible quotes around the value
if (value.startswith('"') and value.endswith('"')) or (
value.startswith("'") and value.endswith("'")
):
value = value[1:-1]
env_vars[key] = value
else:
print(f"Warning: Unable to parse line {line_number}: {line}")
return env_vars


def generate_shared_env_block(env_vars, anchor_name="shared-api-worker-env"):
"""
Generates a shared environment variables block as a YAML string.
"""
lines = [f"x-shared-env: &{anchor_name}"]
for key, default in env_vars.items():
# If default value is empty, use ${KEY:-}
if default == "":
lines.append(f" {key}: ${{{key}:-}}")
else:
# If default value contains special characters, wrap it in quotes
if re.search(r"[:\s]", default):
default = f'"{default}"'
lines.append(f" {key}: ${{{key}:-{default}}}")
return "\n".join(lines)


def insert_shared_env(template_path, output_path, shared_env_block, header_comments):
"""
Inserts the shared environment variables block and header comments into the template file,
removing any existing x-shared-env anchors, and generates the final docker-compose.yaml file.
"""
with open(template_path, "r") as f:
template_content = f.read()
# Remove existing x-shared-env: &shared-api-worker-env lines
template_content = re.sub(
r"^x-shared-env: &shared-api-worker-env\s*\n?",
"",
template_content,
flags=re.MULTILINE,
)
# Prepare the final content with header comments and shared env block
final_content = f"{header_comments}\n{shared_env_block}\n\n{template_content}"
with open(output_path, "w") as f:
f.write(final_content)
print(f"Generated {output_path}")


def main():
env_example_path = ".env.example"
template_path = "docker-compose-template.yaml"
output_path = "docker-compose.yaml"
anchor_name = "shared-api-worker-env" # Can be modified as needed
# Define header comments to be added at the top of docker-compose.yaml
header_comments = (
"# ==================================================================\n"
"# WARNING: This file is auto-generated by generate_docker_compose\n"
"# Do not modify this file directly. Instead, update the .env.example\n"
"# or docker-compose-template.yaml and regenerate this file.\n"
"# ==================================================================\n"
)
# Check if required files exist
for path in [env_example_path, template_path]:
if not os.path.isfile(path):
print(f"Error: File {path} does not exist.")
sys.exit(1)
# Parse .env.example file
env_vars = parse_env_example(env_example_path)
if not env_vars:
print("Warning: No environment variables found in .env.example.")
# Generate shared environment variables block
shared_env_block = generate_shared_env_block(env_vars, anchor_name)
# Insert shared environment variables block and header comments into the template
insert_shared_env(template_path, output_path, shared_env_block, header_comments)


if __name__ == "__main__":
main()
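Since main() resolves .env.example and docker-compose-template.yaml relative to the working directory, regeneration is simply: cd docker && ./generate_docker_compose. The quoting rule in generate_shared_env_block is what produced the quoted URL defaults in the diff above; a standalone sketch of that rule (the helper name is invented for illustration):

import re

def shared_env_line(key: str, default: str) -> str:
    # Same per-entry logic as generate_shared_env_block: an empty default
    # becomes ${KEY:-}, and a default containing a colon or whitespace is
    # wrapped in double quotes before being embedded.
    if default == "":
        return f"  {key}: ${{{key}:-}}"
    if re.search(r"[:\s]", default):
        default = f'"{default}"'
    return f"  {key}: ${{{key}:-{default}}}"

print(shared_env_line("WEAVIATE_ENDPOINT", "http://weaviate:8080"))
# ->   WEAVIATE_ENDPOINT: ${WEAVIATE_ENDPOINT:-"http://weaviate:8080"}
print(shared_env_line("QDRANT_GRPC_PORT", "6334"))
# ->   QDRANT_GRPC_PORT: ${QDRANT_GRPC_PORT:-6334}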