Merge branch 'refs/heads/main' into feat/workflow-parallel-support

# Conflicts:
#	api/core/app/apps/advanced_chat/app_generator.py
#	api/core/app/apps/workflow/app_generator.py
takatost 2024-09-02 17:52:51 +08:00
commit 81d09d471c
64 changed files with 853 additions and 179 deletions

View File

@ -8,7 +8,7 @@ In terms of licensing, please take a minute to read our short [License and Contr
## Before you jump in
[Find](https://github.com/langgenius/dify/issues?q=is:issue+is:closed) an existing issue, or [open](https://github.com/langgenius/dify/issues/new/choose) a new one. We categorize issues into 2 types:
[Find](https://github.com/langgenius/dify/issues?q=is:issue+is:open) an existing issue, or [open](https://github.com/langgenius/dify/issues/new/choose) a new one. We categorize issues into 2 types:
### Feature requests:

View File

@ -8,7 +8,7 @@
## 在开始之前
[查找](https://github.com/langgenius/dify/issues?q=is:issue+is:closed)现有问题,或 [创建](https://github.com/langgenius/dify/issues/new/choose) 一个新问题。我们将问题分为两类:
[查找](https://github.com/langgenius/dify/issues?q=is:issue+is:open)现有问题,或 [创建](https://github.com/langgenius/dify/issues/new/choose) 一个新问题。我们将问题分为两类:
### 功能请求:

View File

@ -10,7 +10,7 @@ Dify にコントリビュートしたいとお考えなのですね。それは
## 飛び込む前に
[既存の Issue](https://github.com/langgenius/dify/issues?q=is:issue+is:closed) を探すか、[新しい Issue](https://github.com/langgenius/dify/issues/new/choose) を作成してください。私たちは Issue を 2 つのタイプに分類しています。
[既存の Issue](https://github.com/langgenius/dify/issues?q=is:issue+is:open) を探すか、[新しい Issue](https://github.com/langgenius/dify/issues/new/choose) を作成してください。私たちは Issue を 2 つのタイプに分類しています。
### 機能リクエスト

View File

@ -8,7 +8,7 @@ Về vấn đề cấp phép, xin vui lòng dành chút thời gian đọc qua [
## Trước khi bắt đầu
[Tìm kiếm](https://github.com/langgenius/dify/issues?q=is:issue+is:closed) một vấn đề hiện có, hoặc [tạo mới](https://github.com/langgenius/dify/issues/new/choose) một vấn đề. Chúng tôi phân loại các vấn đề thành 2 loại:
[Tìm kiếm](https://github.com/langgenius/dify/issues?q=is:issue+is:open) một vấn đề hiện có, hoặc [tạo mới](https://github.com/langgenius/dify/issues/new/choose) một vấn đề. Chúng tôi phân loại các vấn đề thành 2 loại:
### Yêu cầu tính năng:

View File

@ -60,7 +60,8 @@ ALIYUN_OSS_SECRET_KEY=your-secret-key
ALIYUN_OSS_ENDPOINT=your-endpoint
ALIYUN_OSS_AUTH_VERSION=v1
ALIYUN_OSS_REGION=your-region
# Don't start with '/'. OSS doesn't support leading slash in object names.
ALIYUN_OSS_PATH=your-path
# Google Storage configuration
GOOGLE_STORAGE_BUCKET_NAME=yout-bucket-name
GOOGLE_STORAGE_SERVICE_ACCOUNT_JSON_BASE64=your-google-service-account-json-base64-string

View File

@ -559,8 +559,9 @@ def add_qdrant_doc_id_index(field: str):
@click.command("create-tenant", help="Create account and tenant.")
@click.option("--email", prompt=True, help="The email address of the tenant account.")
@click.option("--name", prompt=True, help="The workspace name of the tenant account.")
@click.option("--language", prompt=True, help="Account language, default: en-US.")
def create_tenant(email: str, language: Optional[str] = None):
def create_tenant(email: str, language: Optional[str] = None, name: Optional[str] = None):
"""
Create tenant account
"""
@ -580,13 +581,15 @@ def create_tenant(email: str, language: Optional[str] = None):
if language not in languages:
language = "en-US"
name = name.strip()
# generate random password
new_password = secrets.token_urlsafe(16)
# register account
account = RegisterService.register(email=email, name=account_name, password=new_password, language=language)
TenantService.create_owner_tenant_if_not_exist(account)
TenantService.create_owner_tenant_if_not_exist(account, name)
click.echo(
click.style(
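
The hunk above adds a --name option to the create-tenant command and threads it through to the owner-tenant creation. A hedged, standalone sketch of the same click pattern (the command body and echo message are illustrative, not the real registration logic):

import click

@click.command("create-tenant", help="Create account and tenant.")
@click.option("--email", prompt=True, help="The email address of the tenant account.")
@click.option("--name", prompt=True, help="The workspace name of the tenant account.")
@click.option("--language", prompt=True, help="Account language, default: en-US.")
def create_tenant(email: str, name: str, language: str):
    # the real command registers the account, then creates the owner workspace
    # using the provided name (or "<account>'s Workspace" when omitted)
    click.echo(f"creating workspace {name!r} for {email} ({language})")

if __name__ == "__main__":
    create_tenant()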

View File

@ -1,4 +1,4 @@
from typing import Optional
from typing import Annotated, Optional
from pydantic import AliasChoices, Field, HttpUrl, NegativeInt, NonNegativeInt, PositiveInt, computed_field
from pydantic_settings import BaseSettings
@ -217,20 +217,17 @@ class HttpConfig(BaseSettings):
def WEB_API_CORS_ALLOW_ORIGINS(self) -> list[str]:
return self.inner_WEB_API_CORS_ALLOW_ORIGINS.split(",")
HTTP_REQUEST_MAX_CONNECT_TIMEOUT: NonNegativeInt = Field(
description="",
default=300,
)
HTTP_REQUEST_MAX_CONNECT_TIMEOUT: Annotated[
PositiveInt, Field(ge=10, description="connect timeout in seconds for HTTP request")
] = 10
HTTP_REQUEST_MAX_READ_TIMEOUT: NonNegativeInt = Field(
description="",
default=600,
)
HTTP_REQUEST_MAX_READ_TIMEOUT: Annotated[
PositiveInt, Field(ge=60, description="read timeout in seconds for HTTP request")
] = 60
HTTP_REQUEST_MAX_WRITE_TIMEOUT: NonNegativeInt = Field(
description="",
default=600,
)
HTTP_REQUEST_MAX_WRITE_TIMEOUT: Annotated[
PositiveInt, Field(ge=10, description="read timeout in seconds for HTTP request")
] = 20
HTTP_REQUEST_NODE_MAX_BINARY_SIZE: PositiveInt = Field(
description="",

View File

@ -38,3 +38,8 @@ class AliyunOSSStorageConfig(BaseSettings):
description="Aliyun OSS authentication version",
default=None,
)
ALIYUN_OSS_PATH: Optional[str] = Field(
description="Aliyun OSS path",
default=None,
)

View File

@ -173,21 +173,18 @@ class ChatConversationApi(Resource):
if args["keyword"]:
keyword_filter = "%{}%".format(args["keyword"])
query = (
query.join(
Message,
Message.conversation_id == Conversation.id,
)
.join(subquery, subquery.c.conversation_id == Conversation.id)
.filter(
or_(
Message.query.ilike(keyword_filter),
Message.answer.ilike(keyword_filter),
Conversation.name.ilike(keyword_filter),
Conversation.introduction.ilike(keyword_filter),
subquery.c.from_end_user_session_id.ilike(keyword_filter),
),
)
message_subquery = (
db.session.query(Message.conversation_id)
.filter(or_(Message.query.ilike(keyword_filter), Message.answer.ilike(keyword_filter)))
.subquery()
)
query = query.join(subquery, subquery.c.conversation_id == Conversation.id).filter(
or_(
Conversation.id.in_(message_subquery),
Conversation.name.ilike(keyword_filter),
Conversation.introduction.ilike(keyword_filter),
subquery.c.from_end_user_session_id.ilike(keyword_filter),
),
)
account = current_user
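
The refactor above moves the message keyword match into its own subquery and filters conversations with IN, instead of joining Message directly, so a conversation with several matching messages is no longer multiplied into duplicate rows. A hedged, self-contained sketch of the same pattern with stand-in models (the columns here are illustrative, not the real Dify schema):

from sqlalchemy import Column, ForeignKey, Integer, String, create_engine, or_, select
from sqlalchemy.orm import Session, declarative_base

Base = declarative_base()

class Conversation(Base):
    __tablename__ = "conversation"
    id = Column(Integer, primary_key=True)
    name = Column(String)

class Message(Base):
    __tablename__ = "message"
    id = Column(Integer, primary_key=True)
    conversation_id = Column(Integer, ForeignKey("conversation.id"))
    query = Column(String)
    answer = Column(String)

engine = create_engine("sqlite://")
Base.metadata.create_all(engine)

with Session(engine) as session:
    keyword_filter = "%hello%"
    # conversation ids whose messages match the keyword, collected once
    message_select = select(Message.conversation_id).where(
        or_(Message.query.ilike(keyword_filter), Message.answer.ilike(keyword_filter))
    )
    # each matching conversation comes back a single time
    conversations = (
        session.query(Conversation)
        .filter(or_(Conversation.id.in_(message_select), Conversation.name.ilike(keyword_filter)))
        .all()
    )
    print(conversations)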

View File

@ -122,6 +122,7 @@ class DatasetListApi(Resource):
name=args["name"],
indexing_technique=args["indexing_technique"],
account=current_user,
permission=DatasetPermissionEnum.ONLY_ME,
)
except services.errors.dataset.DatasetNameDuplicateError:
raise DatasetNameDuplicateError()

View File

@ -39,7 +39,7 @@ class FileApi(Resource):
@login_required
@account_initialization_required
@marshal_with(file_fields)
@cloud_edition_billing_resource_check(resource="documents")
@cloud_edition_billing_resource_check("documents")
def post(self):
# get file from request
file = request.files["file"]

View File

@ -35,6 +35,7 @@ class InstalledAppsListApi(Resource):
"uninstallable": current_tenant_id == installed_app.app_owner_tenant_id,
}
for installed_app in installed_apps
if installed_app.app is not None
]
installed_apps.sort(
key=lambda app: (

View File

@ -46,9 +46,7 @@ def only_edition_self_hosted(view):
return decorated
def cloud_edition_billing_resource_check(
resource: str, error_msg: str = "You have reached the limit of your subscription."
):
def cloud_edition_billing_resource_check(resource: str):
def interceptor(view):
@wraps(view)
def decorated(*args, **kwargs):
@ -60,22 +58,22 @@ def cloud_edition_billing_resource_check(
documents_upload_quota = features.documents_upload_quota
annotation_quota_limit = features.annotation_quota_limit
if resource == "members" and 0 < members.limit <= members.size:
abort(403, error_msg)
abort(403, "The number of members has reached the limit of your subscription.")
elif resource == "apps" and 0 < apps.limit <= apps.size:
abort(403, error_msg)
abort(403, "The number of apps has reached the limit of your subscription.")
elif resource == "vector_space" and 0 < vector_space.limit <= vector_space.size:
abort(403, error_msg)
abort(403, "The capacity of the vector space has reached the limit of your subscription.")
elif resource == "documents" and 0 < documents_upload_quota.limit <= documents_upload_quota.size:
# The api of file upload is used in the multiple places, so we need to check the source of the request from datasets
source = request.args.get("source")
if source == "datasets":
abort(403, error_msg)
abort(403, "The number of documents has reached the limit of your subscription.")
else:
return view(*args, **kwargs)
elif resource == "workspace_custom" and not features.can_replace_logo:
abort(403, error_msg)
abort(403, "The workspace custom feature has reached the limit of your subscription.")
elif resource == "annotation" and 0 < annotation_quota_limit.limit < annotation_quota_limit.size:
abort(403, error_msg)
abort(403, "The annotation quota has reached the limit of your subscription.")
else:
return view(*args, **kwargs)
@ -86,10 +84,7 @@ def cloud_edition_billing_resource_check(
return interceptor
def cloud_edition_billing_knowledge_limit_check(
resource: str,
error_msg: str = "To unlock this feature and elevate your Dify experience, please upgrade to a paid plan.",
):
def cloud_edition_billing_knowledge_limit_check(resource: str):
def interceptor(view):
@wraps(view)
def decorated(*args, **kwargs):
@ -97,7 +92,10 @@ def cloud_edition_billing_knowledge_limit_check(
if features.billing.enabled:
if resource == "add_segment":
if features.billing.subscription.plan == "sandbox":
abort(403, error_msg)
abort(
403,
"To unlock this feature and elevate your Dify experience, please upgrade to a paid plan.",
)
else:
return view(*args, **kwargs)
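
The change above drops the caller-supplied error_msg parameter and hard-codes a specific message per resource inside the decorator. A hedged sketch of the decorator-factory shape (the names and the usage lookup are stand-ins; the real code aborts with Flask's abort(403, ...)):

from functools import wraps

RESOURCE_LIMIT_MESSAGES = {
    "members": "The number of members has reached the limit of your subscription.",
    "apps": "The number of apps has reached the limit of your subscription.",
}

def get_resource_usage(resource: str) -> tuple[int, int]:
    # stub standing in for the billing/feature service; returns (size, limit)
    return (0, 10)

def billing_resource_check(resource: str):
    def interceptor(view):
        @wraps(view)
        def decorated(*args, **kwargs):
            size, limit = get_resource_usage(resource)
            if 0 < limit <= size:
                # specific, per-resource message instead of a generic error_msg
                raise PermissionError(RESOURCE_LIMIT_MESSAGES[resource])
            return view(*args, **kwargs)
        return decorated
    return interceptor

@billing_resource_check("apps")
def create_app():
    return "created"

print(create_app())  # passes while usage is below the limit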

View File

@ -83,9 +83,7 @@ def validate_app_token(view: Optional[Callable] = None, *, fetch_user_arg: Optio
return decorator(view)
def cloud_edition_billing_resource_check(
resource: str, api_token_type: str, error_msg: str = "You have reached the limit of your subscription."
):
def cloud_edition_billing_resource_check(resource: str, api_token_type: str):
def interceptor(view):
def decorated(*args, **kwargs):
api_token = validate_and_get_api_token(api_token_type)
@ -98,13 +96,13 @@ def cloud_edition_billing_resource_check(
documents_upload_quota = features.documents_upload_quota
if resource == "members" and 0 < members.limit <= members.size:
raise Forbidden(error_msg)
raise Forbidden("The number of members has reached the limit of your subscription.")
elif resource == "apps" and 0 < apps.limit <= apps.size:
raise Forbidden(error_msg)
raise Forbidden("The number of apps has reached the limit of your subscription.")
elif resource == "vector_space" and 0 < vector_space.limit <= vector_space.size:
raise Forbidden(error_msg)
raise Forbidden("The capacity of the vector space has reached the limit of your subscription.")
elif resource == "documents" and 0 < documents_upload_quota.limit <= documents_upload_quota.size:
raise Forbidden(error_msg)
raise Forbidden("The number of documents has reached the limit of your subscription.")
else:
return view(*args, **kwargs)
@ -115,11 +113,7 @@ def cloud_edition_billing_resource_check(
return interceptor
def cloud_edition_billing_knowledge_limit_check(
resource: str,
api_token_type: str,
error_msg: str = "To unlock this feature and elevate your Dify experience, please upgrade to a paid plan.",
):
def cloud_edition_billing_knowledge_limit_check(resource: str, api_token_type: str):
def interceptor(view):
@wraps(view)
def decorated(*args, **kwargs):
@ -128,7 +122,9 @@ def cloud_edition_billing_knowledge_limit_check(
if features.billing.enabled:
if resource == "add_segment":
if features.billing.subscription.plan == "sandbox":
raise Forbidden(error_msg)
raise Forbidden(
"To unlock this feature and elevate your Dify experience, please upgrade to a paid plan."
)
else:
return view(*args, **kwargs)

View File

@ -93,7 +93,7 @@ class DatasetConfigManager:
reranking_model=dataset_configs.get('reranking_model'),
weights=dataset_configs.get('weights'),
reranking_enabled=dataset_configs.get('reranking_enabled', True),
rerank_mode=dataset_configs.get('rerank_mode', 'reranking_model'),
rerank_mode=dataset_configs.get('reranking_mode', 'reranking_model'),
)
)

View File

@ -4,7 +4,7 @@ import os
import threading
import uuid
from collections.abc import Generator
from typing import Any, Optional, Union
from typing import Any, Optional, Literal, Union, overload
from flask import Flask, current_app
from pydantic import ValidationError
@ -32,6 +32,26 @@ logger = logging.getLogger(__name__)
class AdvancedChatAppGenerator(MessageBasedAppGenerator):
@overload
def generate(
self, app_model: App,
workflow: Workflow,
user: Union[Account, EndUser],
args: dict,
invoke_from: InvokeFrom,
stream: Literal[True] = True,
) -> Generator[str, None, None]: ...
@overload
def generate(
self, app_model: App,
workflow: Workflow,
user: Union[Account, EndUser],
args: dict,
invoke_from: InvokeFrom,
stream: Literal[False] = False,
) -> dict: ...
def generate(
self,
app_model: App,
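
The @overload stubs added here (and in the other app generators below) let a type checker resolve generate(..., stream=True) to a streaming generator and stream=False to a plain dict, while a single runtime implementation serves both. A hedged, standalone sketch of the pattern (a free function rather than the real generator class):

from collections.abc import Generator
from typing import Literal, Union, overload

@overload
def generate(args: dict, stream: Literal[True] = True) -> Generator[str, None, None]: ...
@overload
def generate(args: dict, stream: Literal[False] = False) -> dict: ...

def generate(args: dict, stream: bool = True) -> Union[dict, Generator[str, None, None]]:
    if stream:
        def _stream() -> Generator[str, None, None]:
            yield "chunk-1"
            yield "chunk-2"
        return _stream()
    return {"answer": "full response"}

chunks = list(generate({}, stream=True))   # typed as Generator[str, None, None]
answer = generate({}, stream=False)        # typed as dict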

View File

@ -3,7 +3,7 @@ import os
import threading
import uuid
from collections.abc import Generator
from typing import Any, Union
from typing import Any, Literal, Union, overload
from flask import Flask, current_app
from pydantic import ValidationError
@ -28,6 +28,24 @@ logger = logging.getLogger(__name__)
class AgentChatAppGenerator(MessageBasedAppGenerator):
@overload
def generate(
self, app_model: App,
user: Union[Account, EndUser],
args: dict,
invoke_from: InvokeFrom,
stream: Literal[True] = True,
) -> Generator[dict, None, None]: ...
@overload
def generate(
self, app_model: App,
user: Union[Account, EndUser],
args: dict,
invoke_from: InvokeFrom,
stream: Literal[False] = False,
) -> dict: ...
def generate(self, app_model: App,
user: Union[Account, EndUser],
args: Any,

View File

@ -3,7 +3,7 @@ import os
import threading
import uuid
from collections.abc import Generator
from typing import Any, Union
from typing import Any, Literal, Union, overload
from flask import Flask, current_app
from pydantic import ValidationError
@ -28,13 +28,31 @@ logger = logging.getLogger(__name__)
class ChatAppGenerator(MessageBasedAppGenerator):
@overload
def generate(
self, app_model: App,
user: Union[Account, EndUser],
args: Any,
invoke_from: InvokeFrom,
stream: Literal[True] = True,
) -> Generator[str, None, None]: ...
@overload
def generate(
self, app_model: App,
user: Union[Account, EndUser],
args: Any,
invoke_from: InvokeFrom,
stream: Literal[False] = False,
) -> dict: ...
def generate(
self, app_model: App,
user: Union[Account, EndUser],
args: Any,
invoke_from: InvokeFrom,
stream: bool = True,
) -> Union[dict, Generator[dict, None, None]]:
) -> Union[dict, Generator[str, None, None]]:
"""
Generate App response.

View File

@ -3,7 +3,7 @@ import os
import threading
import uuid
from collections.abc import Generator
from typing import Any, Union
from typing import Any, Literal, Union, overload
from flask import Flask, current_app
from pydantic import ValidationError
@ -30,12 +30,30 @@ logger = logging.getLogger(__name__)
class CompletionAppGenerator(MessageBasedAppGenerator):
@overload
def generate(
self, app_model: App,
user: Union[Account, EndUser],
args: dict,
invoke_from: InvokeFrom,
stream: Literal[True] = True,
) -> Generator[str, None, None]: ...
@overload
def generate(
self, app_model: App,
user: Union[Account, EndUser],
args: dict,
invoke_from: InvokeFrom,
stream: Literal[False] = False,
) -> dict: ...
def generate(self, app_model: App,
user: Union[Account, EndUser],
args: Any,
invoke_from: InvokeFrom,
stream: bool = True) \
-> Union[dict, Generator[dict, None, None]]:
-> Union[dict, Generator[str, None, None]]:
"""
Generate App response.
@ -203,7 +221,7 @@ class CompletionAppGenerator(MessageBasedAppGenerator):
user: Union[Account, EndUser],
invoke_from: InvokeFrom,
stream: bool = True) \
-> Union[dict, Generator[dict, None, None]]:
-> Union[dict, Generator[str, None, None]]:
"""
Generate App response.

View File

@ -4,7 +4,7 @@ import os
import threading
import uuid
from collections.abc import Generator
from typing import Any, Union
from typing import Any, Literal, Union, overload
from flask import Flask, current_app
from pydantic import ValidationError
@ -32,8 +32,28 @@ logger = logging.getLogger(__name__)
class WorkflowAppGenerator(BaseAppGenerator):
@overload
def generate(
self,
self, app_model: App,
workflow: Workflow,
user: Union[Account, EndUser],
args: dict,
invoke_from: InvokeFrom,
stream: Literal[True] = True,
) -> Generator[str, None, None]: ...
@overload
def generate(
self, app_model: App,
workflow: Workflow,
user: Union[Account, EndUser],
args: dict,
invoke_from: InvokeFrom,
stream: Literal[False] = False,
) -> dict: ...
def generate(
self,
app_model: App,
workflow: Workflow,
user: Union[Account, EndUser],
@ -109,7 +129,7 @@ class WorkflowAppGenerator(BaseAppGenerator):
application_generate_entity: WorkflowAppGenerateEntity,
invoke_from: InvokeFrom,
stream: bool = True,
) -> dict[str, Any] | Generator[str, Any, None]:
) -> dict[str, Any] | Generator[str, None, None]:
"""
Generate App response.
@ -228,7 +248,7 @@ class WorkflowAppGenerator(BaseAppGenerator):
application_generate_entity=application_generate_entity,
queue_manager=queue_manager
)
runner.run()
except GenerateTaskStoppedException:
pass

View File

@ -150,9 +150,9 @@ class OAIAPICompatLargeLanguageModel(_CommonOAI_API_Compat, LargeLanguageModel):
except json.JSONDecodeError as e:
raise CredentialsValidateFailedError('Credentials validation failed: JSON decode error')
if (completion_type is LLMMode.CHAT and json_result['object'] == ''):
if (completion_type is LLMMode.CHAT and json_result.get('object','') == ''):
json_result['object'] = 'chat.completion'
elif (completion_type is LLMMode.COMPLETION and json_result['object'] == ''):
elif (completion_type is LLMMode.COMPLETION and json_result.get('object','') == ''):
json_result['object'] = 'text_completion'
if (completion_type is LLMMode.CHAT
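
The switch from json_result['object'] to json_result.get('object', '') matters because some OpenAI-compatible providers omit the "object" field entirely; direct indexing would raise KeyError before the fallback could apply. A tiny illustrative sketch (the response dict is fabricated):

json_result = {"choices": []}                  # provider response without an "object" field
if json_result.get("object", "") == "":
    json_result["object"] = "chat.completion"  # fallback applied instead of raising KeyError
print(json_result["object"])                   # -> chat.completion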

View File

@ -71,11 +71,24 @@ class ArkClientV3:
args = {
"base_url": credentials['api_endpoint_host'],
"region": credentials['volc_region'],
"ak": credentials['volc_access_key_id'],
"sk": credentials['volc_secret_access_key'],
}
if credentials.get("auth_method") == "api_key":
args = {
**args,
"api_key": credentials['volc_api_key'],
}
else:
args = {
**args,
"ak": credentials['volc_access_key_id'],
"sk": credentials['volc_secret_access_key'],
}
if cls.is_compatible_with_legacy(credentials):
args["base_url"] = DEFAULT_V3_ENDPOINT
args = {
**args,
"base_url": DEFAULT_V3_ENDPOINT
}
client = ArkClientV3(
**args
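
The hunk above makes the Ark client credentials conditional: an API key when auth_method is "api_key", otherwise the access-key/secret-key pair, with the legacy base_url override applied afterwards. A hedged helper sketch of the same branching (the function name and sample values are illustrative; only the credential keys come from the hunk):

def build_ark_client_args(credentials: dict) -> dict:
    args = {
        "base_url": credentials["api_endpoint_host"],
        "region": credentials["volc_region"],
    }
    if credentials.get("auth_method") == "api_key":
        args["api_key"] = credentials["volc_api_key"]
    else:
        args["ak"] = credentials["volc_access_key_id"]
        args["sk"] = credentials["volc_secret_access_key"]
    return args

print(build_ark_client_args({
    "api_endpoint_host": "https://ark.example.com",
    "volc_region": "cn-beijing",
    "auth_method": "api_key",
    "volc_api_key": "sk-demo",
}))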

View File

@ -30,8 +30,28 @@ model_credential_schema:
en_US: Enter your Model Name
zh_Hans: 输入模型名称
credential_form_schemas:
- variable: auth_method
required: true
label:
en_US: Authentication Method
zh_Hans: 鉴权方式
type: select
default: aksk
options:
- label:
en_US: API Key
value: api_key
- label:
en_US: Access Key / Secret Access Key
value: aksk
placeholder:
en_US: Enter your Authentication Method
zh_Hans: 选择鉴权方式
- variable: volc_access_key_id
required: true
show_on:
- variable: auth_method
value: aksk
label:
en_US: Access Key
zh_Hans: Access Key
@ -41,6 +61,9 @@ model_credential_schema:
zh_Hans: 输入您的 Access Key
- variable: volc_secret_access_key
required: true
show_on:
- variable: auth_method
value: aksk
label:
en_US: Secret Access Key
zh_Hans: Secret Access Key
@ -48,6 +71,17 @@ model_credential_schema:
placeholder:
en_US: Enter your Secret Access Key
zh_Hans: 输入您的 Secret Access Key
- variable: volc_api_key
required: true
show_on:
- variable: auth_method
value: api_key
label:
en_US: API Key
type: secret-input
placeholder:
en_US: Enter your API Key
zh_Hans: 输入您的 API Key
- variable: volc_region
required: true
label:

View File

@ -38,7 +38,7 @@ parameter_rules:
min: 1
max: 8192
pricing:
input: '0.0001'
output: '0.0001'
input: '0'
output: '0'
unit: '0.001'
currency: RMB

View File

@ -37,3 +37,8 @@ parameter_rules:
default: 1024
min: 1
max: 8192
pricing:
input: '0.001'
output: '0.001'
unit: '0.001'
currency: RMB

View File

@ -37,3 +37,8 @@ parameter_rules:
default: 1024
min: 1
max: 8192
pricing:
input: '0.1'
output: '0.1'
unit: '0.001'
currency: RMB

View File

@ -30,4 +30,9 @@ parameter_rules:
use_template: max_tokens
default: 1024
min: 1
max: 4096
max: 8192
pricing:
input: '0.001'
output: '0.001'
unit: '0.001'
currency: RMB

View File

@ -0,0 +1,44 @@
model: glm-4-plus
label:
en_US: glm-4-plus
model_type: llm
features:
- multi-tool-call
- agent-thought
- stream-tool-call
model_properties:
mode: chat
parameter_rules:
- name: temperature
use_template: temperature
default: 0.95
min: 0.0
max: 1.0
help:
zh_Hans: 采样温度,控制输出的随机性,必须为正数取值范围是:(0.0,1.0],不能等于 0,默认值为 0.95 值越大,会使输出更随机,更具创造性;值越小,输出会更加稳定或确定建议您根据应用场景调整 top_p 或 temperature 参数,但不要同时调整两个参数。
en_US: Sampling temperature, controls the randomness of the output, must be a positive number. The value range is (0.0,1.0], which cannot be equal to 0. The default value is 0.95. The larger the value, the more random and creative the output will be; the smaller the value, The output will be more stable or certain. It is recommended that you adjust the top_p or temperature parameters according to the application scenario, but do not adjust both parameters at the same time.
- name: top_p
use_template: top_p
default: 0.7
help:
zh_Hans: 用温度取样的另一种方法,称为核取样取值范围是:(0.0, 1.0) 开区间,不能等于 0 或 1默认值为 0.7 模型考虑具有 top_p 概率质量tokens的结果例如0.1 意味着模型解码器只考虑从前 10% 的概率的候选集中取 tokens 建议您根据应用场景调整 top_p 或 temperature 参数,但不要同时调整两个参数。
en_US: Another method of temperature sampling is called kernel sampling. The value range is (0.0, 1.0) open interval, which cannot be equal to 0 or 1. The default value is 0.7. The model considers the results with top_p probability mass tokens. For example 0.1 means The model decoder only considers tokens from the candidate set with the top 10% probability. It is recommended that you adjust the top_p or temperature parameters according to the application scenario, but do not adjust both parameters at the same time.
- name: incremental
label:
zh_Hans: 增量返回
en_US: Incremental
type: boolean
help:
zh_Hans: SSE接口调用时用于控制每次返回内容方式是增量还是全量不提供此参数时默认为增量返回true 为增量返回false 为全量返回。
en_US: When the SSE interface is called, it is used to control whether the content is returned incrementally or in full. If this parameter is not provided, the default is incremental return. true means incremental return, false means full return.
required: false
- name: max_tokens
use_template: max_tokens
default: 1024
min: 1
max: 8192
pricing:
input: '0.05'
output: '0.05'
unit: '0.001'
currency: RMB

View File

@ -34,4 +34,9 @@ parameter_rules:
use_template: max_tokens
default: 1024
min: 1
max: 8192
max: 1024
pricing:
input: '0.05'
output: '0.05'
unit: '0.001'
currency: RMB

View File

@ -0,0 +1,42 @@
model: glm-4v-plus
label:
en_US: glm-4v-plus
model_type: llm
model_properties:
mode: chat
features:
- vision
parameter_rules:
- name: temperature
use_template: temperature
default: 0.95
min: 0.0
max: 1.0
help:
zh_Hans: 采样温度,控制输出的随机性,必须为正数取值范围是:(0.0,1.0],不能等于 0,默认值为 0.95 值越大,会使输出更随机,更具创造性;值越小,输出会更加稳定或确定建议您根据应用场景调整 top_p 或 temperature 参数,但不要同时调整两个参数。
en_US: Sampling temperature, controls the randomness of the output, must be a positive number. The value range is (0.0,1.0], which cannot be equal to 0. The default value is 0.95. The larger the value, the more random and creative the output will be; the smaller the value, The output will be more stable or certain. It is recommended that you adjust the top_p or temperature parameters according to the application scenario, but do not adjust both parameters at the same time.
- name: top_p
use_template: top_p
default: 0.7
help:
zh_Hans: 用温度取样的另一种方法,称为核取样取值范围是:(0.0, 1.0) 开区间,不能等于 0 或 1默认值为 0.7 模型考虑具有 top_p 概率质量tokens的结果例如0.1 意味着模型解码器只考虑从前 10% 的概率的候选集中取 tokens 建议您根据应用场景调整 top_p 或 temperature 参数,但不要同时调整两个参数。
en_US: Another method of temperature sampling is called kernel sampling. The value range is (0.0, 1.0) open interval, which cannot be equal to 0 or 1. The default value is 0.7. The model considers the results with top_p probability mass tokens. For example 0.1 means The model decoder only considers tokens from the candidate set with the top 10% probability. It is recommended that you adjust the top_p or temperature parameters according to the application scenario, but do not adjust both parameters at the same time.
- name: incremental
label:
zh_Hans: 增量返回
en_US: Incremental
type: boolean
help:
zh_Hans: SSE接口调用时用于控制每次返回内容方式是增量还是全量不提供此参数时默认为增量返回true 为增量返回false 为全量返回。
en_US: When the SSE interface is called, it is used to control whether the content is returned incrementally or in full. If this parameter is not provided, the default is incremental return. true means incremental return, false means full return.
required: false
- name: max_tokens
use_template: max_tokens
default: 1024
min: 1
max: 1024
pricing:
input: '0.01'
output: '0.01'
unit: '0.001'
currency: RMB

View File

@ -153,7 +153,8 @@ class ZhipuAILargeLanguageModel(_CommonZhipuaiAI, LargeLanguageModel):
:return: full response or stream response chunk generator result
"""
extra_model_kwargs = {}
if stop:
# request to glm-4v-plus with stop words will always response "finish_reason":"network_error"
if stop and model!= 'glm-4v-plus':
extra_model_kwargs['stop'] = stop
client = ZhipuAI(
@ -174,7 +175,7 @@ class ZhipuAILargeLanguageModel(_CommonZhipuaiAI, LargeLanguageModel):
if copy_prompt_message.role in [PromptMessageRole.USER, PromptMessageRole.SYSTEM, PromptMessageRole.TOOL]:
if isinstance(copy_prompt_message.content, list):
# check if model is 'glm-4v'
if model != 'glm-4v':
if model not in ('glm-4v', 'glm-4v-plus'):
# not support list message
continue
# get image and
@ -207,7 +208,7 @@ class ZhipuAILargeLanguageModel(_CommonZhipuaiAI, LargeLanguageModel):
else:
new_prompt_messages.append(copy_prompt_message)
if model == 'glm-4v':
if model == 'glm-4v' or model == 'glm-4v-plus':
params = self._construct_glm_4v_parameter(model, new_prompt_messages, model_parameters)
else:
params = {
@ -304,7 +305,7 @@ class ZhipuAILargeLanguageModel(_CommonZhipuaiAI, LargeLanguageModel):
return params
def _construct_glm_4v_messages(self, prompt_message: Union[str | list[PromptMessageContent]]) -> list[dict]:
def _construct_glm_4v_messages(self, prompt_message: Union[str, list[PromptMessageContent]]) -> list[dict]:
if isinstance(prompt_message, str):
return [{'type': 'text', 'text': prompt_message}]

View File

@ -1,5 +1,6 @@
- google
- bing
- perplexity
- duckduckgo
- searchapi
- serper

View File

@ -0,0 +1,3 @@
<svg width="400" height="400" viewBox="0 0 400 400" fill="none" xmlns="http://www.w3.org/2000/svg">
<path fill-rule="evenodd" clip-rule="evenodd" d="M101.008 42L190.99 124.905L190.99 124.886L190.99 42.1913H208.506L208.506 125.276L298.891 42V136.524L336 136.524V272.866H299.005V357.035L208.506 277.525L208.506 357.948H190.99L190.99 278.836L101.11 358V272.866H64V136.524H101.008V42ZM177.785 153.826H81.5159V255.564H101.088V223.472L177.785 153.826ZM118.625 231.149V319.392L190.99 255.655L190.99 165.421L118.625 231.149ZM209.01 254.812V165.336L281.396 231.068V272.866H281.489V318.491L209.01 254.812ZM299.005 255.564H318.484V153.826L222.932 153.826L299.005 222.751V255.564ZM281.375 136.524V81.7983L221.977 136.524L281.375 136.524ZM177.921 136.524H118.524V81.7983L177.921 136.524Z" fill="black"/>
</svg>


View File

@ -0,0 +1,46 @@
from typing import Any
import requests
from core.tools.errors import ToolProviderCredentialValidationError
from core.tools.provider.builtin.perplexity.tools.perplexity_search import PERPLEXITY_API_URL
from core.tools.provider.builtin_tool_provider import BuiltinToolProviderController
class PerplexityProvider(BuiltinToolProviderController):
def _validate_credentials(self, credentials: dict[str, Any]) -> None:
headers = {
"Authorization": f"Bearer {credentials.get('perplexity_api_key')}",
"Content-Type": "application/json"
}
payload = {
"model": "llama-3.1-sonar-small-128k-online",
"messages": [
{
"role": "system",
"content": "You are a helpful assistant."
},
{
"role": "user",
"content": "Hello"
}
],
"max_tokens": 5,
"temperature": 0.1,
"top_p": 0.9,
"stream": False
}
try:
response = requests.post(PERPLEXITY_API_URL, json=payload, headers=headers)
response.raise_for_status()
except requests.RequestException as e:
raise ToolProviderCredentialValidationError(
f"Failed to validate Perplexity API key: {str(e)}"
)
if response.status_code != 200:
raise ToolProviderCredentialValidationError(
f"Perplexity API key is invalid. Status code: {response.status_code}"
)

View File

@ -0,0 +1,26 @@
identity:
author: Dify
name: perplexity
label:
en_US: Perplexity
zh_Hans: Perplexity
description:
en_US: Perplexity.AI
zh_Hans: Perplexity.AI
icon: icon.svg
tags:
- search
credentials_for_provider:
perplexity_api_key:
type: secret-input
required: true
label:
en_US: Perplexity API key
zh_Hans: Perplexity API key
placeholder:
en_US: Please input your Perplexity API key
zh_Hans: 请输入你的 Perplexity API key
help:
en_US: Get your Perplexity API key from Perplexity
zh_Hans: 从 Perplexity 获取您的 Perplexity API key
url: https://www.perplexity.ai/settings/api

View File

@ -0,0 +1,72 @@
import json
from typing import Any, Union
import requests
from core.tools.entities.tool_entities import ToolInvokeMessage
from core.tools.tool.builtin_tool import BuiltinTool
PERPLEXITY_API_URL = "https://api.perplexity.ai/chat/completions"
class PerplexityAITool(BuiltinTool):
def _parse_response(self, response: dict) -> dict:
"""Parse the response from Perplexity AI API"""
if 'choices' in response and len(response['choices']) > 0:
message = response['choices'][0]['message']
return {
'content': message.get('content', ''),
'role': message.get('role', ''),
'citations': response.get('citations', [])
}
else:
return {'content': 'Unable to get a valid response', 'role': 'assistant', 'citations': []}
def _invoke(self,
user_id: str,
tool_parameters: dict[str, Any],
) -> Union[ToolInvokeMessage, list[ToolInvokeMessage]]:
headers = {
"Authorization": f"Bearer {self.runtime.credentials['perplexity_api_key']}",
"Content-Type": "application/json"
}
payload = {
"model": tool_parameters.get('model', 'llama-3.1-sonar-small-128k-online'),
"messages": [
{
"role": "system",
"content": "Be precise and concise."
},
{
"role": "user",
"content": tool_parameters['query']
}
],
"max_tokens": tool_parameters.get('max_tokens', 4096),
"temperature": tool_parameters.get('temperature', 0.7),
"top_p": tool_parameters.get('top_p', 1),
"top_k": tool_parameters.get('top_k', 5),
"presence_penalty": tool_parameters.get('presence_penalty', 0),
"frequency_penalty": tool_parameters.get('frequency_penalty', 1),
"stream": False
}
if 'search_recency_filter' in tool_parameters:
payload['search_recency_filter'] = tool_parameters['search_recency_filter']
if 'return_citations' in tool_parameters:
payload['return_citations'] = tool_parameters['return_citations']
if 'search_domain_filter' in tool_parameters:
if isinstance(tool_parameters['search_domain_filter'], str):
payload['search_domain_filter'] = [tool_parameters['search_domain_filter']]
elif isinstance(tool_parameters['search_domain_filter'], list):
payload['search_domain_filter'] = tool_parameters['search_domain_filter']
response = requests.post(url=PERPLEXITY_API_URL, json=payload, headers=headers)
response.raise_for_status()
valuable_res = self._parse_response(response.json())
return [
self.create_json_message(valuable_res),
self.create_text_message(json.dumps(valuable_res, ensure_ascii=False, indent=2))
]
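
A quick, illustrative walk-through of the _parse_response logic above with a fabricated Perplexity-style payload (the field layout mirrors the code, not a captured API response):

sample = {
    "choices": [{"message": {"role": "assistant", "content": "Paris"}}],
    "citations": ["https://example.com/source"],
}
message = sample["choices"][0]["message"]
parsed = {
    "content": message.get("content", ""),
    "role": message.get("role", ""),
    "citations": sample.get("citations", []),
}
print(parsed)  # {'content': 'Paris', 'role': 'assistant', 'citations': ['https://example.com/source']}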

View File

@ -0,0 +1,178 @@
identity:
name: perplexity
author: Dify
label:
en_US: Perplexity Search
description:
human:
en_US: Search information using Perplexity AI's language models.
llm: This tool is used to search information using Perplexity AI's language models.
parameters:
- name: query
type: string
required: true
label:
en_US: Query
zh_Hans: 查询
human_description:
en_US: The text query to be processed by the AI model.
zh_Hans: 要由 AI 模型处理的文本查询。
form: llm
- name: model
type: select
required: false
label:
en_US: Model Name
zh_Hans: 模型名称
human_description:
en_US: The Perplexity AI model to use for generating the response.
zh_Hans: 用于生成响应的 Perplexity AI 模型。
form: form
default: "llama-3.1-sonar-small-128k-online"
options:
- value: llama-3.1-sonar-small-128k-online
label:
en_US: llama-3.1-sonar-small-128k-online
zh_Hans: llama-3.1-sonar-small-128k-online
- value: llama-3.1-sonar-large-128k-online
label:
en_US: llama-3.1-sonar-large-128k-online
zh_Hans: llama-3.1-sonar-large-128k-online
- value: llama-3.1-sonar-huge-128k-online
label:
en_US: llama-3.1-sonar-huge-128k-online
zh_Hans: llama-3.1-sonar-huge-128k-online
- name: max_tokens
type: number
required: false
label:
en_US: Max Tokens
zh_Hans: 最大令牌数
pt_BR: Máximo de Tokens
human_description:
en_US: The maximum number of tokens to generate in the response.
zh_Hans: 在响应中生成的最大令牌数。
pt_BR: O número máximo de tokens a serem gerados na resposta.
form: form
default: 4096
min: 1
max: 4096
- name: temperature
type: number
required: false
label:
en_US: Temperature
zh_Hans: 温度
pt_BR: Temperatura
human_description:
en_US: Controls randomness in the output. Lower values make the output more focused and deterministic.
zh_Hans: 控制输出的随机性。较低的值使输出更加集中和确定。
form: form
default: 0.7
min: 0
max: 1
- name: top_k
type: number
required: false
label:
en_US: Top K
zh_Hans: 取样数量
human_description:
en_US: The number of top results to consider for response generation.
zh_Hans: 用于生成响应的顶部结果数量。
form: form
default: 5
min: 1
max: 100
- name: top_p
type: number
required: false
label:
en_US: Top P
zh_Hans: Top P
human_description:
en_US: Controls diversity via nucleus sampling.
zh_Hans: 通过核心采样控制多样性。
form: form
default: 1
min: 0.1
max: 1
step: 0.1
- name: presence_penalty
type: number
required: false
label:
en_US: Presence Penalty
zh_Hans: 存在惩罚
human_description:
en_US: Positive values penalize new tokens based on whether they appear in the text so far.
zh_Hans: 正值会根据新词元是否已经出现在文本中来对其进行惩罚。
form: form
default: 0
min: -1.0
max: 1.0
step: 0.1
- name: frequency_penalty
type: number
required: false
label:
en_US: Frequency Penalty
zh_Hans: 频率惩罚
human_description:
en_US: Positive values penalize new tokens based on their existing frequency in the text so far.
zh_Hans: 正值会根据新词元在文本中已经出现的频率来对其进行惩罚。
form: form
default: 1
min: 0.1
max: 1.0
step: 0.1
- name: return_citations
type: boolean
required: false
label:
en_US: Return Citations
zh_Hans: 返回引用
human_description:
en_US: Whether to return citations in the response.
zh_Hans: 是否在响应中返回引用。
form: form
default: true
- name: search_domain_filter
type: string
required: false
label:
en_US: Search Domain Filter
zh_Hans: 搜索域过滤器
human_description:
en_US: Domain to filter the search results.
zh_Hans: 用于过滤搜索结果的域名。
form: form
default: ""
- name: search_recency_filter
type: select
required: false
label:
en_US: Search Recency Filter
zh_Hans: 搜索时间过滤器
human_description:
en_US: Filter for search results based on recency.
zh_Hans: 基于时间筛选搜索结果。
form: form
default: "month"
options:
- value: day
label:
en_US: Day
zh_Hans:
- value: week
label:
en_US: Week
zh_Hans:
- value: month
label:
en_US: Month
zh_Hans:
- value: year
label:
en_US: Year
zh_Hans:

View File

@ -18,9 +18,9 @@ from core.workflow.nodes.http_request.http_executor import HttpExecutor, HttpExe
from models.workflow import WorkflowNodeExecutionStatus
HTTP_REQUEST_DEFAULT_TIMEOUT = HttpRequestNodeTimeout(
connect=min(10, dify_config.HTTP_REQUEST_MAX_CONNECT_TIMEOUT),
read=min(60, dify_config.HTTP_REQUEST_MAX_READ_TIMEOUT),
write=min(20, dify_config.HTTP_REQUEST_MAX_WRITE_TIMEOUT),
connect=dify_config.HTTP_REQUEST_MAX_CONNECT_TIMEOUT,
read=dify_config.HTTP_REQUEST_MAX_READ_TIMEOUT,
write=dify_config.HTTP_REQUEST_MAX_WRITE_TIMEOUT,
)
@ -100,12 +100,9 @@ class HttpRequestNode(BaseNode):
if timeout is None:
return HTTP_REQUEST_DEFAULT_TIMEOUT
timeout.connect = min(timeout.connect or HTTP_REQUEST_DEFAULT_TIMEOUT.connect,
dify_config.HTTP_REQUEST_MAX_CONNECT_TIMEOUT)
timeout.read = min(timeout.read or HTTP_REQUEST_DEFAULT_TIMEOUT.read,
dify_config.HTTP_REQUEST_MAX_READ_TIMEOUT)
timeout.write = min(timeout.write or HTTP_REQUEST_DEFAULT_TIMEOUT.write,
dify_config.HTTP_REQUEST_MAX_WRITE_TIMEOUT)
timeout.connect = timeout.connect or HTTP_REQUEST_DEFAULT_TIMEOUT.connect
timeout.read = timeout.read or HTTP_REQUEST_DEFAULT_TIMEOUT.read
timeout.write = timeout.write or HTTP_REQUEST_DEFAULT_TIMEOUT.write
return timeout
@classmethod
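
After this change the node-level timeout is no longer clamped with min() against the configured maximums; a missing value simply falls back to the configured default. A hedged sketch with a stand-in dataclass (the numbers mirror the new defaults; the class is not the real HttpRequestNodeTimeout):

from dataclasses import dataclass
from typing import Optional

@dataclass
class Timeout:
    connect: Optional[int] = None
    read: Optional[int] = None
    write: Optional[int] = None

DEFAULT_TIMEOUT = Timeout(connect=10, read=60, write=20)

def resolve_timeout(timeout: Optional[Timeout]) -> Timeout:
    if timeout is None:
        return DEFAULT_TIMEOUT
    # keep what the node configured, fill in only the missing pieces
    timeout.connect = timeout.connect or DEFAULT_TIMEOUT.connect
    timeout.read = timeout.read or DEFAULT_TIMEOUT.read
    timeout.write = timeout.write or DEFAULT_TIMEOUT.write
    return timeout

print(resolve_timeout(Timeout(read=300)))  # Timeout(connect=10, read=300, write=20)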

View File

@ -1,3 +1,4 @@
import openai
import sentry_sdk
from sentry_sdk.integrations.celery import CeleryIntegration
from sentry_sdk.integrations.flask import FlaskIntegration
@ -9,7 +10,7 @@ def init_app(app):
sentry_sdk.init(
dsn=app.config.get("SENTRY_DSN"),
integrations=[FlaskIntegration(), CeleryIntegration()],
ignore_errors=[HTTPException, ValueError],
ignore_errors=[HTTPException, ValueError, openai.APIStatusError],
traces_sample_rate=app.config.get("SENTRY_TRACES_SAMPLE_RATE", 1.0),
profiles_sample_rate=app.config.get("SENTRY_PROFILES_SAMPLE_RATE", 1.0),
environment=app.config.get("DEPLOY_ENV"),

View File

@ -15,6 +15,7 @@ class AliyunStorage(BaseStorage):
app_config = self.app.config
self.bucket_name = app_config.get("ALIYUN_OSS_BUCKET_NAME")
self.folder = app.config.get("ALIYUN_OSS_PATH")
oss_auth_method = aliyun_s3.Auth
region = None
if app_config.get("ALIYUN_OSS_AUTH_VERSION") == "v4":
@ -30,15 +31,29 @@ class AliyunStorage(BaseStorage):
)
def save(self, filename, data):
if not self.folder or self.folder.endswith("/"):
filename = self.folder + filename
else:
filename = self.folder + "/" + filename
self.client.put_object(filename, data)
def load_once(self, filename: str) -> bytes:
if not self.folder or self.folder.endswith("/"):
filename = self.folder + filename
else:
filename = self.folder + "/" + filename
with closing(self.client.get_object(filename)) as obj:
data = obj.read()
return data
def load_stream(self, filename: str) -> Generator:
def generate(filename: str = filename) -> Generator:
if not self.folder or self.folder.endswith("/"):
filename = self.folder + filename
else:
filename = self.folder + "/" + filename
with closing(self.client.get_object(filename)) as obj:
while chunk := obj.read(4096):
yield chunk
@ -46,10 +61,24 @@ class AliyunStorage(BaseStorage):
return generate()
def download(self, filename, target_filepath):
if not self.folder or self.folder.endswith("/"):
filename = self.folder + filename
else:
filename = self.folder + "/" + filename
self.client.get_object_to_file(filename, target_filepath)
def exists(self, filename):
if not self.folder or self.folder.endswith("/"):
filename = self.folder + filename
else:
filename = self.folder + "/" + filename
return self.client.object_exists(filename)
def delete(self, filename):
if not self.folder or self.folder.endswith("/"):
filename = self.folder + filename
else:
filename = self.folder + "/" + filename
self.client.delete_object(filename)
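
The same folder-prefix branch is repeated in save, load_once, load_stream, download, exists and delete above. A hedged sketch of that logic as a single helper (the function name is an assumption; the branching mirrors the hunk, with an explicit guard for an unset folder):

from typing import Optional

def apply_folder_prefix(folder: Optional[str], filename: str) -> str:
    # no folder configured, or folder already ends with "/": concatenate directly
    if not folder or folder.endswith("/"):
        return (folder or "") + filename
    return folder + "/" + filename

assert apply_folder_prefix(None, "a.txt") == "a.txt"
assert apply_folder_prefix("uploads", "a.txt") == "uploads/a.txt"
assert apply_folder_prefix("uploads/", "a.txt") == "uploads/a.txt"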

View File

@ -265,7 +265,7 @@ class TenantService:
return tenant
@staticmethod
def create_owner_tenant_if_not_exist(account: Account):
def create_owner_tenant_if_not_exist(account: Account, name: Optional[str] = None):
"""Create owner tenant if not exist"""
available_ta = (
TenantAccountJoin.query.filter_by(account_id=account.id).order_by(TenantAccountJoin.id.asc()).first()
@ -274,7 +274,10 @@ class TenantService:
if available_ta:
return
tenant = TenantService.create_tenant(f"{account.name}'s Workspace")
if name:
tenant = TenantService.create_tenant(name)
else:
tenant = TenantService.create_tenant(f"{account.name}'s Workspace")
TenantService.create_tenant_member(tenant, account, role="owner")
account.current_tenant = tenant
db.session.commit()

View File

@ -137,7 +137,7 @@ class DatasetService:
@staticmethod
def create_empty_dataset(
tenant_id: str, name: str, indexing_technique: Optional[str], account: Account, permission: Optional[str]
tenant_id: str, name: str, indexing_technique: Optional[str], account: Account, permission: Optional[str] = None
):
# check if dataset name already exists
if Dataset.query.filter_by(name=name, tenant_id=tenant_id).first():

View File

@ -19,7 +19,7 @@ def send_invite_member_mail_task(language: str, to: str, token: str, inviter_nam
:param inviter_name
:param workspace_name
Usage: send_invite_member_mail_task.delay(langauge, to, token, inviter_name, workspace_name)
Usage: send_invite_member_mail_task.delay(language, to, token, inviter_name, workspace_name)
"""
if not mail.is_inited():
return

View File

@ -19,6 +19,7 @@ def example_env_file(tmp_path, monkeypatch) -> str:
"""
CONSOLE_API_URL=https://example.com
CONSOLE_WEB_URL=https://example.com
HTTP_REQUEST_MAX_WRITE_TIMEOUT=30
"""
)
)
@ -48,6 +49,12 @@ def test_dify_config(example_env_file):
assert config.API_COMPRESSION_ENABLED is False
assert config.SENTRY_TRACES_SAMPLE_RATE == 1.0
# annotated field with default value
assert config.HTTP_REQUEST_MAX_READ_TIMEOUT == 60
# annotated field with configured value
assert config.HTTP_REQUEST_MAX_WRITE_TIMEOUT == 30
# NOTE: If there is a `.env` file in your Workspace, this test might not succeed as expected.
# This is due to `pymilvus` loading all the variables from the `.env` file into `os.environ`.

View File

@ -285,6 +285,8 @@ ALIYUN_OSS_SECRET_KEY=your-secret-key
ALIYUN_OSS_ENDPOINT=https://oss-ap-southeast-1-internal.aliyuncs.com
ALIYUN_OSS_REGION=ap-southeast-1
ALIYUN_OSS_AUTH_VERSION=v4
# Don't start with '/'. OSS doesn't support leading slash in object names.
ALIYUN_OSS_PATH=your-path
# Tencent COS Configuration
# The name of the Tencent COS bucket to use for storing files.

View File

@ -66,6 +66,7 @@ x-shared-env: &shared-api-worker-env
ALIYUN_OSS_ENDPOINT: ${ALIYUN_OSS_ENDPOINT:-}
ALIYUN_OSS_REGION: ${ALIYUN_OSS_REGION:-}
ALIYUN_OSS_AUTH_VERSION: ${ALIYUN_OSS_AUTH_VERSION:-v4}
ALIYUN_OSS_PATHS: ${ALIYUN_OSS_PATH:-}
TENCENT_COS_BUCKET_NAME: ${TENCENT_COS_BUCKET_NAME:-}
TENCENT_COS_SECRET_KEY: ${TENCENT_COS_SECRET_KEY:-}
TENCENT_COS_SECRET_ID: ${TENCENT_COS_SECRET_ID:-}
@ -294,7 +295,7 @@ services:
# ssrf_proxy server
# for more information, please refer to
# https://docs.dify.ai/learn-more/faq/self-host-faq#id-18.-why-is-ssrf_proxy-needed
# https://docs.dify.ai/learn-more/faq/install-faq#id-18.-why-is-ssrf_proxy-needed
ssrf_proxy:
image: ubuntu/squid:latest
restart: always

View File

@ -128,7 +128,7 @@ const AppDetailLayout: FC<IAppDetailLayoutProps> = (props) => {
if (e.status === 404)
router.replace('/apps')
})
}, [appId, isCurrentWorkspaceEditor])
}, [appId, isCurrentWorkspaceEditor, systemFeatures])
useUnmount(() => {
setAppDetail()

View File

@ -95,7 +95,7 @@ const CardView: FC<ICardViewProps> = ({ appId }) => {
if (systemFeatures.enable_web_sso_switch_component) {
const [sso_err] = await asyncRunSafe<AppSSO>(
updateAppSSO({ id: appId, enabled: params.enable_sso }) as Promise<AppSSO>,
updateAppSSO({ id: appId, enabled: Boolean(params.enable_sso) }) as Promise<AppSSO>,
)
if (sso_err) {
handleCallbackResult(sso_err)

View File

@ -255,7 +255,7 @@ const AppCard = ({ app, onRefresh }: AppCardProps) => {
e.preventDefault()
getRedirection(isCurrentWorkspaceEditor, app, push)
}}
className='group flex col-span-1 bg-white border-2 border-solid border-transparent rounded-xl shadow-sm min-h-[160px] flex flex-col transition-all duration-200 ease-in-out cursor-pointer hover:shadow-lg'
className='relative group col-span-1 bg-white border-2 border-solid border-transparent rounded-xl shadow-sm flex flex-col transition-all duration-200 ease-in-out cursor-pointer hover:shadow-lg'
>
<div className='flex pt-[14px] px-[14px] pb-3 h-[66px] items-center gap-3 grow-0 shrink-0'>
<div className='relative shrink-0'>
@ -297,17 +297,16 @@ const AppCard = ({ app, onRefresh }: AppCardProps) => {
</div>
</div>
</div>
<div
className={cn(
'grow mb-2 px-[14px] max-h-[72px] text-xs leading-normal text-gray-500 group-hover:line-clamp-2 group-hover:max-h-[36px]',
tags.length ? 'line-clamp-2' : 'line-clamp-4',
)}
title={app.description}
>
{app.description}
<div className='title-wrapper h-[90px] px-[14px] text-xs leading-normal text-gray-500'>
<div
className={cn(tags.length ? 'line-clamp-2' : 'line-clamp-4', 'group-hover:line-clamp-2')}
title={app.description}
>
{app.description}
</div>
</div>
<div className={cn(
'items-center shrink-0 mt-1 pt-1 pl-[14px] pr-[6px] pb-[6px] h-[42px]',
'absolute bottom-1 left-0 right-0 items-center shrink-0 pt-1 pl-[14px] pr-[6px] pb-[6px] h-[42px]',
tags.length ? 'flex' : '!hidden group-hover:!flex',
)}>
{isCurrentWorkspaceEditor && (

View File

@ -139,7 +139,7 @@ const Apps = () => {
<nav className='grid content-start grid-cols-1 gap-4 px-12 pt-2 sm:grid-cols-2 md:grid-cols-3 lg:grid-cols-4 grow shrink-0'>
{isCurrentWorkspaceEditor
&& <NewAppCard onSuccess={mutate} />}
{data?.map(({ data: apps }: any) => apps.map((app: any) => (
{data?.map(({ data: apps }) => apps.map(app => (
<AppCard key={app.id} app={app} onRefresh={mutate} />
)))}
<CheckModal />

View File

@ -134,8 +134,8 @@ function AppCard({
return (
<div
className={`shadow-xs border-[0.5px] rounded-lg border-gray-200 ${className ?? ''
}`}
className={
`shadow-xs border-[0.5px] rounded-lg border-gray-200 ${className ?? ''}`}
>
<div className={`px-6 py-5 ${customBgColor ?? bgColor} rounded-lg`}>
<div className="mb-2.5 flex flex-row items-start justify-between">
@ -176,7 +176,6 @@ function AppCard({
{isApp && <ShareQRCode content={isApp ? appUrl : apiUrl} selectorId={randomString(8)} className={'hover:bg-gray-200'} />}
<CopyFeedback
content={isApp ? appUrl : apiUrl}
selectorId={randomString(8)}
className={'hover:bg-gray-200'}
/>
{/* button copy link/ button regenerate */}
@ -202,8 +201,8 @@ function AppCard({
onClick={() => setShowConfirmDelete(true)}
>
<div
className={`w-full h-full ${style.refreshIcon} ${genLoading ? style.generateLogo : ''
}`}
className={
`w-full h-full ${style.refreshIcon} ${genLoading ? style.generateLogo : ''}`}
></div>
</div>
</Tooltip>

View File

@ -1,43 +1,83 @@
'use client'
import type { SVGProps } from 'react'
import React, { useState } from 'react'
import type { CSSProperties } from 'react'
import React from 'react'
import { useTranslation } from 'react-i18next'
import cn from 'classnames'
import { RiCloseCircleFill, RiErrorWarningLine, RiSearchLine } from '@remixicon/react'
import { type VariantProps, cva } from 'class-variance-authority'
import cn from '@/utils/classnames'
type InputProps = {
placeholder?: string
value?: string
defaultValue?: string
onChange?: (v: string) => void
className?: string
wrapperClassName?: string
type?: string
showPrefix?: React.ReactNode
prefixIcon?: React.ReactNode
}
const GlassIcon = ({ className }: SVGProps<SVGElement>) => (
<svg width="14" height="14" viewBox="0 0 14 14" fill="none" xmlns="http://www.w3.org/2000/svg" className={className ?? ''}>
<path d="M12.25 12.25L10.2084 10.2083M11.6667 6.70833C11.6667 9.44675 9.44675 11.6667 6.70833 11.6667C3.96992 11.6667 1.75 9.44675 1.75 6.70833C1.75 3.96992 3.96992 1.75 6.70833 1.75C9.44675 1.75 11.6667 3.96992 11.6667 6.70833Z" stroke="#344054" strokeWidth="1.25" strokeLinecap="round" strokeLinejoin="round" />
</svg>
export const inputVariants = cva(
'',
{
variants: {
size: {
regular: 'px-3 radius-md system-sm-regular',
large: 'px-4 radius-lg system-md-regular',
},
},
defaultVariants: {
size: 'regular',
},
},
)
const Input = ({ value, defaultValue, onChange, className = '', wrapperClassName = '', placeholder, type, showPrefix, prefixIcon }: InputProps) => {
const [localValue, setLocalValue] = useState(value ?? defaultValue)
export type InputProps = {
showLeftIcon?: boolean
showClearIcon?: boolean
onClear?: () => void
disabled?: boolean
destructive?: boolean
wrapperClassName?: string
styleCss?: CSSProperties
} & React.InputHTMLAttributes<HTMLInputElement> & VariantProps<typeof inputVariants>
const Input = ({
size,
disabled,
destructive,
showLeftIcon,
showClearIcon,
onClear,
wrapperClassName,
className,
styleCss,
value,
placeholder,
onChange,
...props
}: InputProps) => {
const { t } = useTranslation()
return (
<div className={`relative inline-flex w-full ${wrapperClassName}`}>
{showPrefix && <span className='whitespace-nowrap absolute left-2 self-center'>{prefixIcon ?? <GlassIcon className='h-3.5 w-3.5 stroke-current text-gray-700 stroke-2' />}</span>}
<div className={cn('relative w-full', wrapperClassName)}>
{showLeftIcon && <RiSearchLine className={cn('absolute left-2 top-1/2 -translate-y-1/2 w-4 h-4 text-components-input-text-placeholder')} />}
<input
type={type ?? 'text'}
className={cn('inline-flex h-7 w-full py-1 px-2 rounded-lg text-xs leading-normal bg-gray-100 caret-primary-600 hover:bg-gray-100 focus:ring-1 focus:ring-inset focus:ring-gray-200 focus-visible:outline-none focus:bg-white placeholder:text-gray-400', showPrefix ? '!pl-7' : '', className)}
placeholder={placeholder ?? (showPrefix ? t('common.operation.search') ?? '' : 'please input')}
value={localValue}
onChange={(e) => {
setLocalValue(e.target.value)
onChange && onChange(e.target.value)
}}
style={styleCss}
className={cn(
'w-full py-[7px] bg-components-input-bg-normal border border-transparent text-components-input-text-filled hover:bg-components-input-bg-hover hover:border-components-input-border-hover focus:bg-components-input-bg-active focus:border-components-input-border-active focus:shadow-xs placeholder:text-components-input-text-placeholder appearance-none outline-none caret-primary-600',
inputVariants({ size }),
showLeftIcon && 'pl-[26px]',
showLeftIcon && size === 'large' && 'pl-7',
showClearIcon && value && 'pr-[26px]',
showClearIcon && value && size === 'large' && 'pr-7',
destructive && 'pr-[26px]',
destructive && size === 'large' && 'pr-7',
disabled && 'bg-components-input-bg-disabled border-transparent text-components-input-text-filled-disabled cursor-not-allowed hover:bg-components-input-bg-disabled hover:border-transparent',
destructive && 'bg-components-input-bg-destructive border-components-input-border-destructive text-components-input-text-filled hover:bg-components-input-bg-destructive hover:border-components-input-border-destructive focus:bg-components-input-bg-destructive focus:border-components-input-border-destructive',
className,
)}
placeholder={placeholder ?? (showLeftIcon ? t('common.operation.search') ?? '' : 'please input')}
value={value}
onChange={onChange}
disabled={disabled}
{...props}
/>
{showClearIcon && value && !disabled && !destructive && (
<div className={cn('absolute right-2 top-1/2 -translate-y-1/2 group p-[1px] cursor-pointer')} onClick={onClear}>
<RiCloseCircleFill className='w-3.5 h-3.5 text-text-quaternary cursor-pointer group-hover:text-text-tertiary' />
</div>
)}
{destructive && (
<RiErrorWarningLine className='absolute right-2 top-1/2 -translate-y-1/2 w-4 h-4 text-text-destructive-secondary' />
)}
</div>
)
}

View File

@ -8,7 +8,7 @@ import RemarkGfm from 'remark-gfm'
import SyntaxHighlighter from 'react-syntax-highlighter'
import { atelierHeathLight } from 'react-syntax-highlighter/dist/esm/styles/hljs'
import type { RefObject } from 'react'
import { memo, useEffect, useMemo, useRef, useState } from 'react'
import { Component, memo, useEffect, useMemo, useRef, useState } from 'react'
import type { CodeComponent } from 'react-markdown/lib/ast-to-react'
import cn from '@/utils/classnames'
import CopyBtn from '@/app/components/base/copy-btn'
@ -104,7 +104,7 @@ const CodeBlock: CodeComponent = memo(({ inline, className, children, ...props }
const match = /language-(\w+)/.exec(className || '')
const language = match?.[1]
const languageShowName = getCorrectCapitalizationLanguageName(language || '')
let chartData = JSON.parse(String('{"title":{"text":"Something went wrong."}}').replace(/\n$/, ''))
let chartData = JSON.parse(String('{"title":{"text":"ECharts error - Wrong JSON format."}}').replace(/\n$/, ''))
if (language === 'echarts') {
try {
chartData = JSON.parse(String(children).replace(/\n$/, ''))
@ -143,10 +143,10 @@ const CodeBlock: CodeComponent = memo(({ inline, className, children, ...props }
? (<Flowchart PrimitiveCode={String(children).replace(/\n$/, '')} />)
: (
(language === 'echarts')
? (<div style={{ minHeight: '250px', minWidth: '250px' }}><ReactEcharts
? (<div style={{ minHeight: '250px', minWidth: '250px' }}><ErrorBoundary><ReactEcharts
option={chartData}
>
</ReactEcharts></div>)
</ReactEcharts></ErrorBoundary></div>)
: (<SyntaxHighlighter
{...props}
style={atelierHeathLight}
@ -211,3 +211,25 @@ export function Markdown(props: { content: string; className?: string }) {
</div>
)
}
// **Add an ECharts runtime error handler
// Avoid error #7832 (Crash when ECharts accesses undefined objects)
// This can happen when a component attempts to access an undefined object that references an unregistered map, causing the program to crash.
export default class ErrorBoundary extends Component {
constructor(props) {
super(props)
this.state = { hasError: false }
}
componentDidCatch(error, errorInfo) {
this.setState({ hasError: true })
console.error(error, errorInfo)
}
render() {
if (this.state.hasError)
return <div>Oops! ECharts reported a runtime error. <br />(see the browser console for more information)</div>
return this.props.children
}
}

View File

@ -191,7 +191,7 @@ const RetrievalParamConfig: FC<Props> = ({
<div className='truncate'>{option.label}</div>
<Tooltip
popupContent={<div className='w-[200px]'>{option.tips}</div>}
triggerClassName='ml-0.5 w-3.5 h-4.5'
triggerClassName='ml-0.5 w-3.5 h-3.5'
/>
</div>
))

View File

@ -58,7 +58,7 @@ const EmptyDatasetCreationModal = ({
<div className={s.tip}>{t('datasetCreation.stepOne.modal.tip')}</div>
<div className={s.form}>
<div className={s.label}>{t('datasetCreation.stepOne.modal.input')}</div>
<Input className='!h-8' value={inputValue} placeholder={t('datasetCreation.stepOne.modal.placeholder') || ''} onChange={setInputValue} />
<Input className='!h-8' value={inputValue} placeholder={t('datasetCreation.stepOne.modal.placeholder') || ''} onChange={e => setInputValue(e.target.value)} />
</div>
<div className='flex flex-row-reverse'>
<Button className='w-24 ml-2' variant='primary' onClick={submit}>{t('datasetCreation.stepOne.modal.confirmButton')}</Button>

View File

@ -391,7 +391,7 @@ const Completed: FC<ICompletedProps> = ({
defaultValue={'all'}
className={s.select}
wrapperClassName='h-fit w-[120px] mr-2' />
<Input showPrefix wrapperClassName='!w-52' className='!h-8' onChange={debounce(setSearchValue, 500)} />
<Input showLeftIcon wrapperClassName='!w-52' className='!h-8' onChange={debounce(e => setSearchValue(e.target.value), 500)} />
</div>
<InfiniteVirtualList
embeddingAvailable={embeddingAvailable}

View File

@ -79,7 +79,7 @@ export const FieldInfo: FC<IFieldInfoProps> = ({
/>
: <Input
className={s.input}
onChange={onUpdate}
onChange={e => onUpdate?.(e.target.value)}
value={value}
defaultValue={defaultValue}
placeholder={`${t('datasetDocuments.metadata.placeholder.add')}${label}`}

View File

@ -201,10 +201,10 @@ const Documents: FC<IDocumentsProps> = ({ datasetId }) => {
<div className='flex flex-col px-6 py-4 flex-1'>
<div className='flex items-center justify-between flex-wrap'>
<Input
showPrefix
showLeftIcon
wrapperClassName='!w-[200px]'
className='!h-8 !text-[13px]'
onChange={debounce(setSearchValue, 500)}
onChange={debounce(e => setSearchValue(e.target.value), 500)}
value={searchValue}
/>
<div className='flex gap-2 justify-center items-center !h-8'>

View File

@ -23,7 +23,7 @@ const AppCard = ({
const { t } = useTranslation()
const { app: appBasicInfo } = app
return (
<div className={cn('group flex col-span-1 bg-white border-2 border-solid border-transparent rounded-lg shadow-sm min-h-[160px] flex flex-col transition-all duration-200 ease-in-out cursor-pointer hover:shadow-lg')}>
<div className={cn('relative overflow-hidden pb-2 group col-span-1 bg-white border-2 border-solid border-transparent rounded-lg shadow-sm flex flex-col transition-all duration-200 ease-in-out cursor-pointer hover:shadow-lg')}>
<div className='flex pt-[14px] px-[14px] pb-3 h-[66px] items-center gap-3 grow-0 shrink-0'>
<div className='relative shrink-0'>
<AppIcon
@ -64,9 +64,13 @@ const AppCard = ({
</div>
</div>
</div>
<div className='mb-1 px-[14px] text-xs leading-normal text-gray-500 line-clamp-4 group-hover:line-clamp-2 group-hover:h-9'>{app.description}</div>
<div className="description-wrapper h-[90px] px-[14px] text-xs leading-normal text-gray-500 ">
<div className='line-clamp-4 group-hover:line-clamp-2'>
{app.description}
</div>
</div>
{isExplore && canCreate && (
<div className={cn('hidden items-center flex-wrap min-h-[42px] px-[14px] pt-2 pb-[10px] group-hover:flex')}>
<div className={cn('hidden items-center flex-wrap min-h-[42px] px-[14px] pt-2 pb-[10px] bg-white group-hover:flex absolute bottom-0 left-0 right-0')}>
<div className={cn('flex items-center w-full space-x-2')}>
<Button variant='primary' className='grow h-7' onClick={() => onCreate()}>
<PlusIcon className='w-4 h-4 mr-1' />
@ -76,7 +80,7 @@ const AppCard = ({
</div>
)}
{!isExplore && (
<div className={cn('hidden items-center flex-wrap min-h-[42px] px-[14px] pt-2 pb-[10px] group-hover:flex')}>
<div className={cn('hidden items-center flex-wrap min-h-[42px] px-[14px] pt-2 pb-[10px] bg-white group-hover:flex absolute bottom-0 left-0 right-0')}>
<div className={cn('flex items-center w-full space-x-2')}>
<Button variant='primary' className='grow h-7' onClick={() => onCreate()}>
<PlusIcon className='w-4 h-4 mr-1' />

View File

@ -148,7 +148,7 @@ const SystemModel: FC<SystemModelSelectorProps> = ({
{t('common.modelProvider.systemReasoningModel.tip')}
</div>
}
triggerClassName='ml-0.5'
triggerClassName='ml-0.5 w-4 h-4 shrink-0'
/>
</div>
<div>
@ -168,8 +168,7 @@ const SystemModel: FC<SystemModelSelectorProps> = ({
{t('common.modelProvider.embeddingModel.tip')}
</div>
}
needsDelay={false}
triggerClassName='ml-0.5'
triggerClassName='ml-0.5 w-4 h-4 shrink-0'
/>
</div>
<div>
@ -189,8 +188,7 @@ const SystemModel: FC<SystemModelSelectorProps> = ({
{t('common.modelProvider.rerankModel.tip')}
</div>
}
needsDelay={false}
triggerClassName='ml-0.5'
triggerClassName='ml-0.5 w-4 h-4 shrink-0'
/>
</div>
<div>
@ -210,8 +208,7 @@ const SystemModel: FC<SystemModelSelectorProps> = ({
{t('common.modelProvider.speechToTextModel.tip')}
</div>
}
needsDelay={false}
triggerClassName='ml-0.5'
triggerClassName='ml-0.5 w-4 h-4 shrink-0'
/>
</div>
<div>
@ -231,7 +228,7 @@ const SystemModel: FC<SystemModelSelectorProps> = ({
{t('common.modelProvider.ttsModel.tip')}
</div>
}
triggerClassName='ml-0.5'
triggerClassName='ml-0.5 w-4 h-4 shrink-0'
/>
</div>
<div>

View File

@ -114,7 +114,7 @@ const ConfigCredential: FC<Props> = ({
{t('tools.createTool.authMethod.keyTooltip')}
</div>
}
triggerClassName='ml-0.5'
triggerClassName='ml-0.5 w-4 h-4'
/>
</div>
<input

View File

@ -73,7 +73,7 @@ const KeyValueItem: FC<Props> = ({
<Input
className='rounded-none bg-white border-none system-sm-regular focus:ring-0 focus:bg-gray-100! hover:bg-gray-50'
value={payload.key}
onChange={handleChange('key')}
onChange={e => handleChange('key')(e.target.value)}
/>
)}
</div>

View File

@ -70,7 +70,7 @@ const RetrievalConfig: FC<Props> = ({
}
onMultipleRetrievalConfigChange({
top_k: configs.top_k,
score_threshold: configs.score_threshold_enabled ? (configs.score_threshold || DATASET_DEFAULT.score_threshold) : null,
score_threshold: configs.score_threshold_enabled ? (configs.score_threshold ?? DATASET_DEFAULT.score_threshold) : null,
reranking_model: payload.retrieval_mode === RETRIEVE_TYPE.oneWay
? undefined
: (!configs.reranking_model?.reranking_provider_name

View File

@ -93,7 +93,7 @@ const translation = {
},
env: {
envPanelTitle: '环境变量',
envDescription: '环境变量是一种存储敏感信息的方法,如 API 密钥、数据库密码等。它们被存储在工作流程中,而不是代码中,以便在不同环中共享。',
envDescription: '环境变量是一种存储敏感信息的方法,如 API 密钥、数据库密码等。它们被存储在工作流程中,而不是代码中,以便在不同环中共享。',
envPanelButton: '添加环境变量',
modal: {
title: '添加环境变量',