diff --git a/api/core/model_runtime/model_providers/_position.yaml b/api/core/model_runtime/model_providers/_position.yaml
index 1f5f64019a..80db22ea84 100644
--- a/api/core/model_runtime/model_providers/_position.yaml
+++ b/api/core/model_runtime/model_providers/_position.yaml
@@ -38,3 +38,5 @@
- perfxcloud
- zhinao
- fireworks
+- mixedbread
+- nomic
diff --git a/api/core/model_runtime/model_providers/jina/jina.yaml b/api/core/model_runtime/model_providers/jina/jina.yaml
index 2f2d6e6daa..4ff6ba0f22 100644
--- a/api/core/model_runtime/model_providers/jina/jina.yaml
+++ b/api/core/model_runtime/model_providers/jina/jina.yaml
@@ -1,4 +1,4 @@
-provider: Jina AI
+provider: jina
label:
en_US: Jina AI
description:
diff --git a/api/core/model_runtime/model_providers/mixedbread/__init__.py b/api/core/model_runtime/model_providers/mixedbread/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/api/core/model_runtime/model_providers/mixedbread/_assets/icon_l_en.png b/api/core/model_runtime/model_providers/mixedbread/_assets/icon_l_en.png
new file mode 100644
index 0000000000..2027611bd5
Binary files /dev/null and b/api/core/model_runtime/model_providers/mixedbread/_assets/icon_l_en.png differ
diff --git a/api/core/model_runtime/model_providers/mixedbread/_assets/icon_s_en.png b/api/core/model_runtime/model_providers/mixedbread/_assets/icon_s_en.png
new file mode 100644
index 0000000000..5c357bddbd
Binary files /dev/null and b/api/core/model_runtime/model_providers/mixedbread/_assets/icon_s_en.png differ
diff --git a/api/core/model_runtime/model_providers/mixedbread/mixedbread.py b/api/core/model_runtime/model_providers/mixedbread/mixedbread.py
new file mode 100644
index 0000000000..3c78150e6f
--- /dev/null
+++ b/api/core/model_runtime/model_providers/mixedbread/mixedbread.py
@@ -0,0 +1,27 @@
+import logging
+
+from core.model_runtime.entities.model_entities import ModelType
+from core.model_runtime.errors.validate import CredentialsValidateFailedError
+from core.model_runtime.model_providers.__base.model_provider import ModelProvider
+
+logger = logging.getLogger(__name__)
+
+
+class MixedBreadProvider(ModelProvider):
+ def validate_provider_credentials(self, credentials: dict) -> None:
+ """
+ Validate provider credentials
+        If validation fails, raise an exception.
+
+ :param credentials: provider credentials, credentials form defined in `provider_credential_schema`.
+ """
+ try:
+ model_instance = self.get_model_instance(ModelType.TEXT_EMBEDDING)
+
+        # Use the `mxbai-embed-large-v1` model for validation.
+ model_instance.validate_credentials(model="mxbai-embed-large-v1", credentials=credentials)
+ except CredentialsValidateFailedError as ex:
+ raise ex
+ except Exception as ex:
+            logger.exception(f"{self.get_provider_schema().provider} credentials validation failed")
+ raise ex
diff --git a/api/core/model_runtime/model_providers/mixedbread/mixedbread.yaml b/api/core/model_runtime/model_providers/mixedbread/mixedbread.yaml
new file mode 100644
index 0000000000..2f43aea6ad
--- /dev/null
+++ b/api/core/model_runtime/model_providers/mixedbread/mixedbread.yaml
@@ -0,0 +1,31 @@
+provider: mixedbread
+label:
+ en_US: MixedBread
+description:
+  en_US: Embedding and Rerank Models Supported
+icon_small:
+ en_US: icon_s_en.png
+icon_large:
+ en_US: icon_l_en.png
+background: "#EFFDFD"
+help:
+ title:
+ en_US: Get your API key from MixedBread AI
+ zh_Hans: 从 MixedBread 获取 API Key
+ url:
+ en_US: https://www.mixedbread.ai/
+supported_model_types:
+ - text-embedding
+ - rerank
+configurate_methods:
+ - predefined-model
+provider_credential_schema:
+ credential_form_schemas:
+ - variable: api_key
+ label:
+ en_US: API Key
+ type: secret-input
+ required: true
+ placeholder:
+ zh_Hans: 在此输入您的 API Key
+ en_US: Enter your API Key
diff --git a/api/core/model_runtime/model_providers/mixedbread/rerank/__init__.py b/api/core/model_runtime/model_providers/mixedbread/rerank/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/api/core/model_runtime/model_providers/mixedbread/rerank/mxbai-rerank-large-v1-en.yaml b/api/core/model_runtime/model_providers/mixedbread/rerank/mxbai-rerank-large-v1-en.yaml
new file mode 100644
index 0000000000..beda219953
--- /dev/null
+++ b/api/core/model_runtime/model_providers/mixedbread/rerank/mxbai-rerank-large-v1-en.yaml
@@ -0,0 +1,4 @@
+model: mxbai-rerank-large-v1
+model_type: rerank
+model_properties:
+ context_size: 512
diff --git a/api/core/model_runtime/model_providers/mixedbread/rerank/rerank.py b/api/core/model_runtime/model_providers/mixedbread/rerank/rerank.py
new file mode 100644
index 0000000000..bf3c12fd86
--- /dev/null
+++ b/api/core/model_runtime/model_providers/mixedbread/rerank/rerank.py
@@ -0,0 +1,125 @@
+from typing import Optional
+
+import httpx
+
+from core.model_runtime.entities.common_entities import I18nObject
+from core.model_runtime.entities.model_entities import AIModelEntity, FetchFrom, ModelPropertyKey, ModelType
+from core.model_runtime.entities.rerank_entities import RerankDocument, RerankResult
+from core.model_runtime.errors.invoke import (
+ InvokeAuthorizationError,
+ InvokeBadRequestError,
+ InvokeConnectionError,
+ InvokeError,
+ InvokeRateLimitError,
+ InvokeServerUnavailableError,
+)
+from core.model_runtime.errors.validate import CredentialsValidateFailedError
+from core.model_runtime.model_providers.__base.rerank_model import RerankModel
+
+
+class MixedBreadRerankModel(RerankModel):
+ """
+ Model class for MixedBread rerank model.
+ """
+
+ def _invoke(
+ self,
+ model: str,
+ credentials: dict,
+ query: str,
+ docs: list[str],
+ score_threshold: Optional[float] = None,
+ top_n: Optional[int] = None,
+ user: Optional[str] = None,
+ ) -> RerankResult:
+ """
+ Invoke rerank model
+
+ :param model: model name
+ :param credentials: model credentials
+ :param query: search query
+ :param docs: docs for reranking
+ :param score_threshold: score threshold
+ :param top_n: top n documents to return
+ :param user: unique user id
+ :return: rerank result
+ """
+ if len(docs) == 0:
+ return RerankResult(model=model, docs=[])
+
+ base_url = credentials.get("base_url", "https://api.mixedbread.ai/v1")
+ base_url = base_url.removesuffix("/")
+
+ try:
+ response = httpx.post(
+ base_url + "/reranking",
+ json={"model": model, "query": query, "input": docs, "top_k": top_n, "return_input": True},
+ headers={"Authorization": f"Bearer {credentials.get('api_key')}", "Content-Type": "application/json"},
+ )
+ response.raise_for_status()
+ results = response.json()
+
+ rerank_documents = []
+ for result in results["data"]:
+ rerank_document = RerankDocument(
+ index=result["index"],
+ text=result["input"],
+ score=result["score"],
+ )
+ if score_threshold is None or result["score"] >= score_threshold:
+ rerank_documents.append(rerank_document)
+
+ return RerankResult(model=model, docs=rerank_documents)
+ except httpx.HTTPStatusError as e:
+ raise InvokeServerUnavailableError(str(e))
+
+ def validate_credentials(self, model: str, credentials: dict) -> None:
+ """
+ Validate model credentials
+
+ :param model: model name
+ :param credentials: model credentials
+ :return:
+ """
+ try:
+ self._invoke(
+ model=model,
+ credentials=credentials,
+ query="What is the capital of the United States?",
+ docs=[
+ "Carson City is the capital city of the American state of Nevada. At the 2010 United States "
+ "Census, Carson City had a population of 55,274.",
+ "The Commonwealth of the Northern Mariana Islands is a group of islands in the Pacific Ocean that "
+ "are a political division controlled by the United States. Its capital is Saipan.",
+ ],
+ score_threshold=0.8,
+ )
+ except Exception as ex:
+ raise CredentialsValidateFailedError(str(ex))
+
+ @property
+ def _invoke_error_mapping(self) -> dict[type[InvokeError], list[type[Exception]]]:
+ """
+ Map model invoke error to unified error
+ """
+ return {
+ InvokeConnectionError: [httpx.ConnectError],
+ InvokeServerUnavailableError: [httpx.RemoteProtocolError],
+ InvokeRateLimitError: [],
+ InvokeAuthorizationError: [httpx.HTTPStatusError],
+ InvokeBadRequestError: [httpx.RequestError],
+ }
+
+ def get_customizable_model_schema(self, model: str, credentials: dict) -> AIModelEntity:
+ """
+        Generate a customizable model entity from the given credentials.
+ """
+ entity = AIModelEntity(
+ model=model,
+ label=I18nObject(en_US=model),
+ model_type=ModelType.RERANK,
+ fetch_from=FetchFrom.CUSTOMIZABLE_MODEL,
+ model_properties={ModelPropertyKey.CONTEXT_SIZE: int(credentials.get("context_size", "512"))},
+ )
+
+ return entity
diff --git a/api/core/model_runtime/model_providers/mixedbread/text_embedding/__init__.py b/api/core/model_runtime/model_providers/mixedbread/text_embedding/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/api/core/model_runtime/model_providers/mixedbread/text_embedding/mxbai-embed-2d-large-v1-en.yaml b/api/core/model_runtime/model_providers/mixedbread/text_embedding/mxbai-embed-2d-large-v1-en.yaml
new file mode 100644
index 0000000000..0c3c863d06
--- /dev/null
+++ b/api/core/model_runtime/model_providers/mixedbread/text_embedding/mxbai-embed-2d-large-v1-en.yaml
@@ -0,0 +1,8 @@
+model: mxbai-embed-2d-large-v1
+model_type: text-embedding
+model_properties:
+ context_size: 512
+pricing:
+ input: '0.0001'
+ unit: '0.001'
+ currency: USD
diff --git a/api/core/model_runtime/model_providers/mixedbread/text_embedding/mxbai-embed-large-v1-en.yaml b/api/core/model_runtime/model_providers/mixedbread/text_embedding/mxbai-embed-large-v1-en.yaml
new file mode 100644
index 0000000000..0c5cda2a72
--- /dev/null
+++ b/api/core/model_runtime/model_providers/mixedbread/text_embedding/mxbai-embed-large-v1-en.yaml
@@ -0,0 +1,8 @@
+model: mxbai-embed-large-v1
+model_type: text-embedding
+model_properties:
+ context_size: 512
+pricing:
+ input: '0.0001'
+ unit: '0.001'
+ currency: USD
diff --git a/api/core/model_runtime/model_providers/mixedbread/text_embedding/text_embedding.py b/api/core/model_runtime/model_providers/mixedbread/text_embedding/text_embedding.py
new file mode 100644
index 0000000000..05d9a9a0c6
--- /dev/null
+++ b/api/core/model_runtime/model_providers/mixedbread/text_embedding/text_embedding.py
@@ -0,0 +1,163 @@
+import time
+from json import JSONDecodeError, dumps
+from typing import Optional
+
+import requests
+
+from core.model_runtime.entities.common_entities import I18nObject
+from core.model_runtime.entities.model_entities import AIModelEntity, FetchFrom, ModelPropertyKey, ModelType, PriceType
+from core.model_runtime.entities.text_embedding_entities import EmbeddingUsage, TextEmbeddingResult
+from core.model_runtime.errors.invoke import (
+ InvokeAuthorizationError,
+ InvokeBadRequestError,
+ InvokeConnectionError,
+ InvokeError,
+ InvokeRateLimitError,
+ InvokeServerUnavailableError,
+)
+from core.model_runtime.errors.validate import CredentialsValidateFailedError
+from core.model_runtime.model_providers.__base.text_embedding_model import TextEmbeddingModel
+
+
+class MixedBreadTextEmbeddingModel(TextEmbeddingModel):
+ """
+ Model class for MixedBread text embedding model.
+ """
+
+ api_base: str = "https://api.mixedbread.ai/v1"
+
+ def _invoke(
+ self, model: str, credentials: dict, texts: list[str], user: Optional[str] = None
+ ) -> TextEmbeddingResult:
+ """
+ Invoke text embedding model
+
+ :param model: model name
+ :param credentials: model credentials
+ :param texts: texts to embed
+ :param user: unique user id
+ :return: embeddings result
+ """
+        api_key = credentials.get("api_key")
+ if not api_key:
+ raise CredentialsValidateFailedError("api_key is required")
+
+ base_url = credentials.get("base_url", self.api_base)
+ base_url = base_url.removesuffix("/")
+
+ url = base_url + "/embeddings"
+ headers = {"Authorization": "Bearer " + api_key, "Content-Type": "application/json"}
+
+ data = {"model": model, "input": texts}
+
+ try:
+ response = requests.post(url, headers=headers, data=dumps(data))
+ except Exception as e:
+ raise InvokeConnectionError(str(e))
+
+ if response.status_code != 200:
+ try:
+ resp = response.json()
+ msg = resp["detail"]
+ if response.status_code == 401:
+ raise InvokeAuthorizationError(msg)
+ elif response.status_code == 429:
+ raise InvokeRateLimitError(msg)
+ elif response.status_code == 500:
+ raise InvokeServerUnavailableError(msg)
+ else:
+ raise InvokeBadRequestError(msg)
+ except JSONDecodeError as e:
+ raise InvokeServerUnavailableError(
+ f"Failed to convert response to json: {e} with text: {response.text}"
+ )
+
+ try:
+ resp = response.json()
+ embeddings = resp["data"]
+ usage = resp["usage"]
+ except Exception as e:
+ raise InvokeServerUnavailableError(f"Failed to convert response to json: {e} with text: {response.text}")
+
+ usage = self._calc_response_usage(model=model, credentials=credentials, tokens=usage["total_tokens"])
+
+ result = TextEmbeddingResult(
+ model=model, embeddings=[[float(data) for data in x["embedding"]] for x in embeddings], usage=usage
+ )
+
+ return result
+
+ def get_num_tokens(self, model: str, credentials: dict, texts: list[str]) -> int:
+ """
+ Get number of tokens for given prompt messages
+
+ :param model: model name
+ :param credentials: model credentials
+ :param texts: texts to embed
+ :return:
+ """
+ return sum(self._get_num_tokens_by_gpt2(text) for text in texts)
+
+ def validate_credentials(self, model: str, credentials: dict) -> None:
+ """
+ Validate model credentials
+
+ :param model: model name
+ :param credentials: model credentials
+ :return:
+ """
+ try:
+ self._invoke(model=model, credentials=credentials, texts=["ping"])
+ except Exception as e:
+ raise CredentialsValidateFailedError(f"Credentials validation failed: {e}")
+
+ @property
+ def _invoke_error_mapping(self) -> dict[type[InvokeError], list[type[Exception]]]:
+ return {
+ InvokeConnectionError: [InvokeConnectionError],
+ InvokeServerUnavailableError: [InvokeServerUnavailableError],
+ InvokeRateLimitError: [InvokeRateLimitError],
+ InvokeAuthorizationError: [InvokeAuthorizationError],
+ InvokeBadRequestError: [KeyError, InvokeBadRequestError],
+ }
+
+ def _calc_response_usage(self, model: str, credentials: dict, tokens: int) -> EmbeddingUsage:
+ """
+ Calculate response usage
+
+ :param model: model name
+ :param credentials: model credentials
+ :param tokens: input tokens
+ :return: usage
+ """
+ # get input price info
+ input_price_info = self.get_price(
+ model=model, credentials=credentials, price_type=PriceType.INPUT, tokens=tokens
+ )
+
+ # transform usage
+ usage = EmbeddingUsage(
+ tokens=tokens,
+ total_tokens=tokens,
+ unit_price=input_price_info.unit_price,
+ price_unit=input_price_info.unit,
+ total_price=input_price_info.total_amount,
+ currency=input_price_info.currency,
+ latency=time.perf_counter() - self.started_at,
+ )
+
+ return usage
+
+ def get_customizable_model_schema(self, model: str, credentials: dict) -> AIModelEntity:
+ """
+        Generate a customizable model entity from the given credentials.
+ """
+ entity = AIModelEntity(
+ model=model,
+ label=I18nObject(en_US=model),
+ model_type=ModelType.TEXT_EMBEDDING,
+ fetch_from=FetchFrom.CUSTOMIZABLE_MODEL,
+ model_properties={ModelPropertyKey.CONTEXT_SIZE: int(credentials.get("context_size", "512"))},
+ )
+
+ return entity
diff --git a/api/core/model_runtime/model_providers/nomic/text_embedding/text_embedding.py b/api/core/model_runtime/model_providers/nomic/text_embedding/text_embedding.py
index 6cccff6d46..ccbfd196a9 100644
--- a/api/core/model_runtime/model_providers/nomic/text_embedding/text_embedding.py
+++ b/api/core/model_runtime/model_providers/nomic/text_embedding/text_embedding.py
@@ -77,15 +77,7 @@ class NomicTextEmbeddingModel(_CommonNomic, TextEmbeddingModel):
:param texts: texts to embed
:return:
"""
- if len(texts) == 0:
- return 0
-
- _, prompt_tokens, _ = self.embed_text(
- model=model,
- credentials=credentials,
- texts=texts,
- )
- return prompt_tokens
+        return sum(self._get_num_tokens_by_gpt2(text) for text in texts)

    def validate_credentials(self, model: str, credentials: dict) -> None:
"""
diff --git a/api/core/model_runtime/model_providers/tongyi/llm/farui-plus.yaml b/api/core/model_runtime/model_providers/tongyi/llm/farui-plus.yaml
index e5de586c1c..d0ff443827 100644
--- a/api/core/model_runtime/model_providers/tongyi/llm/farui-plus.yaml
+++ b/api/core/model_runtime/model_providers/tongyi/llm/farui-plus.yaml
@@ -67,15 +67,6 @@ parameter_rules:
help:
zh_Hans: 用于控制模型生成时的重复度。提高repetition_penalty时可以降低模型生成的重复度。1.0表示不做惩罚。
en_US: Used to control the repeatability when generating models. Increasing repetition_penalty can reduce the duplication of model generation. 1.0 means no punishment.
- - name: enable_search
- type: boolean
- default: false
- label:
- zh_Hans: 联网搜索
- en_US: Web Search
- help:
- zh_Hans: 模型内置了互联网搜索服务,该参数控制模型在生成文本时是否参考使用互联网搜索结果。启用互联网搜索,模型会将搜索结果作为文本生成过程中的参考信息,但模型会基于其内部逻辑“自行判断”是否使用互联网搜索结果。
- en_US: The model has a built-in Internet search service. This parameter controls whether the model refers to Internet search results when generating text. When Internet search is enabled, the model will use the search results as reference information in the text generation process, but the model will "judge" whether to use Internet search results based on its internal logic.
- name: response_format
use_template: response_format
pricing:
diff --git a/api/core/model_runtime/model_providers/tongyi/llm/qwen-coder-turbo-0919.yaml b/api/core/model_runtime/model_providers/tongyi/llm/qwen-coder-turbo-0919.yaml
index 6ab39cde2d..d9792e71ee 100644
--- a/api/core/model_runtime/model_providers/tongyi/llm/qwen-coder-turbo-0919.yaml
+++ b/api/core/model_runtime/model_providers/tongyi/llm/qwen-coder-turbo-0919.yaml
@@ -65,15 +65,6 @@ parameter_rules:
help:
zh_Hans: 用于控制模型生成时的重复度。提高repetition_penalty时可以降低模型生成的重复度。1.0表示不做惩罚。
en_US: Used to control the repeatability when generating models. Increasing repetition_penalty can reduce the duplication of model generation. 1.0 means no punishment.
- - name: enable_search
- type: boolean
- default: false
- label:
- zh_Hans: 联网搜索
- en_US: Web Search
- help:
- zh_Hans: 模型内置了互联网搜索服务,该参数控制模型在生成文本时是否参考使用互联网搜索结果。启用互联网搜索,模型会将搜索结果作为文本生成过程中的参考信息,但模型会基于其内部逻辑“自行判断”是否使用互联网搜索结果。
- en_US: The model has a built-in Internet search service. This parameter controls whether the model refers to Internet search results when generating text. When Internet search is enabled, the model will use the search results as reference information in the text generation process, but the model will "judge" whether to use Internet search results based on its internal logic.
- name: response_format
use_template: response_format
pricing:
diff --git a/api/core/model_runtime/model_providers/tongyi/llm/qwen-coder-turbo-latest.yaml b/api/core/model_runtime/model_providers/tongyi/llm/qwen-coder-turbo-latest.yaml
index be6d9a0e07..0b03505c45 100644
--- a/api/core/model_runtime/model_providers/tongyi/llm/qwen-coder-turbo-latest.yaml
+++ b/api/core/model_runtime/model_providers/tongyi/llm/qwen-coder-turbo-latest.yaml
@@ -65,15 +65,6 @@ parameter_rules:
help:
zh_Hans: 用于控制模型生成时的重复度。提高repetition_penalty时可以降低模型生成的重复度。1.0表示不做惩罚。
en_US: Used to control the repeatability when generating models. Increasing repetition_penalty can reduce the duplication of model generation. 1.0 means no punishment.
- - name: enable_search
- type: boolean
- default: false
- label:
- zh_Hans: 联网搜索
- en_US: Web Search
- help:
- zh_Hans: 模型内置了互联网搜索服务,该参数控制模型在生成文本时是否参考使用互联网搜索结果。启用互联网搜索,模型会将搜索结果作为文本生成过程中的参考信息,但模型会基于其内部逻辑“自行判断”是否使用互联网搜索结果。
- en_US: The model has a built-in Internet search service. This parameter controls whether the model refers to Internet search results when generating text. When Internet search is enabled, the model will use the search results as reference information in the text generation process, but the model will "judge" whether to use Internet search results based on its internal logic.
- name: response_format
use_template: response_format
pricing:
diff --git a/api/core/model_runtime/model_providers/tongyi/llm/qwen-coder-turbo.yaml b/api/core/model_runtime/model_providers/tongyi/llm/qwen-coder-turbo.yaml
index d2aca4f514..2a6c040853 100644
--- a/api/core/model_runtime/model_providers/tongyi/llm/qwen-coder-turbo.yaml
+++ b/api/core/model_runtime/model_providers/tongyi/llm/qwen-coder-turbo.yaml
@@ -65,15 +65,6 @@ parameter_rules:
help:
zh_Hans: 用于控制模型生成时的重复度。提高repetition_penalty时可以降低模型生成的重复度。1.0表示不做惩罚。
en_US: Used to control the repeatability when generating models. Increasing repetition_penalty can reduce the duplication of model generation. 1.0 means no punishment.
- - name: enable_search
- type: boolean
- default: false
- label:
- zh_Hans: 联网搜索
- en_US: Web Search
- help:
- zh_Hans: 模型内置了互联网搜索服务,该参数控制模型在生成文本时是否参考使用互联网搜索结果。启用互联网搜索,模型会将搜索结果作为文本生成过程中的参考信息,但模型会基于其内部逻辑“自行判断”是否使用互联网搜索结果。
- en_US: The model has a built-in Internet search service. This parameter controls whether the model refers to Internet search results when generating text. When Internet search is enabled, the model will use the search results as reference information in the text generation process, but the model will "judge" whether to use Internet search results based on its internal logic.
- name: response_format
use_template: response_format
pricing:
diff --git a/api/core/model_runtime/model_providers/tongyi/llm/qwen-long.yaml b/api/core/model_runtime/model_providers/tongyi/llm/qwen-long.yaml
index a59a3350f6..bad7f4f472 100644
--- a/api/core/model_runtime/model_providers/tongyi/llm/qwen-long.yaml
+++ b/api/core/model_runtime/model_providers/tongyi/llm/qwen-long.yaml
@@ -68,15 +68,6 @@ parameter_rules:
help:
zh_Hans: 用于控制模型生成时的重复度。提高repetition_penalty时可以降低模型生成的重复度。1.0表示不做惩罚。
en_US: Used to control the repeatability when generating models. Increasing repetition_penalty can reduce the duplication of model generation. 1.0 means no punishment.
- - name: enable_search
- type: boolean
- default: false
- label:
- zh_Hans: 联网搜索
- en_US: Web Search
- help:
- zh_Hans: 模型内置了互联网搜索服务,该参数控制模型在生成文本时是否参考使用互联网搜索结果。启用互联网搜索,模型会将搜索结果作为文本生成过程中的参考信息,但模型会基于其内部逻辑“自行判断”是否使用互联网搜索结果。
- en_US: The model has a built-in Internet search service. This parameter controls whether the model refers to Internet search results when generating text. When Internet search is enabled, the model will use the search results as reference information in the text generation process, but the model will "judge" whether to use Internet search results based on its internal logic.
- name: response_format
use_template: response_format
pricing:
diff --git a/api/core/model_runtime/model_providers/tongyi/llm/qwen-math-plus-0816.yaml b/api/core/model_runtime/model_providers/tongyi/llm/qwen-math-plus-0816.yaml
index cab7233c98..c14aee1e1e 100644
--- a/api/core/model_runtime/model_providers/tongyi/llm/qwen-math-plus-0816.yaml
+++ b/api/core/model_runtime/model_providers/tongyi/llm/qwen-math-plus-0816.yaml
@@ -65,15 +65,6 @@ parameter_rules:
help:
zh_Hans: 用于控制模型生成时的重复度。提高repetition_penalty时可以降低模型生成的重复度。1.0表示不做惩罚。
en_US: Used to control the repeatability when generating models. Increasing repetition_penalty can reduce the duplication of model generation. 1.0 means no punishment.
- - name: enable_search
- type: boolean
- default: false
- label:
- zh_Hans: 联网搜索
- en_US: Web Search
- help:
- zh_Hans: 模型内置了互联网搜索服务,该参数控制模型在生成文本时是否参考使用互联网搜索结果。启用互联网搜索,模型会将搜索结果作为文本生成过程中的参考信息,但模型会基于其内部逻辑“自行判断”是否使用互联网搜索结果。
- en_US: The model has a built-in Internet search service. This parameter controls whether the model refers to Internet search results when generating text. When Internet search is enabled, the model will use the search results as reference information in the text generation process, but the model will "judge" whether to use Internet search results based on its internal logic.
- name: response_format
use_template: response_format
pricing:
diff --git a/api/core/model_runtime/model_providers/tongyi/llm/qwen-math-plus-0919.yaml b/api/core/model_runtime/model_providers/tongyi/llm/qwen-math-plus-0919.yaml
index f82fba0c01..9d74eeca3e 100644
--- a/api/core/model_runtime/model_providers/tongyi/llm/qwen-math-plus-0919.yaml
+++ b/api/core/model_runtime/model_providers/tongyi/llm/qwen-math-plus-0919.yaml
@@ -65,15 +65,6 @@ parameter_rules:
help:
zh_Hans: 用于控制模型生成时的重复度。提高repetition_penalty时可以降低模型生成的重复度。1.0表示不做惩罚。
en_US: Used to control the repeatability when generating models. Increasing repetition_penalty can reduce the duplication of model generation. 1.0 means no punishment.
- - name: enable_search
- type: boolean
- default: false
- label:
- zh_Hans: 联网搜索
- en_US: Web Search
- help:
- zh_Hans: 模型内置了互联网搜索服务,该参数控制模型在生成文本时是否参考使用互联网搜索结果。启用互联网搜索,模型会将搜索结果作为文本生成过程中的参考信息,但模型会基于其内部逻辑“自行判断”是否使用互联网搜索结果。
- en_US: The model has a built-in Internet search service. This parameter controls whether the model refers to Internet search results when generating text. When Internet search is enabled, the model will use the search results as reference information in the text generation process, but the model will "judge" whether to use Internet search results based on its internal logic.
- name: response_format
use_template: response_format
pricing:
diff --git a/api/core/model_runtime/model_providers/tongyi/llm/qwen-math-plus-latest.yaml b/api/core/model_runtime/model_providers/tongyi/llm/qwen-math-plus-latest.yaml
index e2fb6e0e55..b8601a969a 100644
--- a/api/core/model_runtime/model_providers/tongyi/llm/qwen-math-plus-latest.yaml
+++ b/api/core/model_runtime/model_providers/tongyi/llm/qwen-math-plus-latest.yaml
@@ -65,15 +65,6 @@ parameter_rules:
help:
zh_Hans: 用于控制模型生成时的重复度。提高repetition_penalty时可以降低模型生成的重复度。1.0表示不做惩罚。
en_US: Used to control the repeatability when generating models. Increasing repetition_penalty can reduce the duplication of model generation. 1.0 means no punishment.
- - name: enable_search
- type: boolean
- default: false
- label:
- zh_Hans: 联网搜索
- en_US: Web Search
- help:
- zh_Hans: 模型内置了互联网搜索服务,该参数控制模型在生成文本时是否参考使用互联网搜索结果。启用互联网搜索,模型会将搜索结果作为文本生成过程中的参考信息,但模型会基于其内部逻辑“自行判断”是否使用互联网搜索结果。
- en_US: The model has a built-in Internet search service. This parameter controls whether the model refers to Internet search results when generating text. When Internet search is enabled, the model will use the search results as reference information in the text generation process, but the model will "judge" whether to use Internet search results based on its internal logic.
- name: response_format
use_template: response_format
pricing:
diff --git a/api/core/model_runtime/model_providers/tongyi/llm/qwen-math-plus.yaml b/api/core/model_runtime/model_providers/tongyi/llm/qwen-math-plus.yaml
index 8803e747e5..4a948be597 100644
--- a/api/core/model_runtime/model_providers/tongyi/llm/qwen-math-plus.yaml
+++ b/api/core/model_runtime/model_providers/tongyi/llm/qwen-math-plus.yaml
@@ -65,15 +65,6 @@ parameter_rules:
help:
zh_Hans: 用于控制模型生成时的重复度。提高repetition_penalty时可以降低模型生成的重复度。1.0表示不做惩罚。
en_US: Used to control the repeatability when generating models. Increasing repetition_penalty can reduce the duplication of model generation. 1.0 means no punishment.
- - name: enable_search
- type: boolean
- default: false
- label:
- zh_Hans: 联网搜索
- en_US: Web Search
- help:
- zh_Hans: 模型内置了互联网搜索服务,该参数控制模型在生成文本时是否参考使用互联网搜索结果。启用互联网搜索,模型会将搜索结果作为文本生成过程中的参考信息,但模型会基于其内部逻辑“自行判断”是否使用互联网搜索结果。
- en_US: The model has a built-in Internet search service. This parameter controls whether the model refers to Internet search results when generating text. When Internet search is enabled, the model will use the search results as reference information in the text generation process, but the model will "judge" whether to use Internet search results based on its internal logic.
- name: response_format
use_template: response_format
pricing:
diff --git a/api/core/model_runtime/model_providers/tongyi/llm/qwen-math-turbo-0919.yaml b/api/core/model_runtime/model_providers/tongyi/llm/qwen-math-turbo-0919.yaml
index 0dc5a066f0..bffe324a96 100644
--- a/api/core/model_runtime/model_providers/tongyi/llm/qwen-math-turbo-0919.yaml
+++ b/api/core/model_runtime/model_providers/tongyi/llm/qwen-math-turbo-0919.yaml
@@ -65,15 +65,6 @@ parameter_rules:
help:
zh_Hans: 用于控制模型生成时的重复度。提高repetition_penalty时可以降低模型生成的重复度。1.0表示不做惩罚。
en_US: Used to control the repeatability when generating models. Increasing repetition_penalty can reduce the duplication of model generation. 1.0 means no punishment.
- - name: enable_search
- type: boolean
- default: false
- label:
- zh_Hans: 联网搜索
- en_US: Web Search
- help:
- zh_Hans: 模型内置了互联网搜索服务,该参数控制模型在生成文本时是否参考使用互联网搜索结果。启用互联网搜索,模型会将搜索结果作为文本生成过程中的参考信息,但模型会基于其内部逻辑“自行判断”是否使用互联网搜索结果。
- en_US: The model has a built-in Internet search service. This parameter controls whether the model refers to Internet search results when generating text. When Internet search is enabled, the model will use the search results as reference information in the text generation process, but the model will "judge" whether to use Internet search results based on its internal logic.
- name: response_format
use_template: response_format
pricing:
diff --git a/api/core/model_runtime/model_providers/tongyi/llm/qwen-math-turbo-latest.yaml b/api/core/model_runtime/model_providers/tongyi/llm/qwen-math-turbo-latest.yaml
index 2ac0e4692a..0747e96614 100644
--- a/api/core/model_runtime/model_providers/tongyi/llm/qwen-math-turbo-latest.yaml
+++ b/api/core/model_runtime/model_providers/tongyi/llm/qwen-math-turbo-latest.yaml
@@ -65,15 +65,6 @@ parameter_rules:
help:
zh_Hans: 用于控制模型生成时的重复度。提高repetition_penalty时可以降低模型生成的重复度。1.0表示不做惩罚。
en_US: Used to control the repeatability when generating models. Increasing repetition_penalty can reduce the duplication of model generation. 1.0 means no punishment.
- - name: enable_search
- type: boolean
- default: false
- label:
- zh_Hans: 联网搜索
- en_US: Web Search
- help:
- zh_Hans: 模型内置了互联网搜索服务,该参数控制模型在生成文本时是否参考使用互联网搜索结果。启用互联网搜索,模型会将搜索结果作为文本生成过程中的参考信息,但模型会基于其内部逻辑“自行判断”是否使用互联网搜索结果。
- en_US: The model has a built-in Internet search service. This parameter controls whether the model refers to Internet search results when generating text. When Internet search is enabled, the model will use the search results as reference information in the text generation process, but the model will "judge" whether to use Internet search results based on its internal logic.
- name: response_format
use_template: response_format
pricing:
diff --git a/api/core/model_runtime/model_providers/tongyi/llm/qwen-math-turbo.yaml b/api/core/model_runtime/model_providers/tongyi/llm/qwen-math-turbo.yaml
index 9a7f1312e9..dffb5557ff 100644
--- a/api/core/model_runtime/model_providers/tongyi/llm/qwen-math-turbo.yaml
+++ b/api/core/model_runtime/model_providers/tongyi/llm/qwen-math-turbo.yaml
@@ -65,15 +65,6 @@ parameter_rules:
help:
zh_Hans: 用于控制模型生成时的重复度。提高repetition_penalty时可以降低模型生成的重复度。1.0表示不做惩罚。
en_US: Used to control the repeatability when generating models. Increasing repetition_penalty can reduce the duplication of model generation. 1.0 means no punishment.
- - name: enable_search
- type: boolean
- default: false
- label:
- zh_Hans: 联网搜索
- en_US: Web Search
- help:
- zh_Hans: 模型内置了互联网搜索服务,该参数控制模型在生成文本时是否参考使用互联网搜索结果。启用互联网搜索,模型会将搜索结果作为文本生成过程中的参考信息,但模型会基于其内部逻辑“自行判断”是否使用互联网搜索结果。
- en_US: The model has a built-in Internet search service. This parameter controls whether the model refers to Internet search results when generating text. When Internet search is enabled, the model will use the search results as reference information in the text generation process, but the model will "judge" whether to use Internet search results based on its internal logic.
- name: response_format
use_template: response_format
pricing:
diff --git a/api/core/model_runtime/model_providers/tongyi/llm/qwen-max-0107.yaml b/api/core/model_runtime/model_providers/tongyi/llm/qwen-max-0107.yaml
index c0eef37557..8ae159f1bf 100644
--- a/api/core/model_runtime/model_providers/tongyi/llm/qwen-max-0107.yaml
+++ b/api/core/model_runtime/model_providers/tongyi/llm/qwen-max-0107.yaml
@@ -67,15 +67,6 @@ parameter_rules:
help:
zh_Hans: 用于控制模型生成时的重复度。提高repetition_penalty时可以降低模型生成的重复度。1.0表示不做惩罚。
en_US: Used to control the repeatability when generating models. Increasing repetition_penalty can reduce the duplication of model generation. 1.0 means no punishment.
- - name: enable_search
- type: boolean
- default: false
- label:
- zh_Hans: 联网搜索
- en_US: Web Search
- help:
- zh_Hans: 模型内置了互联网搜索服务,该参数控制模型在生成文本时是否参考使用互联网搜索结果。启用互联网搜索,模型会将搜索结果作为文本生成过程中的参考信息,但模型会基于其内部逻辑“自行判断”是否使用互联网搜索结果。
- en_US: The model has a built-in Internet search service. This parameter controls whether the model refers to Internet search results when generating text. When Internet search is enabled, the model will use the search results as reference information in the text generation process, but the model will "judge" whether to use Internet search results based on its internal logic.
- name: response_format
use_template: response_format
pricing:
diff --git a/api/core/model_runtime/model_providers/tongyi/llm/qwen-max-0403.yaml b/api/core/model_runtime/model_providers/tongyi/llm/qwen-max-0403.yaml
index c12444bd7b..93fb37254e 100644
--- a/api/core/model_runtime/model_providers/tongyi/llm/qwen-max-0403.yaml
+++ b/api/core/model_runtime/model_providers/tongyi/llm/qwen-max-0403.yaml
@@ -67,15 +67,6 @@ parameter_rules:
help:
zh_Hans: 用于控制模型生成时的重复度。提高repetition_penalty时可以降低模型生成的重复度。1.0表示不做惩罚。
en_US: Used to control the repeatability when generating models. Increasing repetition_penalty can reduce the duplication of model generation. 1.0 means no punishment.
- - name: enable_search
- type: boolean
- default: false
- label:
- zh_Hans: 联网搜索
- en_US: Web Search
- help:
- zh_Hans: 模型内置了互联网搜索服务,该参数控制模型在生成文本时是否参考使用互联网搜索结果。启用互联网搜索,模型会将搜索结果作为文本生成过程中的参考信息,但模型会基于其内部逻辑“自行判断”是否使用互联网搜索结果。
- en_US: The model has a built-in Internet search service. This parameter controls whether the model refers to Internet search results when generating text. When Internet search is enabled, the model will use the search results as reference information in the text generation process, but the model will "judge" whether to use Internet search results based on its internal logic.
- name: response_format
use_template: response_format
pricing:
diff --git a/api/core/model_runtime/model_providers/tongyi/llm/qwen-max-0428.yaml b/api/core/model_runtime/model_providers/tongyi/llm/qwen-max-0428.yaml
index 173c55b6b9..a5c9d49609 100644
--- a/api/core/model_runtime/model_providers/tongyi/llm/qwen-max-0428.yaml
+++ b/api/core/model_runtime/model_providers/tongyi/llm/qwen-max-0428.yaml
@@ -67,15 +67,6 @@ parameter_rules:
help:
zh_Hans: 用于控制模型生成时的重复度。提高repetition_penalty时可以降低模型生成的重复度。1.0表示不做惩罚。
en_US: Used to control the repeatability when generating models. Increasing repetition_penalty can reduce the duplication of model generation. 1.0 means no punishment.
- - name: enable_search
- type: boolean
- default: false
- label:
- zh_Hans: 联网搜索
- en_US: Web Search
- help:
- zh_Hans: 模型内置了互联网搜索服务,该参数控制模型在生成文本时是否参考使用互联网搜索结果。启用互联网搜索,模型会将搜索结果作为文本生成过程中的参考信息,但模型会基于其内部逻辑“自行判断”是否使用互联网搜索结果。
- en_US: The model has a built-in Internet search service. This parameter controls whether the model refers to Internet search results when generating text. When Internet search is enabled, the model will use the search results as reference information in the text generation process, but the model will "judge" whether to use Internet search results based on its internal logic.
- name: response_format
use_template: response_format
pricing:
diff --git a/api/core/model_runtime/model_providers/tongyi/llm/qwen-max-0919.yaml b/api/core/model_runtime/model_providers/tongyi/llm/qwen-max-0919.yaml
index 692a38140d..e4a6dae637 100644
--- a/api/core/model_runtime/model_providers/tongyi/llm/qwen-max-0919.yaml
+++ b/api/core/model_runtime/model_providers/tongyi/llm/qwen-max-0919.yaml
@@ -67,15 +67,6 @@ parameter_rules:
help:
zh_Hans: 用于控制模型生成时的重复度。提高repetition_penalty时可以降低模型生成的重复度。1.0表示不做惩罚。
en_US: Used to control the repeatability when generating models. Increasing repetition_penalty can reduce the duplication of model generation. 1.0 means no punishment.
- - name: enable_search
- type: boolean
- default: false
- label:
- zh_Hans: 联网搜索
- en_US: Web Search
- help:
- zh_Hans: 模型内置了互联网搜索服务,该参数控制模型在生成文本时是否参考使用互联网搜索结果。启用互联网搜索,模型会将搜索结果作为文本生成过程中的参考信息,但模型会基于其内部逻辑“自行判断”是否使用互联网搜索结果。
- en_US: The model has a built-in Internet search service. This parameter controls whether the model refers to Internet search results when generating text. When Internet search is enabled, the model will use the search results as reference information in the text generation process, but the model will "judge" whether to use Internet search results based on its internal logic.
- name: response_format
use_template: response_format
pricing:
diff --git a/api/core/model_runtime/model_providers/tongyi/llm/qwen-max-1201.yaml b/api/core/model_runtime/model_providers/tongyi/llm/qwen-max-1201.yaml
index dc234783cd..6fae8a7d38 100644
--- a/api/core/model_runtime/model_providers/tongyi/llm/qwen-max-1201.yaml
+++ b/api/core/model_runtime/model_providers/tongyi/llm/qwen-max-1201.yaml
@@ -66,12 +66,6 @@ parameter_rules:
help:
zh_Hans: 用于控制模型生成时的重复度。提高repetition_penalty时可以降低模型生成的重复度。1.0表示不做惩罚。
en_US: Used to control the repeatability when generating models. Increasing repetition_penalty can reduce the duplication of model generation. 1.0 means no punishment.
- - name: enable_search
- type: boolean
- default: false
- help:
- zh_Hans: 模型内置了互联网搜索服务,该参数控制模型在生成文本时是否参考使用互联网搜索结果。启用互联网搜索,模型会将搜索结果作为文本生成过程中的参考信息,但模型会基于其内部逻辑“自行判断”是否使用互联网搜索结果。
- en_US: The model has a built-in Internet search service. This parameter controls whether the model refers to Internet search results when generating text. When Internet search is enabled, the model will use the search results as reference information in the text generation process, but the model will "judge" whether to use Internet search results based on its internal logic.
- name: response_format
use_template: response_format
pricing:
diff --git a/api/core/model_runtime/model_providers/tongyi/llm/qwen-max-latest.yaml b/api/core/model_runtime/model_providers/tongyi/llm/qwen-max-latest.yaml
index afd7fb4b77..8e20968859 100644
--- a/api/core/model_runtime/model_providers/tongyi/llm/qwen-max-latest.yaml
+++ b/api/core/model_runtime/model_providers/tongyi/llm/qwen-max-latest.yaml
@@ -67,15 +67,6 @@ parameter_rules:
help:
zh_Hans: 用于控制模型生成时的重复度。提高repetition_penalty时可以降低模型生成的重复度。1.0表示不做惩罚。
en_US: Used to control the repeatability when generating models. Increasing repetition_penalty can reduce the duplication of model generation. 1.0 means no punishment.
- - name: enable_search
- type: boolean
- default: false
- label:
- zh_Hans: 联网搜索
- en_US: Web Search
- help:
- zh_Hans: 模型内置了互联网搜索服务,该参数控制模型在生成文本时是否参考使用互联网搜索结果。启用互联网搜索,模型会将搜索结果作为文本生成过程中的参考信息,但模型会基于其内部逻辑“自行判断”是否使用互联网搜索结果。
- en_US: The model has a built-in Internet search service. This parameter controls whether the model refers to Internet search results when generating text. When Internet search is enabled, the model will use the search results as reference information in the text generation process, but the model will "judge" whether to use Internet search results based on its internal logic.
- name: response_format
use_template: response_format
pricing:
diff --git a/api/core/model_runtime/model_providers/tongyi/llm/qwen-max-longcontext.yaml b/api/core/model_runtime/model_providers/tongyi/llm/qwen-max-longcontext.yaml
index d02ba7af18..9bc50c73fc 100644
--- a/api/core/model_runtime/model_providers/tongyi/llm/qwen-max-longcontext.yaml
+++ b/api/core/model_runtime/model_providers/tongyi/llm/qwen-max-longcontext.yaml
@@ -67,15 +67,6 @@ parameter_rules:
help:
zh_Hans: 用于控制模型生成时的重复度。提高repetition_penalty时可以降低模型生成的重复度。1.0表示不做惩罚。
en_US: Used to control the repeatability when generating models. Increasing repetition_penalty can reduce the duplication of model generation. 1.0 means no punishment.
- - name: enable_search
- type: boolean
- default: false
- label:
- zh_Hans: 联网搜索
- en_US: Web Search
- help:
- zh_Hans: 模型内置了互联网搜索服务,该参数控制模型在生成文本时是否参考使用互联网搜索结果。启用互联网搜索,模型会将搜索结果作为文本生成过程中的参考信息,但模型会基于其内部逻辑“自行判断”是否使用互联网搜索结果。
- en_US: The model has a built-in Internet search service. This parameter controls whether the model refers to Internet search results when generating text. When Internet search is enabled, the model will use the search results as reference information in the text generation process, but the model will "judge" whether to use Internet search results based on its internal logic.
- name: response_format
use_template: response_format
pricing:
diff --git a/api/core/model_runtime/model_providers/tongyi/llm/qwen-plus-0206.yaml b/api/core/model_runtime/model_providers/tongyi/llm/qwen-plus-0206.yaml
index 1111298c37..430599300b 100644
--- a/api/core/model_runtime/model_providers/tongyi/llm/qwen-plus-0206.yaml
+++ b/api/core/model_runtime/model_providers/tongyi/llm/qwen-plus-0206.yaml
@@ -65,15 +65,6 @@ parameter_rules:
help:
zh_Hans: 用于控制模型生成时的重复度。提高repetition_penalty时可以降低模型生成的重复度。1.0表示不做惩罚。
en_US: Used to control the repeatability when generating models. Increasing repetition_penalty can reduce the duplication of model generation. 1.0 means no punishment.
- - name: enable_search
- type: boolean
- default: false
- label:
- zh_Hans: 联网搜索
- en_US: Web Search
- help:
- zh_Hans: 模型内置了互联网搜索服务,该参数控制模型在生成文本时是否参考使用互联网搜索结果。启用互联网搜索,模型会将搜索结果作为文本生成过程中的参考信息,但模型会基于其内部逻辑“自行判断”是否使用互联网搜索结果。
- en_US: The model has a built-in Internet search service. This parameter controls whether the model refers to Internet search results when generating text. When Internet search is enabled, the model will use the search results as reference information in the text generation process, but the model will "judge" whether to use Internet search results based on its internal logic.
- name: response_format
use_template: response_format
pricing:
diff --git a/api/core/model_runtime/model_providers/tongyi/llm/qwen-plus-0624.yaml b/api/core/model_runtime/model_providers/tongyi/llm/qwen-plus-0624.yaml
index ef8dd083ad..906995d2b9 100644
--- a/api/core/model_runtime/model_providers/tongyi/llm/qwen-plus-0624.yaml
+++ b/api/core/model_runtime/model_providers/tongyi/llm/qwen-plus-0624.yaml
@@ -65,15 +65,6 @@ parameter_rules:
help:
zh_Hans: 用于控制模型生成时的重复度。提高repetition_penalty时可以降低模型生成的重复度。1.0表示不做惩罚。
en_US: Used to control the repeatability when generating models. Increasing repetition_penalty can reduce the duplication of model generation. 1.0 means no punishment.
- - name: enable_search
- type: boolean
- default: false
- label:
- zh_Hans: 联网搜索
- en_US: Web Search
- help:
- zh_Hans: 模型内置了互联网搜索服务,该参数控制模型在生成文本时是否参考使用互联网搜索结果。启用互联网搜索,模型会将搜索结果作为文本生成过程中的参考信息,但模型会基于其内部逻辑“自行判断”是否使用互联网搜索结果。
- en_US: The model has a built-in Internet search service. This parameter controls whether the model refers to Internet search results when generating text. When Internet search is enabled, the model will use the search results as reference information in the text generation process, but the model will "judge" whether to use Internet search results based on its internal logic.
- name: response_format
use_template: response_format
pricing:
diff --git a/api/core/model_runtime/model_providers/tongyi/llm/qwen-plus-0723.yaml b/api/core/model_runtime/model_providers/tongyi/llm/qwen-plus-0723.yaml
index 87a4417df5..b33e725dd0 100644
--- a/api/core/model_runtime/model_providers/tongyi/llm/qwen-plus-0723.yaml
+++ b/api/core/model_runtime/model_providers/tongyi/llm/qwen-plus-0723.yaml
@@ -65,15 +65,6 @@ parameter_rules:
help:
zh_Hans: 用于控制模型生成时的重复度。提高repetition_penalty时可以降低模型生成的重复度。1.0表示不做惩罚。
en_US: Used to control the repeatability when generating models. Increasing repetition_penalty can reduce the duplication of model generation. 1.0 means no punishment.
- - name: enable_search
- type: boolean
- default: false
- label:
- zh_Hans: 联网搜索
- en_US: Web Search
- help:
- zh_Hans: 模型内置了互联网搜索服务,该参数控制模型在生成文本时是否参考使用互联网搜索结果。启用互联网搜索,模型会将搜索结果作为文本生成过程中的参考信息,但模型会基于其内部逻辑“自行判断”是否使用互联网搜索结果。
- en_US: The model has a built-in Internet search service. This parameter controls whether the model refers to Internet search results when generating text. When Internet search is enabled, the model will use the search results as reference information in the text generation process, but the model will "judge" whether to use Internet search results based on its internal logic.
- name: response_format
use_template: response_format
pricing:
diff --git a/api/core/model_runtime/model_providers/tongyi/llm/qwen-plus-0806.yaml b/api/core/model_runtime/model_providers/tongyi/llm/qwen-plus-0806.yaml
index 967f258fa9..bb394fad81 100644
--- a/api/core/model_runtime/model_providers/tongyi/llm/qwen-plus-0806.yaml
+++ b/api/core/model_runtime/model_providers/tongyi/llm/qwen-plus-0806.yaml
@@ -65,15 +65,6 @@ parameter_rules:
help:
zh_Hans: 用于控制模型生成时的重复度。提高repetition_penalty时可以降低模型生成的重复度。1.0表示不做惩罚。
en_US: Used to control the repeatability when generating models. Increasing repetition_penalty can reduce the duplication of model generation. 1.0 means no punishment.
- - name: enable_search
- type: boolean
- default: false
- label:
- zh_Hans: 联网搜索
- en_US: Web Search
- help:
- zh_Hans: 模型内置了互联网搜索服务,该参数控制模型在生成文本时是否参考使用互联网搜索结果。启用互联网搜索,模型会将搜索结果作为文本生成过程中的参考信息,但模型会基于其内部逻辑“自行判断”是否使用互联网搜索结果。
- en_US: The model has a built-in Internet search service. This parameter controls whether the model refers to Internet search results when generating text. When Internet search is enabled, the model will use the search results as reference information in the text generation process, but the model will "judge" whether to use Internet search results based on its internal logic.
- name: response_format
use_template: response_format
pricing:
diff --git a/api/core/model_runtime/model_providers/tongyi/llm/qwen-plus-0919.yaml b/api/core/model_runtime/model_providers/tongyi/llm/qwen-plus-0919.yaml
index 9d44852ac9..118e304a97 100644
--- a/api/core/model_runtime/model_providers/tongyi/llm/qwen-plus-0919.yaml
+++ b/api/core/model_runtime/model_providers/tongyi/llm/qwen-plus-0919.yaml
@@ -65,15 +65,6 @@ parameter_rules:
help:
zh_Hans: 用于控制模型生成时的重复度。提高repetition_penalty时可以降低模型生成的重复度。1.0表示不做惩罚。
en_US: Used to control the repeatability when generating models. Increasing repetition_penalty can reduce the duplication of model generation. 1.0 means no punishment.
- - name: enable_search
- type: boolean
- default: false
- label:
- zh_Hans: 联网搜索
- en_US: Web Search
- help:
- zh_Hans: 模型内置了互联网搜索服务,该参数控制模型在生成文本时是否参考使用互联网搜索结果。启用互联网搜索,模型会将搜索结果作为文本生成过程中的参考信息,但模型会基于其内部逻辑“自行判断”是否使用互联网搜索结果。
- en_US: The model has a built-in Internet search service. This parameter controls whether the model refers to Internet search results when generating text. When Internet search is enabled, the model will use the search results as reference information in the text generation process, but the model will "judge" whether to use Internet search results based on its internal logic.
- name: response_format
use_template: response_format
pricing:
diff --git a/api/core/model_runtime/model_providers/tongyi/llm/qwen-plus-chat.yaml b/api/core/model_runtime/model_providers/tongyi/llm/qwen-plus-chat.yaml
index df9448ae04..761312bc38 100644
--- a/api/core/model_runtime/model_providers/tongyi/llm/qwen-plus-chat.yaml
+++ b/api/core/model_runtime/model_providers/tongyi/llm/qwen-plus-chat.yaml
@@ -67,15 +67,6 @@ parameter_rules:
help:
zh_Hans: 用于控制模型生成时的重复度。提高repetition_penalty时可以降低模型生成的重复度。1.0表示不做惩罚。
en_US: Used to control the repeatability when generating models. Increasing repetition_penalty can reduce the duplication of model generation. 1.0 means no punishment.
- - name: enable_search
- type: boolean
- default: false
- label:
- zh_Hans: 联网搜索
- en_US: Web Search
- help:
- zh_Hans: 模型内置了互联网搜索服务,该参数控制模型在生成文本时是否参考使用互联网搜索结果。启用互联网搜索,模型会将搜索结果作为文本生成过程中的参考信息,但模型会基于其内部逻辑“自行判断”是否使用互联网搜索结果。
- en_US: The model has a built-in Internet search service. This parameter controls whether the model refers to Internet search results when generating text. When Internet search is enabled, the model will use the search results as reference information in the text generation process, but the model will "judge" whether to use Internet search results based on its internal logic.
- name: response_format
use_template: response_format
pricing:
diff --git a/api/core/model_runtime/model_providers/tongyi/llm/qwen-plus-latest.yaml b/api/core/model_runtime/model_providers/tongyi/llm/qwen-plus-latest.yaml
index 32ccb8d615..430872fb31 100644
--- a/api/core/model_runtime/model_providers/tongyi/llm/qwen-plus-latest.yaml
+++ b/api/core/model_runtime/model_providers/tongyi/llm/qwen-plus-latest.yaml
@@ -65,15 +65,6 @@ parameter_rules:
help:
zh_Hans: 用于控制模型生成时的重复度。提高repetition_penalty时可以降低模型生成的重复度。1.0表示不做惩罚。
en_US: Used to control the repeatability when generating models. Increasing repetition_penalty can reduce the duplication of model generation. 1.0 means no punishment.
- - name: enable_search
- type: boolean
- default: false
- label:
- zh_Hans: 联网搜索
- en_US: Web Search
- help:
- zh_Hans: 模型内置了互联网搜索服务,该参数控制模型在生成文本时是否参考使用互联网搜索结果。启用互联网搜索,模型会将搜索结果作为文本生成过程中的参考信息,但模型会基于其内部逻辑“自行判断”是否使用互联网搜索结果。
- en_US: The model has a built-in Internet search service. This parameter controls whether the model refers to Internet search results when generating text. When Internet search is enabled, the model will use the search results as reference information in the text generation process, but the model will "judge" whether to use Internet search results based on its internal logic.
- name: response_format
use_template: response_format
pricing:
diff --git a/api/core/model_runtime/model_providers/tongyi/llm/qwen-turbo-0206.yaml b/api/core/model_runtime/model_providers/tongyi/llm/qwen-turbo-0206.yaml
index bf976b518a..2628d824fe 100644
--- a/api/core/model_runtime/model_providers/tongyi/llm/qwen-turbo-0206.yaml
+++ b/api/core/model_runtime/model_providers/tongyi/llm/qwen-turbo-0206.yaml
@@ -65,15 +65,6 @@ parameter_rules:
help:
zh_Hans: 用于控制模型生成时的重复度。提高repetition_penalty时可以降低模型生成的重复度。1.0表示不做惩罚。
en_US: Used to control the repeatability when generating models. Increasing repetition_penalty can reduce the duplication of model generation. 1.0 means no punishment.
- - name: enable_search
- type: boolean
- default: false
- label:
- zh_Hans: 联网搜索
- en_US: Web Search
- help:
- zh_Hans: 模型内置了互联网搜索服务,该参数控制模型在生成文本时是否参考使用互联网搜索结果。启用互联网搜索,模型会将搜索结果作为文本生成过程中的参考信息,但模型会基于其内部逻辑“自行判断”是否使用互联网搜索结果。
- en_US: The model has a built-in Internet search service. This parameter controls whether the model refers to Internet search results when generating text. When Internet search is enabled, the model will use the search results as reference information in the text generation process, but the model will "judge" whether to use Internet search results based on its internal logic.
- name: response_format
use_template: response_format
pricing:
diff --git a/api/core/model_runtime/model_providers/tongyi/llm/qwen-turbo-0624.yaml b/api/core/model_runtime/model_providers/tongyi/llm/qwen-turbo-0624.yaml
index 060e7fb4c9..8097459bf0 100644
--- a/api/core/model_runtime/model_providers/tongyi/llm/qwen-turbo-0624.yaml
+++ b/api/core/model_runtime/model_providers/tongyi/llm/qwen-turbo-0624.yaml
@@ -65,15 +65,6 @@ parameter_rules:
help:
zh_Hans: 用于控制模型生成时的重复度。提高repetition_penalty时可以降低模型生成的重复度。1.0表示不做惩罚。
en_US: Used to control the repeatability when generating models. Increasing repetition_penalty can reduce the duplication of model generation. 1.0 means no punishment.
- - name: enable_search
- type: boolean
- default: false
- label:
- zh_Hans: 联网搜索
- en_US: Web Search
- help:
- zh_Hans: 模型内置了互联网搜索服务,该参数控制模型在生成文本时是否参考使用互联网搜索结果。启用互联网搜索,模型会将搜索结果作为文本生成过程中的参考信息,但模型会基于其内部逻辑“自行判断”是否使用互联网搜索结果。
- en_US: The model has a built-in Internet search service. This parameter controls whether the model refers to Internet search results when generating text. When Internet search is enabled, the model will use the search results as reference information in the text generation process, but the model will "judge" whether to use Internet search results based on its internal logic.
- name: response_format
use_template: response_format
pricing:
diff --git a/api/core/model_runtime/model_providers/tongyi/llm/qwen-turbo-0919.yaml b/api/core/model_runtime/model_providers/tongyi/llm/qwen-turbo-0919.yaml
index 97cd34929b..e43beeb195 100644
--- a/api/core/model_runtime/model_providers/tongyi/llm/qwen-turbo-0919.yaml
+++ b/api/core/model_runtime/model_providers/tongyi/llm/qwen-turbo-0919.yaml
@@ -65,15 +65,6 @@ parameter_rules:
help:
zh_Hans: 用于控制模型生成时的重复度。提高repetition_penalty时可以降低模型生成的重复度。1.0表示不做惩罚。
en_US: Used to control the repeatability when generating models. Increasing repetition_penalty can reduce the duplication of model generation. 1.0 means no punishment.
- - name: enable_search
- type: boolean
- default: false
- label:
- zh_Hans: 联网搜索
- en_US: Web Search
- help:
- zh_Hans: 模型内置了互联网搜索服务,该参数控制模型在生成文本时是否参考使用互联网搜索结果。启用互联网搜索,模型会将搜索结果作为文本生成过程中的参考信息,但模型会基于其内部逻辑“自行判断”是否使用互联网搜索结果。
- en_US: The model has a built-in Internet search service. This parameter controls whether the model refers to Internet search results when generating text. When Internet search is enabled, the model will use the search results as reference information in the text generation process, but the model will "judge" whether to use Internet search results based on its internal logic.
- name: response_format
use_template: response_format
pricing:
diff --git a/api/core/model_runtime/model_providers/tongyi/llm/qwen-turbo-chat.yaml b/api/core/model_runtime/model_providers/tongyi/llm/qwen-turbo-chat.yaml
index 8d77ba7a2a..c30cb7ca10 100644
--- a/api/core/model_runtime/model_providers/tongyi/llm/qwen-turbo-chat.yaml
+++ b/api/core/model_runtime/model_providers/tongyi/llm/qwen-turbo-chat.yaml
@@ -67,15 +67,6 @@ parameter_rules:
help:
zh_Hans: 用于控制模型生成时的重复度。提高repetition_penalty时可以降低模型生成的重复度。1.0表示不做惩罚。
en_US: Used to control the repeatability when generating models. Increasing repetition_penalty can reduce the duplication of model generation. 1.0 means no punishment.
- - name: enable_search
- type: boolean
- default: false
- label:
- zh_Hans: 联网搜索
- en_US: Web Search
- help:
- zh_Hans: 模型内置了互联网搜索服务,该参数控制模型在生成文本时是否参考使用互联网搜索结果。启用互联网搜索,模型会将搜索结果作为文本生成过程中的参考信息,但模型会基于其内部逻辑“自行判断”是否使用互联网搜索结果。
- en_US: The model has a built-in Internet search service. This parameter controls whether the model refers to Internet search results when generating text. When Internet search is enabled, the model will use the search results as reference information in the text generation process, but the model will "judge" whether to use Internet search results based on its internal logic.
- name: response_format
use_template: response_format
pricing:
diff --git a/api/core/model_runtime/model_providers/tongyi/llm/qwen-turbo-latest.yaml b/api/core/model_runtime/model_providers/tongyi/llm/qwen-turbo-latest.yaml
index 4458c706aa..e443d6888b 100644
--- a/api/core/model_runtime/model_providers/tongyi/llm/qwen-turbo-latest.yaml
+++ b/api/core/model_runtime/model_providers/tongyi/llm/qwen-turbo-latest.yaml
@@ -65,15 +65,6 @@ parameter_rules:
help:
zh_Hans: 用于控制模型生成时的重复度。提高repetition_penalty时可以降低模型生成的重复度。1.0表示不做惩罚。
en_US: Used to control the repeatability when generating models. Increasing repetition_penalty can reduce the duplication of model generation. 1.0 means no punishment.
- - name: enable_search
- type: boolean
- default: false
- label:
- zh_Hans: 联网搜索
- en_US: Web Search
- help:
- zh_Hans: 模型内置了互联网搜索服务,该参数控制模型在生成文本时是否参考使用互联网搜索结果。启用互联网搜索,模型会将搜索结果作为文本生成过程中的参考信息,但模型会基于其内部逻辑“自行判断”是否使用互联网搜索结果。
- en_US: The model has a built-in Internet search service. This parameter controls whether the model refers to Internet search results when generating text. When Internet search is enabled, the model will use the search results as reference information in the text generation process, but the model will "judge" whether to use Internet search results based on its internal logic.
- name: response_format
use_template: response_format
pricing:
diff --git a/api/core/model_runtime/model_providers/tongyi/llm/qwen-vl-max-0809.yaml b/api/core/model_runtime/model_providers/tongyi/llm/qwen-vl-max-0809.yaml
index 12e9e0dd56..fd20377002 100644
--- a/api/core/model_runtime/model_providers/tongyi/llm/qwen-vl-max-0809.yaml
+++ b/api/core/model_runtime/model_providers/tongyi/llm/qwen-vl-max-0809.yaml
@@ -69,15 +69,6 @@ parameter_rules:
help:
zh_Hans: 用于控制模型生成时的重复度。提高repetition_penalty时可以降低模型生成的重复度。1.0表示不做惩罚。
en_US: Used to control the repeatability when generating models. Increasing repetition_penalty can reduce the duplication of model generation. 1.0 means no punishment.
- - name: enable_search
- type: boolean
- default: false
- label:
- zh_Hans: 联网搜索
- en_US: Web Search
- help:
- zh_Hans: 模型内置了互联网搜索服务,该参数控制模型在生成文本时是否参考使用互联网搜索结果。启用互联网搜索,模型会将搜索结果作为文本生成过程中的参考信息,但模型会基于其内部逻辑“自行判断”是否使用互联网搜索结果。
- en_US: The model has a built-in Internet search service. This parameter controls whether the model refers to Internet search results when generating text. When Internet search is enabled, the model will use the search results as reference information in the text generation process, but the model will "judge" whether to use Internet search results based on its internal logic.
- name: response_format
use_template: response_format
pricing:
diff --git a/api/core/model_runtime/model_providers/tongyi/llm/qwen-vl-max.yaml b/api/core/model_runtime/model_providers/tongyi/llm/qwen-vl-max.yaml
index b811fdece4..31a9fb51bb 100644
--- a/api/core/model_runtime/model_providers/tongyi/llm/qwen-vl-max.yaml
+++ b/api/core/model_runtime/model_providers/tongyi/llm/qwen-vl-max.yaml
@@ -69,15 +69,6 @@ parameter_rules:
help:
zh_Hans: 用于控制模型生成时的重复度。提高repetition_penalty时可以降低模型生成的重复度。1.0表示不做惩罚。
en_US: Used to control the repeatability when generating models. Increasing repetition_penalty can reduce the duplication of model generation. 1.0 means no punishment.
- - name: enable_search
- type: boolean
- default: false
- label:
- zh_Hans: 联网搜索
- en_US: Web Search
- help:
- zh_Hans: 模型内置了互联网搜索服务,该参数控制模型在生成文本时是否参考使用互联网搜索结果。启用互联网搜索,模型会将搜索结果作为文本生成过程中的参考信息,但模型会基于其内部逻辑“自行判断”是否使用互联网搜索结果。
- en_US: The model has a built-in Internet search service. This parameter controls whether the model refers to Internet search results when generating text. When Internet search is enabled, the model will use the search results as reference information in the text generation process, but the model will "judge" whether to use Internet search results based on its internal logic.
- name: response_format
use_template: response_format
pricing:
diff --git a/api/core/model_runtime/model_providers/tongyi/llm/qwen-vl-plus-0201.yaml b/api/core/model_runtime/model_providers/tongyi/llm/qwen-vl-plus-0201.yaml
index 188dea389a..5f90cf48bc 100644
--- a/api/core/model_runtime/model_providers/tongyi/llm/qwen-vl-plus-0201.yaml
+++ b/api/core/model_runtime/model_providers/tongyi/llm/qwen-vl-plus-0201.yaml
@@ -69,15 +69,6 @@ parameter_rules:
help:
zh_Hans: 用于控制模型生成时的重复度。提高repetition_penalty时可以降低模型生成的重复度。1.0表示不做惩罚。
en_US: Used to control the repeatability when generating models. Increasing repetition_penalty can reduce the duplication of model generation. 1.0 means no punishment.
- - name: enable_search
- type: boolean
- default: false
- label:
- zh_Hans: 联网搜索
- en_US: Web Search
- help:
- zh_Hans: 模型内置了互联网搜索服务,该参数控制模型在生成文本时是否参考使用互联网搜索结果。启用互联网搜索,模型会将搜索结果作为文本生成过程中的参考信息,但模型会基于其内部逻辑“自行判断”是否使用互联网搜索结果。
- en_US: The model has a built-in Internet search service. This parameter controls whether the model refers to Internet search results when generating text. When Internet search is enabled, the model will use the search results as reference information in the text generation process, but the model will "judge" whether to use Internet search results based on its internal logic.
- name: response_format
use_template: response_format
pricing:
diff --git a/api/core/model_runtime/model_providers/tongyi/llm/qwen-vl-plus-0809.yaml b/api/core/model_runtime/model_providers/tongyi/llm/qwen-vl-plus-0809.yaml
index bc623e2f03..97820c0f3a 100644
--- a/api/core/model_runtime/model_providers/tongyi/llm/qwen-vl-plus-0809.yaml
+++ b/api/core/model_runtime/model_providers/tongyi/llm/qwen-vl-plus-0809.yaml
@@ -69,15 +69,6 @@ parameter_rules:
help:
zh_Hans: 用于控制模型生成时的重复度。提高repetition_penalty时可以降低模型生成的重复度。1.0表示不做惩罚。
en_US: Used to control the repeatability when generating models. Increasing repetition_penalty can reduce the duplication of model generation. 1.0 means no punishment.
- - name: enable_search
- type: boolean
- default: false
- label:
- zh_Hans: 联网搜索
- en_US: Web Search
- help:
- zh_Hans: 模型内置了互联网搜索服务,该参数控制模型在生成文本时是否参考使用互联网搜索结果。启用互联网搜索,模型会将搜索结果作为文本生成过程中的参考信息,但模型会基于其内部逻辑“自行判断”是否使用互联网搜索结果。
- en_US: The model has a built-in Internet search service. This parameter controls whether the model refers to Internet search results when generating text. When Internet search is enabled, the model will use the search results as reference information in the text generation process, but the model will "judge" whether to use Internet search results based on its internal logic.
- name: response_format
use_template: response_format
pricing:
diff --git a/api/core/model_runtime/model_providers/tongyi/llm/qwen-vl-plus.yaml b/api/core/model_runtime/model_providers/tongyi/llm/qwen-vl-plus.yaml
index 8977e12e4f..6af36cd6f3 100644
--- a/api/core/model_runtime/model_providers/tongyi/llm/qwen-vl-plus.yaml
+++ b/api/core/model_runtime/model_providers/tongyi/llm/qwen-vl-plus.yaml
@@ -69,15 +69,6 @@ parameter_rules:
help:
zh_Hans: 用于控制模型生成时的重复度。提高repetition_penalty时可以降低模型生成的重复度。1.0表示不做惩罚。
en_US: Used to control the repeatability when generating models. Increasing repetition_penalty can reduce the duplication of model generation. 1.0 means no punishment.
- - name: enable_search
- type: boolean
- default: false
- label:
- zh_Hans: 联网搜索
- en_US: Web Search
- help:
- zh_Hans: 模型内置了互联网搜索服务,该参数控制模型在生成文本时是否参考使用互联网搜索结果。启用互联网搜索,模型会将搜索结果作为文本生成过程中的参考信息,但模型会基于其内部逻辑“自行判断”是否使用互联网搜索结果。
- en_US: The model has a built-in Internet search service. This parameter controls whether the model refers to Internet search results when generating text. When Internet search is enabled, the model will use the search results as reference information in the text generation process, but the model will "judge" whether to use Internet search results based on its internal logic.
- name: response_format
use_template: response_format
pricing:
diff --git a/api/core/model_runtime/model_providers/tongyi/llm/qwen2-math-1.5b-instruct.yaml b/api/core/model_runtime/model_providers/tongyi/llm/qwen2-math-1.5b-instruct.yaml
index de237842af..158e2c7ee1 100644
--- a/api/core/model_runtime/model_providers/tongyi/llm/qwen2-math-1.5b-instruct.yaml
+++ b/api/core/model_runtime/model_providers/tongyi/llm/qwen2-math-1.5b-instruct.yaml
@@ -65,15 +65,6 @@ parameter_rules:
help:
zh_Hans: 用于控制模型生成时的重复度。提高repetition_penalty时可以降低模型生成的重复度。1.0表示不做惩罚。
en_US: Used to control the repeatability when generating models. Increasing repetition_penalty can reduce the duplication of model generation. 1.0 means no punishment.
- - name: enable_search
- type: boolean
- default: false
- label:
- zh_Hans: 联网搜索
- en_US: Web Search
- help:
- zh_Hans: 模型内置了互联网搜索服务,该参数控制模型在生成文本时是否参考使用互联网搜索结果。启用互联网搜索,模型会将搜索结果作为文本生成过程中的参考信息,但模型会基于其内部逻辑“自行判断”是否使用互联网搜索结果。
- en_US: The model has a built-in Internet search service. This parameter controls whether the model refers to Internet search results when generating text. When Internet search is enabled, the model will use the search results as reference information in the text generation process, but the model will "judge" whether to use Internet search results based on its internal logic.
- name: response_format
use_template: response_format
pricing:
diff --git a/api/core/model_runtime/model_providers/tongyi/llm/qwen2-math-72b-instruct.yaml b/api/core/model_runtime/model_providers/tongyi/llm/qwen2-math-72b-instruct.yaml
index 1fda35abaf..e26a6923d1 100644
--- a/api/core/model_runtime/model_providers/tongyi/llm/qwen2-math-72b-instruct.yaml
+++ b/api/core/model_runtime/model_providers/tongyi/llm/qwen2-math-72b-instruct.yaml
@@ -65,15 +65,6 @@ parameter_rules:
help:
zh_Hans: 用于控制模型生成时的重复度。提高repetition_penalty时可以降低模型生成的重复度。1.0表示不做惩罚。
en_US: Used to control the repeatability when generating models. Increasing repetition_penalty can reduce the duplication of model generation. 1.0 means no punishment.
- - name: enable_search
- type: boolean
- default: false
- label:
- zh_Hans: 联网搜索
- en_US: Web Search
- help:
- zh_Hans: 模型内置了互联网搜索服务,该参数控制模型在生成文本时是否参考使用互联网搜索结果。启用互联网搜索,模型会将搜索结果作为文本生成过程中的参考信息,但模型会基于其内部逻辑“自行判断”是否使用互联网搜索结果。
- en_US: The model has a built-in Internet search service. This parameter controls whether the model refers to Internet search results when generating text. When Internet search is enabled, the model will use the search results as reference information in the text generation process, but the model will "judge" whether to use Internet search results based on its internal logic.
- name: response_format
use_template: response_format
pricing:
diff --git a/api/core/model_runtime/model_providers/tongyi/llm/qwen2-math-7b-instruct.yaml b/api/core/model_runtime/model_providers/tongyi/llm/qwen2-math-7b-instruct.yaml
index 06fd33c5f4..589119b26e 100644
--- a/api/core/model_runtime/model_providers/tongyi/llm/qwen2-math-7b-instruct.yaml
+++ b/api/core/model_runtime/model_providers/tongyi/llm/qwen2-math-7b-instruct.yaml
@@ -65,15 +65,6 @@ parameter_rules:
help:
zh_Hans: 用于控制模型生成时的重复度。提高repetition_penalty时可以降低模型生成的重复度。1.0表示不做惩罚。
en_US: Used to control the repeatability when generating models. Increasing repetition_penalty can reduce the duplication of model generation. 1.0 means no punishment.
- - name: enable_search
- type: boolean
- default: false
- label:
- zh_Hans: 联网搜索
- en_US: Web Search
- help:
- zh_Hans: 模型内置了互联网搜索服务,该参数控制模型在生成文本时是否参考使用互联网搜索结果。启用互联网搜索,模型会将搜索结果作为文本生成过程中的参考信息,但模型会基于其内部逻辑“自行判断”是否使用互联网搜索结果。
- en_US: The model has a built-in Internet search service. This parameter controls whether the model refers to Internet search results when generating text. When Internet search is enabled, the model will use the search results as reference information in the text generation process, but the model will "judge" whether to use Internet search results based on its internal logic.
- name: response_format
use_template: response_format
pricing:
diff --git a/api/core/model_runtime/model_providers/tongyi/llm/qwen2.5-0.5b-instruct.yaml b/api/core/model_runtime/model_providers/tongyi/llm/qwen2.5-0.5b-instruct.yaml
index ebf8099553..dd608fbf76 100644
--- a/api/core/model_runtime/model_providers/tongyi/llm/qwen2.5-0.5b-instruct.yaml
+++ b/api/core/model_runtime/model_providers/tongyi/llm/qwen2.5-0.5b-instruct.yaml
@@ -65,15 +65,6 @@ parameter_rules:
help:
zh_Hans: 用于控制模型生成时的重复度。提高repetition_penalty时可以降低模型生成的重复度。1.0表示不做惩罚。
en_US: Used to control the repeatability when generating models. Increasing repetition_penalty can reduce the duplication of model generation. 1.0 means no punishment.
- - name: enable_search
- type: boolean
- default: false
- label:
- zh_Hans: 联网搜索
- en_US: Web Search
- help:
- zh_Hans: 模型内置了互联网搜索服务,该参数控制模型在生成文本时是否参考使用互联网搜索结果。启用互联网搜索,模型会将搜索结果作为文本生成过程中的参考信息,但模型会基于其内部逻辑“自行判断”是否使用互联网搜索结果。
- en_US: The model has a built-in Internet search service. This parameter controls whether the model refers to Internet search results when generating text. When Internet search is enabled, the model will use the search results as reference information in the text generation process, but the model will "judge" whether to use Internet search results based on its internal logic.
- name: response_format
use_template: response_format
pricing:
diff --git a/api/core/model_runtime/model_providers/tongyi/llm/qwen2.5-1.5b-instruct.yaml b/api/core/model_runtime/model_providers/tongyi/llm/qwen2.5-1.5b-instruct.yaml
index e9bc99339d..08237b3958 100644
--- a/api/core/model_runtime/model_providers/tongyi/llm/qwen2.5-1.5b-instruct.yaml
+++ b/api/core/model_runtime/model_providers/tongyi/llm/qwen2.5-1.5b-instruct.yaml
@@ -65,15 +65,6 @@ parameter_rules:
help:
zh_Hans: 用于控制模型生成时的重复度。提高repetition_penalty时可以降低模型生成的重复度。1.0表示不做惩罚。
en_US: Used to control the repeatability when generating models. Increasing repetition_penalty can reduce the duplication of model generation. 1.0 means no punishment.
- - name: enable_search
- type: boolean
- default: false
- label:
- zh_Hans: 联网搜索
- en_US: Web Search
- help:
- zh_Hans: 模型内置了互联网搜索服务,该参数控制模型在生成文本时是否参考使用互联网搜索结果。启用互联网搜索,模型会将搜索结果作为文本生成过程中的参考信息,但模型会基于其内部逻辑“自行判断”是否使用互联网搜索结果。
- en_US: The model has a built-in Internet search service. This parameter controls whether the model refers to Internet search results when generating text. When Internet search is enabled, the model will use the search results as reference information in the text generation process, but the model will "judge" whether to use Internet search results based on its internal logic.
- name: response_format
use_template: response_format
pricing:
diff --git a/api/core/model_runtime/model_providers/tongyi/llm/qwen2.5-14b-instruct.yaml b/api/core/model_runtime/model_providers/tongyi/llm/qwen2.5-14b-instruct.yaml
index 3ed85dade8..640b019703 100644
--- a/api/core/model_runtime/model_providers/tongyi/llm/qwen2.5-14b-instruct.yaml
+++ b/api/core/model_runtime/model_providers/tongyi/llm/qwen2.5-14b-instruct.yaml
@@ -65,15 +65,6 @@ parameter_rules:
help:
zh_Hans: 用于控制模型生成时的重复度。提高repetition_penalty时可以降低模型生成的重复度。1.0表示不做惩罚。
en_US: Used to control the repeatability when generating models. Increasing repetition_penalty can reduce the duplication of model generation. 1.0 means no punishment.
- - name: enable_search
- type: boolean
- default: false
- label:
- zh_Hans: 联网搜索
- en_US: Web Search
- help:
- zh_Hans: 模型内置了互联网搜索服务,该参数控制模型在生成文本时是否参考使用互联网搜索结果。启用互联网搜索,模型会将搜索结果作为文本生成过程中的参考信息,但模型会基于其内部逻辑“自行判断”是否使用互联网搜索结果。
- en_US: The model has a built-in Internet search service. This parameter controls whether the model refers to Internet search results when generating text. When Internet search is enabled, the model will use the search results as reference information in the text generation process, but the model will "judge" whether to use Internet search results based on its internal logic.
- name: response_format
use_template: response_format
pricing:
diff --git a/api/core/model_runtime/model_providers/tongyi/llm/qwen2.5-32b-instruct.yaml b/api/core/model_runtime/model_providers/tongyi/llm/qwen2.5-32b-instruct.yaml
index 328519c168..3a90ca7532 100644
--- a/api/core/model_runtime/model_providers/tongyi/llm/qwen2.5-32b-instruct.yaml
+++ b/api/core/model_runtime/model_providers/tongyi/llm/qwen2.5-32b-instruct.yaml
@@ -65,15 +65,6 @@ parameter_rules:
help:
zh_Hans: 用于控制模型生成时的重复度。提高repetition_penalty时可以降低模型生成的重复度。1.0表示不做惩罚。
en_US: Used to control the repeatability when generating models. Increasing repetition_penalty can reduce the duplication of model generation. 1.0 means no punishment.
- - name: enable_search
- type: boolean
- default: false
- label:
- zh_Hans: 联网搜索
- en_US: Web Search
- help:
- zh_Hans: 模型内置了互联网搜索服务,该参数控制模型在生成文本时是否参考使用互联网搜索结果。启用互联网搜索,模型会将搜索结果作为文本生成过程中的参考信息,但模型会基于其内部逻辑“自行判断”是否使用互联网搜索结果。
- en_US: The model has a built-in Internet search service. This parameter controls whether the model refers to Internet search results when generating text. When Internet search is enabled, the model will use the search results as reference information in the text generation process, but the model will "judge" whether to use Internet search results based on its internal logic.
- name: response_format
use_template: response_format
pricing:
diff --git a/api/core/model_runtime/model_providers/tongyi/llm/qwen2.5-3b-instruct.yaml b/api/core/model_runtime/model_providers/tongyi/llm/qwen2.5-3b-instruct.yaml
index d1ed3c2a73..b79755eb9b 100644
--- a/api/core/model_runtime/model_providers/tongyi/llm/qwen2.5-3b-instruct.yaml
+++ b/api/core/model_runtime/model_providers/tongyi/llm/qwen2.5-3b-instruct.yaml
@@ -65,15 +65,6 @@ parameter_rules:
help:
zh_Hans: 用于控制模型生成时的重复度。提高repetition_penalty时可以降低模型生成的重复度。1.0表示不做惩罚。
en_US: Used to control the repeatability when generating models. Increasing repetition_penalty can reduce the duplication of model generation. 1.0 means no punishment.
- - name: enable_search
- type: boolean
- default: false
- label:
- zh_Hans: 联网搜索
- en_US: Web Search
- help:
- zh_Hans: 模型内置了互联网搜索服务,该参数控制模型在生成文本时是否参考使用互联网搜索结果。启用互联网搜索,模型会将搜索结果作为文本生成过程中的参考信息,但模型会基于其内部逻辑“自行判断”是否使用互联网搜索结果。
- en_US: The model has a built-in Internet search service. This parameter controls whether the model refers to Internet search results when generating text. When Internet search is enabled, the model will use the search results as reference information in the text generation process, but the model will "judge" whether to use Internet search results based on its internal logic.
- name: response_format
use_template: response_format
pricing:
diff --git a/api/core/model_runtime/model_providers/tongyi/llm/qwen2.5-72b-instruct.yaml b/api/core/model_runtime/model_providers/tongyi/llm/qwen2.5-72b-instruct.yaml
index 0e88c24aa8..e9dd51a341 100644
--- a/api/core/model_runtime/model_providers/tongyi/llm/qwen2.5-72b-instruct.yaml
+++ b/api/core/model_runtime/model_providers/tongyi/llm/qwen2.5-72b-instruct.yaml
@@ -65,15 +65,6 @@ parameter_rules:
help:
zh_Hans: 用于控制模型生成时的重复度。提高repetition_penalty时可以降低模型生成的重复度。1.0表示不做惩罚。
en_US: Used to control the repeatability when generating models. Increasing repetition_penalty can reduce the duplication of model generation. 1.0 means no punishment.
- - name: enable_search
- type: boolean
- default: false
- label:
- zh_Hans: 联网搜索
- en_US: Web Search
- help:
- zh_Hans: 模型内置了互联网搜索服务,该参数控制模型在生成文本时是否参考使用互联网搜索结果。启用互联网搜索,模型会将搜索结果作为文本生成过程中的参考信息,但模型会基于其内部逻辑“自行判断”是否使用互联网搜索结果。
- en_US: The model has a built-in Internet search service. This parameter controls whether the model refers to Internet search results when generating text. When Internet search is enabled, the model will use the search results as reference information in the text generation process, but the model will "judge" whether to use Internet search results based on its internal logic.
- name: response_format
use_template: response_format
pricing:
diff --git a/api/core/model_runtime/model_providers/tongyi/llm/qwen2.5-7b-instruct.yaml b/api/core/model_runtime/model_providers/tongyi/llm/qwen2.5-7b-instruct.yaml
index 35313cd1f7..04f26cf5fe 100644
--- a/api/core/model_runtime/model_providers/tongyi/llm/qwen2.5-7b-instruct.yaml
+++ b/api/core/model_runtime/model_providers/tongyi/llm/qwen2.5-7b-instruct.yaml
@@ -65,15 +65,6 @@ parameter_rules:
help:
zh_Hans: 用于控制模型生成时的重复度。提高repetition_penalty时可以降低模型生成的重复度。1.0表示不做惩罚。
en_US: Used to control the repeatability when generating models. Increasing repetition_penalty can reduce the duplication of model generation. 1.0 means no punishment.
- - name: enable_search
- type: boolean
- default: false
- label:
- zh_Hans: 联网搜索
- en_US: Web Search
- help:
- zh_Hans: 模型内置了互联网搜索服务,该参数控制模型在生成文本时是否参考使用互联网搜索结果。启用互联网搜索,模型会将搜索结果作为文本生成过程中的参考信息,但模型会基于其内部逻辑“自行判断”是否使用互联网搜索结果。
- en_US: The model has a built-in Internet search service. This parameter controls whether the model refers to Internet search results when generating text. When Internet search is enabled, the model will use the search results as reference information in the text generation process, but the model will "judge" whether to use Internet search results based on its internal logic.
- name: response_format
use_template: response_format
pricing:
diff --git a/api/core/model_runtime/model_providers/tongyi/llm/qwen2.5-coder-7b-instruct.yaml b/api/core/model_runtime/model_providers/tongyi/llm/qwen2.5-coder-7b-instruct.yaml
index 35313cd1f7..04f26cf5fe 100644
--- a/api/core/model_runtime/model_providers/tongyi/llm/qwen2.5-coder-7b-instruct.yaml
+++ b/api/core/model_runtime/model_providers/tongyi/llm/qwen2.5-coder-7b-instruct.yaml
@@ -65,15 +65,6 @@ parameter_rules:
help:
zh_Hans: 用于控制模型生成时的重复度。提高repetition_penalty时可以降低模型生成的重复度。1.0表示不做惩罚。
en_US: Used to control the repeatability when generating models. Increasing repetition_penalty can reduce the duplication of model generation. 1.0 means no punishment.
- - name: enable_search
- type: boolean
- default: false
- label:
- zh_Hans: 联网搜索
- en_US: Web Search
- help:
- zh_Hans: 模型内置了互联网搜索服务,该参数控制模型在生成文本时是否参考使用互联网搜索结果。启用互联网搜索,模型会将搜索结果作为文本生成过程中的参考信息,但模型会基于其内部逻辑“自行判断”是否使用互联网搜索结果。
- en_US: The model has a built-in Internet search service. This parameter controls whether the model refers to Internet search results when generating text. When Internet search is enabled, the model will use the search results as reference information in the text generation process, but the model will "judge" whether to use Internet search results based on its internal logic.
- name: response_format
use_template: response_format
pricing:
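
Every Tongyi hunk above makes the same mechanical edit: the `enable_search` parameter rule (a boolean defaulting to false) is dropped from each Qwen model YAML, while `repetition_penalty` and `response_format` are left untouched. As a purely illustrative sketch (not part of this patch), the same removal could be scripted as below; the directory path comes from the file headers above, but the PyYAML round-trip is an assumption and would normalize formatting rather than preserve the hand-written layout.

from pathlib import Path

import yaml  # PyYAML; an assumption -- the real edit was made by hand

TONGYI_LLM_DIR = Path("api/core/model_runtime/model_providers/tongyi/llm")


def strip_enable_search(path: Path) -> None:
    # Drop the `enable_search` rule, keep every other parameter rule as-is.
    data = yaml.safe_load(path.read_text(encoding="utf-8"))
    rules = data.get("parameter_rules", [])
    kept = [rule for rule in rules if rule.get("name") != "enable_search"]
    if len(kept) != len(rules):
        data["parameter_rules"] = kept
        # Note: safe_dump normalizes formatting; it will not reproduce the
        # original YAML layout shown in the hunks above.
        path.write_text(yaml.safe_dump(data, allow_unicode=True, sort_keys=False), encoding="utf-8")


for model_yaml in sorted(TONGYI_LLM_DIR.glob("qwen*.yaml")):
    strip_enable_search(model_yaml)
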
diff --git a/api/core/tools/provider/builtin/jina/jina.yaml b/api/core/tools/provider/builtin/jina/jina.yaml
index 9ce5cbd6d1..346175c41f 100644
--- a/api/core/tools/provider/builtin/jina/jina.yaml
+++ b/api/core/tools/provider/builtin/jina/jina.yaml
@@ -1,6 +1,6 @@
identity:
author: Dify
- name: Jina AI
+ name: jina
label:
en_US: Jina AI
zh_Hans: Jina AI
diff --git a/api/pyproject.toml b/api/pyproject.toml
index 41244f516c..9e38c09456 100644
--- a/api/pyproject.toml
+++ b/api/pyproject.toml
@@ -122,6 +122,7 @@ CODE_EXECUTION_API_KEY = "dify-sandbox"
FIRECRAWL_API_KEY = "fc-"
TEI_EMBEDDING_SERVER_URL = "http://a.abc.com:11451"
TEI_RERANK_SERVER_URL = "http://a.abc.com:11451"
+MIXEDBREAD_API_KEY = "mk-aaaaaaaaaaaaaaaaaaaa"
[tool.poetry]
name = "dify-api"
diff --git a/api/tests/integration_tests/model_runtime/mixedbread/__init__.py b/api/tests/integration_tests/model_runtime/mixedbread/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/api/tests/integration_tests/model_runtime/mixedbread/test_provider.py b/api/tests/integration_tests/model_runtime/mixedbread/test_provider.py
new file mode 100644
index 0000000000..25c9f3ce8d
--- /dev/null
+++ b/api/tests/integration_tests/model_runtime/mixedbread/test_provider.py
@@ -0,0 +1,28 @@
+import os
+from unittest.mock import Mock, patch
+
+import pytest
+
+from core.model_runtime.errors.validate import CredentialsValidateFailedError
+from core.model_runtime.model_providers.mixedbread.mixedbread import MixedBreadProvider
+
+
+def test_validate_provider_credentials():
+ provider = MixedBreadProvider()
+
+ with pytest.raises(CredentialsValidateFailedError):
+ provider.validate_provider_credentials(credentials={"api_key": "hahahaha"})
+ with patch("requests.post") as mock_post:
+ mock_response = Mock()
+ mock_response.json.return_value = {
+ "usage": {"prompt_tokens": 3, "total_tokens": 3},
+ "model": "mixedbread-ai/mxbai-embed-large-v1",
+ "data": [{"embedding": [0.23333 for _ in range(1024)], "index": 0, "object": "embedding"}],
+ "object": "list",
+ "normalized": "true",
+ "encoding_format": "float",
+ "dimensions": 1024,
+ }
+ mock_response.status_code = 200
+ mock_post.return_value = mock_response
+ provider.validate_provider_credentials(credentials={"api_key": os.environ.get("MIXEDBREAD_API_KEY")})
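
The provider test above patches `requests.post`, so `validate_provider_credentials` never reaches the network; the canned payload mimics a Mixedbread embeddings response for `mxbai-embed-large-v1`. A hedged sketch of the request that mock stands in for follows; the endpoint URL, header, and payload keys are assumptions inferred from the mocked response, not confirmed anywhere in this patch.

import os

import requests

# Assumed endpoint and payload shape, inferred from the mocked response above.
MIXEDBREAD_EMBEDDINGS_URL = "https://api.mixedbread.ai/v1/embeddings"


def probe_credentials(api_key: str) -> bool:
    response = requests.post(
        MIXEDBREAD_EMBEDDINGS_URL,
        headers={"Authorization": f"Bearer {api_key}", "Content-Type": "application/json"},
        json={"model": "mxbai-embed-large-v1", "input": ["ping"]},
        timeout=10,
    )
    # A non-2xx status here is what the provider surfaces as CredentialsValidateFailedError.
    return response.status_code == 200


if __name__ == "__main__":
    print(probe_credentials(os.environ.get("MIXEDBREAD_API_KEY", "")))
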
diff --git a/api/tests/integration_tests/model_runtime/mixedbread/test_rerank.py b/api/tests/integration_tests/model_runtime/mixedbread/test_rerank.py
new file mode 100644
index 0000000000..b65aab74aa
--- /dev/null
+++ b/api/tests/integration_tests/model_runtime/mixedbread/test_rerank.py
@@ -0,0 +1,100 @@
+import os
+from unittest.mock import Mock, patch
+
+import pytest
+
+from core.model_runtime.entities.rerank_entities import RerankResult
+from core.model_runtime.errors.validate import CredentialsValidateFailedError
+from core.model_runtime.model_providers.mixedbread.rerank.rerank import MixedBreadRerankModel
+
+
+def test_validate_credentials():
+ model = MixedBreadRerankModel()
+
+ with pytest.raises(CredentialsValidateFailedError):
+ model.validate_credentials(
+ model="mxbai-rerank-large-v1",
+ credentials={"api_key": "invalid_key"},
+ )
+ with patch("httpx.post") as mock_post:
+ mock_response = Mock()
+ mock_response.json.return_value = {
+ "usage": {"prompt_tokens": 86, "total_tokens": 86},
+ "model": "mixedbread-ai/mxbai-rerank-large-v1",
+ "data": [
+ {
+ "index": 0,
+ "score": 0.06762695,
+ "input": "Carson City is the capital city of the American state of Nevada. At the 2010 United "
+ "States Census, Carson City had a population of 55,274.",
+ "object": "text_document",
+ },
+ {
+ "index": 1,
+ "score": 0.057403564,
+ "input": "The Commonwealth of the Northern Mariana Islands is a group of islands in the Pacific "
+ "Ocean that are a political division controlled by the United States. Its capital is "
+ "Saipan.",
+ "object": "text_document",
+ },
+ ],
+ "object": "list",
+ "top_k": 2,
+ "return_input": True,
+ }
+ mock_response.status_code = 200
+ mock_post.return_value = mock_response
+ model.validate_credentials(
+ model="mxbai-rerank-large-v1",
+ credentials={
+ "api_key": os.environ.get("MIXEDBREAD_API_KEY"),
+ },
+ )
+
+
+def test_invoke_model():
+ model = MixedBreadRerankModel()
+ with patch("httpx.post") as mock_post:
+ mock_response = Mock()
+ mock_response.json.return_value = {
+ "usage": {"prompt_tokens": 56, "total_tokens": 56},
+ "model": "mixedbread-ai/mxbai-rerank-large-v1",
+ "data": [
+ {
+ "index": 0,
+ "score": 0.6044922,
+ "input": "Kasumi is a girl name of Japanese origin meaning mist.",
+ "object": "text_document",
+ },
+ {
+ "index": 1,
+ "score": 0.0703125,
+ "input": "Her music is a kawaii bass, a mix of future bass, pop, and kawaii music and she leads a "
+ "team named PopiParty.",
+ "object": "text_document",
+ },
+ ],
+ "object": "list",
+ "top_k": 2,
+ "return_input": "true",
+ }
+ mock_response.status_code = 200
+ mock_post.return_value = mock_response
+ result = model.invoke(
+ model="mxbai-rerank-large-v1",
+ credentials={
+ "api_key": os.environ.get("MIXEDBREAD_API_KEY"),
+ },
+ query="Who is Kasumi?",
+ docs=[
+ "Kasumi is a girl name of Japanese origin meaning mist.",
+ "Her music is a kawaii bass, a mix of future bass, pop, and kawaii music and she leads a team named "
+ "PopiParty.",
+ ],
+ score_threshold=0.5,
+ )
+
+ assert isinstance(result, RerankResult)
+ assert len(result.docs) == 1
+ assert result.docs[0].index == 0
+ assert result.docs[0].score >= 0.5
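
`test_invoke_model` asserts that a `score_threshold` of 0.5 keeps only the first document (score 0.6044922) and drops the second (0.0703125). A minimal sketch of that filtering step follows; `RankedDoc` and `filter_by_threshold` are hypothetical names used only to illustrate the behavior the assertions pin down.

from dataclasses import dataclass


@dataclass
class RankedDoc:
    index: int
    text: str
    score: float


def filter_by_threshold(docs: list[RankedDoc], score_threshold: float | None) -> list[RankedDoc]:
    # Keep only documents at or above the threshold; no threshold means keep everything.
    if score_threshold is None:
        return docs
    return [doc for doc in docs if doc.score >= score_threshold]


ranked = [
    RankedDoc(0, "Kasumi is a girl name of Japanese origin meaning mist.", 0.6044922),
    RankedDoc(1, "Her music is a kawaii bass, a mix of future bass, pop, and kawaii music ...", 0.0703125),
]
kept = filter_by_threshold(ranked, score_threshold=0.5)
assert [doc.index for doc in kept] == [0]  # matches the assertions in test_invoke_model above
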
diff --git a/api/tests/integration_tests/model_runtime/mixedbread/test_text_embedding.py b/api/tests/integration_tests/model_runtime/mixedbread/test_text_embedding.py
new file mode 100644
index 0000000000..ca97a18951
--- /dev/null
+++ b/api/tests/integration_tests/model_runtime/mixedbread/test_text_embedding.py
@@ -0,0 +1,78 @@
+import os
+from unittest.mock import Mock, patch
+
+import pytest
+
+from core.model_runtime.entities.text_embedding_entities import TextEmbeddingResult
+from core.model_runtime.errors.validate import CredentialsValidateFailedError
+from core.model_runtime.model_providers.mixedbread.text_embedding.text_embedding import MixedBreadTextEmbeddingModel
+
+
+def test_validate_credentials():
+ model = MixedBreadTextEmbeddingModel()
+
+ with pytest.raises(CredentialsValidateFailedError):
+ model.validate_credentials(model="mxbai-embed-large-v1", credentials={"api_key": "invalid_key"})
+ with patch("requests.post") as mock_post:
+ mock_response = Mock()
+ mock_response.json.return_value = {
+ "usage": {"prompt_tokens": 3, "total_tokens": 3},
+ "model": "mixedbread-ai/mxbai-embed-large-v1",
+ "data": [{"embedding": [0.23333 for _ in range(1024)], "index": 0, "object": "embedding"}],
+ "object": "list",
+ "normalized": "true",
+ "encoding_format": "float",
+ "dimensions": 1024,
+ }
+ mock_response.status_code = 200
+ mock_post.return_value = mock_response
+ model.validate_credentials(
+ model="mxbai-embed-large-v1", credentials={"api_key": os.environ.get("MIXEDBREAD_API_KEY")}
+ )
+
+
+def test_invoke_model():
+ model = MixedBreadTextEmbeddingModel()
+
+ with patch("requests.post") as mock_post:
+ mock_response = Mock()
+ mock_response.json.return_value = {
+ "usage": {"prompt_tokens": 6, "total_tokens": 6},
+ "model": "mixedbread-ai/mxbai-embed-large-v1",
+ "data": [
+ {"embedding": [0.23333 for _ in range(1024)], "index": 0, "object": "embedding"},
+ {"embedding": [0.23333 for _ in range(1024)], "index": 1, "object": "embedding"},
+ ],
+ "object": "list",
+ "normalized": "true",
+ "encoding_format": "float",
+ "dimensions": 1024,
+ }
+ mock_response.status_code = 200
+ mock_post.return_value = mock_response
+ result = model.invoke(
+ model="mxbai-embed-large-v1",
+ credentials={
+ "api_key": os.environ.get("MIXEDBREAD_API_KEY"),
+ },
+ texts=["hello", "world"],
+ user="abc-123",
+ )
+
+ assert isinstance(result, TextEmbeddingResult)
+ assert len(result.embeddings) == 2
+ assert result.usage.total_tokens == 6
+
+
+def test_get_num_tokens():
+ model = MixedBreadTextEmbeddingModel()
+
+ num_tokens = model.get_num_tokens(
+ model="mxbai-embed-large-v1",
+ credentials={
+ "api_key": os.environ.get("MIXEDBREAD_API_KEY"),
+ },
+ texts=["ping"],
+ )
+
+ assert num_tokens == 1
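
The embedding tests patch `requests.post` (while the rerank tests patch `httpx.post`, suggesting the two model classes use different HTTP clients) and repeat the same canned response in `test_provider.py` and this file. A small hypothetical helper, sketched below and not part of the patch, could build that payload once; the field names simply mirror the mocked dictionaries above.

def fake_embedding_response(num_inputs: int, dimensions: int = 1024) -> dict:
    # Field names mirror the mocked payloads used in the tests above;
    # the 3-tokens-per-input usage figure simply matches those examples.
    return {
        "usage": {"prompt_tokens": 3 * num_inputs, "total_tokens": 3 * num_inputs},
        "model": "mixedbread-ai/mxbai-embed-large-v1",
        "data": [
            {"embedding": [0.23333] * dimensions, "index": i, "object": "embedding"}
            for i in range(num_inputs)
        ],
        "object": "list",
        "normalized": "true",
        "encoding_format": "float",
        "dimensions": dimensions,
    }
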
diff --git a/dev/pytest/pytest_model_runtime.sh b/dev/pytest/pytest_model_runtime.sh
index 4c0083a2de..b60ff64fdc 100755
--- a/dev/pytest/pytest_model_runtime.sh
+++ b/dev/pytest/pytest_model_runtime.sh
@@ -8,4 +8,5 @@ pytest api/tests/integration_tests/model_runtime/anthropic \
api/tests/integration_tests/model_runtime/huggingface_hub/test_llm.py \
api/tests/integration_tests/model_runtime/upstage \
api/tests/integration_tests/model_runtime/fireworks \
- api/tests/integration_tests/model_runtime/nomic
+ api/tests/integration_tests/model_runtime/nomic \
+ api/tests/integration_tests/model_runtime/mixedbread
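
The shell-script change simply appends the new test directory to the existing pytest invocation. For local runs, an equivalent (hypothetical) Python entry point is sketched below; the dummy key mirrors the `MIXEDBREAD_API_KEY` value added to pyproject.toml earlier in this diff and is only enough to satisfy the mocked tests.

import os
import sys

import pytest

# Dummy key taken from the pyproject.toml addition earlier in this diff; the
# mocked tests never send it anywhere. Run from the repository root.
os.environ.setdefault("MIXEDBREAD_API_KEY", "mk-aaaaaaaaaaaaaaaaaaaa")
sys.exit(pytest.main(["api/tests/integration_tests/model_runtime/mixedbread"]))
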
diff --git a/web/public/embed.js b/web/public/embed.js
index 8ed7a67dc8..3c2735b6fc 100644
--- a/web/public/embed.js
+++ b/web/public/embed.js
@@ -69,38 +69,47 @@
iframe.id = iframeId;
iframe.src = iframeUrl;
iframe.style.cssText = `
- border: none; position: fixed; flex-direction: column; justify-content: space-between;
+ border: none; position: absolute; flex-direction: column; justify-content: space-between;
box-shadow: rgba(150, 150, 150, 0.2) 0px 10px 30px 0px, rgba(150, 150, 150, 0.2) 0px 0px 0px 1px;
- bottom: 5rem; right: 1rem; width: 24rem; max-width: calc(100vw - 2rem); height: 40rem;
+ bottom: 55px; right: 0; width: 24rem; max-width: calc(100vw - 2rem); height: 40rem;
max-height: calc(100vh - 6rem); border-radius: 0.75rem; display: flex; z-index: 2147483647;
overflow: hidden; left: unset; background-color: #F3F4F6;user-select: none;
`;
- document.body.appendChild(iframe);
+ return iframe;
}
// Function to reset the iframe position
function resetIframePosition() {
+ if (window.innerWidth <= 640)
+ return
+
const targetIframe = document.getElementById(iframeId);
const targetButton = document.getElementById(buttonId);
if (targetIframe && targetButton) {
const buttonRect = targetButton.getBoundingClientRect();
- const buttonBottom = window.innerHeight - buttonRect.bottom;
- const buttonRight = window.innerWidth - buttonRect.right;
- const buttonLeft = buttonRect.left;
- // Adjust iframe position to stay within viewport
- targetIframe.style.bottom = `${
- buttonBottom + buttonRect.height + 5 + targetIframe.clientHeight > window.innerHeight
- ? buttonBottom - targetIframe.clientHeight - 5
- : buttonBottom + buttonRect.height + 5
- }px`;
+ const buttonInBottom = buttonRect.top - 5 > targetIframe.clientHeight
- targetIframe.style.right = `${
- buttonRight + targetIframe.clientWidth > window.innerWidth
- ? window.innerWidth - buttonLeft - targetIframe.clientWidth
- : buttonRight
- }px`;
+ if (buttonInBottom) {
+ targetIframe.style.bottom = `${buttonRect.height + 5}px`;
+ targetIframe.style.top = 'unset';
+ }
+ else {
+ targetIframe.style.bottom = 'unset';
+ targetIframe.style.top = `${buttonRect.height + 5}px`;
+ }
+
+ const buttonInRight = buttonRect.right > targetIframe.clientWidth;
+
+ if (buttonInRight) {
+ targetIframe.style.right = '0';
+ targetIframe.style.left = 'unset';
+ }
+ else {
+ targetIframe.style.right = 'unset';
+ targetIframe.style.left = 0;
+ }
}
}
@@ -146,12 +155,6 @@
box-shadow: var(--${containerDiv.id}-box-shadow, rgba(0, 0, 0, 0.2) 0px 4px 8px 0px);
cursor: pointer;
z-index: 2147483647;
- transition: all 0.2s ease-in-out 0s;
- }
- `);
- styleSheet.sheet.insertRule(`
- #${containerDiv.id}:hover {
- transform: var(--${containerDiv.id}-hover-transform, scale(1.1));
}
`);
@@ -167,7 +170,7 @@
containerDiv.addEventListener("click", function () {
const targetIframe = document.getElementById(iframeId);
if (!targetIframe) {
- createIframe();
+ containerDiv.appendChild(createIframe());
resetIframePosition();
this.title = "Exit (ESC)";
displayDiv.innerHTML = svgIcons.close;
@@ -255,9 +258,6 @@
if (!document.getElementById(buttonId)) {
createButton();
}
-
- createIframe();
- document.getElementById(iframeId).style.display = 'none';
}
// Add esc Exit keyboard event triggered
@@ -279,4 +279,4 @@
} else {
document.body.onload = embedChatbot;
}
-})();
+})();
\ No newline at end of file
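
Taken together, the embed.js hunks switch the bubble window from viewport-fixed to container-relative positioning: `createIframe` now returns the element instead of appending it to `document.body`, the click handler appends it to the button container, the iframe is no longer pre-created on page load, and `resetIframePosition` bails out on viewports of 640px or less and only decides whether the window opens above or below the button and anchors to its right or left edge. A Python sketch of that placement decision follows; the function and parameter names are hypothetical, and only the comparisons mirror the JavaScript above.

def place_iframe(
    button_top: float,
    button_right: float,
    button_height: float,
    iframe_height: float,
    iframe_width: float,
    viewport_width: float,
) -> dict | None:
    # Mirrors the comparisons in the new resetIframePosition; names are hypothetical.
    if viewport_width <= 640:
        return None  # small screens: leave the stylesheet defaults alone
    style = {}
    if button_top - 5 > iframe_height:  # enough room above the button
        style.update(bottom=f"{button_height + 5}px", top="unset")
    else:  # otherwise open the window below it
        style.update(bottom="unset", top=f"{button_height + 5}px")
    if button_right > iframe_width:  # window fits between the viewport's left edge and the button
        style.update(right="0", left="unset")
    else:
        style.update(right="unset", left="0")
    return style
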
diff --git a/web/public/embed.min.js b/web/public/embed.min.js
index 0e023cb5d1..eb20858148 100644
--- a/web/public/embed.min.js
+++ b/web/public/embed.min.js
@@ -1,31 +1,26 @@
-(()=>{let t="difyChatbotConfig",a="dify-chatbot-bubble-button",c="dify-chatbot-bubble-window",h=window[t],p={open:``,close:``};async function e(){if(h&&h.token){var e=new URLSearchParams(await(async()=>{var e=h?.inputs||{};let n={};return await Promise.all(Object.entries(e).map(async([e,t])=>{n[e]=(e=t,e=(new TextEncoder).encode(e),e=new Response(new Blob([e]).stream().pipeThrough(new CompressionStream("gzip"))).arrayBuffer(),e=new Uint8Array(await e),await btoa(String.fromCharCode(...e)))})),n})());let t=`${h.baseUrl||`https://${h.isDev?"dev.":""}udify.app`}/chatbot/${h.token}?`+e;function o(){var e=document.createElement("iframe");e.allow="fullscreen;microphone",e.title="dify chatbot bubble window",e.id=c,e.src=t,e.style.cssText=`
- border: none; position: fixed; flex-direction: column; justify-content: space-between;
- box-shadow: rgba(150, 150, 150, 0.2) 0px 10px 30px 0px, rgba(150, 150, 150, 0.2) 0px 0px 0px 1px;
- bottom: 5rem; right: 1rem; width: 24rem; max-width: calc(100vw - 2rem); height: 40rem;
- max-height: calc(100vh - 6rem); border-radius: 0.75rem; display: flex; z-index: 2147483647;
- overflow: hidden; left: unset; background-color: #F3F4F6;user-select: none;
- `,document.body.appendChild(e)}function i(){var e,t,n,o=document.getElementById(c),i=document.getElementById(a);o&&i&&(i=i.getBoundingClientRect(),e=window.innerHeight-i.bottom,t=window.innerWidth-i.right,n=i.left,o.style.bottom=`${e+i.height+5+o.clientHeight>window.innerHeight?e-o.clientHeight-5:e+i.height+5}px`,o.style.right=`${t+o.clientWidth>window.innerWidth?window.innerWidth-n-o.clientWidth:t}px`)}function n(){let n=document.createElement("div");Object.entries(h.containerProps||{}).forEach(([e,t])=>{"className"===e?n.classList.add(...t.split(" ")):"style"===e?"object"==typeof t?Object.assign(n.style,t):n.style.cssText=t:"function"==typeof t?n.addEventListener(e.replace(/^on/,"").toLowerCase(),t):n[e]=t}),n.id=a;var e=document.createElement("style");document.head.appendChild(e),e.sheet.insertRule(`
- #${n.id} {
- position: fixed;
- bottom: var(--${n.id}-bottom, 1rem);
- right: var(--${n.id}-right, 1rem);
- left: var(--${n.id}-left, unset);
- top: var(--${n.id}-top, unset);
- width: var(--${n.id}-width, 50px);
- height: var(--${n.id}-height, 50px);
- border-radius: var(--${n.id}-border-radius, 25px);
- background-color: var(--${n.id}-bg-color, #155EEF);
- box-shadow: var(--${n.id}-box-shadow, rgba(0, 0, 0, 0.2) 0px 4px 8px 0px);
- cursor: pointer;
- z-index: 2147483647;
- transition: all 0.2s ease-in-out 0s;
- }
- `),e.sheet.insertRule(`
- #${n.id}:hover {
- transform: var(--${n.id}-hover-transform, scale(1.1));
- }
- `);let t=document.createElement("div");if(t.style.cssText="display: flex; align-items: center; justify-content: center; width: 100%; height: 100%; z-index: 2147483647;",t.innerHTML=p.open,n.appendChild(t),document.body.appendChild(n),n.addEventListener("click",function(){var e=document.getElementById(c);e?(e.style.display="none"===e.style.display?"block":"none",t.innerHTML="none"===e.style.display?p.open:p.close,"none"===e.style.display?document.removeEventListener("keydown",d):document.addEventListener("keydown",d),i()):(o(),i(),this.title="Exit (ESC)",t.innerHTML=p.close,document.addEventListener("keydown",d))}),h.draggable){var s=n;var l=h.dragAxis||"both";let i=!1,d,r;s.addEventListener("mousedown",function(e){i=!0,d=e.clientX-s.offsetLeft,r=e.clientY-s.offsetTop}),document.addEventListener("mousemove",function(e){var t,n,o;i&&(s.style.transition="none",s.style.cursor="grabbing",(t=document.getElementById(c))&&(t.style.display="none",s.querySelector("div").innerHTML=p.open),t=e.clientX-d,e=window.innerHeight-e.clientY-r,o=s.getBoundingClientRect(),n=window.innerWidth-o.width,o=window.innerHeight-o.height,"x"!==l&&"both"!==l||s.style.setProperty(`--${a}-left`,Math.max(0,Math.min(t,n))+"px"),"y"!==l&&"both"!==l||s.style.setProperty(`--${a}-bottom`,Math.max(0,Math.min(e,o))+"px"))}),document.addEventListener("mouseup",function(){i=!1,s.style.transition="",s.style.cursor="pointer"})}}2048
+
+ `,close:``};async function e(){if(p&&p.token){var e=new URLSearchParams(await async function(){var e=p?.inputs||{};const n={};return await Promise.all(Object.entries(e).map(async([e,t])=>{n[e]=(e=t,e=(new TextEncoder).encode(e),e=new Response(new Blob([e]).stream().pipeThrough(new CompressionStream("gzip"))).arrayBuffer(),e=new Uint8Array(await e),await btoa(String.fromCharCode(...e)))})),n}());const i=`${p.baseUrl||`https://${p.isDev?"dev.":""}udify.app`}/chatbot/${p.token}?`+e;function o(){var e,t;window.innerWidth<=640||(e=document.getElementById(c),t=document.getElementById(a),e&&t&&((t=t.getBoundingClientRect()).top-5>e.clientHeight?(e.style.bottom=t.height+5+"px",e.style.top="unset"):(e.style.bottom="unset",e.style.top=t.height+5+"px"),t.right>e.clientWidth?(e.style.right="0",e.style.left="unset"):(e.style.right="unset",e.style.left=0)))}function t(){const n=document.createElement("div");Object.entries(p.containerProps||{}).forEach(([e,t])=>{"className"===e?n.classList.add(...t.split(" ")):"style"===e?"object"==typeof t?Object.assign(n.style,t):n.style.cssText=t:"function"==typeof t?n.addEventListener(e.replace(/^on/,"").toLowerCase(),t):n[e]=t}),n.id=a;var e=document.createElement("style");document.head.appendChild(e),e.sheet.insertRule(`
+ #${n.id} {
+ position: fixed;
+ bottom: var(--${n.id}-bottom, 1rem);
+ right: var(--${n.id}-right, 1rem);
+ left: var(--${n.id}-left, unset);
+ top: var(--${n.id}-top, unset);
+ width: var(--${n.id}-width, 50px);
+ height: var(--${n.id}-height, 50px);
+ border-radius: var(--${n.id}-border-radius, 25px);
+ background-color: var(--${n.id}-bg-color, #155EEF);
+ box-shadow: var(--${n.id}-box-shadow, rgba(0, 0, 0, 0.2) 0px 4px 8px 0px);
+ cursor: pointer;
+ z-index: 2147483647;
+ }
+ `);const t=document.createElement("div");if(t.style.cssText="display: flex; align-items: center; justify-content: center; width: 100%; height: 100%; z-index: 2147483647;",t.innerHTML=h.open,n.appendChild(t),document.body.appendChild(n),n.addEventListener("click",function(){var e=document.getElementById(c);e?(e.style.display="none"===e.style.display?"block":"none",t.innerHTML="none"===e.style.display?h.open:h.close,"none"===e.style.display?document.removeEventListener("keydown",d):document.addEventListener("keydown",d),o()):(n.appendChild(((e=document.createElement("iframe")).allow="fullscreen;microphone",e.title="dify chatbot bubble window",e.id=c,e.src=i,e.style.cssText=`
+ border: none; position: absolute; flex-direction: column; justify-content: space-between;
+ box-shadow: rgba(150, 150, 150, 0.2) 0px 10px 30px 0px, rgba(150, 150, 150, 0.2) 0px 0px 0px 1px;
+ bottom: 55px; right: 0; width: 24rem; max-width: calc(100vw - 2rem); height: 40rem;
+ max-height: calc(100vh - 6rem); border-radius: 0.75rem; display: flex; z-index: 2147483647;
+ overflow: hidden; left: unset; background-color: #F3F4F6;user-select: none;
+ `,e)),o(),this.title="Exit (ESC)",t.innerHTML=h.close,document.addEventListener("keydown",d))}),p.draggable){var s=n;var l=p.dragAxis||"both";let i=!1,d,r;s.addEventListener("mousedown",function(e){i=!0,d=e.clientX-s.offsetLeft,r=e.clientY-s.offsetTop}),document.addEventListener("mousemove",function(e){var t,n,o;i&&(s.style.transition="none",s.style.cursor="grabbing",(t=document.getElementById(c))&&(t.style.display="none",s.querySelector("div").innerHTML=h.open),t=e.clientX-d,e=window.innerHeight-e.clientY-r,o=s.getBoundingClientRect(),n=window.innerWidth-o.width,o=window.innerHeight-o.height,"x"!==l&&"both"!==l||s.style.setProperty(`--${a}-left`,Math.max(0,Math.min(t,n))+"px"),"y"!==l&&"both"!==l||s.style.setProperty(`--${a}-bottom`,Math.max(0,Math.min(e,o))+"px"))}),document.addEventListener("mouseup",function(){i=!1,s.style.transition="",s.style.cursor="pointer"})}}2048