diff --git a/api/core/model_runtime/model_providers/openai/moderation/moderation.py b/api/core/model_runtime/model_providers/openai/moderation/moderation.py
deleted file mode 100644
index 9bf055ce6b..0000000000
--- a/api/core/model_runtime/model_providers/openai/moderation/moderation.py
+++ /dev/null
@@ -1,170 +0,0 @@
-from collections.abc import Mapping
-from typing import Optional
-
-import openai
-from httpx import Timeout
-from openai import OpenAI
-from openai.types import ModerationCreateResponse
-
-from core.model_runtime.entities.model_entities import ModelPropertyKey
-from core.model_runtime.errors.invoke import (
-    InvokeAuthorizationError,
-    InvokeBadRequestError,
-    InvokeConnectionError,
-    InvokeError,
-    InvokeRateLimitError,
-    InvokeServerUnavailableError,
-)
-from core.model_runtime.errors.validate import CredentialsValidateFailedError
-from core.model_runtime.model_providers.__base.moderation_model import ModerationModel
-
-
-class OpenAIModerationModel(ModerationModel):
-    """
-    Model class for OpenAI text moderation model.
-    """
-
-    def _invoke(self, model: str, credentials: dict, text: str, user: Optional[str] = None) -> bool:
-        """
-        Invoke moderation model
-
-        :param model: model name
-        :param credentials: model credentials
-        :param text: text to moderate
-        :param user: unique user id
-        :return: false if text is safe, true otherwise
-        """
-        # transform credentials to kwargs for model instance
-        credentials_kwargs = self._to_credential_kwargs(credentials)
-
-        # init model client
-        client = OpenAI(**credentials_kwargs)
-
-        # chars per chunk
-        length = self._get_max_characters_per_chunk(model, credentials)
-        text_chunks = [text[i : i + length] for i in range(0, len(text), length)]
-
-        max_text_chunks = self._get_max_chunks(model, credentials)
-        chunks = [text_chunks[i : i + max_text_chunks] for i in range(0, len(text_chunks), max_text_chunks)]
-
-        for text_chunk in chunks:
-            moderation_result = self._moderation_invoke(model=model, client=client, texts=text_chunk)
-
-            for result in moderation_result.results:
-                if result.flagged is True:
-                    return True
-
-        return False
-
-    def validate_credentials(self, model: str, credentials: dict) -> None:
-        """
-        Validate model credentials
-
-        :param model: model name
-        :param credentials: model credentials
-        :return:
-        """
-        try:
-            # transform credentials to kwargs for model instance
-            credentials_kwargs = self._to_credential_kwargs(credentials)
-            client = OpenAI(**credentials_kwargs)
-
-            # call moderation model
-            self._moderation_invoke(
-                model=model,
-                client=client,
-                texts=["ping"],
-            )
-        except Exception as ex:
-            raise CredentialsValidateFailedError(str(ex))
-
-    def _moderation_invoke(self, model: str, client: OpenAI, texts: list[str]) -> ModerationCreateResponse:
-        """
-        Invoke moderation model
-
-        :param model: model name
-        :param client: model client
-        :param texts: texts to moderate
-        :return: false if text is safe, true otherwise
-        """
-        # call moderation model
-        moderation_result = client.moderations.create(model=model, input=texts)
-
-        return moderation_result
-
-    def _get_max_characters_per_chunk(self, model: str, credentials: dict) -> int:
-        """
-        Get max characters per chunk
-
-        :param model: model name
-        :param credentials: model credentials
-        :return: max characters per chunk
-        """
-        model_schema = self.get_model_schema(model, credentials)
-
-        if model_schema and ModelPropertyKey.MAX_CHARACTERS_PER_CHUNK in model_schema.model_properties:
-            max_characters_per_chunk: int = model_schema.model_properties[ModelPropertyKey.MAX_CHARACTERS_PER_CHUNK]
-            return max_characters_per_chunk
-
-        return 2000
-
-    def _get_max_chunks(self, model: str, credentials: dict) -> int:
-        """
-        Get max chunks for given embedding model
-
-        :param model: model name
-        :param credentials: model credentials
-        :return: max chunks
-        """
-        model_schema = self.get_model_schema(model, credentials)
-
-        if model_schema and ModelPropertyKey.MAX_CHUNKS in model_schema.model_properties:
-            max_chunks: int = model_schema.model_properties[ModelPropertyKey.MAX_CHUNKS]
-            return max_chunks
-
-        return 1
-
-    def _to_credential_kwargs(self, credentials: Mapping) -> dict:
-        """
-        Transform credentials to kwargs for model instance
-
-        :param credentials:
-        :return:
-        """
-        credentials_kwargs = {
-            "api_key": credentials["openai_api_key"],
-            "timeout": Timeout(315.0, read=300.0, write=10.0, connect=5.0),
-            "max_retries": 1,
-        }
-
-        if credentials.get("openai_api_base"):
-            openai_api_base = credentials["openai_api_base"].rstrip("/")
-            credentials_kwargs["base_url"] = openai_api_base + "/v1"
-
-        if "openai_organization" in credentials:
-            credentials_kwargs["organization"] = credentials["openai_organization"]
-
-        return credentials_kwargs
-
-    @property
-    def _invoke_error_mapping(self) -> dict[type[InvokeError], list[type[Exception]]]:
-        """
-        Map model invoke error to unified error
-        The key is the error type thrown to the caller
-        The value is the error type thrown by the model,
-        which needs to be converted into a unified error type for the caller.
-
-        :return: Invoke error mapping
-        """
-        return {
-            InvokeConnectionError: [openai.APIConnectionError, openai.APITimeoutError],
-            InvokeServerUnavailableError: [openai.InternalServerError],
-            InvokeRateLimitError: [openai.RateLimitError],
-            InvokeAuthorizationError: [openai.AuthenticationError, openai.PermissionDeniedError],
-            InvokeBadRequestError: [
-                openai.BadRequestError,
-                openai.NotFoundError,
-                openai.UnprocessableEntityError,
-                openai.APIError,
-            ],
-        }
diff --git a/api/core/model_runtime/model_providers/vertex_ai/llm/_position.yaml b/api/core/model_runtime/model_providers/vertex_ai/llm/_position.yaml
deleted file mode 100644
index 321a492323..0000000000
--- a/api/core/model_runtime/model_providers/vertex_ai/llm/_position.yaml
+++ /dev/null
@@ -1,22 +0,0 @@
-- claude-3-haiku@20240307
-- claude-3-opus@20240229
-- claude-3-sonnet@20240229
-- claude-3-5-sonnet-v2@20241022
-- claude-3-5-sonnet@20240620
-- gemini-1.0-pro-vision-001
-- gemini-1.0-pro-002
-- gemini-1.5-flash-001
-- gemini-1.5-flash-002
-- gemini-1.5-pro-001
-- gemini-1.5-pro-002
-- gemini-2.0-flash-001
-- gemini-2.0-flash-exp
-- gemini-2.0-flash-lite-preview-02-05
-- gemini-2.0-flash-thinking-exp-01-21
-- gemini-2.0-flash-thinking-exp-1219
-- gemini-2.0-pro-exp-02-05
-- gemini-exp-1114
-- gemini-exp-1121
-- gemini-exp-1206
-- gemini-flash-experimental
-- gemini-pro-experimental
diff --git a/api/core/model_runtime/model_providers/vertex_ai/llm/gemini-2.0-flash-001.yaml b/api/core/model_runtime/model_providers/vertex_ai/llm/gemini-2.0-flash-001.yaml
deleted file mode 100644
index bef7ca5eef..0000000000
--- a/api/core/model_runtime/model_providers/vertex_ai/llm/gemini-2.0-flash-001.yaml
+++ /dev/null
@@ -1,41 +0,0 @@
-model: gemini-2.0-flash-001
-label:
-  en_US: Gemini 2.0 Flash 001
-model_type: llm
-features:
-  - agent-thought
-  - vision
-  - tool-call
-  - stream-tool-call
-  - document
-  - video
-  - audio
-model_properties:
-  mode: chat
-  context_size: 1048576
-parameter_rules:
-  - name: temperature
-    use_template: temperature
-  - name: top_p
-    use_template: top_p
-  - name: top_k
-    label:
-      zh_Hans: 取样数量
-      en_US: Top k
-    type: int
-    help:
-      zh_Hans: 仅从每个后续标记的前 K 个选项中采样。
-      en_US: Only sample from the top K options for each subsequent token.
-    required: false
-  - name: max_output_tokens
-    use_template: max_tokens
-    default: 8192
-    min: 1
-    max: 8192
-  - name: json_schema
-    use_template: json_schema
-pricing:
-  input: '0.00'
-  output: '0.00'
-  unit: '0.000001'
-  currency: USD
diff --git a/api/core/model_runtime/model_providers/vertex_ai/llm/gemini-2.0-flash-lite-preview-02-05.yaml b/api/core/model_runtime/model_providers/vertex_ai/llm/gemini-2.0-flash-lite-preview-02-05.yaml
deleted file mode 100644
index 9c0a1e0620..0000000000
--- a/api/core/model_runtime/model_providers/vertex_ai/llm/gemini-2.0-flash-lite-preview-02-05.yaml
+++ /dev/null
@@ -1,41 +0,0 @@
-model: gemini-2.0-flash-lite-preview-02-05
-label:
-  en_US: Gemini 2.0 Flash Lite Preview 0205
-model_type: llm
-features:
-  - agent-thought
-  - vision
-  - tool-call
-  - stream-tool-call
-  - document
-  - video
-  - audio
-model_properties:
-  mode: chat
-  context_size: 1048576
-parameter_rules:
-  - name: temperature
-    use_template: temperature
-  - name: top_p
-    use_template: top_p
-  - name: top_k
-    label:
-      zh_Hans: 取样数量
-      en_US: Top k
-    type: int
-    help:
-      zh_Hans: 仅从每个后续标记的前 K 个选项中采样。
-      en_US: Only sample from the top K options for each subsequent token.
-    required: false
-  - name: max_output_tokens
-    use_template: max_tokens
-    default: 8192
-    min: 1
-    max: 8192
-  - name: json_schema
-    use_template: json_schema
-pricing:
-  input: '0.00'
-  output: '0.00'
-  unit: '0.000001'
-  currency: USD
diff --git a/api/core/model_runtime/model_providers/vertex_ai/llm/gemini-2.0-flash-thinking-exp-01-21.yaml b/api/core/model_runtime/model_providers/vertex_ai/llm/gemini-2.0-flash-thinking-exp-01-21.yaml
deleted file mode 100644
index 6e2fc7678e..0000000000
--- a/api/core/model_runtime/model_providers/vertex_ai/llm/gemini-2.0-flash-thinking-exp-01-21.yaml
+++ /dev/null
@@ -1,39 +0,0 @@
-model: gemini-2.0-flash-thinking-exp-01-21
-label:
-  en_US: Gemini 2.0 Flash Thinking Exp 0121
-model_type: llm
-features:
-  - agent-thought
-  - vision
-  - document
-  - video
-  - audio
-model_properties:
-  mode: chat
-  context_size: 32767
-parameter_rules:
-  - name: temperature
-    use_template: temperature
-  - name: top_p
-    use_template: top_p
-  - name: top_k
-    label:
-      zh_Hans: 取样数量
-      en_US: Top k
-    type: int
-    help:
-      zh_Hans: 仅从每个后续标记的前 K 个选项中采样。
-      en_US: Only sample from the top K options for each subsequent token.
-    required: false
-  - name: max_output_tokens
-    use_template: max_tokens
-    default: 8192
-    min: 1
-    max: 8192
-  - name: json_schema
-    use_template: json_schema
-pricing:
-  input: '0.00'
-  output: '0.00'
-  unit: '0.000001'
-  currency: USD
diff --git a/api/core/model_runtime/model_providers/vertex_ai/llm/gemini-2.0-flash-thinking-exp-1219.yaml b/api/core/model_runtime/model_providers/vertex_ai/llm/gemini-2.0-flash-thinking-exp-1219.yaml
deleted file mode 100644
index dfcf8fd050..0000000000
--- a/api/core/model_runtime/model_providers/vertex_ai/llm/gemini-2.0-flash-thinking-exp-1219.yaml
+++ /dev/null
@@ -1,39 +0,0 @@
-model: gemini-2.0-flash-thinking-exp-1219
-label:
-  en_US: Gemini 2.0 Flash Thinking Exp 1219
-model_type: llm
-features:
-  - agent-thought
-  - vision
-  - document
-  - video
-  - audio
-model_properties:
-  mode: chat
-  context_size: 32767
-parameter_rules:
-  - name: temperature
-    use_template: temperature
-  - name: top_p
-    use_template: top_p
-  - name: top_k
-    label:
-      zh_Hans: 取样数量
-      en_US: Top k
-    type: int
-    help:
-      zh_Hans: 仅从每个后续标记的前 K 个选项中采样。
-      en_US: Only sample from the top K options for each subsequent token.
-    required: false
-  - name: max_output_tokens
-    use_template: max_tokens
-    default: 8192
-    min: 1
-    max: 8192
-  - name: json_schema
-    use_template: json_schema
-pricing:
-  input: '0.00'
-  output: '0.00'
-  unit: '0.000001'
-  currency: USD
diff --git a/api/core/model_runtime/model_providers/vertex_ai/llm/gemini-2.0-pro-exp-02-05.yaml b/api/core/model_runtime/model_providers/vertex_ai/llm/gemini-2.0-pro-exp-02-05.yaml
deleted file mode 100644
index 96926a1756..0000000000
--- a/api/core/model_runtime/model_providers/vertex_ai/llm/gemini-2.0-pro-exp-02-05.yaml
+++ /dev/null
@@ -1,37 +0,0 @@
-model: gemini-2.0-pro-exp-02-05
-label:
-  en_US: Gemini 2.0 Pro Exp 0205
-model_type: llm
-features:
-  - agent-thought
-  - document
-model_properties:
-  mode: chat
-  context_size: 2000000
-parameter_rules:
-  - name: temperature
-    use_template: temperature
-  - name: top_p
-    use_template: top_p
-  - name: top_k
-    label:
-      en_US: Top k
-    type: int
-    help:
-      en_US: Only sample from the top K options for each subsequent token.
-    required: false
-  - name: presence_penalty
-    use_template: presence_penalty
-  - name: frequency_penalty
-    use_template: frequency_penalty
-  - name: max_output_tokens
-    use_template: max_tokens
-    required: true
-    default: 8192
-    min: 1
-    max: 8192
-pricing:
-  input: '0.00'
-  output: '0.00'
-  unit: '0.000001'
-  currency: USD
diff --git a/api/core/model_runtime/model_providers/vertex_ai/llm/gemini-exp-1114.yaml b/api/core/model_runtime/model_providers/vertex_ai/llm/gemini-exp-1114.yaml
deleted file mode 100644
index bd49b47693..0000000000
--- a/api/core/model_runtime/model_providers/vertex_ai/llm/gemini-exp-1114.yaml
+++ /dev/null
@@ -1,41 +0,0 @@
-model: gemini-exp-1114
-label:
-  en_US: Gemini exp 1114
-model_type: llm
-features:
-  - agent-thought
-  - vision
-  - tool-call
-  - stream-tool-call
-  - document
-  - video
-  - audio
-model_properties:
-  mode: chat
-  context_size: 32767
-parameter_rules:
-  - name: temperature
-    use_template: temperature
-  - name: top_p
-    use_template: top_p
-  - name: top_k
-    label:
-      zh_Hans: 取样数量
-      en_US: Top k
-    type: int
-    help:
-      zh_Hans: 仅从每个后续标记的前 K 个选项中采样。
-      en_US: Only sample from the top K options for each subsequent token.
-    required: false
-  - name: max_output_tokens
-    use_template: max_tokens
-    default: 8192
-    min: 1
-    max: 8192
-  - name: json_schema
-    use_template: json_schema
-pricing:
-  input: '0.00'
-  output: '0.00'
-  unit: '0.000001'
-  currency: USD
diff --git a/api/core/model_runtime/model_providers/vertex_ai/llm/gemini-exp-1121.yaml b/api/core/model_runtime/model_providers/vertex_ai/llm/gemini-exp-1121.yaml
deleted file mode 100644
index 8e3f218df4..0000000000
--- a/api/core/model_runtime/model_providers/vertex_ai/llm/gemini-exp-1121.yaml
+++ /dev/null
@@ -1,41 +0,0 @@
-model: gemini-exp-1121
-label:
-  en_US: Gemini exp 1121
-model_type: llm
-features:
-  - agent-thought
-  - vision
-  - tool-call
-  - stream-tool-call
-  - document
-  - video
-  - audio
-model_properties:
-  mode: chat
-  context_size: 32767
-parameter_rules:
-  - name: temperature
-    use_template: temperature
-  - name: top_p
-    use_template: top_p
-  - name: top_k
-    label:
-      zh_Hans: 取样数量
-      en_US: Top k
-    type: int
-    help:
-      zh_Hans: 仅从每个后续标记的前 K 个选项中采样。
-      en_US: Only sample from the top K options for each subsequent token.
-    required: false
-  - name: max_output_tokens
-    use_template: max_tokens
-    default: 8192
-    min: 1
-    max: 8192
-  - name: json_schema
-    use_template: json_schema
-pricing:
-  input: '0.00'
-  output: '0.00'
-  unit: '0.000001'
-  currency: USD
diff --git a/api/core/model_runtime/model_providers/vertex_ai/llm/gemini-exp-1206.yaml b/api/core/model_runtime/model_providers/vertex_ai/llm/gemini-exp-1206.yaml
deleted file mode 100644
index 7a7c361c43..0000000000
--- a/api/core/model_runtime/model_providers/vertex_ai/llm/gemini-exp-1206.yaml
+++ /dev/null
@@ -1,41 +0,0 @@
-model: gemini-exp-1206
-label:
-  en_US: Gemini exp 1206
-model_type: llm
-features:
-  - agent-thought
-  - vision
-  - tool-call
-  - stream-tool-call
-  - document
-  - video
-  - audio
-model_properties:
-  mode: chat
-  context_size: 2097152
-parameter_rules:
-  - name: temperature
-    use_template: temperature
-  - name: top_p
-    use_template: top_p
-  - name: top_k
-    label:
-      zh_Hans: 取样数量
-      en_US: Top k
-    type: int
-    help:
-      zh_Hans: 仅从每个后续标记的前 K 个选项中采样。
-      en_US: Only sample from the top K options for each subsequent token.
-    required: false
-  - name: max_output_tokens
-    use_template: max_tokens
-    default: 8192
-    min: 1
-    max: 8192
-  - name: json_schema
-    use_template: json_schema
-pricing:
-  input: '0.00'
-  output: '0.00'
-  unit: '0.000001'
-  currency: USD
diff --git a/api/core/model_runtime/model_providers/zhipuai/llm/glm-4-air-0111.yaml b/api/core/model_runtime/model_providers/zhipuai/llm/glm-4-air-0111.yaml
deleted file mode 100644
index 8d301fc69d..0000000000
--- a/api/core/model_runtime/model_providers/zhipuai/llm/glm-4-air-0111.yaml
+++ /dev/null
@@ -1,66 +0,0 @@
-model: glm-4-air-0111
-label:
-  en_US: glm-4-air-0111
-model_type: llm
-features:
-  - multi-tool-call
-  - agent-thought
-  - stream-tool-call
-model_properties:
-  mode: chat
-  context_size: 131072
-parameter_rules:
-  - name: temperature
-    use_template: temperature
-    default: 0.95
-    min: 0.0
-    max: 1.0
-    help:
-      zh_Hans: 采样温度,控制输出的随机性,必须为正数取值范围是:(0.0,1.0],不能等于 0,默认值为 0.95 值越大,会使输出更随机,更具创造性;值越小,输出会更加稳定或确定建议您根据应用场景调整 top_p 或 temperature 参数,但不要同时调整两个参数。
-      en_US: Sampling temperature, controls the randomness of the output, must be a positive number. The value range is (0.0,1.0], which cannot be equal to 0. The default value is 0.95. The larger the value, the more random and creative the output will be; the smaller the value, The output will be more stable or certain. It is recommended that you adjust the top_p or temperature parameters according to the application scenario, but do not adjust both parameters at the same time.
-  - name: top_p
-    use_template: top_p
-    default: 0.7
-    help:
-      zh_Hans: 用温度取样的另一种方法,称为核取样取值范围是:(0.0, 1.0) 开区间,不能等于 0 或 1,默认值为 0.7 模型考虑具有 top_p 概率质量tokens的结果例如:0.1 意味着模型解码器只考虑从前 10% 的概率的候选集中取 tokens 建议您根据应用场景调整 top_p 或 temperature 参数,但不要同时调整两个参数。
-      en_US: Another method of temperature sampling is called kernel sampling. The value range is (0.0, 1.0) open interval, which cannot be equal to 0 or 1. The default value is 0.7. The model considers the results with top_p probability mass tokens. For example 0.1 means The model decoder only considers tokens from the candidate set with the top 10% probability. It is recommended that you adjust the top_p or temperature parameters according to the application scenario, but do not adjust both parameters at the same time.
-  - name: do_sample
-    label:
-      zh_Hans: 采样策略
-      en_US: Sampling strategy
-    type: boolean
-    help:
-      zh_Hans: do_sample 为 true 时启用采样策略,do_sample 为 false 时采样策略 temperature、top_p 将不生效。默认值为 true。
-      en_US: When `do_sample` is set to true, the sampling strategy is enabled. When `do_sample` is set to false, the sampling strategies such as `temperature` and `top_p` will not take effect. The default value is true.
-    default: true
-  - name: max_tokens
-    use_template: max_tokens
-    default: 1024
-    min: 1
-    max: 4095
-  - name: web_search
-    type: boolean
-    label:
-      zh_Hans: 联网搜索
-      en_US: Web Search
-    default: false
-    help:
-      zh_Hans: 模型内置了互联网搜索服务,该参数控制模型在生成文本时是否参考使用互联网搜索结果。启用互联网搜索,模型会将搜索结果作为文本生成过程中的参考信息,但模型会基于其内部逻辑“自行判断”是否使用互联网搜索结果。
-      en_US: The model has a built-in Internet search service. This parameter controls whether the model refers to Internet search results when generating text. When Internet search is enabled, the model will use the search results as reference information in the text generation process, but the model will "judge" whether to use Internet search results based on its internal logic.
-  - name: response_format
-    label:
-      zh_Hans: 回复格式
-      en_US: Response Format
-    type: string
-    help:
-      zh_Hans: 指定模型必须输出的格式
-      en_US: specifying the format that the model must output
-    required: false
-    options:
-      - text
-      - json_object
-pricing:
-  input: '0.0005'
-  output: '0.0005'
-  unit: '0.001'
-  currency: RMB
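Note (not part of the patch): the chunk-then-moderate flow that the removed OpenAIModerationModel._invoke implemented can be sketched as a minimal standalone snippet. This is an illustrative reconstruction, not the deleted code itself: the values 2000 and 1 are the defaults the removed _get_max_characters_per_chunk / _get_max_chunks fall back to, and the API key and model name are placeholders.

# Illustrative sketch of the removed chunk-and-moderate behaviour (assumptions noted above).
from openai import OpenAI

client = OpenAI(api_key="sk-...")  # placeholder credentials


def is_flagged(text: str, model: str = "omni-moderation-latest") -> bool:
    """Return True if any chunk of `text` is flagged by the moderation endpoint."""
    length = 2000   # default max characters per chunk in the removed code
    max_chunks = 1  # default max chunks per request in the removed code

    # split text into fixed-size character chunks, then group chunks into request batches
    text_chunks = [text[i : i + length] for i in range(0, len(text), length)]
    batches = [text_chunks[i : i + max_chunks] for i in range(0, len(text_chunks), max_chunks)]

    for batch in batches:
        result = client.moderations.create(model=model, input=batch)
        if any(r.flagged for r in result.results):
            return True
    return False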