From d9579f418dc6617d1c4fd9e9e61a17781324c087 Mon Sep 17 00:00:00 2001
From: CXwudi
Date: Fri, 22 Nov 2024 00:14:20 -0500
Subject: [PATCH] chore: Added the new gemini exp-1121 and learnlm-1.5 models
 (#10963)

---
 .../google/llm/gemini-exp-1114.yaml                |  2 +-
 .../google/llm/gemini-exp-1121.yaml                | 38 +++++++++++++++++++
 .../llm/learnlm-1.5-pro-experimental.yaml          | 38 +++++++++++++++++++
 3 files changed, 77 insertions(+), 1 deletion(-)
 create mode 100644 api/core/model_runtime/model_providers/google/llm/gemini-exp-1121.yaml
 create mode 100644 api/core/model_runtime/model_providers/google/llm/learnlm-1.5-pro-experimental.yaml

diff --git a/api/core/model_runtime/model_providers/google/llm/gemini-exp-1114.yaml b/api/core/model_runtime/model_providers/google/llm/gemini-exp-1114.yaml
index f126627689..2d4965ad25 100644
--- a/api/core/model_runtime/model_providers/google/llm/gemini-exp-1114.yaml
+++ b/api/core/model_runtime/model_providers/google/llm/gemini-exp-1114.yaml
@@ -9,7 +9,7 @@ features:
   - stream-tool-call
 model_properties:
   mode: chat
-  context_size: 2097152
+  context_size: 32767
 parameter_rules:
   - name: temperature
     use_template: temperature
diff --git a/api/core/model_runtime/model_providers/google/llm/gemini-exp-1121.yaml b/api/core/model_runtime/model_providers/google/llm/gemini-exp-1121.yaml
new file mode 100644
index 0000000000..9ca4f6e675
--- /dev/null
+++ b/api/core/model_runtime/model_providers/google/llm/gemini-exp-1121.yaml
@@ -0,0 +1,38 @@
+model: gemini-exp-1121
+label:
+  en_US: Gemini exp 1121
+model_type: llm
+features:
+  - agent-thought
+  - vision
+  - tool-call
+  - stream-tool-call
+model_properties:
+  mode: chat
+  context_size: 32767
+parameter_rules:
+  - name: temperature
+    use_template: temperature
+  - name: top_p
+    use_template: top_p
+  - name: top_k
+    label:
+      zh_Hans: 取样数量
+      en_US: Top k
+    type: int
+    help:
+      zh_Hans: 仅从每个后续标记的前 K 个选项中采样。
+      en_US: Only sample from the top K options for each subsequent token.
+    required: false
+  - name: max_output_tokens
+    use_template: max_tokens
+    default: 8192
+    min: 1
+    max: 8192
+  - name: json_schema
+    use_template: json_schema
+pricing:
+  input: '0.00'
+  output: '0.00'
+  unit: '0.000001'
+  currency: USD
diff --git a/api/core/model_runtime/model_providers/google/llm/learnlm-1.5-pro-experimental.yaml b/api/core/model_runtime/model_providers/google/llm/learnlm-1.5-pro-experimental.yaml
new file mode 100644
index 0000000000..0b29814289
--- /dev/null
+++ b/api/core/model_runtime/model_providers/google/llm/learnlm-1.5-pro-experimental.yaml
@@ -0,0 +1,38 @@
+model: learnlm-1.5-pro-experimental
+label:
+  en_US: LearnLM 1.5 Pro Experimental
+model_type: llm
+features:
+  - agent-thought
+  - vision
+  - tool-call
+  - stream-tool-call
+model_properties:
+  mode: chat
+  context_size: 32767
+parameter_rules:
+  - name: temperature
+    use_template: temperature
+  - name: top_p
+    use_template: top_p
+  - name: top_k
+    label:
+      zh_Hans: 取样数量
+      en_US: Top k
+    type: int
+    help:
+      zh_Hans: 仅从每个后续标记的前 K 个选项中采样。
+      en_US: Only sample from the top K options for each subsequent token.
+    required: false
+  - name: max_output_tokens
+    use_template: max_tokens
+    default: 8192
+    min: 1
+    max: 8192
+  - name: json_schema
+    use_template: json_schema
+pricing:
+  input: '0.00'
+  output: '0.00'
+  unit: '0.000001'
+  currency: USD
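
Review note: the sketch below is a hypothetical way to sanity-check the two new model definition files against the values set in this patch (the 32767 context window and the 8192 max_output_tokens cap). It assumes PyYAML is installed and the script is run from the repository root; REQUIRED_KEYS is an illustrative subset chosen for this check, not Dify's actual schema validator.

    # Hypothetical sanity check for the model YAML files added in this patch.
    # Assumes PyYAML is available and the repo layout matches the paths above.
    from pathlib import Path

    import yaml

    MODEL_DIR = Path("api/core/model_runtime/model_providers/google/llm")
    NEW_FILES = [
        "gemini-exp-1121.yaml",
        "learnlm-1.5-pro-experimental.yaml",
    ]
    # Illustrative set of top-level keys; not the project's real schema.
    REQUIRED_KEYS = {
        "model", "label", "model_type", "features",
        "model_properties", "parameter_rules", "pricing",
    }

    for name in NEW_FILES:
        data = yaml.safe_load((MODEL_DIR / name).read_text(encoding="utf-8"))
        missing = REQUIRED_KEYS - data.keys()
        assert not missing, f"{name}: missing keys {missing}"
        # The patch sets a 32767-token context window for these experimental models.
        assert data["model_properties"]["context_size"] == 32767, name
        # max_output_tokens should cap at 8192, matching the parameter rule in the YAML.
        rules = {rule["name"]: rule for rule in data["parameter_rules"]}
        assert rules["max_output_tokens"]["max"] == 8192, name
        print(f"{name}: OK")

Running the script from the repository root prints one "OK" line per file when the definitions parse and carry the expected values.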