From 2348abe4bfa189e79c35e0e2f113f7a7eea8c734 Mon Sep 17 00:00:00 2001
From: Riddhimaan-Senapati <114703025+Riddhimaan-Senapati@users.noreply.github.com>
Date: Thu, 6 Feb 2025 20:11:25 -0500
Subject: [PATCH] =?UTF-8?q?feat:=20added=20a=20couple=20of=20models=20not?=
 =?UTF-8?q?=20defined=20in=20vertex=20ai,=20that=20were=20already=20?=
 =?UTF-8?q?=E2=80=A6=20(#13296)?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 .../gemini-2.0-flash-thinking-exp-1219.yaml   | 39 ++++++++++++++++++
 .../vertex_ai/llm/gemini-exp-1114.yaml        | 41 +++++++++++++++++++
 .../vertex_ai/llm/gemini-exp-1121.yaml        | 41 +++++++++++++++++++
 .../vertex_ai/llm/gemini-exp-1206.yaml        | 41 +++++++++++++++++++
 4 files changed, 162 insertions(+)
 create mode 100644 api/core/model_runtime/model_providers/vertex_ai/llm/gemini-2.0-flash-thinking-exp-1219.yaml
 create mode 100644 api/core/model_runtime/model_providers/vertex_ai/llm/gemini-exp-1114.yaml
 create mode 100644 api/core/model_runtime/model_providers/vertex_ai/llm/gemini-exp-1121.yaml
 create mode 100644 api/core/model_runtime/model_providers/vertex_ai/llm/gemini-exp-1206.yaml

diff --git a/api/core/model_runtime/model_providers/vertex_ai/llm/gemini-2.0-flash-thinking-exp-1219.yaml b/api/core/model_runtime/model_providers/vertex_ai/llm/gemini-2.0-flash-thinking-exp-1219.yaml
new file mode 100644
index 0000000000..dfcf8fd050
--- /dev/null
+++ b/api/core/model_runtime/model_providers/vertex_ai/llm/gemini-2.0-flash-thinking-exp-1219.yaml
@@ -0,0 +1,39 @@
+model: gemini-2.0-flash-thinking-exp-1219
+label:
+  en_US: Gemini 2.0 Flash Thinking Exp 1219
+model_type: llm
+features:
+  - agent-thought
+  - vision
+  - document
+  - video
+  - audio
+model_properties:
+  mode: chat
+  context_size: 32767
+parameter_rules:
+  - name: temperature
+    use_template: temperature
+  - name: top_p
+    use_template: top_p
+  - name: top_k
+    label:
+      zh_Hans: 取样数量
+      en_US: Top k
+    type: int
+    help:
+      zh_Hans: 仅从每个后续标记的前 K 个选项中采样。
+      en_US: Only sample from the top K options for each subsequent token.
+    required: false
+  - name: max_output_tokens
+    use_template: max_tokens
+    default: 8192
+    min: 1
+    max: 8192
+  - name: json_schema
+    use_template: json_schema
+pricing:
+  input: '0.00'
+  output: '0.00'
+  unit: '0.000001'
+  currency: USD
diff --git a/api/core/model_runtime/model_providers/vertex_ai/llm/gemini-exp-1114.yaml b/api/core/model_runtime/model_providers/vertex_ai/llm/gemini-exp-1114.yaml
new file mode 100644
index 0000000000..bd49b47693
--- /dev/null
+++ b/api/core/model_runtime/model_providers/vertex_ai/llm/gemini-exp-1114.yaml
@@ -0,0 +1,41 @@
+model: gemini-exp-1114
+label:
+  en_US: Gemini exp 1114
+model_type: llm
+features:
+  - agent-thought
+  - vision
+  - tool-call
+  - stream-tool-call
+  - document
+  - video
+  - audio
+model_properties:
+  mode: chat
+  context_size: 32767
+parameter_rules:
+  - name: temperature
+    use_template: temperature
+  - name: top_p
+    use_template: top_p
+  - name: top_k
+    label:
+      zh_Hans: 取样数量
+      en_US: Top k
+    type: int
+    help:
+      zh_Hans: 仅从每个后续标记的前 K 个选项中采样。
+      en_US: Only sample from the top K options for each subsequent token.
+    required: false
+  - name: max_output_tokens
+    use_template: max_tokens
+    default: 8192
+    min: 1
+    max: 8192
+  - name: json_schema
+    use_template: json_schema
+pricing:
+  input: '0.00'
+  output: '0.00'
+  unit: '0.000001'
+  currency: USD
diff --git a/api/core/model_runtime/model_providers/vertex_ai/llm/gemini-exp-1121.yaml b/api/core/model_runtime/model_providers/vertex_ai/llm/gemini-exp-1121.yaml
new file mode 100644
index 0000000000..8e3f218df4
--- /dev/null
+++ b/api/core/model_runtime/model_providers/vertex_ai/llm/gemini-exp-1121.yaml
@@ -0,0 +1,41 @@
+model: gemini-exp-1121
+label:
+  en_US: Gemini exp 1121
+model_type: llm
+features:
+  - agent-thought
+  - vision
+  - tool-call
+  - stream-tool-call
+  - document
+  - video
+  - audio
+model_properties:
+  mode: chat
+  context_size: 32767
+parameter_rules:
+  - name: temperature
+    use_template: temperature
+  - name: top_p
+    use_template: top_p
+  - name: top_k
+    label:
+      zh_Hans: 取样数量
+      en_US: Top k
+    type: int
+    help:
+      zh_Hans: 仅从每个后续标记的前 K 个选项中采样。
+      en_US: Only sample from the top K options for each subsequent token.
+    required: false
+  - name: max_output_tokens
+    use_template: max_tokens
+    default: 8192
+    min: 1
+    max: 8192
+  - name: json_schema
+    use_template: json_schema
+pricing:
+  input: '0.00'
+  output: '0.00'
+  unit: '0.000001'
+  currency: USD
diff --git a/api/core/model_runtime/model_providers/vertex_ai/llm/gemini-exp-1206.yaml b/api/core/model_runtime/model_providers/vertex_ai/llm/gemini-exp-1206.yaml
new file mode 100644
index 0000000000..7a7c361c43
--- /dev/null
+++ b/api/core/model_runtime/model_providers/vertex_ai/llm/gemini-exp-1206.yaml
@@ -0,0 +1,41 @@
+model: gemini-exp-1206
+label:
+  en_US: Gemini exp 1206
+model_type: llm
+features:
+  - agent-thought
+  - vision
+  - tool-call
+  - stream-tool-call
+  - document
+  - video
+  - audio
+model_properties:
+  mode: chat
+  context_size: 2097152
+parameter_rules:
+  - name: temperature
+    use_template: temperature
+  - name: top_p
+    use_template: top_p
+  - name: top_k
+    label:
+      zh_Hans: 取样数量
+      en_US: Top k
+    type: int
+    help:
+      zh_Hans: 仅从每个后续标记的前 K 个选项中采样。
+      en_US: Only sample from the top K options for each subsequent token.
+    required: false
+  - name: max_output_tokens
+    use_template: max_tokens
+    default: 8192
+    min: 1
+    max: 8192
+  - name: json_schema
+    use_template: json_schema
+pricing:
+  input: '0.00'
+  output: '0.00'
+  unit: '0.000001'
+  currency: USD