mirror of https://git.mirrors.martin98.com/https://github.com/langgenius/dify.git
synced 2025-08-15 17:45:58 +08:00
parent 8e75eb5c63
commit fe1846c437
@@ -19,8 +19,8 @@ class GoogleProvider(ModelProvider):
         try:
             model_instance = self.get_model_instance(ModelType.LLM)

-            # Use `gemini-pro` model for validate,
-            model_instance.validate_credentials(model="gemini-pro", credentials=credentials)
+            # Use `gemini-2.0-flash` model for validate,
+            model_instance.validate_credentials(model="gemini-2.0-flash", credentials=credentials)
         except CredentialsValidateFailedError as ex:
             raise ex
         except Exception as ex:
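For context, a minimal sketch of how the provider-level credential check reads after this change, reconstructed only from the hunk above. The method name validate_provider_credentials, the import paths, and the logging in the final except block are assumptions not shown in this diff:

import logging

# Import paths assumed; adjust to the repo layout.
from core.model_runtime.entities.model_entities import ModelType
from core.model_runtime.errors.validate import CredentialsValidateFailedError
from core.model_runtime.model_providers.__base.model_provider import ModelProvider

logger = logging.getLogger(__name__)


class GoogleProvider(ModelProvider):
    def validate_provider_credentials(self, credentials: dict) -> None:
        # Validate provider credentials with one cheap call against a single model.
        try:
            model_instance = self.get_model_instance(ModelType.LLM)

            # Use `gemini-2.0-flash` model for validate,
            model_instance.validate_credentials(model="gemini-2.0-flash", credentials=credentials)
        except CredentialsValidateFailedError as ex:
            raise ex
        except Exception as ex:
            # Body of this handler is not part of the diff; logging then re-raising is an assumption.
            logger.exception("Google provider credentials validation failed")
            raise ex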
@@ -19,5 +19,3 @@
 - gemini-exp-1206
 - gemini-exp-1121
 - gemini-exp-1114
-- gemini-pro
-- gemini-pro-vision
@@ -1,35 +0,0 @@
-model: gemini-pro-vision
-label:
-  en_US: Gemini Pro Vision
-model_type: llm
-features:
-  - vision
-model_properties:
-  mode: chat
-  context_size: 12288
-parameter_rules:
-  - name: temperature
-    use_template: temperature
-  - name: top_p
-    use_template: top_p
-  - name: top_k
-    label:
-      zh_Hans: 取样数量
-      en_US: Top k
-    type: int
-    help:
-      zh_Hans: 仅从每个后续标记的前 K 个选项中采样。
-      en_US: Only sample from the top K options for each subsequent token.
-    required: false
-  - name: max_tokens_to_sample
-    use_template: max_tokens
-    required: true
-    default: 4096
-    min: 1
-    max: 4096
-pricing:
-  input: '0.00'
-  output: '0.00'
-  unit: '0.000001'
-  currency: USD
-deprecated: true
@@ -1,39 +0,0 @@
-model: gemini-pro
-label:
-  en_US: Gemini Pro
-model_type: llm
-features:
-  - agent-thought
-  - tool-call
-  - stream-tool-call
-model_properties:
-  mode: chat
-  context_size: 30720
-parameter_rules:
-  - name: temperature
-    use_template: temperature
-  - name: top_p
-    use_template: top_p
-  - name: top_k
-    label:
-      zh_Hans: 取样数量
-      en_US: Top k
-    type: int
-    help:
-      zh_Hans: 仅从每个后续标记的前 K 个选项中采样。
-      en_US: Only sample from the top K options for each subsequent token.
-    required: false
-  - name: max_tokens_to_sample
-    use_template: max_tokens
-    required: true
-    default: 2048
-    min: 1
-    max: 2048
-  - name: response_format
-    use_template: response_format
-pricing:
-  input: '0.00'
-  output: '0.00'
-  unit: '0.000001'
-  currency: USD
-deprecated: true
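Both deleted YAML files were already flagged deprecated: true, and this commit removes them entirely. As a purely hypothetical illustration (not part of this commit, and not the project's actual loader), one way to skip such deprecated model config files with a standard YAML parser:

from pathlib import Path

import yaml  # PyYAML, assumed available


def load_active_model_configs(config_dir: str) -> list[dict]:
    # Load model YAML files from a directory, skipping any marked `deprecated: true`.
    configs = []
    for path in sorted(Path(config_dir).glob("*.yaml")):
        data = yaml.safe_load(path.read_text(encoding="utf-8")) or {}
        if data.get("deprecated"):
            continue
        configs.append(data)
    return configs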