feat: support Ernie-lite-pro-128k (#12161)

Co-authored-by: bigfish49 <bigfish49@126.com>
Kepler authored on 2024-12-27 20:23:46 +08:00, committed by GitHub
parent 9d86056f1c
commit 2a909e634b
2 changed files with 43 additions and 0 deletions


@@ -122,6 +122,7 @@ class _CommonWenxin:
"bge-large-zh": "https://aip.baidubce.com/rpc/2.0/ai_custom/v1/wenxinworkshop/embeddings/bge_large_zh",
"tao-8k": "https://aip.baidubce.com/rpc/2.0/ai_custom/v1/wenxinworkshop/embeddings/tao_8k",
"bce-reranker-base_v1": "https://aip.baidubce.com/rpc/2.0/ai_custom/v1/wenxinworkshop/reranker/bce_reranker_base",
"ernie-lite-pro-128k": "https://aip.baidubce.com/rpc/2.0/ai_custom/v1/wenxinworkshop/chat/ernie-lite-pro-128k",
}
function_calling_supports = [
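
For context only (not part of this diff): a minimal sketch of how an endpoint table like the one above is presumably consumed at request time. The dict name API_BASES, the helper build_chat_url, and the access_token query parameter are illustrative assumptions; only the URL for "ernie-lite-pro-128k" is taken from the change above.

# Illustrative sketch -- not from the Dify codebase.
from urllib.parse import urlencode

API_BASES = {
    # entry added by this commit
    "ernie-lite-pro-128k": "https://aip.baidubce.com/rpc/2.0/ai_custom/v1/wenxinworkshop/chat/ernie-lite-pro-128k",
}

def build_chat_url(model: str, access_token: str) -> str:
    # Look up the wenxinworkshop endpoint for the model name and append the
    # access token as a query parameter (assumed request shape).
    base = API_BASES[model.lower()]
    return f"{base}?{urlencode({'access_token': access_token})}"

# build_chat_url("ernie-lite-pro-128k", "<token>")
# -> ".../wenxinworkshop/chat/ernie-lite-pro-128k?access_token=<token>"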


@@ -0,0 +1,42 @@
model: ernie-lite-pro-128k
label:
  en_US: Ernie-Lite-Pro-128K
model_type: llm
features:
  - agent-thought
model_properties:
  mode: chat
  context_size: 128000
parameter_rules:
  - name: temperature
    use_template: temperature
    min: 0.1
    max: 1.0
    default: 0.8
  - name: top_p
    use_template: top_p
  - name: min_output_tokens
    label:
      en_US: "Min Output Tokens"
      zh_Hans: "最小输出Token数"
    use_template: max_tokens
    min: 2
    max: 2048
    help:
      zh_Hans: 指定模型最小输出token数
      en_US: Specifies the lower limit on the length of generated results.
  - name: max_output_tokens
    label:
      en_US: "Max Output Tokens"
      zh_Hans: "最大输出Token数"
    use_template: max_tokens
    min: 2
    max: 2048
    default: 2048
    help:
      zh_Hans: 指定模型最大输出token数
      en_US: Specifies the upper limit on the length of generated results. If the generated results are truncated, you can increase this parameter.
  - name: presence_penalty
    use_template: presence_penalty
  - name: frequency_penalty
    use_template: frequency_penalty
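
For reference (not part of the commit): a minimal sketch of how the parameter_rules declared above could constrain a chat request. The helpers clamp and build_payload are hypothetical names, and the payload keys simply mirror the YAML parameter names rather than any confirmed API contract.

# Hypothetical helpers -- illustrate only how the min/max/default bounds in the
# YAML above would bound user-supplied values.
def clamp(value, lo, hi):
    # Keep a value inside the [lo, hi] range declared in the parameter rule.
    return max(lo, min(hi, value))

def build_payload(prompt: str, temperature: float = 0.8, top_p: float = 0.8,
                  max_output_tokens: int = 2048) -> dict:
    return {
        "messages": [{"role": "user", "content": prompt}],
        "temperature": clamp(temperature, 0.1, 1.0),                  # YAML: min 0.1, max 1.0, default 0.8
        "top_p": top_p,                                               # uses the shared top_p template
        "max_output_tokens": int(clamp(max_output_tokens, 2, 2048)),  # YAML: min 2, max 2048, default 2048
    }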