feat: add qwen 2.5 models for silicon flow (#3203)

### What problem does this PR solve?

Add Qwen 2.5 models for SiliconFlow: the 7B/14B/32B/72B Instruct chat models, a 72B 128K-context variant, the Math-72B and Coder-7B variants, and the Pro-prefixed 7B and Coder-7B entries.

### Type of change

- [X] New Feature (non-breaking change which adds functionality)
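
The new `llm_name` values are SiliconFlow model IDs, so once these entries are in place a model can be exercised directly. Below is a minimal sketch (illustrative, not part of this PR) that assumes SiliconFlow's OpenAI-compatible endpoint at `https://api.siliconflow.cn/v1` and a `SILICONFLOW_API_KEY` environment variable; the model ID is taken from the entries added in this commit.

```python
# Minimal sketch: call one of the newly added models through SiliconFlow's
# OpenAI-compatible API. The base URL and the SILICONFLOW_API_KEY environment
# variable are assumptions, not part of this PR.
import os

from openai import OpenAI

client = OpenAI(
    api_key=os.environ["SILICONFLOW_API_KEY"],   # assumed env var
    base_url="https://api.siliconflow.cn/v1",    # assumed OpenAI-compatible endpoint
)

resp = client.chat.completions.create(
    model="Qwen/Qwen2.5-7B-Instruct",            # llm_name added in this commit
    messages=[{"role": "user", "content": "Say hello in one sentence."}],
    max_tokens=64,
)
print(resp.choices[0].message.content)
```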
Yangong 2024-11-05 13:58:29 +08:00 committed by GitHub
parent b7b30c4b57
commit 7e89be5ed1


@@ -2017,6 +2017,60 @@
"max_tokens": 32768,
"model_type": "chat"
},
{
"llm_name": "Qwen/Qwen2.5-72B-Instruct-128K",
"tags": "LLM,CHAT,128k",
"max_tokens": 131072,
"model_type": "chat"
},
{
"llm_name": "Qwen/Qwen2.5-72B-Instruct",
"tags": "LLM,CHAT,32k",
"max_tokens": 32768,
"model_type": "chat"
},
{
"llm_name": "Qwen/Qwen2.5-7B-Instruct",
"tags": "LLM,CHAT,32k",
"max_tokens": 32768,
"model_type": "chat"
},
{
"llm_name": "Qwen/Qwen2.5-14B-Instruct",
"tags": "LLM,CHAT,32k",
"max_tokens": 32768,
"model_type": "chat"
},
{
"llm_name": "Qwen/Qwen2.5-32B-Instruct",
"tags": "LLM,CHAT,32k",
"max_tokens": 32768,
"model_type": "chat"
},
{
"llm_name": "Qwen/Qwen2.5-Math-72B-Instruct",
"tags": "LLM,CHAT,Math,4k",
"max_tokens": 4096,
"model_type": "chat"
},
{
"llm_name": "Qwen/Qwen2.5-Coder-7B-Instruct",
"tags": "LLM,CHAT,FIM,Coder,32k",
"max_tokens": 32768,
"model_type": "chat"
},
{
"llm_name": "Pro/Qwen/Qwen2.5-7B-Instruct",
"tags": "LLM,CHAT,32k",
"max_tokens": 32768,
"model_type": "chat"
},
{
"llm_name": "Pro/Qwen/Qwen2.5-Coder-7B-Instruct",
"tags": "LLM,CHAT,FIM,Coder,32k",
"max_tokens": 32768,
"model_type": "chat"
},
{
"llm_name": "01-ai/Yi-1.5-34B-Chat-16K",
"tags": "LLM,CHAT,16k",
@@ -2376,11 +2430,11 @@
"llm": []
},
{
"name": "HuggingFace",
"logo": "",
"tags": "TEXT EMBEDDING",
"status": "1",
"llm": []
}
"name": "HuggingFace",
"logo": "",
"tags": "TEXT EMBEDDING",
"status": "1",
"llm": []
}
]
}
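
A quick consistency check after applying the change: each new entry's context-length tag (`32k`, `128k`, `4k`) should agree with its `max_tokens` value. The sketch below assumes the entries live in `conf/llm_factories.json` under a top-level `factory_llm_infos` list; both names are assumptions, since neither appears in this diff.

```python
# Sanity-check sketch (illustrative): verify that the context-length tag of
# each Qwen2.5 entry matches its max_tokens value. The config path and the
# "factory_llm_infos" key are assumptions.
import json

TAG_TO_TOKENS = {"4k": 4096, "16k": 16384, "32k": 32768, "128k": 131072}

with open("conf/llm_factories.json", encoding="utf-8") as f:  # assumed path
    factories = json.load(f)["factory_llm_infos"]             # assumed key

for factory in factories:
    for llm in factory.get("llm", []):
        if "Qwen2.5" not in llm["llm_name"]:
            continue
        size_tag = llm["tags"].split(",")[-1].lower()  # e.g. "LLM,CHAT,32k" -> "32k"
        expected = TAG_TO_TOKENS.get(size_tag)
        assert expected == llm["max_tokens"], (llm["llm_name"], size_tag, llm["max_tokens"])

print("All Qwen2.5 entries are consistent.")
```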