Updated Model Information for Tongyi-Qianwen and ZHIPU-AI (#2003)

### What problem does this PR solve?

This PR updates the built-in model lists for two providers. For Tongyi-Qianwen,
it adds the `text-embedding-v3` embedding model (2K max tokens). For ZHIPU-AI,
it adds four chat models — `glm-4-airx` (8K), `glm-4-air` (128K), `glm-4-flash`
(128K), and `glm-4-long` (1M) — and the `embedding-3` embedding model, so users
can select these newer models without manual configuration.

### Type of change

- [X] Bug Fix (non-breaking change which fixes an issue)
This commit is contained in:
Morler 2024-08-20 09:44:15 +08:00 committed by GitHub
parent 8d2f8ed561
commit 83c673e093
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194

View File

@ -104,6 +104,12 @@
"max_tokens": 2048, "max_tokens": 2048,
"model_type": "embedding" "model_type": "embedding"
}, },
{
"llm_name": "text-embedding-v3",
"tags": "TEXT EMBEDDING,2K",
"max_tokens": 2048,
"model_type": "embedding"
},
{ {
"llm_name": "paraformer-realtime-8k-v1", "llm_name": "paraformer-realtime-8k-v1",
"tags": "SPEECH2TEXT", "tags": "SPEECH2TEXT",
@ -131,13 +137,37 @@
"status": "1", "status": "1",
"llm": [ "llm": [
{ {
"llm_name": "glm-3-turbo", "llm_name": "glm-4",
"tags": "LLM,CHAT,", "tags": "LLM,CHAT,",
"max_tokens": 128000, "max_tokens": 128000,
"model_type": "chat" "model_type": "chat"
}, },
{ {
"llm_name": "glm-4", "llm_name": "glm-4-airx",
"tags": "LLM,CHAT,",
"max_tokens": 8000,
"model_type": "chat"
},
{
"llm_name": "glm-4-air",
"tags": "LLM,CHAT,",
"max_tokens": 128000,
"model_type": "chat"
},
{
"llm_name": "glm-4-flash",
"tags": "LLM,CHAT,",
"max_tokens": 128000,
"model_type": "chat"
},
{
"llm_name": "glm-4-long",
"tags": "LLM,CHAT,",
"max_tokens": 1000000,
"model_type": "chat"
},
{
"llm_name": "glm-3-turbo",
"tags": "LLM,CHAT,", "tags": "LLM,CHAT,",
"max_tokens": 128000, "max_tokens": 128000,
"model_type": "chat" "model_type": "chat"
@ -153,6 +183,12 @@
"tags": "TEXT EMBEDDING", "tags": "TEXT EMBEDDING",
"max_tokens": 512, "max_tokens": 512,
"model_type": "embedding" "model_type": "embedding"
},
{
"llm_name": "embedding-3",
"tags": "TEXT EMBEDDING",
"max_tokens": 512,
"model_type": "embedding"
} }
] ]
}, },