From 91d38a535f9a4254cf3abba3fd00a9c7641aeb2e Mon Sep 17 00:00:00 2001
From: crazywoola <100913391+crazywoola@users.noreply.github.com>
Date: Fri, 21 Jun 2024 16:49:33 +0800
Subject: [PATCH] fix: max_tokens of qwen-plus & qwen-plus-chat (#5480)

---
 .../model_providers/tongyi/llm/qwen-plus-chat.yaml          | 4 ++--
 .../model_runtime/model_providers/tongyi/llm/qwen-plus.yaml | 4 ++--
 2 files changed, 4 insertions(+), 4 deletions(-)

diff --git a/api/core/model_runtime/model_providers/tongyi/llm/qwen-plus-chat.yaml b/api/core/model_runtime/model_providers/tongyi/llm/qwen-plus-chat.yaml
index ae3ec0fc04..5681f5c7b0 100644
--- a/api/core/model_runtime/model_providers/tongyi/llm/qwen-plus-chat.yaml
+++ b/api/core/model_runtime/model_providers/tongyi/llm/qwen-plus-chat.yaml
@@ -22,9 +22,9 @@ parameter_rules:
   - name: max_tokens
     use_template: max_tokens
     type: int
-    default: 1500
+    default: 2000
     min: 1
-    max: 1500
+    max: 2000
     help:
       zh_Hans: 用于指定模型在生成内容时token的最大数量,它定义了生成的上限,但不保证每次都会生成到这个数量。
       en_US: It is used to specify the maximum number of tokens when the model generates content. It defines the upper limit of generation, but does not guarantee that this number will be generated every time.
diff --git a/api/core/model_runtime/model_providers/tongyi/llm/qwen-plus.yaml b/api/core/model_runtime/model_providers/tongyi/llm/qwen-plus.yaml
index bfa04792a0..71dabb55f0 100644
--- a/api/core/model_runtime/model_providers/tongyi/llm/qwen-plus.yaml
+++ b/api/core/model_runtime/model_providers/tongyi/llm/qwen-plus.yaml
@@ -20,9 +20,9 @@ parameter_rules:
   - name: max_tokens
     use_template: max_tokens
     type: int
-    default: 1500
+    default: 2000
     min: 1
-    max: 1500
+    max: 2000
     help:
       zh_Hans: 用于指定模型在生成内容时token的最大数量,它定义了生成的上限,但不保证每次都会生成到这个数量。
       en_US: It is used to specify the maximum number of tokens when the model generates content. It defines the upper limit of generation, but does not guarantee that this number will be generated every time.