From 0bec6a037c67ca6fdc2600dad8c95ba5f212b749 Mon Sep 17 00:00:00 2001
From: crazywoola <100913391+crazywoola@users.noreply.github.com>
Date: Mon, 9 Sep 2024 19:09:42 +0800
Subject: [PATCH] update qwen-long (#8157)

---
 .../model_runtime/model_providers/tongyi/llm/qwen-long.yaml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/api/core/model_runtime/model_providers/tongyi/llm/qwen-long.yaml b/api/core/model_runtime/model_providers/tongyi/llm/qwen-long.yaml
index b2cf3dd486..33b3435eb6 100644
--- a/api/core/model_runtime/model_providers/tongyi/llm/qwen-long.yaml
+++ b/api/core/model_runtime/model_providers/tongyi/llm/qwen-long.yaml
@@ -24,7 +24,7 @@ parameter_rules:
     type: int
     default: 2000
     min: 1
-    max: 2000
+    max: 6000
     help:
       zh_Hans: 用于指定模型在生成内容时token的最大数量，它定义了生成的上限，但不保证每次都会生成到这个数量。
       en_US: It is used to specify the maximum number of tokens when the model generates content. It defines the upper limit of generation, but does not guarantee that this number will be generated every time.
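
For context, a sketch of the parameter rule this hunk touches, as it would read in qwen-long.yaml after the patch is applied. This is reconstructed from the diff context only; the `- name:` and `use_template:` lines sit above the hunk and are assumptions, not verbatim file content.

  - name: max_tokens            # assumed: this line is outside the hunk context
    use_template: max_tokens    # assumed
    type: int
    default: 2000
    min: 1
    max: 6000                   # raised from 2000 by this patch
    help:                       # zh_Hans / en_US help strings unchanged, as shown in the hunk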