From c163521b9e5e1a4dc69dbb3042020990aab1d9b5 Mon Sep 17 00:00:00 2001
From: Richards Tu <142148415+richards199999@users.noreply.github.com>
Date: Mon, 17 Jun 2024 21:40:04 +0800
Subject: [PATCH] Update and fix the model param of Deepseek (#5329)

---
 .../model_providers/deepseek/llm/deepseek-chat.yaml  | 2 +-
 .../model_providers/deepseek/llm/deepseek-coder.yaml | 4 ++--
 2 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/api/core/model_runtime/model_providers/deepseek/llm/deepseek-chat.yaml b/api/core/model_runtime/model_providers/deepseek/llm/deepseek-chat.yaml
index 3a5a63fa61..80607ca9e5 100644
--- a/api/core/model_runtime/model_providers/deepseek/llm/deepseek-chat.yaml
+++ b/api/core/model_runtime/model_providers/deepseek/llm/deepseek-chat.yaml
@@ -23,7 +23,7 @@ parameter_rules:
     type: int
     default: 4096
     min: 1
-    max: 32000
+    max: 4096
     help:
       zh_Hans: 指定生成结果长度的上限。如果生成结果截断，可以调大该参数。
       en_US: Specifies the upper limit on the length of generated results. If the generated results are truncated, you can increase this parameter.
diff --git a/api/core/model_runtime/model_providers/deepseek/llm/deepseek-coder.yaml b/api/core/model_runtime/model_providers/deepseek/llm/deepseek-coder.yaml
index 8f156be101..f6ce775d76 100644
--- a/api/core/model_runtime/model_providers/deepseek/llm/deepseek-coder.yaml
+++ b/api/core/model_runtime/model_providers/deepseek/llm/deepseek-coder.yaml
@@ -7,7 +7,7 @@ features:
   - agent-thought
 model_properties:
   mode: chat
-  context_size: 16000
+  context_size: 32000
 parameter_rules:
   - name: temperature
     use_template: temperature
@@ -22,5 +22,5 @@ parameter_rules:
   - name: max_tokens
     use_template: max_tokens
     min: 1
-    max: 32000
+    max: 4096
     default: 1024