diff --git a/api/core/model_providers/models/llm/wenxin_model.py b/api/core/model_providers/models/llm/wenxin_model.py
index 3a9e534fac..00ddbb82dd 100644
--- a/api/core/model_providers/models/llm/wenxin_model.py
+++ b/api/core/model_providers/models/llm/wenxin_model.py
@@ -18,6 +18,7 @@ class WenxinModel(BaseLLM):
         provider_model_kwargs = self._to_model_kwargs_input(self.model_rules, self.model_kwargs)
         # TODO load price_config from configs(db)
         return Wenxin(
+            model=self.name,
             streaming=self.streaming,
             callbacks=self.callbacks,
             **self.credentials,
diff --git a/api/core/model_providers/providers/wenxin_provider.py b/api/core/model_providers/providers/wenxin_provider.py
index 0def5f15b0..d6d1816323 100644
--- a/api/core/model_providers/providers/wenxin_provider.py
+++ b/api/core/model_providers/providers/wenxin_provider.py
@@ -61,13 +61,18 @@ class WenxinProvider(BaseModelProvider):
         :param model_type:
         :return:
         """
+        model_max_tokens = {
+            'ernie-bot': 4800,
+            'ernie-bot-turbo': 11200,
+        }
+
         if model_name in ['ernie-bot', 'ernie-bot-turbo']:
             return ModelKwargsRules(
                 temperature=KwargRule[float](min=0.01, max=1, default=0.95, precision=2),
                 top_p=KwargRule[float](min=0.01, max=1, default=0.8, precision=2),
                 presence_penalty=KwargRule[float](enabled=False),
                 frequency_penalty=KwargRule[float](enabled=False),
-                max_tokens=KwargRule[int](enabled=False),
+                max_tokens=KwargRule[int](enabled=False, max=model_max_tokens.get(model_name)),
             )
         else:
             return ModelKwargsRules(