mirror of
https://git.mirrors.martin98.com/https://github.com/langgenius/dify.git
synced 2025-08-14 04:05:53 +08:00
fix: max token not exist in generate summary when calc rest tokens (#891)
This commit is contained in:
parent
4f5f9506ab
commit
2f7b234cc5
@@ -51,6 +51,7 @@ class LLMGenerator:
         prompt_with_empty_context = prompt.format(context='')
         prompt_tokens = model_instance.get_num_tokens([PromptMessage(content=prompt_with_empty_context)])
         max_context_token_length = model_instance.model_rules.max_tokens.max
+        max_context_token_length = max_context_token_length if max_context_token_length else 1500
         rest_tokens = max_context_token_length - prompt_tokens - max_tokens - 1

         context = ''
Loading…
x
Reference in New Issue
Block a user