feat: set default memory messages limit to infinite (#5002)
parent 52585aea74
commit 3929d289e0
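Summary of the change, as reflected in the hunks below: conversation history is no longer capped at 10 messages by default. TokenBufferMemory.get_history_prompt_messages and get_history_prompt_text now take message_limit: Optional[int] = None and fetch the entire conversation when no positive limit is given, PromptTransform falls back to None instead of 10 when no valid memory window is configured, and one model's context_size is raised from 4096 to 16385.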
@@ -1,3 +1,5 @@
+from typing import Optional
+
 from core.app.app_config.features.file_upload.manager import FileUploadConfigManager
 from core.file.message_file_parser import MessageFileParser
 from core.model_manager import ModelInstance
@@ -19,7 +21,7 @@ class TokenBufferMemory:
         self.model_instance = model_instance
 
     def get_history_prompt_messages(self, max_token_limit: int = 2000,
-                                    message_limit: int = 10) -> list[PromptMessage]:
+                                    message_limit: Optional[int] = None) -> list[PromptMessage]:
         """
         Get history prompt messages.
         :param max_token_limit: max token limit
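With the new signature, omitting message_limit (or passing None, zero, or a negative value) removes the cap on how many messages are pulled from the conversation; history is then bounded only by max_token_limit. A minimal, self-contained sketch of the new default semantics; the window helper below is an illustrative stand-in, not Dify code:

from typing import Optional

def window(messages: list[str], message_limit: Optional[int] = None) -> list[str]:
    # Stand-in for the new default: a falsy or non-positive limit means "all".
    if message_limit and message_limit > 0:
        return messages[-message_limit:]
    return messages

history = [f"msg {i}" for i in range(25)]
assert len(window(history)) == 25       # new default: full history
assert len(window(history, 10)) == 10   # old behavior, now explicit opt-in
assert len(window(history, 0)) == 25    # 0 falls through to "all", like None

Note that the truthiness check treats 0 the same as None, so there is no way to request literally zero messages; a caller wanting no history simply skips the memory call.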
@@ -28,10 +30,15 @@ class TokenBufferMemory:
         app_record = self.conversation.app
 
         # fetch limited messages, and return reversed
-        messages = db.session.query(Message).filter(
+        query = db.session.query(Message).filter(
             Message.conversation_id == self.conversation.id,
             Message.answer != ''
-        ).order_by(Message.created_at.desc()).limit(message_limit).all()
+        ).order_by(Message.created_at.desc())
+
+        if message_limit and message_limit > 0:
+            messages = query.limit(message_limit).all()
+        else:
+            messages = query.all()
 
         messages = list(reversed(messages))
         message_file_parser = MessageFileParser(
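The hunk above splits query construction from limiting: the ordered query is built first, and LIMIT is applied only when a positive limit was requested. A self-contained sketch of the same pattern, assuming SQLAlchemy 2.x against an in-memory SQLite database; the Message model here is a stand-in, not Dify's:

from typing import Optional

from sqlalchemy import Integer, String, create_engine
from sqlalchemy.orm import DeclarativeBase, Mapped, Session, mapped_column

class Base(DeclarativeBase):
    pass

class Message(Base):
    __tablename__ = "messages"
    id: Mapped[int] = mapped_column(Integer, primary_key=True)
    answer: Mapped[str] = mapped_column(String)

engine = create_engine("sqlite://")
Base.metadata.create_all(engine)

with Session(engine) as session:
    session.add_all([Message(answer=f"a{i}") for i in range(25)])
    session.commit()

    def fetch(message_limit: Optional[int] = None) -> list[Message]:
        # Build the ordered query first; apply LIMIT only for a positive
        # limit, mirroring the branch introduced in the hunk above.
        query = (session.query(Message)
                 .filter(Message.answer != "")
                 .order_by(Message.id.desc()))
        if message_limit and message_limit > 0:
            return query.limit(message_limit).all()
        return query.all()

    assert len(fetch(10)) == 10  # the old default of 10 is now opt-in
    assert len(fetch()) == 25    # new default None: the full history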
@@ -93,7 +100,7 @@ class TokenBufferMemory:
     def get_history_prompt_text(self, human_prefix: str = "Human",
                                 ai_prefix: str = "Assistant",
                                 max_token_limit: int = 2000,
-                                message_limit: int = 10) -> str:
+                                message_limit: Optional[int] = None) -> str:
         """
         Get history prompt text.
         :param human_prefix: human prefix
@@ -9,7 +9,7 @@ features:
 - stream-tool-call
 model_properties:
   mode: chat
-  context_size: 4096
+  context_size: 16385
 parameter_rules:
 - name: temperature
   use_template: temperature
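For context on the YAML change: 16385 matches the 16,385-token context window OpenAI documents for current gpt-3.5-turbo releases, up from the original 4,096. The bump matters alongside this commit because, with no message-count cap, the token budget becomes the effective limit on history. Illustrative arithmetic; every figure except 16385 is assumed:

CONTEXT_SIZE = 16385        # prompt + completion budget from the YAML above
RESERVED_COMPLETION = 512   # hypothetical tokens kept for the model's answer
SYSTEM_PROMPT = 300         # hypothetical system-prompt cost

history_budget = CONTEXT_SIZE - RESERVED_COMPLETION - SYSTEM_PROMPT
assert history_budget == 15573  # tokens left for conversation history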
@@ -79,5 +79,5 @@ class PromptTransform:
             if (memory_config.window.enabled
                 and memory_config.window.size is not None
                 and memory_config.window.size > 0)
-            else 10
+            else None
         )
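Only the tail of the expression appears in this hunk; assuming the untouched head selects memory_config.window.size, the fallback when no valid window is configured is now None ("no cap") rather than 10. A self-contained sketch with a hypothetical WindowConfig standing in for memory_config.window:

from dataclasses import dataclass
from typing import Optional

@dataclass
class WindowConfig:  # hypothetical stand-in for memory_config.window
    enabled: bool
    size: Optional[int]

def resolve_message_limit(window: WindowConfig) -> Optional[int]:
    return (
        window.size
        if (window.enabled
            and window.size is not None
            and window.size > 0)
        else None  # previously 10; None now means "fetch all messages"
    )

assert resolve_message_limit(WindowConfig(enabled=True, size=5)) == 5
assert resolve_message_limit(WindowConfig(enabled=True, size=0)) is None
assert resolve_message_limit(WindowConfig(enabled=False, size=5)) is None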