mirror of https://github.com/langgenius/dify
refactor: Remove unused code in large_language_model.py (#5433)
commit 142dc0afd7 (parent 39c14ec7c1)
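The diff removes two unused helpers from `large_language_model.py` — a stop-word truncation snippet and the `_llm_result_to_stream` generator — together with the test loops in the OpenAI and Cohere model-runtime suites that exercised `_llm_result_to_stream`.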
```diff
@@ -489,37 +489,6 @@ if you are not sure about the structure.
-        """Cut off the text as soon as any stop words occur."""
-        return re.split("|".join(stop), text, maxsplit=1)[0]
-
-    def _llm_result_to_stream(self, result: LLMResult) -> Generator:
-        """
-        Transform llm result to stream
-
-        :param result: llm result
-        :return: stream
-        """
-        index = 0
-
-        tool_calls = result.message.tool_calls
-
-        for word in result.message.content:
-            assistant_prompt_message = AssistantPromptMessage(
-                content=word,
-                tool_calls=tool_calls if index == (len(result.message.content) - 1) else []
-            )
-
-            yield LLMResultChunk(
-                model=result.model,
-                prompt_messages=result.prompt_messages,
-                system_fingerprint=result.system_fingerprint,
-                delta=LLMResultChunkDelta(
-                    index=index,
-                    message=assistant_prompt_message,
-                )
-            )
-
-            index += 1
-            time.sleep(0.01)
-
     def get_parameter_rules(self, model: str, credentials: dict) -> list[ParameterRule]:
         """
         Get parameter rules
```
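For context, the removed `_llm_result_to_stream` helper replayed a fully materialized completion as a simulated token stream: one chunk per character, with any tool calls attached only to the final chunk. Below is a minimal standalone sketch of that behavior; it uses simplified stand-in dataclasses (`Message`, `Chunk`) rather than dify's actual `LLMResult`/`LLMResultChunk` models.

```python
import time
from collections.abc import Generator
from dataclasses import dataclass, field


# Simplified stand-ins for dify's result/chunk entities (assumed shapes,
# not the real classes from core.model_runtime.entities).
@dataclass
class Message:
    content: str
    tool_calls: list = field(default_factory=list)


@dataclass
class Chunk:
    index: int
    message: Message


def result_to_stream(message: Message) -> Generator[Chunk, None, None]:
    """Replay a finished completion as a character-by-character stream."""
    last = len(message.content) - 1
    for index, char in enumerate(message.content):
        yield Chunk(
            index=index,
            # Tool calls belong to the whole result, so they ride along
            # only on the final chunk, as the removed helper did.
            message=Message(
                content=char,
                tool_calls=message.tool_calls if index == last else [],
            ),
        )
        time.sleep(0.01)  # throttle to mimic real streaming pacing


if __name__ == "__main__":
    for chunk in result_to_stream(Message(content="Hi!")):
        print(chunk.index, repr(chunk.message.content))
```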
```diff
@@ -156,11 +156,6 @@ def test_invoke_chat_model(setup_openai_mock):
     assert isinstance(result, LLMResult)
     assert len(result.message.content) > 0
 
-    for chunk in model._llm_result_to_stream(result):
-        assert isinstance(chunk, LLMResultChunk)
-        assert isinstance(chunk.delta, LLMResultChunkDelta)
-        assert isinstance(chunk.delta.message, AssistantPromptMessage)
-        assert len(chunk.delta.message.content) > 0 if chunk.delta.finish_reason is None else True
 
 @pytest.mark.parametrize('setup_openai_mock', [['chat']], indirect=True)
 def test_invoke_stream_chat_model(setup_openai_mock):
```
```diff
@@ -136,12 +136,6 @@ def test_invoke_chat_model():
     assert isinstance(result, LLMResult)
     assert len(result.message.content) > 0
 
-    for chunk in model._llm_result_to_stream(result):
-        assert isinstance(chunk, LLMResultChunk)
-        assert isinstance(chunk.delta, LLMResultChunkDelta)
-        assert isinstance(chunk.delta.message, AssistantPromptMessage)
-        assert len(chunk.delta.message.content) > 0 if chunk.delta.finish_reason is None else True
-
 
 def test_invoke_stream_chat_model():
     model = CohereLargeLanguageModel()
```
```diff
@@ -156,12 +156,6 @@ def test_invoke_chat_model(setup_openai_mock):
     assert isinstance(result, LLMResult)
     assert len(result.message.content) > 0
 
-    for chunk in model._llm_result_to_stream(result):
-        assert isinstance(chunk, LLMResultChunk)
-        assert isinstance(chunk.delta, LLMResultChunkDelta)
-        assert isinstance(chunk.delta.message, AssistantPromptMessage)
-        assert len(chunk.delta.message.content) > 0 if chunk.delta.finish_reason is None else True
-
 @pytest.mark.parametrize('setup_openai_mock', [['chat']], indirect=True)
 def test_invoke_chat_model_with_vision(setup_openai_mock):
     model = OpenAILargeLanguageModel()
```