From b2ee738bb139e68aef481fb3233b49f185435638 Mon Sep 17 00:00:00 2001 From: johnpccd Date: Fri, 16 Feb 2024 03:00:10 +0100 Subject: [PATCH] Ignore SSE comments to support openrouter streaming (#2432) --- .../model_providers/openai_api_compatible/llm/llm.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/api/core/model_runtime/model_providers/openai_api_compatible/llm/llm.py b/api/core/model_runtime/model_providers/openai_api_compatible/llm/llm.py index ae856c5ce9..cf90633aa6 100644 --- a/api/core/model_runtime/model_providers/openai_api_compatible/llm/llm.py +++ b/api/core/model_runtime/model_providers/openai_api_compatible/llm/llm.py @@ -367,13 +367,16 @@ class OAIAPICompatLargeLanguageModel(_CommonOAI_API_Compat, LargeLanguageModel): for chunk in response.iter_lines(decode_unicode=True, delimiter=delimiter): if chunk: + # Ignore SSE comments (lines starting with ':') + if chunk.startswith(':'): + continue decoded_chunk = chunk.strip().lstrip('data: ').lstrip() chunk_json = None try: chunk_json = json.loads(decoded_chunk) # stream ended except json.JSONDecodeError as e: - logger.error(f"decoded_chunk error,delimiter={delimiter},decoded_chunk={decoded_chunk}") + logger.error(f"decoded_chunk error: {e}, delimiter={delimiter}, decoded_chunk={decoded_chunk}") yield create_final_llm_result_chunk( index=chunk_index + 1, message=AssistantPromptMessage(content=""),