From 1a5acf43aa455712e6b2d4303d059a4bb607f701 Mon Sep 17 00:00:00 2001 From: John Wang Date: Thu, 25 May 2023 21:31:11 +0800 Subject: [PATCH] Fix/shared lock (#210) --- .../callback_handler/index_tool_callback_handler.py | 6 +++++- api/core/completion.py | 10 +++++++++- api/core/conversation_message_task.py | 5 ++++- 3 files changed, 18 insertions(+), 3 deletions(-) diff --git a/api/core/callback_handler/index_tool_callback_handler.py b/api/core/callback_handler/index_tool_callback_handler.py index f0c9379413..db430efe08 100644 --- a/api/core/callback_handler/index_tool_callback_handler.py +++ b/api/core/callback_handler/index_tool_callback_handler.py @@ -34,5 +34,9 @@ class DatasetIndexToolCallbackHandler(IndexToolCallbackHandler): db.session.query(DocumentSegment).filter( DocumentSegment.dataset_id == self.dataset_id, DocumentSegment.index_node_id == index_node_id - ).update({DocumentSegment.hit_count: DocumentSegment.hit_count + 1}, synchronize_session=False) + ).update( + {DocumentSegment.hit_count: DocumentSegment.hit_count + 1}, + synchronize_session=False + ) + db.session.commit() diff --git a/api/core/completion.py b/api/core/completion.py index 5e559ac7c7..a4bc5a2498 100644 --- a/api/core/completion.py +++ b/api/core/completion.py @@ -1,14 +1,17 @@ +import logging from typing import Optional, List, Union, Tuple from langchain.callbacks import CallbackManager from langchain.chat_models.base import BaseChatModel from langchain.llms import BaseLLM from langchain.schema import BaseMessage, BaseLanguageModel, HumanMessage +from requests.exceptions import ChunkedEncodingError + from core.constant import llm_constant from core.callback_handler.llm_callback_handler import LLMCallbackHandler from core.callback_handler.std_out_callback_handler import DifyStreamingStdOutCallbackHandler, \ DifyStdOutCallbackHandler -from core.conversation_message_task import ConversationMessageTask, ConversationTaskStoppedException +from core.conversation_message_task import ConversationMessageTask, ConversationTaskStoppedException, PubHandler from core.llm.error import LLMBadRequestError from core.llm.llm_builder import LLMBuilder from core.chain.main_chain_builder import MainChainBuilder @@ -84,6 +87,11 @@ class Completion: ) except ConversationTaskStoppedException: return + except ChunkedEncodingError as e: + # Interrupt by LLM (like OpenAI), handle it. + logging.warning(f'ChunkedEncodingError: {e}') + conversation_message_task.end() + return @classmethod def run_final_llm(cls, tenant_id: str, mode: str, app_model_config: AppModelConfig, query: str, inputs: dict, diff --git a/api/core/conversation_message_task.py b/api/core/conversation_message_task.py index b23d6664bd..dcf0c0b957 100644 --- a/api/core/conversation_message_task.py +++ b/api/core/conversation_message_task.py @@ -171,7 +171,7 @@ class ConversationMessageTask: ) if not by_stopped: - self._pub_handler.pub_end() + self.end() def update_provider_quota(self): llm_provider_service = LLMProviderService( @@ -268,6 +268,9 @@ class ConversationMessageTask: total_price = message_tokens_per_1k * message_unit_price + answer_tokens_per_1k * answer_unit_price return total_price.quantize(decimal.Decimal('0.0000001'), rounding=decimal.ROUND_HALF_UP) + def end(self): + self._pub_handler.pub_end() + class PubHandler: def __init__(self, user: Union[Account | EndUser], task_id: str,