Mirror of https://git.mirrors.martin98.com/https://github.com/langgenius/dify.git (synced 2025-08-14 05:16:00 +08:00)
feat: optimize error raise (#820)

Parent: 2f179d61dc
Commit: 1bd0a76a20
@@ -96,4 +96,4 @@ class LLMCallbackHandler(BaseCallbackHandler):
                 )
                 self.conversation_message_task.save_message(llm_message=self.llm_message, by_stopped=True)
         else:
-            logging.exception(error)
+            logging.debug("on_llm_error: %s", error)
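The callback no longer records a full ERROR-level traceback here; after this commit the exception is raised and reported further up the stack, so the handler only leaves a DEBUG trace. A minimal stdlib-only sketch of the difference between the two log calls (illustrative, not Dify code):

    import logging

    logging.basicConfig(level=logging.DEBUG)

    try:
        raise RuntimeError("user stopped generation")
    except RuntimeError as error:
        # Old call: an ERROR-level record plus the full traceback, even for
        # an expected, already-handled condition.
        logging.exception(error)
        # New call: a DEBUG-level record, lazily formatted, and invisible
        # under the INFO-and-above configs typical in production.
        logging.debug("on_llm_error: %s", error)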
@@ -2,6 +2,7 @@ import logging
 
 from langchain.schema import OutputParserException
 
+from core.model_providers.error import LLMError
 from core.model_providers.model_factory import ModelFactory
 from core.model_providers.models.entity.message import PromptMessage, MessageType
 from core.model_providers.models.entity.model_params import ModelKwargs
@@ -120,8 +121,10 @@ class LLMGenerator:
         try:
             output = model_instance.run(prompts)
             questions = output_parser.parse(output.content)
-        except Exception:
-            logging.exception("Error generating suggested questions after answer")
+        except LLMError:
+            questions = []
+        except Exception as e:
+            logging.exception(e)
             questions = []
 
         return questions
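This hunk splits the former catch-all into two tiers: a known provider failure (LLMError) quietly degrades to an empty question list, while an unexpected exception still records a traceback. A self-contained sketch of the pattern, with LLMError taken from the diff and every other name invented for illustration:

    import logging

    class LLMError(Exception):
        """Domain error raised by model providers (class name from the diff)."""

    def suggested_questions(run) -> list:
        # Two-tier handling as in the hunk above: known provider failures
        # degrade quietly to an empty result; unknown bugs keep a traceback.
        try:
            return run()
        except LLMError:
            return []
        except Exception as e:
            logging.exception(e)
            return []

    def flaky():
        raise LLMError("quota exceeded")

    print(suggested_questions(flaky))  # -> []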
@@ -157,10 +160,12 @@ class LLMGenerator:
         try:
             output = model_instance.run(prompts)
             rule_config = output_parser.parse(output.content)
+        except LLMError as e:
+            raise e
         except OutputParserException:
             raise ValueError('Please give a valid input for intended audience or hoping to solve problems.')
-        except Exception:
-            logging.exception("Error generating prompt")
+        except Exception as e:
+            logging.exception(e)
             rule_config = {
                 "prompt": "",
                 "variables": [],
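Here the rule-config path gains an explicit re-raise: provider failures (LLMError) now propagate to the caller instead of being flattened into a default config, so the API layer can report them. A hedged sketch of what a caller can now distinguish (the function body and all names other than LLMError are illustrative stand-ins):

    import logging

    class LLMError(Exception):
        """Provider failure (class name from the diff)."""

    def generate_rule_config(audience: str) -> dict:
        # Illustrative stand-in for the method above: provider failures
        # propagate; anything else is logged and falls back to an empty
        # default config.
        try:
            if not audience:
                raise LLMError("provider unavailable")
            return {"prompt": audience, "variables": []}
        except LLMError as e:
            raise e
        except Exception as e:
            logging.exception(e)
            return {"prompt": "", "variables": []}

    try:
        generate_rule_config("")
    except LLMError as e:
        print("surfaced to the API layer:", e)  # -> provider unavailable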
@@ -283,7 +283,11 @@ class AzureOpenAIProvider(BaseModelProvider):
         if obfuscated:
             credentials['openai_api_key'] = encrypter.obfuscated_token(credentials['openai_api_key'])
 
-            return credentials
+            return {
+                'openai_api_base': credentials['openai_api_base'],
+                'openai_api_key': credentials['openai_api_key'],
+                'base_model_name': credentials['base_model_name']
+            }
         else:
             if hosted_model_providers.azure_openai:
                 return {
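Instead of returning the mutated credentials dict wholesale, the provider now returns an allow-listed copy containing exactly the three keys a caller should see. A small sketch of that allow-list pattern (the key names come from the diff; the rest is illustrative):

    def obfuscated_credentials(credentials: dict) -> dict:
        # Copy out only the keys a caller should see, rather than handing
        # back the internal dict wholesale.
        allowed = ('openai_api_base', 'openai_api_key', 'base_model_name')
        return {k: credentials[k] for k in allowed}

    creds = {
        'openai_api_base': 'https://example.openai.azure.com',
        'openai_api_key': 'sk-***',          # already obfuscated upstream
        'base_model_name': 'gpt-35-turbo',
        'internal_flag': True,               # stays private
    }
    print(obfuscated_credentials(creds))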
@@ -6,6 +6,7 @@ from celery import shared_task
 from werkzeug.exceptions import NotFound
 
 from core.generator.llm_generator import LLMGenerator
+from core.model_providers.error import LLMError
 from extensions.ext_database import db
 from models.model import Conversation, Message
 
@@ -42,5 +43,7 @@ def generate_conversation_summary_task(conversation_id: str):
 
         end_at = time.perf_counter()
         logging.info(click.style('Conversation summary generated: {} latency: {}'.format(conversation_id, end_at - start_at), fg='green'))
-    except Exception:
-        logging.exception("generate conversation summary failed")
+    except LLMError:
+        pass
+    except Exception as e:
+        logging.exception(e)
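The background task applies the same two-tier policy with a stricter first tier: a provider failure in a fire-and-forget summary job is dropped outright, since there is no requester to report it to, while unexpected exceptions keep their traceback for operators. A sketch under those assumptions (names other than LLMError are invented):

    import logging

    class LLMError(Exception):
        """Provider failure (class name from the diff)."""

    def run_summary_task(summarize) -> None:
        # Known provider failure: drop silently. Unexpected exception:
        # record a traceback so operators can investigate.
        try:
            summarize()
        except LLMError:
            pass
        except Exception as e:
            logging.exception(e)

    def summarize():
        raise LLMError("rate limited")

    run_summary_task(summarize)  # completes silently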