fix: #18132 auto-generated conversation name doesn't work with deepseek LLM models (#18646)

Co-authored-by: crazywoola <100913391+crazywoola@users.noreply.github.com>
cooper.wu 2025-05-26 18:04:52 +08:00 committed by GitHub
parent eb26dc3213
commit 2cad98f01f

@@ -51,15 +51,19 @@ class LLMGenerator:
         response = cast(
             LLMResult,
             model_instance.invoke_llm(
-                prompt_messages=list(prompts), model_parameters={"max_tokens": 100, "temperature": 1}, stream=False
+                prompt_messages=list(prompts), model_parameters={"max_tokens": 500, "temperature": 1}, stream=False
             ),
         )
         answer = cast(str, response.message.content)
         cleaned_answer = re.sub(r"^.*(\{.*\}).*$", r"\1", answer, flags=re.DOTALL)
         if cleaned_answer is None:
             return ""
-        result_dict = json.loads(cleaned_answer)
-        answer = result_dict["Your Output"]
+        try:
+            result_dict = json.loads(cleaned_answer)
+            answer = result_dict["Your Output"]
+        except json.JSONDecodeError as e:
+            logging.exception("Failed to generate name after answer, use query instead")
+            answer = query
         name = answer.strip()
         if len(name) > 75:
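
For context: the try/except is needed because re.sub returns its input unchanged when the pattern does not match, so cleaned_answer is never None; a deepseek-style answer that wraps the JSON in reasoning prose (or emits no JSON at all) reaches json.loads as-is and raises. Below is a minimal standalone sketch of the patched parsing path. The helper name extract_conversation_name and the name[:75] + "..." truncation are assumptions for illustration (the hunk is cut off just after the length check); everything else mirrors the diff.

import json
import logging
import re


def extract_conversation_name(answer: str, query: str) -> str:
    # Hypothetical helper mirroring the patched logic above.
    # Keep only the outermost {...} span; re.sub returns the input
    # unchanged when the pattern does not match, so a non-JSON answer
    # can still reach json.loads below.
    cleaned_answer = re.sub(r"^.*(\{.*\}).*$", r"\1", answer, flags=re.DOTALL)
    try:
        result_dict = json.loads(cleaned_answer)
        name = str(result_dict["Your Output"])
    except json.JSONDecodeError:
        # Fall back to the user's query instead of failing the request.
        logging.exception("Failed to generate name after answer, use query instead")
        name = query
    name = name.strip()
    # Assumed truncation; the hunk ends at the length check.
    if len(name) > 75:
        name = name[:75] + "..."
    return name


# A deepseek-style answer that wraps the JSON payload in extra prose:
print(extract_conversation_name(
    'Sure, here it is: {"Your Output": "Weekend trip planning"} Hope that helps!',
    "help me plan a weekend trip",
))
# An answer with no JSON at all now falls back to the query:
print(extract_conversation_name("I could not produce a name.", "help me plan a weekend trip"))

Raising max_tokens from 100 to 500 addresses the same failure mode from the other side: reasoning models spend tokens on preamble before the JSON payload, and a 100-token cap could truncate the output mid-object, which also breaks json.loads.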