Mirror of https://git.mirrors.martin98.com/https://github.com/langgenius/dify.git, synced 2025-08-14 22:05:59 +08:00
fix: wenxin error not raise when stream mode (#884)
This commit is contained in:
parent a58f95fa91
commit c4d759dfba
26  api/core/third_party/langchain/llms/wenxin.py (vendored)
--- a/api/core/third_party/langchain/llms/wenxin.py
+++ b/api/core/third_party/langchain/llms/wenxin.py
@@ -3,6 +3,7 @@ from __future__ import annotations
 
 import json
 import logging
+from json import JSONDecodeError
 from typing import (
     Any,
     Dict,
@@ -223,11 +224,24 @@ class Wenxin(LLM):
         for token in self._client.post(request).iter_lines():
             if token:
                 token = token.decode("utf-8")
-                completion = json.loads(token[5:])
-
-                yield GenerationChunk(text=completion['result'])
-                if run_manager:
-                    run_manager.on_llm_new_token(completion['result'])
-
-                if completion['is_end']:
-                    break
+
+                if token.startswith('data:'):
+                    completion = json.loads(token[5:])
+
+                    yield GenerationChunk(text=completion['result'])
+                    if run_manager:
+                        run_manager.on_llm_new_token(completion['result'])
+
+                    if completion['is_end']:
+                        break
+                else:
+                    try:
+                        json_response = json.loads(token)
+                    except JSONDecodeError:
+                        raise ValueError(f"Wenxin Response Error {token}")
+
+                    raise ValueError(
+                        f"Wenxin API {json_response['error_code']}"
+                        f" error: {json_response['error_msg']}, "
+                        f"please confirm if the model you have chosen is already paid for."
+                    )
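For reference, below is a minimal standalone sketch of the streaming logic this patch introduces, assuming only what the diff shows: lines prefixed with 'data:' carry a JSON chunk with 'result' and 'is_end' fields, while any other non-empty line is an error payload with 'error_code' and 'error_msg'. The helper name parse_wenxin_stream and the sample error payload are hypothetical; they only illustrate how an error now surfaces in stream mode instead of being swallowed.

import json
from json import JSONDecodeError


def parse_wenxin_stream(lines):
    # Hypothetical helper mirroring the control flow added by this commit:
    # 'data:' lines are streamed as result chunks; anything else is parsed
    # as an API error payload and raised as ValueError.
    for token in lines:
        if not token:
            continue
        if isinstance(token, bytes):
            token = token.decode("utf-8")
        if token.startswith('data:'):
            completion = json.loads(token[5:])
            yield completion['result']
            if completion['is_end']:
                break
        else:
            try:
                json_response = json.loads(token)
            except JSONDecodeError:
                raise ValueError(f"Wenxin Response Error {token}")
            raise ValueError(
                f"Wenxin API {json_response['error_code']}"
                f" error: {json_response['error_msg']}, "
                f"please confirm if the model you have chosen is already paid for."
            )


if __name__ == "__main__":
    # Hypothetical error payload: it has no 'data:' prefix, so it is raised
    # immediately instead of being mis-parsed as a result chunk.
    error_stream = [b'{"error_code": 17, "error_msg": "daily request limit reached"}']
    try:
        list(parse_wenxin_stream(error_stream))
    except ValueError as exc:
        print(exc)

Before this change, an error line went straight through json.loads(token[5:]) and failed later on the missing 'result' key; with the startswith('data:') branch, the API error is raised right away with its code and message.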