refine TTS (#2500)
### What problem does this PR solve?

Refines TTS: the `tts` endpoint now splits the input text on punctuation and streams audio segment by segment, `set_api_key` updates the API key per model row (instead of shadowing its config dict with the loop variable), and `OpenAITTS` falls back to the default `base_url` when an empty one is passed.

### Type of change

- [x] Bug Fix (non-breaking change which fixes an issue)
parent d545633a6c
commit b5d1d2fec4
```diff
@@ -228,7 +228,8 @@ def tts():
     def stream_audio():
         try:
-            for chunk in tts_mdl.tts(text):
-                yield chunk
+            for txt in re.split(r"[,。/《》?;:!\n\r:;]+", text):
+                for chunk in tts_mdl.tts(txt):
+                    yield chunk
         except Exception as e:
             yield ("data:" + json.dumps({"retcode": 500, "retmsg": str(e),
```
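The point of this hunk: instead of feeding the whole text to the TTS model at once, the input is split on CJK and ASCII punctuation so each short segment is synthesized and streamed as soon as it is ready. A minimal sketch of the split behavior, using only the pattern from the diff (the sample text is made up):

```python
import re

# The pattern from the diff: CJK punctuation, slash, angle quotes,
# newlines, and ASCII colon/semicolon.
PUNCT = r"[,。/《》?;:!\n\r:;]+"

text = "你好,世界。今天天气不错!第二段;第三段"
for txt in re.split(PUNCT, text):
    if txt:  # re.split yields "" for adjacent or trailing delimiters
        print(txt)
# -> 你好 / 世界 / 今天天气不错 / 第二段 / 第三段
```

Note that the diff itself does not skip empty segments, so `tts_mdl.tts("")` can still be called when the text ends in a delimiter.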
```diff
@@ -93,24 +93,27 @@ def set_api_key():
     if msg:
         return get_data_error_result(retmsg=msg)
 
-    llm = {
+    llm_config = {
         "api_key": req["api_key"],
         "api_base": req.get("base_url", "")
     }
     for n in ["model_type", "llm_name"]:
         if n in req:
-            llm[n] = req[n]
+            llm_config[n] = req[n]
 
-    if not TenantLLMService.filter_update(
-            [TenantLLM.tenant_id == current_user.id, TenantLLM.llm_factory == factory], llm):
-        for llm in LLMService.query(fid=factory):
+    for llm in LLMService.query(fid=factory):
+        if not TenantLLMService.filter_update(
+                [TenantLLM.tenant_id == current_user.id,
+                 TenantLLM.llm_factory == factory,
+                 TenantLLM.llm_name == llm.llm_name],
+                llm_config):
             TenantLLMService.save(
                 tenant_id=current_user.id,
                 llm_factory=factory,
                 llm_name=llm.llm_name,
                 model_type=llm.model_type,
-                api_key=req["api_key"],
-                api_base=req.get("base_url", "")
+                api_key=llm_config["api_key"],
+                api_base=llm_config["api_base"]
             )
 
     return get_json_result(data=True)
```
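Two things change here. First, the config dict is renamed from `llm` to `llm_config`, so it is no longer shadowed by the loop variable of `for llm in LLMService.query(fid=factory)`. Second, `filter_update` moves inside the loop and also filters on `TenantLLM.llm_name`, so the key is applied per model row, with a fresh row saved for any model that has none yet. A self-contained sketch of that update-or-insert flow; every name below is a hypothetical stand-in, with a plain dict playing the TenantLLM table:

```python
# Hypothetical stand-in: `rows` plays the TenantLLM table,
# keyed by (tenant_id, factory, llm_name).
def set_factory_key(rows, tenant_id, factory, models, llm_config):
    for m in models:                      # every model the factory offers
        key = (tenant_id, factory, m["llm_name"])
        if key in rows:
            rows[key].update(llm_config)  # filter_update hit an existing row
        else:                             # no row yet: save a new one
            rows[key] = {"model_type": m["model_type"], **llm_config}

rows = {}
set_factory_key(rows, "tenant-1", "OpenAI",
                [{"llm_name": "tts-1", "model_type": "tts"}],
                {"api_key": "sk-demo", "api_base": ""})
print(rows)  # one new row carrying the shared api_key
```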
```diff
@@ -161,6 +161,7 @@ class QwenTTS(Base):
 
 class OpenAITTS(Base):
     def __init__(self, key, model_name="tts-1", base_url="https://api.openai.com/v1"):
+        if not base_url: base_url="https://api.openai.com/v1"
         self.api_key = key
         self.model_name = model_name
         self.base_url = base_url
```
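The added guard covers callers that pass an explicit falsy `base_url` (for example `None` or `""` read from a config record): a Python default argument only applies when the argument is omitted entirely. A minimal illustration, assuming nothing beyond the diff:

```python
def make_url(base_url="https://api.openai.com/v1"):
    if not base_url:  # the guard from the diff
        base_url = "https://api.openai.com/v1"
    return base_url

print(make_url())      # default applies
print(make_url(None))  # without the guard: None
print(make_url(""))    # without the guard: ""
```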
```diff
@@ -181,6 +182,6 @@ class OpenAITTS(Base):
 
         if response.status_code != 200:
             raise Exception(f"**Error**: {response.status_code}, {response.text}")
-        for chunk in response.iter_content(chunk_size=1024):
+        for chunk in response.iter_content():
             if chunk:
                 yield chunk
```
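Assuming `response` is a `requests` response opened with `stream=True`, dropping `chunk_size=1024` changes the iteration granularity: `iter_content()` defaults to `chunk_size=1`, so audio bytes are yielded as they arrive instead of waiting for a 1 KB buffer to fill, which lowers time-to-first-audio at the cost of per-chunk overhead. A hedged sketch of the consumer side; the endpoint, headers, and payload are illustrative only:

```python
import requests

# Hypothetical streaming TTS request; URL, headers, and body are made up.
resp = requests.post("https://api.example.com/v1/audio/speech",
                     headers={"Authorization": "Bearer sk-demo"},
                     json={"model": "tts-1", "input": "hello", "voice": "alloy"},
                     stream=True)
resp.raise_for_status()
for chunk in resp.iter_content():  # requests defaults chunk_size to 1 byte
    if chunk:                      # skip keep-alive heartbeats
        pass                       # hand the bytes to the audio sink
```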