Fix: Qwen-vl-plus URL error (#7281)

### What problem does this PR solve?

Fixes the URL error for the Qwen-vl-* models (#7277): these models must be served through DashScope's OpenAI-compatible endpoint, like the reasoning models.
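For context, the four affected vision-language models (qwen-vl-plus, qwen-vl-plus-latest, qwen-vl-max, qwen-vl-max-latest) are reached through DashScope's OpenAI-compatible endpoint, so their chat calls need to go through the OpenAI-style base class rather than the native dashscope code path. A minimal sketch of what the corrected routing is equivalent to, assuming the official `openai` Python SDK (the API key and prompt are placeholders):

```python
from openai import OpenAI

# DashScope's OpenAI-compatible endpoint: the URL this fix routes
# qwen-vl-* models through (the same one used for reasoning models).
client = OpenAI(
    api_key="sk-...",  # placeholder: your DashScope API key
    base_url="https://dashscope.aliyuncs.com/compatible-mode/v1",
)

resp = client.chat.completions.create(
    model="qwen-vl-plus",
    messages=[{"role": "user", "content": "Describe this image in one sentence."}],
)
print(resp.choices[0].message.content)
```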

### Type of change

- [x] Bug Fix (non-breaking change which fixes an issue)
commit 97a13ef1ab (parent 7e1464a950)
Yongteng Lei, 2025-04-25 09:20:10 +08:00, committed by GitHub

```diff
@@ -565,7 +565,7 @@ class QWenChat(Base):
         dashscope.api_key = key
         self.model_name = model_name
-        if self.is_reasoning_model(self.model_name):
+        if self.is_reasoning_model(self.model_name) or self.model_name in ["qwen-vl-plus", "qwen-vl-plus-latest", "qwen-vl-max", "qwen-vl-max-latest"]:
             super().__init__(key, model_name, "https://dashscope.aliyuncs.com/compatible-mode/v1")

     def chat_with_tools(self, system: str, history: list, gen_conf: dict) -> tuple[str, int]:
```
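When the widened condition matches, `super().__init__` re-initializes the instance as an OpenAI-compatible client bound to `compatible-mode/v1`, so the later `super().chat(...)` calls hit the right URL. A sketch of the assumed shape of `Base.__init__` (not the repo's exact code, just the relevant effect):

```python
class Base:
    def __init__(self, key, model_name, base_url):
        # Assumed: the base class wraps an OpenAI-style client bound
        # to whatever base_url the subclass passes in.
        from openai import OpenAI
        self.client = OpenAI(api_key=key, base_url=base_url)
        self.model_name = model_name
```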
```diff
@@ -643,7 +643,7 @@ class QWenChat(Base):
     def chat(self, system, history, gen_conf):
         if "max_tokens" in gen_conf:
             del gen_conf["max_tokens"]
-        if self.is_reasoning_model(self.model_name):
+        if self.is_reasoning_model(self.model_name) or self.model_name in ["qwen-vl-plus", "qwen-vl-plus-latest", "qwen-vl-max", "qwen-vl-max-latest"]:
             return super().chat(system, history, gen_conf)
         stream_flag = str(os.environ.get("QWEN_CHAT_BY_STREAM", "true")).lower() == "true"
```
```diff
@@ -811,7 +811,7 @@ class QWenChat(Base):
     def chat_streamly(self, system, history, gen_conf):
         if "max_tokens" in gen_conf:
             del gen_conf["max_tokens"]
-        if self.is_reasoning_model(self.model_name):
+        if self.is_reasoning_model(self.model_name) or self.model_name in ["qwen-vl-plus", "qwen-vl-plus-latest", "qwen-vl-max", "qwen-vl-max-latest"]:
             return super().chat_streamly(system, history, gen_conf)
         return self._chat_streamly(system, history, gen_conf)
```
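As a possible follow-up, the same four-model list is now repeated in three places; it could be hoisted into a helper alongside `is_reasoning_model`. A sketch of that refactor (the `_VL_MODELS` constant and `is_vl_model` name are hypothetical, not part of this PR):

```python
class QWenChat(Base):
    # Vision-language models that must use the OpenAI-compatible endpoint.
    _VL_MODELS = {"qwen-vl-plus", "qwen-vl-plus-latest", "qwen-vl-max", "qwen-vl-max-latest"}

    @classmethod
    def is_vl_model(cls, model_name: str) -> bool:
        return model_name in cls._VL_MODELS
```

Each of the three conditions would then read `if self.is_reasoning_model(self.model_name) or self.is_vl_model(self.model_name):`.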