Fix: URL path join issue. (#8013)

### What problem does this PR solve?

Closes #7980.

Provider base URLs were being extended with `os.path.join`, which applies filesystem path semantics (platform-specific separators, no URL awareness). This PR switches the chat, CV, embedding, and rerank model wrappers to `urllib.parse.urljoin` when appending path segments such as `v1`.

### Type of change

- [x] Bug Fix (non-breaking change which fixes an issue)
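
For context, a minimal sketch (illustrative only, using a made-up local endpoint) of why `urllib.parse.urljoin` behaves better than `os.path.join` here:

```python
import os
from urllib.parse import urljoin

# Hypothetical local endpoint; any of the affected providers would look similar.
base = "http://localhost:9997"

# Old approach: os.path.join is a filesystem helper, so on Windows it joins
# with a backslash and yields something like "http://localhost:9997\v1".
legacy = os.path.join(base, "v1")

# New approach: urljoin follows URL resolution rules on every platform.
print(urljoin("http://localhost:9997", "v1"))              # http://localhost:9997/v1
print(urljoin("http://localhost:9997/", "v1"))             # http://localhost:9997/v1
print(urljoin("http://localhost:9997/xinference/", "v1"))  # http://localhost:9997/xinference/v1
```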
Kevin Hu 2025-06-03 14:18:40 +08:00 committed by GitHub
parent 37075eab98
commit 156290f8d0
4 changed files with 25 additions and 41 deletions

View File

@@ -22,6 +22,7 @@ import re
 import time
 from abc import ABC
 from typing import Any, Protocol
+from urllib.parse import urljoin

 import openai
 import requests
@@ -445,8 +446,7 @@ class XinferenceChat(Base):
     def __init__(self, key=None, model_name="", base_url=""):
         if not base_url:
             raise ValueError("Local llm url cannot be None")
-        if base_url.split("/")[-1] != "v1":
-            base_url = os.path.join(base_url, "v1")
+        base_url = urljoin(base_url, "v1")
         super().__init__(key, model_name, base_url)
@@ -454,8 +454,7 @@ class HuggingFaceChat(Base):
     def __init__(self, key=None, model_name="", base_url=""):
         if not base_url:
             raise ValueError("Local llm url cannot be None")
-        if base_url.split("/")[-1] != "v1":
-            base_url = os.path.join(base_url, "v1")
+        base_url = urljoin(base_url, "v1")
         super().__init__(key, model_name.split("___")[0], base_url)
@@ -463,9 +462,7 @@ class ModelScopeChat(Base):
     def __init__(self, key=None, model_name="", base_url=""):
         if not base_url:
             raise ValueError("Local llm url cannot be None")
-        base_url = base_url.rstrip("/")
-        if base_url.split("/")[-1] != "v1":
-            base_url = os.path.join(base_url, "v1")
+        base_url = urljoin(base_url, "v1")
         super().__init__(key, model_name.split("___")[0], base_url)
@@ -983,8 +980,7 @@ class LocalAIChat(Base):
         if not base_url:
             raise ValueError("Local llm url cannot be None")
-        if base_url.split("/")[-1] != "v1":
-            base_url = os.path.join(base_url, "v1")
+        base_url = urljoin(base_url, "v1")
         self.client = OpenAI(api_key="empty", base_url=base_url)
         self.model_name = model_name.split("___")[0]
@@ -1442,8 +1438,7 @@ class LmStudioChat(Base):
     def __init__(self, key, model_name, base_url):
         if not base_url:
             raise ValueError("Local llm url cannot be None")
-        if base_url.split("/")[-1] != "v1":
-            base_url = os.path.join(base_url, "v1")
+        base_url = urljoin(base_url, "v1")
         super().__init__(key, model_name, base_url)
         self.client = OpenAI(api_key="lm-studio", base_url=base_url)
         self.model_name = model_name
@@ -1542,7 +1537,7 @@ class CoHereChat(Base):
 class LeptonAIChat(Base):
     def __init__(self, key, model_name, base_url=None):
         if not base_url:
-            base_url = os.path.join("https://" + model_name + ".lepton.run", "api", "v1")
+            base_url = urljoin("https://" + model_name + ".lepton.run", "api/v1")
         super().__init__(key, model_name, base_url)
@@ -2016,6 +2011,5 @@ class GPUStackChat(Base):
     def __init__(self, key=None, model_name="", base_url=""):
         if not base_url:
             raise ValueError("Local llm url cannot be None")
-        if base_url.split("/")[-1] != "v1":
-            base_url = os.path.join(base_url, "v1")
+        base_url = urljoin(base_url, "v1")
         super().__init__(key, model_name, base_url)
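
A note on the `LeptonAIChat` change above: because the constructed base URL has no path component, `urljoin` can take `"api/v1"` as a single relative path. A small sketch with a hypothetical model name:

```python
from urllib.parse import urljoin

# Hypothetical model name, for illustration only.
model_name = "llama3-8b"

# The relative path "api/v1" is resolved against the host-only base URL.
print(urljoin("https://" + model_name + ".lepton.run", "api/v1"))
# -> https://llama3-8b.lepton.run/api/v1
```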

View File

@@ -19,6 +19,7 @@ import json
 import os
 from abc import ABC
 from io import BytesIO
+from urllib.parse import urljoin

 import requests
 from ollama import Client
@@ -546,8 +547,7 @@ class LocalAICV(GptV4):
     def __init__(self, key, model_name, base_url, lang="Chinese"):
         if not base_url:
             raise ValueError("Local cv model url cannot be None")
-        if base_url.split("/")[-1] != "v1":
-            base_url = os.path.join(base_url, "v1")
+        base_url = urljoin(base_url, "v1")
         self.client = OpenAI(api_key="empty", base_url=base_url)
         self.model_name = model_name.split("___")[0]
         self.lang = lang
@@ -555,8 +555,7 @@ class LocalAICV(GptV4):
 class XinferenceCV(Base):
     def __init__(self, key, model_name="", lang="Chinese", base_url=""):
-        if base_url.split("/")[-1] != "v1":
-            base_url = os.path.join(base_url, "v1")
+        base_url = urljoin(base_url, "v1")
         self.client = OpenAI(api_key=key, base_url=base_url)
         self.model_name = model_name
         self.lang = lang
@@ -706,11 +705,9 @@ class NvidiaCV(Base):
         self.lang = lang
         factory, llm_name = model_name.split("/")
         if factory != "liuhaotian":
-            self.base_url = os.path.join(base_url, factory, llm_name)
+            self.base_url = urljoin(base_url, f"{factory}/{llm_name}")
         else:
-            self.base_url = os.path.join(
-                base_url, "community", llm_name.replace("-v1.6", "16")
-            )
+            self.base_url = urljoin(f"{base_url}/community", llm_name.replace("-v1.6", "16"))
         self.key = key

     def describe(self, image):
@@ -799,8 +796,7 @@ class LmStudioCV(GptV4):
     def __init__(self, key, model_name, lang="Chinese", base_url=""):
         if not base_url:
             raise ValueError("Local llm url cannot be None")
-        if base_url.split("/")[-1] != "v1":
-            base_url = os.path.join(base_url, "v1")
+        base_url = urljoin(base_url, "v1")
         self.client = OpenAI(api_key="lm-studio", base_url=base_url)
         self.model_name = model_name
         self.lang = lang
@@ -810,8 +806,7 @@ class OpenAI_APICV(GptV4):
     def __init__(self, key, model_name, lang="Chinese", base_url=""):
         if not base_url:
             raise ValueError("url cannot be None")
-        if base_url.split("/")[-1] != "v1":
-            base_url = os.path.join(base_url, "v1")
+        base_url = urljoin(base_url, "v1")
         self.client = OpenAI(api_key=key, base_url=base_url)
         self.model_name = model_name.split("___")[0]
         self.lang = lang
@@ -1032,8 +1027,7 @@ class GPUStackCV(GptV4):
     def __init__(self, key, model_name, lang="Chinese", base_url=""):
         if not base_url:
             raise ValueError("Local llm url cannot be None")
-        if base_url.split("/")[-1] != "v1":
-            base_url = os.path.join(base_url, "v1")
+        base_url = urljoin(base_url, "v1")
         self.client = OpenAI(api_key=key, base_url=base_url)
         self.model_name = model_name
         self.lang = lang

View File

@@ -16,6 +16,8 @@
 import logging
 import re
 import threading
+from urllib.parse import urljoin
+
 import requests
 from huggingface_hub import snapshot_download
 from zhipuai import ZhipuAI
@@ -141,8 +143,7 @@ class LocalAIEmbed(Base):
     def __init__(self, key, model_name, base_url):
         if not base_url:
             raise ValueError("Local embedding model url cannot be None")
-        if base_url.split("/")[-1] != "v1":
-            base_url = os.path.join(base_url, "v1")
+        base_url = urljoin(base_url, "v1")
         self.client = OpenAI(api_key="empty", base_url=base_url)
         self.model_name = model_name.split("___")[0]
@@ -322,8 +323,7 @@ class FastEmbed(DefaultEmbedding):
 class XinferenceEmbed(Base):
     def __init__(self, key, model_name="", base_url=""):
-        if base_url.split("/")[-1] != "v1":
-            base_url = os.path.join(base_url, "v1")
+        base_url = urljoin(base_url, "v1")
         self.client = OpenAI(api_key=key, base_url=base_url)
         self.model_name = model_name
@@ -598,8 +598,7 @@ class LmStudioEmbed(LocalAIEmbed):
     def __init__(self, key, model_name, base_url):
         if not base_url:
             raise ValueError("Local llm url cannot be None")
-        if base_url.split("/")[-1] != "v1":
-            base_url = os.path.join(base_url, "v1")
+        base_url = urljoin(base_url, "v1")
         self.client = OpenAI(api_key="lm-studio", base_url=base_url)
         self.model_name = model_name
@@ -608,8 +607,7 @@ class OpenAI_APIEmbed(OpenAIEmbed):
     def __init__(self, key, model_name, base_url):
         if not base_url:
             raise ValueError("url cannot be None")
-        if base_url.split("/")[-1] != "v1":
-            base_url = os.path.join(base_url, "v1")
+        base_url = urljoin(base_url, "v1")
         self.client = OpenAI(api_key=key, base_url=base_url)
         self.model_name = model_name.split("___")[0]
@@ -833,8 +831,7 @@ class GPUStackEmbed(OpenAIEmbed):
     def __init__(self, key, model_name, base_url):
         if not base_url:
             raise ValueError("url cannot be None")
-        if base_url.split("/")[-1] != "v1":
-            base_url = os.path.join(base_url, "v1")
+        base_url = urljoin(base_url, "v1")
         self.client = OpenAI(api_key=key, base_url=base_url)
         self.model_name = model_name

View File

@@ -296,12 +296,11 @@ class NvidiaRerank(Base):
         self.model_name = model_name
         if self.model_name == "nvidia/nv-rerankqa-mistral-4b-v3":
-            self.base_url = os.path.join(
-                base_url, "nv-rerankqa-mistral-4b-v3", "reranking"
+            self.base_url = urljoin(base_url, "nv-rerankqa-mistral-4b-v3/reranking"
             )
         if self.model_name == "nvidia/rerank-qa-mistral-4b":
-            self.base_url = os.path.join(base_url, "reranking")
+            self.base_url = urljoin(base_url, "reranking")
             self.model_name = "nv-rerank-qa-mistral-4b:1"
         self.headers = {
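
One caveat worth noting for the multi-segment joins in `NvidiaCV` and `NvidiaRerank` above: with `urljoin`, the result depends on whether the base URL's path ends with a slash. A minimal sketch with a made-up base URL:

```python
from urllib.parse import urljoin

# Hypothetical rerank base URL; the real value comes from the configured endpoint.
base = "https://ai.example.com/v1/retrieval/"

# With a trailing slash, the relative path is appended to the existing path.
print(urljoin(base, "nv-rerankqa-mistral-4b-v3/reranking"))
# -> https://ai.example.com/v1/retrieval/nv-rerankqa-mistral-4b-v3/reranking

# Without a trailing slash, the last path segment of the base is replaced.
print(urljoin("https://ai.example.com/v1/retrieval", "reranking"))
# -> https://ai.example.com/v1/reranking
```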