diff --git a/rag/llm/chat_model.py b/rag/llm/chat_model.py
index 40dbff111..2b5ab68dd 100644
--- a/rag/llm/chat_model.py
+++ b/rag/llm/chat_model.py
@@ -102,7 +102,7 @@ class XinferenceChat(Base):
         if not base_url:
             raise ValueError("Local llm url cannot be None")
         if base_url.split("/")[-1] != "v1":
-            self.base_url = os.path.join(base_url, "v1")
+            base_url = os.path.join(base_url, "v1")
         key = "xxx"
         super().__init__(key, model_name, base_url)
 
@@ -373,8 +373,8 @@ class LocalAIChat(Base):
         if not base_url:
             raise ValueError("Local llm url cannot be None")
         if base_url.split("/")[-1] != "v1":
-            self.base_url = os.path.join(base_url, "v1")
-        self.client = OpenAI(api_key="empty", base_url=self.base_url)
+            base_url = os.path.join(base_url, "v1")
+        self.client = OpenAI(api_key="empty", base_url=base_url)
         self.model_name = model_name.split("___")[0]
 
 
diff --git a/rag/llm/embedding_model.py b/rag/llm/embedding_model.py
index 3c3a018d4..3b0ef5f71 100644
--- a/rag/llm/embedding_model.py
+++ b/rag/llm/embedding_model.py
@@ -510,8 +510,8 @@ class LmStudioEmbed(LocalAIEmbed):
         if not base_url:
             raise ValueError("Local llm url cannot be None")
         if base_url.split("/")[-1] != "v1":
-            self.base_url = os.path.join(base_url, "v1")
-        self.client = OpenAI(api_key="lm-studio", base_url=self.base_url)
+            base_url = os.path.join(base_url, "v1")
+        self.client = OpenAI(api_key="lm-studio", base_url=base_url)
         self.model_name = model_name
 
 
@@ -520,6 +520,6 @@ class OpenAI_APIEmbed(OpenAIEmbed):
         if not base_url:
             raise ValueError("url cannot be None")
         if base_url.split("/")[-1] != "v1":
-            self.base_url = os.path.join(base_url, "v1")
+            base_url = os.path.join(base_url, "v1")
         self.client = OpenAI(api_key=key, base_url=base_url)
         self.model_name = model_name.split("___")[0]
\ No newline at end of file