Integrates LLM Azure OpenAI (#1318)
### What problem does this PR solve?

feat: Integrates LLM Azure OpenAI #716

### Type of change

- [x] New Feature (non-breaking change which adds functionality)

### Other

This PR contains only the back-end code; the front end still needs to provide the Azure OpenAI model-addition form.

#### Required parameters

- base_url
- api_key

---------

Co-authored-by: yonghui li <yonghui.li@bondex.com.cn>
This commit is contained in: parent dec3bf7503, commit a6765e9ca4
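Note: the two required parameters above are all the back end needs; every new Azure class in this diff feeds them into the same client constructor. A minimal sketch with placeholder values (resource name and key are hypothetical):

```python
from openai.lib.azure import AzureOpenAI

# base_url is the Azure OpenAI endpoint of the resource, api_key its access key;
# api_version is pinned to "2024-02-01" throughout this PR.
client = AzureOpenAI(
    api_key="<AZURE_API_KEY>",                               # api_key from the form
    azure_endpoint="https://<resource>.openai.azure.com/",   # base_url from the form
    api_version="2024-02-01",
)
```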
@@ -165,6 +165,11 @@ factory_infos = [{
    "logo": "",
    "tags": "LLM,TEXT EMBEDDING",
    "status": "1",
},{
    "name": "Azure-OpenAI",
    "logo": "",
    "tags": "LLM,TEXT EMBEDDING,SPEECH2TEXT,MODERATION",
    "status": "1",
}
# {
#     "name": "文心一言",
@@ -649,6 +654,83 @@ def init_llm_factory():
            "max_tokens": 8192,
            "model_type": LLMType.EMBEDDING
        },
        # ------------------------ Azure OpenAI -----------------------
        # Please ensure the llm_name is the same as the Azure OpenAI
        # deployment name (e.g., azure-gpt-4o), and that the llm_name
        # differs from the OpenAI llm_name.
        #
        # Each model must be deployed in the Azure OpenAI service; otherwise,
        # you will receive the error message 'The API deployment for
        # this resource does not exist'.
        {
            "fid": factory_infos[15]["name"],
            "llm_name": "azure-gpt-4o",
            "tags": "LLM,CHAT,128K",
            "max_tokens": 128000,
            "model_type": LLMType.CHAT.value + "," + LLMType.IMAGE2TEXT.value
        }, {
            "fid": factory_infos[15]["name"],
            "llm_name": "azure-gpt-35-turbo",
            "tags": "LLM,CHAT,4K",
            "max_tokens": 4096,
            "model_type": LLMType.CHAT.value
        }, {
            "fid": factory_infos[15]["name"],
            "llm_name": "azure-gpt-35-turbo-16k",
            "tags": "LLM,CHAT,16k",
            "max_tokens": 16385,
            "model_type": LLMType.CHAT.value
        }, {
            "fid": factory_infos[15]["name"],
            "llm_name": "azure-text-embedding-ada-002",
            "tags": "TEXT EMBEDDING,8K",
            "max_tokens": 8191,
            "model_type": LLMType.EMBEDDING.value
        }, {
            "fid": factory_infos[15]["name"],
            "llm_name": "azure-text-embedding-3-small",
            "tags": "TEXT EMBEDDING,8K",
            "max_tokens": 8191,
            "model_type": LLMType.EMBEDDING.value
        }, {
            "fid": factory_infos[15]["name"],
            "llm_name": "azure-text-embedding-3-large",
            "tags": "TEXT EMBEDDING,8K",
            "max_tokens": 8191,
            "model_type": LLMType.EMBEDDING.value
        }, {
            "fid": factory_infos[15]["name"],
            "llm_name": "azure-whisper-1",
            "tags": "SPEECH2TEXT",
            "max_tokens": 25 * 1024 * 1024,
            "model_type": LLMType.SPEECH2TEXT.value
        }, {
            "fid": factory_infos[15]["name"],
            "llm_name": "azure-gpt-4",
            "tags": "LLM,CHAT,8K",
            "max_tokens": 8191,
            "model_type": LLMType.CHAT.value
        }, {
            "fid": factory_infos[15]["name"],
            "llm_name": "azure-gpt-4-turbo",
            "tags": "LLM,CHAT,8K",
            "max_tokens": 8191,
            "model_type": LLMType.CHAT.value
        }, {
            "fid": factory_infos[15]["name"],
            "llm_name": "azure-gpt-4-32k",
            "tags": "LLM,CHAT,32K",
            "max_tokens": 32768,
            "model_type": LLMType.CHAT.value
        }, {
            "fid": factory_infos[15]["name"],
            "llm_name": "azure-gpt-4-vision-preview",
            "tags": "LLM,CHAT,IMAGE2TEXT",
            "max_tokens": 765,
            "model_type": LLMType.IMAGE2TEXT.value
        },

    ]
    for info in factory_infos:
        try:
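The comment in this hunk matters in practice: the `llm_name` stored here is passed verbatim as the `model` argument to the Azure client, so it must match a deployment created in the Azure OpenAI resource. A hedged, self-contained sketch of that constraint (resource name, key, and deployment name are placeholders):

```python
from openai.lib.azure import AzureOpenAI

client = AzureOpenAI(api_key="<AZURE_API_KEY>",
                     azure_endpoint="https://<resource>.openai.azure.com/",
                     api_version="2024-02-01")

# Succeeds only if a deployment literally named "azure-gpt-4o" exists in the
# Azure OpenAI resource; otherwise the service replies with
# "The API deployment for this resource does not exist".
resp = client.chat.completions.create(
    model="azure-gpt-4o",                                    # llm_name == deployment name
    messages=[{"role": "user", "content": "ping"}],
)
print(resp.choices[0].message.content)
```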
@@ -69,6 +69,12 @@ default_llm = {
        "image2text_model": "gpt-4-vision-preview",
        "asr_model": "whisper-1",
    },
    "Azure-OpenAI": {
        "chat_model": "azure-gpt-35-turbo",
        "embedding_model": "azure-text-embedding-ada-002",
        "image2text_model": "azure-gpt-4-vision-preview",
        "asr_model": "azure-whisper-1",
    },
    "ZHIPU-AI": {
        "chat_model": "glm-3-turbo",
        "embedding_model": "embedding-2",
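These are the model names a tenant presumably gets by default when Azure-OpenAI is selected as a factory; a minimal lookup sketch against the `default_llm` mapping shown above:

```python
# Resolve the default chat and embedding model names for the new factory.
factory = "Azure-OpenAI"
chat_name = default_llm[factory]["chat_model"]           # "azure-gpt-35-turbo"
embd_name = default_llm[factory]["embedding_model"]      # "azure-text-embedding-ada-002"
```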
@@ -22,6 +22,7 @@ from .rerank_model import *
EmbeddingModel = {
    "Ollama": OllamaEmbed,
    "OpenAI": OpenAIEmbed,
    "Azure-OpenAI": AzureEmbed,
    "Xinference": XinferenceEmbed,
    "Tongyi-Qianwen": QWenEmbed,
    "ZHIPU-AI": ZhipuEmbed,
@@ -36,6 +37,7 @@ EmbeddingModel = {

CvModel = {
    "OpenAI": GptV4,
    "Azure-OpenAI": AzureGptV4,
    "Ollama": OllamaCV,
    "Xinference": XinferenceCV,
    "Tongyi-Qianwen": QWenCV,
@@ -46,6 +48,7 @@ CvModel = {

ChatModel = {
    "OpenAI": GptTurbo,
    "Azure-OpenAI": AzureChat,
    "ZHIPU-AI": ZhipuChat,
    "Tongyi-Qianwen": QWenChat,
    "Ollama": OllamaChat,
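With "Azure-OpenAI" registered in each dictionary, the rest of RAGFlow can stay factory-agnostic: it looks the wrapper class up by factory name and instantiates it with the stored credentials. A hedged dispatch sketch, with placeholder key and endpoint:

```python
factory = "Azure-OpenAI"

# The same lookup pattern applies to ChatModel, EmbeddingModel, and CvModel.
chat_mdl = ChatModel[factory](
    "<AZURE_API_KEY>",
    "azure-gpt-35-turbo",                                    # must equal the Azure deployment name
    base_url="https://<resource>.openai.azure.com/",
)
embd_mdl = EmbeddingModel[factory](
    "<AZURE_API_KEY>",
    "azure-text-embedding-ada-002",
    base_url="https://<resource>.openai.azure.com/",
)
```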
@@ -13,6 +13,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
from openai.lib.azure import AzureOpenAI
from zhipuai import ZhipuAI
from dashscope import Generation
from abc import ABC
@@ -94,6 +95,11 @@ class DeepSeekChat(Base):
        if not base_url: base_url="https://api.deepseek.com/v1"
        super().__init__(key, model_name, base_url)


class AzureChat(Base):
    def __init__(self, key, model_name, **kwargs):
        self.client = AzureOpenAI(api_key=key, azure_endpoint=kwargs["base_url"], api_version="2024-02-01")
        self.model_name = model_name


class BaiChuanChat(Base):
    def __init__(self, key, model_name="Baichuan3-Turbo", base_url="https://api.baichuan-ai.com/v1"):
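AzureChat only swaps the underlying client; the conversation logic is assumed to come from the OpenAI-compatible Base class in this module, whose chat() forwards to self.client.chat.completions.create. A hedged usage sketch with placeholder credentials, assuming a Base.chat(system, history, gen_conf) signature:

```python
mdl = AzureChat(
    "<AZURE_API_KEY>",
    "azure-gpt-35-turbo",                                    # Azure deployment name
    base_url="https://<resource>.openai.azure.com/",
)
answer, used_tokens = mdl.chat(
    system="You are a helpful assistant.",
    history=[{"role": "user", "content": "What is RAGFlow?"}],
    gen_conf={"temperature": 0.7, "max_tokens": 256},
)
```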
@@ -13,6 +13,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
from openai.lib.azure import AzureOpenAI
from zhipuai import ZhipuAI
import io
from abc import ABC
@@ -87,6 +88,25 @@ class GptV4(Base):
        )
        return res.choices[0].message.content.strip(), res.usage.total_tokens


class AzureGptV4(Base):
    def __init__(self, key, model_name, lang="Chinese", **kwargs):
        self.client = AzureOpenAI(api_key=key, azure_endpoint=kwargs["base_url"], api_version="2024-02-01")
        self.model_name = model_name
        self.lang = lang

    def describe(self, image, max_tokens=300):
        b64 = self.image2base64(image)
        prompt = self.prompt(b64)
        for i in range(len(prompt)):
            for c in prompt[i]["content"]:
                if "text" in c: c["type"] = "text"

        res = self.client.chat.completions.create(
            model=self.model_name,
            messages=prompt,
            max_tokens=max_tokens,
        )
        return res.choices[0].message.content.strip(), res.usage.total_tokens


class QWenCV(Base):
    def __init__(self, key, model_name="qwen-vl-chat-v1", lang="Chinese", **kwargs):
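For the vision path, image2base64() and prompt() are helpers inherited from Base in this module. The sketch below is a hedged usage example that assumes describe() accepts raw image bytes, as the neighboring CV wrappers appear to; file name, key, and endpoint are placeholders:

```python
with open("sample.png", "rb") as f:
    binary = f.read()

cv_mdl = AzureGptV4(
    "<AZURE_API_KEY>",
    "azure-gpt-4-vision-preview",                            # Azure deployment name
    lang="English",
    base_url="https://<resource>.openai.azure.com/",
)
description, used_tokens = cv_mdl.describe(binary, max_tokens=300)
```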
@@ -18,6 +18,7 @@ from typing import Optional
import threading
import requests
from huggingface_hub import snapshot_download
from openai.lib.azure import AzureOpenAI
from zhipuai import ZhipuAI
import os
from abc import ABC
@@ -110,6 +111,11 @@ class OpenAIEmbed(Base):
        return np.array(res.data[0].embedding), res.usage.total_tokens


class AzureEmbed(Base):
    def __init__(self, key, model_name, **kwargs):
        self.client = AzureOpenAI(api_key=key, azure_endpoint=kwargs["base_url"], api_version="2024-02-01")
        self.model_name = model_name


class BaiChuanEmbed(OpenAIEmbed):
    def __init__(self, key,
                 model_name='Baichuan-Text-Embedding',
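AzureEmbed likewise only replaces the client: the wrapped AzureOpenAI instance exposes the OpenAI-compatible embeddings API. A hedged sketch of embedding one text against the deployment (whether the surrounding Base class already supplies encode()/encode_queries() is not shown in this diff, so the client is used directly; key and endpoint are placeholders):

```python
emb = AzureEmbed(
    "<AZURE_API_KEY>",
    "azure-text-embedding-ada-002",                          # Azure deployment name
    base_url="https://<resource>.openai.azure.com/",
)
res = emb.client.embeddings.create(input=["What is RAGFlow?"], model=emb.model_name)
vector = res.data[0].embedding                               # list[float]
used_tokens = res.usage.total_tokens
```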