ragflow/rag/llm/__init__.py

#
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from .embedding_model import *
from .chat_model import *
from .cv_model import *
from .rerank_model import *
from .sequence2txt_model import *
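
# Registry of text-embedding providers: maps a provider name to the wrapper
# class that implements it.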
EmbeddingModel = {
"Ollama": OllamaEmbed,
"LocalAI": LocalAIEmbed,
"OpenAI": OpenAIEmbed,
"Azure-OpenAI": AzureEmbed,
"Xinference": XinferenceEmbed,
"Tongyi-Qianwen": QWenEmbed,
"ZHIPU-AI": ZhipuEmbed,
"FastEmbed": FastEmbed,
"Youdao": YoudaoEmbed,
"BaiChuan": BaiChuanEmbed,
"Jina": JinaEmbed,
"BAAI": DefaultEmbedding,
"Mistral": MistralEmbed,
"Bedrock": BedrockEmbed,
"Gemini":GeminiEmbed,
"NVIDIA":NvidiaEmbed
}
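
# Registry of vision / multimodal (image-to-text) providers.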
CvModel = {
"OpenAI": GptV4,
"Azure-OpenAI": AzureGptV4,
"Ollama": OllamaCV,
"Xinference": XinferenceCV,
"Tongyi-Qianwen": QWenCV,
"ZHIPU-AI": Zhipu4V,
"Moonshot": LocalCV,
"Gemini": GeminiCV,
"OpenRouter": OpenRouterCV,
"LocalAI": LocalAICV,
"NVIDIA": NvidiaCV
}
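
# Registry of chat-completion providers.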
ChatModel = {
"OpenAI": GptTurbo,
"Azure-OpenAI": AzureChat,
"ZHIPU-AI": ZhipuChat,
"Tongyi-Qianwen": QWenChat,
"Ollama": OllamaChat,
"LocalAI": LocalAIChat,
"Xinference": XinferenceChat,
"Moonshot": MoonshotChat,
"DeepSeek": DeepSeekChat,
"VolcEngine": VolcEngineChat,
"BaiChuan": BaiChuanChat,
"MiniMax": MiniMaxChat,
"Minimax": MiniMaxChat,
"Mistral": MistralChat,
"Gemini": GeminiChat,
"Bedrock": BedrockChat,
"Groq": GroqChat,
"OpenRouter": OpenRouterChat,
"StepFun": StepFunChat,
"NVIDIA": NvidiaChat
}
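
# Registry of reranking providers.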
RerankModel = {
"BAAI": DefaultRerank,
"Jina": JinaRerank,
"Youdao": YoudaoRerank,
"Xinference": XInferenceRerank,
"NVIDIA":NvidiaRerank
}
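
# Registry of sequence-to-text (audio transcription) providers.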
Seq2txtModel = {
"OpenAI": GPTSeq2txt,
"Tongyi-Qianwen": QWenSeq2txt,
"Ollama": OllamaSeq2txt,
"Azure-OpenAI": AzureSeq2txt,
"Xinference": XinferenceSeq2txt
}
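
# Illustrative usage sketch (not executed here; the constructor and method
# signatures below are assumptions, since each wrapper class defines its own):
#
#     chat_cls = ChatModel["OpenAI"]
#     chat_mdl = chat_cls(key="<api-key>", model_name="gpt-4o")
#     answer, used_tokens = chat_mdl.chat(system_prompt, history, gen_conf)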