mirror of
https://git.mirrors.martin98.com/https://github.com/infiniflow/ragflow.git
synced 2025-05-11 06:19:08 +08:00

#1432 #1447 This PR adds support for the GROQ LLM (Large Language Model). Groq is an AI solutions company delivering ultra-low latency inference with the first-ever LPU™ Inference Engine. The Groq API enables developers to integrate state-of-the-art LLMs, such as Llama-2 and llama3-70b-8192, into low latency applications with the request limits specified below. Learn more at [groq.com](https://groq.com/). Supported Models | ID | Requests per Minute | Requests per Day | Tokens per Minute | |----------------------|---------------------|------------------|-------------------| | gemma-7b-it | 30 | 14,400 | 15,000 | | gemma2-9b-it | 30 | 14,400 | 15,000 | | llama3-70b-8192 | 30 | 14,400 | 6,000 | | llama3-8b-8192 | 30 | 14,400 | 30,000 | | mixtral-8x7b-32768 | 30 | 14,400 | 5,000 | --------- Co-authored-by: paresh0628 <paresh.tuvoc@gmail.com> Co-authored-by: Kevin Hu <kevinhu.sh@gmail.com>
74 lines
1.8 KiB
Python
#
#  Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
#  Licensed under the Apache License, Version 2.0 (the "License");
#  you may not use this file except in compliance with the License.
#  You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
#  Unless required by applicable law or agreed to in writing, software
#  distributed under the License is distributed on an "AS IS" BASIS,
#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#  See the License for the specific language governing permissions and
#  limitations under the License.
#
from .embedding_model import *
|
|
from .chat_model import *
|
|
from .cv_model import *
|
|
from .rerank_model import *
|
|
|
|
|
|
# Registry mapping an LLM-factory name to its embedding-model implementation.
# Keys must match the factory names stored in the service's LLM configuration.
# NOTE: the Groq entry was removed — `GroqChat` is a chat-completion class
# (imported from .chat_model), not an embedder; Groq belongs in ChatModel.
EmbeddingModel = {
    "Ollama": OllamaEmbed,
    "OpenAI": OpenAIEmbed,
    "Azure-OpenAI": AzureEmbed,
    "Xinference": XinferenceEmbed,
    "Tongyi-Qianwen": QWenEmbed,
    "ZHIPU-AI": ZhipuEmbed,
    "FastEmbed": FastEmbed,
    "Youdao": YoudaoEmbed,
    "BaiChuan": BaiChuanEmbed,
    "Jina": JinaEmbed,
    "BAAI": DefaultEmbedding,
    "Mistral": MistralEmbed,
    "Bedrock": BedrockEmbed,
}
|
|
|
|
|
|
# Registry mapping an LLM-factory name to its vision (image-to-text)
# model implementation. Keys must match the configured factory names.
CvModel = dict([
    ("OpenAI", GptV4),
    ("Azure-OpenAI", AzureGptV4),
    ("Ollama", OllamaCV),
    ("Xinference", XinferenceCV),
    ("Tongyi-Qianwen", QWenCV),
    ("ZHIPU-AI", Zhipu4V),
    ("Moonshot", LocalCV),
])
|
|
|
|
|
|
# Registry mapping an LLM-factory name to its chat-completion implementation.
# Keys must match the factory names stored in the service's LLM configuration.
ChatModel = {
    "OpenAI": GptTurbo,
    "Azure-OpenAI": AzureChat,
    "ZHIPU-AI": ZhipuChat,
    "Tongyi-Qianwen": QWenChat,
    "Ollama": OllamaChat,
    "Xinference": XinferenceChat,
    "Moonshot": MoonshotChat,
    "DeepSeek": DeepSeekChat,
    "VolcEngine": VolcEngineChat,
    "BaiChuan": BaiChuanChat,
    "MiniMax": MiniMaxChat,
    "Mistral": MistralChat,
    "Bedrock": BedrockChat,
    # Groq exposes an OpenAI-compatible chat-completions API; GroqChat is
    # the chat class this PR adds, so it is registered here (not under
    # EmbeddingModel — Groq offers no embeddings endpoint).
    "Groq": GroqChat,
}
|
|
|
|
|
|
# Registry mapping an LLM-factory name to its reranking-model
# implementation. Keys must match the configured factory names.
RerankModel = dict([
    ("BAAI", DefaultRerank),
    ("Jina", JinaRerank),
    ("Youdao", YoudaoRerank),
    ("Xinference", XInferenceRerank),
])
|