From 57208d8e53345dcb0670e1ee573cfdc351c6cf81 Mon Sep 17 00:00:00 2001
From: Kevin Hu
Date: Wed, 27 Nov 2024 18:06:43 +0800
Subject: [PATCH] Fix batch size issue. (#3675)

### What problem does this PR solve?

#3657

Halves the default embedding `batch_size` from 32 to 16 in every embedding model's `encode`, widens the interrogative-stripping pattern in `rag/nlp/query.py` to also cover 谁/哪位/哪个, and removes the squaring of blended term weights in `rag/nlp/term_weight.py`.

### Type of change

- [x] Bug Fix (non-breaking change which fixes an issue)
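For context, each `encode` implementation slices `texts` into `batch_size`-sized chunks and issues one provider request per chunk, so the default bounds the largest payload a single call carries. A minimal sketch of that pattern follows; the `embed_batch` callable is a hypothetical stand-in for a provider call, not code from this repo:

```python
# Minimal sketch of the batching pattern behind encode().
# `embed_batch` is hypothetical: it stands in for one provider API call.
from typing import Callable, List

def encode_in_batches(texts: List[str],
                      embed_batch: Callable[[List[str]], List[List[float]]],
                      batch_size: int = 16) -> List[List[float]]:
    vectors: List[List[float]] = []
    for i in range(0, len(texts), batch_size):
        # One request per slice: the smaller default (16 instead of 32)
        # keeps each request within provider-side input limits,
        # at the cost of more round trips.
        vectors.extend(embed_batch(texts[i:i + batch_size]))
    return vectors
```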
---
 rag/llm/embedding_model.py | 34 +++++++++++++++++-----------------
 rag/nlp/query.py           |  2 +-
 rag/nlp/term_weight.py     |  4 ++--
 3 files changed, 20 insertions(+), 20 deletions(-)

diff --git a/rag/llm/embedding_model.py b/rag/llm/embedding_model.py
index 5ad6e2a92..b6e2b887c 100644
--- a/rag/llm/embedding_model.py
+++ b/rag/llm/embedding_model.py
@@ -38,7 +38,7 @@ class Base(ABC):
     def __init__(self, key, model_name):
         pass
 
-    def encode(self, texts: list, batch_size=32):
+    def encode(self, texts: list, batch_size=16):
         raise NotImplementedError("Please implement encode method!")
 
     def encode_queries(self, text: str):
@@ -78,7 +78,7 @@ class DefaultEmbedding(Base):
                                               use_fp16=torch.cuda.is_available())
             self._model = DefaultEmbedding._model
 
-    def encode(self, texts: list, batch_size=32):
+    def encode(self, texts: list, batch_size=16):
         texts = [truncate(t, 2048) for t in texts]
         token_count = 0
         for t in texts:
@@ -101,7 +101,7 @@ class OpenAIEmbed(Base):
         self.client = OpenAI(api_key=key, base_url=base_url)
         self.model_name = model_name
 
-    def encode(self, texts: list, batch_size=32):
+    def encode(self, texts: list, batch_size=16):
         texts = [truncate(t, 8191) for t in texts]
         res = self.client.embeddings.create(input=texts,
                                             model=self.model_name)
@@ -123,7 +123,7 @@ class LocalAIEmbed(Base):
         self.client = OpenAI(api_key="empty", base_url=base_url)
         self.model_name = model_name.split("___")[0]
 
-    def encode(self, texts: list, batch_size=32):
+    def encode(self, texts: list, batch_size=16):
         res = self.client.embeddings.create(input=texts, model=self.model_name)
         return (
             np.array([d.embedding for d in res.data]),
@@ -200,7 +200,7 @@ class ZhipuEmbed(Base):
         self.client = ZhipuAI(api_key=key)
         self.model_name = model_name
 
-    def encode(self, texts: list, batch_size=32):
+    def encode(self, texts: list, batch_size=16):
         arr = []
         tks_num = 0
         for txt in texts:
@@ -221,7 +221,7 @@ class OllamaEmbed(Base):
         self.client = Client(host=kwargs["base_url"])
         self.model_name = model_name
 
-    def encode(self, texts: list, batch_size=32):
+    def encode(self, texts: list, batch_size=16):
         arr = []
         tks_num = 0
         for txt in texts:
@@ -252,7 +252,7 @@ class FastEmbed(Base):
         from fastembed import TextEmbedding
         self._model = TextEmbedding(model_name, cache_dir, threads, **kwargs)
 
-    def encode(self, texts: list, batch_size=32):
+    def encode(self, texts: list, batch_size=16):
         # Using the internal tokenizer to encode the texts and get the total
         # number of tokens
         encodings = self._model.model.tokenizer.encode_batch(texts)
@@ -278,7 +278,7 @@ class XinferenceEmbed(Base):
         self.client = OpenAI(api_key=key, base_url=base_url)
         self.model_name = model_name
 
-    def encode(self, texts: list, batch_size=32):
+    def encode(self, texts: list, batch_size=16):
         res = self.client.embeddings.create(input=texts,
                                             model=self.model_name)
         return np.array([d.embedding for d in res.data]
@@ -394,7 +394,7 @@ class MistralEmbed(Base):
         self.client = MistralClient(api_key=key)
         self.model_name = model_name
 
-    def encode(self, texts: list, batch_size=32):
+    def encode(self, texts: list, batch_size=16):
         texts = [truncate(t, 8196) for t in texts]
         res = self.client.embeddings(input=texts,
                                      model=self.model_name)
@@ -418,7 +418,7 @@ class BedrockEmbed(Base):
         self.client = boto3.client(service_name='bedrock-runtime', region_name=self.bedrock_region,
                                    aws_access_key_id=self.bedrock_ak, aws_secret_access_key=self.bedrock_sk)
 
-    def encode(self, texts: list, batch_size=32):
+    def encode(self, texts: list, batch_size=16):
         texts = [truncate(t, 8196) for t in texts]
         embeddings = []
         token_count = 0
@@ -456,7 +456,7 @@ class GeminiEmbed(Base):
         genai.configure(api_key=key)
         self.model_name = 'models/' + model_name
 
-    def encode(self, texts: list, batch_size=32):
+    def encode(self, texts: list, batch_size=16):
         texts = [truncate(t, 2048) for t in texts]
         token_count = sum(num_tokens_from_string(text) for text in texts)
         result = genai.embed_content(
@@ -541,7 +541,7 @@ class CoHereEmbed(Base):
         self.client = Client(api_key=key)
         self.model_name = model_name
 
-    def encode(self, texts: list, batch_size=32):
+    def encode(self, texts: list, batch_size=16):
         res = self.client.embed(
             texts=texts,
             model=self.model_name,
@@ -599,7 +599,7 @@ class SILICONFLOWEmbed(Base):
         self.base_url = base_url
         self.model_name = model_name
 
-    def encode(self, texts: list, batch_size=32):
+    def encode(self, texts: list, batch_size=16):
         payload = {
             "model": self.model_name,
             "input": texts,
@@ -628,7 +628,7 @@ class ReplicateEmbed(Base):
         self.model_name = model_name
         self.client = Client(api_token=key)
 
-    def encode(self, texts: list, batch_size=32):
+    def encode(self, texts: list, batch_size=16):
         res = self.client.run(self.model_name, input={"texts": json.dumps(texts)})
         return np.array(res), sum([num_tokens_from_string(text) for text in texts])
 
@@ -647,7 +647,7 @@ class BaiduYiyanEmbed(Base):
         self.client = qianfan.Embedding(ak=ak, sk=sk)
         self.model_name = model_name
 
-    def encode(self, texts: list, batch_size=32):
+    def encode(self, texts: list, batch_size=16):
         res = self.client.do(model=self.model_name, texts=texts).body
         return (
             np.array([r["embedding"] for r in res["data"]]),
@@ -669,7 +669,7 @@ class VoyageEmbed(Base):
         self.client = voyageai.Client(api_key=key)
         self.model_name = model_name
 
-    def encode(self, texts: list, batch_size=32):
+    def encode(self, texts: list, batch_size=16):
         res = self.client.embed(
             texts=texts, model=self.model_name, input_type="document"
         )
@@ -691,7 +691,7 @@ class HuggingFaceEmbed(Base):
         self.model_name = model_name
         self.base_url = base_url or "http://127.0.0.1:8080"
 
-    def encode(self, texts: list, batch_size=32):
+    def encode(self, texts: list, batch_size=16):
         embeddings = []
         for text in texts:
             response = requests.post(
diff --git a/rag/nlp/query.py b/rag/nlp/query.py
index 3da59d31b..63fed29b8 100644
--- a/rag/nlp/query.py
+++ b/rag/nlp/query.py
@@ -54,7 +54,7 @@ class FulltextQueryer:
     def rmWWW(txt):
         patts = [
             (
-                r"是*(什么样的|哪家|一下|那家|请问|啥样|咋样了|什么时候|何时|何地|何人|是否|是不是|多少|哪里|怎么|哪儿|怎么样|如何|哪些|是啥|啥是|啊|吗|呢|吧|咋|什么|有没有|呀)是*",
+                r"是*(什么样的|哪家|一下|那家|请问|啥样|咋样了|什么时候|何时|何地|何人|是否|是不是|多少|哪里|怎么|哪儿|怎么样|如何|哪些|是啥|啥是|啊|吗|呢|吧|咋|什么|有没有|呀|谁|哪位|哪个)是*",
                 "",
             ),
            (r"(^| )(what|who|how|which|where|why)('re|'s)? ", " "),
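To see what the widened `rmWWW` pattern now strips, here is a standalone repro; the sample query is made up, and only the pattern itself comes from the diff above:

```python
# Standalone repro of the extended pattern: the new alternatives
# 谁 ("who"), 哪位 ("which person"), and 哪个 ("which one") are now
# stripped from Chinese queries before full-text matching.
import re

patt = r"是*(什么样的|哪家|一下|那家|请问|啥样|咋样了|什么时候|何时|何地|何人|是否|是不是|多少|哪里|怎么|哪儿|怎么样|如何|哪些|是啥|啥是|啊|吗|呢|吧|咋|什么|有没有|呀|谁|哪位|哪个)是*"

# "作者是谁" ("who is the author") -> "作者" ("author");
# the interrogative tail "是谁" no longer survives into the query.
print(re.sub(patt, "", "作者是谁"))
```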
", " "), diff --git a/rag/nlp/term_weight.py b/rag/nlp/term_weight.py index 374065a0c..810f6b881 100644 --- a/rag/nlp/term_weight.py +++ b/rag/nlp/term_weight.py @@ -228,7 +228,7 @@ class Dealer: idf2 = np.array([idf(df(t), 1000000000) for t in tks]) wts = (0.3 * idf1 + 0.7 * idf2) * \ np.array([ner(t) * postag(t) for t in tks]) - wts = [math.pow(s, 2) for s in wts] + wts = [s for s in wts] tw = list(zip(tks, wts)) else: for tk in tks: @@ -237,7 +237,7 @@ class Dealer: idf2 = np.array([idf(df(t), 1000000000) for t in tt]) wts = (0.3 * idf1 + 0.7 * idf2) * \ np.array([ner(t) * postag(t) for t in tt]) - wts = [math.pow(s, 2) for s in wts] + wts = [s for s in wts] tw.extend(zip(tt, wts)) S = np.sum([s for _, s in tw])