boost highlight performance (#2419)

### What problem does this PR solve?

#2415

### Type of change

- [x] Performance Improvement
Author: Kevin Hu, 2024-09-13 18:10:32 +08:00 (committed by GitHub)
Commit: 54342ae0a2 (parent: bdcf195b20)
2 changed files with 7 additions and 6 deletions

Changed file 1 of 2 (class EsQueryer):

```diff
@@ -50,7 +50,7 @@ class EsQueryer:
         patts = [
             (r"是*(什么样的|哪家|一下|那家|请问|啥样|咋样了|什么时候|何时|何地|何人|是否|是不是|多少|哪里|怎么|哪儿|怎么样|如何|哪些|是啥|啥是|啊|吗|呢|吧|咋|什么|有没有|呀)是*", ""),
             (r"(^| )(what|who|how|which|where|why)('re|'s)? ", " "),
-            (r"(^| )('s|'re|is|are|were|was|do|does|did|don't|doesn't|didn't|has|have|be|there|you|me|your|my|mine|just|please|may|i|should|would|wouldn't|will|won't|done|go|for|with|so|the|a|an|by|i'm|it's|he's|she's|they|they're|you're|as|by|on|in|at|up|out|down) ", " ")
+            (r"(^| )('s|'re|is|are|were|was|do|does|did|don't|doesn't|didn't|has|have|be|there|you|me|your|my|mine|just|please|may|i|should|would|wouldn't|will|won't|done|go|for|with|so|the|a|an|by|i'm|it's|he's|she's|they|they're|you're|as|by|on|in|at|up|out|down|of) ", " ")
         ]
         for r, p in patts:
             txt = re.sub(r, p, txt, flags=re.IGNORECASE)
```
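For context, this pattern list strips question and stop words from the query before it becomes a full-text query, and the hunk simply adds `of` to the English alternation. A rough standalone sketch of the same loop (hypothetical `rm_www` helper, trimmed pattern list):

```python
import re

# Trimmed stand-in for the patts list above; "of" is the newly added entry.
patts = [
    (r"(^| )(what|who|how|which|where|why)('re|'s)? ", " "),
    (r"(^| )(is|are|was|were|the|a|an|by|on|in|at|of) ", " "),
]

def rm_www(txt):
    # Each (pattern, replacement) pair is applied once, case-insensitively,
    # exactly like the `for r, p in patts` loop in the diff.
    for r, p in patts:
        txt = re.sub(r, p, txt, flags=re.IGNORECASE)
    return txt

print(rm_www("what is purpose of RAGFlow "))  # -> " purpose RAGFlow "
```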
```diff
@@ -80,7 +80,7 @@ class EsQueryer:
                 must=Q("query_string", fields=self.flds,
                        type="best_fields", query=" ".join(q),
                        boost=1)#, minimum_should_match=min_match)
-            ), tks
+            ), list(set([t for t in txt.split(" ") if t]))
 
         def need_fine_grained_tokenize(tk):
             if len(tk) < 3:
```
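The second element of the returned tuple is what later drives highlighting. Instead of the tokenizer output `tks`, it is now the deduplicated set of non-empty whitespace tokens of the cleaned query; a tiny illustration with a made-up `txt` value:

```python
# Made-up query text standing in for the cleaned `txt` at this point.
txt = "deep  learning deep retrieval"
keywords = list(set([t for t in txt.split(" ") if t]))
print(keywords)  # e.g. ['retrieval', 'deep', 'learning'] -- order not guaranteed by set()
```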
```diff
@@ -93,8 +93,10 @@ class EsQueryer:
         for tt in self.tw.split(txt)[:256]:  # .split(" "):
             if not tt:
                 continue
+            keywords.append(tt)
             twts = self.tw.weights([tt])
             syns = self.syn.lookup(tt)
+            if syns: keywords.extend(syns)
             logging.info(json.dumps(twts, ensure_ascii=False))
             tms = []
             for tk, w in sorted(twts, key=lambda x: x[1] * -1):
```
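The two added lines collect the coarse token itself plus its synonyms into `keywords`, which the `@@ -147` hunk below then deduplicates before returning. A standalone sketch with the tokenizer output and synonym dictionary stubbed out (both are made-up stand-ins for `self.tw` and `self.syn`):

```python
# Hypothetical synonym table standing in for self.syn.lookup().
synonyms = {"llm": ["large language model"], "rag": ["retrieval augmented generation"]}

def collect_keywords(tokens):
    keywords = []
    for tt in tokens:
        if not tt:
            continue
        keywords.append(tt)            # keep the coarse token itself
        syns = synonyms.get(tt, [])
        if syns:
            keywords.extend(syns)      # and its synonyms, if any
    return list(set(keywords))         # deduplicate, as in the @@ -147 hunk below

print(collect_keywords(["rag", "llm", "rag"]))
# e.g. ['rag', 'llm', 'large language model', 'retrieval augmented generation']
```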
```diff
@@ -147,7 +149,7 @@ class EsQueryer:
         return Q("bool",
                  must=mst,
-                 ), keywords
+                 ), list(set(keywords))
 
     def hybrid_similarity(self, avec, bvecs, atks, btkss, tkweight=0.3,
                           vtweight=0.7):
```

Changed file 2 of 2 (class Dealer):

```diff
@@ -189,10 +189,9 @@ class Dealer:
             txt = d["_source"][fieldnm]
             txt = re.sub(r"[\r\n]", " ", txt, flags=re.IGNORECASE|re.MULTILINE)
             txts = []
+            for w in keywords:
+                txt = re.sub(r"(^|[ .?/'\"\(\)!,:;-])(%s)([ .?/'\"\(\)!,:;-])"%re.escape(w), r"\1<em>\2</em>\3", txt, flags=re.IGNORECASE|re.MULTILINE)
             for t in re.split(r"[.?!;\n]", txt):
-                for w in keywords:
-                    t = re.sub(r"(^|[ .?/'\"\(\)!,:;-])(%s)([ .?/'\"\(\)!,:;-])"%re.escape(w), r"\1<em>\2</em>\3", t, flags=re.IGNORECASE|re.MULTILINE)
                 if not re.search(r"<em>[^<>]+</em>", t, flags=re.IGNORECASE|re.MULTILINE): continue
                 txts.append(t)
             ans[d["_id"]] = "...".join(txts) if txts else "...".join([a for a in list(hlts.items())[0][1]])
```