Optimize prompt. (#5541)

### What problem does this PR solve?

#5526

### Type of change

- [x] Performance Improvement
Kevin Hu 2025-03-03 13:12:38 +08:00 committed by GitHub
parent 606ed0c8ab
commit 7a81fa00e9


```diff
@@ -238,7 +238,6 @@ def chat(dialog, messages, stream=True, **kwargs):
     used_token_count, msg = message_fit_in(msg, int(max_tokens * 0.97))
     assert len(msg) >= 2, f"message_fit_in has bug: {msg}"
     prompt = msg[0]["content"]
-    prompt += "\n\n### Query:\n%s" % " ".join(questions)
 
     if "max_tokens" in gen_conf:
         gen_conf["max_tokens"] = min(
```
```diff
@@ -246,7 +245,7 @@ def chat(dialog, messages, stream=True, **kwargs):
             max_tokens - used_token_count)
 
     def decorate_answer(answer):
-        nonlocal prompt_config, knowledges, kwargs, kbinfos, prompt, retrieval_ts
+        nonlocal prompt_config, knowledges, kwargs, kbinfos, prompt, retrieval_ts, questions
         refs = []
         ans = answer.split("</think>")
```
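
The `nonlocal` declaration in `decorate_answer` gains `questions`. Strictly speaking, Python only requires `nonlocal` for names the nested function rebinds (as `prompt +=` does); a name that is only read, like `questions` here, resolves through the enclosing scope anyway, so the extra entry mainly makes the dependency explicit. A self-contained illustration (not ragflow code):

```python
def outer():
    prompt = "system prompt"
    questions = ["q1", "q2"]

    def decorate(answer):
        # `prompt` is rebound below, so it must be declared nonlocal;
        # `questions` is only read, so the closure resolves it either way.
        nonlocal prompt
        prompt += "\n\n### Query:\n%s" % " ".join(questions)
        return answer

    return decorate("ok"), prompt

print(outer())  # ('ok', 'system prompt\n\n### Query:\nq1 q2')
```
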
```diff
@@ -290,6 +289,7 @@ def chat(dialog, messages, stream=True, **kwargs):
         retrieval_time_cost = (retrieval_ts - generate_keyword_ts) * 1000
         generate_result_time_cost = (finish_chat_ts - retrieval_ts) * 1000
 
+        prompt += "\n\n### Query:\n%s" % " ".join(questions)
         prompt = f"{prompt}\n\n - Total: {total_time_cost:.1f}ms\n - Check LLM: {check_llm_time_cost:.1f}ms\n - Create retriever: {create_retriever_time_cost:.1f}ms\n - Bind embedding: {bind_embedding_time_cost:.1f}ms\n - Bind LLM: {bind_llm_time_cost:.1f}ms\n - Tune question: {refine_question_time_cost:.1f}ms\n - Bind reranker: {bind_reranker_time_cost:.1f}ms\n - Generate keyword: {generate_keyword_time_cost:.1f}ms\n - Retrieval: {retrieval_time_cost:.1f}ms\n - Generate answer: {generate_result_time_cost:.1f}ms"
         return {"answer": think+answer, "reference": refs, "prompt": re.sub(r"\n", " \n", prompt), "created_at": time.time()}
```