diff --git a/api/db/services/dialog_service.py b/api/db/services/dialog_service.py
index 78f18f778..d77d19f20 100644
--- a/api/db/services/dialog_service.py
+++ b/api/db/services/dialog_service.py
@@ -238,7 +238,6 @@ def chat(dialog, messages, stream=True, **kwargs):
     used_token_count, msg = message_fit_in(msg, int(max_tokens * 0.97))
     assert len(msg) >= 2, f"message_fit_in has bug: {msg}"
     prompt = msg[0]["content"]
-    prompt += "\n\n### Query:\n%s" % " ".join(questions)
 
     if "max_tokens" in gen_conf:
         gen_conf["max_tokens"] = min(
@@ -246,7 +245,7 @@ def chat(dialog, messages, stream=True, **kwargs):
             max_tokens - used_token_count)
 
     def decorate_answer(answer):
-        nonlocal prompt_config, knowledges, kwargs, kbinfos, prompt, retrieval_ts
+        nonlocal prompt_config, knowledges, kwargs, kbinfos, prompt, retrieval_ts, questions
         refs = []
         ans = answer.split("</think>")
         think = ""
@@ -290,6 +289,7 @@ def chat(dialog, messages, stream=True, **kwargs):
         retrieval_time_cost = (retrieval_ts - generate_keyword_ts) * 1000
         generate_result_time_cost = (finish_chat_ts - retrieval_ts) * 1000
 
+        prompt += "\n\n### Query:\n%s" % " ".join(questions)
         prompt = f"{prompt}\n\n - Total: {total_time_cost:.1f}ms\n - Check LLM: {check_llm_time_cost:.1f}ms\n - Create retriever: {create_retriever_time_cost:.1f}ms\n - Bind embedding: {bind_embedding_time_cost:.1f}ms\n - Bind LLM: {bind_llm_time_cost:.1f}ms\n - Tune question: {refine_question_time_cost:.1f}ms\n - Bind reranker: {bind_reranker_time_cost:.1f}ms\n - Generate keyword: {generate_keyword_time_cost:.1f}ms\n - Retrieval: {retrieval_time_cost:.1f}ms\n - Generate answer: {generate_result_time_cost:.1f}ms"
         return {"answer": think+answer, "reference": refs, "prompt": re.sub(r"\n", "  \n", prompt), "created_at": time.time()}
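
Reading the diff: the `### Query` suffix is no longer appended before generation (so the question list stops consuming the `message_fit_in` token budget and is not sent to the model); instead it is appended inside `decorate_answer`, right before the timing report, so it only decorates the prompt string returned to the client. Since `decorate_answer` is a closure over `chat`'s locals, a minimal sketch of the closure rule behind the `nonlocal` change may help; `build_report` and `decorate` are illustrative names, not part of the patch:

```python
def build_report(questions):
    prompt = "### System prompt"

    def decorate(answer):
        # Augmented assignment (`prompt += ...`) binds `prompt`, so the
        # closure must declare it nonlocal or Python raises
        # UnboundLocalError. `questions` is only read here, so listing it
        # in `nonlocal` (as the patch does) is for explicitness, not
        # correctness.
        nonlocal prompt
        prompt += "\n\n### Query:\n%s" % " ".join(questions)
        return {"answer": answer, "prompt": prompt}

    return decorate

decorate = build_report(["what is RAG?"])
print(decorate("An answer.")["prompt"])
```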