From 1287558f243035480c7d6e0c105464b2a819dd2c Mon Sep 17 00:00:00 2001
From: Kevin Hu
Date: Wed, 12 Feb 2025 13:15:23 +0800
Subject: [PATCH] Fix xinference chat role order issue. (#4898)

### What problem does this PR solve?

#4831

### Type of change

- [x] Bug Fix (non-breaking change which fixes an issue)
---
 graphrag/light/graph_extractor.py | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/graphrag/light/graph_extractor.py b/graphrag/light/graph_extractor.py
index c81c4726d..5b17cf7b4 100644
--- a/graphrag/light/graph_extractor.py
+++ b/graphrag/light/graph_extractor.py
@@ -94,11 +94,11 @@ class GraphExtractor(Extractor):
         gen_conf = {"temperature": 0.8}
         final_result = self._chat(hint_prompt, [{"role": "user", "content": "Output:"}], gen_conf)
         token_count += num_tokens_from_string(hint_prompt + final_result)
-        history = pack_user_ass_to_openai_messages(hint_prompt, final_result)
+        history = pack_user_ass_to_openai_messages("Output:", final_result, self._continue_prompt)
         for now_glean_index in range(self._max_gleanings):
-            glean_result = self._chat(self._continue_prompt, history, gen_conf)
-            token_count += num_tokens_from_string("\n".join([m["content"] for m in history]) + glean_result + self._continue_prompt)
-            history += pack_user_ass_to_openai_messages(self._continue_prompt, glean_result)
+            glean_result = self._chat(hint_prompt, history, gen_conf)
+            history.extend([{"role": "assistant", "content": glean_result}, {"role": "user", "content": self._continue_prompt}])
+            token_count += num_tokens_from_string("\n".join([m["content"] for m in history]) + hint_prompt + self._continue_prompt)
            final_result += glean_result
            if now_glean_index == self._max_gleanings - 1:
                break
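
Note: below is a minimal sketch of the message ordering the patched gleaning loop appears to produce, assuming the xinference backend rejects chat histories in which two consecutive messages share the same role or that do not end on a user turn. The `build_glean_history` helper and the placeholder strings are hypothetical and only illustrate the alternation; in the actual code the history is built by `pack_user_ass_to_openai_messages` and `history.extend` inside `graph_extractor.py`, with `hint_prompt` kept as the system prompt on every `_chat` call.

```python
# Hypothetical illustration of the role ordering after the fix.
# Placeholder strings stand in for the real prompts in graph_extractor.py.

def build_glean_history(first_answer: str, continue_prompt: str) -> list[dict]:
    # After the first extraction call, the user turn is the short "Output:"
    # cue and the assistant turn is the model's first extraction result,
    # followed by a user turn asking the model to continue gleaning.
    return [
        {"role": "user", "content": "Output:"},
        {"role": "assistant", "content": first_answer},
        {"role": "user", "content": continue_prompt},
    ]

history = build_glean_history("(first extraction result)", "(continue prompt text)")

# Each gleaning round appends one assistant turn followed by one user turn,
# so the history always alternates user -> assistant -> user -> ...
glean_result = "(additional entities found on this pass)"
history.extend([
    {"role": "assistant", "content": glean_result},
    {"role": "user", "content": "(continue prompt text)"},
])

# Sanity check: no two consecutive messages share a role, which strict
# backends such as xinference require.
assert all(a["role"] != b["role"] for a, b in zip(history, history[1:]))
```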