Fix xinference chat role order issue. (#4898)
### What problem does this PR solve?

#4831

### Type of change

- [x] Bug Fix (non-breaking change which fixes an issue)
parent 6b389e01b5
commit 1287558f24
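Judging from the diff below (and hedged, since the `_chat` and `pack_user_ass_to_openai_messages` helpers are not shown here): the old gleaning loop called the model with a message history that ended on an `assistant` turn, and OpenAI-compatible backends with strict chat templates, Xinference among them, can reject conversations that do not end on a `user` turn. A minimal illustration of the rejected shape, with placeholder contents:

```python
# Illustration only; the contents are placeholders, the shape is what matters.
# Old flow: when _chat() is called for a gleaning pass, the history sent to
# the backend ends on an assistant message.
history_before_fix = [
    {"role": "user", "content": "<hint_prompt>"},
    {"role": "assistant", "content": "<final_result>"},  # last turn: assistant
]
# A strict chat template expects ... user, assistant, user -> model reply,
# so requesting a completion on this history triggers a role-order error.
```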
```diff
@@ -94,11 +94,11 @@ class GraphExtractor(Extractor):
         gen_conf = {"temperature": 0.8}
         final_result = self._chat(hint_prompt, [{"role": "user", "content": "Output:"}], gen_conf)
         token_count += num_tokens_from_string(hint_prompt + final_result)
-        history = pack_user_ass_to_openai_messages(hint_prompt, final_result)
+        history = pack_user_ass_to_openai_messages("Output:", final_result, self._continue_prompt)
         for now_glean_index in range(self._max_gleanings):
-            glean_result = self._chat(self._continue_prompt, history, gen_conf)
-            token_count += num_tokens_from_string("\n".join([m["content"] for m in history]) + glean_result + self._continue_prompt)
-            history += pack_user_ass_to_openai_messages(self._continue_prompt, glean_result)
+            glean_result = self._chat(hint_prompt, history, gen_conf)
+            history.extend([{"role": "assistant", "content": glean_result}, {"role": "user", "content": self._continue_prompt}])
+            token_count += num_tokens_from_string("\n".join([m["content"] for m in history]) + hint_prompt + self._continue_prompt)
             final_result += glean_result
             if now_glean_index == self._max_gleanings - 1:
                 break
```
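To see why the new sequence passes strict role checking, here is a minimal sketch. It assumes `pack_user_ass_to_openai_messages` assigns alternating `user`/`assistant` roles to its arguments starting with `user`, which is consistent with the call sites above but is an assumption about a helper not shown in this diff:

```python
# Sketch under the assumption stated above; prompt strings are placeholders.

def pack_user_ass_to_openai_messages(*texts):
    """Assumed behavior: alternate 'user'/'assistant' roles, starting with 'user'."""
    roles = ("user", "assistant")
    return [{"role": roles[i % 2], "content": t} for i, t in enumerate(texts)]

final_result = "<first extraction pass>"          # placeholder model output
continue_prompt = "<gleaning prompt>"             # placeholder continue prompt

# After the fix: user -> assistant -> user, so the list ends on a user turn.
history = pack_user_ass_to_openai_messages("Output:", final_result, continue_prompt)

# Each gleaning round appends one assistant turn and one user turn,
# preserving strict alternation for backends like Xinference.
glean_result = "<gleaning pass output>"
history.extend([
    {"role": "assistant", "content": glean_result},
    {"role": "user", "content": continue_prompt},
])

assert all(m["role"] == ("user" if i % 2 == 0 else "assistant")
           for i, m in enumerate(history))
```

Each `_chat` call in the fixed loop therefore sees a history that both alternates strictly and ends on a `user` turn.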