Fix graphrag: "role" user (#2273)
### What problem does this PR solve?

#2270

### Type of change

- [x] Bug Fix (non-breaking change which fixes an issue)
parent fd3e55cfcf, commit c6e723f2ee
In `Jin10`, the quote field labels get consistent capitalization:

```diff
@@ -100,8 +100,8 @@ class Jin10(ComponentBase, ABC):
             if self._param.symbols_datatype == "quotes":
                 for i in response['data']:
                     i['Selling Price'] = i['a']
-                    i['buying price'] = i['b']
-                    i['commodity code'] = i['c']
+                    i['Buying Price'] = i['b']
+                    i['Commodity Code'] = i['c']
                     i['Stock Exchange'] = i['e']
                     i['Highest Price'] = i['h']
                     i['Yesterday’s Closing Price'] = i['hc']
```
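This hunk is a naming cleanup rather than part of the graphrag fix: the quote fields now all use Title Case. A minimal standalone sketch of the same remapping, with the single-letter source keys taken from the diff and the sample payload invented for illustration:

```python
# A sketch, not ragflow's code: remap Jin10's single-letter quote fields to
# the consistently capitalized labels the fix settles on. Sample row invented.
FIELD_LABELS = {
    "a": "Selling Price",
    "b": "Buying Price",       # was 'buying price' before this commit
    "c": "Commodity Code",     # was 'commodity code' before this commit
    "e": "Stock Exchange",
    "h": "Highest Price",
    "hc": "Yesterday’s Closing Price",
}

def label_quote_row(row: dict) -> dict:
    """Return a copy of one quotes row with human-readable keys added."""
    labeled = dict(row)
    for short, label in FIELD_LABELS.items():
        if short in row:
            labeled[label] = row[short]
    return labeled

print(label_quote_row({"a": 1850.2, "b": 1849.8, "c": "XAU", "e": "COMEX"}))
```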
In `ClaimExtractor`, the empty chat history is replaced with a stub user turn:

```diff
@@ -170,7 +170,7 @@ class ClaimExtractor:
         }
         text = perform_variable_replacements(self._extraction_prompt, variables=variables)
         gen_conf = {"temperature": 0.5}
-        results = self._llm.chat(text, [], gen_conf)
+        results = self._llm.chat(text, [{"role": "user", "content": "Output:"}], gen_conf)
         claims = results.strip().removesuffix(completion_delimiter)
         history = [{"role": "system", "content": text}, {"role": "assistant", "content": results}]
```
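Every graphrag hunk in this commit repeats this one-line change: the history argument to `self._llm.chat` goes from `[]` to a single stub user turn. A plausible reading, sketched below with a hypothetical OpenAI-style message builder (not ragflow's actual LLM wrapper): with an empty history the provider receives a system message and nothing else, which several chat-completion APIs reject or answer erratically, so the `"Output:"` turn guarantees a well-formed conversation.

```python
# Hypothetical OpenAI-style message assembly; ragflow's real LLM wrapper may
# differ, but chat(system, history, gen_conf) plausibly builds a list like this.
def build_messages(system: str, history: list[dict]) -> list[dict]:
    return [{"role": "system", "content": system}] + history

# Before the fix: the provider sees a lone system message.
print(build_messages("...extraction prompt...", []))

# After the fix: a stub user turn completes the conversation.
print(build_messages("...extraction prompt...",
                     [{"role": "user", "content": "Output:"}]))
```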
The same change in `CommunityReportsExtractor`:

```diff
@@ -76,7 +76,7 @@ class CommunityReportsExtractor:
         text = perform_variable_replacements(self._extraction_prompt, variables=prompt_variables)
         gen_conf = {"temperature": 0.3}
         try:
-            response = self._llm.chat(text, [], gen_conf)
+            response = self._llm.chat(text, [{"role": "user", "content": "Output:"}], gen_conf)
             token_count += num_tokens_from_string(text + response)
             response = re.sub(r"^[^\{]*", "", response)
             response = re.sub(r"[^\}]*$", "", response)
```
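The two `re.sub` calls after the chat call trim everything before the first `{` and after the last `}`, so a reply wrapped in prose still parses as JSON. A standalone sketch (the sample reply is invented):

```python
import re

# Invented example of a model reply with chatter around the JSON payload.
response = 'Here is the report:\n{"title": "Community 7", "rating": 8.5} Done.'

response = re.sub(r"^[^\{]*", "", response)  # drop everything before the first '{'
response = re.sub(r"[^\}]*$", "", response)  # drop everything after the last '}'

print(response)  # {"title": "Community 7", "rating": 8.5}
```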
A second `CommunityReportsExtractor` hunk only adds a blank line before the return:

```diff
@@ -125,4 +125,5 @@ class CommunityReportsExtractor:
         report_sections = "\n\n".join(
             f"## {finding_summary(f)}\n\n{finding_explanation(f)}" for f in findings
         )
+
         return f"# {title}\n\n{summary}\n\n{report_sections}"
```
In `EntityResolution`:

```diff
@@ -125,7 +125,7 @@ class EntityResolution:
             }
             text = perform_variable_replacements(self._resolution_prompt, variables=variables)

-            response = self._llm.chat(text, [], gen_conf)
+            response = self._llm.chat(text, [{"role": "user", "content": "Output:"}], gen_conf)
             result = self._process_results(len(candidate_resolution_i[1]), response,
                                            prompt_variables.get(self._record_delimiter_key,
                                                                 DEFAULT_RECORD_DELIMITER),
```
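`_process_results` itself is not shown in the diff; judging from its arguments, it splits the raw reply on a record delimiter before interpreting each resolution record. A hedged sketch of that splitting step (the delimiter value and record format are illustrative guesses, not taken from ragflow):

```python
# Assumed sketch of delimiter-based parsing; ragflow's _process_results and
# its actual record format may differ.
DEFAULT_RECORD_DELIMITER = "##"  # assumed default in the style of GraphRAG prompts

def split_records(response: str,
                  record_delimiter: str = DEFAULT_RECORD_DELIMITER) -> list[str]:
    """One string per candidate-resolution record."""
    return [r.strip() for r in response.split(record_delimiter) if r.strip()]

print(split_records("(1, yes)##(2, no)##(3, yes)"))
```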
In `GraphExtractor`:

```diff
@@ -163,7 +163,7 @@ class GraphExtractor:
         token_count = 0
         text = perform_variable_replacements(self._extraction_prompt, variables=variables)
         gen_conf = {"temperature": 0.3}
-        response = self._llm.chat(text, [], gen_conf)
+        response = self._llm.chat(text, [{"role": "user", "content": "Output:"}], gen_conf)
         token_count = num_tokens_from_string(text + response)

         results = response or ""
```
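Note that `num_tokens_from_string` charges both the prompt and the completion against the token count. A sketch of what such a helper typically looks like, assuming a tiktoken-based implementation (ragflow's own helper may pick its encoding differently):

```python
import tiktoken  # assumed dependency for this sketch

def num_tokens_from_string(string: str, encoding_name: str = "cl100k_base") -> int:
    """Count tokens the way OpenAI-family tokenizers would."""
    return len(tiktoken.get_encoding(encoding_name).encode(string))

# Usage mirroring the hunk: prompt and completion are billed together.
token_count = num_tokens_from_string("...prompt..." + "...model response...")
print(token_count)
```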
And in `MindMapExtractor`:

````diff
@@ -180,7 +180,7 @@ class MindMapExtractor:
         }
         text = perform_variable_replacements(self._mind_map_prompt, variables=variables)
         gen_conf = {"temperature": 0.5}
-        response = self._llm.chat(text, [], gen_conf)
+        response = self._llm.chat(text, [{"role": "user", "content": "Output:"}], gen_conf)
         response = re.sub(r"```[^\n]*", "", response)
         print(response)
         print("---------------------------------------------------\n", self._todict(markdown_to_json.dictify(response)))
```
````
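Before handing the reply to `markdown_to_json.dictify`, the extractor strips any triple-backtick fence markers with the `re.sub` in the hunk above. A small sketch of that cleanup on an invented reply:

````python
import re

# Invented model reply: a mind-map outline wrapped in a code fence.
response = "```markdown\n# Root\n## Branch A\n## Branch B\n```"

# Same cleanup as the hunk: delete each fence marker (and its language tag)
# so markdown_to_json.dictify() sees plain markdown.
response = re.sub(r"```[^\n]*", "", response)
print(response)
````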