Fix graphrag: "role" user (#2273)

### What problem does this PR solve?

#2270 

### Type of change

- [x] Bug Fix (non-breaking change which fixes an issue)
This commit is contained in:
H 2024-09-06 10:04:01 +08:00 committed by GitHub
parent fd3e55cfcf
commit c6e723f2ee
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
6 changed files with 9 additions and 8 deletions

View File

@ -100,8 +100,8 @@ class Jin10(ComponentBase, ABC):
if self._param.symbols_datatype == "quotes": if self._param.symbols_datatype == "quotes":
for i in response['data']: for i in response['data']:
i['Selling Price'] = i['a'] i['Selling Price'] = i['a']
i['buying price'] = i['b'] i['Buying Price'] = i['b']
i['commodity code'] = i['c'] i['Commodity Code'] = i['c']
i['Stock Exchange'] = i['e'] i['Stock Exchange'] = i['e']
i['Highest Price'] = i['h'] i['Highest Price'] = i['h']
i['Yesterdays Closing Price'] = i['hc'] i['Yesterdays Closing Price'] = i['hc']

View File

@ -170,7 +170,7 @@ class ClaimExtractor:
} }
text = perform_variable_replacements(self._extraction_prompt, variables=variables) text = perform_variable_replacements(self._extraction_prompt, variables=variables)
gen_conf = {"temperature": 0.5} gen_conf = {"temperature": 0.5}
results = self._llm.chat(text, [], gen_conf) results = self._llm.chat(text, [{"role": "user", "content": "Output:"}], gen_conf)
claims = results.strip().removesuffix(completion_delimiter) claims = results.strip().removesuffix(completion_delimiter)
history = [{"role": "system", "content": text}, {"role": "assistant", "content": results}] history = [{"role": "system", "content": text}, {"role": "assistant", "content": results}]

View File

@ -76,7 +76,7 @@ class CommunityReportsExtractor:
text = perform_variable_replacements(self._extraction_prompt, variables=prompt_variables) text = perform_variable_replacements(self._extraction_prompt, variables=prompt_variables)
gen_conf = {"temperature": 0.3} gen_conf = {"temperature": 0.3}
try: try:
response = self._llm.chat(text, [], gen_conf) response = self._llm.chat(text, [{"role": "user", "content": "Output:"}], gen_conf)
token_count += num_tokens_from_string(text + response) token_count += num_tokens_from_string(text + response)
response = re.sub(r"^[^\{]*", "", response) response = re.sub(r"^[^\{]*", "", response)
response = re.sub(r"[^\}]*$", "", response) response = re.sub(r"[^\}]*$", "", response)
@ -125,4 +125,5 @@ class CommunityReportsExtractor:
report_sections = "\n\n".join( report_sections = "\n\n".join(
f"## {finding_summary(f)}\n\n{finding_explanation(f)}" for f in findings f"## {finding_summary(f)}\n\n{finding_explanation(f)}" for f in findings
) )
return f"# {title}\n\n{summary}\n\n{report_sections}"
return f"# {title}\n\n{summary}\n\n{report_sections}"

View File

@ -125,7 +125,7 @@ class EntityResolution:
} }
text = perform_variable_replacements(self._resolution_prompt, variables=variables) text = perform_variable_replacements(self._resolution_prompt, variables=variables)
response = self._llm.chat(text, [], gen_conf) response = self._llm.chat(text, [{"role": "user", "content": "Output:"}], gen_conf)
result = self._process_results(len(candidate_resolution_i[1]), response, result = self._process_results(len(candidate_resolution_i[1]), response,
prompt_variables.get(self._record_delimiter_key, prompt_variables.get(self._record_delimiter_key,
DEFAULT_RECORD_DELIMITER), DEFAULT_RECORD_DELIMITER),

View File

@ -163,7 +163,7 @@ class GraphExtractor:
token_count = 0 token_count = 0
text = perform_variable_replacements(self._extraction_prompt, variables=variables) text = perform_variable_replacements(self._extraction_prompt, variables=variables)
gen_conf = {"temperature": 0.3} gen_conf = {"temperature": 0.3}
response = self._llm.chat(text, [], gen_conf) response = self._llm.chat(text, [{"role": "user", "content": "Output:"}], gen_conf)
token_count = num_tokens_from_string(text + response) token_count = num_tokens_from_string(text + response)
results = response or "" results = response or ""

View File

@ -180,7 +180,7 @@ class MindMapExtractor:
} }
text = perform_variable_replacements(self._mind_map_prompt, variables=variables) text = perform_variable_replacements(self._mind_map_prompt, variables=variables)
gen_conf = {"temperature": 0.5} gen_conf = {"temperature": 0.5}
response = self._llm.chat(text, [], gen_conf) response = self._llm.chat(text, [{"role": "user", "content": "Output:"}], gen_conf)
response = re.sub(r"```[^\n]*", "", response) response = re.sub(r"```[^\n]*", "", response)
print(response) print(response)
print("---------------------------------------------------\n", self._todict(markdown_to_json.dictify(response))) print("---------------------------------------------------\n", self._todict(markdown_to_json.dictify(response)))