fix disable and enable llm setting in dialog (#616)

### What problem does this PR solve?
#614 

### Type of change

- [x] Bug Fix (non-breaking change which fixes an issue)

KevinHuSh committed 2024-04-30 11:04:14 +08:00 (via GitHub)
parent 4c1476032d
commit 674b3aeafd
5 changed files with 29 additions and 22 deletions

```diff
@@ -35,13 +35,7 @@ def set_dialog():
     top_n = req.get("top_n", 6)
     similarity_threshold = req.get("similarity_threshold", 0.1)
     vector_similarity_weight = req.get("vector_similarity_weight", 0.3)
-    llm_setting = req.get("llm_setting", {
-        "temperature": 0.1,
-        "top_p": 0.3,
-        "frequency_penalty": 0.7,
-        "presence_penalty": 0.4,
-        "max_tokens": 215
-    })
+    llm_setting = req.get("llm_setting", {})
     default_prompt = {
         "system": """你是一个智能助手,请总结知识库的内容来回答问题,请列举知识库中的数据详细回答。当所有知识库内容都与问题无关时,你的回答必须包括“知识库中未找到您要的答案!”这句话。回答需要考虑聊天历史。
 以下是知识库
```

(The Chinese system prompt reads, roughly: "You are an intelligent assistant. Please summarize the content of the knowledge base to answer the question, citing the knowledge-base data in detail. When none of the knowledge-base content is relevant to the question, your answer must include the sentence 'No answer was found in the knowledge base!' The answer should take the chat history into account." The final context line begins: "The following is the knowledge base".)

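This hunk is the core of the fix: `llm_setting` now defaults to an empty dict instead of five hard-coded sampler values, so settings a user disables in the dialog stay disabled. A minimal sketch contrasting the two behaviors (the request dict is illustrative; `OLD_DEFAULTS` mirrors the removed block):

```python
# Hypothetical sketch of what changes when a dialog is saved with every
# LLM setting disabled, i.e. the request carries no "llm_setting" key.
OLD_DEFAULTS = {"temperature": 0.1, "top_p": 0.3, "frequency_penalty": 0.7,
                "presence_penalty": 0.4, "max_tokens": 215}

req = {}  # request body with no "llm_setting" key at all

old_llm_setting = req.get("llm_setting", OLD_DEFAULTS)  # before: five forced keys
new_llm_setting = req.get("llm_setting", {})            # after: empty, backend decides

print(old_llm_setting)  # {'temperature': 0.1, ..., 'max_tokens': 215}
print(new_llm_setting)  # {}
```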
```diff
@@ -67,7 +67,7 @@ def get_rag_python_directory(*args):
 def get_home_cache_dir():
-    dir = os.path.join(os.path.expanduser('~'), ".raglow")
+    dir = os.path.join(os.path.expanduser('~'), ".ragflow")
     try:
         os.mkdir(dir)
     except OSError as error:
```

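A one-character typo fix: the per-user cache directory was created as `~/.raglow`, so anything cached under the intended `~/.ragflow` path was never found. A self-contained sketch of the corrected helper, assuming the same semantics as the diff (create on first use, ignore "already exists"):

```python
import os

def get_home_cache_dir() -> str:
    # ~/.ragflow, the intended per-user cache location
    cache_dir = os.path.join(os.path.expanduser("~"), ".ragflow")
    # idiomatic equivalent of the diff's os.mkdir wrapped in try/except OSError
    os.makedirs(cache_dir, exist_ok=True)
    return cache_dir

print(get_home_cache_dir())  # e.g. /home/alice/.ragflow
```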
```diff
@@ -116,18 +116,31 @@ def chunk(filename, binary=None, lang="Chinese", callback=None, **kwargs):
                     break
                 txt += l
         lines = txt.split("\n")
-        #is_english([rmPrefix(l) for l in lines[:100]])
+        comma, tab = 0, 0
+        for l in lines:
+            if len(l.split(",")) == 2: comma += 1
+            if len(l.split("\t")) == 2: tab += 1
+        delimiter = "\t" if tab >= comma else ","
         fails = []
-        for i, line in enumerate(lines):
-            arr = [l for l in line.split("\t") if len(l) > 1]
+        question, answer = "", ""
+        i = 0
+        while i < len(lines):
+            arr = lines[i].split(delimiter)
             if len(arr) != 2:
-                fails.append(str(i))
-                continue
-            res.append(beAdoc(deepcopy(doc), arr[0], arr[1], eng))
+                if question: answer += "\n" + lines[i]
+                else:
+                    fails.append(str(i+1))
+            elif len(arr) == 2:
+                if question and answer: res.append(beAdoc(deepcopy(doc), question, answer, eng))
+                question, answer = arr
+            i += 1
             if len(res) % 999 == 0:
                 callback(len(res) * 0.6 / len(lines), ("Extract Q&A: {}".format(len(res)) + (
                     f"{len(fails)} failure, line: %s..." % (",".join(fails[:3])) if fails else "")))
+        if question: res.append(beAdoc(deepcopy(doc), question, answer, eng))
         callback(0.6, ("Extract Q&A: {}".format(len(res)) + (
             f"{len(fails)} failure, line: %s..." % (",".join(fails[:3])) if fails else "")))
```

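The Q&A chunker changes in two ways: it now sniffs whether the file is comma- or tab-delimited (counting lines that split cleanly into exactly two fields, with ties going to tab), and a line that does not split is treated as a continuation of the previous answer rather than a failure, so multi-line answers survive. Failures are also reported with 1-based line numbers. A standalone sketch of the same strategy (`parse_qa` is a hypothetical name; the real code builds `beAdoc` documents and reports progress via `callback`):

```python
def parse_qa(text: str):
    lines = text.split("\n")

    # Delimiter sniffing: count lines that split into exactly two fields
    # for each candidate delimiter; ties go to tab, as in the diff.
    comma = sum(1 for l in lines if len(l.split(",")) == 2)
    tab = sum(1 for l in lines if len(l.split("\t")) == 2)
    delimiter = "\t" if tab >= comma else ","

    pairs, fails = [], []
    question, answer = "", ""
    for i, line in enumerate(lines):
        arr = line.split(delimiter)
        if len(arr) == 2:
            if question and answer:
                pairs.append((question, answer))  # flush the finished pair
            question, answer = arr
        elif question:
            answer += "\n" + line                 # continuation of a multi-line answer
        else:
            fails.append(str(i + 1))              # 1-based line number, as in the diff
    if question:
        pairs.append((question, answer))          # flush the trailing pair
    return pairs, fails

sample = "Q1\tA1 line one\nA1 line two\nQ2\tA2"
print(parse_qa(sample))
# -> ([('Q1', 'A1 line one\nA1 line two'), ('Q2', 'A2')], [])
```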
```diff
@@ -141,12 +141,12 @@ class OllamaChat(Base):
         if system:
             history.insert(0, {"role": "system", "content": system})
         try:
-            options = {"temperature": gen_conf.get("temperature", 0.1),
-                       "num_predict": gen_conf.get("max_tokens", 128),
-                       "top_k": gen_conf.get("top_p", 0.3),
-                       "presence_penalty": gen_conf.get("presence_penalty", 0.4),
-                       "frequency_penalty": gen_conf.get("frequency_penalty", 0.7),
-                       }
+            options = {}
+            if "temperature" in gen_conf: options["temperature"] = gen_conf["temperature"]
+            if "max_tokens" in gen_conf: options["num_predict"] = gen_conf["max_tokens"]
+            if "top_p" in gen_conf: options["top_k"] = gen_conf["top_p"]
+            if "presence_penalty" in gen_conf: options["presence_penalty"] = gen_conf["presence_penalty"]
+            if "frequency_penalty" in gen_conf: options["frequency_penalty"] = gen_conf["frequency_penalty"]
             response = self.client.chat(
                 model=self.model_name,
                 messages=history,
```

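The same principle applied to the Ollama backend: instead of always sending five options with hard-coded fallbacks, only the settings present in `gen_conf` are forwarded, so Ollama applies its own defaults for everything else. Note the diff keeps the pre-existing mapping of `top_p` onto Ollama's `top_k`, which it inherited from the previous version. A sketch of the option building in isolation (`RAGFLOW_TO_OLLAMA` and `build_ollama_options` are illustrative names):

```python
# Key mapping taken straight from the diff; note max_tokens -> num_predict
# and the (pre-existing) top_p -> top_k translation.
RAGFLOW_TO_OLLAMA = {
    "temperature": "temperature",
    "max_tokens": "num_predict",
    "top_p": "top_k",
    "presence_penalty": "presence_penalty",
    "frequency_penalty": "frequency_penalty",
}

def build_ollama_options(gen_conf: dict) -> dict:
    # Forward only the keys the user actually set; omit the rest entirely.
    return {ollama_key: gen_conf[rag_key]
            for rag_key, ollama_key in RAGFLOW_TO_OLLAMA.items()
            if rag_key in gen_conf}

print(build_ollama_options({"temperature": 0.8}))
# -> {'temperature': 0.8}  (no forced num_predict=128 as before)
```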
```diff
@@ -236,8 +236,8 @@ class YoudaoEmbed(Base):
         try:
             print("LOADING BCE...")
             YoudaoEmbed._client = qanthing(model_name_or_path=os.path.join(
-                get_project_base_directory(),
-                "rag/res/bce-embedding-base_v1"))
+                get_home_cache_dir(),
+                "bce-embedding-base_v1"))
         except Exception as e:
             YoudaoEmbed._client = qanthing(
                 model_name_or_path=model_name.replace(
```
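
The BCE embedding weights are now looked up in the per-user cache returned by `get_home_cache_dir()` (the `.ragflow` directory fixed above) rather than inside the source tree. A sketch of the resulting lookup path, assuming the helper from the earlier hunk:

```python
import os

def bce_model_path() -> str:
    # get_home_cache_dir() resolves to ~/.ragflow after the typo fix above
    cache_dir = os.path.join(os.path.expanduser("~"), ".ragflow")
    return os.path.join(cache_dir, "bce-embedding-base_v1")

print(bce_model_path())  # e.g. /home/alice/.ragflow/bce-embedding-base_v1
```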