Refactor Document API (#2833)

### What problem does this PR solve?

Refactor Document API

### Type of change


- [x] Refactoring

Co-authored-by: liuhua <10215101452@stu.ecun.edu.cn>
Authored by liuhua on 2024-10-14 20:03:33 +08:00; committed by GitHub.
parent df223eddf3
commit 6329427ad5
11 changed files with 393 additions and 418 deletions

View File

@ -243,7 +243,7 @@ def list(tenant_id):
page_number = int(request.args.get("page", 1))
items_per_page = int(request.args.get("page_size", 1024))
orderby = request.args.get("orderby", "create_time")
if request.args.get("desc") == "False":
if request.args.get("desc") == "False" or request.args.get("desc") == "false":
desc = False
else:
desc = True
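
The same case-sensitive string comparison is patched in three separate `list` handlers in this PR (documents, datasets, and chat sessions). A small helper could normalize the flag parsing in one place; a sketch, with a hypothetical name and placement that are not part of this commit:

```python
def parse_bool_arg(args, name: str, default: bool = True) -> bool:
    """Parse a query-string flag case-insensitively: 'False', 'false', '0', 'no' -> False."""
    raw = args.get(name)
    if raw is None:
        return default
    return raw.strip().lower() not in ("false", "0", "no")

# Usage in the handlers above:
# desc = parse_bool_arg(request.args, "desc")
```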

View File

@ -107,11 +107,6 @@ def update(tenant_id,dataset_id):
if req["tenant_id"] != tenant_id:
return get_error_data_result(
retmsg="Can't change tenant_id.")
if "embedding_model" in req:
if req["embedding_model"] != t.embd_id:
return get_error_data_result(
retmsg="Can't change embedding_model.")
req.pop("embedding_model")
e, kb = KnowledgebaseService.get_by_id(dataset_id)
if "chunk_count" in req:
if req["chunk_count"] != kb.chunk_num:
@ -128,6 +123,11 @@ def update(tenant_id,dataset_id):
return get_error_data_result(
retmsg="If chunk count is not 0, parse method is not changable.")
req['parser_id'] = req.pop('parse_method')
if "embedding_model" in req:
if kb.chunk_num != 0 and req['parse_method'] != kb.parser_id:
return get_error_data_result(
retmsg="If chunk count is not 0, parse method is not changable.")
req['embd_id'] = req.pop('embedding_model')
if "name" in req:
req["name"] = req["name"].strip()
if req["name"].lower() != kb.name.lower() \
@ -150,7 +150,7 @@ def list(tenant_id):
page_number = int(request.args.get("page", 1))
items_per_page = int(request.args.get("page_size", 1024))
orderby = request.args.get("orderby", "create_time")
if request.args.get("desc") == "False":
if request.args.get("desc") == "False" or request.args.get("desc") == "false" :
desc = False
else:
desc = True
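
The `update` handler above repeats the same guard shape for `chunk_count`, `parse_method`, and `embedding_model`. A sketch of how the read-only checks could be centralized (the helper name and the `doc_num` attribute are assumptions, mirroring the `chunk_num` pattern; not part of this commit):

```python
from typing import Optional

def immutable_field_error(req: dict, kb) -> Optional[str]:
    """Return an error message if the request tries to change a read-only field."""
    checks = {
        "chunk_count": kb.chunk_num,
        "document_count": kb.doc_num,  # assumed attribute, by analogy with chunk_num
    }
    for field, current in checks.items():
        if field in req and req[field] != current:
            return f"Can't change {field}."
    return None
```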

View File

@ -8,6 +8,7 @@ from botocore.docs.method import document_model_driven_method
from flask import request
from flask_login import login_required, current_user
from elasticsearch_dsl import Q
from pygments import highlight
from sphinx.addnodes import document
from rag.app.qa import rmPrefix, beAdoc
@ -158,7 +159,7 @@ def download(tenant_id, dataset_id, document_id):
return get_error_data_result(retmsg=f'You do not own the dataset {dataset_id}.')
doc = DocumentService.query(kb_id=dataset_id, id=document_id)
if not doc:
return get_error_data_result(retmsg=f'The dataset not own the document {doc.id}.')
return get_error_data_result(retmsg=f'The dataset does not own the document {document_id}.')
# The process of downloading
doc_id, doc_location = File2DocumentService.get_storage_address(doc_id=document_id) # minio address
file_stream = STORAGE_IMPL.get(doc_id, doc_location)
@ -294,7 +295,7 @@ def stop_parsing(tenant_id,dataset_id):
return get_result()
@manager.route('/dataset/{dataset_id}/document/{document_id}/chunk', methods=['GET'])
@manager.route('/dataset/<dataset_id>/document/<document_id>/chunk', methods=['GET'])
@token_required
def list_chunk(tenant_id,dataset_id,document_id):
if not KnowledgebaseService.query(id=dataset_id, tenant_id=tenant_id):
@ -361,7 +362,7 @@ def list_chunk(tenant_id,dataset_id,document_id):
return server_error_response(e)
@manager.route('/dataset/{dataset_id}/document/{document_id}/chunk', methods=['POST'])
@manager.route('/dataset/<dataset_id>/document/<document_id>/chunk', methods=['POST'])
@token_required
def create(tenant_id,dataset_id,document_id):
if not KnowledgebaseService.query(id=dataset_id, tenant_id=tenant_id):
@ -369,6 +370,7 @@ def create(tenant_id,dataset_id,document_id):
doc = DocumentService.query(id=document_id, kb_id=dataset_id)
if not doc:
return get_error_data_result(retmsg=f"You don't own the document {document_id}.")
doc = doc[0]
req = request.json
if not req.get("content"):
return get_error_data_result(retmsg="`content` is required")
@ -418,7 +420,7 @@ def create(tenant_id,dataset_id,document_id):
# return get_result(data={"chunk_id": chunk_id})
@manager.route('dataset/{dataset_id}/document/{document_id}/chunk', methods=['DELETE'])
@manager.route('dataset/<dataset_id>/document/<document_id>/chunk', methods=['DELETE'])
@token_required
def rm_chunk(tenant_id,dataset_id,document_id):
if not KnowledgebaseService.query(id=dataset_id, tenant_id=tenant_id):
@ -426,9 +428,16 @@ def rm_chunk(tenant_id,dataset_id,document_id):
doc = DocumentService.query(id=document_id, kb_id=dataset_id)
if not doc:
return get_error_data_result(retmsg=f"You don't own the document {document_id}.")
doc = doc[0]
req = request.json
if not req.get("chunk_ids"):
return get_error_data_result("`chunk_ids` is required")
for chunk_id in req.get("chunk_ids"):
res = ELASTICSEARCH.get(
chunk_id, search.index_name(
tenant_id))
if not res.get("found"):
return server_error_response(f"Chunk {chunk_id} not found")
if not ELASTICSEARCH.deleteByQuery(
Q("ids", values=req["chunk_ids"]), search.index_name(tenant_id)):
return get_error_data_result(retmsg="Index updating failure")
@ -439,25 +448,26 @@ def rm_chunk(tenant_id,dataset_id,document_id):
@manager.route('/dataset/{dataset_id}/document/{document_id}/chunk/{chunk_id}', methods=['PUT'])
@manager.route('/dataset/<dataset_id>/document/<document_id>/chunk/<chunk_id>', methods=['PUT'])
@token_required
def set(tenant_id,dataset_id,document_id,chunk_id):
res = ELASTICSEARCH.get(
chunk_id, search.index_name(
tenant_id))
if not res.get("found"):
return get_error_data_result(f"Chunk {chunk_id} not found")
if not KnowledgebaseService.query(id=dataset_id, tenant_id=tenant_id):
return get_error_data_result(retmsg=f"You don't own the dataset {dataset_id}.")
doc = DocumentService.query(id=document_id, kb_id=dataset_id)
if not doc:
return get_error_data_result(retmsg=f"You don't own the document {document_id}.")
req = request.json
if not req.get("content"):
return get_error_data_result("`content` is required")
if not req.get("important_keywords"):
return get_error_data_result("`important_keywords` is required")
d = {
"id": chunk_id,
"content_with_weight": req["content"]}
"content_with_weight": req.get("content",res.get["content_with_weight"])}
d["content_ltks"] = rag_tokenizer.tokenize(req["content"])
d["content_sm_ltks"] = rag_tokenizer.fine_grained_tokenize(d["content_ltks"])
d["important_kwd"] = req["important_keywords"]
d["important_kwd"] = req.get("important_keywords",[])
d["important_tks"] = rag_tokenizer.tokenize(" ".join(req["important_keywords"]))
if "available" in req:
d["available_int"] = req["available"]
@ -488,23 +498,27 @@ def set(tenant_id,dataset_id,document_id,chunk_id):
@token_required
def retrieval_test(tenant_id):
req = request.args
if not req.get("datasets"):
req_json = request.json
if not req_json.get("datasets"):
return get_error_data_result("`datasets` is required.")
for id in req.get("datasets"):
for id in req_json.get("datasets"):
if not KnowledgebaseService.query(id=id,tenant_id=tenant_id):
return get_error_data_result(f"You don't own the dataset {id}.")
if not req.get("question"):
if "question" not in req_json:
return get_error_data_result("`question` is required.")
page = int(req.get("offset", 1))
size = int(req.get("limit", 30))
question = req["question"]
kb_id = req["datasets"]
question = req_json["question"]
kb_id = req_json["datasets"]
if isinstance(kb_id, str): kb_id = [kb_id]
doc_ids = req.get("documents", [])
similarity_threshold = float(req.get("similarity_threshold", 0.2))
doc_ids = req_json.get("documents", [])
similarity_threshold = float(req_json.get("similarity_threshold", 0.0))
vector_similarity_weight = float(req_json.get("vector_similarity_weight", 0.3))
top = int(req_json.get("top_k", 1024))
if req_json.get("highlight") == "False" or req_json.get("highlight") == "false":
highlight = False
else:
highlight = True
try:
e, kb = KnowledgebaseService.get_by_id(kb_id[0])
if not e:
@ -524,7 +538,7 @@ def retrieval_test(tenant_id):
retr = retrievaler if kb.parser_id != ParserType.KG else kg_retrievaler
ranks = retr.retrieval(question, embd_mdl, kb.tenant_id, kb_id, page, size,
similarity_threshold, vector_similarity_weight, top,
doc_ids, rerank_mdl=rerank_mdl, highlight=req.get("highlight"))
doc_ids, rerank_mdl=rerank_mdl, highlight=highlight)
for c in ranks["chunks"]:
if "vector" in c:
del c["vector"]
@ -543,11 +557,11 @@ def retrieval_test(tenant_id):
for key, value in chunk.items():
new_key = key_mapping.get(key, key)
rename_chunk[new_key] = value
renamed_chunks.append(rename_chunk)
renamed_chunks.append(rename_chunk)
ranks["chunks"] = renamed_chunks
return get_result(data=ranks)
except Exception as e:
if str(e).find("not_found") > 0:
return get_result(retmsg=f'No chunk found! Check the chunk status please!',
return get_result(retmsg=f'No chunk found! Check the chunk status please!',
retcode=RetCode.DATA_ERROR)
return server_error_response(e)
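
With the filters now read from the JSON body, clients must send them as JSON rather than query parameters. A minimal call with `requests` (the `/api/v1/retrieval` path is not shown in this hunk and is an assumption; address, token, and IDs are placeholders):

```python
import requests

resp = requests.post(
    "http://{address}/api/v1/retrieval",  # assumed route for retrieval_test
    headers={"Authorization": "Bearer {YOUR_ACCESS_TOKEN}"},
    json={
        "question": "What is RAGFlow?",
        "datasets": ["7898da028a0511efbf750242ac120005"],
        "similarity_threshold": 0.2,
        "highlight": "false",  # now parsed case-insensitively on the server
    },
)
print(resp.json())
```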

View File

@ -163,7 +163,7 @@ def list(chat_id,tenant_id):
page_number = int(request.args.get("page", 1))
items_per_page = int(request.args.get("page_size", 1024))
orderby = request.args.get("orderby", "create_time")
if request.args.get("desc") == "False":
if request.args.get("desc") == "False" or request.args.get("desc") == "false":
desc = False
else:
desc = True

View File

@ -5,7 +5,7 @@
**POST** `/api/v1/dataset`
Creates a knowledge base (dataset).
Creates a dataset.
### Request
@ -31,11 +31,11 @@ Creates a knowledge base (dataset).
#### Request example
```bash
# "id": "id" must not be provided.
# "name": name is required and cannot be duplicated.
# "id": id must not be provided.
# "name": name is required and can't be duplicated.
# "tenant_id": tenant_id must not be provided.
# "embedding_model": REQUIRED.
# "naive": general.
# "embedding_model": embedding_model must not be provided.
# "navie" means general.
curl --request POST \
--url http://{address}/api/v1/dataset \
--header 'Content-Type: application/json' \
@ -51,21 +51,21 @@ curl --request POST \
#### Request parameters
- `"id"`: (*Body parameter*)
The unique identifier of each created dataset.
- When creating a dataset, `id` must not be provided.
The ID of the created dataset used to uniquely identify different datasets.
- If creating a dataset, `id` must not be provided.
- `"name"`: (*Body parameter*)
The name of the dataset, which must adhere to the following requirements:
- Required when creating a dataset and must be unique.
- When updating a dataset, `name` must still be unique.
- If updating a dataset, `name` must still be unique.
- `"avatar"`: (*Body parameter*)
Base64 encoding of the avatar.
- `"tenant_id"`: (*Body parameter*)
The ID of the tenant associated with the dataset, used to link it with specific users.
- When creating a dataset, `tenant_id` must not be provided.
- When updating a dataset, `tenant_id` cannot be changed.
- If creating a dataset, `tenant_id` must not be provided.
- If updating a dataset, `tenant_id` cannot be changed.
- `"description"`: (*Body parameter*)
The description of the dataset.
@ -74,31 +74,31 @@ curl --request POST \
The language setting for the dataset.
- `"embedding_model"`: (*Body parameter*)
Embedding model used in the dataset for generating vector embeddings.
- When creating a dataset, `embedding_model` must not be provided.
- When updating a dataset, `embedding_model` cannot be changed.
Embedding model used in the dataset to generate vector embeddings.
- If creating a dataset, `embedding_model` must not be provided.
- If updating a dataset, `embedding_model` cannot be changed.
- `"permission"`: (*Body parameter*)
Specifies who can manipulate the dataset.
- `"document_count"`: (*Body parameter*)
Document count of the dataset.
- When updating a dataset, `document_count` cannot be changed.
- If updating a dataset, `document_count` cannot be changed.
- `"chunk_count"`: (*Body parameter*)
Chunk count of the dataset.
- When updating a dataset, `chunk_count` cannot be changed.
- If updating a dataset, `chunk_count` cannot be changed.
- `"parse_method"`: (*Body parameter*)
Parsing method of the dataset.
- When updating `parse_method`, `chunk_count` must be greater than 0.
- If updating `parse_method`, `chunk_count` must be greater than 0.
- `"parser_config"`: (*Body parameter*)
The configuration settings for the dataset parser.
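
The curl call above maps one-to-one onto Python; a sketch with `requests` (address and token are placeholders, as in the shell example):

```python
import requests

resp = requests.post(
    "http://{address}/api/v1/dataset",
    headers={"Authorization": "Bearer {YOUR_ACCESS_TOKEN}"},
    json={"name": "test"},  # "id", "tenant_id", "embedding_model" must not be provided
)
assert resp.json()["code"] == 0
```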
### Response
A successful response includes a JSON object like the following:
The successful response includes a JSON object like the following:
```json
{
@ -139,7 +139,8 @@ A successful response includes a JSON object like the following:
- `"error_code"`: `integer`
`0`: The operation succeeds.
An error response includes a JSON object like the following:
The error response includes a JSON object like the following:
```json
{
@ -152,7 +153,7 @@ An error response includes a JSON object like the following:
**DELETE** `/api/v1/dataset`
Deletes datasets by their IDs.
Deletes datasets by ids.
### Request
@ -168,7 +169,7 @@ Deletes datasets by their IDs.
#### Request example
```bash
# Specify either "ids" or "names", NOT both.
# Either id or name must be provided, but not both.
curl --request DELETE \
--url http://{address}/api/v1/dataset \
--header 'Content-Type: application/json' \
@ -181,12 +182,12 @@ curl --request DELETE \
#### Request parameters
- `"ids"`: (*Body parameter*)
IDs of the datasets to delete.
Dataset IDs to delete.
### Response
A successful response includes a JSON object like the following:
The successful response includes a JSON object like the following:
```json
{
@ -198,7 +199,7 @@ A successful response includes a JSON object like the following:
`0`: The operation succeeds.
An error response includes a JSON object like the following:
The error response includes a JSON object like the following:
```json
{
@ -211,7 +212,7 @@ An error response includes a JSON object like the following:
**PUT** `/api/v1/dataset/{dataset_id}`
Updates a dataset by its ID.
Updates a dataset by its id.
### Request
@ -226,14 +227,14 @@ Updates a dataset by its ID.
#### Request example
```bash
# "id": REQUIRED
# "name": If you update "name", it cannot be duplicated.
# "tenant_id": If you update "tenant_id", it cannot be changed
# "embedding_model": If you update "embedding_model", it cannot be changed.
# "chunk_count": If you update "chunk_count", it cannot be changed.
# "document_count": If you update "document_count", it cannot be changed.
# "parse_method": If you update "parse_method", "chunk_count" must be 0.
# "naive": General.
# "id": id is required.
# "name": If you update name, it can't be duplicated.
# "tenant_id": If you update tenant_id, it can't be changed
# "embedding_model": If you update embedding_model, it can't be changed.
# "chunk_count": If you update chunk_count, it can't be changed.
# "document_count": If you update document_count, it can't be changed.
# "parse_method": If you update parse_method, chunk_count must be 0.
# "navie" means general.
curl --request PUT \
--url http://{address}/api/v1/dataset/{dataset_id} \
--header 'Content-Type: application/json' \
@ -244,18 +245,17 @@ curl --request PUT \
"embedding_model": "BAAI/bge-zh-v1.5",
"chunk_count": 0,
"document_count": 0,
"parse_method": "naive"
"parse_method": "navie"
}'
```
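
The same update in Python (placeholders as in the curl example; `parse_method` may only change while `chunk_count` is 0):

```python
import requests

resp = requests.put(
    "http://{address}/api/v1/dataset/{dataset_id}",
    headers={"Authorization": "Bearer {YOUR_ACCESS_TOKEN}"},
    json={
        "name": "updated_dataset",
        "chunk_count": 0,
        "document_count": 0,
        "parse_method": "naive",
    },
)
print(resp.json())
```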
#### Request parameters
See the "Create Dataset" for the complete structure of the request parameters.
(Refer to the "Create Dataset" for the complete structure of the request parameters.)
### Response
A successful response includes a JSON object like the following:
The successful response includes a JSON object like the following:
```json
{
@ -267,7 +267,7 @@ A successful response includes a JSON object like the following:
`0`: The operation succeeds.
An error response includes a JSON object like the following:
The error response includes a JSON object like the following:
```json
{
@ -321,7 +321,7 @@ curl --request GET \
### Response
A successful response includes a JSON object like the following:
The successful response includes a JSON object like the following:
```json
{
@ -365,7 +365,7 @@ A successful response includes a JSON object like the following:
```
An error response includes a JSON object like the following:
The error response includes a JSON object like the following:
```json
{
@ -392,12 +392,12 @@ Uploads files to a dataset.
#### Request example
```shell
```bash
curl --request POST \
--url http://{address}/api/v1/dataset/{dataset_id}/document \
--header 'Content-Type: multipart/form-data' \
--header 'Authorization: Bearer {YOUR_ACCESS_TOKEN}' \
--form 'file=@test.txt'
--form 'file=@./test.txt'
```
#### Request parameters
@ -409,9 +409,9 @@ curl --request POST \
### Response
A successful response includes a JSON object like the following:
The successful response includes a JSON object like the following:
```shell
```json
{
"code": 0
}
@ -421,12 +421,12 @@ A successful response includes a JSON object like the following:
`0`: The operation succeeds.
An error response includes a JSON object like the following:
The error response includes a JSON object like the following:
```shell
```json
{
"code": 3016,
"message": "Can't connect database"
"code": 101,
"message": "No file part!"
}
```
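
For completeness, the multipart upload in Python; `requests` builds the `multipart/form-data` body itself when given `files`, so no `Content-Type` header is set manually (path and placeholders are illustrative):

```python
import requests

with open("./test.txt", "rb") as f:
    resp = requests.post(
        "http://{address}/api/v1/dataset/{dataset_id}/document",
        headers={"Authorization": "Bearer {YOUR_ACCESS_TOKEN}"},
        files={"file": f},  # equivalent to --form 'file=@./test.txt'
    )
print(resp.json())
```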
@ -447,7 +447,7 @@ Downloads files from a dataset.
- '{FILE_NAME}'
#### Request example
```shell
```bash
curl --request GET \
--url http://{address}/api/v1/dataset/{dataset_id}/document/{documents_id} \
--header 'Content-Type: application/json' \
@ -464,31 +464,29 @@ curl --request GET \
### Response
A successful response includes a JSON object like the following:
The successful response includes a JSON object like the following:
```shell
{
"code": 0
}
```text
test_2.
```
- `"error_code"`: `integer`
`0`: The operation succeeds.
An error response includes a JSON object like the following:
The error response includes a JSON object like the following:
```shell
```json
{
"code": 3016,
"message": "Can't connect database"
"code": 102,
"message": "You do not own the dataset 7898da028a0511efbf750242ac1220005."
}
```
## List files of a dataset
**GET** `/api/v1/dataset/{dataset_id}/info?keywords={keyword}&page={page}&page_size={limit}&orderby={orderby}&desc={desc}&name={name}`
**GET** `/api/v1/dataset/{dataset_id}/info?offset={offset}&limit={limit}&orderby={orderby}&desc={desc}&keywords={keywords}&id={document_id}`
Lists files in a dataset.
@ -502,48 +500,47 @@ List files to a dataset.
#### Request example
```shell
```bash
curl --request GET \
--url http://{address}/api/v1/dataset/{dataset_id}/info?keywords=rag&page=0&page_size=10&orderby=create_time&desc=yes \
--header 'Content-Type: application/json' \
--header 'Authorization: Bearer {YOUR_ACCESS_TOKEN}'
--url http://{address}/api/v1/dataset/{dataset_id}/info?offset={offset}&limit={limit}&orderby={orderby}&desc={desc}&keywords={keywords}&id={document_id} \
--header 'Authorization: Bearer {YOUR_ACCESS_TOKEN}'
```
#### Request parameters
- `"dataset_id"`: (*PATH parameter*)
The dataset ID.
- `offset`: (*Filter parameter*)
The beginning number of records for paging.
- `keywords`: (*Filter parameter*)
Keywords used to filter the documents;
- `page`: (*Filter parameter*)
The current page number to retrieve from the paginated data. This parameter determines which set of records will be fetched.
- `page_size`: (*Filter parameter*)
The number of records to retrieve per page. This controls how many records will be included in each page.
- `limit`: (*Filter parameter*)
Records number to return.
- `orderby`: (*Filter parameter*)
The field by which the records should be sorted. This specifies the attribute or column used to order the results.
- `desc`: (*Filter parameter*)
A boolean flag indicating whether the sorting should be in descending order.
- `name`: (*Filter parameter*)
File name.
- `id`: (*Filter parameter*)
The ID of the document to retrieve.
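
A Python version of the listing call, passing the filters as query parameters (values are illustrative):

```python
import requests

resp = requests.get(
    "http://{address}/api/v1/dataset/{dataset_id}/info",
    headers={"Authorization": "Bearer {YOUR_ACCESS_TOKEN}"},
    params={
        "offset": 0,
        "limit": 10,
        "orderby": "create_time",
        "desc": "true",
        "keywords": "test",
    },
)
print(resp.json()["data"]["docs"])
```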
### Response
A successful response includes a JSON object like the following:
The successful response includes a JSON object like the following:
```shell
```json
{
"code": 0,
"data": {
"docs": [
{
"chunk_count": 0,
"create_date": "Wed, 18 Sep 2024 08:20:49 GMT",
"create_time": 1726647649379,
"created_by": "134408906b6811efbcd20242ac120005",
"id": "e970a94a759611efae5b0242ac120004",
"knowledgebase_id": "e95f574e759611efbc850242ac120004",
"location": "Test Document222.txt",
"name": "Test Document222.txt",
"create_date": "Mon, 14 Oct 2024 09:11:01 GMT",
"create_time": 1728897061948,
"created_by": "69736c5e723611efb51b0242ac120007",
"id": "3bcfbf8a8a0c11ef8aba0242ac120006",
"knowledgebase_id": "7898da028a0511efbf750242ac120005",
"location": "Test_2.txt",
"name": "Test_2.txt",
"parser_config": {
"chunk_token_count": 128,
"delimiter": "\n!?。;!?",
@ -556,48 +553,18 @@ A successful response includes a JSON object like the following:
"progress": 0.0,
"progress_msg": "",
"run": "0",
"size": 46,
"size": 7,
"source_type": "local",
"status": "1",
"thumbnail": null,
"token_count": 0,
"type": "doc",
"update_date": "Wed, 18 Sep 2024 08:20:49 GMT",
"update_time": 1726647649379
},
{
"chunk_count": 0,
"create_date": "Wed, 18 Sep 2024 08:20:49 GMT",
"create_time": 1726647649340,
"created_by": "134408906b6811efbcd20242ac120005",
"id": "e96aad9c759611ef9ab60242ac120004",
"knowledgebase_id": "e95f574e759611efbc850242ac120004",
"location": "Test Document111.txt",
"name": "Test Document111.txt",
"parser_config": {
"chunk_token_count": 128,
"delimiter": "\n!?。;!?",
"layout_recognize": true,
"task_page_size": 12
},
"parser_method": "naive",
"process_begin_at": null,
"process_duation": 0.0,
"progress": 0.0,
"progress_msg": "",
"run": "0",
"size": 46,
"source_type": "local",
"status": "1",
"thumbnail": null,
"token_count": 0,
"type": "doc",
"update_date": "Wed, 18 Sep 2024 08:20:49 GMT",
"update_time": 1726647649340
"update_date": "Mon, 14 Oct 2024 09:11:01 GMT",
"update_time": 1728897061948
}
],
"total": 2
},
"total": 1
}
}
```
@ -605,12 +572,12 @@ A successful response includes a JSON object like the following:
`0`: The operation succeeds.
An error response includes a JSON object like the following:
The error response includes a JSON object like the following:
```shell
```json
{
"code": 3016,
"message": "Can't connect database"
"code": 102,
"message": "You don't own the dataset 7898da028a0511efbf750242ac1220005. "
}
```
@ -623,56 +590,114 @@ Update a file in a dataset
### Request
- Method: PUT
- URL: `/api/v1/dataset/{dataset_id}/document`
- URL: `http://{address}/api/v1/dataset/{dataset_id}/document/{document_id}`
- Headers:
- `Content-Type: application/json`
- 'Authorization: Bearer {YOUR_ACCESS_TOKEN}'
#### Request example
```shell
```bash
curl --request PUT \
--url http://{address}/api/v1/dataset/{dataset_id}/info/{document_id} \
--header 'Content-Type: application/json' \
--header 'Authorization: Bearer {YOUR_ACCESS_TOKEN}'
--raw '{
"document_id": "f6b170ac758811efa0660242ac120004",
"document_name": "manual.txt",
"thumbnail": null,
"knowledgebase_id": "779333c0758611ef910f0242ac120004",
"parser_method": "manual",
"parser_config": {"chunk_token_count": 128, "delimiter": "\n!?。;!?", "layout_recognize": true, "task_page_size": 12},
"source_type": "local", "type": "doc",
"created_by": "134408906b6811efbcd20242ac120005",
"size": 0, "token_count": 0, "chunk_count": 0,
"progress": 0.0,
"progress_msg": "",
"process_begin_at": null,
"process_duration": 0.0
}'
--url http://{address}/api/v1/dataset/{dataset_id}/document/{document_id} \
--header 'Authorization: Bearer {YOUR_ACCESS_TOKEN}' \
--header 'Content-Type: application/json' \
--data '{
"name": "manual.txt",
"thumbnail": null,
"knowledgebase_id": "779333c0758611ef910f0242ac120004",
"parser_method": "manual",
"parser_config": {"chunk_token_count": 128, "delimiter": "\n!?。;!?", "layout_recognize": true, "task_page_size": 12},
"source_type": "local", "type": "doc",
"created_by": "134408906b6811efbcd20242ac120005",
"size": 0, "token_count": 0, "chunk_count": 0,
"progress": 0.0,
"progress_msg": "",
"process_begin_at": null,
"process_duration": 0.0
}'
```
#### Request parameters
- `"document_id"`: (*Body parameter*)
- `"document_name"`: (*Body parameter*)
- `"thumbnail"`: (*Body parameter*)
Thumbnail image of the document.
- `""`
- `"knowledgebase_id"`: (*Body parameter*)
Knowledge base ID related to the document.
- `""`
- `"parser_method"`: (*Body parameter*)
Method used to parse the document.
- `""`
- `"parser_config"`: (*Body parameter*)
Configuration object for the parser.
- If the value is `None`, a dictionary with default values will be generated.
- `"source_type"`: (*Body parameter*)
Source type of the document.
- `""`
- `"type"`: (*Body parameter*)
Type or category of the document.
- `""`
- `"created_by"`: (*Body parameter*)
Creator of the document.
- `""`
- `"name"`: (*Body parameter*)
Name or title of the document.
- `""`
- `"size"`: (*Body parameter*)
Size of the document in bytes or some other unit.
- `0`
- `"token_count"`: (*Body parameter*)
Number of tokens in the document.
- `0`
- `"chunk_count"`: (*Body parameter*)
Number of chunks the document is split into.
- `0`
- `"progress"`: (*Body parameter*)
Current processing progress as a percentage.
- `0.0`
- `"progress_msg"`: (*Body parameter*)
Message indicating current progress status.
- `""`
- `"process_begin_at"`: (*Body parameter*)
Start time of the document processing.
- `None`
- `"process_duration"`: (*Body parameter*)
Duration of the processing in seconds or minutes.
- `0.0`
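
The update call in Python, mirroring the curl example above (all values are placeholders; unlisted fields keep their defaults):

```python
import requests

resp = requests.put(
    "http://{address}/api/v1/dataset/{dataset_id}/document/{document_id}",
    headers={"Authorization": "Bearer {YOUR_ACCESS_TOKEN}"},
    json={"name": "manual.txt", "parser_method": "manual"},
)
print(resp.json())  # {"code": 0} on success
```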
### Response
A successful response includes a JSON object like the following:
The successful response includes a JSON object like the following:
```shell
```json
{
"code": 0
}
```
An error response includes a JSON object like the following:
The error response includes a JSON object like the following:
```shell
```json
{
"code": 3016,
"message": "Can't connect database"
"code": 102,
"message": "The dataset not own the document."
}
```
@ -710,7 +735,7 @@ curl --request POST \
### Response
A successful response includes a JSON object like the following:
The successful response includes a JSON object like the following:
```shell
{
@ -718,7 +743,7 @@ A successful response includes a JSON object like the following:
}
```
An error response includes a JSON object like the following:
The error response includes a JSON object like the following:
```shell
{
@ -761,7 +786,7 @@ curl --request DELETE \
### Response
A successful response includes a JSON object like the following:
The successful response includes a JSON object like the following:
```shell
{
@ -769,7 +794,7 @@ A successful response includes a JSON object like the following:
}
```
An error response includes a JSON object like the following:
The error response includes a JSON object like the following:
```shell
{
@ -808,7 +833,7 @@ curl --request GET \
### Response
A successful response includes a JSON object like the following:
The successful response includes a JSON object like the following:
```shell
{
@ -863,7 +888,7 @@ A successful response includes a JSON object like the following:
}
```
An error response includes a JSON object like the following:
The error response includes a JSON object like the following:
```shell
{

View File

@ -24,7 +24,7 @@ Creates a knowledge base (dataset).
### Parameters
#### name: *Required*
#### name: `str`, *Required*
The unique name of the dataset to create. It must adhere to the following requirements:
@ -36,81 +36,70 @@ The unique name of the dataset to create. It must adhere to the following requir
- Maximum 65,535 characters.
- Case-insensitive.
#### avatar
#### avatar: `str`
Base64 encoding of the avatar. Defaults to `""`
#### tenant_id
#### tenant_id: `str`
The id of the tenant associated with the created dataset is used to identify different users. Defaults to `None`.
- When creating a dataset, `tenant_id` must not be provided.
- When updating a dataset, `tenant_id` cannot be changed.
- If creating a dataset, tenant_id must not be provided.
- If updating a dataset, tenant_id can't be changed.
#### description
#### description: `str`
The description of the created dataset. Defaults to `""`.
#### language
#### language: `str`
The language setting of the created dataset. Defaults to `"English"`.
The language setting of the created dataset. Defaults to `"English"`. ????????????
#### embedding_model
#### embedding_model: `str`
The specific model used by the dataset to generate vector embeddings. Defaults to `""`.
- When creating a dataset, `embedding_model` must not be provided.
- When updating a dataset, `embedding_model` cannot be changed.
- If creating a dataset, embedding_model must not be provided.
- If updating a dataset, embedding_model can't be changed.
#### permission
#### permission: `str`
The person who can operate on the dataset. Defaults to `"me"`.
Specify who can operate on the dataset. Defaults to `"me"`.
#### document_count
#### document_count: `int`
The number of documents associated with the dataset. Defaults to `0`.
:::tip NOTE
When updating a dataset, `document_count` cannot be changed.
:::
- If updating a dataset, `document_count` can't be changed.
#### chunk_count
#### chunk_count: `int`
The number of data chunks generated or processed by the created dataset. Defaults to `0`.
:::tip NOTE
When updating a dataset, `chunk_count` cannot be changed.
:::
- If updating a dataset, chunk_count can't be changed.
#### parse_method
#### parse_method: `str`
The method used by the dataset to parse and process data. Defaults to `"naive"`.
The method used by the dataset to parse and process data. Defaults to `"naive"`.
:::tip NOTE
When updating `parse_method` in a dataset, `chunk_count` must be greater than 0.
:::
- If updating `parse_method` in a dataset, `chunk_count` must be greater than 0.
#### parser_config
#### parser_config: `Dataset.ParserConfig`
The parser configuration of the dataset. A `ParserConfig` object contains the following attributes:
- `chunk_token_count`: Defaults to `128`.
- `layout_recognize`: Defaults to `True`.
- `delimiter`: Defaults to `'\n!?。;!?'`.
- `task_page_size`: Defaults to `12`.
The configuration settings for the parser used by the dataset.
### Returns
- Success: A `dataset` object.
- Failure: `Exception`
```python
DataSet
description: dataset object
```
### Examples
```python
from ragflow import RAGFlow
rag_object = RAGFlow(api_key="<YOUR_API_KEY>", base_url="http://<YOUR_BASE_URL>:9380")
ds = rag_object.create_dataset(name="kb_1")
rag = RAGFlow(api_key="xxxxxx", base_url="http://xxx.xx.xx.xxx:9380")
ds = rag.create_dataset(name="kb_1")
```
---
@ -118,27 +107,28 @@ ds = rag_object.create_dataset(name="kb_1")
## Delete knowledge bases
```python
RAGFlow.delete_datasets(ids: list[str] = None)
RAGFlow.delete_datasets(ids: List[str] = None)
```
Deletes knowledge bases by name or ID.
Deletes knowledge bases.
### Parameters
#### ids
#### ids: `List[str]`
The ids of the datasets to be deleted.
The IDs of the knowledge bases to delete.
### Returns
- Success: No value is returned.
- Failure: `Exception`
```python
no return
```
### Examples
#### Delete knowledge bases by name
```python
from ragflow import RAGFlow
rag = RAGFlow(api_key="xxxxxx", base_url="http://xxx.xx.xx.xxx:9380")
rag.delete_datasets(ids=["id_1","id_2"])
```
@ -154,84 +144,76 @@ RAGFlow.list_datasets(
desc: bool = True,
id: str = None,
name: str = None
) -> list[DataSet]
) -> List[DataSet]
```
Lists all knowledge bases.
Lists all knowledge bases in the RAGFlow system.
### Parameters
#### page
#### page: `int`
The current page number to retrieve from the paginated data. This parameter determines which set of records will be fetched. Defaults to `1`.
#### page_size
#### page_size: `int`
The number of records to retrieve per page. This controls how many records will be included in each page. Defaults to `1024`.
#### order_by
#### order_by: `str`
The attribute by which the results are sorted. Defaults to `"create_time"`.
The field by which the records should be sorted. This specifies the attribute or column used to order the results. Defaults to `"create_time"`.
#### desc
#### desc: `bool`
Indicates whether to sort the results in descending order. Defaults to `True`.
Whether the sorting should be in descending order. Defaults to `True`.
#### id
#### id: `str`
The ID of the dataset to retrieve. Defaults to `None`.
The ID of the dataset to retrieve. Defaults to `None`.
#### name
#### name: `str`
The name of the dataset to retrieve. Defaults to `None`.
The name of the dataset to retrieve. Defaults to `None`.
### Returns
- Success: A list of `DataSet` objects representing the retrieved knowledge bases.
- Failure: `Exception`.
```python
List[DataSet]
description: the list of datasets.
```
### Examples
#### Retrieve a list of knowledge bases associated with the current user
```python
for ds in rag_object.list_datasets():
print(ds.name)
```
from ragflow import RAGFlow
#### Retrieve a knowledge base by ID
```python
ds = rag_object.list_datasets(id = "id_1")
print(ds.name)
rag = RAGFlow(api_key="xxxxxx", base_url="http://xxx.xx.xx.xxx:9380")
for ds in rag.list_datasets():
print(ds)
```
---
## Update knowledge base
```python
DataSet.update(update_message: dict)
```
Updates the current knowledge base.
### Parameters
#### update_message
### Returns
- Success: No value is returned.
- Failure: `Exception`
```python
no return
```
### Examples
```python
from ragflow import RAGFlow
rag = RAGFlow(api_key="<YOUR_API_KEY>", base_url="http://<YOUR_BASE_URL>:9380")
ds = rag.list_datasets(name="kb_1")
rag = RAGFlow(api_key="xxxxxx", base_url="http://xxx.xx.xx.xxx:9380")
ds = rag.get_dataset(name="kb_1")
ds.update({"parse_method":"manual", ...}}
```
@ -249,8 +231,6 @@ RAGFLOW.upload_document(ds:DataSet, name:str, blob:bytes)-> bool
### Parameters
#### ds
#### name
#### blob
@ -354,7 +334,7 @@ Duration of the processing in seconds or minutes. Defaults to `0.0`.
```python
from ragflow import RAGFlow
rag = RAGFlow(api_key="<YOUR_API_KEY>", base_url="http://<YOUR_BASE_URL>:9380")
rag = RAGFlow(api_key="xxxxxx", base_url="http://xxx.xx.xx.xxx:9380")
doc = rag.get_document(id="wdfxb5t547d",name='testdocument.txt')
print(doc)
```
@ -376,7 +356,7 @@ bool
```python
from ragflow import RAGFlow
rag = RAGFlow(api_key="<YOUR_API_KEY>", base_url="http://<YOUR_BASE_URL>:9380")
rag = RAGFlow(api_key="xxxxxx", base_url="http://xxx.xx.xx.xxx:9380")
doc = rag.get_document(id="wdfxb5t547d")
doc.parser_method= "manual"
doc.save()
@ -399,7 +379,7 @@ bytes of the document.
```python
from ragflow import RAGFlow
rag = RAGFlow(api_key="<YOUR_API_KEY>", base_url="http://<YOUR_BASE_URL>:9380")
rag = RAGFlow(api_key="xxxxxx", base_url="http://xxx.xx.xx.xxx:9380")
doc = rag.get_document(id="wdfxb5t547d")
open("~/ragflow.txt", "w+").write(doc.download())
print(doc)
@ -410,7 +390,7 @@ print(doc)
## List documents
```python
Dataset.list_docs(keywords: str=None, offset: int=0, limit:int = -1) -> list[Document]
Dataset.list_docs(keywords: str=None, offset: int=0, limit:int = -1) -> List[Document]
```
### Parameters
@ -425,18 +405,18 @@ The beginning number of records for paging. Defaults to `0`.
#### limit: `int`
Records number to return, -1 means all of them.
Records number to return, -1 means all of them.
### Returns
list[Document]
List[Document]
### Examples
```python
from ragflow import RAGFlow
rag = RAGFlow(api_key="<YOUR_API_KEY>", base_url="http://<YOUR_BASE_URL>:9380")
rag = RAGFlow(api_key="xxxxxx", base_url="http://xxx.xx.xx.xxx:9380")
ds = rag.create_dataset(name="kb_1")
filename1 = "~/ragflow.txt"
@ -466,7 +446,7 @@ description: delete success or not
```python
from ragflow import RAGFlow
rag = RAGFlow(api_key="<YOUR_API_KEY>", base_url="http://<YOUR_BASE_URL>:9380")
rag = RAGFlow(api_key="xxxxxx", base_url="http://xxx.xx.xx.xxx:9380")
ds = rag.create_dataset(name="kb_1")
filename1 = "~/ragflow.txt"
@ -599,7 +579,7 @@ chunk
```python
from ragflow import RAGFlow
rag = RAGFlow(api_key="<YOUR_API_KEY>", base_url="http://<YOUR_BASE_URL>:9380")
rag = RAGFlow(api_key="xxxxxx", base_url="http://xxx.xx.xx.xxx:9380")
doc = rag.get_document(id="wdfxb5t547d")
chunk = doc.add_chunk(content="xxxxxxx")
```
@ -621,7 +601,7 @@ bool
```python
from ragflow import RAGFlow
rag = RAGFlow(api_key="<YOUR_API_KEY>", base_url="http://<YOUR_BASE_URL>:9380")
rag = RAGFlow(api_key="xxxxxx", base_url="http://xxx.xx.xx.xxx:9380")
doc = rag.get_document(id="wdfxb5t547d")
chunk = doc.add_chunk(content="xxxxxxx")
chunk.delete()
@ -644,7 +624,7 @@ bool
```python
from ragflow import RAGFlow
rag = RAGFlow(api_key="<YOUR_API_KEY>", base_url="http://<YOUR_BASE_URL>:9380")
rag = RAGFlow(api_key="xxxxxx", base_url="http://xxx.xx.xx.xxx:9380")
doc = rag.get_document(id="wdfxb5t547d")
chunk = doc.add_chunk(content="xxxxxxx")
chunk.content = "sdfx"
@ -656,7 +636,7 @@ chunk.save()
## Retrieval
```python
RAGFlow.retrieval(question:str, datasets:list[Dataset], document=list[Document]=None, offset:int=0, limit:int=6, similarity_threshold:float=0.1, vector_similarity_weight:float=0.3, top_k:int=1024) -> list[Chunk]
RAGFlow.retrieval(question: str, datasets: List[Dataset], document: List[Document] = None, offset: int = 0, limit: int = 6, similarity_threshold: float = 0.1, vector_similarity_weight: float = 0.3, top_k: int = 1024) -> List[Chunk]
```
### Parameters
@ -665,11 +645,11 @@ RAGFlow.retrieval(question:str, datasets:list[Dataset], document=list[Document]=
The user query or query keywords. Defaults to `""`.
#### datasets: `list[Dataset]`, *Required*
#### datasets: `List[Dataset]`, *Required*
The scope of datasets.
#### document: `list[Document]`
#### document: `List[Document]`
The scope of document. `None` means no limitation. Defaults to `None`.
@ -695,14 +675,14 @@ Number of records engaged in vector cosine computation. Defaults to `1024`.
### Returns
list[Chunk]
List[Chunk]
### Examples
```python
from ragflow import RAGFlow
rag = RAGFlow(api_key="<YOUR_API_KEY>", base_url="http://<YOUR_BASE_URL>:9380")
rag = RAGFlow(api_key="xxxxxx", base_url="http://xxx.xx.xx.xxx:9380")
ds = rag.get_dataset(name="ragflow")
name = 'ragflow_test.txt'
path = 'test_data/ragflow_test.txt'
@ -733,7 +713,7 @@ Chat APIs
RAGFlow.create_chat(
name: str = "assistant",
avatar: str = "path",
knowledgebases: list[DataSet] = ["kb1"],
knowledgebases: List[DataSet] = ["kb1"],
llm: Chat.LLM = None,
prompt: Chat.Prompt = None
) -> Chat
@ -754,7 +734,7 @@ The name of the created chat. Defaults to `"assistant"`.
The icon of the created chat. Defaults to `"path"`.
#### knowledgebases: `list[DataSet]`
#### knowledgebases: `List[DataSet]`
Select knowledgebases associated. Defaults to `["kb1"]`.
@ -796,7 +776,7 @@ You are an intelligent assistant. Please summarize the content of the knowledge
```python
from ragflow import RAGFlow
rag = RAGFlow(api_key="<YOUR_API_KEY>", base_url="http://<YOUR_BASE_URL>:9380")
rag = RAGFlow(api_key="xxxxxx", base_url="http://xxx.xx.xx.xxx:9380")
kb = rag.get_dataset(name="kb_1")
assi = rag.create_chat("Miss R", knowledgebases=[kb])
```
@ -820,7 +800,7 @@ no return
```python
from ragflow import RAGFlow
rag = RAGFlow(api_key="<YOUR_API_KEY>", base_url="http://<YOUR_BASE_URL>:9380")
rag = RAGFlow(api_key="xxxxxx", base_url="http://xxx.xx.xx.xxx:9380")
kb = rag.get_knowledgebase(name="kb_1")
assi = rag.create_chat("Miss R" knowledgebases=[kb])
assi.update({"temperature":0.8})
@ -831,7 +811,7 @@ assi.update({"temperature":0.8})
## Delete chats
```python
RAGFlow.delete_chats(ids: list[str] = None)
RAGFlow.delete_chats(ids: List[str] = None)
```
### Parameters
@ -851,7 +831,7 @@ no return
```python
from ragflow import RAGFlow
rag = RAGFlow(api_key="<YOUR_API_KEY>", base_url="http://<YOUR_BASE_URL>:9380")
rag = RAGFlow(api_key="xxxxxx", base_url="http://xxx.xx.xx.xxx:9380")
rag.delete_chats(ids=["id_1","id_2"])
```
@ -867,7 +847,7 @@ RAGFlow.list_chats(
desc: bool = True,
id: str = None,
name: str = None
) -> list[Chat]
) -> List[Chat]
```
### Parameters
@ -910,7 +890,7 @@ A list of chat objects.
```python
from ragflow import RAGFlow
rag = RAGFlow(api_key="<YOUR_API_KEY>", base_url="http://<YOUR_BASE_URL>:9380")
rag = RAGFlow(api_key="xxxxxx", base_url="http://xxx.xx.xx.xxx:9380")
for assi in rag.list_chats():
print(assi)
```
@ -940,7 +920,7 @@ The id of the created session is used to identify different sessions.
The name of the created session. Defaults to `"New session"`.
#### messages: `list[Message]`
#### messages: `List[Message]`
The messages of the created session.
- messages cannot be provided.
@ -963,7 +943,7 @@ The id of associated chat
```python
from ragflow import RAGFlow
rag = RAGFlow(api_key="<YOUR_API_KEY>", base_url="http://<YOUR_BASE_URL>:9380")
rag = RAGFlow(api_key="xxxxxx", base_url="http://xxx.xx.xx.xxx:9380")
assi = rag.list_chats(name="Miss R")
assi = assi[0]
sess = assi.create_session()
@ -985,7 +965,7 @@ no return
```python
from ragflow import RAGFlow
rag = RAGFlow(api_key="<YOUR_API_KEY>", base_url="http://<YOUR_BASE_URL>:9380")
rag = RAGFlow(api_key="xxxxxx", base_url="http://xxx.xx.xx.xxx:9380")
assi = rag.list_chats(name="Miss R")
assi = assi[0]
sess = assi.create_session("new_session")
@ -1023,7 +1003,7 @@ The id of the message. `id` is automatically generated. Defaults to `None`.
The content of the message. Defaults to `"Hi! I am your assistant, can I help you?"`.
#### reference: `list[Chunk]`
#### reference: `List[Chunk]`
The auto-generated reference of the message. Each `chunk` object includes the following attributes:
@ -1045,7 +1025,7 @@ The auto-generated reference of the message. Each `chunk` object includes the fo
A similarity score based on vector representations. This score is obtained by converting texts, words, or objects into vectors and then calculating the cosine similarity or other distance measures between these vectors to determine the similarity in vector space. A higher value indicates greater similarity in the vector space. Defaults to `None`.
- **term_similarity**: `float`
The similarity score based on terms or keywords. This score is calculated by comparing the similarity of key terms between texts or datasets, typically measuring how similar two words or phrases are in meaning or context. A higher value indicates a stronger similarity between terms. Defaults to `None`.
- **position**: `list[string]`
- **position**: `List[string]`
Indicates the position or index of keywords or specific terms within the text. An array is typically used to mark the location of keywords or specific elements, facilitating precise operations or analysis of the text. Defaults to `None`.
### Examples
@ -1053,7 +1033,7 @@ The auto-generated reference of the message. Each `chunk` object includes the fo
```python
from ragflow import RAGFlow
rag = RAGFlow(api_key="<YOUR_API_KEY>", base_url="http://<YOUR_BASE_URL>:9380")
rag = RAGFlow(api_key="xxxxxx", base_url="http://xxx.xx.xx.xxx:9380")
assi = rag.list_chats(name="Miss R")
assi = assi[0]
sess = assi.create_session()
@ -1084,12 +1064,12 @@ Chat.list_sessions(
desc: bool = True,
id: str = None,
name: str = None
) -> list[Session]
) -> List[Session]
```
### Returns
list[Session]
List[Session]
description: the list contains information about multiple session objects, with each dictionary containing information about one session.
### Examples
@ -1097,7 +1077,7 @@ description: the List contains information about multiple assistant object, with
```python
from ragflow import RAGFlow
rag = RAGFlow(api_key="<YOUR_API_KEY>", base_url="http://<YOUR_BASE_URL>:9380")
rag = RAGFlow(api_key="xxxxxx", base_url="http://xxx.xx.xx.xxx:9380")
assi = rag.list_chats(name="Miss R")
assi = assi[0]
for sess in assi.list_sessions():
@ -1140,7 +1120,7 @@ The name of the chat to be retrieved.
## Delete session
```python
Chat.delete_sessions(ids:list[str] = None)
Chat.delete_sessions(ids:List[str] = None)
```
### Returns
@ -1152,13 +1132,13 @@ no return
```python
from ragflow import RAGFlow
rag = RAGFlow(api_key="<YOUR_API_KEY>", base_url="http://<YOUR_BASE_URL>:9380")
rag = RAGFlow(api_key="xxxxxx", base_url="http://xxx.xx.xx.xxx:9380")
assi = rag.list_chats(name="Miss R")
assi = assi[0]
assi.delete_sessions(ids=["id_1","id_2"])
```
### Parameters
#### ids: `list[string]`
#### ids: `List[string]`
IDs of the sessions to be deleted.
- `None`

View File

@ -18,8 +18,8 @@ class Base(object):
pr[name] = value
return pr
def post(self, path, json, stream=False):
res = self.rag.post(path, json, stream=stream)
def post(self, path, json=None, stream=False, files=None):
res = self.rag.post(path, json, stream=stream,files=files)
return res
def get(self, path, params):

View File

@ -1,5 +1,7 @@
from typing import Optional, List
from transformers.models.bloom.modeling_bloom import bloom_gelu_back
from .document import Document
from .base import Base
@ -39,39 +41,27 @@ class DataSet(Base):
if res.get("code") != 0:
raise Exception(res["message"])
def upload_documents(self,document_list: List[dict]):
url = f"/dataset/{self.id}/document"
files = [("file",(ele["name"],ele["blob"])) for ele in document_list]
res = self.post(path=url,json=None,files=files)
res = res.json()
if res.get("code") != 0:
raise Exception(res.get("message"))
def list_docs(self, keywords: Optional[str] = None, offset: int = 0, limit: int = -1) -> List[Document]:
"""
List the documents in the dataset, optionally filtering by keywords, with pagination support.
Args:
keywords (Optional[str]): A string of keywords to filter the documents. Defaults to None.
offset (int): The starting point for pagination. Defaults to 0.
limit (int): The maximum number of documents to return. Defaults to -1 (no limit).
Returns:
List[Document]: A list of Document objects.
"""
# Construct the request payload for listing documents
payload = {
"knowledgebase_id": self.id,
"keywords": keywords,
"offset": offset,
"limit": limit
}
# Send the request to the server to list documents
res = self.get(f'/doc/dataset/{self.id}/documents', payload)
res_json = res.json()
# Handle response and error checking
if res_json.get("retmsg") != "success":
raise Exception(res_json.get("retmsg"))
# Parse the document data from the response
def list_documents(self, id: str = None, keywords: str = None, offset: int =1, limit: int = 1024, orderby: str = "create_time", desc: bool = True):
res = self.get(f"/dataset/{self.id}/info",params={"id": id,"keywords": keywords,"offset": offset,"limit": limit,"orderby": orderby,"desc": desc})
res = res.json()
documents = []
for doc_data in res_json["data"].get("docs", []):
doc = Document(self.rag, doc_data)
documents.append(doc)
if res.get("code") == 0:
for document in res["data"].get("docs"):
documents.append(Document(self.rag,document))
return documents
raise Exception(res["message"])
def delete_documents(self,ids: List[str] = None):
res = self.rm(f"/dataset/{self.id}/document",{"ids":ids})
res = res.json()
if res.get("code") != 0:
raise Exception(res["message"])

View File

@ -29,18 +29,14 @@ class Document(Base):
res_dict.pop(k)
super().__init__(rag, res_dict)
def save(self) -> bool:
def update(self,update_message:dict) -> bool:
"""
Save the document details to the server.
"""
res = self.post('/doc/save',
{"id": self.id, "name": self.name, "thumbnail": self.thumbnail, "knowledgebase_id": self.knowledgebase_id,
"parser_method": self.parser_method, "parser_config": self.parser_config.to_json(),
})
res = self.post(f'/dataset/{self.knowledgebase_id}/info/{self.id}',update_message)
res = res.json()
if res.get("retmsg") == "success":
return True
raise Exception(res["retmsg"])
if res.get("code") != 0:
raise Exception(res["message"])
def delete(self) -> bool:
"""
@ -60,8 +56,7 @@ class Document(Base):
:return: The downloaded document content in bytes.
"""
# Construct the URL for the API request using the document ID and knowledge base ID
res = self.get(f"/doc/{self.id}",
{"headers": self.rag.authorization_header, "id": self.id, "name": self.name, "stream": True})
res = self.get(f"/dataset/{self.knowledgebase_id}/document/{self.id}")
# Check the response status code to ensure the request was successful
if res.status_code == 200:

View File

@ -32,12 +32,12 @@ class RAGFlow:
self.api_url = f"{base_url}/api/{version}"
self.authorization_header = {"Authorization": "{} {}".format("Bearer", self.user_key)}
def post(self, path, json, stream=False):
res = requests.post(url=self.api_url + path, json=json, headers=self.authorization_header, stream=stream)
def post(self, path, json=None, stream=False, files=None):
res = requests.post(url=self.api_url + path, json=json, headers=self.authorization_header, stream=stream,files=files)
return res
def get(self, path, params=None):
res = requests.get(url=self.api_url + path, params=params, headers=self.authorization_header)
def get(self, path, params=None, json=None):
res = requests.get(url=self.api_url + path, params=params, headers=self.authorization_header,json=json)
return res
def delete(self, path, json):
@ -151,31 +151,7 @@ class RAGFlow:
return result_list
raise Exception(res["message"])
def create_document(self, ds: DataSet, name: str, blob: bytes) -> bool:
url = f"/doc/dataset/{ds.id}/documents/upload"
files = {
'file': (name, blob)
}
headers = {
'Authorization': f"Bearer {ds.rag.user_key}"
}
response = requests.post(self.api_url + url, files=files,
headers=headers)
if response.status_code == 200 and response.json().get('retmsg') == 'success':
return True
else:
raise Exception(f"Upload failed: {response.json().get('retmsg')}")
return False
def get_document(self, id: str = None, name: str = None) -> Document:
res = self.get("/doc/infos", {"id": id, "name": name})
res = res.json()
if res.get("retmsg") == "success":
return Document(self, res['data'])
raise Exception(res["retmsg"])
def async_parse_documents(self, doc_ids):
"""

View File

@ -21,22 +21,16 @@ class TestDocument(TestSdk):
# Step 2: Create a new document
# The blob is the actual file content or a placeholder in this case
name = "TestDocument.txt"
blob = b"Sample document content for ingestion test."
res = rag.create_document(ds, name=name, blob=blob)
blob_2 = b"test_2."
list_1 = []
list_1.append({"name":"Test_1.txt",
"blob":blob})
list_1.append({"name":"Test_2.txt",
"blob":blob_2})
res = ds.upload_documents(list_1)
# Ensure document ingestion was successful
assert res is True, f"Failed to create document, error: {res}"
def test_get_detail_document_with_success(self):
"""
Test getting a document's detail with success
"""
rag = RAGFlow(API_KEY, HOST_ADDRESS)
doc = rag.get_document(name="TestDocument.txt")
assert isinstance(doc, Document), f"Failed to get dataset, error: {doc}."
assert doc.name == "TestDocument.txt", "Name does not match"
assert res is None, f"Failed to create document, error: {res}"
def test_update_document_with_success(self):
"""
@ -44,12 +38,13 @@ class TestDocument(TestSdk):
Update name or parser_method are supported
"""
rag = RAGFlow(API_KEY, HOST_ADDRESS)
doc = rag.get_document(name="TestDocument.txt")
ds = rag.list_datasets(name="God")
ds = ds[0]
doc = ds.list_documents()
doc = doc[0]
if isinstance(doc, Document):
doc.parser_method = "manual"
doc.name = "manual.txt"
res = doc.save()
assert res is True, f"Failed to update document, error: {res}"
res = doc.update({"parser_method":"manual","name":"manual.txt"})
assert res is None, f"Failed to update document, error: {res}"
else:
assert False, f"Failed to get document, error: {doc}"
@ -61,8 +56,10 @@ class TestDocument(TestSdk):
rag = RAGFlow(API_KEY, HOST_ADDRESS)
# Retrieve a document
doc = rag.get_document(name="manual.txt")
ds = rag.list_datasets(name="God")
ds = ds[0]
doc = ds.list_documents(name="manual.txt")
doc = doc[0]
# Check if the retrieved document is of type Document
if isinstance(doc, Document):
# Download the document content and save it to a file
@ -81,7 +78,7 @@ class TestDocument(TestSdk):
# If the document retrieval fails, assert failure
assert False, f"Failed to get document, error: {doc}"
def test_list_all_documents_in_dataset_with_success(self):
def test_list_documents_in_dataset_with_success(self):
"""
Test list all documents into a dataset with success.
"""
@ -101,12 +98,10 @@ class TestDocument(TestSdk):
blob1 = b"Sample document content for ingestion test111."
name2 = "Test Document222.txt"
blob2 = b"Sample document content for ingestion test222."
rag.create_document(ds, name=name1, blob=blob1)
rag.create_document(ds, name=name2, blob=blob2)
list_1 = [{"name":name1,"blob":blob1},{"name":name2,"blob":blob2}]
ds.upload_documents(list_1)
for d in ds.list_docs(keywords="test", offset=0, limit=12):
assert isinstance(d, Document)
print(d)
assert isinstance(d, Document), "Failed to upload documents"
def test_delete_documents_in_dataset_with_success(self):
"""