mirror of
https://git.mirrors.martin98.com/https://github.com/infiniflow/ragflow.git
synced 2025-08-02 06:10:36 +08:00
Feat: add partition of file uploads (#5248)
### What problem does this PR solve? Partitions document uploads into batches of 20 to avoid the request-size limit error, allowing hundreds of documents to be uploaded in a single interaction. ### Type of change - [X] New Feature (non-breaking change which adds functionality)
This commit is contained in:
parent
4f2816c01c
commit
3d605a23fe
@ -248,27 +248,60 @@ export const useUploadNextDocument = () => {
|
||||
} = useMutation({
|
||||
mutationKey: ['uploadDocument'],
|
||||
mutationFn: async (fileList: UploadFile[]) => {
|
||||
const formData = new FormData();
|
||||
formData.append('kb_id', knowledgeId);
|
||||
fileList.forEach((file: any) => {
|
||||
formData.append('file', file);
|
||||
});
|
||||
const partitionedFileList = fileList.reduce<UploadFile[][]>(
|
||||
(acc, cur, index) => {
|
||||
const partIndex = Math.floor(index / 20); // Uploads 20 documents at a time
|
||||
if (!acc[partIndex]) {
|
||||
acc[partIndex] = [];
|
||||
}
|
||||
acc[partIndex].push(cur);
|
||||
return acc;
|
||||
},
|
||||
[],
|
||||
);
|
||||
|
||||
try {
|
||||
const ret = await kbService.document_upload(formData);
|
||||
const code = get(ret, 'data.code');
|
||||
if (code === 0) {
|
||||
message.success(i18n.t('message.uploaded'));
|
||||
}
|
||||
let allRet = [];
|
||||
for (const listPart of partitionedFileList) {
|
||||
const formData = new FormData();
|
||||
formData.append('kb_id', knowledgeId);
|
||||
listPart.forEach((file: any) => {
|
||||
formData.append('file', file);
|
||||
});
|
||||
|
||||
if (code === 0 || code === 500) {
|
||||
queryClient.invalidateQueries({ queryKey: ['fetchDocumentList'] });
|
||||
try {
|
||||
const ret = await kbService.document_upload(formData);
|
||||
allRet.push(ret);
|
||||
} catch (error) {
|
||||
allRet.push({ data: { code: 500 } });
|
||||
|
||||
const filenames = listPart.map((file: any) => file.name).join(', ');
|
||||
console.warn(error);
|
||||
console.warn('Error uploading files:', filenames);
|
||||
}
|
||||
return ret?.data;
|
||||
} catch (error) {
|
||||
console.warn(error);
|
||||
return {};
|
||||
}
|
||||
|
||||
const succeed = allRet.every((ret) => get(ret, 'data.code') === 0);
|
||||
const any500 = allRet.some((ret) => get(ret, 'data.code') === 500);
|
||||
|
||||
if (succeed) {
|
||||
message.success(i18n.t('message.uploaded'));
|
||||
}
|
||||
|
||||
if (succeed || any500) {
|
||||
queryClient.invalidateQueries({ queryKey: ['fetchDocumentList'] });
|
||||
}
|
||||
|
||||
const allData = {
|
||||
code: any500
|
||||
? 500
|
||||
: succeed
|
||||
? 0
|
||||
: allRet.filter((ret) => get(ret, 'data.code') !== 0)[0]?.data
|
||||
?.code,
|
||||
data: succeed,
|
||||
message: allRet.map((ret) => get(ret, 'data.message')).join('/n'),
|
||||
};
|
||||
return allData;
|
||||
},
|
||||
});
|
||||
|
||||
|
Loading…
x
Reference in New Issue
Block a user