Update generate-llmstxt-service.ts
This commit is contained in:
parent f5de803a9d
commit d4cf2269ed
@@ -53,6 +53,7 @@ export async function performGenerateLlmsTxt(options: GenerateLLMsTextServiceOpt
+      showFullText: showFullText,
     });

     return {
       success: true,
       data: {
@@ -86,9 +87,13 @@ export async function performGenerateLlmsTxt(options: GenerateLLMsTextServiceOpt
   let llmsFulltxt = `# ${url} llms-full.txt\n\n`;

-  // Scrape each URL
-  for (const url of urls) {
+  // Process URLs in batches of 10
+  for (let i = 0; i < urls.length; i += 10) {
+    const batch = urls.slice(i, i + 10);
+
+    const batchResults = await Promise.all(batch.map(async (url) => {
     _logger.debug(`Scraping URL: ${url}`);
     try {
       const document = await scrapeDocument(
         {
           url,
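As an aside, here is a minimal, self-contained sketch of the batch-of-10 pattern this hunk switches to. It is not the service code itself: scrapeOne is a hypothetical stand-in for the scrapeDocument/description steps, and only the slicing and Promise.all shape mirror the diff.

```ts
// Sketch of the batching pattern above (assumption: `scrapeOne` is a
// hypothetical worker that may throw for a bad URL).
async function scrapeInBatches(
  urls: string[],
  scrapeOne: (url: string) => Promise<string | null>,
): Promise<(string | null)[]> {
  const results: (string | null)[] = [];
  for (let i = 0; i < urls.length; i += 10) {
    const batch = urls.slice(i, i + 10);
    // Each batch of 10 runs concurrently; a failing URL resolves to null
    // instead of rejecting, so one bad page cannot fail the whole Promise.all.
    const batchResults = await Promise.all(
      batch.map(async (url) => {
        try {
          return await scrapeOne(url);
        } catch {
          return null;
        }
      }),
    );
    results.push(...batchResults);
  }
  return results;
}
```

The fixed batch size keeps at most 10 scrapes in flight at a time, which bounds concurrency without pulling in a pooling library.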
@@ -103,14 +108,11 @@ export async function performGenerateLlmsTxt(options: GenerateLLMsTextServiceOpt
         { onlyMainContent: true }
       );

-      if (!document) {
+      if (!document || !document.markdown) {
         logger.error(`Failed to scrape URL ${url}`);
-        continue;
+        return null;
       }

-      // Process scraped result
-      if (!document.markdown) continue;
-
       _logger.debug(`Generating description for ${document.metadata?.url}`);

       const completion = await openai.beta.chat.completions.parse({
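The continue-to-return-null change follows from the move into batch.map(async (url) => ...): inside the callback there is no enclosing loop, so continue would not compile; resolving to null marks a skipped URL, and the caller filters those out after Promise.all. A small sketch of that pattern, with illustrative names:

```ts
// Sketch only: inside an async map callback there is no loop to `continue`,
// so a skipped item is signalled by returning null instead.
async function keepHttpsUppercased(urls: string[]): Promise<string[]> {
  const results = await Promise.all(
    urls.map(async (url) => {
      if (!url.startsWith("https://")) return null; // "skip", like `continue`
      return url.toUpperCase();
    }),
  );
  // Nulls are dropped afterwards, as the batch-processing loop below does.
  return results.filter((r): r is string => r !== null);
}
```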
@@ -124,24 +126,33 @@ export async function performGenerateLlmsTxt(options: GenerateLLMsTextServiceOpt
         response_format: zodResponseFormat(DescriptionSchema, "description")
       });

       try {
         const parsedResponse = completion.choices[0].message.parsed;
-        const description = parsedResponse!.description;
-        const title = parsedResponse!.title;
-
-        llmstxt += `- [${title}](${document.metadata?.url}): ${description}\n`;
-        llmsFulltxt += `## ${title}\n${document.markdown}\n\n`;
+        return {
+          title: parsedResponse!.title,
+          description: parsedResponse!.description,
+          url: document.metadata?.url,
+          markdown: document.markdown
+        };
       } catch (error) {
-        logger.error(`Failed to parse LLM response for ${document.metadata?.url}`, { error });
-        continue;
+        logger.error(`Failed to process URL ${url}`, { error });
+        return null;
       }
-    }
+    }));
+
+    // Process successful results from batch
+    for (const result of batchResults) {
+      if (!result) continue;
+
+      // Update progress with both generated text and full text
+      llmstxt += `- [${result.title}](${result.url}): ${result.description}\n`;
+      llmsFulltxt += `## ${result.title}\n${result.markdown}\n\n`;
+    }
+
+    // Update progress after each batch
+    await updateGeneratedLlmsTxt(generationId, {
+      status: "processing",
+      generatedText: llmstxt,
+      fullText: llmsFulltxt,
+    });

   // After successful generation, save to cache
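For reference, a hedged sketch of the structured-output call the loop relies on (openai.beta.chat.completions.parse with zodResponseFormat, both visible in the diff). The schema fields mirror the title/description the code extracts; the model name and prompt text here are illustrative assumptions, not taken from the service.

```ts
import OpenAI from "openai";
import { zodResponseFormat } from "openai/helpers/zod";
import { z } from "zod";

// Illustrative schema matching the title/description fields the diff extracts.
const DescriptionSchema = z.object({
  title: z.string(),
  description: z.string(),
});

const openai = new OpenAI(); // reads OPENAI_API_KEY from the environment

async function describePage(markdown: string) {
  // The SDK validates the response against the zod schema, so
  // `message.parsed` is typed as { title, description } (or null on refusal).
  const completion = await openai.beta.chat.completions.parse({
    model: "gpt-4o-mini", // assumption; the service's model is not shown in this hunk
    messages: [
      {
        role: "user",
        content: `Write a short title and one-sentence description for this page:\n\n${markdown}`,
      },
    ],
    response_format: zodResponseFormat(DescriptionSchema, "description"),
  });
  return completion.choices[0].message.parsed;
}
```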