mirror of https://github.com/mendableai/firecrawl (synced 2025-08-12)
feat(crawl): URL deduplication
commit 6ecf24b85e
parent 25e94ffd28
@@ -143,6 +143,7 @@ export const scrapeOptions = z.object({
   }).optional(),
   skipTlsVerification: z.boolean().default(false),
   removeBase64Images: z.boolean().default(true),
+  deduplicateSimilarURLs: z.boolean().default(true),
 }).strict(strictMessage)
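The new deduplicateSimilarURLs flag defaults to true, so existing callers get deduplication without changing their requests. A minimal sketch of how the flag behaves, assuming a pared-down schema (the rest of scrapeOptions and the strictMessage argument are elided here):

import { z } from "zod";

// Pared-down sketch of the schema above; only the boolean flags are kept.
const scrapeOptions = z.object({
  skipTlsVerification: z.boolean().default(false),
  removeBase64Images: z.boolean().default(true),
  deduplicateSimilarURLs: z.boolean().default(true),
}).strict();

// Omitting the field opts the caller in to deduplication:
console.log(scrapeOptions.parse({}));
// => { skipTlsVerification: false, removeBase64Images: true, deduplicateSimilarURLs: true }

Because the schema is strict, a misspelled key (e.g. deduplicateSimilarUrls) is rejected instead of being silently ignored.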
@@ -90,6 +90,13 @@ export async function getThrottledJobs(teamId: string): Promise<string[]> {
   return await redisConnection.zrangebyscore("concurrency-limiter:" + teamId + ":throttled", Date.now(), Infinity);
 }
 
+export function normalizeURL(url: string): string {
+  const urlO = new URL(url);
+  urlO.search = "";
+  urlO.hash = "";
+  return urlO.href;
+}
+
 export async function lockURL(id: string, sc: StoredCrawl, url: string): Promise<boolean> {
   if (typeof sc.crawlerOptions?.limit === "number") {
     if (await redisConnection.scard("crawl:" + id + ":visited") >= sc.crawlerOptions.limit) {
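normalizeURL strips the query string and fragment while keeping scheme, host, and path, so tracking parameters and anchors no longer produce distinct visited-set entries. A quick sketch of what it returns (example URLs are illustrative):

import { normalizeURL } from "./crawl-redis"; // path depends on the caller's location

normalizeURL("https://example.com/docs?page=2&utm_source=x#intro");
// => "https://example.com/docs"
normalizeURL("https://example.com/docs");
// => "https://example.com/docs" (already normal)

// Invalid input throws, because the WHATWG URL constructor throws:
// normalizeURL("not a url"); // TypeError: Invalid URL

Note that the try/catch that previously wrapped this logic inside lockURL is removed in the next hunk, so an unparseable URL now propagates as an exception instead of being logged and used as-is.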
@@ -97,16 +104,42 @@ export async function lockURL(id: string, sc: StoredCrawl, url: string): Promise<boolean> {
     }
   }
 
-  try {
-    const urlO = new URL(url);
-    urlO.search = "";
-    urlO.hash = "";
-    url = urlO.href;
-  } catch (error) {
-    logger.warn("Failed to normalize URL " + JSON.stringify(url) + ": " + error);
-  }
-
-  const res = (await redisConnection.sadd("crawl:" + id + ":visited", url)) !== 0
+  url = normalizeURL(url);
+
+  let res: boolean;
+  if (!sc.scrapeOptions.deduplicateSimilarURLs) {
+    res = (await redisConnection.sadd("crawl:" + id + ":visited", url)) !== 0
+  } else {
+    const urlO = new URL(url);
+
+    // Construct two versions, one with www., one without
+    const urlWithWWW = new URL(urlO);
+    const urlWithoutWWW = new URL(urlO);
+    if (urlO.hostname.startsWith("www.")) {
+      urlWithoutWWW.hostname = urlWithWWW.hostname.slice(4);
+    } else {
+      urlWithWWW.hostname = "www." + urlWithoutWWW.hostname;
+    }
+
+    let permutations = [urlWithWWW, urlWithoutWWW];
+
+    // Construct more versions for http/https
+    permutations = permutations.flatMap(urlO => {
+      if (!["http:", "https:"].includes(urlO.protocol)) {
+        return [urlO];
+      }
+
+      const urlWithHTTP = new URL(urlO);
+      const urlWithHTTPS = new URL(urlO);
+      urlWithHTTP.protocol = "http:";
+      urlWithHTTPS.protocol = "https:";
+
+      return [urlWithHTTP, urlWithHTTPS];
+    });
+
+    res = (await redisConnection.sadd("crawl:" + id + ":visited", ...permutations.map(x => x.href))) === permutations.length;
+  }
+
   await redisConnection.expire("crawl:" + id + ":visited", 24 * 60 * 60, "NX");
   return res;
 }
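lockURL now treats a URL as already visited if any similar spelling of it has been seen: the hostname with and without a www. prefix, crossed with http: and https: for those two schemes, i.e. up to four permutations. Redis sadd reports how many members were newly added, so res is true only when every permutation was unseen. A standalone sketch of the expansion (generateURLPermutations is a hypothetical name used here for illustration):

// Sketch: the permutation logic from lockURL, extracted so it can run on its own.
function generateURLPermutations(url: string): string[] {
  const urlO = new URL(url);
  urlO.search = "";
  urlO.hash = "";

  // Two versions: one with www., one without
  const urlWithWWW = new URL(urlO);
  const urlWithoutWWW = new URL(urlO);
  if (urlO.hostname.startsWith("www.")) {
    urlWithoutWWW.hostname = urlWithWWW.hostname.slice(4);
  } else {
    urlWithWWW.hostname = "www." + urlWithoutWWW.hostname;
  }

  // Cross each with http:/https:; other schemes pass through unchanged.
  let permutations = [urlWithWWW, urlWithoutWWW];
  permutations = permutations.flatMap(u => {
    if (!["http:", "https:"].includes(u.protocol)) {
      return [u];
    }
    const urlWithHTTP = new URL(u);
    const urlWithHTTPS = new URL(u);
    urlWithHTTP.protocol = "http:";
    urlWithHTTPS.protocol = "https:";
    return [urlWithHTTP, urlWithHTTPS];
  });
  return permutations.map(u => u.href);
}

console.log(generateURLPermutations("https://example.com/page?ref=1"));
// => ["http://www.example.com/page", "https://www.example.com/page",
//     "http://example.com/page",     "https://example.com/page"]

Writing all permutations into crawl:<id>:visited also means a later attempt to lock any spelling of the same page fails immediately, without a normalization pass over the whole set.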
@@ -23,6 +23,7 @@ import {
   getCrawl,
   getCrawlJobs,
   lockURL,
+  normalizeURL,
 } from "../lib/crawl-redis";
 import { StoredCrawl } from "../lib/crawl-redis";
 import { addScrapeJob } from "./queue-jobs";
@@ -305,6 +306,11 @@ async function processJob(job: Job & { id: string }, token: string) {
 
   if (job.data.crawl_id) {
     const sc = (await getCrawl(job.data.crawl_id)) as StoredCrawl;
+
+    if (doc.metadata.url !== undefined && doc.metadata.sourceURL !== undefined && normalizeURL(doc.metadata.url) !== normalizeURL(doc.metadata.sourceURL)) {
+      logger.debug("Was redirected, locking new URL...");
+      await lockURL(job.data.crawl_id, sc, doc.metadata.url);
+    }
 
     await logJob({
       job_id: job.id as string,
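When a scrape follows a redirect, the final URL (doc.metadata.url) can differ from the URL that was originally locked (doc.metadata.sourceURL). Locking the redirect target as well prevents other jobs in the same crawl from scraping the destination page again under its own name. A sketch of the check with hypothetical metadata values:

import { normalizeURL } from "../lib/crawl-redis";

// Hypothetical metadata for a scrape that was redirected:
const metadata = {
  sourceURL: "http://example.com/old-path", // what the crawler requested
  url: "https://example.com/new-path",      // where it ended up
};

// Same comparison as in processJob; both sides are normalized, so a
// redirect that only adds a query string or fragment does not count.
if (metadata.url !== undefined && metadata.sourceURL !== undefined &&
    normalizeURL(metadata.url) !== normalizeURL(metadata.sourceURL)) {
  // Here the worker calls: await lockURL(job.data.crawl_id, sc, metadata.url);
}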
@@ -86,6 +86,9 @@ export interface CrawlScrapeOptions {
     country?: string;
     languages?: string[];
   };
+  skipTlsVerification?: boolean;
+  removeBase64Images?: boolean;
+  deduplicateSimilarURLs?: boolean;
 }
 
 export type Action = {
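On the SDK side, the three options surface as plain optional booleans. A usage sketch with a pared-down copy of the interface (the crawl call itself is omitted, since the rest of the SDK surface is not part of this diff):

// Pared-down mirror of the CrawlScrapeOptions fields added in this diff.
interface CrawlScrapeOptions {
  skipTlsVerification?: boolean;
  removeBase64Images?: boolean;
  deduplicateSimilarURLs?: boolean;
}

// Opt out of similar-URL deduplication for a site where http://host and
// https://www.host intentionally serve different content:
const scrapeOptions: CrawlScrapeOptions = {
  deduplicateSimilarURLs: false,
};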