import { InternalOptions } from "../scraper/scrapeURL";
import { ScrapeOptions } from "../controllers/v1/types";
import { WebCrawler } from "../scraper/WebScraper/crawler";
import { redisConnection } from "../services/queue-service";
import { logger } from "./logger";

/** Shared state for a crawl, persisted in Redis and used by all of its jobs. */
export type StoredCrawl = {
  originUrl?: string;
  crawlerOptions: any;
  scrapeOptions: Omit<ScrapeOptions, "timeout">;
  internalOptions: InternalOptions;
  team_id: string;
  plan?: string;
  robots?: string;
  cancelled?: boolean;
  createdAt: number;
};

// Persist the crawl state; the key expires after 24 hours (TTL is only set if none exists yet).
export async function saveCrawl(id: string, crawl: StoredCrawl) {
  await redisConnection.set("crawl:" + id, JSON.stringify(crawl));
  await redisConnection.expire("crawl:" + id, 24 * 60 * 60, "NX");
}

export async function getCrawl(id: string): Promise<StoredCrawl | null> {
  const x = await redisConnection.get("crawl:" + id);

  if (x === null) {
    return null;
  }

  return JSON.parse(x);
}

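/*
 * Usage sketch (illustrative only; the id and the option values below are
 * assumptions, not something this module prescribes):
 *
 *   const id = crypto.randomUUID();
 *   await saveCrawl(id, {
 *     originUrl: "https://example.com",
 *     crawlerOptions: { limit: 100 },
 *     scrapeOptions: { formats: ["markdown"] } as any,
 *     internalOptions: {},
 *     team_id: "team_123",
 *     createdAt: Date.now(),
 *   });
 *   const stored = await getCrawl(id); // null once the 24h TTL has expired
 */
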
// Compute the absolute time at which the crawl key expires, based on its remaining TTL.
export async function getCrawlExpiry(id: string): Promise<Date> {
  const d = new Date();
  const ttl = await redisConnection.pttl("crawl:" + id);
  d.setMilliseconds(d.getMilliseconds() + ttl);
  d.setMilliseconds(0);
  return d;
}

export async function addCrawlJob(id: string, job_id: string) {
  await redisConnection.sadd("crawl:" + id + ":jobs", job_id);
  await redisConnection.expire("crawl:" + id + ":jobs", 24 * 60 * 60, "NX");
}

export async function addCrawlJobs(id: string, job_ids: string[]) {
  await redisConnection.sadd("crawl:" + id + ":jobs", ...job_ids);
  await redisConnection.expire("crawl:" + id + ":jobs", 24 * 60 * 60, "NX");
}

// Record a finished job both in the completion set (used by the finished check)
// and in an ordered list (used for paginated retrieval of completed jobs).
export async function addCrawlJobDone(id: string, job_id: string) {
  await redisConnection.sadd("crawl:" + id + ":jobs_done", job_id);
  await redisConnection.lpush("crawl:" + id + ":jobs_done_ordered", job_id);
  await redisConnection.expire("crawl:" + id + ":jobs_done", 24 * 60 * 60, "NX");
  await redisConnection.expire("crawl:" + id + ":jobs_done_ordered", 24 * 60 * 60, "NX");
}

export async function getDoneJobsOrderedLength(id: string): Promise<number> {
  return await redisConnection.llen("crawl:" + id + ":jobs_done_ordered");
}

export async function getDoneJobsOrdered(id: string, start = 0, end = -1): Promise<string[]> {
  return await redisConnection.lrange("crawl:" + id + ":jobs_done_ordered", start, end);
}

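/*
 * Pagination sketch over completed jobs (illustrative only; `crawlId` is an
 * assumption). Entries are LPUSHed, so index 0 is the most recent completion:
 *
 *   const total = await getDoneJobsOrderedLength(crawlId);
 *   const latestTen = await getDoneJobsOrdered(crawlId, 0, 9);
 */
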
// A crawl is finished once every registered job also appears in the done set.
export async function isCrawlFinished(id: string) {
  return (await redisConnection.scard("crawl:" + id + ":jobs_done")) === (await redisConnection.scard("crawl:" + id + ":jobs"));
}

export async function isCrawlFinishedLocked(id: string) {
  return (await redisConnection.exists("crawl:" + id + ":finish"));
}

// Mark the crawl as finished exactly once: SETNX ensures that only the first
// caller to observe all jobs as done gets `true` back.
export async function finishCrawl(id: string) {
  if (await isCrawlFinished(id)) {
    const set = await redisConnection.setnx("crawl:" + id + ":finish", "yes");
    if (set === 1) {
      await redisConnection.expire("crawl:" + id + ":finish", 24 * 60 * 60);
    }
    return set === 1;
  }
}

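/*
 * Worker-side completion sketch (illustrative only; `crawlId` and `jobId` are
 * assumptions). Once every registered job is done, exactly one caller of
 * finishCrawl gets `true` thanks to the SETNX above:
 *
 *   await addCrawlJobDone(crawlId, jobId);
 *   if (await finishCrawl(crawlId)) {
 *     // run finish-once logic here (e.g. signal that the crawl completed)
 *   }
 */
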
export async function getCrawlJobs(id: string): Promise<string[]> {
  return await redisConnection.smembers("crawl:" + id + ":jobs");
}

export async function getThrottledJobs(teamId: string): Promise<string[]> {
  return await redisConnection.zrangebyscore("concurrency-limiter:" + teamId + ":throttled", Date.now(), Infinity);
}

// Mark a URL as visited for this crawl. Returns false if the crawl limit has
// been reached or the (normalized) URL was already visited.
export async function lockURL(id: string, sc: StoredCrawl, url: string): Promise<boolean> {
  if (typeof sc.crawlerOptions?.limit === "number") {
    if (await redisConnection.scard("crawl:" + id + ":visited") >= sc.crawlerOptions.limit) {
      return false;
    }
  }

  try {
    // Normalize: drop query string and fragment so variants of the same page dedupe together.
    const urlO = new URL(url);
    urlO.search = "";
    urlO.hash = "";
    url = urlO.href;
  } catch (error) {
    logger.warn("Failed to normalize URL " + JSON.stringify(url) + ": " + error);
  }

  // SADD returns 0 when the member already exists, i.e. the URL was seen before.
  const res = (await redisConnection.sadd("crawl:" + id + ":visited", url)) !== 0;
  await redisConnection.expire("crawl:" + id + ":visited", 24 * 60 * 60, "NX");
  return res;
}

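/*
 * Deduplication sketch before enqueueing a discovered page (illustrative only;
 * `enqueueScrape` is a hypothetical helper, not part of this module):
 *
 *   if (await lockURL(crawlId, sc, discoveredUrl)) {
 *     await enqueueScrape(crawlId, discoveredUrl); // first time this URL is seen
 *   } // otherwise it was already visited, or the crawl limit was reached
 */
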
/// NOTE: does not check the crawl limit. Only use if the limit is checked beforehand, e.g. with a sitemap.
export async function lockURLs(id: string, urls: string[]): Promise<boolean> {
  urls = urls.map(url => {
    try {
      const urlO = new URL(url);
      urlO.search = "";
      urlO.hash = "";
      return urlO.href;
    } catch (error) {
      logger.warn("Failed to normalize URL " + JSON.stringify(url) + ": " + error);
    }

    return url;
  });

  const res = (await redisConnection.sadd("crawl:" + id + ":visited", ...urls)) !== 0;
  await redisConnection.expire("crawl:" + id + ":visited", 24 * 60 * 60, "NX");
  return res;
}

// Reconstruct a WebCrawler from stored crawl state, re-applying the original
// crawler options and any robots.txt captured for the crawl.
export function crawlToCrawler(id: string, sc: StoredCrawl): WebCrawler {
  const crawler = new WebCrawler({
    jobId: id,
    initialUrl: sc.originUrl!,
    includes: sc.crawlerOptions?.includes ?? [],
    excludes: sc.crawlerOptions?.excludes ?? [],
    maxCrawledLinks: sc.crawlerOptions?.maxCrawledLinks ?? 1000,
    maxCrawledDepth: sc.crawlerOptions?.maxDepth ?? 10,
    limit: sc.crawlerOptions?.limit ?? 10000,
    generateImgAltText: sc.crawlerOptions?.generateImgAltText ?? false,
    allowBackwardCrawling: sc.crawlerOptions?.allowBackwardCrawling ?? false,
    allowExternalContentLinks: sc.crawlerOptions?.allowExternalContentLinks ?? false,
  });

  if (sc.robots !== undefined) {
    try {
      crawler.importRobotsTxt(sc.robots);
    } catch (_) {}
  }

  return crawler;
}

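/*
 * Sketch: rebuilding the crawler for an existing crawl (illustrative only;
 * `crawlId` is an assumption):
 *
 *   const sc = await getCrawl(crawlId);
 *   if (sc !== null && !sc.cancelled) {
 *     const crawler = crawlToCrawler(crawlId, sc);
 *     // crawler carries the stored include/exclude rules, depth and link
 *     // limits, and the robots.txt captured for the original crawl.
 *   }
 */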