Nicolas 2024-07-03 20:18:11 -03:00
parent 5ecd9cb6f5
commit 32849b017f
3 changed files with 67 additions and 48 deletions

View File

@@ -117,7 +117,7 @@ export async function scrapWithFireEngine({
   } finally {
     const endTime = Date.now();
     logParams.time_taken_seconds = (endTime - logParams.startTime) / 1000;
-    await logScrape(logParams);
+    await logScrape(logParams, pageOptions);
   }
 }
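This first change threads the request's pageOptions from the fire-engine scraper into the logger so the redaction below can see the request headers. Only the headers field matters for that check; a minimal sketch of the shape being relied on, assumed from usage in this commit rather than from lib/entities, which the diff does not show:

// Assumed shape: PageOptions in lib/entities carries at least an
// optional headers map; any other fields are irrelevant to redaction.
type PageOptionsHeadersOnly = {
  headers?: Record<string, string>;
};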

View File

@ -1,4 +1,4 @@
import { ExtractorOptions } from './../../lib/entities'; import { ExtractorOptions } from "./../../lib/entities";
import { supabase_service } from "../supabase"; import { supabase_service } from "../supabase";
import { FirecrawlJob } from "../../types"; import { FirecrawlJob } from "../../types";
import { posthog } from "../posthog"; import { posthog } from "../posthog";
@ -11,6 +11,16 @@ export async function logJob(job: FirecrawlJob) {
return; return;
} }
// Redact any pages that have an authorization header
if (
job.pageOptions &&
job.pageOptions.headers &&
job.pageOptions.headers["Authorization"]
) {
job.pageOptions.headers["Authorization"] = "REDACTED";
job.docs = [{ content: "REDACTED DUE TO AUTHORIZATION HEADER", html: "REDACTED DUE TO AUTHORIZATION HEADER" }];
}
const { data, error } = await supabase_service const { data, error } = await supabase_service
.from("firecrawl_jobs") .from("firecrawl_jobs")
.insert([ .insert([
@ -27,16 +37,15 @@ export async function logJob(job: FirecrawlJob) {
page_options: job.pageOptions, page_options: job.pageOptions,
origin: job.origin, origin: job.origin,
extractor_options: job.extractor_options, extractor_options: job.extractor_options,
num_tokens: job.num_tokens num_tokens: job.num_tokens,
}, },
]); ]);
if (process.env.POSTHOG_API_KEY) { if (process.env.POSTHOG_API_KEY) {
let phLog = { let phLog = {
distinctId: "from-api", //* To identify this on the group level, setting distinctid to a static string per posthog docs: https://posthog.com/docs/product-analytics/group-analytics#advanced-server-side-only-capturing-group-events-without-a-user distinctId: "from-api", //* To identify this on the group level, setting distinctid to a static string per posthog docs: https://posthog.com/docs/product-analytics/group-analytics#advanced-server-side-only-capturing-group-events-without-a-user
...(job.team_id !== "preview" && { ...(job.team_id !== "preview" && {
groups: { team: job.team_id } groups: { team: job.team_id },
}), //* Identifying event on this team }), //* Identifying event on this team
event: "job-logged", event: "job-logged",
properties: { properties: {
@ -51,9 +60,9 @@ export async function logJob(job: FirecrawlJob) {
page_options: job.pageOptions, page_options: job.pageOptions,
origin: job.origin, origin: job.origin,
extractor_options: job.extractor_options, extractor_options: job.extractor_options,
num_tokens: job.num_tokens num_tokens: job.num_tokens,
}, },
} };
posthog.capture(phLog); posthog.capture(phLog);
} }
if (error) { if (error) {
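The redaction in logJob fires only when the headers object contains the exact key "Authorization". A standalone sketch of the same check, written with optional chaining for readability; the FirecrawlJob fields used here are taken from this diff, and the helper name is hypothetical:

// Hypothetical helper equivalent to the block added above.
function redactAuthorizedJob(job: FirecrawlJob): void {
  // Matches the exact header key "Authorization", as in the diff.
  if (job.pageOptions?.headers?.["Authorization"]) {
    // Scrub the credential itself...
    job.pageOptions.headers["Authorization"] = "REDACTED";
    // ...and replace the scraped documents, since their content may only
    // have been reachable because of that credential.
    job.docs = [
      {
        content: "REDACTED DUE TO AUTHORIZATION HEADER",
        html: "REDACTED DUE TO AUTHORIZATION HEADER",
      },
    ];
  }
}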

View File

@@ -1,17 +1,27 @@
 import "dotenv/config";
 import { ScrapeLog } from "../../types";
 import { supabase_service } from "../supabase";
+import { PageOptions } from "../../lib/entities";
 
-export async function logScrape(scrapeLog: ScrapeLog) {
+export async function logScrape(
+  scrapeLog: ScrapeLog,
+  pageOptions?: PageOptions
+) {
   try {
     // Only log jobs in production
     // if (process.env.ENV !== "production") {
     //   return;
     // }
 
+    // Redact any pages that have an authorization header
+    if (
+      pageOptions &&
+      pageOptions.headers &&
+      pageOptions.headers["Authorization"]
+    ) {
+      scrapeLog.html = "REDACTED DUE TO AUTHORIZATION HEADER";
+    }
-    const { data, error } = await supabase_service
-      .from("scrape_logs")
-      .insert([
+    const { data, error } = await supabase_service.from("scrape_logs").insert([
       {
         url: scrapeLog.url,
         scraper: scrapeLog.scraper,
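For reference, a hedged sketch of the new logScrape call path end to end; the URL, scraper name, and token are placeholders, and the full ScrapeLog and PageOptions shapes are assumed from this diff:

// Hypothetical call site mirroring the scrapWithFireEngine change above.
const scrapeLog = {
  url: "https://example.com",
  scraper: "fire-engine",
  html: "<html>...</html>",
} as ScrapeLog; // other ScrapeLog fields omitted for brevity
await logScrape(scrapeLog, {
  headers: { Authorization: "Bearer <token>" },
} as PageOptions);
// Because an Authorization header was present, scrapeLog.html was
// overwritten with "REDACTED DUE TO AUTHORIZATION HEADER" before the
// row was inserted into scrape_logs.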