Output re-structured

Aparup Ganguly 2025-02-20 02:21:43 +05:30
parent 3a4ef05a70
commit 21b22d9f5c


@@ -24,6 +24,7 @@ client = genai.Client(api_key=os.getenv("GOOGLE_API_KEY"))
 firecrawl_api_key = os.getenv("FIRECRAWL_API_KEY")
 serp_api_key = os.getenv("SERP_API_KEY")
+# Add this debug print (remember to remove it before committing)
 if not firecrawl_api_key:
     print(f"{Colors.RED}Warning: FIRECRAWL_API_KEY not found in environment variables{Colors.RESET}")
@@ -31,11 +32,7 @@ def search_google(query):
     """Search Google using SerpAPI and return top results."""
     print(f"{Colors.YELLOW}Searching Google for '{query}'...{Colors.RESET}")
     search = GoogleSearch({"q": query, "api_key": serp_api_key})
-    results = search.get_dict().get("organic_results", [])
-    if results:
-        print("Finding Results...")
-    return results
+    return search.get_dict().get("organic_results", [])

 def select_urls_with_gemini(company, objective, serp_results):
     """
@@ -43,61 +40,55 @@ def select_urls_with_gemini(company, objective, serp_results):
     Returns a list of URLs.
     """
     try:
-        print(f"{Colors.CYAN}Processing {len(serp_results)} search results...{Colors.RESET}")
         serp_data = [{"title": r.get("title"), "link": r.get("link"), "snippet": r.get("snippet")}
                      for r in serp_results if r.get("link")]
-        print(f"{Colors.CYAN}Prepared {len(serp_data)} valid results for processing{Colors.RESET}")

         prompt = (
-            "You are a URL selector that always responds with valid JSON. You select URLs from the SERP results relevant to the company and objective. Your response must be a JSON object with a 'selected_urls' array property containing strings.\n\n"
+            "Task: Select relevant URLs from search results.\n\n"
+            "Instructions:\n"
+            "1. Analyze the search results for information about the specified company\n"
+            "2. Select URLs that are most likely to contain the requested information\n"
+            "3. Return ONLY a JSON object with the following structure: {\"selected_urls\": [\"url1\", \"url2\"]}\n"
+            "4. Do not include social media links\n\n"
             f"Company: {company}\n"
-            f"Objective: {objective}\n"
-            f"SERP Results: {json.dumps(serp_data)}\n\n"
-            "Return a JSON object with a property 'selected_urls' that contains an array "
-            "of URLs most likely to help meet the objective. Add a /* to the end of the URL if you think it should search all of the pages in the site. Do not return any social media links. For example: {\"selected_urls\": [\"https://example.com\", \"https://example2.com\"]}"
+            f"Information Needed: {objective}\n"
+            f"Search Results: {json.dumps(serp_data, indent=2)}\n\n"
+            "Response Format: {\"selected_urls\": [\"https://example.com\", \"https://example2.com\"]}"
         )

-        print(f"{Colors.CYAN}Calling Gemini API...{Colors.RESET}")
         response = client.models.generate_content(
             model="gemini-2.0-flash",
             contents=prompt
         )

-        # Get response text and clean it up
-        response_text = response.text.strip()
-
-        # Remove markdown code block if present
-        if response_text.startswith('```'):
-            response_text = response_text.split('\n', 1)[1]  # Remove first line
-        if response_text.endswith('```'):
-            response_text = response_text.rsplit('\n', 1)[0]  # Remove last line
-        if response_text.startswith('json'):
-            response_text = response_text.split('\n', 1)[1]  # Remove "json" line
-        response_text = response_text.strip()
+        # Clean the response text
+        cleaned_response = response.text.strip()
+        if cleaned_response.startswith('```'):
+            cleaned_response = cleaned_response.split('```')[1]
+            if cleaned_response.startswith('json'):
+                cleaned_response = cleaned_response[4:]
+        cleaned_response = cleaned_response.strip()

         try:
-            result = json.loads(response_text)
+            # Parse JSON response
+            result = json.loads(cleaned_response)
             if isinstance(result, dict) and "selected_urls" in result:
                 urls = result["selected_urls"]
             else:
-                urls = [line.strip() for line in response_text.split('\n')
+                # Fallback to text parsing
+                urls = [line.strip() for line in cleaned_response.split('\n')
                         if line.strip().startswith(('http://', 'https://'))]
         except json.JSONDecodeError:
-            print(f"{Colors.YELLOW}Failed to parse JSON, falling back to text parsing{Colors.RESET}")
-            # If JSON parsing fails, fall back to text parsing
-            urls = [line.strip() for line in response_text.split('\n')
+            # Fallback to text parsing
+            urls = [line.strip() for line in cleaned_response.split('\n')
                     if line.strip().startswith(('http://', 'https://'))]

-        # Clean up URLs - remove wildcards and trailing slashes
+        # Clean up URLs
         cleaned_urls = [url.replace('/*', '').rstrip('/') for url in urls]
         cleaned_urls = [url for url in cleaned_urls if url]

         if not cleaned_urls:
-            print(f"{Colors.YELLOW}No valid URLs found.{Colors.RESET}")
+            print(f"{Colors.YELLOW}No valid URLs found in response.{Colors.RESET}")
             return []

         print(f"{Colors.CYAN}Selected URLs for extraction:{Colors.RESET}")
@@ -111,11 +102,6 @@ def select_urls_with_gemini(company, objective, serp_results):
         return []

 def extract_company_info(urls, prompt, company, api_key):
-    if not api_key:
-        print(f"{Colors.RED}Error: Firecrawl API key is missing or invalid{Colors.RESET}")
-        return None
     """Use requests to call Firecrawl's extract endpoint with selected URLs."""
     print(f"{Colors.YELLOW}Extracting structured data from the provided URLs using Firecrawl...{Colors.RESET}")
@@ -161,13 +147,15 @@ def extract_company_info(urls, prompt, company, api_key):
         print(f"{Colors.RED}Failed to extract data: {e}{Colors.RESET}")
         return None

-def poll_firecrawl_result(extraction_id, api_key, interval=5, max_attempts=36):
+def poll_firecrawl_result(extraction_id, api_key, interval=10, max_attempts=60):
     """Poll Firecrawl API to get the extraction result."""
     url = f"https://api.firecrawl.dev/v1/extract/{extraction_id}"
     headers = {
         'Authorization': f'Bearer {api_key}'
     }
+    print(f"{Colors.YELLOW}Waiting for extraction to complete...{Colors.RESET}")

     for attempt in range(1, max_attempts + 1):
         try:
             response = requests.get(url, headers=headers, timeout=30)
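
Note: the polling budget grows from 5 s × 36 attempts = 180 s (3 minutes) to 10 s × 60 attempts = 600 s (10 minutes), ignoring per-request latency. The arithmetic, for reference:

```python
# Maximum wall-clock wait for poll_firecrawl_result, ignoring request latency.
old_budget = 5 * 36    # 180 seconds = 3 minutes
new_budget = 10 * 60   # 600 seconds = 10 minutes
print(f"polling budget: {old_budget}s -> {new_budget}s")
```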
@@ -179,16 +167,21 @@ def poll_firecrawl_result(extraction_id, api_key, interval=5, max_attempts=36):
                     print(json.dumps(data['data'], indent=2))
                     return data['data']
                 elif data.get('success') and not data.get('data'):
+                    if attempt % 6 == 0:  # Show progress every minute
+                        print(f"{Colors.YELLOW}Still processing... (attempt {attempt}/{max_attempts}){Colors.RESET}")
                     time.sleep(interval)
                 else:
                     print(f"{Colors.RED}API Error: {data.get('error', 'No error message provided')}{Colors.RESET}")
                     return None
-        except requests.exceptions.RequestException:
+        except requests.exceptions.RequestException as e:
+            print(f"{Colors.RED}Request error: {str(e)}{Colors.RESET}")
             return None
-        except json.JSONDecodeError:
+        except json.JSONDecodeError as e:
+            print(f"{Colors.RED}JSON parsing error: {str(e)}{Colors.RESET}")
             return None
-        except Exception:
+        except Exception as e:
+            print(f"{Colors.RED}Unexpected error: {str(e)}{Colors.RESET}")
             return None

     print(f"{Colors.RED}Max polling attempts reached. Extraction did not complete in time.{Colors.RESET}")
@@ -198,13 +191,11 @@ def main():
     company = input(f"{Colors.BLUE}Enter the company name: {Colors.RESET}")
     objective = input(f"{Colors.BLUE}Enter what information you want about the company: {Colors.RESET}")

-    # Make the search query more specific
     serp_results = search_google(f"{company}")
     if not serp_results:
         print(f"{Colors.RED}No search results found.{Colors.RESET}")
         return

-    # Use Gemini 2.0 Flash for URL selection
     selected_urls = select_urls_with_gemini(company, objective, serp_results)
     if not selected_urls: