Add examples/ Llama 4 Maverick Crawler
This commit is contained in:
parent 66e65d9422
commit 17ea3ff355
5 examples/llama-4-maverick-web-crawler/.env.example Normal file
@@ -0,0 +1,5 @@
# Firecrawl API Key
FIRECRAWL_API_KEY=your_firecrawl_api_key_here

# Together AI API Key
TOGETHER_API_KEY=your_together_api_key_here
48 examples/llama-4-maverick-web-crawler/.gitignore vendored Normal file
@@ -0,0 +1,48 @@
# Dependencies
node_modules/
venv/
.env
.env.local
.env.*.local

# Build outputs
dist/
build/
*.pyc
__pycache__/
.cache/
.pytest_cache/

# IDE and editor files
.idea/
.vscode/
*.swp
*.swo
.DS_Store
Thumbs.db

# Logs
*.log
npm-debug.log*
yarn-debug.log*
yarn-error.log*

# Coverage and test reports
coverage/
.coverage
htmlcov/

# Temporary files
*.tmp
*.temp
.tmp/
temp/

# System files
.DS_Store
.DS_Store?
._*
.Spotlight-V100
.Trashes
ehthumbs.db
Thumbs.db
78 examples/llama-4-maverick-web-crawler/README.md Normal file
@@ -0,0 +1,78 @@
# Llama 4 Maverick Web Crawler

This project combines the power of Firecrawl for web crawling and Llama 4 Maverick (via Together AI) for intelligent content analysis. It helps you find specific information on websites by crawling pages and analyzing their content using advanced language models.

## Features

- Intelligent URL mapping and relevance ranking
- Content analysis using the Llama 4 Maverick model
- Automatic extraction of relevant information
- Color-coded console output for better readability

## Prerequisites

- Python 3.8 or higher
- Firecrawl API key
- Together AI API key

## Installation

1. Clone this repository
2. Install the required packages:

```bash
pip install -r requirements.txt
```

3. Copy the `.env.example` file to `.env`:

```bash
cp .env.example .env
```

4. Add your API keys to the `.env` file:

```
FIRECRAWL_API_KEY=your_firecrawl_api_key_here
TOGETHER_API_KEY=your_together_api_key_here
```

## Usage

Run the script using:

```bash
python llama4-maverick-web-crawler.py
```

You will be prompted to:

1. Enter the website URL to crawl
2. Specify your objective/what information you're looking for

The script will then:

1. Map the website and find relevant pages
2. Analyze the content using Llama 4 Maverick
3. Extract and return the requested information in JSON format (see the sample output after the example below)

## Example

```bash
Enter the website to crawl: https://example.com
Enter your objective: Find the company's contact information
```
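
The shape of the returned JSON depends entirely on your objective. For the contact-information objective above, a successful run might end with output along these lines (illustrative values only, not real data):

```json
{
  "email": "contact@example.com",
  "phone": "+1-555-0100",
  "address": "123 Example Street, Example City"
}
```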

## Error Handling

The script includes comprehensive error handling and will provide clear feedback if:

- API keys are missing
- The website is inaccessible
- No relevant information is found
- Any other errors occur during execution

## Dependencies

- firecrawl: For web crawling and content extraction
- together: For accessing the Llama 4 Maverick model
- python-dotenv: For environment variable management

## License

[Your chosen license]
239 examples/llama-4-maverick-web-crawler/llama4-maverick-web-crawler.py Normal file
@@ -0,0 +1,239 @@
import os
from firecrawl import FirecrawlApp
import json
from dotenv import load_dotenv
from together import Together

# ANSI color codes
class Colors:
    CYAN = '\033[96m'
    YELLOW = '\033[93m'
    GREEN = '\033[92m'
    RED = '\033[91m'
    MAGENTA = '\033[95m'
    BLUE = '\033[94m'
    RESET = '\033[0m'

# Load environment variables
load_dotenv()

# Retrieve API keys from environment variables
firecrawl_api_key = os.getenv("FIRECRAWL_API_KEY")
together_api_key = os.getenv("TOGETHER_API_KEY")

# Initialize the FirecrawlApp and Together client
app = FirecrawlApp(api_key=firecrawl_api_key)
client = Together(api_key=together_api_key)

# Find the page that most likely contains the objective
def find_relevant_page_via_map(objective, url, app, client):
    try:
        print(f"{Colors.CYAN}Understood. The objective is: {objective}{Colors.RESET}")
        print(f"{Colors.CYAN}Initiating search on the website: {url}{Colors.RESET}")

        map_prompt = f"""
        The map function generates a list of URLs from a website and it accepts a search parameter. Based on the objective of: {objective}, come up with a 1-2 word search parameter that will help us find the information we need. Only respond with 1-2 words nothing else.
        """

        print(f"{Colors.YELLOW}Analyzing objective to determine optimal search parameter...{Colors.RESET}")
        completion = client.chat.completions.create(
            model="meta-llama/Llama-4-Maverick-17B-128E-Instruct-FP8",
            messages=[
                {
                    "role": "user",
                    "content": map_prompt
                }
            ]
        )

        map_search_parameter = completion.choices[0].message.content
        print(f"{Colors.GREEN}Optimal search parameter identified: {map_search_parameter}{Colors.RESET}")

        print(f"{Colors.YELLOW}Mapping website using the identified search parameter...{Colors.RESET}")
        map_website = app.map_url(url, params={"search": map_search_parameter})

        # Debug print to see the response structure
        print(f"{Colors.MAGENTA}Debug - Map response structure: {json.dumps(map_website, indent=2)}{Colors.RESET}")

        print(f"{Colors.GREEN}Website mapping completed successfully.{Colors.RESET}")

        # Handle the response based on its structure
        if isinstance(map_website, dict):
            # Assuming the links are in a 'urls' or similar key
            links = map_website.get('urls', []) or map_website.get('links', [])
        elif isinstance(map_website, str):
            try:
                parsed = json.loads(map_website)
                links = parsed.get('urls', []) or parsed.get('links', [])
            except json.JSONDecodeError:
                links = []
        else:
            links = map_website if isinstance(map_website, list) else []

        if not links:
            print(f"{Colors.RED}No links found in map response.{Colors.RESET}")
            return None

        rank_prompt = f"""
        Given this list of URLs and the objective: {objective}
        Analyze each URL and rank the top 3 most relevant ones that are most likely to contain the information we need.

        IMPORTANT: You must ONLY return a JSON array with exactly 3 objects. Do not include ANY explanation text.
        Do not include markdown formatting or ```json blocks. Return ONLY the raw JSON array.

        Each object in the array must have exactly these fields:
        - "url": the full URL
        - "relevance_score": number between 0-100
        - "reason": brief explanation of why this URL is relevant

        URLs to analyze:
        {json.dumps(links, indent=2)}
        """

        print(f"{Colors.YELLOW}Ranking URLs by relevance to objective...{Colors.RESET}")
        completion = client.chat.completions.create(
            model="meta-llama/Llama-4-Maverick-17B-128E-Instruct-FP8",
            messages=[
                {
                    "role": "user",
                    "content": rank_prompt
                }
            ]
        )

        # Debug print to see LLM's raw response
        print(f"{Colors.MAGENTA}Debug - LLM raw response:{Colors.RESET}")
        print(f"{Colors.MAGENTA}{completion.choices[0].message.content}{Colors.RESET}")

        try:
            # Try to clean the response by stripping any potential markdown or extra whitespace
            cleaned_response = completion.choices[0].message.content.strip()
            if cleaned_response.startswith("```json"):
                cleaned_response = cleaned_response.split("```json")[1]
            if cleaned_response.endswith("```"):
                cleaned_response = cleaned_response.rsplit("```", 1)[0]
            cleaned_response = cleaned_response.strip()

            ranked_results = json.loads(cleaned_response)

            # Validate the structure of the results
            if not isinstance(ranked_results, list):
                raise ValueError("Response is not a list")

            for result in ranked_results:
                if not all(key in result for key in ["url", "relevance_score", "reason"]):
                    raise ValueError("Response items missing required fields")

            links = [result["url"] for result in ranked_results]

            # Print detailed ranking info
            print(f"{Colors.CYAN}Top 3 ranked URLs:{Colors.RESET}")
            for result in ranked_results:
                print(f"{Colors.GREEN}URL: {result['url']}{Colors.RESET}")
                print(f"{Colors.YELLOW}Relevance Score: {result['relevance_score']}{Colors.RESET}")
                print(f"{Colors.BLUE}Reason: {result['reason']}{Colors.RESET}")
                print("---")

            if not links:
                print(f"{Colors.RED}No relevant links identified.{Colors.RESET}")
                return None

        # ValueError is caught here too, so the validation failures raised above are reported as parsing errors
        except (json.JSONDecodeError, KeyError, ValueError) as e:
            print(f"{Colors.RED}Error parsing ranked results: {str(e)}{Colors.RESET}")
            return None

        print(f"{Colors.GREEN}Located {len(links)} relevant links.{Colors.RESET}")
        return links

    except Exception as e:
        print(f"{Colors.RED}Error encountered during relevant page identification: {str(e)}{Colors.RESET}")
        return None

# Scrape the top 3 pages and see if the objective is met; if so, return the result as JSON, else return None
def find_objective_in_top_pages(map_website, objective, app, client):
    try:
        # Get top 3 links from the map result
        if not map_website:
            print(f"{Colors.RED}No links found to analyze.{Colors.RESET}")
            return None

        top_links = map_website[:3]
        print(f"{Colors.CYAN}Proceeding to analyze top {len(top_links)} links: {top_links}{Colors.RESET}")

        for link in top_links:
            print(f"{Colors.YELLOW}Initiating scrape of page: {link}{Colors.RESET}")
            scrape_result = app.scrape_url(link, params={'formats': ['markdown']})
            print(f"{Colors.GREEN}Page scraping completed successfully.{Colors.RESET}")

            check_prompt = f"""
            Given the following scraped content and objective, determine if the objective is met.

            IMPORTANT: You must ONLY return one of two possible responses:
            1. If objective is NOT met, respond with exactly: Objective not met
            2. If objective IS met, respond with ONLY a JSON object containing the relevant information.
            Do not include ANY explanation text, markdown formatting, or ```json blocks.
            Return ONLY the raw JSON object.

            Objective: {objective}
            Scraped content: {scrape_result['markdown']}
            """

            completion = client.chat.completions.create(
                model="meta-llama/Llama-4-Maverick-17B-128E-Instruct-FP8",
                messages=[{"role": "user", "content": check_prompt}]
            )

            result = completion.choices[0].message.content.strip()

            # Clean up the response if it contains markdown formatting
            if result.startswith("```json"):
                result = result.split("```json")[1]
            if result.endswith("```"):
                result = result.rsplit("```", 1)[0]
            result = result.strip()

            if result == "Objective not met":
                print(f"{Colors.YELLOW}Objective not met on this page. Proceeding to next link...{Colors.RESET}")
                continue

            try:
                json_result = json.loads(result)
                print(f"{Colors.GREEN}Objective fulfilled. Relevant information found.{Colors.RESET}")
                return json_result
            except json.JSONDecodeError as e:
                print(f"{Colors.RED}Error parsing JSON response: {str(e)}{Colors.RESET}")
                print(f"{Colors.MAGENTA}Raw response: {result}{Colors.RESET}")
                continue

        print(f"{Colors.RED}All available pages analyzed. Objective not fulfilled in examined content.{Colors.RESET}")
        return None

    except Exception as e:
        print(f"{Colors.RED}Error encountered during page analysis: {str(e)}{Colors.RESET}")
        return None

# Main function to execute the process
def main():
    # Get user input
    url = input(f"{Colors.BLUE}Enter the website to crawl: {Colors.RESET}")
    objective = input(f"{Colors.BLUE}Enter your objective: {Colors.RESET}")

    print(f"{Colors.YELLOW}Initiating web crawling process...{Colors.RESET}")
    # Find the relevant page
    map_website = find_relevant_page_via_map(objective, url, app, client)

    if map_website:
        print(f"{Colors.GREEN}Relevant pages identified. Proceeding with detailed analysis using Llama 4 Maverick...{Colors.RESET}")
        # Find objective in top pages
        result = find_objective_in_top_pages(map_website, objective, app, client)

        if result:
            print(f"{Colors.GREEN}Objective successfully fulfilled. Extracted information:{Colors.RESET}")
            print(f"{Colors.MAGENTA}{json.dumps(result, indent=2)}{Colors.RESET}")
        else:
            print(f"{Colors.RED}Unable to fulfill the objective with the available content.{Colors.RESET}")
    else:
        print(f"{Colors.RED}No relevant pages identified. Consider refining the search parameters or trying a different website.{Colors.RESET}")

if __name__ == "__main__":
    main()
3 examples/llama-4-maverick-web-crawler/requirements.txt Normal file
@@ -0,0 +1,3 @@
firecrawl>=0.1.0
together>=0.2.0
python-dotenv>=0.19.0