+ assert len(response['data'][0]['links']) > 0
+ assert 'metadata' in response['data'][0]
+ assert 'title' in response['data'][0]['metadata']
+ assert 'description' in response['data'][0]['metadata']
+ assert 'language' in response['data'][0]['metadata']
+ assert 'sourceURL' in response['data'][0]['metadata']
+ assert 'statusCode' in response['data'][0]['metadata']
+ assert 'error' not in response['data'][0]['metadata']
+
+def test_crawl_url_with_idempotency_key_e2e():
+ app = FirecrawlApp(api_url=API_URL, api_key=TEST_API_KEY)
+ uniqueIdempotencyKey = str(uuid4())
+ response = app.crawl_url('https://roastmywebsite.ai', {'excludePaths': ['blog/*']}, False, 2, uniqueIdempotencyKey)
+ assert response is not None
+ assert 'id' in response
+
+ with pytest.raises(Exception) as excinfo:
+ app.crawl_url('https://firecrawl.dev', {'excludePaths': ['blog/*']}, True, 2, uniqueIdempotencyKey)
+ assert "Idempotency key already used" in str(excinfo.value)
+
+def test_check_crawl_status_e2e():
+ app = FirecrawlApp(api_url=API_URL, api_key=TEST_API_KEY)
+ response = app.crawl_url('https://firecrawl.dev', {'scrapeOptions': {'formats': ['markdown', 'html', 'rawHtml', 'screenshot', 'links']}}, False)
+ assert response is not None
+ assert 'id' in response
+
+ max_checks = 15
+ checks = 0
+ status_response = app.check_crawl_status(response['id'])
+
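+ # Poll while the crawl is still scraping, up to max_checks iterations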
+ while status_response['status'] == 'scraping' and checks < max_checks:
+ time.sleep(1) # wait for 1 second
+ assert 'partial_data' not in status_response
+ assert 'current' not in status_response
+ assert 'data' in status_response
+ assert 'total' in status_response
+ assert 'creditsUsed' in status_response
+ assert 'expiresAt' in status_response
+ assert 'status' in status_response
+ assert 'next' in status_response
+ assert status_response['total'] > 0
+ assert status_response['creditsUsed'] > 0
+ assert datetime.strptime(status_response['expiresAt'], '%Y-%m-%dT%H:%M:%S.%fZ') > datetime.now()
+ assert status_response['status'] == 'scraping'
+ assert '/v1/crawl/' in status_response['next']
+ status_response = app.check_crawl_status(response['id'])
+ checks += 1
+
+ assert status_response is not None
+ assert 'total' in status_response
+ assert status_response['total'] > 0
+ assert 'creditsUsed' in status_response
+ assert status_response['creditsUsed'] > 0
+ assert 'expiresAt' in status_response
+ assert datetime.strptime(status_response['expiresAt'], '%Y-%m-%dT%H:%M:%S.%fZ') > datetime.now()
+ assert 'status' in status_response
+ assert status_response['status'] == 'completed'
+ assert len(status_response['data']) > 0
+ assert 'markdown' in status_response['data'][0]
+ assert len(status_response['data'][0]['markdown']) > 10
+ assert 'content' not in status_response['data'][0]
+ assert 'html' in status_response['data'][0]
+ assert "
0
+ assert 'metadata' in status_response['data'][0]
+ assert 'title' in status_response['data'][0]['metadata']
+ assert 'description' in status_response['data'][0]['metadata']
+ assert 'language' in status_response['data'][0]['metadata']
+ assert 'sourceURL' in status_response['data'][0]['metadata']
+ assert 'statusCode' in status_response['data'][0]['metadata']
+ assert 'error' not in status_response['data'][0]['metadata']
+
+def test_invalid_api_key_on_map():
+ invalid_app = FirecrawlApp(api_key="invalid_api_key", api_url=API_URL)
+ with pytest.raises(Exception) as excinfo:
+ invalid_app.map_url('https://roastmywebsite.ai')
+ assert "Unauthorized: Invalid token" in str(excinfo.value)
+
+def test_blocklisted_url_on_map():
+ app = FirecrawlApp(api_key=TEST_API_KEY, api_url=API_URL)
+ blocklisted_url = "https://facebook.com/fake-test"
+ with pytest.raises(Exception) as excinfo:
+ app.map_url(blocklisted_url)
+ assert "URL is blocked. Firecrawl currently does not support social media scraping due to policy restrictions." in str(excinfo.value)
+
+def test_successful_response_with_valid_preview_token_on_map():
+ app = FirecrawlApp(api_key="this_is_just_a_preview_token", api_url=API_URL)
+ response = app.map_url('https://roastmywebsite.ai')
+ assert response is not None
+ assert len(response) > 0
+
+def test_successful_response_for_valid_map():
+ app = FirecrawlApp(api_key=TEST_API_KEY, api_url=API_URL)
+ response = app.map_url('https://roastmywebsite.ai')
+ assert response is not None
+ assert len(response) > 0
+ assert any("https://" in link for link in response)
+ filtered_links = [link for link in response if "roastmywebsite.ai" in link]
+ assert len(filtered_links) > 0
+
+def test_search_e2e():
+ app = FirecrawlApp(api_url=API_URL, api_key=TEST_API_KEY)
+ with pytest.raises(NotImplementedError) as excinfo:
+ app.search("test query")
+ assert "Search is not supported in v1" in str(excinfo.value)
+
+# def test_llm_extraction():
+# app = FirecrawlApp(api_url=API_URL, api_key=TEST_API_KEY)
+# response = app.scrape_url("https://mendable.ai", {
+# 'extractorOptions': {
+# 'mode': 'llm-extraction',
+# 'extractionPrompt': "Based on the information on the page, find what the company's mission is and whether it supports SSO, and whether it is open source",
+# 'extractionSchema': {
+# 'type': 'object',
+# 'properties': {
+# 'company_mission': {'type': 'string'},
+# 'supports_sso': {'type': 'boolean'},
+# 'is_open_source': {'type': 'boolean'}
+# },
+# 'required': ['company_mission', 'supports_sso', 'is_open_source']
+# }
+# }
+# })
+# assert response is not None
+# assert 'llm_extraction' in response
+# llm_extraction = response['llm_extraction']
+# assert 'company_mission' in llm_extraction
+# assert isinstance(llm_extraction['supports_sso'], bool)
+# assert isinstance(llm_extraction['is_open_source'], bool)
+
+
+
\ No newline at end of file
diff --git a/apps/python-sdk/firecrawl/firecrawl.py b/apps/python-sdk/firecrawl/firecrawl.py
index 7ec0d33f..89c51803 100644
--- a/apps/python-sdk/firecrawl/firecrawl.py
+++ b/apps/python-sdk/firecrawl/firecrawl.py
@@ -19,24 +19,22 @@ import requests
logger : logging.Logger = logging.getLogger("firecrawl")
class FirecrawlApp:
- """
- Initialize the FirecrawlApp instance.
+ def __init__(self, api_key: Optional[str] = None, api_url: Optional[str] = None, version: str = 'v1') -> None:
+ """
+ Initialize the FirecrawlApp instance with API key, API URL, and version.
- Args:
- api_key (Optional[str]): API key for authenticating with the Firecrawl API.
- api_url (Optional[str]): Base URL for the Firecrawl API.
- """
- def __init__(self, api_key: Optional[str] = None, api_url: Optional[str] = None) -> None:
- self.api_key = api_key or os.getenv('FIRECRAWL_API_KEY')
- if self.api_key is None:
- logger.warning("No API key provided")
- raise ValueError('No API key provided')
- else:
- logger.debug("Initialized FirecrawlApp with API key: %s", self.api_key)
-
- self.api_url = api_url or os.getenv('FIRECRAWL_API_URL', 'https://api.firecrawl.dev')
- if self.api_url != 'https://api.firecrawl.dev':
- logger.debug("Initialized FirecrawlApp with API URL: %s", self.api_url)
+ Args:
+ api_key (Optional[str]): API key for authenticating with the Firecrawl API.
+ api_url (Optional[str]): Base URL for the Firecrawl API.
+ version (str): API version, either 'v0' or 'v1'.
+ """
+ self.api_key = api_key or os.getenv('FIRECRAWL_API_KEY')
+ self.api_url = api_url or os.getenv('FIRECRAWL_API_URL', 'https://api.firecrawl.dev')
+ self.version = version
+ if self.api_key is None:
+ logger.warning("No API key provided")
+ raise ValueError('No API key provided')
+ logger.debug(f"Initialized FirecrawlApp with API key: {self.api_key} and version: {self.version}")
def scrape_url(self, url: str, params: Optional[Dict[str, Any]] = None) -> Any:
"""
@@ -75,9 +73,11 @@ class FirecrawlApp:
for key, value in params.items():
if key != 'extractorOptions':
scrape_params[key] = value
+
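+ # Route the request to the version-specific scrape endpoint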
+ endpoint = f'/{self.version}/scrape'
# Make the POST request with the prepared headers and JSON data
response = requests.post(
- f'{self.api_url}/v0/scrape',
+ f'{self.api_url}{endpoint}',
headers=headers,
json=scrape_params,
)
@@ -104,6 +104,9 @@ class FirecrawlApp:
Raises:
Exception: If the search request fails.
"""
+ if self.version == 'v1':
+ raise NotImplementedError("Search is not supported in v1")
+
headers = self._prepare_headers()
json_data = {'query': query}
if params:
@@ -145,26 +148,37 @@ class FirecrawlApp:
Raises:
Exception: If the crawl job initiation or monitoring fails.
"""
+ endpoint = f'/{self.version}/crawl'
headers = self._prepare_headers(idempotency_key)
json_data = {'url': url}
if params:
json_data.update(params)
- response = self._post_request(f'{self.api_url}/v0/crawl', json_data, headers)
+ response = self._post_request(f'{self.api_url}{endpoint}', json_data, headers)
if response.status_code == 200:
- job_id = response.json().get('jobId')
- if wait_until_done:
- return self._monitor_job_status(job_id, headers, poll_interval)
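+ # v0 returns the job identifier as 'jobId'; v1 returns it as 'id'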
+ if self.version == 'v0':
+ id = response.json().get('jobId')
else:
- return {'jobId': job_id}
+ id = response.json().get('id')
+
+ if wait_until_done:
+ check_url = None
+ if self.version == 'v1':
+ check_url = response.json().get('url')
+ return self._monitor_job_status(id, headers, poll_interval, check_url)
+ else:
+ if self.version == 'v0':
+ return {'jobId': id}
+ else:
+ return {'id': id}
else:
self._handle_error(response, 'start crawl job')
- def check_crawl_status(self, job_id: str) -> Any:
+ def check_crawl_status(self, id: str) -> Any:
"""
Check the status of a crawl job using the Firecrawl API.
Args:
- job_id (str): The ID of the crawl job.
+ id (str): The ID of the crawl job.
Returns:
Any: The status of the crawl job.
@@ -172,13 +186,73 @@ class FirecrawlApp:
Raises:
Exception: If the status check request fails.
"""
+
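+ # v0 and v1 expose crawl status under different endpoints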
+ if self.version == 'v0':
+ endpoint = f'/{self.version}/crawl/status/{id}'
+ else:
+ endpoint = f'/{self.version}/crawl/{id}'
+
headers = self._prepare_headers()
- response = self._get_request(f'{self.api_url}/v0/crawl/status/{job_id}', headers)
+ response = self._get_request(f'{self.api_url}{endpoint}', headers)
if response.status_code == 200:
- return response.json()
+ data = response.json()
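+ # Normalize the raw payload into a stable shape for each API version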
+ if self.version == 'v0':
+ return {
+ 'success': True,
+ 'status': data.get('status'),
+ 'current': data.get('current'),
+ 'current_url': data.get('current_url'),
+ 'current_step': data.get('current_step'),
+ 'total': data.get('total'),
+ 'data': data.get('data'),
+ 'partial_data': data.get('partial_data') if not data.get('data') else None,
+ }
+ elif self.version == 'v1':
+ return {
+ 'success': True,
+ 'status': data.get('status'),
+ 'total': data.get('total'),
+ 'completed': data.get('completed'),
+ 'creditsUsed': data.get('creditsUsed'),
+ 'expiresAt': data.get('expiresAt'),
+ 'next': data.get('next'),
+ 'data': data.get('data'),
+ 'error': data.get('error')
+ }
else:
self._handle_error(response, 'check crawl status')
+ def map_url(self, url: str, params: Optional[Dict[str, Any]] = None) -> Any:
+ """
+ Map a website using the Firecrawl API and return the list of links found.
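+
+ Args:
+ url (str): The URL to map.
+ params (Optional[Dict[str, Any]]): Additional parameters for the map request.
+
+ Returns:
+ Any: The list of links discovered for the URL.
+
+ Raises:
+ Exception: If the map request fails.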
+ """
+ if self.version == 'v0':
+ raise NotImplementedError("Map is not supported in v0")
+
+ endpoint = f'/{self.version}/map'
+ headers = self._prepare_headers()
+
+ # Prepare the base scrape parameters with the URL
+ json_data = {'url': url}
+ if params:
+ json_data.update(params)
+
+ # Make the POST request with the prepared headers and JSON data
+ response = requests.post(
+ f'{self.api_url}{endpoint}',
+ headers=headers,
+ json=json_data,
+ )
+ if response.status_code == 200:
+ response = response.json()
+ if response['success'] and 'links' in response:
+ return response['links']
+ else:
+ raise Exception(f'Failed to map URL. Error: {response["error"]}')
+ else:
+ self._handle_error(response, 'map')
+
def _prepare_headers(self, idempotency_key: Optional[str] = None) -> Dict[str, str]:
"""
Prepare the headers for API requests.
@@ -257,15 +331,15 @@ class FirecrawlApp:
return response
return response
- def _monitor_job_status(self, job_id: str, headers: Dict[str, str], poll_interval: int) -> Any:
+ def _monitor_job_status(self, id: str, headers: Dict[str, str], poll_interval: int, check_url: Optional[str] = None) -> Any:
"""
Monitor the status of a crawl job until completion.
Args:
- job_id (str): The ID of the crawl job.
+ id (str): The ID of the crawl job.
headers (Dict[str, str]): The headers to include in the status check requests.
             poll_interval (int): Seconds between status checks.
-
+ check_url (Optional[str]): The status URL to poll, as returned by the v1 crawl endpoint.
Returns:
Any: The crawl results if the job is completed successfully.
@@ -273,15 +347,30 @@ class FirecrawlApp:
Exception: If the job fails or an error occurs during status checks.
"""
while True:
- status_response = self._get_request(f'{self.api_url}/v0/crawl/status/{job_id}', headers)
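+ # Prefer the explicit check URL returned by v1; otherwise derive it from the job id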
+ if check_url:
+ api_url = check_url
+ elif self.version == 'v0':
+ api_url = f'{self.api_url}/v0/crawl/status/{id}'
+ else:
+ api_url = f'{self.api_url}/v1/crawl/{id}'
+
+ status_response = self._get_request(api_url, headers)
if status_response.status_code == 200:
status_data = status_response.json()
if status_data['status'] == 'completed':
if 'data' in status_data:
- return status_data['data']
+ if self.version == 'v0':
+ return status_data['data']
+ else:
+ return status_data
else:
raise Exception('Crawl job completed but no data was returned')
- elif status_data['status'] in ['active', 'paused', 'pending', 'queued', 'waiting']:
+ elif status_data['status'] in ['active', 'paused', 'pending', 'queued', 'waiting', 'scraping']:
                poll_interval = max(poll_interval, 2)
time.sleep(poll_interval) # Wait for the specified interval before checking again
else:
@@ -300,18 +389,19 @@ class FirecrawlApp:
Raises:
Exception: An exception with a message containing the status code and error details from the response.
"""
- error_message = response.json().get('error', 'No additional error details provided.')
+ error_message = response.json().get('error', 'No error message provided.')
+ error_details = response.json().get('details', 'No additional error details provided.')
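+ # Surface both the error message and any server-provided details in every branch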
if response.status_code == 402:
- message = f"Payment Required: Failed to {action}. {error_message}"
+ message = f"Payment Required: Failed to {action}. {error_message} - {error_details}"
elif response.status_code == 408:
- message = f"Request Timeout: Failed to {action} as the request timed out. {error_message}"
+ message = f"Request Timeout: Failed to {action} as the request timed out. {error_message} - {error_details}"
elif response.status_code == 409:
- message = f"Conflict: Failed to {action} due to a conflict. {error_message}"
+ message = f"Conflict: Failed to {action} due to a conflict. {error_message} - {error_details}"
elif response.status_code == 500:
- message = f"Internal Server Error: Failed to {action}. {error_message}"
+ message = f"Internal Server Error: Failed to {action}. {error_message} - {error_details}"
else:
- message = f"Unexpected error during {action}: Status code {response.status_code}. {error_message}"
+ message = f"Unexpected error during {action}: Status code {response.status_code}. {error_message} - {error_details}"
# Raise an HTTPError with the custom message and attach the response
raise requests.exceptions.HTTPError(message, response=response)
diff --git a/apps/python-sdk/firecrawl_py.egg-info/PKG-INFO b/apps/python-sdk/firecrawl_py.egg-info/PKG-INFO
deleted file mode 100644
index 288eb7a5..00000000
--- a/apps/python-sdk/firecrawl_py.egg-info/PKG-INFO
+++ /dev/null
@@ -1,179 +0,0 @@
-Metadata-Version: 2.1
-Name: firecrawl-py
-Version: 0.0.12
-Summary: Python SDK for Firecrawl API
-Home-page: https://github.com/mendableai/firecrawl
-Author: Mendable.ai
-Author-email: nick@mendable.ai
-License: GNU General Public License v3 (GPLv3)
-Project-URL: Documentation, https://docs.firecrawl.dev
-Project-URL: Source, https://github.com/mendableai/firecrawl
-Project-URL: Tracker, https://github.com/mendableai/firecrawl/issues
-Keywords: SDK API firecrawl
-Classifier: Development Status :: 5 - Production/Stable
-Classifier: Environment :: Web Environment
-Classifier: Intended Audience :: Developers
-Classifier: License :: OSI Approved :: GNU General Public License v3 (GPLv3)
-Classifier: Natural Language :: English
-Classifier: Operating System :: OS Independent
-Classifier: Programming Language :: Python
-Classifier: Programming Language :: Python :: 3
-Classifier: Programming Language :: Python :: 3.8
-Classifier: Programming Language :: Python :: 3.9
-Classifier: Programming Language :: Python :: 3.10
-Classifier: Topic :: Internet
-Classifier: Topic :: Internet :: WWW/HTTP
-Classifier: Topic :: Internet :: WWW/HTTP :: Indexing/Search
-Classifier: Topic :: Software Development
-Classifier: Topic :: Software Development :: Libraries
-Classifier: Topic :: Software Development :: Libraries :: Python Modules
-Classifier: Topic :: Text Processing
-Classifier: Topic :: Text Processing :: Indexing
-Requires-Python: >=3.8
-Description-Content-Type: text/markdown
-
-# Firecrawl Python SDK
-
-The Firecrawl Python SDK is a library that allows you to easily scrape and crawl websites, and output the data in a format ready for use with language models (LLMs). It provides a simple and intuitive interface for interacting with the Firecrawl API.
-
-## Installation
-
-To install the Firecrawl Python SDK, you can use pip:
-
-```bash
-pip install firecrawl-py
-```
-
-## Usage
-
-1. Get an API key from [firecrawl.dev](https://firecrawl.dev)
-2. Set the API key as an environment variable named `FIRECRAWL_API_KEY` or pass it as a parameter to the `FirecrawlApp` class.
-
-
-Here's an example of how to use the SDK:
-
-```python
-from firecrawl import FirecrawlApp
-
-# Initialize the FirecrawlApp with your API key
-app = FirecrawlApp(api_key='your_api_key')
-
-# Scrape a single URL
-url = 'https://mendable.ai'
-scraped_data = app.scrape_url(url)
-
-# Crawl a website
-crawl_url = 'https://mendable.ai'
-params = {
- 'pageOptions': {
- 'onlyMainContent': True
- }
-}
-crawl_result = app.crawl_url(crawl_url, params=params)
-```
-
-### Scraping a URL
-
-To scrape a single URL, use the `scrape_url` method. It takes the URL as a parameter and returns the scraped data as a dictionary.
-
-```python
-url = 'https://example.com'
-scraped_data = app.scrape_url(url)
-```
-### Extracting structured data from a URL
-
-With LLM extraction, you can easily extract structured data from any URL. We support pydantic schemas to make it easier for you too. Here is how you to use it:
-
-```python
-class ArticleSchema(BaseModel):
- title: str
- points: int
- by: str
- commentsURL: str
-
-class TopArticlesSchema(BaseModel):
- top: List[ArticleSchema] = Field(..., max_items=5, description="Top 5 stories")
-
-data = app.scrape_url('https://news.ycombinator.com', {
- 'extractorOptions': {
- 'extractionSchema': TopArticlesSchema.model_json_schema(),
- 'mode': 'llm-extraction'
- },
- 'pageOptions':{
- 'onlyMainContent': True
- }
-})
-print(data["llm_extraction"])
-```
-
-### Search for a query
-
-Used to search the web, get the most relevant results, scrap each page and return the markdown.
-
-```python
-query = 'what is mendable?'
-search_result = app.search(query)
-```
-
-### Crawling a Website
-
-To crawl a website, use the `crawl_url` method. It takes the starting URL and optional parameters as arguments. The `params` argument allows you to specify additional options for the crawl job, such as the maximum number of pages to crawl, allowed domains, and the output format.
-
-The `wait_until_done` parameter determines whether the method should wait for the crawl job to complete before returning the result. If set to `True`, the method will periodically check the status of the crawl job until it is completed or the specified `timeout` (in seconds) is reached. If set to `False`, the method will return immediately with the job ID, and you can manually check the status of the crawl job using the `check_crawl_status` method.
-
-```python
-crawl_url = 'https://example.com'
-params = {
- 'crawlerOptions': {
- 'excludes': ['blog/*'],
- 'includes': [], # leave empty for all pages
- 'limit': 1000,
- },
- 'pageOptions': {
- 'onlyMainContent': True
- }
-}
-crawl_result = app.crawl_url(crawl_url, params=params, wait_until_done=True, timeout=5)
-```
-
-If `wait_until_done` is set to `True`, the `crawl_url` method will return the crawl result once the job is completed. If the job fails or is stopped, an exception will be raised.
-
-### Checking Crawl Status
-
-To check the status of a crawl job, use the `check_crawl_status` method. It takes the job ID as a parameter and returns the current status of the crawl job.
-
-```python
-job_id = crawl_result['jobId']
-status = app.check_crawl_status(job_id)
-```
-
-## Error Handling
-
-The SDK handles errors returned by the Firecrawl API and raises appropriate exceptions. If an error occurs during a request, an exception will be raised with a descriptive error message.
-
-## Running the Tests with Pytest
-
-To ensure the functionality of the Firecrawl Python SDK, we have included end-to-end tests using `pytest`. These tests cover various aspects of the SDK, including URL scraping, web searching, and website crawling.
-
-### Running the Tests
-
-To run the tests, execute the following commands:
-
-Install pytest:
-```bash
-pip install pytest
-```
-
-Run:
-```bash
-pytest firecrawl/__tests__/e2e_withAuth/test.py
-```
-
-
-## Contributing
-
-Contributions to the Firecrawl Python SDK are welcome! If you find any issues or have suggestions for improvements, please open an issue or submit a pull request on the GitHub repository.
-
-## License
-
-The Firecrawl Python SDK is open-source and released under the [MIT License](https://opensource.org/licenses/MIT).
diff --git a/apps/python-sdk/firecrawl_py.egg-info/SOURCES.txt b/apps/python-sdk/firecrawl_py.egg-info/SOURCES.txt
deleted file mode 100644
index c25567c5..00000000
--- a/apps/python-sdk/firecrawl_py.egg-info/SOURCES.txt
+++ /dev/null
@@ -1,9 +0,0 @@
-README.md
-setup.py
-firecrawl/__init__.py
-firecrawl/firecrawl.py
-firecrawl_py.egg-info/PKG-INFO
-firecrawl_py.egg-info/SOURCES.txt
-firecrawl_py.egg-info/dependency_links.txt
-firecrawl_py.egg-info/requires.txt
-firecrawl_py.egg-info/top_level.txt
\ No newline at end of file
diff --git a/apps/python-sdk/firecrawl_py.egg-info/dependency_links.txt b/apps/python-sdk/firecrawl_py.egg-info/dependency_links.txt
deleted file mode 100644
index 8b137891..00000000
--- a/apps/python-sdk/firecrawl_py.egg-info/dependency_links.txt
+++ /dev/null
@@ -1 +0,0 @@
-
diff --git a/apps/python-sdk/firecrawl_py.egg-info/requires.txt b/apps/python-sdk/firecrawl_py.egg-info/requires.txt
deleted file mode 100644
index c8d341f5..00000000
--- a/apps/python-sdk/firecrawl_py.egg-info/requires.txt
+++ /dev/null
@@ -1,3 +0,0 @@
-requests
-pytest
-python-dotenv
diff --git a/apps/python-sdk/firecrawl_py.egg-info/top_level.txt b/apps/python-sdk/firecrawl_py.egg-info/top_level.txt
deleted file mode 100644
index 8bce1a1f..00000000
--- a/apps/python-sdk/firecrawl_py.egg-info/top_level.txt
+++ /dev/null
@@ -1 +0,0 @@
-firecrawl
diff --git a/docker-compose.yaml b/docker-compose.yaml
index 8c160f4a..24b51762 100644
--- a/docker-compose.yaml
+++ b/docker-compose.yaml
@@ -15,7 +15,6 @@ x-common-service: &common-service
- OPENAI_BASE_URL=${OPENAI_BASE_URL}
- MODEL_NAME=${MODEL_NAME:-gpt-4o}
- SLACK_WEBHOOK_URL=${SLACK_WEBHOOK_URL}
- - SERPER_API_KEY=${SERPER_API_KEY}
- LLAMAPARSE_API_KEY=${LLAMAPARSE_API_KEY}
- LOGTAIL_KEY=${LOGTAIL_KEY}
- BULL_AUTH_KEY=${BULL_AUTH_KEY}
diff --git a/examples/kubernetes/cluster-install/secret.yaml b/examples/kubernetes/cluster-install/secret.yaml
index 2be96320..6d8eed3b 100644
--- a/examples/kubernetes/cluster-install/secret.yaml
+++ b/examples/kubernetes/cluster-install/secret.yaml
@@ -6,7 +6,6 @@ type: Opaque
data:
OPENAI_API_KEY: ""
SLACK_WEBHOOK_URL: ""
- SERPER_API_KEY: ""
LLAMAPARSE_API_KEY: ""
LOGTAIL_KEY: ""
BULL_AUTH_KEY: ""