name: Run Eval Benchmark Prod

env:
  EVAL_API_URL: ${{ secrets.EVAL_API_URL }}
  EVAL_API_KEY: ${{ secrets.EVAL_API_KEY }}
  EVAL_EXPERIMENT_ID: ${{ secrets.EVAL_BENCHMARK_EXPERIMENT_ID }}

on:
  workflow_run:
    workflows: ["Deploy Images to GHCR"]
    types:
      - completed
    branches:
      - main
  workflow_dispatch:

jobs:
  run-eval-benchmark-prod:
    runs-on: ubuntu-latest
    if: ${{ github.event.workflow_run.conclusion == 'success' || github.event_name == 'workflow_dispatch' }}
    steps:
      - name: Checkout repository
        uses: actions/checkout@v3

      - name: 'Install dependencies'
        run: |
          python -m pip install --upgrade pip
          pip install requests

      # make sure the image is deployed before running the eval benchmark
      - name: Wait for 2 minutes
        run: sleep 120

      - name: 'Run Eval Benchmark Prod'
        run: |
          python .github/scripts/eval_run.py --label prod.${{ github.sha }} --api-url ${{ env.EVAL_API_URL }} --api-key ${{ env.EVAL_API_KEY }} --experiment-id ${{ env.EVAL_EXPERIMENT_ID }}
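
The final step invokes .github/scripts/eval_run.py, which lives elsewhere in the repository and is not shown here. As a reference point only, below is a minimal, hypothetical sketch of what such a script could look like, assuming it uses requests (installed in the earlier step) to POST a run record to the eval API; the flag names mirror the workflow invocation, but the endpoint path and payload shape are assumptions, not the actual script.

# Hypothetical sketch of .github/scripts/eval_run.py -- the real script is not
# shown in this workflow file; the endpoint path and payload are assumptions.
import argparse
import sys

import requests


def main() -> int:
    parser = argparse.ArgumentParser(description="Trigger an eval benchmark run")
    parser.add_argument("--label", required=True, help="Run label, e.g. prod.<git sha>")
    parser.add_argument("--api-url", required=True, help="Base URL of the eval API")
    parser.add_argument("--api-key", required=True, help="API key for the eval API")
    parser.add_argument("--experiment-id", required=True, help="Experiment to attach the run to")
    args = parser.parse_args()

    # Assumed endpoint: POST <api-url>/runs, authenticated with a bearer token.
    resp = requests.post(
        f"{args.api_url.rstrip('/')}/runs",
        headers={"Authorization": f"Bearer {args.api_key}"},
        json={"experiment_id": args.experiment_id, "label": args.label},
        timeout=60,
    )
    if not resp.ok:
        print(f"Eval run request failed: {resp.status_code} {resp.text}", file=sys.stderr)
        return 1

    print(f"Started eval run: {resp.json()}")
    return 0


if __name__ == "__main__":
    sys.exit(main())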