firecrawl-py 1.12.0__py3-none-any.whl → 1.13.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of firecrawl-py might be problematic.
- firecrawl/__init__.py +1 -1
- firecrawl/firecrawl.py +287 -8
- {firecrawl_py-1.12.0.dist-info → firecrawl_py-1.13.0.dist-info}/METADATA +1 -1
- firecrawl_py-1.13.0.dist-info/RECORD +11 -0
- firecrawl_py-1.13.0.dist-info/top_level.txt +1 -0
- build/lib/firecrawl/__init__.py +0 -79
- build/lib/firecrawl/__tests__/e2e_withAuth/__init__.py +0 -0
- build/lib/firecrawl/__tests__/e2e_withAuth/test.py +0 -170
- build/lib/firecrawl/__tests__/v1/e2e_withAuth/__init__.py +0 -0
- build/lib/firecrawl/__tests__/v1/e2e_withAuth/test.py +0 -440
- build/lib/firecrawl/firecrawl.py +0 -988
- firecrawl_py-1.12.0.dist-info/RECORD +0 -17
- firecrawl_py-1.12.0.dist-info/top_level.txt +0 -3
- {firecrawl_py-1.12.0.dist-info → firecrawl_py-1.13.0.dist-info}/LICENSE +0 -0
- {firecrawl_py-1.12.0.dist-info → firecrawl_py-1.13.0.dist-info}/WHEEL +0 -0
firecrawl/__init__.py
CHANGED
firecrawl/firecrawl.py
CHANGED
@@ -12,7 +12,7 @@ Classes:
 import logging
 import os
 import time
-from typing import Any, Dict, Optional, List, Union
+from typing import Any, Dict, Optional, List, Union, Callable
 import json
 
 import requests
@@ -33,6 +33,46 @@ class SearchParams(pydantic.BaseModel):
     timeout: Optional[int] = 60000
     scrapeOptions: Optional[Dict[str, Any]] = None
 
+class GenerateLLMsTextParams(pydantic.BaseModel):
+    """
+    Parameters for the LLMs.txt generation operation.
+    """
+    maxUrls: Optional[int] = 10
+    showFullText: Optional[bool] = False
+    __experimental_stream: Optional[bool] = None
+
+class DeepResearchParams(pydantic.BaseModel):
+    """
+    Parameters for the deep research operation.
+    """
+    maxDepth: Optional[int] = 7
+    timeLimit: Optional[int] = 270
+    maxUrls: Optional[int] = 20
+    __experimental_streamSteps: Optional[bool] = None
+
+class DeepResearchResponse(pydantic.BaseModel):
+    """
+    Response from the deep research operation.
+    """
+    success: bool
+    id: str
+    error: Optional[str] = None
+
+class DeepResearchStatusResponse(pydantic.BaseModel):
+    """
+    Status response from the deep research operation.
+    """
+    success: bool
+    data: Optional[Dict[str, Any]] = None
+    status: str
+    error: Optional[str] = None
+    expiresAt: str
+    currentDepth: int
+    maxDepth: int
+    activities: List[Dict[str, Any]]
+    sources: List[Dict[str, Any]]
+    summaries: List[str]
+
 class FirecrawlApp:
     class SearchResponse(pydantic.BaseModel):
         """
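These models mirror the request and status payloads of the two new endpoints. A minimal sketch of how the SDK normalizes dict and model inputs (trimmed to omit the experimental flag, and assuming pydantic v1, which the `.dict(exclude_none=True)` calls later in the diff imply):

import pydantic
from typing import Optional

class GenerateLLMsTextParams(pydantic.BaseModel):
    maxUrls: Optional[int] = 10
    showFullText: Optional[bool] = False

# Dicts and model instances are accepted interchangeably by the new methods;
# None-valued fields are dropped before the request body is serialized.
params = GenerateLLMsTextParams(**{"maxUrls": 5})
print(params.dict(exclude_none=True))  # {'maxUrls': 5, 'showFullText': False}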
@@ -137,6 +177,7 @@ class FirecrawlApp:
             f'{self.api_url}{endpoint}',
             headers=headers,
             json=scrape_params,
+            timeout=(scrape_params["timeout"] + 5000 if "timeout" in scrape_params else None),
         )
         if response.status_code == 200:
             try:
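An illustrative sketch of how the added timeout guard resolves (values are placeholders). One caveat worth flagging: `requests` measures `timeout` in seconds, while Firecrawl's `timeout` option is in milliseconds (the SearchParams default above is 60000), and the raw value plus 5000 is what gets handed to `requests`:

# Illustrative: how the added expression resolves for a caller-supplied timeout.
scrape_params = {"url": "https://example.com", "timeout": 60000}  # Firecrawl timeout, in ms
timeout = scrape_params["timeout"] + 5000 if "timeout" in scrape_params else None
print(timeout)  # 65000 -- requests.post() will interpret this number as seconds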
@@ -425,7 +466,7 @@ class FirecrawlApp:
         else:
             self._handle_error(response, 'map')
 
-    def batch_scrape_urls(self, urls:
+    def batch_scrape_urls(self, urls: List[str],
                           params: Optional[Dict[str, Any]] = None,
                           poll_interval: Optional[int] = 2,
                           idempotency_key: Optional[str] = None) -> Any:
@@ -433,7 +474,7 @@ class FirecrawlApp:
         Initiate a batch scrape job for the specified URLs using the Firecrawl API.
 
         Args:
-            urls (
+            urls (List[str]): The URLs to scrape.
             params (Optional[Dict[str, Any]]): Additional parameters for the scraper.
             poll_interval (Optional[int]): Time in seconds between status checks when waiting for job completion. Defaults to 2 seconds.
             idempotency_key (Optional[str]): A unique uuid key to ensure idempotency of requests.
@@ -468,12 +509,12 @@ class FirecrawlApp:
             self._handle_error(response, 'start batch scrape job')
 
 
-    def async_batch_scrape_urls(self, urls:
+    def async_batch_scrape_urls(self, urls: List[str], params: Optional[Dict[str, Any]] = None, idempotency_key: Optional[str] = None) -> Dict[str, Any]:
         """
         Initiate a crawl job asynchronously.
 
         Args:
-            urls (
+            urls (List[str]): The URLs to scrape.
             params (Optional[Dict[str, Any]]): Additional parameters for the scraper.
             idempotency_key (Optional[str]): A unique uuid key to ensure idempotency of requests.
 
@@ -497,12 +538,12 @@ class FirecrawlApp:
         else:
             self._handle_error(response, 'start batch scrape job')
 
-    def batch_scrape_urls_and_watch(self, urls:
+    def batch_scrape_urls_and_watch(self, urls: List[str], params: Optional[Dict[str, Any]] = None, idempotency_key: Optional[str] = None) -> 'CrawlWatcher':
         """
         Initiate a batch scrape job and return a CrawlWatcher to monitor the job via WebSocket.
 
         Args:
-            urls (
+            urls (List[str]): The URLs to scrape.
             params (Optional[Dict[str, Any]]): Additional parameters for the scraper.
             idempotency_key (Optional[str]): A unique uuid key to ensure idempotency of requests.
 
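For orientation, a hedged usage sketch of the three batch entry points with their restored signatures (the API key is a placeholder, and the `formats` scrape option is assumed from the v1 API, not shown in this diff):

from firecrawl import FirecrawlApp

app = FirecrawlApp(api_key="fc-YOUR-KEY")  # placeholder key
urls = ["https://firecrawl.dev", "https://docs.firecrawl.dev"]

# Blocking: submits the job, then polls every `poll_interval` seconds until done.
result = app.batch_scrape_urls(urls, params={"formats": ["markdown"]})

# Non-blocking: returns the job handle (e.g. {'success': True, 'id': ...}) immediately.
job = app.async_batch_scrape_urls(urls, params={"formats": ["markdown"]})

# WebSocket monitoring via the returned CrawlWatcher.
watcher = app.batch_scrape_urls_and_watch(urls)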
@@ -756,6 +797,123 @@ class FirecrawlApp:
         except Exception as e:
             raise ValueError(str(e), 500)
 
+    def generate_llms_text(self, url: str, params: Optional[Union[Dict[str, Any], GenerateLLMsTextParams]] = None) -> Dict[str, Any]:
+        """
+        Generate LLMs.txt for a given URL and poll until completion.
+
+        Args:
+            url (str): The URL to generate LLMs.txt from.
+            params (Optional[Union[Dict[str, Any], GenerateLLMsTextParams]]): Parameters for the LLMs.txt generation.
+
+        Returns:
+            Dict[str, Any]: A dictionary containing the generation results. The structure includes:
+                - 'success' (bool): Indicates if the generation was successful.
+                - 'status' (str): The final status of the generation job.
+                - 'data' (Dict): The generated LLMs.txt data.
+                - 'error' (Optional[str]): Error message if the generation failed.
+                - 'expiresAt' (str): ISO 8601 formatted date-time string indicating when the data expires.
+
+        Raises:
+            Exception: If the generation job fails or an error occurs during status checks.
+        """
+        if params is None:
+            params = {}
+
+        if isinstance(params, dict):
+            generation_params = GenerateLLMsTextParams(**params)
+        else:
+            generation_params = params
+
+        response = self.async_generate_llms_text(url, generation_params)
+        if not response.get('success') or 'id' not in response:
+            return response
+
+        job_id = response['id']
+        while True:
+            status = self.check_generate_llms_text_status(job_id)
+
+            if status['status'] == 'completed':
+                return status
+            elif status['status'] == 'failed':
+                raise Exception(f'LLMs.txt generation failed. Error: {status.get("error")}')
+            elif status['status'] != 'processing':
+                break
+
+            time.sleep(2)  # Polling interval
+
+        return {'success': False, 'error': 'LLMs.txt generation job terminated unexpectedly'}
+
+    def async_generate_llms_text(self, url: str, params: Optional[Union[Dict[str, Any], GenerateLLMsTextParams]] = None) -> Dict[str, Any]:
+        """
+        Initiate an asynchronous LLMs.txt generation operation.
+
+        Args:
+            url (str): The URL to generate LLMs.txt from.
+            params (Optional[Union[Dict[str, Any], GenerateLLMsTextParams]]): Parameters for the LLMs.txt generation.
+
+        Returns:
+            Dict[str, Any]: A dictionary containing the generation initiation response. The structure includes:
+                - 'success' (bool): Indicates if the generation initiation was successful.
+                - 'id' (str): The unique identifier for the generation job.
+
+        Raises:
+            Exception: If the generation job initiation fails.
+        """
+        if params is None:
+            params = {}
+
+        if isinstance(params, dict):
+            generation_params = GenerateLLMsTextParams(**params)
+        else:
+            generation_params = params
+
+        headers = self._prepare_headers()
+        json_data = {'url': url, **generation_params.dict(exclude_none=True)}
+
+        try:
+            response = self._post_request(f'{self.api_url}/v1/llmstxt', json_data, headers)
+            if response.status_code == 200:
+                try:
+                    return response.json()
+                except:
+                    raise Exception('Failed to parse Firecrawl response as JSON.')
+            else:
+                self._handle_error(response, 'start LLMs.txt generation')
+        except Exception as e:
+            raise ValueError(str(e))
+
+        return {'success': False, 'error': 'Internal server error'}
+
+    def check_generate_llms_text_status(self, id: str) -> Dict[str, Any]:
+        """
+        Check the status of a LLMs.txt generation operation.
+
+        Args:
+            id (str): The ID of the LLMs.txt generation operation.
+
+        Returns:
+            Dict[str, Any]: The current status and results of the generation operation.
+
+        Raises:
+            Exception: If the status check fails.
+        """
+        headers = self._prepare_headers()
+        try:
+            response = self._get_request(f'{self.api_url}/v1/llmstxt/{id}', headers)
+            if response.status_code == 200:
+                try:
+                    return response.json()
+                except:
+                    raise Exception('Failed to parse Firecrawl response as JSON.')
+            elif response.status_code == 404:
+                raise Exception('LLMs.txt generation job not found')
+            else:
+                self._handle_error(response, 'check LLMs.txt generation status')
+        except Exception as e:
+            raise ValueError(str(e))
+
+        return {'success': False, 'error': 'Internal server error'}
+
     def _prepare_headers(self, idempotency_key: Optional[str] = None) -> Dict[str, str]:
         """
         Prepare the headers for API requests.
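End to end, the three new methods compose as poll-until-done over the async pair. A usage sketch following the signatures above (the key is a placeholder):

from firecrawl import FirecrawlApp

app = FirecrawlApp(api_key="fc-YOUR-KEY")  # placeholder key

# Blocking call: starts the job, then polls /v1/llmstxt/{id} every 2 seconds.
result = app.generate_llms_text(
    "https://firecrawl.dev",
    params={"maxUrls": 5, "showFullText": True},
)
if result.get("success"):
    print(result["data"])  # the generated LLMs.txt payload

# Or drive the polling yourself:
job = app.async_generate_llms_text("https://firecrawl.dev")
status = app.check_generate_llms_text_status(job["id"])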
@@ -800,7 +958,7 @@ class FirecrawlApp:
             requests.RequestException: If the request fails after the specified retries.
         """
         for attempt in range(retries):
-            response = requests.post(url, headers=headers, json=data)
+            response = requests.post(url, headers=headers, json=data, timeout=((data["timeout"] + 5000) if "timeout" in data else None))
             if response.status_code == 502:
                 time.sleep(backoff_factor * (2 ** attempt))
             else:
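For context, the retry loop around this call backs off exponentially on 502 responses. Assuming the existing defaults are `retries=3` and `backoff_factor=0.5` (neither is shown in this hunk), the sleep schedule works out as:

backoff_factor = 0.5  # assumed default; only 502 responses trigger a retry
for attempt in range(3):
    print(f"attempt {attempt}: sleep {backoff_factor * (2 ** attempt):.1f}s")
# attempt 0: sleep 0.5s
# attempt 1: sleep 1.0s
# attempt 2: sleep 2.0s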
@@ -940,6 +1098,127 @@ class FirecrawlApp:
         # Raise an HTTPError with the custom message and attach the response
         raise requests.exceptions.HTTPError(message, response=response)
 
+    def deep_research(self, query: str, params: Optional[Union[Dict[str, Any], DeepResearchParams]] = None,
+                      on_activity: Optional[Callable[[Dict[str, Any]], None]] = None,
+                      on_source: Optional[Callable[[Dict[str, Any]], None]] = None) -> Dict[str, Any]:
+        """
+        Initiates a deep research operation on a given query and polls until completion.
+
+        Args:
+            query (str): The query to research.
+            params (Optional[Union[Dict[str, Any], DeepResearchParams]]): Parameters for the deep research operation.
+            on_activity (Optional[Callable[[Dict[str, Any]], None]]): Optional callback to receive activity updates in real-time.
+
+        Returns:
+            Dict[str, Any]: The final research results.
+
+        Raises:
+            Exception: If the research operation fails.
+        """
+        if params is None:
+            params = {}
+
+        if isinstance(params, dict):
+            research_params = DeepResearchParams(**params)
+        else:
+            research_params = params
+
+        response = self.async_deep_research(query, research_params)
+        if not response.get('success') or 'id' not in response:
+            return response
+
+        job_id = response['id']
+        while True:
+            status = self.check_deep_research_status(job_id)
+
+            if on_activity and 'activities' in status:
+                for activity in status['activities']:
+                    on_activity(activity)
+
+            if on_source and 'sources' in status:
+                for source in status['sources']:
+                    on_source(source)
+
+            if status['status'] == 'completed':
+                return status
+            elif status['status'] == 'failed':
+                raise Exception(f'Deep research failed. Error: {status.get("error")}')
+            elif status['status'] != 'processing':
+                break
+
+            time.sleep(2)  # Polling interval
+
+        return {'success': False, 'error': 'Deep research job terminated unexpectedly'}
+
+    def async_deep_research(self, query: str, params: Optional[Union[Dict[str, Any], DeepResearchParams]] = None) -> Dict[str, Any]:
+        """
+        Initiates an asynchronous deep research operation.
+
+        Args:
+            query (str): The query to research.
+            params (Optional[Union[Dict[str, Any], DeepResearchParams]]): Parameters for the deep research operation.
+
+        Returns:
+            Dict[str, Any]: The response from the deep research initiation.
+
+        Raises:
+            Exception: If the research initiation fails.
+        """
+        if params is None:
+            params = {}
+
+        if isinstance(params, dict):
+            research_params = DeepResearchParams(**params)
+        else:
+            research_params = params
+
+        headers = self._prepare_headers()
+        json_data = {'query': query, **research_params.dict(exclude_none=True)}
+
+        try:
+            response = self._post_request(f'{self.api_url}/v1/research', json_data, headers)
+            if response.status_code == 200:
+                try:
+                    return response.json()
+                except:
+                    raise Exception('Failed to parse Firecrawl response as JSON.')
+            else:
+                self._handle_error(response, 'start deep research')
+        except Exception as e:
+            raise ValueError(str(e))
+
+        return {'success': False, 'error': 'Internal server error'}
+
+    def check_deep_research_status(self, id: str) -> Dict[str, Any]:
+        """
+        Check the status of a deep research operation.
+
+        Args:
+            id (str): The ID of the deep research operation.
+
+        Returns:
+            Dict[str, Any]: The current status and results of the research operation.
+
+        Raises:
+            Exception: If the status check fails.
+        """
+        headers = self._prepare_headers()
+        try:
+            response = self._get_request(f'{self.api_url}/v1/research/{id}', headers)
+            if response.status_code == 200:
+                try:
+                    return response.json()
+                except:
+                    raise Exception('Failed to parse Firecrawl response as JSON.')
+            elif response.status_code == 404:
+                raise Exception('Deep research job not found')
+            else:
+                self._handle_error(response, 'check deep research status')
+        except Exception as e:
+            raise ValueError(str(e))
+
+        return {'success': False, 'error': 'Internal server error'}
+
 class CrawlWatcher:
     def __init__(self, id: str, app: FirecrawlApp):
         self.id = id
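The callbacks give a streaming view over what is otherwise a poll loop. A sketch of the blocking entry point (key and query are placeholders):

from firecrawl import FirecrawlApp

app = FirecrawlApp(api_key="fc-YOUR-KEY")  # placeholder key

def on_activity(activity):
    # Called for every activity entry returned by each status poll;
    # note the loop above re-delivers all entries on every poll.
    print("activity:", activity.get("type"))

results = app.deep_research(
    "What are the latest advances in web scraping?",
    params={"maxDepth": 3, "timeLimit": 120},
    on_activity=on_activity,
)
print(results["data"] if results.get("success") else results.get("error"))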
firecrawl_py-1.13.0.dist-info/RECORD
ADDED
@@ -0,0 +1,11 @@
+firecrawl/__init__.py,sha256=riFAe05OaS3h1naKPjfPAp9XXX5PKHvELOyTo0ozC1s,2544
+firecrawl/firecrawl.py,sha256=nkpH9L2PbJrwYPQRPVRmGCCdCzJKw1iKeqicPOgH9RQ,51581
+firecrawl/__tests__/e2e_withAuth/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+firecrawl/__tests__/e2e_withAuth/test.py,sha256=6OawnVF4IPeGyXg_Izi3t8U7MyT90roaJBJIG5UfllM,7935
+firecrawl/__tests__/v1/e2e_withAuth/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+firecrawl/__tests__/v1/e2e_withAuth/test.py,sha256=tL5kJJ4el37Wc-Z2TRSuSWwWG2M40h3VPxHYuWijD00,19888
+firecrawl_py-1.13.0.dist-info/LICENSE,sha256=nPCunEDwjRGHlmjvsiDUyIWbkqqyj3Ej84ntnh0g0zA,1084
+firecrawl_py-1.13.0.dist-info/METADATA,sha256=RfJSIzeCl_Ovlr7t_R6elePzHg92pHOg3BwGKOOEJFY,10575
+firecrawl_py-1.13.0.dist-info/WHEEL,sha256=2wepM1nk4DS4eFpYrW1TTqPcoGNfHhhO_i5m4cOimbo,92
+firecrawl_py-1.13.0.dist-info/top_level.txt,sha256=jTvz79zWhiyAezfmmHe4FQ-hR60C59UU5FrjMjijLu8,10
+firecrawl_py-1.13.0.dist-info/RECORD,,
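Each RECORD row has the form `path,sha256=<digest>,<size>`, where the digest is an unpadded urlsafe-base64 SHA-256 per the wheel spec. A sketch for re-deriving a row when auditing a release like this one:

import base64
import hashlib

def record_row(path: str) -> str:
    # Recompute the RECORD entry for one file inside an unpacked wheel.
    with open(path, "rb") as f:
        data = f.read()
    digest = base64.urlsafe_b64encode(hashlib.sha256(data).digest()).rstrip(b"=")
    return f"{path},sha256={digest.decode()},{len(data)}"

# e.g. record_row("firecrawl/firecrawl.py") should reproduce the row above.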
firecrawl_py-1.13.0.dist-info/top_level.txt
ADDED
@@ -0,0 +1 @@
+firecrawl
build/lib/firecrawl/__init__.py
DELETED
@@ -1,79 +0,0 @@
-"""
-This is the Firecrawl package.
-
-This package provides a Python SDK for interacting with the Firecrawl API.
-It includes methods to scrape URLs, perform searches, initiate and monitor crawl jobs,
-and check the status of these jobs.
-
-For more information visit https://github.com/firecrawl/
-"""
-
-import logging
-import os
-
-from .firecrawl import FirecrawlApp  # noqa
-
-__version__ = "1.12.0"
-
-# Define the logger for the Firecrawl project
-logger: logging.Logger = logging.getLogger("firecrawl")
-
-
-def _configure_logger() -> None:
-    """
-    Configure the firecrawl logger for console output.
-
-    The function attaches a handler for console output with a specific format and date
-    format to the firecrawl logger.
-    """
-    try:
-        # Create the formatter
-        formatter = logging.Formatter(
-            "[%(asctime)s - %(name)s:%(lineno)d - %(levelname)s] %(message)s",
-            datefmt="%Y-%m-%d %H:%M:%S",
-        )
-
-        # Create the console handler and set the formatter
-        console_handler = logging.StreamHandler()
-        console_handler.setFormatter(formatter)
-
-        # Add the console handler to the firecrawl logger
-        logger.addHandler(console_handler)
-    except Exception as e:
-        logger.error("Failed to configure logging: %s", e)
-
-
-def setup_logging() -> None:
-    """Set up logging based on the FIRECRAWL_LOGGING_LEVEL environment variable."""
-    # Check if the firecrawl logger already has a handler
-    if logger.hasHandlers():
-        return  # To prevent duplicate logging
-
-    # Check if the FIRECRAWL_LOGGING_LEVEL environment variable is set
-    if not (env := os.getenv("FIRECRAWL_LOGGING_LEVEL", "").upper()):
-        # Attach a no-op handler to prevent warnings about no handlers
-        logger.addHandler(logging.NullHandler())
-        return
-
-    # Attach the console handler to the firecrawl logger
-    _configure_logger()
-
-    # Set the logging level based on the FIRECRAWL_LOGGING_LEVEL environment variable
-    if env == "DEBUG":
-        logger.setLevel(logging.DEBUG)
-    elif env == "INFO":
-        logger.setLevel(logging.INFO)
-    elif env == "WARNING":
-        logger.setLevel(logging.WARNING)
-    elif env == "ERROR":
-        logger.setLevel(logging.ERROR)
-    elif env == "CRITICAL":
-        logger.setLevel(logging.CRITICAL)
-    else:
-        logger.setLevel(logging.INFO)
-        logger.warning("Unknown logging level: %s, defaulting to INFO", env)
-
-
-# Initialize logging configuration when the module is imported
-setup_logging()
-logger.debug("Debugging logger setup")
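This was the stale `build/lib` copy of the logging bootstrap that still ships as `firecrawl/__init__.py`; verbosity is driven by an environment variable read when the package is imported, e.g.:

import os
os.environ["FIRECRAWL_LOGGING_LEVEL"] = "DEBUG"  # must be set before the import

import firecrawl  # setup_logging() runs at import time and applies the DEBUG level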
build/lib/firecrawl/__tests__/e2e_withAuth/__init__.py
DELETED
File without changes
build/lib/firecrawl/__tests__/e2e_withAuth/test.py
DELETED
@@ -1,170 +0,0 @@
-import importlib.util
-import pytest
-import time
-import os
-from uuid import uuid4
-from dotenv import load_dotenv
-
-load_dotenv()
-
-API_URL = "http://127.0.0.1:3002"
-ABSOLUTE_FIRECRAWL_PATH = "firecrawl/firecrawl.py"
-TEST_API_KEY = os.getenv('TEST_API_KEY')
-
-print(f"ABSOLUTE_FIRECRAWL_PATH: {ABSOLUTE_FIRECRAWL_PATH}")
-
-spec = importlib.util.spec_from_file_location("FirecrawlApp", ABSOLUTE_FIRECRAWL_PATH)
-firecrawl = importlib.util.module_from_spec(spec)
-spec.loader.exec_module(firecrawl)
-FirecrawlApp = firecrawl.FirecrawlApp
-
-def test_no_api_key():
-    with pytest.raises(Exception) as excinfo:
-        invalid_app = FirecrawlApp(api_url=API_URL, version='v0')
-    assert "No API key provided" in str(excinfo.value)
-
-def test_scrape_url_invalid_api_key():
-    invalid_app = FirecrawlApp(api_url=API_URL, api_key="invalid_api_key", version='v0')
-    with pytest.raises(Exception) as excinfo:
-        invalid_app.scrape_url('https://firecrawl.dev')
-    assert "Unexpected error during scrape URL: Status code 401. Unauthorized: Invalid token" in str(excinfo.value)
-
-# def test_blocklisted_url():
-#     blocklisted_url = "https://facebook.com/fake-test"
-#     app = FirecrawlApp(api_url=API_URL, api_key=TEST_API_KEY, version='v0')
-#     with pytest.raises(Exception) as excinfo:
-#         app.scrape_url(blocklisted_url)
-#     assert "Unexpected error during scrape URL: Status code 403. Firecrawl currently does not support social media scraping due to policy restrictions. We're actively working on building support for it." in str(excinfo.value)
-
-def test_successful_response_with_valid_preview_token():
-    app = FirecrawlApp(api_url=API_URL, api_key="this_is_just_a_preview_token", version='v0')
-    response = app.scrape_url('https://roastmywebsite.ai')
-    assert response is not None
-    assert 'content' in response
-    assert "_Roast_" in response['content']
-
-def test_scrape_url_e2e():
-    app = FirecrawlApp(api_url=API_URL, api_key=TEST_API_KEY, version='v0')
-    response = app.scrape_url('https://roastmywebsite.ai')
-    print(response)
-
-    assert response is not None
-    assert 'content' in response
-    assert 'markdown' in response
-    assert 'metadata' in response
-    assert 'html' not in response
-    assert "_Roast_" in response['content']
-
-def test_successful_response_with_valid_api_key_and_include_html():
-    app = FirecrawlApp(api_url=API_URL, api_key=TEST_API_KEY, version='v0')
-    response = app.scrape_url('https://roastmywebsite.ai', {'pageOptions': {'includeHtml': True}})
-    assert response is not None
-    assert 'content' in response
-    assert 'markdown' in response
-    assert 'html' in response
-    assert 'metadata' in response
-    assert "_Roast_" in response['content']
-    assert "_Roast_" in response['markdown']
-    assert "<h1" in response['html']
-
-def test_successful_response_for_valid_scrape_with_pdf_file():
-    app = FirecrawlApp(api_url=API_URL, api_key=TEST_API_KEY, version='v0')
-    response = app.scrape_url('https://arxiv.org/pdf/astro-ph/9301001.pdf')
-    assert response is not None
-    assert 'content' in response
-    assert 'metadata' in response
-    assert 'We present spectrophotometric observations of the Broad Line Radio Galaxy' in response['content']
-
-def test_successful_response_for_valid_scrape_with_pdf_file_without_explicit_extension():
-    app = FirecrawlApp(api_url=API_URL, api_key=TEST_API_KEY, version='v0')
-    response = app.scrape_url('https://arxiv.org/pdf/astro-ph/9301001')
-    time.sleep(6)  # wait for 6 seconds
-    assert response is not None
-    assert 'content' in response
-    assert 'metadata' in response
-    assert 'We present spectrophotometric observations of the Broad Line Radio Galaxy' in response['content']
-
-def test_crawl_url_invalid_api_key():
-    invalid_app = FirecrawlApp(api_url=API_URL, api_key="invalid_api_key", version='v0')
-    with pytest.raises(Exception) as excinfo:
-        invalid_app.crawl_url('https://firecrawl.dev')
-    assert "Unexpected error during start crawl job: Status code 401. Unauthorized: Invalid token" in str(excinfo.value)
-
-# def test_should_return_error_for_blocklisted_url():
-#     app = FirecrawlApp(api_url=API_URL, api_key=TEST_API_KEY, version='v0')
-#     blocklisted_url = "https://twitter.com/fake-test"
-#     with pytest.raises(Exception) as excinfo:
-#         app.crawl_url(blocklisted_url)
-#     assert "Unexpected error during start crawl job: Status code 403. Firecrawl currently does not support social media scraping due to policy restrictions. We're actively working on building support for it." in str(excinfo.value)
-
-def test_crawl_url_wait_for_completion_e2e():
-    app = FirecrawlApp(api_url=API_URL, api_key=TEST_API_KEY, version='v0')
-    response = app.crawl_url('https://roastmywebsite.ai', {'crawlerOptions': {'excludes': ['blog/*']}}, True)
-    assert response is not None
-    assert len(response) > 0
-    assert 'content' in response[0]
-    assert "_Roast_" in response[0]['content']
-
-def test_crawl_url_with_idempotency_key_e2e():
-    app = FirecrawlApp(api_url=API_URL, api_key=TEST_API_KEY, version='v0')
-    uniqueIdempotencyKey = str(uuid4())
-    response = app.crawl_url('https://roastmywebsite.ai', {'crawlerOptions': {'excludes': ['blog/*']}}, True, 2, uniqueIdempotencyKey)
-    assert response is not None
-    assert len(response) > 0
-    assert 'content' in response[0]
-    assert "_Roast_" in response[0]['content']
-
-    with pytest.raises(Exception) as excinfo:
-        app.crawl_url('https://firecrawl.dev', {'crawlerOptions': {'excludes': ['blog/*']}}, True, 2, uniqueIdempotencyKey)
-    assert "Conflict: Failed to start crawl job due to a conflict. Idempotency key already used" in str(excinfo.value)
-
-def test_check_crawl_status_e2e():
-    app = FirecrawlApp(api_url=API_URL, api_key=TEST_API_KEY, version='v0')
-    response = app.crawl_url('https://firecrawl.dev', {'crawlerOptions': {'excludes': ['blog/*']}}, False)
-    assert response is not None
-    assert 'jobId' in response
-
-    time.sleep(30)  # wait for 30 seconds
-    status_response = app.check_crawl_status(response['jobId'])
-    assert status_response is not None
-    assert 'status' in status_response
-    assert status_response['status'] == 'completed'
-    assert 'data' in status_response
-    assert len(status_response['data']) > 0
-
-def test_search_e2e():
-    app = FirecrawlApp(api_url=API_URL, api_key=TEST_API_KEY, version='v0')
-    response = app.search("test query")
-    assert response is not None
-    assert 'content' in response[0]
-    assert len(response) > 2
-
-def test_search_invalid_api_key():
-    invalid_app = FirecrawlApp(api_url=API_URL, api_key="invalid_api_key", version='v0')
-    with pytest.raises(Exception) as excinfo:
-        invalid_app.search("test query")
-    assert "Unexpected error during search: Status code 401. Unauthorized: Invalid token" in str(excinfo.value)
-
-def test_llm_extraction():
-    app = FirecrawlApp(api_url=API_URL, api_key=TEST_API_KEY, version='v0')
-    response = app.scrape_url("https://firecrawl.dev", {
-        'extractorOptions': {
-            'mode': 'llm-extraction',
-            'extractionPrompt': "Based on the information on the page, find what the company's mission is and whether it supports SSO, and whether it is open source",
-            'extractionSchema': {
-                'type': 'object',
-                'properties': {
-                    'company_mission': {'type': 'string'},
-                    'supports_sso': {'type': 'boolean'},
-                    'is_open_source': {'type': 'boolean'}
-                },
-                'required': ['company_mission', 'supports_sso', 'is_open_source']
-            }
-        }
-    })
-    assert response is not None
-    assert 'llm_extraction' in response
-    llm_extraction = response['llm_extraction']
-    assert 'company_mission' in llm_extraction
-    assert isinstance(llm_extraction['supports_sso'], bool)
-    assert isinstance(llm_extraction['is_open_source'], bool)
build/lib/firecrawl/__tests__/v1/e2e_withAuth/__init__.py
DELETED
File without changes