firecrawl 1.3.1.tar.gz → 1.5.0.tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of firecrawl might be problematic.
- {firecrawl-1.3.1 → firecrawl-1.5.0}/PKG-INFO +64 -1
- {firecrawl-1.3.1 → firecrawl-1.5.0}/README.md +63 -0
- firecrawl-1.5.0/firecrawl/__init__.py +79 -0
- {firecrawl-1.3.1 → firecrawl-1.5.0}/firecrawl/firecrawl.py +174 -11
- {firecrawl-1.3.1 → firecrawl-1.5.0}/firecrawl.egg-info/PKG-INFO +64 -1
- firecrawl-1.3.1/firecrawl/__init__.py +0 -57
- {firecrawl-1.3.1 → firecrawl-1.5.0}/LICENSE +0 -0
- {firecrawl-1.3.1 → firecrawl-1.5.0}/firecrawl/__tests__/e2e_withAuth/__init__.py +0 -0
- {firecrawl-1.3.1 → firecrawl-1.5.0}/firecrawl/__tests__/e2e_withAuth/test.py +0 -0
- {firecrawl-1.3.1 → firecrawl-1.5.0}/firecrawl/__tests__/v1/e2e_withAuth/__init__.py +0 -0
- {firecrawl-1.3.1 → firecrawl-1.5.0}/firecrawl/__tests__/v1/e2e_withAuth/test.py +0 -0
- {firecrawl-1.3.1 → firecrawl-1.5.0}/firecrawl.egg-info/SOURCES.txt +0 -0
- {firecrawl-1.3.1 → firecrawl-1.5.0}/firecrawl.egg-info/dependency_links.txt +0 -0
- {firecrawl-1.3.1 → firecrawl-1.5.0}/firecrawl.egg-info/requires.txt +0 -0
- {firecrawl-1.3.1 → firecrawl-1.5.0}/firecrawl.egg-info/top_level.txt +0 -0
- {firecrawl-1.3.1 → firecrawl-1.5.0}/pyproject.toml +0 -0
- {firecrawl-1.3.1 → firecrawl-1.5.0}/setup.cfg +0 -0
- {firecrawl-1.3.1 → firecrawl-1.5.0}/setup.py +0 -0
{firecrawl-1.3.1 → firecrawl-1.5.0}/PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: firecrawl
-Version: 1.3.1
+Version: 1.5.0
 Summary: Python SDK for Firecrawl API
 Home-page: https://github.com/mendableai/firecrawl
 Author: Mendable.ai

@@ -189,6 +189,69 @@ async def start_crawl_and_watch():
 await start_crawl_and_watch()
 ```

+### Scraping multiple URLs in batch
+
+To batch scrape multiple URLs, use the `batch_scrape_urls` method. It takes the URLs and optional parameters as arguments. The `params` argument allows you to specify additional options for the scraper such as the output formats.
+
+```python
+idempotency_key = str(uuid.uuid4()) # optional idempotency key
+batch_scrape_result = app.batch_scrape_urls(['firecrawl.dev', 'mendable.ai'], {'formats': ['markdown', 'html']}, 2, idempotency_key)
+print(batch_scrape_result)
+```
+
+### Asynchronous batch scrape
+
+To run a batch scrape asynchronously, use the `async_batch_scrape_urls` method. It takes the starting URL and optional parameters as arguments. The `params` argument allows you to specify additional options for the scraper, such as the output formats.
+
+```python
+batch_scrape_result = app.async_batch_scrape_urls(['firecrawl.dev', 'mendable.ai'], {'formats': ['markdown', 'html']})
+print(batch_scrape_result)
+```
+
+### Checking batch scrape status
+
+To check the status of an asynchronous batch scrape job, use the `check_batch_scrape_status` method. It takes the job ID as a parameter and returns the current status of the batch scrape job.
+
+```python
+id = batch_scrape_result['id']
+status = app.check_batch_scrape_status(id)
+```
+
+### Batch scrape with WebSockets
+
+To use batch scrape with WebSockets, use the `batch_scrape_urls_and_watch` method. It takes the starting URL and optional parameters as arguments. The `params` argument allows you to specify additional options for the scraper, such as the output formats.
+
+```python
+# inside an async function...
+nest_asyncio.apply()
+
+# Define event handlers
+def on_document(detail):
+    print("DOC", detail)
+
+def on_error(detail):
+    print("ERR", detail['error'])
+
+def on_done(detail):
+    print("DONE", detail['status'])
+
+# Function to start the crawl and watch process
+async def start_crawl_and_watch():
+    # Initiate the crawl job and get the watcher
+    watcher = app.batch_scrape_urls_and_watch(['firecrawl.dev', 'mendable.ai'], {'formats': ['markdown', 'html']})
+
+    # Add event listeners
+    watcher.add_event_listener("document", on_document)
+    watcher.add_event_listener("error", on_error)
+    watcher.add_event_listener("done", on_done)
+
+    # Start the watcher
+    await watcher.connect()
+
+# Run the event loop
+await start_crawl_and_watch()
+```
+
 ## Error Handling

 The SDK handles errors returned by the Firecrawl API and raises appropriate exceptions. If an error occurs during a request, an exception will be raised with a descriptive error message.
{firecrawl-1.3.1 → firecrawl-1.5.0}/README.md

@@ -149,6 +149,69 @@ async def start_crawl_and_watch():
The added block is identical to the documentation shown in the PKG-INFO diff above: the "Scraping multiple URLs in batch", "Asynchronous batch scrape", "Checking batch scrape status", and "Batch scrape with WebSockets" sections are inserted between the crawl WebSocket example and "## Error Handling".
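The documentation above checks the status of an asynchronous batch scrape once. A minimal polling sketch is shown below; it assumes only what the SDK code later in this diff shows, namely that `check_batch_scrape_status` returns a dict with `status` and `data` keys. The helper name and the timeout handling are illustrative, not part of the SDK.

```python
import time

def wait_for_batch(app, job_id, poll_interval=2, timeout=300):
    """Hypothetical helper: poll an async batch scrape job until it completes."""
    deadline = time.monotonic() + timeout
    while time.monotonic() < deadline:
        status = app.check_batch_scrape_status(job_id)
        if status.get('status') == 'completed':
            return status.get('data')
        time.sleep(poll_interval)
    raise TimeoutError(f"Batch scrape {job_id} did not complete within {timeout}s")

# Example usage (job structure follows async_batch_scrape_urls' documented response):
# job = app.async_batch_scrape_urls(['firecrawl.dev'], {'formats': ['markdown']})
# docs = wait_for_batch(app, job['id'])
```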
firecrawl-1.5.0/firecrawl/__init__.py (new file)

@@ -0,0 +1,79 @@
+"""
+This is the Firecrawl package.
+
+This package provides a Python SDK for interacting with the Firecrawl API.
+It includes methods to scrape URLs, perform searches, initiate and monitor crawl jobs,
+and check the status of these jobs.
+
+For more information visit https://github.com/firecrawl/
+"""
+
+import logging
+import os
+
+from .firecrawl import FirecrawlApp  # noqa
+
+__version__ = "1.5.0"
+
+# Define the logger for the Firecrawl project
+logger: logging.Logger = logging.getLogger("firecrawl")
+
+
+def _configure_logger() -> None:
+    """
+    Configure the firecrawl logger for console output.
+
+    The function attaches a handler for console output with a specific format and date
+    format to the firecrawl logger.
+    """
+    try:
+        # Create the formatter
+        formatter = logging.Formatter(
+            "[%(asctime)s - %(name)s:%(lineno)d - %(levelname)s] %(message)s",
+            datefmt="%Y-%m-%d %H:%M:%S",
+        )
+
+        # Create the console handler and set the formatter
+        console_handler = logging.StreamHandler()
+        console_handler.setFormatter(formatter)
+
+        # Add the console handler to the firecrawl logger
+        logger.addHandler(console_handler)
+    except Exception as e:
+        logger.error("Failed to configure logging: %s", e)
+
+
+def setup_logging() -> None:
+    """Set up logging based on the FIRECRAWL_LOGGING_LEVEL environment variable."""
+    # Check if the firecrawl logger already has a handler
+    if logger.hasHandlers():
+        return  # To prevent duplicate logging
+
+    # Check if the FIRECRAWL_LOGGING_LEVEL environment variable is set
+    if not (env := os.getenv("FIRECRAWL_LOGGING_LEVEL", "").upper()):
+        # Attach a no-op handler to prevent warnings about no handlers
+        logger.addHandler(logging.NullHandler())
+        return
+
+    # Attach the console handler to the firecrawl logger
+    _configure_logger()
+
+    # Set the logging level based on the FIRECRAWL_LOGGING_LEVEL environment variable
+    if env == "DEBUG":
+        logger.setLevel(logging.DEBUG)
+    elif env == "INFO":
+        logger.setLevel(logging.INFO)
+    elif env == "WARNING":
+        logger.setLevel(logging.WARNING)
+    elif env == "ERROR":
+        logger.setLevel(logging.ERROR)
+    elif env == "CRITICAL":
+        logger.setLevel(logging.CRITICAL)
+    else:
+        logger.setLevel(logging.INFO)
+        logger.warning("Unknown logging level: %s, defaulting to INFO", env)
+
+
+# Initialize logging configuration when the module is imported
+setup_logging()
+logger.debug("Debugging logger setup")
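Because `setup_logging()` runs when the package is imported, the environment variable has to be set before the first `import firecrawl` for the console handler to be attached. A minimal sketch of opting into debug logging follows; the `FirecrawlApp(api_key=...)` call is shown only for context and the key is a placeholder.

```python
import os

# Must be set before importing firecrawl, since setup_logging() runs at import time.
os.environ["FIRECRAWL_LOGGING_LEVEL"] = "DEBUG"

from firecrawl import FirecrawlApp  # import triggers setup_logging()

app = FirecrawlApp(api_key="fc-YOUR-API-KEY")  # placeholder key
```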
{firecrawl-1.3.1 → firecrawl-1.5.0}/firecrawl/firecrawl.py

@@ -81,8 +81,10 @@ class FirecrawlApp:
             response = response.json()
             if response['success'] and 'data' in response:
                 return response['data']
-
+            elif "error" in response:
                 raise Exception(f'Failed to scrape URL. Error: {response["error"]}')
+            else:
+                raise Exception(f'Failed to scrape URL. Error: {response}')
         else:
             self._handle_error(response, 'scrape URL')

@@ -187,17 +189,38 @@ class FirecrawlApp:
         headers = self._prepare_headers()
         response = self._get_request(f'{self.api_url}{endpoint}', headers)
         if response.status_code == 200:
-
+            status_data = response.json()
+            if status_data['status'] == 'completed':
+                if 'data' in status_data:
+                    data = status_data['data']
+                    while 'next' in status_data:
+                        next_url = status_data.get('next')
+                        if not next_url:
+                            logger.warning("Expected 'next' URL is missing.")
+                            break
+                        try:
+                            status_response = self._get_request(next_url, headers)
+                            if status_response.status_code != 200:
+                                logger.error(f"Failed to fetch next page: {status_response.status_code}")
+                                break
+                            status_data = status_response.json()
+                            data.extend(status_data.get('data', []))
+                        except Exception as e:
+                            logger.error(f"Error during pagination request: {e}")
+                            break
+                    status_data.pop('next', None)
+                    status_data['data'] = data
+
             return {
                 'success': True,
-                'status':
-                'total':
-                'completed':
-                'creditsUsed':
-                'expiresAt':
-                '
-                '
-                '
+                'status': status_data.get('status'),
+                'total': status_data.get('total'),
+                'completed': status_data.get('completed'),
+                'creditsUsed': status_data.get('creditsUsed'),
+                'expiresAt': status_data.get('expiresAt'),
+                'data': status_data.get('data'),
+                'error': status_data.get('error'),
+                'next': status_data.get('next', None)
             }
         else:
             self._handle_error(response, 'check crawl status')

@@ -266,11 +289,151 @@ class FirecrawlApp:
             response = response.json()
             if response['success'] and 'links' in response:
                 return response
-
+            elif 'error' in response:
                 raise Exception(f'Failed to map URL. Error: {response["error"]}')
+            else:
+                raise Exception(f'Failed to map URL. Error: {response}')
         else:
             self._handle_error(response, 'map')

+    def batch_scrape_urls(self, urls: list[str],
+                  params: Optional[Dict[str, Any]] = None,
+                  poll_interval: Optional[int] = 2,
+                  idempotency_key: Optional[str] = None) -> Any:
+        """
+        Initiate a batch scrape job for the specified URLs using the Firecrawl API.
+
+        Args:
+            urls (list[str]): The URLs to scrape.
+            params (Optional[Dict[str, Any]]): Additional parameters for the scraper.
+            poll_interval (Optional[int]): Time in seconds between status checks when waiting for job completion. Defaults to 2 seconds.
+            idempotency_key (Optional[str]): A unique uuid key to ensure idempotency of requests.
+
+        Returns:
+            Dict[str, Any]: A dictionary containing the scrape results. The structure includes:
+                - 'success' (bool): Indicates if the batch scrape was successful.
+                - 'status' (str): The final status of the batch scrape job (e.g., 'completed').
+                - 'completed' (int): Number of scraped pages that completed.
+                - 'total' (int): Total number of scraped pages.
+                - 'creditsUsed' (int): Estimated number of API credits used for this batch scrape.
+                - 'expiresAt' (str): ISO 8601 formatted date-time string indicating when the batch scrape data expires.
+                - 'data' (List[Dict]): List of all the scraped pages.
+
+        Raises:
+            Exception: If the batch scrape job initiation or monitoring fails.
+        """
+        endpoint = f'/v1/batch/scrape'
+        headers = self._prepare_headers(idempotency_key)
+        json_data = {'urls': urls}
+        if params:
+            json_data.update(params)
+        response = self._post_request(f'{self.api_url}{endpoint}', json_data, headers)
+        if response.status_code == 200:
+            id = response.json().get('id')
+            return self._monitor_job_status(id, headers, poll_interval)
+
+        else:
+            self._handle_error(response, 'start batch scrape job')
+
+
+    def async_batch_scrape_urls(self, urls: list[str], params: Optional[Dict[str, Any]] = None, idempotency_key: Optional[str] = None) -> Dict[str, Any]:
+        """
+        Initiate a crawl job asynchronously.
+
+        Args:
+            urls (list[str]): The URLs to scrape.
+            params (Optional[Dict[str, Any]]): Additional parameters for the scraper.
+            idempotency_key (Optional[str]): A unique uuid key to ensure idempotency of requests.
+
+        Returns:
+            Dict[str, Any]: A dictionary containing the batch scrape initiation response. The structure includes:
+                - 'success' (bool): Indicates if the batch scrape initiation was successful.
+                - 'id' (str): The unique identifier for the batch scrape job.
+                - 'url' (str): The URL to check the status of the batch scrape job.
+        """
+        endpoint = f'/v1/batch/scrape'
+        headers = self._prepare_headers(idempotency_key)
+        json_data = {'urls': urls}
+        if params:
+            json_data.update(params)
+        response = self._post_request(f'{self.api_url}{endpoint}', json_data, headers)
+        if response.status_code == 200:
+            return response.json()
+        else:
+            self._handle_error(response, 'start batch scrape job')
+
+    def batch_scrape_urls_and_watch(self, urls: list[str], params: Optional[Dict[str, Any]] = None, idempotency_key: Optional[str] = None) -> 'CrawlWatcher':
+        """
+        Initiate a batch scrape job and return a CrawlWatcher to monitor the job via WebSocket.
+
+        Args:
+            urls (list[str]): The URLs to scrape.
+            params (Optional[Dict[str, Any]]): Additional parameters for the scraper.
+            idempotency_key (Optional[str]): A unique uuid key to ensure idempotency of requests.
+
+        Returns:
+            CrawlWatcher: An instance of CrawlWatcher to monitor the batch scrape job.
+        """
+        crawl_response = self.async_batch_scrape_urls(urls, params, idempotency_key)
+        if crawl_response['success'] and 'id' in crawl_response:
+            return CrawlWatcher(crawl_response['id'], self)
+        else:
+            raise Exception("Batch scrape job failed to start")
+
+    def check_batch_scrape_status(self, id: str) -> Any:
+        """
+        Check the status of a batch scrape job using the Firecrawl API.
+
+        Args:
+            id (str): The ID of the batch scrape job.
+
+        Returns:
+            Any: The status of the batch scrape job.
+
+        Raises:
+            Exception: If the status check request fails.
+        """
+        endpoint = f'/v1/batch/scrape/{id}'
+
+        headers = self._prepare_headers()
+        response = self._get_request(f'{self.api_url}{endpoint}', headers)
+        if response.status_code == 200:
+            status_data = response.json()
+            if status_data['status'] == 'completed':
+                if 'data' in status_data:
+                    data = status_data['data']
+                    while 'next' in status_data:
+                        next_url = status_data.get('next')
+                        if not next_url:
+                            logger.warning("Expected 'next' URL is missing.")
+                            break
+                        try:
+                            status_response = self._get_request(next_url, headers)
+                            if status_response.status_code != 200:
+                                logger.error(f"Failed to fetch next page: {status_response.status_code}")
+                                break
+                            status_data = status_response.json()
+                            data.extend(status_data.get('data', []))
+                        except Exception as e:
+                            logger.error(f"Error during pagination request: {e}")
+                            break
+                    status_data.pop('next', None)
+                    status_data['data'] = data
+
+            return {
+                'success': True,
+                'status': status_data.get('status'),
+                'total': status_data.get('total'),
+                'completed': status_data.get('completed'),
+                'creditsUsed': status_data.get('creditsUsed'),
+                'expiresAt': status_data.get('expiresAt'),
+                'data': status_data.get('data'),
+                'error': status_data.get('error'),
+                'next': status_data.get('next', None)
+            }
+        else:
+            self._handle_error(response, 'check batch scrape status')
+
     def _prepare_headers(self, idempotency_key: Optional[str] = None) -> Dict[str, str]:
         """
         Prepare the headers for API requests.
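Both status methods above aggregate paginated results by following the `next` URL the API returns and extending the accumulated `data` list. The standalone sketch below illustrates that pattern outside the SDK; the `requests` calls, the bearer-token header shape, and the function name are assumptions for illustration, not Firecrawl internals.

```python
import requests

def fetch_all_pages(status_url: str, api_key: str) -> dict:
    """Follow 'next' links until the API stops returning them, collecting 'data'."""
    headers = {"Authorization": f"Bearer {api_key}"}  # assumed auth header shape
    status = requests.get(status_url, headers=headers).json()
    data = status.get("data", [])
    while status.get("next"):
        page = requests.get(status["next"], headers=headers)
        if page.status_code != 200:
            break  # stop aggregating on a failed page fetch, mirroring the SDK's loop
        status = page.json()
        data.extend(status.get("data", []))
    status["data"] = data
    status.pop("next", None)
    return status
```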
{firecrawl-1.3.1 → firecrawl-1.5.0}/firecrawl.egg-info/PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: firecrawl
-Version: 1.3.1
+Version: 1.5.0
 Summary: Python SDK for Firecrawl API
 Home-page: https://github.com/mendableai/firecrawl
 Author: Mendable.ai

@@ -189,6 +189,69 @@ async def start_crawl_and_watch():
Same change as the PKG-INFO diff above: the batch scrape documentation block is added before "## Error Handling".
firecrawl-1.3.1/firecrawl/__init__.py (deleted; superseded by the new firecrawl/__init__.py above)

@@ -1,57 +0,0 @@
-"""
-This is the Firecrawl package.
-
-This package provides a Python SDK for interacting with the Firecrawl API.
-It includes methods to scrape URLs, perform searches, initiate and monitor crawl jobs,
-and check the status of these jobs.
-
-For more information visit https://github.com/firecrawl/
-"""
-
-import logging
-import os
-
-from .firecrawl import FirecrawlApp
-
-__version__ = "1.3.1"
-
-# Define the logger for the Firecrawl project
-logger: logging.Logger = logging.getLogger("firecrawl")
-
-
-def _basic_config() -> None:
-    """Set up basic configuration for logging with a specific format and date format."""
-    try:
-        logging.basicConfig(
-            format="[%(asctime)s - %(name)s:%(lineno)d - %(levelname)s] %(message)s",
-            datefmt="%Y-%m-%d %H:%M:%S",
-        )
-    except Exception as e:
-        logger.error("Failed to configure logging: %s", e)
-
-
-def setup_logging() -> None:
-    """Set up logging based on the FIRECRAWL_LOGGING_LEVEL environment variable."""
-    env = os.environ.get(
-        "FIRECRAWL_LOGGING_LEVEL", "INFO"
-    ).upper()  # Default to 'INFO' level
-    _basic_config()
-
-    if env == "DEBUG":
-        logger.setLevel(logging.DEBUG)
-    elif env == "INFO":
-        logger.setLevel(logging.INFO)
-    elif env == "WARNING":
-        logger.setLevel(logging.WARNING)
-    elif env == "ERROR":
-        logger.setLevel(logging.ERROR)
-    elif env == "CRITICAL":
-        logger.setLevel(logging.CRITICAL)
-    else:
-        logger.setLevel(logging.INFO)
-        logger.warning("Unknown logging level: %s, defaulting to INFO", env)
-
-
-# Initialize logging configuration when the module is imported
-setup_logging()
-logger.debug("Debugging logger setup")
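With `_basic_config` removed, the package no longer calls `logging.basicConfig` on import; when `FIRECRAWL_LOGGING_LEVEL` is unset, the new module only attaches a `NullHandler`. An application that prefers to manage the SDK's log output itself could attach its own handler to the `firecrawl` logger, roughly as sketched here (handler and format choices are illustrative):

```python
import logging

# Route the SDK's "firecrawl" logger through the application's own logging setup.
firecrawl_logger = logging.getLogger("firecrawl")
firecrawl_logger.setLevel(logging.INFO)

handler = logging.StreamHandler()
handler.setFormatter(logging.Formatter("%(asctime)s %(name)s %(levelname)s %(message)s"))
firecrawl_logger.addHandler(handler)
```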
The remaining files (LICENSE, the e2e test suites, the egg-info metadata files, pyproject.toml, setup.cfg, and setup.py) are unchanged.