firecrawl 1.3.1__py3-none-any.whl → 1.5.0__py3-none-any.whl

This diff shows the changes between two publicly released versions of the package, as they appear in their respective public registries. It is provided for informational purposes only.

Note: this version of firecrawl has been flagged as potentially problematic.

firecrawl/__init__.py CHANGED
@@ -11,32 +11,54 @@ For more information visit https://github.com/firecrawl/
  import logging
  import os
 
- from .firecrawl import FirecrawlApp
+ from .firecrawl import FirecrawlApp # noqa
 
- __version__ = "1.3.1"
+ __version__ = "1.5.0"
 
  # Define the logger for the Firecrawl project
  logger: logging.Logger = logging.getLogger("firecrawl")
 
 
- def _basic_config() -> None:
-     """Set up basic configuration for logging with a specific format and date format."""
+ def _configure_logger() -> None:
+     """
+     Configure the firecrawl logger for console output.
+
+     The function attaches a handler for console output with a specific format and date
+     format to the firecrawl logger.
+     """
      try:
-         logging.basicConfig(
-             format="[%(asctime)s - %(name)s:%(lineno)d - %(levelname)s] %(message)s",
+         # Create the formatter
+         formatter = logging.Formatter(
+             "[%(asctime)s - %(name)s:%(lineno)d - %(levelname)s] %(message)s",
              datefmt="%Y-%m-%d %H:%M:%S",
          )
+
+         # Create the console handler and set the formatter
+         console_handler = logging.StreamHandler()
+         console_handler.setFormatter(formatter)
+
+         # Add the console handler to the firecrawl logger
+         logger.addHandler(console_handler)
      except Exception as e:
          logger.error("Failed to configure logging: %s", e)
 
 
  def setup_logging() -> None:
      """Set up logging based on the FIRECRAWL_LOGGING_LEVEL environment variable."""
-     env = os.environ.get(
-         "FIRECRAWL_LOGGING_LEVEL", "INFO"
-     ).upper() # Default to 'INFO' level
-     _basic_config()
+     # Check if the firecrawl logger already has a handler
+     if logger.hasHandlers():
+         return # To prevent duplicate logging
+
+     # Check if the FIRECRAWL_LOGGING_LEVEL environment variable is set
+     if not (env := os.getenv("FIRECRAWL_LOGGING_LEVEL", "").upper()):
+         # Attach a no-op handler to prevent warnings about no handlers
+         logger.addHandler(logging.NullHandler())
+         return
+
+     # Attach the console handler to the firecrawl logger
+     _configure_logger()
 
+     # Set the logging level based on the FIRECRAWL_LOGGING_LEVEL environment variable
      if env == "DEBUG":
          logger.setLevel(logging.DEBUG)
      elif env == "INFO":
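
Editor's note: logging is now opt-in. With `FIRECRAWL_LOGGING_LEVEL` unset, the SDK attaches a `NullHandler` and emits nothing; set the variable to get console output. A minimal usage sketch, assuming `setup_logging()` is invoked at import time (not shown in this hunk):

```python
import os

# Opt in before importing firecrawl, since the level is read at setup time
os.environ["FIRECRAWL_LOGGING_LEVEL"] = "DEBUG"

import firecrawl  # noqa: E402  (import after setting the env var on purpose)

firecrawl.logger.debug("firecrawl console logging is active")
```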
firecrawl/firecrawl.py CHANGED
@@ -81,8 +81,10 @@ class FirecrawlApp:
              response = response.json()
              if response['success'] and 'data' in response:
                  return response['data']
-             else:
+             elif "error" in response:
                  raise Exception(f'Failed to scrape URL. Error: {response["error"]}')
+             else:
+                 raise Exception(f'Failed to scrape URL. Error: {response}')
          else:
              self._handle_error(response, 'scrape URL')
 
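Editor's note: the rewritten branch surfaces the API's `error` field when present and falls back to the raw payload otherwise, so a response without an `error` key no longer raises a `KeyError` that masks the real failure. The same fallback pattern in isolation, with a hypothetical `payload` dict:

```python
def scrape_error_message(payload: dict) -> str:
    """Prefer the API-provided 'error' field; fall back to the whole payload."""
    if "error" in payload:
        return f"Failed to scrape URL. Error: {payload['error']}"
    return f"Failed to scrape URL. Error: {payload}"

print(scrape_error_message({"success": False, "error": "rate limited"}))
print(scrape_error_message({"success": False}))  # no 'error' key: no KeyError
```
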
@@ -187,17 +189,38 @@ class FirecrawlApp:
          headers = self._prepare_headers()
          response = self._get_request(f'{self.api_url}{endpoint}', headers)
          if response.status_code == 200:
-             data = response.json()
+             status_data = response.json()
+             if status_data['status'] == 'completed':
+                 if 'data' in status_data:
+                     data = status_data['data']
+                     while 'next' in status_data:
+                         next_url = status_data.get('next')
+                         if not next_url:
+                             logger.warning("Expected 'next' URL is missing.")
+                             break
+                         try:
+                             status_response = self._get_request(next_url, headers)
+                             if status_response.status_code != 200:
+                                 logger.error(f"Failed to fetch next page: {status_response.status_code}")
+                                 break
+                             status_data = status_response.json()
+                             data.extend(status_data.get('data', []))
+                         except Exception as e:
+                             logger.error(f"Error during pagination request: {e}")
+                             break
+                     status_data.pop('next', None)
+                     status_data['data'] = data
+
              return {
                  'success': True,
-                 'status': data.get('status'),
-                 'total': data.get('total'),
-                 'completed': data.get('completed'),
-                 'creditsUsed': data.get('creditsUsed'),
-                 'expiresAt': data.get('expiresAt'),
-                 'next': data.get('next'),
-                 'data': data.get('data'),
-                 'error': data.get('error')
+                 'status': status_data.get('status'),
+                 'total': status_data.get('total'),
+                 'completed': status_data.get('completed'),
+                 'creditsUsed': status_data.get('creditsUsed'),
+                 'expiresAt': status_data.get('expiresAt'),
+                 'data': status_data.get('data'),
+                 'error': status_data.get('error'),
+                 'next': status_data.get('next', None)
              }
          else:
              self._handle_error(response, 'check crawl status')
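
Editor's note: `check_crawl_status` now walks the `next` cursor on completed jobs and merges every page into one `data` list before returning. The cursor-following pattern in isolation, with a hypothetical `get_json(url)` helper standing in for `self._get_request(...).json()`:

```python
from typing import Any, Callable, Dict, List

def collect_pages(first_page: Dict[str, Any],
                  get_json: Callable[[str], Dict[str, Any]]) -> List[Any]:
    """Follow the 'next' cursor and merge each page's 'data' into one list."""
    data: List[Any] = list(first_page.get('data', []))
    page = first_page
    while page.get('next'):            # stop once the cursor is absent or empty
        page = get_json(page['next'])  # fetch the next page of results
        data.extend(page.get('data', []))
    return data

# Usage sketch with canned pages instead of real HTTP calls
pages = {'u2': {'data': [3, 4], 'next': None}}
print(collect_pages({'data': [1, 2], 'next': 'u2'}, pages.__getitem__))  # [1, 2, 3, 4]
```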
@@ -266,11 +289,151 @@ class FirecrawlApp:
              response = response.json()
              if response['success'] and 'links' in response:
                  return response
-             else:
+             elif 'error' in response:
                  raise Exception(f'Failed to map URL. Error: {response["error"]}')
+             else:
+                 raise Exception(f'Failed to map URL. Error: {response}')
          else:
              self._handle_error(response, 'map')
 
+     def batch_scrape_urls(self, urls: list[str],
+                           params: Optional[Dict[str, Any]] = None,
+                           poll_interval: Optional[int] = 2,
+                           idempotency_key: Optional[str] = None) -> Any:
+         """
+         Initiate a batch scrape job for the specified URLs using the Firecrawl API.
+
+         Args:
+             urls (list[str]): The URLs to scrape.
+             params (Optional[Dict[str, Any]]): Additional parameters for the scraper.
+             poll_interval (Optional[int]): Time in seconds between status checks when waiting for job completion. Defaults to 2 seconds.
+             idempotency_key (Optional[str]): A unique uuid key to ensure idempotency of requests.
+
+         Returns:
+             Dict[str, Any]: A dictionary containing the scrape results. The structure includes:
+              - 'success' (bool): Indicates if the batch scrape was successful.
+              - 'status' (str): The final status of the batch scrape job (e.g., 'completed').
+              - 'completed' (int): Number of scraped pages that completed.
+              - 'total' (int): Total number of scraped pages.
+              - 'creditsUsed' (int): Estimated number of API credits used for this batch scrape.
+              - 'expiresAt' (str): ISO 8601 formatted date-time string indicating when the batch scrape data expires.
+              - 'data' (List[Dict]): List of all the scraped pages.
+
+         Raises:
+             Exception: If the batch scrape job initiation or monitoring fails.
+         """
+         endpoint = f'/v1/batch/scrape'
+         headers = self._prepare_headers(idempotency_key)
+         json_data = {'urls': urls}
+         if params:
+             json_data.update(params)
+         response = self._post_request(f'{self.api_url}{endpoint}', json_data, headers)
+         if response.status_code == 200:
+             id = response.json().get('id')
+             return self._monitor_job_status(id, headers, poll_interval)
+
+         else:
+             self._handle_error(response, 'start batch scrape job')
+
+
+     def async_batch_scrape_urls(self, urls: list[str], params: Optional[Dict[str, Any]] = None, idempotency_key: Optional[str] = None) -> Dict[str, Any]:
+         """
+         Initiate a crawl job asynchronously.
+
+         Args:
+             urls (list[str]): The URLs to scrape.
+             params (Optional[Dict[str, Any]]): Additional parameters for the scraper.
+             idempotency_key (Optional[str]): A unique uuid key to ensure idempotency of requests.
+
+         Returns:
+             Dict[str, Any]: A dictionary containing the batch scrape initiation response. The structure includes:
+              - 'success' (bool): Indicates if the batch scrape initiation was successful.
+              - 'id' (str): The unique identifier for the batch scrape job.
+              - 'url' (str): The URL to check the status of the batch scrape job.
+         """
+         endpoint = f'/v1/batch/scrape'
+         headers = self._prepare_headers(idempotency_key)
+         json_data = {'urls': urls}
+         if params:
+             json_data.update(params)
+         response = self._post_request(f'{self.api_url}{endpoint}', json_data, headers)
+         if response.status_code == 200:
+             return response.json()
+         else:
+             self._handle_error(response, 'start batch scrape job')
+
+     def batch_scrape_urls_and_watch(self, urls: list[str], params: Optional[Dict[str, Any]] = None, idempotency_key: Optional[str] = None) -> 'CrawlWatcher':
+         """
+         Initiate a batch scrape job and return a CrawlWatcher to monitor the job via WebSocket.
+
+         Args:
+             urls (list[str]): The URLs to scrape.
+             params (Optional[Dict[str, Any]]): Additional parameters for the scraper.
+             idempotency_key (Optional[str]): A unique uuid key to ensure idempotency of requests.
+
+         Returns:
+             CrawlWatcher: An instance of CrawlWatcher to monitor the batch scrape job.
+         """
+         crawl_response = self.async_batch_scrape_urls(urls, params, idempotency_key)
+         if crawl_response['success'] and 'id' in crawl_response:
+             return CrawlWatcher(crawl_response['id'], self)
+         else:
+             raise Exception("Batch scrape job failed to start")
+
+     def check_batch_scrape_status(self, id: str) -> Any:
+         """
+         Check the status of a batch scrape job using the Firecrawl API.
+
+         Args:
+             id (str): The ID of the batch scrape job.
+
+         Returns:
+             Any: The status of the batch scrape job.
+
+         Raises:
+             Exception: If the status check request fails.
+         """
+         endpoint = f'/v1/batch/scrape/{id}'
+
+         headers = self._prepare_headers()
+         response = self._get_request(f'{self.api_url}{endpoint}', headers)
+         if response.status_code == 200:
+             status_data = response.json()
+             if status_data['status'] == 'completed':
+                 if 'data' in status_data:
+                     data = status_data['data']
+                     while 'next' in status_data:
+                         next_url = status_data.get('next')
+                         if not next_url:
+                             logger.warning("Expected 'next' URL is missing.")
+                             break
+                         try:
+                             status_response = self._get_request(next_url, headers)
+                             if status_response.status_code != 200:
+                                 logger.error(f"Failed to fetch next page: {status_response.status_code}")
+                                 break
+                             status_data = status_response.json()
+                             data.extend(status_data.get('data', []))
+                         except Exception as e:
+                             logger.error(f"Error during pagination request: {e}")
+                             break
+                     status_data.pop('next', None)
+                     status_data['data'] = data
+
+             return {
+                 'success': True,
+                 'status': status_data.get('status'),
+                 'total': status_data.get('total'),
+                 'completed': status_data.get('completed'),
+                 'creditsUsed': status_data.get('creditsUsed'),
+                 'expiresAt': status_data.get('expiresAt'),
+                 'data': status_data.get('data'),
+                 'error': status_data.get('error'),
+                 'next': status_data.get('next', None)
+             }
+         else:
+             self._handle_error(response, 'check batch scrape status')
+
      def _prepare_headers(self, idempotency_key: Optional[str] = None) -> Dict[str, str]:
          """
          Prepare the headers for API requests.
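
Editor's note: the new methods compose into a fire-and-poll workflow: `async_batch_scrape_urls` starts the job and returns its id, and `check_batch_scrape_status` reports progress, aggregating paginated results once the job completes. A minimal sketch, assuming a valid API key and a job that eventually reaches a terminal status:

```python
import time

from firecrawl import FirecrawlApp

app = FirecrawlApp(api_key="fc-YOUR_API_KEY")  # placeholder key

# Start the batch scrape without blocking
job = app.async_batch_scrape_urls(['firecrawl.dev', 'mendable.ai'],
                                  {'formats': ['markdown']})

# Poll until done; 'completed' results already have all pages merged
status = app.check_batch_scrape_status(job['id'])
while status['status'] not in ('completed', 'failed'):
    time.sleep(2)
    status = app.check_batch_scrape_status(job['id'])

print(status['status'], len(status.get('data') or []))
```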
firecrawl-1.3.1.dist-info/METADATA → firecrawl-1.5.0.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: firecrawl
- Version: 1.3.1
+ Version: 1.5.0
  Summary: Python SDK for Firecrawl API
  Home-page: https://github.com/mendableai/firecrawl
  Author: Mendable.ai
@@ -189,6 +189,69 @@ async def start_crawl_and_watch():
      await start_crawl_and_watch()
  ```
 
+ ### Scraping multiple URLs in batch
+
+ To batch scrape multiple URLs, use the `batch_scrape_urls` method. It takes the URLs and optional parameters as arguments. The `params` argument allows you to specify additional options for the scraper, such as the output formats.
+
+ ```python
+ idempotency_key = str(uuid.uuid4()) # optional idempotency key
+ batch_scrape_result = app.batch_scrape_urls(['firecrawl.dev', 'mendable.ai'], {'formats': ['markdown', 'html']}, 2, idempotency_key)
+ print(batch_scrape_result)
+ ```
+
+ ### Asynchronous batch scrape
+
+ To run a batch scrape asynchronously, use the `async_batch_scrape_urls` method. It takes the URLs and optional parameters as arguments. The `params` argument allows you to specify additional options for the scraper, such as the output formats.
+
+ ```python
+ batch_scrape_result = app.async_batch_scrape_urls(['firecrawl.dev', 'mendable.ai'], {'formats': ['markdown', 'html']})
+ print(batch_scrape_result)
+ ```
+
+ ### Checking batch scrape status
+
+ To check the status of an asynchronous batch scrape job, use the `check_batch_scrape_status` method. It takes the job ID as a parameter and returns the current status of the batch scrape job.
+
+ ```python
+ id = batch_scrape_result['id']
+ status = app.check_batch_scrape_status(id)
+ ```
+
+ ### Batch scrape with WebSockets
+
+ To use batch scrape with WebSockets, use the `batch_scrape_urls_and_watch` method. It takes the URLs and optional parameters as arguments. The `params` argument allows you to specify additional options for the scraper, such as the output formats.
+
+ ```python
+ # inside an async function...
+ nest_asyncio.apply()
+
+ # Define event handlers
+ def on_document(detail):
+     print("DOC", detail)
+
+ def on_error(detail):
+     print("ERR", detail['error'])
+
+ def on_done(detail):
+     print("DONE", detail['status'])
+
+ # Function to start the batch scrape and watch process
+ async def start_crawl_and_watch():
+     # Initiate the batch scrape job and get the watcher
+     watcher = app.batch_scrape_urls_and_watch(['firecrawl.dev', 'mendable.ai'], {'formats': ['markdown', 'html']})
+
+     # Add event listeners
+     watcher.add_event_listener("document", on_document)
+     watcher.add_event_listener("error", on_error)
+     watcher.add_event_listener("done", on_done)
+
+     # Start the watcher
+     await watcher.connect()
+
+ # Run the event loop
+ await start_crawl_and_watch()
+ ```
+
  ## Error Handling
 
  The SDK handles errors returned by the Firecrawl API and raises appropriate exceptions. If an error occurs during a request, an exception will be raised with a descriptive error message.
firecrawl-1.5.0.dist-info/RECORD ADDED
@@ -0,0 +1,11 @@
+ firecrawl/__init__.py,sha256=6UEPRP09ZHMJ99KG4N8qiTyWW7RNaGzY18bYkrXRAqw,2543
+ firecrawl/firecrawl.py,sha256=09QENx-ME8455WiRpKV53-2cFh7T4MX0fX5vRP7It0M,28045
+ firecrawl/__tests__/e2e_withAuth/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ firecrawl/__tests__/e2e_withAuth/test.py,sha256=L-umFR3WyrJso1EwqkxjbTMr5AEI4t5zDfhQcCzitOI,7911
+ firecrawl/__tests__/v1/e2e_withAuth/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ firecrawl/__tests__/v1/e2e_withAuth/test.py,sha256=KQMmGAtJAIafja6EGtJ-W9162w2Hm6PNjqKl3_RQXLA,16456
+ firecrawl-1.5.0.dist-info/LICENSE,sha256=nPCunEDwjRGHlmjvsiDUyIWbkqqyj3Ej84ntnh0g0zA,1084
+ firecrawl-1.5.0.dist-info/METADATA,sha256=Wwo7Do4R_42Fbkq0jhOMxhgCTC4iZxRJ1aILD2ijFzs,10596
+ firecrawl-1.5.0.dist-info/WHEEL,sha256=2wepM1nk4DS4eFpYrW1TTqPcoGNfHhhO_i5m4cOimbo,92
+ firecrawl-1.5.0.dist-info/top_level.txt,sha256=jTvz79zWhiyAezfmmHe4FQ-hR60C59UU5FrjMjijLu8,10
+ firecrawl-1.5.0.dist-info/RECORD,,
firecrawl-1.3.1.dist-info/RECORD DELETED
@@ -1,11 +0,0 @@
- firecrawl/__init__.py,sha256=ywBQO_FgxUjvRrCUWiuOTlASMKIQmpB1e7uRPBnaw-U,1682
- firecrawl/firecrawl.py,sha256=V7V3kmzCFCoqjomD_I9yPD0h8uq8GkUsi7-x6NGrw0A,20008
- firecrawl/__tests__/e2e_withAuth/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- firecrawl/__tests__/e2e_withAuth/test.py,sha256=L-umFR3WyrJso1EwqkxjbTMr5AEI4t5zDfhQcCzitOI,7911
- firecrawl/__tests__/v1/e2e_withAuth/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- firecrawl/__tests__/v1/e2e_withAuth/test.py,sha256=KQMmGAtJAIafja6EGtJ-W9162w2Hm6PNjqKl3_RQXLA,16456
- firecrawl-1.3.1.dist-info/LICENSE,sha256=nPCunEDwjRGHlmjvsiDUyIWbkqqyj3Ej84ntnh0g0zA,1084
- firecrawl-1.3.1.dist-info/METADATA,sha256=eHpv5fJmiPN--bfaSHcPsmIQKNrUcHf0F8HcsvAWtfE,8249
- firecrawl-1.3.1.dist-info/WHEEL,sha256=2wepM1nk4DS4eFpYrW1TTqPcoGNfHhhO_i5m4cOimbo,92
- firecrawl-1.3.1.dist-info/top_level.txt,sha256=jTvz79zWhiyAezfmmHe4FQ-hR60C59UU5FrjMjijLu8,10
- firecrawl-1.3.1.dist-info/RECORD,,