firecrawl 1.3.0__py3-none-any.whl → 1.4.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.



firecrawl/firecrawl.py CHANGED
@@ -81,8 +81,10 @@ class FirecrawlApp:
             response = response.json()
             if response['success'] and 'data' in response:
                 return response['data']
-            else:
+            elif "error" in response:
                 raise Exception(f'Failed to scrape URL. Error: {response["error"]}')
+            else:
+                raise Exception(f'Failed to scrape URL. Error: {response}')
         else:
             self._handle_error(response, 'scrape URL')
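
For readers of the change above, a minimal usage sketch of how the sharper error reporting surfaces to callers (the URL and API key are illustrative, and the shape of the returned `data` dict is an assumption based on the SDK docs):

```python
from firecrawl import FirecrawlApp

app = FirecrawlApp(api_key="fc-YOUR-API-KEY")  # illustrative key
try:
    # scrape_url returns the response's 'data' payload on success
    data = app.scrape_url("https://example.com", params={"formats": ["markdown"]})
    print(data.get("markdown", "")[:200])
except Exception as exc:
    # With 1.4.0, a failure response that lacks an "error" field now raises
    # with the full response body instead of a KeyError.
    print(f"Scrape failed: {exc}")
```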
@@ -117,7 +119,14 @@ class FirecrawlApp:
             idempotency_key (Optional[str]): A unique uuid key to ensure idempotency of requests.

         Returns:
-            Any: The crawl job ID or the crawl results if waiting until completion.
+            Dict[str, Any]: A dictionary containing the crawl results. The structure includes:
+                - 'success' (bool): Indicates if the crawl was successful.
+                - 'status' (str): The final status of the crawl job (e.g., 'completed').
+                - 'completed' (int): Number of scraped pages that completed.
+                - 'total' (int): Total number of scraped pages.
+                - 'creditsUsed' (int): Estimated number of API credits used for this crawl.
+                - 'expiresAt' (str): ISO 8601 formatted date-time string indicating when the crawl data expires.
+                - 'data' (List[Dict]): List of all the scraped pages.

         Raises:
             Exception: If the crawl job initiation or monitoring fails.
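
To make the documented return shape concrete, a hedged sketch of consuming it (field names come from the docstring above; the per-page `metadata`/`sourceURL` access is an assumption about the scraped-document shape, and the URL is illustrative):

```python
crawl_status = app.crawl_url(
    "https://example.com",
    params={"limit": 10, "scrapeOptions": {"formats": ["markdown"]}},
    poll_interval=5,
)
if crawl_status.get("status") == "completed":
    print(f"{crawl_status['completed']}/{crawl_status['total']} pages, "
          f"{crawl_status['creditsUsed']} credits, expires {crawl_status['expiresAt']}")
    for page in crawl_status["data"]:
        print(page.get("metadata", {}).get("sourceURL"))
```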
@@ -146,7 +155,10 @@ class FirecrawlApp:
             idempotency_key (Optional[str]): A unique uuid key to ensure idempotency of requests.

         Returns:
-            Dict[str, Any]: The response from the crawl initiation request.
+            Dict[str, Any]: A dictionary containing the crawl initiation response. The structure includes:
+                - 'success' (bool): Indicates if the crawl initiation was successful.
+                - 'id' (str): The unique identifier for the crawl job.
+                - 'url' (str): The URL to check the status of the crawl job.
         """
         endpoint = f'/v1/crawl'
         headers = self._prepare_headers(idempotency_key)
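
A short sketch of the asynchronous flow this response feeds into (assumes the SDK's `check_crawl_status` helper, which is not part of this diff; the URL is illustrative):

```python
job = app.async_crawl_url("https://example.com", params={"limit": 10})
if job.get("success"):
    print(f"Crawl {job['id']} started, poll at {job['url']}")
    # Later, poll for progress using the job id.
    status = app.check_crawl_status(job["id"])
    print(status.get("status"))
```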
@@ -236,7 +248,7 @@ class FirecrawlApp:
             params (Optional[Dict[str, Any]]): Additional parameters for the map search.

         Returns:
-            Any: The result of the map search, typically a dictionary containing mapping data.
+            List[str]: A list of URLs discovered during the map search.
         """
         endpoint = f'/v1/map'
         headers = self._prepare_headers()
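
The docstring now promises a list of URLs, while the code in the next hunk still returns the full response dict when a 'links' key is present, so a defensive usage sketch (illustrative URL; the 'search' parameter is an assumption about map options):

```python
map_result = app.map_url("https://example.com", params={"search": "docs"})
# Accept either documented shape: a bare list of URLs, or a dict with a 'links' key.
links = map_result.get("links", []) if isinstance(map_result, dict) else map_result
for link in links:
    print(link)
```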
@@ -256,11 +268,130 @@ class FirecrawlApp:
             response = response.json()
             if response['success'] and 'links' in response:
                 return response
-            else:
+            elif 'error' in response:
                 raise Exception(f'Failed to map URL. Error: {response["error"]}')
+            else:
+                raise Exception(f'Failed to map URL. Error: {response}')
         else:
             self._handle_error(response, 'map')

+    def batch_scrape_urls(self, urls: list[str],
+                          params: Optional[Dict[str, Any]] = None,
+                          poll_interval: Optional[int] = 2,
+                          idempotency_key: Optional[str] = None) -> Any:
+        """
+        Initiate a batch scrape job for the specified URLs using the Firecrawl API.
+
+        Args:
+            urls (list[str]): The URLs to scrape.
+            params (Optional[Dict[str, Any]]): Additional parameters for the scraper.
+            poll_interval (Optional[int]): Time in seconds between status checks when waiting for job completion. Defaults to 2 seconds.
+            idempotency_key (Optional[str]): A unique uuid key to ensure idempotency of requests.
+
+        Returns:
+            Dict[str, Any]: A dictionary containing the scrape results. The structure includes:
+                - 'success' (bool): Indicates if the batch scrape was successful.
+                - 'status' (str): The final status of the batch scrape job (e.g., 'completed').
+                - 'completed' (int): Number of scraped pages that completed.
+                - 'total' (int): Total number of scraped pages.
+                - 'creditsUsed' (int): Estimated number of API credits used for this batch scrape.
+                - 'expiresAt' (str): ISO 8601 formatted date-time string indicating when the batch scrape data expires.
+                - 'data' (List[Dict]): List of all the scraped pages.
+
+        Raises:
+            Exception: If the batch scrape job initiation or monitoring fails.
+        """
+        endpoint = f'/v1/batch/scrape'
+        headers = self._prepare_headers(idempotency_key)
+        json_data = {'urls': urls}
+        if params:
+            json_data.update(params)
+        response = self._post_request(f'{self.api_url}{endpoint}', json_data, headers)
+        if response.status_code == 200:
+            id = response.json().get('id')
+            return self._monitor_job_status(id, headers, poll_interval)
+
+        else:
+            self._handle_error(response, 'start batch scrape job')
+
+
+    def async_batch_scrape_urls(self, urls: list[str], params: Optional[Dict[str, Any]] = None, idempotency_key: Optional[str] = None) -> Dict[str, Any]:
+        """
+        Initiate a batch scrape job asynchronously.
+
+        Args:
+            urls (list[str]): The URLs to scrape.
+            params (Optional[Dict[str, Any]]): Additional parameters for the scraper.
+            idempotency_key (Optional[str]): A unique uuid key to ensure idempotency of requests.
+
+        Returns:
+            Dict[str, Any]: A dictionary containing the batch scrape initiation response. The structure includes:
+                - 'success' (bool): Indicates if the batch scrape initiation was successful.
+                - 'id' (str): The unique identifier for the batch scrape job.
+                - 'url' (str): The URL to check the status of the batch scrape job.
+        """
+        endpoint = f'/v1/batch/scrape'
+        headers = self._prepare_headers(idempotency_key)
+        json_data = {'urls': urls}
+        if params:
+            json_data.update(params)
+        response = self._post_request(f'{self.api_url}{endpoint}', json_data, headers)
+        if response.status_code == 200:
+            return response.json()
+        else:
+            self._handle_error(response, 'start batch scrape job')
+
+    def batch_scrape_urls_and_watch(self, urls: list[str], params: Optional[Dict[str, Any]] = None, idempotency_key: Optional[str] = None) -> 'CrawlWatcher':
+        """
+        Initiate a batch scrape job and return a CrawlWatcher to monitor the job via WebSocket.
+
+        Args:
+            urls (list[str]): The URLs to scrape.
+            params (Optional[Dict[str, Any]]): Additional parameters for the scraper.
+            idempotency_key (Optional[str]): A unique uuid key to ensure idempotency of requests.
+
+        Returns:
+            CrawlWatcher: An instance of CrawlWatcher to monitor the batch scrape job.
+        """
+        crawl_response = self.async_batch_scrape_urls(urls, params, idempotency_key)
+        if crawl_response['success'] and 'id' in crawl_response:
+            return CrawlWatcher(crawl_response['id'], self)
+        else:
+            raise Exception("Batch scrape job failed to start")
+
+    def check_batch_scrape_status(self, id: str) -> Any:
+        """
+        Check the status of a batch scrape job using the Firecrawl API.
+
+        Args:
+            id (str): The ID of the batch scrape job.
+
+        Returns:
+            Any: The status of the batch scrape job.
+
+        Raises:
+            Exception: If the status check request fails.
+        """
+        endpoint = f'/v1/batch/scrape/{id}'
+
+        headers = self._prepare_headers()
+        response = self._get_request(f'{self.api_url}{endpoint}', headers)
+        if response.status_code == 200:
+            data = response.json()
+            return {
+                'success': True,
+                'status': data.get('status'),
+                'total': data.get('total'),
+                'completed': data.get('completed'),
+                'creditsUsed': data.get('creditsUsed'),
+                'expiresAt': data.get('expiresAt'),
+                'next': data.get('next'),
+                'data': data.get('data'),
+                'error': data.get('error')
+            }
+        else:
+            self._handle_error(response, 'check batch scrape status')
+
     def _prepare_headers(self, idempotency_key: Optional[str] = None) -> Dict[str, str]:
         """
         Prepare the headers for API requests.
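
Putting the new batch endpoints together, a hedged end-to-end sketch (URLs are illustrative; the polling loop and completion check are assumptions layered on top of `async_batch_scrape_urls` and `check_batch_scrape_status` as defined above):

```python
import time

job = app.async_batch_scrape_urls(
    ["https://example.com", "https://example.org"],
    {"formats": ["markdown"]},
)
status = app.check_batch_scrape_status(job["id"])
while status.get("status") not in ("completed", "failed"):
    time.sleep(2)  # mirror the SDK's default poll_interval
    status = app.check_batch_scrape_status(job["id"])
print(f"{status['completed']}/{status['total']} pages, {status['creditsUsed']} credits used")
```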
firecrawl-1.3.0.dist-info/METADATA → firecrawl-1.4.0.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: firecrawl
-Version: 1.3.0
+Version: 1.4.0
 Summary: Python SDK for Firecrawl API
 Home-page: https://github.com/mendableai/firecrawl
 Author: Mendable.ai
@@ -76,7 +76,6 @@ crawl_status = app.crawl_url(
     'limit': 100,
     'scrapeOptions': {'formats': ['markdown', 'html']}
   },
-  wait_until_done=True,
   poll_interval=30
 )
 print(crawl_status)
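
The example above drops the old `wait_until_done` keyword; a hedged migration sketch, assuming the v1 split where `crawl_url` always blocks and `async_crawl_url` covers the former `wait_until_done=False` case:

```python
# Blocking crawl: crawl_url polls internally until the job finishes.
crawl_status = app.crawl_url(
    'https://firecrawl.dev',
    params={'limit': 100, 'scrapeOptions': {'formats': ['markdown', 'html']}},
    poll_interval=30,
)

# Non-blocking alternative to the removed wait_until_done=False:
crawl_job = app.async_crawl_url('https://firecrawl.dev', params={'limit': 100})
```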
@@ -190,6 +189,69 @@ async def start_crawl_and_watch():
 await start_crawl_and_watch()
 ```
 
+### Scraping multiple URLs in batch
+
+To batch scrape multiple URLs, use the `batch_scrape_urls` method. It takes the URLs and optional parameters as arguments. The `params` argument allows you to specify additional options for the scraper, such as the output formats.
+
+```python
+idempotency_key = str(uuid.uuid4()) # optional idempotency key
+batch_scrape_result = app.batch_scrape_urls(['firecrawl.dev', 'mendable.ai'], {'formats': ['markdown', 'html']}, 2, idempotency_key)
+print(batch_scrape_result)
+```
+
+### Asynchronous batch scrape
+
+To run a batch scrape asynchronously, use the `async_batch_scrape_urls` method. It takes the URLs and optional parameters as arguments. The `params` argument allows you to specify additional options for the scraper, such as the output formats.
+
+```python
+batch_scrape_result = app.async_batch_scrape_urls(['firecrawl.dev', 'mendable.ai'], {'formats': ['markdown', 'html']})
+print(batch_scrape_result)
+```
+
+### Checking batch scrape status
+
+To check the status of an asynchronous batch scrape job, use the `check_batch_scrape_status` method. It takes the job ID as a parameter and returns the current status of the batch scrape job.
+
+```python
+id = batch_scrape_result['id']
+status = app.check_batch_scrape_status(id)
+```
+
+### Batch scrape with WebSockets
+
+To use batch scrape with WebSockets, use the `batch_scrape_urls_and_watch` method. It takes the URLs and optional parameters as arguments. The `params` argument allows you to specify additional options for the scraper, such as the output formats.
+
+```python
+# inside an async function...
+nest_asyncio.apply()
+
+# Define event handlers
+def on_document(detail):
+    print("DOC", detail)
+
+def on_error(detail):
+    print("ERR", detail['error'])
+
+def on_done(detail):
+    print("DONE", detail['status'])
+
+# Function to start the batch scrape and watch process
+async def start_crawl_and_watch():
+    # Initiate the batch scrape job and get the watcher
+    watcher = app.batch_scrape_urls_and_watch(['firecrawl.dev', 'mendable.ai'], {'formats': ['markdown', 'html']})
+
+    # Add event listeners
+    watcher.add_event_listener("document", on_document)
+    watcher.add_event_listener("error", on_error)
+    watcher.add_event_listener("done", on_done)
+
+    # Start the watcher
+    await watcher.connect()
+
+# Run the event loop
+await start_crawl_and_watch()
+```
+
 ## Error Handling
 
 The SDK handles errors returned by the Firecrawl API and raises appropriate exceptions. If an error occurs during a request, an exception will be raised with a descriptive error message.
firecrawl-1.4.0.dist-info/RECORD ADDED
@@ -0,0 +1,17 @@
+build/lib/firecrawl/__init__.py,sha256=i3QdFjbzC5dv_8HkJAw-ni6TuLF8kEIBIjhClhKTzmI,1682
+build/lib/firecrawl/firecrawl.py,sha256=buMYnSQnQyoivaqd_e3lz8rBRP9jxvKC4KfwAOmbzXM,25637
+build/lib/firecrawl/__tests__/e2e_withAuth/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+build/lib/firecrawl/__tests__/e2e_withAuth/test.py,sha256=L-umFR3WyrJso1EwqkxjbTMr5AEI4t5zDfhQcCzitOI,7911
+build/lib/firecrawl/__tests__/v1/e2e_withAuth/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+build/lib/firecrawl/__tests__/v1/e2e_withAuth/test.py,sha256=KQMmGAtJAIafja6EGtJ-W9162w2Hm6PNjqKl3_RQXLA,16456
+firecrawl/__init__.py,sha256=i3QdFjbzC5dv_8HkJAw-ni6TuLF8kEIBIjhClhKTzmI,1682
+firecrawl/firecrawl.py,sha256=buMYnSQnQyoivaqd_e3lz8rBRP9jxvKC4KfwAOmbzXM,25637
+firecrawl/__tests__/e2e_withAuth/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+firecrawl/__tests__/e2e_withAuth/test.py,sha256=L-umFR3WyrJso1EwqkxjbTMr5AEI4t5zDfhQcCzitOI,7911
+firecrawl/__tests__/v1/e2e_withAuth/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+firecrawl/__tests__/v1/e2e_withAuth/test.py,sha256=KQMmGAtJAIafja6EGtJ-W9162w2Hm6PNjqKl3_RQXLA,16456
+firecrawl-1.4.0.dist-info/LICENSE,sha256=nPCunEDwjRGHlmjvsiDUyIWbkqqyj3Ej84ntnh0g0zA,1084
+firecrawl-1.4.0.dist-info/METADATA,sha256=s6tDD-xsN1O9_qizzbldK1YGHSIQhD4bOjVXZUBN7jw,10590
+firecrawl-1.4.0.dist-info/WHEEL,sha256=2wepM1nk4DS4eFpYrW1TTqPcoGNfHhhO_i5m4cOimbo,92
+firecrawl-1.4.0.dist-info/top_level.txt,sha256=GglqAyOiUmWSqRivafO_tbV_rz2OdmRzMG9BRS3A6Zo,21
+firecrawl-1.4.0.dist-info/RECORD,,
firecrawl-1.4.0.dist-info/top_level.txt ADDED
@@ -0,0 +1,3 @@
+build
+dist
+firecrawl
firecrawl-1.3.0.dist-info/RECORD DELETED
@@ -1,11 +0,0 @@
-firecrawl/__init__.py,sha256=PLBoKqU5OAidzN7dJSD96fjhO6dzeJr0M13p2A9-6co,1682
-firecrawl/firecrawl.py,sha256=aCJ-UMOnt35AiOVAe9kobUFjaS-K81mMKe49TZAKB-4,19175
-firecrawl/__tests__/e2e_withAuth/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-firecrawl/__tests__/e2e_withAuth/test.py,sha256=L-umFR3WyrJso1EwqkxjbTMr5AEI4t5zDfhQcCzitOI,7911
-firecrawl/__tests__/v1/e2e_withAuth/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-firecrawl/__tests__/v1/e2e_withAuth/test.py,sha256=KQMmGAtJAIafja6EGtJ-W9162w2Hm6PNjqKl3_RQXLA,16456
-firecrawl-1.3.0.dist-info/LICENSE,sha256=nPCunEDwjRGHlmjvsiDUyIWbkqqyj3Ej84ntnh0g0zA,1084
-firecrawl-1.3.0.dist-info/METADATA,sha256=EGLvZGl6JyT3iBgx9YEBRr1NCiaiL859YYGlxc6JppU,8274
-firecrawl-1.3.0.dist-info/WHEEL,sha256=2wepM1nk4DS4eFpYrW1TTqPcoGNfHhhO_i5m4cOimbo,92
-firecrawl-1.3.0.dist-info/top_level.txt,sha256=jTvz79zWhiyAezfmmHe4FQ-hR60C59UU5FrjMjijLu8,10
-firecrawl-1.3.0.dist-info/RECORD,,
firecrawl-1.3.0.dist-info/top_level.txt DELETED
@@ -1 +0,0 @@
-firecrawl