firecrawl-1.4.0-py3-none-any.whl → firecrawl-1.5.0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

firecrawl/__init__.py CHANGED
@@ -11,32 +11,54 @@ For more information visit https://github.com/firecrawl/
 import logging
 import os
 
-from .firecrawl import FirecrawlApp
+from .firecrawl import FirecrawlApp  # noqa
 
-__version__ = "1.4.0"
+__version__ = "1.5.0"
 
 # Define the logger for the Firecrawl project
 logger: logging.Logger = logging.getLogger("firecrawl")
 
 
-def _basic_config() -> None:
-    """Set up basic configuration for logging with a specific format and date format."""
+def _configure_logger() -> None:
+    """
+    Configure the firecrawl logger for console output.
+
+    The function attaches a handler for console output with a specific format and date
+    format to the firecrawl logger.
+    """
     try:
-        logging.basicConfig(
-            format="[%(asctime)s - %(name)s:%(lineno)d - %(levelname)s] %(message)s",
+        # Create the formatter
+        formatter = logging.Formatter(
+            "[%(asctime)s - %(name)s:%(lineno)d - %(levelname)s] %(message)s",
             datefmt="%Y-%m-%d %H:%M:%S",
         )
+
+        # Create the console handler and set the formatter
+        console_handler = logging.StreamHandler()
+        console_handler.setFormatter(formatter)
+
+        # Add the console handler to the firecrawl logger
+        logger.addHandler(console_handler)
     except Exception as e:
         logger.error("Failed to configure logging: %s", e)
 
 
 def setup_logging() -> None:
     """Set up logging based on the FIRECRAWL_LOGGING_LEVEL environment variable."""
-    env = os.environ.get(
-        "FIRECRAWL_LOGGING_LEVEL", "INFO"
-    ).upper()  # Default to 'INFO' level
-    _basic_config()
+    # Check if the firecrawl logger already has a handler
+    if logger.hasHandlers():
+        return  # To prevent duplicate logging
+
+    # Check if the FIRECRAWL_LOGGING_LEVEL environment variable is set
+    if not (env := os.getenv("FIRECRAWL_LOGGING_LEVEL", "").upper()):
+        # Attach a no-op handler to prevent warnings about no handlers
+        logger.addHandler(logging.NullHandler())
+        return
+
+    # Attach the console handler to the firecrawl logger
+    _configure_logger()
 
+    # Set the logging level based on the FIRECRAWL_LOGGING_LEVEL environment variable
     if env == "DEBUG":
         logger.setLevel(logging.DEBUG)
     elif env == "INFO":
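The practical effect of this hunk: 1.4.0 called `logging.basicConfig()` unconditionally and defaulted to INFO, while 1.5.0 leaves the root logger alone and stays silent (a `NullHandler`) unless `FIRECRAWL_LOGGING_LEVEL` is set. A minimal opt-in sketch, assuming `setup_logging()` is still invoked at import time as it was in 1.4.0:

```python
# Hypothetical opt-in sketch: the variable must be set before the first
# import, since setup_logging() runs when the module loads and skips
# reconfiguration once a handler is already attached.
import os

os.environ["FIRECRAWL_LOGGING_LEVEL"] = "DEBUG"

from firecrawl import FirecrawlApp  # import triggers setup_logging()

app = FirecrawlApp(api_key="fc-YOUR-API-KEY")  # placeholder key
```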
firecrawl/firecrawl.py CHANGED
@@ -189,17 +189,38 @@ class FirecrawlApp:
         headers = self._prepare_headers()
         response = self._get_request(f'{self.api_url}{endpoint}', headers)
         if response.status_code == 200:
-            data = response.json()
+            status_data = response.json()
+            if status_data['status'] == 'completed':
+                if 'data' in status_data:
+                    data = status_data['data']
+                    while 'next' in status_data:
+                        next_url = status_data.get('next')
+                        if not next_url:
+                            logger.warning("Expected 'next' URL is missing.")
+                            break
+                        try:
+                            status_response = self._get_request(next_url, headers)
+                            if status_response.status_code != 200:
+                                logger.error(f"Failed to fetch next page: {status_response.status_code}")
+                                break
+                            status_data = status_response.json()
+                            data.extend(status_data.get('data', []))
+                        except Exception as e:
+                            logger.error(f"Error during pagination request: {e}")
+                            break
+                    status_data.pop('next', None)
+                    status_data['data'] = data
+
             return {
                 'success': True,
-                'status': data.get('status'),
-                'total': data.get('total'),
-                'completed': data.get('completed'),
-                'creditsUsed': data.get('creditsUsed'),
-                'expiresAt': data.get('expiresAt'),
-                'next': data.get('next'),
-                'data': data.get('data'),
-                'error': data.get('error')
+                'status': status_data.get('status'),
+                'total': status_data.get('total'),
+                'completed': status_data.get('completed'),
+                'creditsUsed': status_data.get('creditsUsed'),
+                'expiresAt': status_data.get('expiresAt'),
+                'data': status_data.get('data'),
+                'error': status_data.get('error'),
+                'next': status_data.get('next', None)
             }
         else:
             self._handle_error(response, 'check crawl status')
@@ -377,17 +398,38 @@ class FirecrawlApp:
         headers = self._prepare_headers()
         response = self._get_request(f'{self.api_url}{endpoint}', headers)
         if response.status_code == 200:
-            data = response.json()
+            status_data = response.json()
+            if status_data['status'] == 'completed':
+                if 'data' in status_data:
+                    data = status_data['data']
+                    while 'next' in status_data:
+                        next_url = status_data.get('next')
+                        if not next_url:
+                            logger.warning("Expected 'next' URL is missing.")
+                            break
+                        try:
+                            status_response = self._get_request(next_url, headers)
+                            if status_response.status_code != 200:
+                                logger.error(f"Failed to fetch next page: {status_response.status_code}")
+                                break
+                            status_data = status_response.json()
+                            data.extend(status_data.get('data', []))
+                        except Exception as e:
+                            logger.error(f"Error during pagination request: {e}")
+                            break
+                    status_data.pop('next', None)
+                    status_data['data'] = data
+
             return {
                 'success': True,
-                'status': data.get('status'),
-                'total': data.get('total'),
-                'completed': data.get('completed'),
-                'creditsUsed': data.get('creditsUsed'),
-                'expiresAt': data.get('expiresAt'),
-                'next': data.get('next'),
-                'data': data.get('data'),
-                'error': data.get('error')
+                'status': status_data.get('status'),
+                'total': status_data.get('total'),
+                'completed': status_data.get('completed'),
+                'creditsUsed': status_data.get('creditsUsed'),
+                'expiresAt': status_data.get('expiresAt'),
+                'data': status_data.get('data'),
+                'error': status_data.get('error'),
+                'next': status_data.get('next', None)
            }
         else:
             self._handle_error(response, 'check batch scrape status')
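Functionally, both status methods gained the same thing: when a completed job's payload carries a `next` cursor URL, the SDK now walks those pages and concatenates each page's `data` before building the response dict. Stripped of the SDK plumbing, the pattern is roughly this (`fetch_json` is a hypothetical stand-in for the authenticated `_get_request` plus `.json()` pair):

```python
from typing import Any, Callable

def collect_all_pages(first_page: dict[str, Any],
                      fetch_json: Callable[[str], dict[str, Any]]) -> list[Any]:
    """Follow 'next' cursor URLs, accumulating every page's 'data' list."""
    data: list[Any] = list(first_page.get('data', []))
    page = first_page
    while page.get('next'):
        page = fetch_json(page['next'])   # GET the next page by its URL
        data.extend(page.get('data', []))
    return data
```

One consequence visible in the diff: because `status_data` is reassigned on each page, the top-level fields of the returned dict (`status`, `total`, `creditsUsed`, `expiresAt`) reflect the last page fetched rather than the first response.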
firecrawl-1.5.0.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: firecrawl
-Version: 1.4.0
+Version: 1.5.0
 Summary: Python SDK for Firecrawl API
 Home-page: https://github.com/mendableai/firecrawl
 Author: Mendable.ai
@@ -210,11 +210,11 @@ print(batch_scrape_result)
 
 ### Checking batch scrape status
 
-To check the status of an asynchronous batch scrape job, use the `check_batch_scrape_job` method. It takes the job ID as a parameter and returns the current status of the batch scrape job.
+To check the status of an asynchronous batch scrape job, use the `check_batch_scrape_status` method. It takes the job ID as a parameter and returns the current status of the batch scrape job.
 
 ```python
 id = batch_scrape_result['id']
-status = app.check_batch_scrape_job(id)
+status = app.check_batch_scrape_status(id)
 ```
 
 ### Batch scrape with WebSockets
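For context, the renamed method returns the flat dict assembled in firecrawl.py above, so the README example could be extended like this (a sketch continuing the snippet; `metadata.sourceURL` is Firecrawl's usual per-document field, assumed here):

```python
status = app.check_batch_scrape_status(batch_scrape_result['id'])

# Keys mirror the dict built in check_batch_scrape_status above.
if status['success'] and status['status'] == 'completed':
    print(f"{status['completed']}/{status['total']} pages scraped, "
          f"{status['creditsUsed']} credits used")
    for doc in status['data'] or []:
        print(doc.get('metadata', {}).get('sourceURL'))
```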
firecrawl-1.5.0.dist-info/RECORD ADDED
@@ -0,0 +1,11 @@
+firecrawl/__init__.py,sha256=6UEPRP09ZHMJ99KG4N8qiTyWW7RNaGzY18bYkrXRAqw,2543
+firecrawl/firecrawl.py,sha256=09QENx-ME8455WiRpKV53-2cFh7T4MX0fX5vRP7It0M,28045
+firecrawl/__tests__/e2e_withAuth/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+firecrawl/__tests__/e2e_withAuth/test.py,sha256=L-umFR3WyrJso1EwqkxjbTMr5AEI4t5zDfhQcCzitOI,7911
+firecrawl/__tests__/v1/e2e_withAuth/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+firecrawl/__tests__/v1/e2e_withAuth/test.py,sha256=KQMmGAtJAIafja6EGtJ-W9162w2Hm6PNjqKl3_RQXLA,16456
+firecrawl-1.5.0.dist-info/LICENSE,sha256=nPCunEDwjRGHlmjvsiDUyIWbkqqyj3Ej84ntnh0g0zA,1084
+firecrawl-1.5.0.dist-info/METADATA,sha256=Wwo7Do4R_42Fbkq0jhOMxhgCTC4iZxRJ1aILD2ijFzs,10596
+firecrawl-1.5.0.dist-info/WHEEL,sha256=2wepM1nk4DS4eFpYrW1TTqPcoGNfHhhO_i5m4cOimbo,92
+firecrawl-1.5.0.dist-info/top_level.txt,sha256=jTvz79zWhiyAezfmmHe4FQ-hR60C59UU5FrjMjijLu8,10
+firecrawl-1.5.0.dist-info/RECORD,,
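For anyone auditing the new RECORD, each entry is a `path,hash,size` triple, and the hash is a urlsafe-base64 SHA-256 digest with the trailing `=` padding stripped (per the wheel spec). A quick sketch for checking an extracted file against its entry:

```python
import base64
import hashlib

def record_hash(path: str) -> str:
    """Hash a file the way wheel RECORD entries do."""
    with open(path, "rb") as f:
        digest = hashlib.sha256(f.read()).digest()
    return "sha256=" + base64.urlsafe_b64encode(digest).rstrip(b"=").decode()

# For the 1.5.0 wheel this should reproduce the entry above, e.g.
# record_hash("firecrawl/firecrawl.py")
# -> "sha256=09QENx-ME8455WiRpKV53-2cFh7T4MX0fX5vRP7It0M"
```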
firecrawl-1.5.0.dist-info/top_level.txt ADDED
@@ -0,0 +1 @@
+firecrawl
@@ -1,57 +0,0 @@
-"""
-This is the Firecrawl package.
-
-This package provides a Python SDK for interacting with the Firecrawl API.
-It includes methods to scrape URLs, perform searches, initiate and monitor crawl jobs,
-and check the status of these jobs.
-
-For more information visit https://github.com/firecrawl/
-"""
-
-import logging
-import os
-
-from .firecrawl import FirecrawlApp
-
-__version__ = "1.4.0"
-
-# Define the logger for the Firecrawl project
-logger: logging.Logger = logging.getLogger("firecrawl")
-
-
-def _basic_config() -> None:
-    """Set up basic configuration for logging with a specific format and date format."""
-    try:
-        logging.basicConfig(
-            format="[%(asctime)s - %(name)s:%(lineno)d - %(levelname)s] %(message)s",
-            datefmt="%Y-%m-%d %H:%M:%S",
-        )
-    except Exception as e:
-        logger.error("Failed to configure logging: %s", e)
-
-
-def setup_logging() -> None:
-    """Set up logging based on the FIRECRAWL_LOGGING_LEVEL environment variable."""
-    env = os.environ.get(
-        "FIRECRAWL_LOGGING_LEVEL", "INFO"
-    ).upper()  # Default to 'INFO' level
-    _basic_config()
-
-    if env == "DEBUG":
-        logger.setLevel(logging.DEBUG)
-    elif env == "INFO":
-        logger.setLevel(logging.INFO)
-    elif env == "WARNING":
-        logger.setLevel(logging.WARNING)
-    elif env == "ERROR":
-        logger.setLevel(logging.ERROR)
-    elif env == "CRITICAL":
-        logger.setLevel(logging.CRITICAL)
-    else:
-        logger.setLevel(logging.INFO)
-        logger.warning("Unknown logging level: %s, defaulting to INFO", env)
-
-
-# Initialize logging configuration when the module is imported
-setup_logging()
-logger.debug("Debugging logger setup")
@@ -1,170 +0,0 @@
-import importlib.util
-import pytest
-import time
-import os
-from uuid import uuid4
-from dotenv import load_dotenv
-
-load_dotenv()
-
-API_URL = "http://127.0.0.1:3002"
-ABSOLUTE_FIRECRAWL_PATH = "firecrawl/firecrawl.py"
-TEST_API_KEY = os.getenv('TEST_API_KEY')
-
-print(f"ABSOLUTE_FIRECRAWL_PATH: {ABSOLUTE_FIRECRAWL_PATH}")
-
-spec = importlib.util.spec_from_file_location("FirecrawlApp", ABSOLUTE_FIRECRAWL_PATH)
-firecrawl = importlib.util.module_from_spec(spec)
-spec.loader.exec_module(firecrawl)
-FirecrawlApp = firecrawl.FirecrawlApp
-
-def test_no_api_key():
-    with pytest.raises(Exception) as excinfo:
-        invalid_app = FirecrawlApp(api_url=API_URL, version='v0')
-    assert "No API key provided" in str(excinfo.value)
-
-def test_scrape_url_invalid_api_key():
-    invalid_app = FirecrawlApp(api_url=API_URL, api_key="invalid_api_key", version='v0')
-    with pytest.raises(Exception) as excinfo:
-        invalid_app.scrape_url('https://firecrawl.dev')
-    assert "Unexpected error during scrape URL: Status code 401. Unauthorized: Invalid token" in str(excinfo.value)
-
-def test_blocklisted_url():
-    blocklisted_url = "https://facebook.com/fake-test"
-    app = FirecrawlApp(api_url=API_URL, api_key=TEST_API_KEY, version='v0')
-    with pytest.raises(Exception) as excinfo:
-        app.scrape_url(blocklisted_url)
-    assert "Unexpected error during scrape URL: Status code 403. Firecrawl currently does not support social media scraping due to policy restrictions. We're actively working on building support for it." in str(excinfo.value)
-
-def test_successful_response_with_valid_preview_token():
-    app = FirecrawlApp(api_url=API_URL, api_key="this_is_just_a_preview_token", version='v0')
-    response = app.scrape_url('https://roastmywebsite.ai')
-    assert response is not None
-    assert 'content' in response
-    assert "_Roast_" in response['content']
-
-def test_scrape_url_e2e():
-    app = FirecrawlApp(api_url=API_URL, api_key=TEST_API_KEY, version='v0')
-    response = app.scrape_url('https://roastmywebsite.ai')
-    print(response)
-
-    assert response is not None
-    assert 'content' in response
-    assert 'markdown' in response
-    assert 'metadata' in response
-    assert 'html' not in response
-    assert "_Roast_" in response['content']
-
-def test_successful_response_with_valid_api_key_and_include_html():
-    app = FirecrawlApp(api_url=API_URL, api_key=TEST_API_KEY, version='v0')
-    response = app.scrape_url('https://roastmywebsite.ai', {'pageOptions': {'includeHtml': True}})
-    assert response is not None
-    assert 'content' in response
-    assert 'markdown' in response
-    assert 'html' in response
-    assert 'metadata' in response
-    assert "_Roast_" in response['content']
-    assert "_Roast_" in response['markdown']
-    assert "<h1" in response['html']
-
-def test_successful_response_for_valid_scrape_with_pdf_file():
-    app = FirecrawlApp(api_url=API_URL, api_key=TEST_API_KEY, version='v0')
-    response = app.scrape_url('https://arxiv.org/pdf/astro-ph/9301001.pdf')
-    assert response is not None
-    assert 'content' in response
-    assert 'metadata' in response
-    assert 'We present spectrophotometric observations of the Broad Line Radio Galaxy' in response['content']
-
-def test_successful_response_for_valid_scrape_with_pdf_file_without_explicit_extension():
-    app = FirecrawlApp(api_url=API_URL, api_key=TEST_API_KEY, version='v0')
-    response = app.scrape_url('https://arxiv.org/pdf/astro-ph/9301001')
-    time.sleep(6)  # wait for 6 seconds
-    assert response is not None
-    assert 'content' in response
-    assert 'metadata' in response
-    assert 'We present spectrophotometric observations of the Broad Line Radio Galaxy' in response['content']
-
-def test_crawl_url_invalid_api_key():
-    invalid_app = FirecrawlApp(api_url=API_URL, api_key="invalid_api_key", version='v0')
-    with pytest.raises(Exception) as excinfo:
-        invalid_app.crawl_url('https://firecrawl.dev')
-    assert "Unexpected error during start crawl job: Status code 401. Unauthorized: Invalid token" in str(excinfo.value)
-
-def test_should_return_error_for_blocklisted_url():
-    app = FirecrawlApp(api_url=API_URL, api_key=TEST_API_KEY, version='v0')
-    blocklisted_url = "https://twitter.com/fake-test"
-    with pytest.raises(Exception) as excinfo:
-        app.crawl_url(blocklisted_url)
-    assert "Unexpected error during start crawl job: Status code 403. Firecrawl currently does not support social media scraping due to policy restrictions. We're actively working on building support for it." in str(excinfo.value)
-
-def test_crawl_url_wait_for_completion_e2e():
-    app = FirecrawlApp(api_url=API_URL, api_key=TEST_API_KEY, version='v0')
-    response = app.crawl_url('https://roastmywebsite.ai', {'crawlerOptions': {'excludes': ['blog/*']}}, True)
-    assert response is not None
-    assert len(response) > 0
-    assert 'content' in response[0]
-    assert "_Roast_" in response[0]['content']
-
-def test_crawl_url_with_idempotency_key_e2e():
-    app = FirecrawlApp(api_url=API_URL, api_key=TEST_API_KEY, version='v0')
-    uniqueIdempotencyKey = str(uuid4())
-    response = app.crawl_url('https://roastmywebsite.ai', {'crawlerOptions': {'excludes': ['blog/*']}}, True, 2, uniqueIdempotencyKey)
-    assert response is not None
-    assert len(response) > 0
-    assert 'content' in response[0]
-    assert "_Roast_" in response[0]['content']
-
-    with pytest.raises(Exception) as excinfo:
-        app.crawl_url('https://firecrawl.dev', {'crawlerOptions': {'excludes': ['blog/*']}}, True, 2, uniqueIdempotencyKey)
-    assert "Conflict: Failed to start crawl job due to a conflict. Idempotency key already used" in str(excinfo.value)
-
-def test_check_crawl_status_e2e():
-    app = FirecrawlApp(api_url=API_URL, api_key=TEST_API_KEY, version='v0')
-    response = app.crawl_url('https://firecrawl.dev', {'crawlerOptions': {'excludes': ['blog/*']}}, False)
-    assert response is not None
-    assert 'jobId' in response
-
-    time.sleep(30)  # wait for 30 seconds
-    status_response = app.check_crawl_status(response['jobId'])
-    assert status_response is not None
-    assert 'status' in status_response
-    assert status_response['status'] == 'completed'
-    assert 'data' in status_response
-    assert len(status_response['data']) > 0
-
-def test_search_e2e():
-    app = FirecrawlApp(api_url=API_URL, api_key=TEST_API_KEY, version='v0')
-    response = app.search("test query")
-    assert response is not None
-    assert 'content' in response[0]
-    assert len(response) > 2
-
-def test_search_invalid_api_key():
-    invalid_app = FirecrawlApp(api_url=API_URL, api_key="invalid_api_key", version='v0')
-    with pytest.raises(Exception) as excinfo:
-        invalid_app.search("test query")
-    assert "Unexpected error during search: Status code 401. Unauthorized: Invalid token" in str(excinfo.value)
-
-def test_llm_extraction():
-    app = FirecrawlApp(api_url=API_URL, api_key=TEST_API_KEY, version='v0')
-    response = app.scrape_url("https://firecrawl.dev", {
-        'extractorOptions': {
-            'mode': 'llm-extraction',
-            'extractionPrompt': "Based on the information on the page, find what the company's mission is and whether it supports SSO, and whether it is open source",
-            'extractionSchema': {
-                'type': 'object',
-                'properties': {
-                    'company_mission': {'type': 'string'},
-                    'supports_sso': {'type': 'boolean'},
-                    'is_open_source': {'type': 'boolean'}
-                },
-                'required': ['company_mission', 'supports_sso', 'is_open_source']
-            }
-        }
-    })
-    assert response is not None
-    assert 'llm_extraction' in response
-    llm_extraction = response['llm_extraction']
-    assert 'company_mission' in llm_extraction
-    assert isinstance(llm_extraction['supports_sso'], bool)
-    assert isinstance(llm_extraction['is_open_source'], bool)