firecrawl 1.4.0__py3-none-any.whl → 1.5.0__py3-none-any.whl

This diff shows the changes between publicly released versions of the package as they appear in their respective public registries. It is provided for informational purposes only.

Potentially problematic release: this version of firecrawl might be problematic.

@@ -1,352 +0,0 @@
- import importlib.util
- import pytest
- import time
- import os
- from uuid import uuid4
- from dotenv import load_dotenv
- from datetime import datetime
-
- load_dotenv()
-
- API_URL = "http://127.0.0.1:3002";
- ABSOLUTE_FIRECRAWL_PATH = "firecrawl/firecrawl.py"
- TEST_API_KEY = os.getenv('TEST_API_KEY')
-
- print(f"ABSOLUTE_FIRECRAWL_PATH: {ABSOLUTE_FIRECRAWL_PATH}")
-
- spec = importlib.util.spec_from_file_location("FirecrawlApp", ABSOLUTE_FIRECRAWL_PATH)
- firecrawl = importlib.util.module_from_spec(spec)
- spec.loader.exec_module(firecrawl)
- FirecrawlApp = firecrawl.FirecrawlApp
-
- def test_no_api_key():
-     with pytest.raises(Exception) as excinfo:
-         invalid_app = FirecrawlApp(api_url=API_URL)
-     assert "No API key provided" in str(excinfo.value)
-
- def test_scrape_url_invalid_api_key():
-     invalid_app = FirecrawlApp(api_url=API_URL, api_key="invalid_api_key")
-     with pytest.raises(Exception) as excinfo:
-         invalid_app.scrape_url('https://firecrawl.dev')
-     assert "Unauthorized: Invalid token" in str(excinfo.value)
-
- def test_blocklisted_url():
-     blocklisted_url = "https://facebook.com/fake-test"
-     app = FirecrawlApp(api_url=API_URL, api_key=TEST_API_KEY)
-     with pytest.raises(Exception) as excinfo:
-         app.scrape_url(blocklisted_url)
-     assert "URL is blocked. Firecrawl currently does not support social media scraping due to policy restrictions." in str(excinfo.value)
-
- def test_successful_response_with_valid_preview_token():
-     app = FirecrawlApp(api_url=API_URL, api_key="this_is_just_a_preview_token")
-     response = app.scrape_url('https://roastmywebsite.ai')
-     assert response is not None
-     assert "_Roast_" in response['markdown']
-     assert "content" not in response
-     assert "html" not in response
-     assert "metadata" in response
-     assert "links" not in response
-     assert "rawHtml" not in response
-
- def test_successful_response_for_valid_scrape():
-     app = FirecrawlApp(api_url=API_URL, api_key=TEST_API_KEY)
-     response = app.scrape_url('https://roastmywebsite.ai')
-     assert response is not None
-     assert 'markdown' in response
-     assert "_Roast_" in response['markdown']
-     assert 'metadata' in response
-     assert 'content' not in response
-     assert 'html' not in response
-     assert 'rawHtml' not in response
-     assert 'screenshot' not in response
-     assert 'links' not in response
-
- def test_successful_response_with_valid_api_key_and_options():
-     app = FirecrawlApp(api_url=API_URL, api_key=TEST_API_KEY)
-     params = {
-         'formats': ['markdown', 'html', 'rawHtml', 'screenshot', 'links'],
-         'headers': {'x-key': 'test'},
-         'includeTags': ['h1'],
-         'excludeTags': ['h2'],
-         'onlyMainContent': True,
-         'timeout': 30000,
-         'waitFor': 1000
-     }
-     response = app.scrape_url('https://roastmywebsite.ai', params)
-     assert response is not None
-     assert 'content' not in response
-     assert 'markdown' in response
-     assert 'html' in response
-     assert 'rawHtml' in response
-     assert 'screenshot' in response
-     assert 'links' in response
-     assert "_Roast_" in response['markdown']
-     assert "<h1" in response['html']
-     assert "<h1" in response['rawHtml']
-     assert "https://" in response['screenshot']
-     assert len(response['links']) > 0
-     assert "https://" in response['links'][0]
-     assert 'metadata' in response
-     assert 'title' in response['metadata']
-     assert 'description' in response['metadata']
-     assert 'keywords' in response['metadata']
-     assert 'robots' in response['metadata']
-     assert 'ogTitle' in response['metadata']
-     assert 'ogDescription' in response['metadata']
-     assert 'ogUrl' in response['metadata']
-     assert 'ogImage' in response['metadata']
-     assert 'ogLocaleAlternate' in response['metadata']
-     assert 'ogSiteName' in response['metadata']
-     assert 'sourceURL' in response['metadata']
-     assert 'statusCode' in response['metadata']
-     assert 'pageStatusCode' not in response['metadata']
-     assert 'pageError' not in response['metadata']
-     assert 'error' not in response['metadata']
-     assert response['metadata']['title'] == "Roast My Website"
-     assert response['metadata']['description'] == "Welcome to Roast My Website, the ultimate tool for putting your website through the wringer! This repository harnesses the power of Firecrawl to scrape and capture screenshots of websites, and then unleashes the latest LLM vision models to mercilessly roast them. 🌶️"
-     assert response['metadata']['keywords'] == "Roast My Website,Roast,Website,GitHub,Firecrawl"
-     assert response['metadata']['robots'] == "follow, index"
-     assert response['metadata']['ogTitle'] == "Roast My Website"
-     assert response['metadata']['ogDescription'] == "Welcome to Roast My Website, the ultimate tool for putting your website through the wringer! This repository harnesses the power of Firecrawl to scrape and capture screenshots of websites, and then unleashes the latest LLM vision models to mercilessly roast them. 🌶️"
-     assert response['metadata']['ogUrl'] == "https://www.roastmywebsite.ai"
-     assert response['metadata']['ogImage'] == "https://www.roastmywebsite.ai/og.png"
-     assert response['metadata']['ogLocaleAlternate'] == []
-     assert response['metadata']['ogSiteName'] == "Roast My Website"
-     assert response['metadata']['sourceURL'] == "https://roastmywebsite.ai"
-     assert response['metadata']['statusCode'] == 200
-
- def test_successful_response_for_valid_scrape_with_pdf_file():
-     app = FirecrawlApp(api_url=API_URL, api_key=TEST_API_KEY)
-     response = app.scrape_url('https://arxiv.org/pdf/astro-ph/9301001.pdf')
-     assert response is not None
-     assert 'content' not in response
-     assert 'metadata' in response
-     assert 'We present spectrophotometric observations of the Broad Line Radio Galaxy' in response['markdown']
-
- def test_successful_response_for_valid_scrape_with_pdf_file_without_explicit_extension():
-     app = FirecrawlApp(api_url=API_URL, api_key=TEST_API_KEY)
-     response = app.scrape_url('https://arxiv.org/pdf/astro-ph/9301001')
-     time.sleep(1) # wait for 1 second
-     assert response is not None
-     assert 'We present spectrophotometric observations of the Broad Line Radio Galaxy' in response['markdown']
-
- def test_crawl_url_invalid_api_key():
-     invalid_app = FirecrawlApp(api_url=API_URL, api_key="invalid_api_key")
-     with pytest.raises(Exception) as excinfo:
-         invalid_app.crawl_url('https://firecrawl.dev')
-     assert "Unauthorized: Invalid token" in str(excinfo.value)
-
- def test_should_return_error_for_blocklisted_url():
-     app = FirecrawlApp(api_url=API_URL, api_key=TEST_API_KEY)
-     blocklisted_url = "https://twitter.com/fake-test"
-     with pytest.raises(Exception) as excinfo:
-         app.crawl_url(blocklisted_url)
-     assert "URL is blocked. Firecrawl currently does not support social media scraping due to policy restrictions." in str(excinfo.value)
-
- def test_crawl_url_wait_for_completion_e2e():
-     app = FirecrawlApp(api_url=API_URL, api_key=TEST_API_KEY)
-     response = app.crawl_url('https://roastmywebsite.ai', {'excludePaths': ['blog/*']}, True, 30)
-     assert response is not None
-     assert 'total' in response
-     assert response['total'] > 0
-     assert 'creditsUsed' in response
-     assert response['creditsUsed'] > 0
-     assert 'expiresAt' in response
-     assert datetime.strptime(response['expiresAt'], '%Y-%m-%dT%H:%M:%S.%fZ') > datetime.now()
-     assert 'status' in response
-     assert response['status'] == 'completed'
-     assert 'next' not in response
-     assert len(response['data']) > 0
-     assert 'markdown' in response['data'][0]
-     assert "_Roast_" in response['data'][0]['markdown']
-     assert 'content' not in response['data'][0]
-     assert 'html' not in response['data'][0]
-     assert 'rawHtml' not in response['data'][0]
-     assert 'screenshot' not in response['data'][0]
-     assert 'links' not in response['data'][0]
-     assert 'metadata' in response['data'][0]
-     assert 'title' in response['data'][0]['metadata']
-     assert 'description' in response['data'][0]['metadata']
-     assert 'language' in response['data'][0]['metadata']
-     assert 'sourceURL' in response['data'][0]['metadata']
-     assert 'statusCode' in response['data'][0]['metadata']
-     assert 'error' not in response['data'][0]['metadata']
-
- def test_crawl_url_with_options_and_wait_for_completion():
-     app = FirecrawlApp(api_url=API_URL, api_key=TEST_API_KEY)
-     response = app.crawl_url('https://roastmywebsite.ai', {
-         'excludePaths': ['blog/*'],
-         'includePaths': ['/'],
-         'maxDepth': 2,
-         'ignoreSitemap': True,
-         'limit': 10,
-         'allowBackwardLinks': True,
-         'allowExternalLinks': True,
-         'scrapeOptions': {
-             'formats': ['markdown', 'html', 'rawHtml', 'screenshot', 'links'],
-             'headers': {"x-key": "test"},
-             'includeTags': ['h1'],
-             'excludeTags': ['h2'],
-             'onlyMainContent': True,
-             'waitFor': 1000
-         }
-     }, True, 30)
-     assert response is not None
-     assert 'total' in response
-     assert response['total'] > 0
-     assert 'creditsUsed' in response
-     assert response['creditsUsed'] > 0
-     assert 'expiresAt' in response
-     assert datetime.strptime(response['expiresAt'], '%Y-%m-%dT%H:%M:%S.%fZ') > datetime.now()
-     assert 'status' in response
-     assert response['status'] == 'completed'
-     assert 'next' not in response
-     assert len(response['data']) > 0
-     assert 'markdown' in response['data'][0]
-     assert "_Roast_" in response['data'][0]['markdown']
-     assert 'content' not in response['data'][0]
-     assert 'html' in response['data'][0]
-     assert "<h1" in response['data'][0]['html']
-     assert 'rawHtml' in response['data'][0]
-     assert "<h1" in response['data'][0]['rawHtml']
-     assert 'screenshot' in response['data'][0]
-     assert "https://" in response['data'][0]['screenshot']
-     assert 'links' in response['data'][0]
-     assert len(response['data'][0]['links']) > 0
-     assert 'metadata' in response['data'][0]
-     assert 'title' in response['data'][0]['metadata']
-     assert 'description' in response['data'][0]['metadata']
-     assert 'language' in response['data'][0]['metadata']
-     assert 'sourceURL' in response['data'][0]['metadata']
-     assert 'statusCode' in response['data'][0]['metadata']
-     assert 'error' not in response['data'][0]['metadata']
-
- def test_crawl_url_with_idempotency_key_e2e():
-     app = FirecrawlApp(api_url=API_URL, api_key=TEST_API_KEY)
-     uniqueIdempotencyKey = str(uuid4())
-     response = app.crawl_url('https://roastmywebsite.ai', {'excludePaths': ['blog/*']}, False, 2, uniqueIdempotencyKey)
-     assert response is not None
-     assert 'id' in response
-
-     with pytest.raises(Exception) as excinfo:
-         app.crawl_url('https://firecrawl.dev', {'excludePaths': ['blog/*']}, True, 2, uniqueIdempotencyKey)
-     assert "Idempotency key already used" in str(excinfo.value)
-
- def test_check_crawl_status_e2e():
-     app = FirecrawlApp(api_url=API_URL, api_key=TEST_API_KEY)
-     response = app.crawl_url('https://firecrawl.dev', {'scrapeOptions': {'formats': ['markdown', 'html', 'rawHtml', 'screenshot', 'links']}}, False)
-     assert response is not None
-     assert 'id' in response
-
-     max_checks = 15
-     checks = 0
-     status_response = app.check_crawl_status(response['id'])
-
-     while status_response['status'] == 'scraping' and checks < max_checks:
-         time.sleep(1) # wait for 1 second
-         assert 'partial_data' not in status_response
-         assert 'current' not in status_response
-         assert 'data' in status_response
-         assert 'total' in status_response
-         assert 'creditsUsed' in status_response
-         assert 'expiresAt' in status_response
-         assert 'status' in status_response
-         assert 'next' in status_response
-         assert status_response['total'] > 0
-         assert status_response['creditsUsed'] > 0
-         assert datetime.strptime(status_response['expiresAt'], '%Y-%m-%dT%H:%M:%S.%fZ') > datetime.now()
-         assert status_response['status'] == 'scraping'
-         assert '/v1/crawl/' in status_response['next']
-         status_response = app.check_crawl_status(response['id'])
-         checks += 1
-
-     assert status_response is not None
-     assert 'total' in status_response
-     assert status_response['total'] > 0
-     assert 'creditsUsed' in status_response
-     assert status_response['creditsUsed'] > 0
-     assert 'expiresAt' in status_response
-     assert datetime.strptime(status_response['expiresAt'], '%Y-%m-%dT%H:%M:%S.%fZ') > datetime.now()
-     assert 'status' in status_response
-     assert status_response['status'] == 'completed'
-     assert len(status_response['data']) > 0
-     assert 'markdown' in status_response['data'][0]
-     assert len(status_response['data'][0]['markdown']) > 10
-     assert 'content' not in status_response['data'][0]
-     assert 'html' in status_response['data'][0]
-     assert "<div" in status_response['data'][0]['html']
-     assert 'rawHtml' in status_response['data'][0]
-     assert "<div" in status_response['data'][0]['rawHtml']
-     assert 'screenshot' in status_response['data'][0]
-     assert "https://" in status_response['data'][0]['screenshot']
-     assert 'links' in status_response['data'][0]
-     assert status_response['data'][0]['links'] is not None
-     assert len(status_response['data'][0]['links']) > 0
-     assert 'metadata' in status_response['data'][0]
-     assert 'title' in status_response['data'][0]['metadata']
-     assert 'description' in status_response['data'][0]['metadata']
-     assert 'language' in status_response['data'][0]['metadata']
-     assert 'sourceURL' in status_response['data'][0]['metadata']
-     assert 'statusCode' in status_response['data'][0]['metadata']
-     assert 'error' not in status_response['data'][0]['metadata']
-
- def test_invalid_api_key_on_map():
-     invalid_app = FirecrawlApp(api_key="invalid_api_key", api_url=API_URL)
-     with pytest.raises(Exception) as excinfo:
-         invalid_app.map_url('https://roastmywebsite.ai')
-     assert "Unauthorized: Invalid token" in str(excinfo.value)
-
- def test_blocklisted_url_on_map():
-     app = FirecrawlApp(api_key=TEST_API_KEY, api_url=API_URL)
-     blocklisted_url = "https://facebook.com/fake-test"
-     with pytest.raises(Exception) as excinfo:
-         app.map_url(blocklisted_url)
-     assert "URL is blocked. Firecrawl currently does not support social media scraping due to policy restrictions." in str(excinfo.value)
-
- def test_successful_response_with_valid_preview_token_on_map():
-     app = FirecrawlApp(api_key="this_is_just_a_preview_token", api_url=API_URL)
-     response = app.map_url('https://roastmywebsite.ai')
-     assert response is not None
-     assert len(response) > 0
-
- def test_successful_response_for_valid_map():
-     app = FirecrawlApp(api_key=TEST_API_KEY, api_url=API_URL)
-     response = app.map_url('https://roastmywebsite.ai')
-     assert response is not None
-     assert len(response) > 0
-     assert any("https://" in link for link in response)
-     filtered_links = [link for link in response if "roastmywebsite.ai" in link]
-     assert len(filtered_links) > 0
-
- def test_search_e2e():
-     app = FirecrawlApp(api_url=API_URL, api_key=TEST_API_KEY)
-     with pytest.raises(NotImplementedError) as excinfo:
-         app.search("test query")
-     assert "Search is not supported in v1" in str(excinfo.value)
-
- # def test_llm_extraction():
- #     app = FirecrawlApp(api_url=API_URL, api_key=TEST_API_KEY)
- #     response = app.scrape_url("https://mendable.ai", {
- #         'extractorOptions': {
- #             'mode': 'llm-extraction',
- #             'extractionPrompt': "Based on the information on the page, find what the company's mission is and whether it supports SSO, and whether it is open source",
- #             'extractionSchema': {
- #                 'type': 'object',
- #                 'properties': {
- #                     'company_mission': {'type': 'string'},
- #                     'supports_sso': {'type': 'boolean'},
- #                     'is_open_source': {'type': 'boolean'}
- #                 },
- #                 'required': ['company_mission', 'supports_sso', 'is_open_source']
- #             }
- #         }
- #     })
- #     assert response is not None
- #     assert 'llm_extraction' in response
- #     llm_extraction = response['llm_extraction']
- #     assert 'company_mission' in llm_extraction
- #     assert isinstance(llm_extraction['supports_sso'], bool)
- #     assert isinstance(llm_extraction['is_open_source'], bool)
-
-
-
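For reference, the file removed above is the v1 end-to-end test suite for the Python SDK. The short sketch below reuses only the calls visible in that diff (FirecrawlApp, scrape_url, crawl_url, check_crawl_status, map_url); the local API URL and the TEST_API_KEY environment variable are assumptions carried over from the removed tests, not requirements of the package.

    # Illustrative sketch only, mirroring the calls exercised by the removed tests.
    # API_URL and TEST_API_KEY are assumptions from the test file; substitute your own values.
    import os

    from firecrawl import FirecrawlApp

    API_URL = "http://127.0.0.1:3002"
    app = FirecrawlApp(api_url=API_URL, api_key=os.getenv("TEST_API_KEY"))

    # Single-page scrape with explicit output formats, as in the options test.
    page = app.scrape_url("https://roastmywebsite.ai", {"formats": ["markdown", "html"]})
    print(page["metadata"]["title"])

    # Start a crawl without waiting for completion, then poll its status by id,
    # as in test_check_crawl_status_e2e.
    job = app.crawl_url("https://firecrawl.dev", {"limit": 10}, False)
    status = app.check_crawl_status(job["id"])
    print(status["status"], status.get("total"))

    # Map a site to its discovered links, as in test_successful_response_for_valid_map.
    links = app.map_url("https://roastmywebsite.ai")
    print(len(links))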