firecrawl 1.6.0 (tar.gz)
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of firecrawl might be problematic.
- firecrawl-1.6.0/LICENSE +21 -0
- firecrawl-1.6.0/PKG-INFO +291 -0
- firecrawl-1.6.0/README.md +251 -0
- firecrawl-1.6.0/firecrawl/__init__.py +79 -0
- firecrawl-1.6.0/firecrawl/__tests__/e2e_withAuth/__init__.py +0 -0
- firecrawl-1.6.0/firecrawl/__tests__/e2e_withAuth/test.py +170 -0
- firecrawl-1.6.0/firecrawl/__tests__/v1/e2e_withAuth/__init__.py +0 -0
- firecrawl-1.6.0/firecrawl/__tests__/v1/e2e_withAuth/test.py +352 -0
- firecrawl-1.6.0/firecrawl/firecrawl.py +721 -0
- firecrawl-1.6.0/firecrawl.egg-info/PKG-INFO +291 -0
- firecrawl-1.6.0/firecrawl.egg-info/SOURCES.txt +15 -0
- firecrawl-1.6.0/firecrawl.egg-info/dependency_links.txt +1 -0
- firecrawl-1.6.0/firecrawl.egg-info/requires.txt +4 -0
- firecrawl-1.6.0/firecrawl.egg-info/top_level.txt +1 -0
- firecrawl-1.6.0/pyproject.toml +51 -0
- firecrawl-1.6.0/setup.cfg +4 -0
- firecrawl-1.6.0/setup.py +66 -0
firecrawl-1.6.0/LICENSE
ADDED
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2024 Sideguide Technologies Inc.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
firecrawl-1.6.0/PKG-INFO
ADDED
@@ -0,0 +1,291 @@
+Metadata-Version: 2.1
+Name: firecrawl
+Version: 1.6.0
+Summary: Python SDK for Firecrawl API
+Home-page: https://github.com/mendableai/firecrawl
+Author: Mendable.ai
+Author-email: "Mendable.ai" <nick@mendable.ai>
+Maintainer-email: "Mendable.ai" <nick@mendable.ai>
+License: GNU Affero General Public License v3 (AGPLv3)
+Project-URL: Documentation, https://docs.firecrawl.dev
+Project-URL: Source, https://github.com/mendableai/firecrawl
+Project-URL: Tracker, https://github.com/mendableai/firecrawl/issues
+Keywords: SDK,API,firecrawl
+Classifier: Development Status :: 5 - Production/Stable
+Classifier: Environment :: Web Environment
+Classifier: Intended Audience :: Developers
+Classifier: License :: OSI Approved :: GNU General Public License v3 (GPLv3)
+Classifier: Natural Language :: English
+Classifier: Operating System :: OS Independent
+Classifier: Programming Language :: Python
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3.8
+Classifier: Programming Language :: Python :: 3.9
+Classifier: Programming Language :: Python :: 3.10
+Classifier: Topic :: Internet
+Classifier: Topic :: Internet :: WWW/HTTP
+Classifier: Topic :: Internet :: WWW/HTTP :: Indexing/Search
+Classifier: Topic :: Software Development
+Classifier: Topic :: Software Development :: Libraries
+Classifier: Topic :: Software Development :: Libraries :: Python Modules
+Classifier: Topic :: Text Processing
+Classifier: Topic :: Text Processing :: Indexing
+Requires-Python: >=3.8
+Description-Content-Type: text/markdown
+License-File: LICENSE
+Requires-Dist: requests
+Requires-Dist: python-dotenv
+Requires-Dist: websockets
+Requires-Dist: nest-asyncio
+
+# Firecrawl Python SDK
+
+The Firecrawl Python SDK is a library that allows you to easily scrape and crawl websites, and output the data in a format ready for use with language models (LLMs). It provides a simple and intuitive interface for interacting with the Firecrawl API.
+
+## Installation
+
+To install the Firecrawl Python SDK, you can use pip:
+
+```bash
+pip install firecrawl-py
+```
+
+## Usage
+
+1. Get an API key from [firecrawl.dev](https://firecrawl.dev)
+2. Set the API key as an environment variable named `FIRECRAWL_API_KEY` or pass it as a parameter to the `FirecrawlApp` class.
+
+Here's an example of how to use the SDK:
+
+```python
+from firecrawl.firecrawl import FirecrawlApp
+
+app = FirecrawlApp(api_key="fc-YOUR_API_KEY")
+
+# Scrape a website:
+scrape_status = app.scrape_url(
+    'https://firecrawl.dev',
+    params={'formats': ['markdown', 'html']}
+)
+print(scrape_status)
+
+# Crawl a website:
+crawl_status = app.crawl_url(
+    'https://firecrawl.dev',
+    params={
+        'limit': 100,
+        'scrapeOptions': {'formats': ['markdown', 'html']}
+    },
+    poll_interval=30
+)
+print(crawl_status)
+```
+
+### Scraping a URL
+
+To scrape a single URL, use the `scrape_url` method. It takes the URL as a parameter and returns the scraped data as a dictionary.
+
+```python
+url = 'https://example.com'
+scraped_data = app.scrape_url(url)
+```
+
+### Extracting structured data from a URL
+
+With LLM extraction, you can easily extract structured data from any URL. We support pydantic schemas to make it easier for you too. Here is how to use it:
+
+```python
+class ArticleSchema(BaseModel):
+    title: str
+    points: int
+    by: str
+    commentsURL: str
+
+class TopArticlesSchema(BaseModel):
+    top: List[ArticleSchema] = Field(..., max_items=5, description="Top 5 stories")
+
+data = app.scrape_url('https://news.ycombinator.com', {
+    'extractorOptions': {
+        'extractionSchema': TopArticlesSchema.model_json_schema(),
+        'mode': 'llm-extraction'
+    },
+    'pageOptions': {
+        'onlyMainContent': True
+    }
+})
+print(data["llm_extraction"])
+```
+
+### Crawling a Website
+
+To crawl a website, use the `crawl_url` method. It takes the starting URL and optional parameters as arguments. The `params` argument allows you to specify additional options for the crawl job, such as the maximum number of pages to crawl, allowed domains, and the output format.
+
+```python
+idempotency_key = str(uuid.uuid4())  # optional idempotency key
+crawl_result = app.crawl_url('firecrawl.dev', {'excludePaths': ['blog/*']}, 2, idempotency_key)
+print(crawl_result)
+```
+
+### Crawling a Website Asynchronously
+
+To crawl a website asynchronously, use the `async_crawl_url` method. It takes the starting URL and optional parameters as arguments. The `params` argument allows you to specify additional options for the crawl job, such as the maximum number of pages to crawl, allowed domains, and the output format.
+
+```python
+crawl_result = app.async_crawl_url('firecrawl.dev', {'excludePaths': ['blog/*']}, "")
+print(crawl_result)
+```
+
+### Checking Crawl Status
+
+To check the status of a crawl job, use the `check_crawl_status` method. It takes the job ID as a parameter and returns the current status of the crawl job.
+
+```python
+id = crawl_result['id']
+status = app.check_crawl_status(id)
+```
+
+### Map a Website
+
+Use `map_url` to generate a list of URLs from a website. The `params` argument lets you customize the mapping process, including options to exclude subdomains or to utilize the sitemap.
+
+```python
+# Map a website:
+map_result = app.map_url('https://example.com')
+print(map_result)
+```
+
+### Crawl a website with WebSockets
+
+To crawl a website with WebSockets, use the `crawl_url_and_watch` method. It takes the starting URL and optional parameters as arguments. The `params` argument allows you to specify additional options for the crawl job, such as the maximum number of pages to crawl, allowed domains, and the output format.
+
+```python
+# inside an async function...
+nest_asyncio.apply()
+
+# Define event handlers
+def on_document(detail):
+    print("DOC", detail)
+
+def on_error(detail):
+    print("ERR", detail['error'])
+
+def on_done(detail):
+    print("DONE", detail['status'])
+
+# Function to start the crawl and watch process
+async def start_crawl_and_watch():
+    # Initiate the crawl job and get the watcher
+    watcher = app.crawl_url_and_watch('firecrawl.dev', {'excludePaths': ['blog/*'], 'limit': 5})
+
+    # Add event listeners
+    watcher.add_event_listener("document", on_document)
+    watcher.add_event_listener("error", on_error)
+    watcher.add_event_listener("done", on_done)
+
+    # Start the watcher
+    await watcher.connect()
+
+# Run the event loop
+await start_crawl_and_watch()
+```
+
+### Scraping multiple URLs in batch
+
+To batch scrape multiple URLs, use the `batch_scrape_urls` method. It takes the URLs and optional parameters as arguments. The `params` argument allows you to specify additional options for the scraper, such as the output formats.
+
+```python
+idempotency_key = str(uuid.uuid4())  # optional idempotency key
+batch_scrape_result = app.batch_scrape_urls(['firecrawl.dev', 'mendable.ai'], {'formats': ['markdown', 'html']}, 2, idempotency_key)
+print(batch_scrape_result)
+```
+
+### Asynchronous batch scrape
+
+To run a batch scrape asynchronously, use the `async_batch_scrape_urls` method. It takes the URLs and optional parameters as arguments. The `params` argument allows you to specify additional options for the scraper, such as the output formats.
+
+```python
+batch_scrape_result = app.async_batch_scrape_urls(['firecrawl.dev', 'mendable.ai'], {'formats': ['markdown', 'html']})
+print(batch_scrape_result)
+```
+
+### Checking batch scrape status
+
+To check the status of an asynchronous batch scrape job, use the `check_batch_scrape_status` method. It takes the job ID as a parameter and returns the current status of the batch scrape job.
+
+```python
+id = batch_scrape_result['id']
+status = app.check_batch_scrape_status(id)
+```
+
+### Batch scrape with WebSockets
+
+To use batch scrape with WebSockets, use the `batch_scrape_urls_and_watch` method. It takes the URLs and optional parameters as arguments. The `params` argument allows you to specify additional options for the scraper, such as the output formats.
+
+```python
+# inside an async function...
+nest_asyncio.apply()
+
+# Define event handlers
+def on_document(detail):
+    print("DOC", detail)
+
+def on_error(detail):
+    print("ERR", detail['error'])
+
+def on_done(detail):
+    print("DONE", detail['status'])
+
+# Function to start the batch scrape and watch process
+async def start_crawl_and_watch():
+    # Initiate the batch scrape job and get the watcher
+    watcher = app.batch_scrape_urls_and_watch(['firecrawl.dev', 'mendable.ai'], {'formats': ['markdown', 'html']})
+
+    # Add event listeners
+    watcher.add_event_listener("document", on_document)
+    watcher.add_event_listener("error", on_error)
+    watcher.add_event_listener("done", on_done)
+
+    # Start the watcher
+    await watcher.connect()
+
+# Run the event loop
+await start_crawl_and_watch()
+```
+
+## Error Handling
+
+The SDK handles errors returned by the Firecrawl API and raises appropriate exceptions. If an error occurs during a request, an exception will be raised with a descriptive error message.
+
+## Running the Tests with Pytest
+
+To ensure the functionality of the Firecrawl Python SDK, we have included end-to-end tests using `pytest`. These tests cover various aspects of the SDK, including URL scraping, web searching, and website crawling.
+
+### Running the Tests
+
+To run the tests, execute the following commands:
+
+Install pytest:
+
+```bash
+pip install pytest
+```
+
+Run:
+
+```bash
+pytest firecrawl/__tests__/e2e_withAuth/test.py
+```
+
+## Contributing
+
+Contributions to the Firecrawl Python SDK are welcome! If you find any issues or have suggestions for improvements, please open an issue or submit a pull request on the GitHub repository.
+
+## License
+
+The Firecrawl Python SDK is licensed under the MIT License. This means you are free to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the SDK, subject to the following conditions:
+
+- The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+Please note that while this SDK is MIT licensed, it is part of a larger project which may be under different licensing terms. Always refer to the license information in the root directory of the main project for overall licensing details.
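The Error Handling section of the README above says failed requests surface as exceptions with descriptive messages, but no call is shown wrapped in a handler. A minimal sketch of what that looks like, assuming only that errors are raised as ordinary Python exceptions (the README does not name the specific exception classes):

```python
from firecrawl.firecrawl import FirecrawlApp

app = FirecrawlApp(api_key="fc-YOUR_API_KEY")

try:
    scrape_status = app.scrape_url(
        'https://firecrawl.dev',
        params={'formats': ['markdown']}
    )
    print(scrape_status)
except Exception as err:
    # The SDK raises an exception with a descriptive message when the
    # Firecrawl API reports an error; log or re-raise it as needed.
    print(f"Scrape failed: {err}")
```

Catching the broad `Exception` is deliberate here, since the exception hierarchy is not documented in this README; narrow it once you know which classes the SDK actually raises.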
firecrawl-1.6.0/README.md
ADDED
@@ -0,0 +1,251 @@
+# Firecrawl Python SDK
+
+The Firecrawl Python SDK is a library that allows you to easily scrape and crawl websites, and output the data in a format ready for use with language models (LLMs). It provides a simple and intuitive interface for interacting with the Firecrawl API.
+
+## Installation
+
+To install the Firecrawl Python SDK, you can use pip:
+
+```bash
+pip install firecrawl-py
+```
+
+## Usage
+
+1. Get an API key from [firecrawl.dev](https://firecrawl.dev)
+2. Set the API key as an environment variable named `FIRECRAWL_API_KEY` or pass it as a parameter to the `FirecrawlApp` class.
+
+Here's an example of how to use the SDK:
+
+```python
+from firecrawl.firecrawl import FirecrawlApp
+
+app = FirecrawlApp(api_key="fc-YOUR_API_KEY")
+
+# Scrape a website:
+scrape_status = app.scrape_url(
+    'https://firecrawl.dev',
+    params={'formats': ['markdown', 'html']}
+)
+print(scrape_status)
+
+# Crawl a website:
+crawl_status = app.crawl_url(
+    'https://firecrawl.dev',
+    params={
+        'limit': 100,
+        'scrapeOptions': {'formats': ['markdown', 'html']}
+    },
+    poll_interval=30
+)
+print(crawl_status)
+```
+
+### Scraping a URL
+
+To scrape a single URL, use the `scrape_url` method. It takes the URL as a parameter and returns the scraped data as a dictionary.
+
+```python
+url = 'https://example.com'
+scraped_data = app.scrape_url(url)
+```
+
+### Extracting structured data from a URL
+
+With LLM extraction, you can easily extract structured data from any URL. We support pydantic schemas to make it easier for you too. Here is how to use it:
+
+```python
+class ArticleSchema(BaseModel):
+    title: str
+    points: int
+    by: str
+    commentsURL: str
+
+class TopArticlesSchema(BaseModel):
+    top: List[ArticleSchema] = Field(..., max_items=5, description="Top 5 stories")
+
+data = app.scrape_url('https://news.ycombinator.com', {
+    'extractorOptions': {
+        'extractionSchema': TopArticlesSchema.model_json_schema(),
+        'mode': 'llm-extraction'
+    },
+    'pageOptions': {
+        'onlyMainContent': True
+    }
+})
+print(data["llm_extraction"])
+```
+
+### Crawling a Website
+
+To crawl a website, use the `crawl_url` method. It takes the starting URL and optional parameters as arguments. The `params` argument allows you to specify additional options for the crawl job, such as the maximum number of pages to crawl, allowed domains, and the output format.
+
+```python
+idempotency_key = str(uuid.uuid4())  # optional idempotency key
+crawl_result = app.crawl_url('firecrawl.dev', {'excludePaths': ['blog/*']}, 2, idempotency_key)
+print(crawl_result)
+```
+
+### Crawling a Website Asynchronously
+
+To crawl a website asynchronously, use the `async_crawl_url` method. It takes the starting URL and optional parameters as arguments. The `params` argument allows you to specify additional options for the crawl job, such as the maximum number of pages to crawl, allowed domains, and the output format.
+
+```python
+crawl_result = app.async_crawl_url('firecrawl.dev', {'excludePaths': ['blog/*']}, "")
+print(crawl_result)
+```
+
+### Checking Crawl Status
+
+To check the status of a crawl job, use the `check_crawl_status` method. It takes the job ID as a parameter and returns the current status of the crawl job.
+
+```python
+id = crawl_result['id']
+status = app.check_crawl_status(id)
+```
+
+### Map a Website
+
+Use `map_url` to generate a list of URLs from a website. The `params` argument lets you customize the mapping process, including options to exclude subdomains or to utilize the sitemap.
+
+```python
+# Map a website:
+map_result = app.map_url('https://example.com')
+print(map_result)
+```
+
+### Crawl a website with WebSockets
+
+To crawl a website with WebSockets, use the `crawl_url_and_watch` method. It takes the starting URL and optional parameters as arguments. The `params` argument allows you to specify additional options for the crawl job, such as the maximum number of pages to crawl, allowed domains, and the output format.
+
+```python
+# inside an async function...
+nest_asyncio.apply()
+
+# Define event handlers
+def on_document(detail):
+    print("DOC", detail)
+
+def on_error(detail):
+    print("ERR", detail['error'])
+
+def on_done(detail):
+    print("DONE", detail['status'])
+
+# Function to start the crawl and watch process
+async def start_crawl_and_watch():
+    # Initiate the crawl job and get the watcher
+    watcher = app.crawl_url_and_watch('firecrawl.dev', {'excludePaths': ['blog/*'], 'limit': 5})
+
+    # Add event listeners
+    watcher.add_event_listener("document", on_document)
+    watcher.add_event_listener("error", on_error)
+    watcher.add_event_listener("done", on_done)
+
+    # Start the watcher
+    await watcher.connect()
+
+# Run the event loop
+await start_crawl_and_watch()
+```
+
+### Scraping multiple URLs in batch
+
+To batch scrape multiple URLs, use the `batch_scrape_urls` method. It takes the URLs and optional parameters as arguments. The `params` argument allows you to specify additional options for the scraper, such as the output formats.
+
+```python
+idempotency_key = str(uuid.uuid4())  # optional idempotency key
+batch_scrape_result = app.batch_scrape_urls(['firecrawl.dev', 'mendable.ai'], {'formats': ['markdown', 'html']}, 2, idempotency_key)
+print(batch_scrape_result)
+```
+
+### Asynchronous batch scrape
+
+To run a batch scrape asynchronously, use the `async_batch_scrape_urls` method. It takes the URLs and optional parameters as arguments. The `params` argument allows you to specify additional options for the scraper, such as the output formats.
+
+```python
+batch_scrape_result = app.async_batch_scrape_urls(['firecrawl.dev', 'mendable.ai'], {'formats': ['markdown', 'html']})
+print(batch_scrape_result)
+```
+
+### Checking batch scrape status
+
+To check the status of an asynchronous batch scrape job, use the `check_batch_scrape_status` method. It takes the job ID as a parameter and returns the current status of the batch scrape job.
+
+```python
+id = batch_scrape_result['id']
+status = app.check_batch_scrape_status(id)
+```
+
+### Batch scrape with WebSockets
+
+To use batch scrape with WebSockets, use the `batch_scrape_urls_and_watch` method. It takes the URLs and optional parameters as arguments. The `params` argument allows you to specify additional options for the scraper, such as the output formats.
+
+```python
+# inside an async function...
+nest_asyncio.apply()
+
+# Define event handlers
+def on_document(detail):
+    print("DOC", detail)
+
+def on_error(detail):
+    print("ERR", detail['error'])
+
+def on_done(detail):
+    print("DONE", detail['status'])
+
+# Function to start the batch scrape and watch process
+async def start_crawl_and_watch():
+    # Initiate the batch scrape job and get the watcher
+    watcher = app.batch_scrape_urls_and_watch(['firecrawl.dev', 'mendable.ai'], {'formats': ['markdown', 'html']})
+
+    # Add event listeners
+    watcher.add_event_listener("document", on_document)
+    watcher.add_event_listener("error", on_error)
+    watcher.add_event_listener("done", on_done)
+
+    # Start the watcher
+    await watcher.connect()
+
+# Run the event loop
+await start_crawl_and_watch()
+```
+
+## Error Handling
+
+The SDK handles errors returned by the Firecrawl API and raises appropriate exceptions. If an error occurs during a request, an exception will be raised with a descriptive error message.
+
+## Running the Tests with Pytest
+
+To ensure the functionality of the Firecrawl Python SDK, we have included end-to-end tests using `pytest`. These tests cover various aspects of the SDK, including URL scraping, web searching, and website crawling.
+
+### Running the Tests
+
+To run the tests, execute the following commands:
+
+Install pytest:
+
+```bash
+pip install pytest
+```
+
+Run:
+
+```bash
+pytest firecrawl/__tests__/e2e_withAuth/test.py
+```
+
+## Contributing
+
+Contributions to the Firecrawl Python SDK are welcome! If you find any issues or have suggestions for improvements, please open an issue or submit a pull request on the GitHub repository.
+
+## License
+
+The Firecrawl Python SDK is licensed under the MIT License. This means you are free to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the SDK, subject to the following conditions:
+
+- The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+Please note that while this SDK is MIT licensed, it is part of a larger project which may be under different licensing terms. Always refer to the license information in the root directory of the main project for overall licensing details.
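The README documents `async_crawl_url` and `check_crawl_status` separately but never combines them. A rough polling sketch, assuming the returned job dictionary carries an `id` field (as in the "Checking Crawl Status" example above) and that the status payload includes a `status` string; the terminal values `'completed'` and `'failed'` are assumptions, not taken from this README:

```python
import time

from firecrawl.firecrawl import FirecrawlApp

app = FirecrawlApp(api_key="fc-YOUR_API_KEY")

# Start the crawl without blocking; the response carries the job id.
crawl_job = app.async_crawl_url('https://firecrawl.dev', {'limit': 10})
job_id = crawl_job['id']

# Poll until the job reaches a terminal state (status names assumed).
while True:
    status = app.check_crawl_status(job_id)
    if status.get('status') in ('completed', 'failed'):
        break
    time.sleep(5)

print(status)
```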
firecrawl-1.6.0/firecrawl/__init__.py
ADDED
@@ -0,0 +1,79 @@
+"""
+This is the Firecrawl package.
+
+This package provides a Python SDK for interacting with the Firecrawl API.
+It includes methods to scrape URLs, perform searches, initiate and monitor crawl jobs,
+and check the status of these jobs.
+
+For more information visit https://github.com/firecrawl/
+"""
+
+import logging
+import os
+
+from .firecrawl import FirecrawlApp  # noqa
+
+__version__ = "1.6.0"
+
+# Define the logger for the Firecrawl project
+logger: logging.Logger = logging.getLogger("firecrawl")
+
+
+def _configure_logger() -> None:
+    """
+    Configure the firecrawl logger for console output.
+
+    The function attaches a handler for console output with a specific format and date
+    format to the firecrawl logger.
+    """
+    try:
+        # Create the formatter
+        formatter = logging.Formatter(
+            "[%(asctime)s - %(name)s:%(lineno)d - %(levelname)s] %(message)s",
+            datefmt="%Y-%m-%d %H:%M:%S",
+        )
+
+        # Create the console handler and set the formatter
+        console_handler = logging.StreamHandler()
+        console_handler.setFormatter(formatter)
+
+        # Add the console handler to the firecrawl logger
+        logger.addHandler(console_handler)
+    except Exception as e:
+        logger.error("Failed to configure logging: %s", e)
+
+
+def setup_logging() -> None:
+    """Set up logging based on the FIRECRAWL_LOGGING_LEVEL environment variable."""
+    # Check if the firecrawl logger already has a handler
+    if logger.hasHandlers():
+        return  # To prevent duplicate logging
+
+    # Check if the FIRECRAWL_LOGGING_LEVEL environment variable is set
+    if not (env := os.getenv("FIRECRAWL_LOGGING_LEVEL", "").upper()):
+        # Attach a no-op handler to prevent warnings about no handlers
+        logger.addHandler(logging.NullHandler())
+        return
+
+    # Attach the console handler to the firecrawl logger
+    _configure_logger()
+
+    # Set the logging level based on the FIRECRAWL_LOGGING_LEVEL environment variable
+    if env == "DEBUG":
+        logger.setLevel(logging.DEBUG)
+    elif env == "INFO":
+        logger.setLevel(logging.INFO)
+    elif env == "WARNING":
+        logger.setLevel(logging.WARNING)
+    elif env == "ERROR":
+        logger.setLevel(logging.ERROR)
+    elif env == "CRITICAL":
+        logger.setLevel(logging.CRITICAL)
+    else:
+        logger.setLevel(logging.INFO)
+        logger.warning("Unknown logging level: %s, defaulting to INFO", env)
+
+
+# Initialize logging configuration when the module is imported
+setup_logging()
+logger.debug("Debugging logger setup")
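Because `setup_logging()` above runs at import time, `FIRECRAWL_LOGGING_LEVEL` has to be set before `firecrawl` is first imported; otherwise the module attaches a `NullHandler` and stays quiet. A small sketch of enabling debug output under that assumption:

```python
import os

# Must happen before the first `import firecrawl`: setup_logging() reads
# FIRECRAWL_LOGGING_LEVEL when the package is imported.
os.environ["FIRECRAWL_LOGGING_LEVEL"] = "DEBUG"

import firecrawl  # noqa: E402  (import intentionally placed after the env var)

firecrawl.logger.debug("firecrawl debug logging is now visible on the console")
```

Setting the variable in the shell (`export FIRECRAWL_LOGGING_LEVEL=DEBUG`) achieves the same thing without reordering imports.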