apify 1.7.0b1__tar.gz → 2.2.0b14__tar.gz
This diff shows the changes between two publicly released versions of the package, as they appear in their public registry. It is provided for informational purposes only.
Potentially problematic release: this version of apify might be problematic.
- apify-2.2.0b14/PKG-INFO +211 -0
- apify-2.2.0b14/README.md +171 -0
- apify-2.2.0b14/pyproject.toml +199 -0
- apify-2.2.0b14/src/apify/__init__.py +24 -0
- apify-2.2.0b14/src/apify/_actor.py +1030 -0
- apify-2.2.0b14/src/apify/_configuration.py +370 -0
- apify-2.2.0b14/src/apify/_consts.py +10 -0
- {apify-1.7.0b1 → apify-2.2.0b14}/src/apify/_crypto.py +31 -27
- apify-2.2.0b14/src/apify/_models.py +117 -0
- apify-2.2.0b14/src/apify/_platform_event_manager.py +231 -0
- apify-2.2.0b14/src/apify/_proxy_configuration.py +320 -0
- apify-2.2.0b14/src/apify/_utils.py +58 -0
- apify-2.2.0b14/src/apify/apify_storage_client/__init__.py +3 -0
- apify-2.2.0b14/src/apify/apify_storage_client/_apify_storage_client.py +68 -0
- apify-2.2.0b14/src/apify/apify_storage_client/_dataset_client.py +190 -0
- apify-2.2.0b14/src/apify/apify_storage_client/_dataset_collection_client.py +51 -0
- apify-2.2.0b14/src/apify/apify_storage_client/_key_value_store_client.py +94 -0
- apify-2.2.0b14/src/apify/apify_storage_client/_key_value_store_collection_client.py +51 -0
- apify-2.2.0b14/src/apify/apify_storage_client/_request_queue_client.py +176 -0
- apify-2.2.0b14/src/apify/apify_storage_client/_request_queue_collection_client.py +51 -0
- apify-2.2.0b14/src/apify/log.py +41 -0
- apify-2.2.0b14/src/apify/py.typed +0 -0
- apify-2.2.0b14/src/apify/scrapy/__init__.py +11 -0
- apify-2.2.0b14/src/apify/scrapy/middlewares/__init__.py +3 -0
- {apify-1.7.0b1 → apify-2.2.0b14}/src/apify/scrapy/middlewares/apify_proxy.py +29 -27
- apify-2.2.0b14/src/apify/scrapy/middlewares/py.typed +0 -0
- apify-2.2.0b14/src/apify/scrapy/pipelines/__init__.py +3 -0
- {apify-1.7.0b1 → apify-2.2.0b14}/src/apify/scrapy/pipelines/actor_dataset_push.py +6 -3
- apify-2.2.0b14/src/apify/scrapy/pipelines/py.typed +0 -0
- apify-2.2.0b14/src/apify/scrapy/py.typed +0 -0
- {apify-1.7.0b1 → apify-2.2.0b14}/src/apify/scrapy/requests.py +60 -58
- {apify-1.7.0b1 → apify-2.2.0b14}/src/apify/scrapy/scheduler.py +28 -19
- {apify-1.7.0b1 → apify-2.2.0b14}/src/apify/scrapy/utils.py +10 -32
- apify-2.2.0b14/src/apify/storages/__init__.py +5 -0
- apify-2.2.0b14/src/apify/storages/_request_list.py +150 -0
- apify-2.2.0b14/src/apify/storages/py.typed +0 -0
- apify-1.7.0b1/PKG-INFO +0 -149
- apify-1.7.0b1/README.md +0 -90
- apify-1.7.0b1/pyproject.toml +0 -154
- apify-1.7.0b1/setup.cfg +0 -4
- apify-1.7.0b1/src/apify/__init__.py +0 -9
- apify-1.7.0b1/src/apify/_memory_storage/__init__.py +0 -3
- apify-1.7.0b1/src/apify/_memory_storage/file_storage_utils.py +0 -71
- apify-1.7.0b1/src/apify/_memory_storage/memory_storage_client.py +0 -219
- apify-1.7.0b1/src/apify/_memory_storage/resource_clients/__init__.py +0 -19
- apify-1.7.0b1/src/apify/_memory_storage/resource_clients/base_resource_client.py +0 -141
- apify-1.7.0b1/src/apify/_memory_storage/resource_clients/base_resource_collection_client.py +0 -114
- apify-1.7.0b1/src/apify/_memory_storage/resource_clients/dataset.py +0 -452
- apify-1.7.0b1/src/apify/_memory_storage/resource_clients/dataset_collection.py +0 -48
- apify-1.7.0b1/src/apify/_memory_storage/resource_clients/key_value_store.py +0 -533
- apify-1.7.0b1/src/apify/_memory_storage/resource_clients/key_value_store_collection.py +0 -48
- apify-1.7.0b1/src/apify/_memory_storage/resource_clients/request_queue.py +0 -466
- apify-1.7.0b1/src/apify/_memory_storage/resource_clients/request_queue_collection.py +0 -48
- apify-1.7.0b1/src/apify/_utils.py +0 -524
- apify-1.7.0b1/src/apify/actor.py +0 -1351
- apify-1.7.0b1/src/apify/config.py +0 -127
- apify-1.7.0b1/src/apify/consts.py +0 -67
- apify-1.7.0b1/src/apify/event_manager.py +0 -236
- apify-1.7.0b1/src/apify/log.py +0 -124
- apify-1.7.0b1/src/apify/proxy_configuration.py +0 -365
- apify-1.7.0b1/src/apify/scrapy/__init__.py +0 -3
- apify-1.7.0b1/src/apify/scrapy/middlewares/__init__.py +0 -1
- apify-1.7.0b1/src/apify/scrapy/pipelines/__init__.py +0 -1
- apify-1.7.0b1/src/apify/storages/__init__.py +0 -11
- apify-1.7.0b1/src/apify/storages/base_storage.py +0 -181
- apify-1.7.0b1/src/apify/storages/dataset.py +0 -494
- apify-1.7.0b1/src/apify/storages/key_value_store.py +0 -257
- apify-1.7.0b1/src/apify/storages/request_queue.py +0 -602
- apify-1.7.0b1/src/apify/storages/storage_client_manager.py +0 -72
- apify-1.7.0b1/src/apify.egg-info/PKG-INFO +0 -149
- apify-1.7.0b1/src/apify.egg-info/SOURCES.txt +0 -44
- apify-1.7.0b1/src/apify.egg-info/dependency_links.txt +0 -1
- apify-1.7.0b1/src/apify.egg-info/requires.txt +0 -34
- apify-1.7.0b1/src/apify.egg-info/top_level.txt +0 -1
- {apify-1.7.0b1 → apify-2.2.0b14}/LICENSE +0 -0
- {apify-1.7.0b1/src/apify → apify-2.2.0b14/src/apify/apify_storage_client}/py.typed +0 -0
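The file list above tells the main story of this release: the 1.x in-memory storage emulation (`_memory_storage/`) is removed in favor of Crawlee-backed storages and a new `apify_storage_client` package, and the formerly public modules (`actor.py`, `config.py`, `proxy_configuration.py`) become private, underscore-prefixed modules. A minimal import-surface sketch, assuming apify 2.2.0b14 is installed (the root re-exports are confirmed by the new `src/apify/__init__.py` at the end of this diff):

```python
# Import-surface sketch, assuming apify 2.2.0b14 is installed. The public
# names now live on the package root, re-exported from private modules.
from apify import Actor, Configuration, ProxyConfiguration

# 1.x-style module imports such as the (hypothetical here) line below would
# no longer resolve, because apify/config.py became apify/_configuration.py:
# from apify.config import Configuration
```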
apify-2.2.0b14/PKG-INFO
ADDED
@@ -0,0 +1,211 @@
+Metadata-Version: 2.3
+Name: apify
+Version: 2.2.0b14
+Summary: Apify SDK for Python
+License: Apache-2.0
+Keywords: apify,sdk,automation,chrome,crawlee,crawler,headless,scraper,scraping
+Author: Apify Technologies s.r.o.
+Author-email: support@apify.com
+Requires-Python: >=3.9,<4.0
+Classifier: Development Status :: 5 - Production/Stable
+Classifier: Intended Audience :: Developers
+Classifier: License :: OSI Approved :: Apache Software License
+Classifier: Operating System :: OS Independent
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3.9
+Classifier: Programming Language :: Python :: 3.10
+Classifier: Programming Language :: Python :: 3.11
+Classifier: Programming Language :: Python :: 3.12
+Classifier: Programming Language :: Python :: 3.13
+Classifier: Topic :: Software Development :: Libraries
+Provides-Extra: scrapy
+Requires-Dist: apify-client (>=1.8.1)
+Requires-Dist: apify-shared (>=1.2.1)
+Requires-Dist: crawlee (>=0.5.1,<0.6.0)
+Requires-Dist: cryptography (>=42.0.0)
+Requires-Dist: httpx (>=0.27.0)
+Requires-Dist: lazy-object-proxy (>=1.10.0)
+Requires-Dist: more_itertools (>=10.2.0)
+Requires-Dist: scrapy (>=2.11.0) ; extra == "scrapy"
+Requires-Dist: typing-extensions (>=4.1.0)
+Requires-Dist: websockets (>=10.0,<14.0.0)
+Project-URL: Apify Homepage, https://apify.com
+Project-URL: Changelog, https://docs.apify.com/sdk/python/docs/changelog
+Project-URL: Documentation, https://docs.apify.com/sdk/python/
+Project-URL: Homepage, https://docs.apify.com/sdk/python/
+Project-URL: Issue Tracker, https://github.com/apify/apify-sdk-python/issues
+Project-URL: Repository, https://github.com/apify/apify-sdk-python
+Description-Content-Type: text/markdown
+
+# Apify SDK for Python
+
+The Apify SDK for Python is the official library to create [Apify Actors](https://docs.apify.com/platform/actors)
+in Python. It provides useful features like Actor lifecycle management, local storage emulation, and Actor
+event handling.
+
+If you just need to access the [Apify API](https://docs.apify.com/api/v2) from your Python applications,
+check out the [Apify Client for Python](https://docs.apify.com/api/client/python) instead.
+
+## Installation
+
+The Apify SDK for Python is available on PyPI as the `apify` package.
+For a default installation, using pip, run the following:
+
+```bash
+pip install apify
+```
+
+For users interested in integrating Apify with Scrapy, we provide a package extra called `scrapy`.
+To install Apify with the `scrapy` extra, use the following command:
+
+```bash
+pip install apify[scrapy]
+```
+
+## Documentation
+
+For usage instructions, check the documentation on [Apify Docs](https://docs.apify.com/sdk/python/).
+
+## Examples
+
+Below are a few examples demonstrating how to use the Apify SDK with some web scraping-related libraries.
+
+### Apify SDK with HTTPX and BeautifulSoup
+
+This example illustrates how to integrate the Apify SDK with [HTTPX](https://www.python-httpx.org/) and [BeautifulSoup](https://pypi.org/project/beautifulsoup4/) to scrape data from web pages.
+
+```python
+from apify import Actor
+from bs4 import BeautifulSoup
+from httpx import AsyncClient
+
+
+async def main() -> None:
+    async with Actor:
+        # Retrieve the Actor input, and use default values if not provided.
+        actor_input = await Actor.get_input() or {}
+        start_urls = actor_input.get('start_urls', [{'url': 'https://apify.com'}])
+
+        # Open the default request queue for handling URLs to be processed.
+        request_queue = await Actor.open_request_queue()
+
+        # Enqueue the start URLs.
+        for start_url in start_urls:
+            url = start_url.get('url')
+            await request_queue.add_request(url)
+
+        # Process the URLs from the request queue.
+        while request := await request_queue.fetch_next_request():
+            Actor.log.info(f'Scraping {request.url} ...')
+
+            # Fetch the HTTP response from the specified URL using HTTPX.
+            async with AsyncClient() as client:
+                response = await client.get(request.url)
+
+            # Parse the HTML content using Beautiful Soup.
+            soup = BeautifulSoup(response.content, 'html.parser')
+
+            # Extract the desired data.
+            data = {
+                'url': request.url,
+                'title': soup.title.string,
+                'h1s': [h1.text for h1 in soup.find_all('h1')],
+                'h2s': [h2.text for h2 in soup.find_all('h2')],
+                'h3s': [h3.text for h3 in soup.find_all('h3')],
+            }
+
+            # Store the extracted data to the default dataset.
+            await Actor.push_data(data)
+```
+
+### Apify SDK with PlaywrightCrawler from Crawlee
+
+This example demonstrates how to use the Apify SDK alongside `PlaywrightCrawler` from [Crawlee](https://crawlee.dev/python) to perform web scraping.
+
+```python
+from apify import Actor, Request
+from crawlee.playwright_crawler import PlaywrightCrawler, PlaywrightCrawlingContext
+
+
+async def main() -> None:
+    async with Actor:
+        # Retrieve the Actor input, and use default values if not provided.
+        actor_input = await Actor.get_input() or {}
+        start_urls = [url.get('url') for url in actor_input.get('start_urls', [{'url': 'https://apify.com'}])]
+
+        # Exit if no start URLs are provided.
+        if not start_urls:
+            Actor.log.info('No start URLs specified in Actor input, exiting...')
+            await Actor.exit()
+
+        # Create a crawler.
+        crawler = PlaywrightCrawler(
+            # Limit the crawl to max requests. Remove or increase it for crawling all links.
+            max_requests_per_crawl=50,
+            headless=True,
+        )
+
+        # Define a request handler, which will be called for every request.
+        @crawler.router.default_handler
+        async def request_handler(context: PlaywrightCrawlingContext) -> None:
+            url = context.request.url
+            Actor.log.info(f'Scraping {url}...')
+
+            # Extract the desired data.
+            data = {
+                'url': context.request.url,
+                'title': await context.page.title(),
+                'h1s': [await h1.text_content() for h1 in await context.page.locator('h1').all()],
+                'h2s': [await h2.text_content() for h2 in await context.page.locator('h2').all()],
+                'h3s': [await h3.text_content() for h3 in await context.page.locator('h3').all()],
+            }
+
+            # Store the extracted data to the default dataset.
+            await context.push_data(data)
+
+            # Enqueue additional links found on the current page.
+            await context.enqueue_links()
+
+        # Run the crawler with the starting URLs.
+        await crawler.run(start_urls)
+```
+
+## What are Actors?
+
+Actors are serverless cloud programs that can do almost anything a human can do in a web browser.
+They can do anything from small tasks such as filling in forms or unsubscribing from online services,
+all the way up to scraping and processing vast numbers of web pages.
+
+They can be run either locally, or on the [Apify platform](https://docs.apify.com/platform/),
+where you can run them at scale, monitor them, schedule them, or publish and monetize them.
+
+If you're new to Apify, learn [what is Apify](https://docs.apify.com/platform/about)
+in the Apify platform documentation.
+
+## Creating Actors
+
+To create and run Actors through Apify Console,
+see the [Console documentation](https://docs.apify.com/academy/getting-started/creating-actors#choose-your-template).
+
+To create and run Python Actors locally, check the documentation for
+[how to create and run Python Actors locally](https://docs.apify.com/sdk/python/docs/overview/running-locally).
+
+## Guides
+
+To see how you can use the Apify SDK with other popular libraries used for web scraping,
+check out our guides for using
+[Requests and HTTPX](https://docs.apify.com/sdk/python/docs/guides/requests-and-httpx),
+[Beautiful Soup](https://docs.apify.com/sdk/python/docs/guides/beautiful-soup),
+[Playwright](https://docs.apify.com/sdk/python/docs/guides/playwright),
+[Selenium](https://docs.apify.com/sdk/python/docs/guides/selenium),
+or [Scrapy](https://docs.apify.com/sdk/python/docs/guides/scrapy).
+
+## Usage concepts
+
+To learn more about the features of the Apify SDK and how to use them,
+check out the Usage Concepts section in the sidebar,
+particularly the guides for the [Actor lifecycle](https://docs.apify.com/sdk/python/docs/concepts/actor-lifecycle),
+[working with storages](https://docs.apify.com/sdk/python/docs/concepts/storages),
+[handling Actor events](https://docs.apify.com/sdk/python/docs/concepts/actor-events)
+or [how to use proxies](https://docs.apify.com/sdk/python/docs/concepts/proxy-management).
+
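The PKG-INFO above is exactly the distribution metadata that installers and the standard library read at runtime. A small standalone sketch (not part of the package) that inspects the installed metadata corresponding to the fields shown above:

```python
# Reads the installed distribution metadata that corresponds to the
# PKG-INFO above; assumes apify 2.2.0b14 is installed.
from importlib import metadata

dist = metadata.metadata('apify')
print(dist['Name'], dist['Version'])  # -> apify 2.2.0b14
print(metadata.requires('apify'))     # -> the Requires-Dist entries above
```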
apify-2.2.0b14/README.md
ADDED
@@ -0,0 +1,171 @@
+# Apify SDK for Python
+
+The Apify SDK for Python is the official library to create [Apify Actors](https://docs.apify.com/platform/actors)
+in Python. It provides useful features like Actor lifecycle management, local storage emulation, and Actor
+event handling.
+
+If you just need to access the [Apify API](https://docs.apify.com/api/v2) from your Python applications,
+check out the [Apify Client for Python](https://docs.apify.com/api/client/python) instead.
+
+## Installation
+
+The Apify SDK for Python is available on PyPI as the `apify` package.
+For a default installation, using pip, run the following:
+
+```bash
+pip install apify
+```
+
+For users interested in integrating Apify with Scrapy, we provide a package extra called `scrapy`.
+To install Apify with the `scrapy` extra, use the following command:
+
+```bash
+pip install apify[scrapy]
+```
+
+## Documentation
+
+For usage instructions, check the documentation on [Apify Docs](https://docs.apify.com/sdk/python/).
+
+## Examples
+
+Below are a few examples demonstrating how to use the Apify SDK with some web scraping-related libraries.
+
+### Apify SDK with HTTPX and BeautifulSoup
+
+This example illustrates how to integrate the Apify SDK with [HTTPX](https://www.python-httpx.org/) and [BeautifulSoup](https://pypi.org/project/beautifulsoup4/) to scrape data from web pages.
+
+```python
+from apify import Actor
+from bs4 import BeautifulSoup
+from httpx import AsyncClient
+
+
+async def main() -> None:
+    async with Actor:
+        # Retrieve the Actor input, and use default values if not provided.
+        actor_input = await Actor.get_input() or {}
+        start_urls = actor_input.get('start_urls', [{'url': 'https://apify.com'}])
+
+        # Open the default request queue for handling URLs to be processed.
+        request_queue = await Actor.open_request_queue()
+
+        # Enqueue the start URLs.
+        for start_url in start_urls:
+            url = start_url.get('url')
+            await request_queue.add_request(url)
+
+        # Process the URLs from the request queue.
+        while request := await request_queue.fetch_next_request():
+            Actor.log.info(f'Scraping {request.url} ...')
+
+            # Fetch the HTTP response from the specified URL using HTTPX.
+            async with AsyncClient() as client:
+                response = await client.get(request.url)
+
+            # Parse the HTML content using Beautiful Soup.
+            soup = BeautifulSoup(response.content, 'html.parser')
+
+            # Extract the desired data.
+            data = {
+                'url': request.url,
+                'title': soup.title.string,
+                'h1s': [h1.text for h1 in soup.find_all('h1')],
+                'h2s': [h2.text for h2 in soup.find_all('h2')],
+                'h3s': [h3.text for h3 in soup.find_all('h3')],
+            }
+
+            # Store the extracted data to the default dataset.
+            await Actor.push_data(data)
+```
+
+### Apify SDK with PlaywrightCrawler from Crawlee
+
+This example demonstrates how to use the Apify SDK alongside `PlaywrightCrawler` from [Crawlee](https://crawlee.dev/python) to perform web scraping.
+
+```python
+from apify import Actor, Request
+from crawlee.playwright_crawler import PlaywrightCrawler, PlaywrightCrawlingContext
+
+
+async def main() -> None:
+    async with Actor:
+        # Retrieve the Actor input, and use default values if not provided.
+        actor_input = await Actor.get_input() or {}
+        start_urls = [url.get('url') for url in actor_input.get('start_urls', [{'url': 'https://apify.com'}])]
+
+        # Exit if no start URLs are provided.
+        if not start_urls:
+            Actor.log.info('No start URLs specified in Actor input, exiting...')
+            await Actor.exit()
+
+        # Create a crawler.
+        crawler = PlaywrightCrawler(
+            # Limit the crawl to max requests. Remove or increase it for crawling all links.
+            max_requests_per_crawl=50,
+            headless=True,
+        )
+
+        # Define a request handler, which will be called for every request.
+        @crawler.router.default_handler
+        async def request_handler(context: PlaywrightCrawlingContext) -> None:
+            url = context.request.url
+            Actor.log.info(f'Scraping {url}...')
+
+            # Extract the desired data.
+            data = {
+                'url': context.request.url,
+                'title': await context.page.title(),
+                'h1s': [await h1.text_content() for h1 in await context.page.locator('h1').all()],
+                'h2s': [await h2.text_content() for h2 in await context.page.locator('h2').all()],
+                'h3s': [await h3.text_content() for h3 in await context.page.locator('h3').all()],
+            }
+
+            # Store the extracted data to the default dataset.
+            await context.push_data(data)
+
+            # Enqueue additional links found on the current page.
+            await context.enqueue_links()
+
+        # Run the crawler with the starting URLs.
+        await crawler.run(start_urls)
+```
+
+## What are Actors?
+
+Actors are serverless cloud programs that can do almost anything a human can do in a web browser.
+They can do anything from small tasks such as filling in forms or unsubscribing from online services,
+all the way up to scraping and processing vast numbers of web pages.
+
+They can be run either locally, or on the [Apify platform](https://docs.apify.com/platform/),
+where you can run them at scale, monitor them, schedule them, or publish and monetize them.
+
+If you're new to Apify, learn [what is Apify](https://docs.apify.com/platform/about)
+in the Apify platform documentation.
+
+## Creating Actors
+
+To create and run Actors through Apify Console,
+see the [Console documentation](https://docs.apify.com/academy/getting-started/creating-actors#choose-your-template).
+
+To create and run Python Actors locally, check the documentation for
+[how to create and run Python Actors locally](https://docs.apify.com/sdk/python/docs/overview/running-locally).
+
+## Guides
+
+To see how you can use the Apify SDK with other popular libraries used for web scraping,
+check out our guides for using
+[Requests and HTTPX](https://docs.apify.com/sdk/python/docs/guides/requests-and-httpx),
+[Beautiful Soup](https://docs.apify.com/sdk/python/docs/guides/beautiful-soup),
+[Playwright](https://docs.apify.com/sdk/python/docs/guides/playwright),
+[Selenium](https://docs.apify.com/sdk/python/docs/guides/selenium),
+or [Scrapy](https://docs.apify.com/sdk/python/docs/guides/scrapy).
+
+## Usage concepts
+
+To learn more about the features of the Apify SDK and how to use them,
+check out the Usage Concepts section in the sidebar,
+particularly the guides for the [Actor lifecycle](https://docs.apify.com/sdk/python/docs/concepts/actor-lifecycle),
+[working with storages](https://docs.apify.com/sdk/python/docs/concepts/storages),
+[handling Actor events](https://docs.apify.com/sdk/python/docs/concepts/actor-events)
+or [how to use proxies](https://docs.apify.com/sdk/python/docs/concepts/proxy-management).
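Both README examples define an `async def main()` but leave out the entrypoint that runs it; in the Apify Python Actor templates this coroutine is typically run with `asyncio.run()`. A minimal sketch, with a hypothetical file layout where the example's `main()` lives in `main.py`:

```python
# entrypoint.py -- a hypothetical wrapper, not part of the package.
import asyncio

from main import main  # assumes the example's main() is defined in main.py

if __name__ == '__main__':
    asyncio.run(main())
```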
apify-2.2.0b14/pyproject.toml
ADDED
@@ -0,0 +1,199 @@
+[build-system]
+requires = ["poetry-core"]
+build-backend = "poetry.core.masonry.api"
+
+[tool.poetry]
+name = "apify"
+version = "2.2.0b14"
+description = "Apify SDK for Python"
+authors = ["Apify Technologies s.r.o. <support@apify.com>"]
+license = "Apache-2.0"
+readme = "README.md"
+packages = [{ include = "apify", from = "src" }]
+classifiers = [
+    "Development Status :: 5 - Production/Stable",
+    "Intended Audience :: Developers",
+    "License :: OSI Approved :: Apache Software License",
+    "Operating System :: OS Independent",
+    "Programming Language :: Python :: 3.9",
+    "Programming Language :: Python :: 3.10",
+    "Programming Language :: Python :: 3.11",
+    "Programming Language :: Python :: 3.12",
+    "Topic :: Software Development :: Libraries",
+]
+keywords = [
+    "apify",
+    "sdk",
+    "automation",
+    "chrome",
+    "crawlee",
+    "crawler",
+    "headless",
+    "scraper",
+    "scraping",
+]
+
+[tool.poetry.urls]
+"Homepage" = "https://docs.apify.com/sdk/python/"
+"Apify Homepage" = "https://apify.com"
+"Changelog" = "https://docs.apify.com/sdk/python/docs/changelog"
+"Documentation" = "https://docs.apify.com/sdk/python/"
+"Issue Tracker" = "https://github.com/apify/apify-sdk-python/issues"
+"Repository" = "https://github.com/apify/apify-sdk-python"
+
+[tool.poetry.dependencies]
+python = "^3.9"
+apify-client = ">=1.8.1"
+apify-shared = ">=1.2.1"
+crawlee = "~0.5.1"
+cryptography = ">=42.0.0"
+httpx = ">=0.27.0"
+lazy-object-proxy = ">=1.10.0"
+more_itertools = ">=10.2.0"
+scrapy = { version = ">=2.11.0", optional = true }
+typing-extensions = ">=4.1.0"
+# TODO: Relax the upper bound once the issue is resolved:
+# https://github.com/apify/apify-sdk-python/issues/325
+websockets = ">=10.0 <14.0.0"
+
+[tool.poetry.group.dev.dependencies]
+build = "~1.2.0"
+filelock = "~3.16.0"
+griffe = "~1.5.0"
+mypy = "~1.14.0"
+pre-commit = "~4.0.0"
+pydoc-markdown = "~4.8.0"
+pytest = "~8.3.0"
+pytest-asyncio = "~0.25.0"
+pytest-cov = "~6.0.0"
+pytest-only = "~2.1.0"
+pytest-timeout = "~2.3.0"
+pytest-xdist = "~3.6.0"
+respx = "~0.22.0"
+ruff = "~0.9.0"
+setuptools = "~75.8.0" # setuptools are used by pytest but not explicitly required
+
+[tool.poetry.extras]
+scrapy = ["scrapy"]
+
+[tool.ruff]
+line-length = 120
+
+[tool.ruff.lint]
+select = ["ALL"]
+ignore = [
+    "ANN401",   # Dynamically typed expressions (typing.Any) are disallowed in {filename}
+    "ASYNC109", # Async function definition with a `timeout` parameter
+    "BLE001",   # Do not catch blind exception
+    "C901",     # `{name}` is too complex
+    "COM812",   # This rule may cause conflicts when used with the formatter
+    "D100",     # Missing docstring in public module
+    "D104",     # Missing docstring in public package
+    "D107",     # Missing docstring in `__init__`
+    "EM",       # flake8-errmsg
+    "G004",     # Logging statement uses f-string
+    "ISC001",   # This rule may cause conflicts when used with the formatter
+    "FIX",      # flake8-fixme
+    "PLR0911",  # Too many return statements
+    "PLR0913",  # Too many arguments in function definition
+    "PLR0915",  # Too many statements
+    "PTH",      # flake8-use-pathlib
+    "PYI034",   # `__aenter__` methods in classes like `{name}` usually return `self` at runtime
+    "PYI036",   # The second argument in `__aexit__` should be annotated with `object` or `BaseException | None`
+    "S102",     # Use of `exec` detected
+    "S105",     # Possible hardcoded password assigned to
+    "S106",     # Possible hardcoded password assigned to argument: "{name}"
+    "S301",     # `pickle` and modules that wrap it can be unsafe when used to deserialize untrusted data, possible security issue
+    "S303",     # Use of insecure MD2, MD4, MD5, or SHA1 hash function
+    "S311",     # Standard pseudo-random generators are not suitable for cryptographic purposes
+    "TD002",    # Missing author in TODO; try: `# TODO(<author_name>): ...` or `# TODO @<author_name>: ...`
+    "TRY003",   # Avoid specifying long messages outside the exception class
+]
+
+[tool.ruff.format]
+quote-style = "single"
+indent-style = "space"
+
+[tool.ruff.lint.per-file-ignores]
+"**/__init__.py" = [
+    "F401", # Unused imports
+]
+"**/{tests}/*" = [
+    "D",       # Everything from the pydocstyle
+    "INP001",  # File {filename} is part of an implicit namespace package, add an __init__.py
+    "PLR2004", # Magic value used in comparison, consider replacing {value} with a constant variable
+    "S101",    # Use of assert detected
+    "SLF001",  # Private member accessed: `{name}`
+    "T20",     # flake8-print
+    "TRY301",  # Abstract `raise` to an inner function
+    "TID252",  # Prefer absolute imports over relative imports from parent modules
+]
+"**/{docs}/**" = [
+    "D",      # Everything from the pydocstyle
+    "INP001", # File {filename} is part of an implicit namespace package, add an __init__.py
+    "F841",   # Local variable {variable} is assigned to but never used
+]
+
+[tool.ruff.lint.flake8-quotes]
+docstring-quotes = "double"
+inline-quotes = "single"
+
+[tool.ruff.lint.flake8-type-checking]
+runtime-evaluated-base-classes = [
+    "pydantic.BaseModel",
+    "crawlee.configuration.Configuration",
+]
+
+[tool.ruff.lint.flake8-builtins]
+builtins-ignorelist = ["id"]
+
+[tool.ruff.lint.pydocstyle]
+convention = "google"
+
+[tool.ruff.lint.isort]
+known-local-folder = ["apify"]
+known-first-party = ["apify_client", "apify_shared", "crawlee"]
+
+[tool.ruff.lint.pylint]
+max-branches = 18
+
+[tool.pytest.ini_options]
+addopts = "-ra"
+asyncio_default_fixture_loop_scope = "function"
+asyncio_mode = "auto"
+timeout = 1200
+
+[tool.mypy]
+python_version = "3.9"
+plugins = ["pydantic.mypy"]
+files = ["src", "tests"]
+check_untyped_defs = true
+disallow_incomplete_defs = true
+disallow_untyped_calls = true
+disallow_untyped_decorators = true
+disallow_untyped_defs = true
+no_implicit_optional = true
+warn_redundant_casts = true
+warn_return_any = true
+warn_unreachable = true
+warn_unused_ignores = true
+exclude = []
+
+[[tool.mypy.overrides]]
+module = ['scrapy', 'scrapy.*', 'lazy_object_proxy']
+ignore_missing_imports = true
+
+[tool.basedpyright]
+pythonVersion = "3.9"
+typeCheckingMode = "standard"
+include = ["src", "tests"]
+
+[tool.coverage.report]
+exclude_lines = [
+    "pragma: no cover",
+    "if TYPE_CHECKING:",
+    "assert_never()",
+]
+
+[tool.ipdb]
+context = 7
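The `asyncio_mode = "auto"` setting above is what lets the project's test suite define plain `async def` tests: in auto mode, pytest-asyncio collects coroutine functions as tests without an explicit `@pytest.mark.asyncio` marker. A minimal sketch of such a test (hypothetical, not from the package):

```python
# test_smoke.py -- a hypothetical test relying on `asyncio_mode = "auto"`;
# pytest-asyncio runs this coroutine as a test without any marker.
import asyncio


async def test_runs_inside_event_loop() -> None:
    await asyncio.sleep(0)
    assert asyncio.get_running_loop() is not None
```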
apify-2.2.0b14/src/apify/__init__.py
ADDED
@@ -0,0 +1,24 @@
+from importlib import metadata
+
+from apify_shared.consts import WebhookEventType
+from crawlee import Request
+from crawlee.events._types import Event
+
+from apify._actor import Actor
+from apify._configuration import Configuration
+from apify._models import Webhook
+from apify._proxy_configuration import ProxyConfiguration, ProxyInfo
+
+__version__ = metadata.version('apify')
+
+__all__ = [
+    'Actor',
+    'Configuration',
+    'Event',
+    'ProxyConfiguration',
+    'ProxyInfo',
+    'Request',
+    'Webhook',
+    'WebhookEventType',
+    '__version__',
+]
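The `__all__` above is the SDK's entire public surface in 2.2.0b14. A minimal sketch of an Actor that touches the main exports; it assumes apify 2.2.0b14 is installed and default storages are available (running locally or on the Apify platform):

```python
# A minimal sketch exercising the public exports above; hedged: assumes
# apify 2.2.0b14 is installed and default storages are available.
import asyncio

from apify import Actor, Request, __version__


async def main() -> None:
    async with Actor:  # enters the Actor lifecycle (init on enter, exit/fail on leave)
        Actor.log.info(f'Apify SDK version: {__version__}')

        # Request is re-exported from crawlee.
        queue = await Actor.open_request_queue()
        await queue.add_request(Request.from_url('https://apify.com'))

        # Push a sample record to the default dataset.
        await Actor.push_data({'sdk_version': __version__})


if __name__ == '__main__':
    asyncio.run(main())
```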