apify 1.6.1b1__tar.gz → 1.7.0__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of apify might be problematic.
Files changed (46)
  1. {apify-1.6.1b1 → apify-1.7.0}/PKG-INFO +1 -1
  2. {apify-1.6.1b1 → apify-1.7.0}/pyproject.toml +1 -1
  3. {apify-1.6.1b1 → apify-1.7.0}/src/apify/_utils.py +117 -5
  4. {apify-1.6.1b1 → apify-1.7.0}/src/apify/scrapy/requests.py +51 -38
  5. {apify-1.6.1b1 → apify-1.7.0}/src/apify/scrapy/scheduler.py +10 -1
  6. {apify-1.6.1b1 → apify-1.7.0}/src/apify/storages/request_queue.py +42 -10
  7. {apify-1.6.1b1 → apify-1.7.0}/src/apify.egg-info/PKG-INFO +1 -1
  8. {apify-1.6.1b1 → apify-1.7.0}/LICENSE +0 -0
  9. {apify-1.6.1b1 → apify-1.7.0}/README.md +0 -0
  10. {apify-1.6.1b1 → apify-1.7.0}/setup.cfg +0 -0
  11. {apify-1.6.1b1 → apify-1.7.0}/src/apify/__init__.py +0 -0
  12. {apify-1.6.1b1 → apify-1.7.0}/src/apify/_crypto.py +0 -0
  13. {apify-1.6.1b1 → apify-1.7.0}/src/apify/_memory_storage/__init__.py +0 -0
  14. {apify-1.6.1b1 → apify-1.7.0}/src/apify/_memory_storage/file_storage_utils.py +0 -0
  15. {apify-1.6.1b1 → apify-1.7.0}/src/apify/_memory_storage/memory_storage_client.py +0 -0
  16. {apify-1.6.1b1 → apify-1.7.0}/src/apify/_memory_storage/resource_clients/__init__.py +0 -0
  17. {apify-1.6.1b1 → apify-1.7.0}/src/apify/_memory_storage/resource_clients/base_resource_client.py +0 -0
  18. {apify-1.6.1b1 → apify-1.7.0}/src/apify/_memory_storage/resource_clients/base_resource_collection_client.py +0 -0
  19. {apify-1.6.1b1 → apify-1.7.0}/src/apify/_memory_storage/resource_clients/dataset.py +0 -0
  20. {apify-1.6.1b1 → apify-1.7.0}/src/apify/_memory_storage/resource_clients/dataset_collection.py +0 -0
  21. {apify-1.6.1b1 → apify-1.7.0}/src/apify/_memory_storage/resource_clients/key_value_store.py +0 -0
  22. {apify-1.6.1b1 → apify-1.7.0}/src/apify/_memory_storage/resource_clients/key_value_store_collection.py +0 -0
  23. {apify-1.6.1b1 → apify-1.7.0}/src/apify/_memory_storage/resource_clients/request_queue.py +0 -0
  24. {apify-1.6.1b1 → apify-1.7.0}/src/apify/_memory_storage/resource_clients/request_queue_collection.py +0 -0
  25. {apify-1.6.1b1 → apify-1.7.0}/src/apify/actor.py +0 -0
  26. {apify-1.6.1b1 → apify-1.7.0}/src/apify/config.py +0 -0
  27. {apify-1.6.1b1 → apify-1.7.0}/src/apify/consts.py +0 -0
  28. {apify-1.6.1b1 → apify-1.7.0}/src/apify/event_manager.py +0 -0
  29. {apify-1.6.1b1 → apify-1.7.0}/src/apify/log.py +0 -0
  30. {apify-1.6.1b1 → apify-1.7.0}/src/apify/proxy_configuration.py +0 -0
  31. {apify-1.6.1b1 → apify-1.7.0}/src/apify/py.typed +0 -0
  32. {apify-1.6.1b1 → apify-1.7.0}/src/apify/scrapy/__init__.py +0 -0
  33. {apify-1.6.1b1 → apify-1.7.0}/src/apify/scrapy/middlewares/__init__.py +0 -0
  34. {apify-1.6.1b1 → apify-1.7.0}/src/apify/scrapy/middlewares/apify_proxy.py +0 -0
  35. {apify-1.6.1b1 → apify-1.7.0}/src/apify/scrapy/pipelines/__init__.py +0 -0
  36. {apify-1.6.1b1 → apify-1.7.0}/src/apify/scrapy/pipelines/actor_dataset_push.py +0 -0
  37. {apify-1.6.1b1 → apify-1.7.0}/src/apify/scrapy/utils.py +0 -0
  38. {apify-1.6.1b1 → apify-1.7.0}/src/apify/storages/__init__.py +0 -0
  39. {apify-1.6.1b1 → apify-1.7.0}/src/apify/storages/base_storage.py +0 -0
  40. {apify-1.6.1b1 → apify-1.7.0}/src/apify/storages/dataset.py +0 -0
  41. {apify-1.6.1b1 → apify-1.7.0}/src/apify/storages/key_value_store.py +0 -0
  42. {apify-1.6.1b1 → apify-1.7.0}/src/apify/storages/storage_client_manager.py +0 -0
  43. {apify-1.6.1b1 → apify-1.7.0}/src/apify.egg-info/SOURCES.txt +0 -0
  44. {apify-1.6.1b1 → apify-1.7.0}/src/apify.egg-info/dependency_links.txt +0 -0
  45. {apify-1.6.1b1 → apify-1.7.0}/src/apify.egg-info/requires.txt +0 -0
  46. {apify-1.6.1b1 → apify-1.7.0}/src/apify.egg-info/top_level.txt +0 -0
{apify-1.6.1b1 → apify-1.7.0}/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: apify
-Version: 1.6.1b1
+Version: 1.7.0
 Summary: Apify SDK for Python
 Author-email: "Apify Technologies s.r.o." <support@apify.com>
 License: Apache Software License
{apify-1.6.1b1 → apify-1.7.0}/pyproject.toml
@@ -1,6 +1,6 @@
 [project]
 name = "apify"
-version = "1.6.1b1"
+version = "1.7.0"
 description = "Apify SDK for Python"
 readme = "README.md"
 license = { text = "Apache Software License" }
{apify-1.6.1b1 → apify-1.7.0}/src/apify/_utils.py
@@ -1,11 +1,9 @@
 from __future__ import annotations

 import asyncio
-import base64
 import builtins
 import contextlib
 import functools
-import hashlib
 import inspect
 import json
 import mimetypes
@@ -13,10 +11,13 @@ import os
 import re
 import sys
 import time
+from base64 import b64encode
 from collections import OrderedDict
 from collections.abc import MutableMapping
 from datetime import datetime, timezone
+from hashlib import sha256
 from importlib import metadata
+from logging import getLogger
 from typing import (
     Any,
     Callable,
@@ -30,6 +31,7 @@ from typing import (
     overload,
 )
 from typing import OrderedDict as OrderedDictType
+from urllib.parse import parse_qsl, urlencode, urlparse

 import aioshutil
 import psutil
@@ -59,6 +61,7 @@ from apify_shared.utils import (
 from apify.consts import REQUEST_ID_LENGTH, StorageTypes

 T = TypeVar('T')
+logger = getLogger(__name__)


 def get_system_info() -> dict:
@@ -292,9 +295,8 @@ def maybe_parse_body(body: bytes, content_type: str) -> Any:

 def unique_key_to_request_id(unique_key: str) -> str:
     """Generate request ID based on unique key in a deterministic way."""
-    id = re.sub(r'(\+|\/|=)', '', base64.b64encode(hashlib.sha256(unique_key.encode('utf-8')).digest()).decode('utf-8'))  # noqa: A001
-
-    return id[:REQUEST_ID_LENGTH] if len(id) > REQUEST_ID_LENGTH else id
+    request_id = re.sub(r'(\+|\/|=)', '', b64encode(sha256(unique_key.encode('utf-8')).digest()).decode('utf-8'))
+    return request_id[:REQUEST_ID_LENGTH] if len(request_id) > REQUEST_ID_LENGTH else request_id


 async def force_rename(src_dir: str, dst_dir: str) -> None:
@@ -410,3 +412,113 @@ def budget_ow(
 PARSE_DATE_FIELDS_MAX_DEPTH = 3
 PARSE_DATE_FIELDS_KEY_SUFFIX = 'At'
 ListOrDictOrAny = TypeVar('ListOrDictOrAny', list, dict, Any)
+
+
+def compute_short_hash(data: bytes, *, length: int = 8) -> str:
+    """Computes a hexadecimal SHA-256 hash of the provided data and returns a substring (prefix) of it.
+
+    Args:
+        data: The binary data to be hashed.
+        length: The length of the hash to be returned.
+
+    Returns:
+        A substring (prefix) of the hexadecimal hash of the data.
+    """
+    hash_object = sha256(data)
+    return hash_object.hexdigest()[:length]
+
+
+def normalize_url(url: str, *, keep_url_fragment: bool = False) -> str:
+    """Normalizes a URL.
+
+    This function cleans and standardizes a URL by removing leading and trailing whitespaces,
+    converting the scheme and netloc to lower case, stripping unwanted tracking parameters
+    (specifically those beginning with 'utm_'), sorting the remaining query parameters alphabetically,
+    and optionally retaining the URL fragment. The goal is to ensure that URLs that are functionally
+    identical but differ in trivial ways (such as parameter order or casing) are treated as the same.
+
+    Args:
+        url: The URL to be normalized.
+        keep_url_fragment: Flag to determine whether the fragment part of the URL should be retained.
+
+    Returns:
+        A string containing the normalized URL.
+    """
+    # Parse the URL
+    parsed_url = urlparse(url.strip())
+    search_params = dict(parse_qsl(parsed_url.query))  # Convert query to a dict
+
+    # Remove any 'utm_' parameters
+    search_params = {k: v for k, v in search_params.items() if not k.startswith('utm_')}
+
+    # Construct the new query string
+    sorted_keys = sorted(search_params.keys())
+    sorted_query = urlencode([(k, search_params[k]) for k in sorted_keys])
+
+    # Construct the final URL
+    new_url = (
+        parsed_url._replace(
+            query=sorted_query,
+            scheme=parsed_url.scheme,
+            netloc=parsed_url.netloc,
+            path=parsed_url.path.rstrip('/'),
+        )
+        .geturl()
+        .lower()
+    )
+
+    # Retain the URL fragment if required
+    if not keep_url_fragment:
+        new_url = new_url.split('#')[0]
+
+    return new_url
+
+
+def compute_unique_key(
+    url: str,
+    method: str = 'GET',
+    payload: bytes | None = None,
+    *,
+    keep_url_fragment: bool = False,
+    use_extended_unique_key: bool = False,
+) -> str:
+    """Computes a unique key for caching & deduplication of requests.
+
+    This function computes a unique key by normalizing the provided URL and method.
+    If 'use_extended_unique_key' is True and a payload is provided, the payload is hashed and
+    included in the key. Otherwise, the unique key is just the normalized URL.
+
+    Args:
+        url: The request URL.
+        method: The HTTP method, defaults to 'GET'.
+        payload: The request payload, defaults to None.
+        keep_url_fragment: A flag indicating whether to keep the URL fragment, defaults to False.
+        use_extended_unique_key: A flag indicating whether to include a hashed payload in the key, defaults to False.
+
+    Returns:
+        A string representing the unique key for the request.
+    """
+    # Normalize the URL and method.
+    try:
+        normalized_url = normalize_url(url, keep_url_fragment=keep_url_fragment)
+    except Exception as exc:
+        logger.warning(f'Failed to normalize URL: {exc}')
+        normalized_url = url

+    normalized_method = method.upper()
+
+    # Compute and return the extended unique key if required.
+    if use_extended_unique_key:
+        payload_hash = compute_short_hash(payload) if payload else ''
+        return f'{normalized_method}({payload_hash}):{normalized_url}'
+
+    # Log information if there is a non-GET request with a payload.
+    if normalized_method != 'GET' and payload:
+        logger.info(
+            f'We have encountered a {normalized_method} Request with a payload. This is fine. Just letting you know '
+            'that if your requests point to the same URL and differ only in method and payload, you should consider '
+            'using the "use_extended_unique_key" option.'
+        )
+
+    # Return the normalized URL as the unique key.
+    return normalized_url
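
For illustration, a minimal usage sketch of the helpers added above (not part of the diff; apify._utils is an internal module and is imported here only to show the behaviour the added code describes; exact hash prefixes will vary):

    from apify._utils import compute_short_hash, compute_unique_key, normalize_url

    # Tracking parameters are stripped, query keys are sorted, scheme and netloc are
    # lowercased, and the trailing slash is removed:
    print(normalize_url('https://Example.COM/path/?utm_source=news&b=2&a=1'))
    # -> 'https://example.com/path?a=1&b=2'

    # Without the extended key, the unique key is just the normalized URL:
    print(compute_unique_key('https://Example.COM/path/?b=2&a=1'))
    # -> 'https://example.com/path?a=1&b=2'

    # With the extended key, the upper-cased method and an 8-char payload hash prefix are prepended:
    print(compute_unique_key('https://example.com/api', method='POST', payload=b'{"page": 1}', use_extended_unique_key=True))
    # -> 'POST(<8-char sha256 prefix>):https://example.com/api'

    print(compute_short_hash(b'{"page": 1}'))  # first 8 hex chars of the SHA-256 digest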
{apify-1.6.1b1 → apify-1.7.0}/src/apify/scrapy/requests.py
@@ -13,6 +13,7 @@ except ImportError as exc:
     ) from exc

 from apify._crypto import crypto_random_object_id
+from apify._utils import compute_unique_key
 from apify.actor import Actor


@@ -24,57 +25,69 @@ def _is_request_produced_by_middleware(scrapy_request: Request) -> bool:
     return bool(scrapy_request.meta.get('redirect_times')) or bool(scrapy_request.meta.get('retry_times'))


-def to_apify_request(scrapy_request: Request, spider: Spider) -> dict:
+def to_apify_request(scrapy_request: Request, spider: Spider) -> dict | None:
     """Convert a Scrapy request to an Apify request.

     Args:
         scrapy_request: The Scrapy request to be converted.
         spider: The Scrapy spider that the request is associated with.

-    Raises:
-        TypeError: If the scrapy_request is not an instance of the scrapy.Request class.
-
     Returns:
-        The converted Apify request.
+        The converted Apify request if the conversion was successful, otherwise None.
     """
     if not isinstance(scrapy_request, Request):
-        raise TypeError('scrapy_request must be an instance of the scrapy.Request class')
+        Actor.log.warning('Failed to convert to Apify request: Scrapy request must be a Request instance.')
+        return None

     call_id = crypto_random_object_id(8)
     Actor.log.debug(f'[{call_id}]: to_apify_request was called (scrapy_request={scrapy_request})...')

-    apify_request = {
-        'url': scrapy_request.url,
-        'method': scrapy_request.method,
-        'userData': scrapy_request.meta.get('userData', {}),
-    }
-
-    if isinstance(scrapy_request.headers, Headers):
-        apify_request['headers'] = dict(scrapy_request.headers.to_unicode_dict())
-    else:
-        Actor.log.warning(
-            f'scrapy_request.headers is not an instance of the scrapy.http.headers.Headers class, scrapy_request.headers = {scrapy_request.headers}',
-        )
-
-    if _is_request_produced_by_middleware(scrapy_request):
-        apify_request['uniqueKey'] = scrapy_request.url
-    else:
-        # Add 'id' to the apify_request
-        if scrapy_request.meta.get('apify_request_id'):
-            apify_request['id'] = scrapy_request.meta['apify_request_id']
-
-        # Add 'uniqueKey' to the apify_request
-        if scrapy_request.meta.get('apify_request_unique_key'):
-            apify_request['uniqueKey'] = scrapy_request.meta['apify_request_unique_key']
-
-        # Serialize the Scrapy Request and store it in the apify_request.
-        #   - This process involves converting the Scrapy Request object into a dictionary, encoding it to base64,
-        #     and storing it as 'scrapy_request' within the 'userData' dictionary of the apify_request.
-        #   - The serialization process can be referenced at: https://stackoverflow.com/questions/30469575/.
-        scrapy_request_dict = scrapy_request.to_dict(spider=spider)
-        scrapy_request_dict_encoded = codecs.encode(pickle.dumps(scrapy_request_dict), 'base64').decode()
-
-        apify_request['userData']['scrapy_request'] = scrapy_request_dict_encoded
+    try:
+        apify_request = {
+            'url': scrapy_request.url,
+            'method': scrapy_request.method,
+            'payload': scrapy_request.body,
+            'userData': scrapy_request.meta.get('userData', {}),
+        }
+
+        # Convert Scrapy's headers to a dictionary and store them in the apify_request
+        if isinstance(scrapy_request.headers, Headers):
+            apify_request['headers'] = dict(scrapy_request.headers.to_unicode_dict())
+        else:
+            Actor.log.warning(f'Invalid scrapy_request.headers type, not scrapy.http.headers.Headers: {scrapy_request.headers}')
+
+        # If the request was produced by the middleware (e.g. retry or redirect), we must compute the unique key here
+        if _is_request_produced_by_middleware(scrapy_request):
+            apify_request['uniqueKey'] = compute_unique_key(
+                url=scrapy_request.url,
+                method=scrapy_request.method,
+                payload=scrapy_request.body,
+                use_extended_unique_key=True,
+            )
+        # Otherwise, we can use the unique key (also the id) from the meta
+        else:
+            if scrapy_request.meta.get('apify_request_id'):
+                apify_request['id'] = scrapy_request.meta['apify_request_id']
+
+            if scrapy_request.meta.get('apify_request_unique_key'):
+                apify_request['uniqueKey'] = scrapy_request.meta['apify_request_unique_key']
+
+        # If the request's dont_filter field is set, we must generate a random `uniqueKey` to avoid deduplication
+        # of the request in the Request Queue.
+        if scrapy_request.dont_filter:
+            apify_request['uniqueKey'] = crypto_random_object_id(8)
+
+        # Serialize the Scrapy Request and store it in the apify_request.
+        #   - This process involves converting the Scrapy Request object into a dictionary, encoding it to base64,
+        #     and storing it as 'scrapy_request' within the 'userData' dictionary of the apify_request.
+        #   - The serialization process can be referenced at: https://stackoverflow.com/questions/30469575/.
+        scrapy_request_dict = scrapy_request.to_dict(spider=spider)
+        scrapy_request_dict_encoded = codecs.encode(pickle.dumps(scrapy_request_dict), 'base64').decode()
+        apify_request['userData']['scrapy_request'] = scrapy_request_dict_encoded
+
+    except Exception as exc:
+        Actor.log.warning(f'Conversion of Scrapy request {scrapy_request} to Apify request failed; {exc}')
+        return None

     Actor.log.debug(f'[{call_id}]: scrapy_request was converted to the apify_request={apify_request}')
     return apify_request
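
A minimal caller-side sketch of the new contract (not part of the diff; the helper name enqueue_scrapy_request is hypothetical): since to_apify_request() now returns None instead of raising on bad input, callers are expected to guard the result before enqueueing, which is exactly what the ApifyScheduler change below does.

    from __future__ import annotations

    from scrapy import Request, Spider

    from apify.scrapy.requests import to_apify_request
    from apify.storages import RequestQueue


    async def enqueue_scrapy_request(rq: RequestQueue, scrapy_request: Request, spider: Spider) -> bool:
        # Conversion failures are logged by to_apify_request and signalled by a None return value.
        apify_request = to_apify_request(scrapy_request, spider=spider)
        if apify_request is None:
            spider.logger.warning(f'Skipping request that could not be converted: {scrapy_request}')
            return False
        await rq.add_request(apify_request, use_extended_unique_key=True)
        return True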
{apify-1.6.1b1 → apify-1.7.0}/src/apify/scrapy/scheduler.py
@@ -85,13 +85,22 @@ class ApifyScheduler(BaseScheduler):
             raise TypeError('self.spider must be an instance of the Spider class')

         apify_request = to_apify_request(request, spider=self.spider)
+        if apify_request is None:
+            Actor.log.error(f'Request {request} was not enqueued because it could not be converted to Apify request.')
+            return False
+
         Actor.log.debug(f'[{call_id}]: scrapy_request was transformed to apify_request (apify_request={apify_request})')

         if not isinstance(self._rq, RequestQueue):
             raise TypeError('self._rq must be an instance of the RequestQueue class')

         try:
-            result = nested_event_loop.run_until_complete(self._rq.add_request(apify_request))
+            result = nested_event_loop.run_until_complete(
+                self._rq.add_request(
+                    apify_request,
+                    use_extended_unique_key=True,
+                )
+            )
         except BaseException:
             traceback.print_exc()
             raise
{apify-1.6.1b1 → apify-1.7.0}/src/apify/storages/request_queue.py
@@ -9,7 +9,7 @@ from typing import OrderedDict as OrderedDictType
 from apify_shared.utils import ignore_docs

 from apify._crypto import crypto_random_object_id
-from apify._utils import LRUCache, budget_ow, unique_key_to_request_id
+from apify._utils import LRUCache, budget_ow, compute_unique_key, unique_key_to_request_id
 from apify.consts import REQUEST_QUEUE_HEAD_MAX_LIMIT
 from apify.log import logger
 from apify.storages.base_storage import BaseStorage
@@ -140,15 +140,43 @@ class RequestQueue(BaseStorage):
     ) -> RequestQueueCollectionClientAsync | RequestQueueCollectionClient:
         return client.request_queues()

-    async def add_request(self: RequestQueue, request: dict, *, forefront: bool = False) -> dict:
-        """Add a request to the queue.
+    async def add_request(
+        self: RequestQueue,
+        request: dict,
+        *,
+        forefront: bool = False,
+        keep_url_fragment: bool = False,
+        use_extended_unique_key: bool = False,
+    ) -> dict:
+        """Adds a request to the `RequestQueue` while managing deduplication and positioning within the queue.
+
+        The deduplication of requests relies on the `uniqueKey` field within the request dictionary. If `uniqueKey`
+        exists, it remains unchanged; if it does not, it is generated based on the request's `url`, `method`,
+        and `payload` fields. The generation of `uniqueKey` can be influenced by the `keep_url_fragment` and
+        `use_extended_unique_key` flags, which dictate whether to include the URL fragment and the request's method
+        and payload, respectively, in its computation.
+
+        The request can be added to the forefront (beginning) or the back of the queue based on the `forefront`
+        parameter. Information about the request's addition to the queue, including whether it was already present or
+        handled, is returned in an output dictionary.

         Args:
-            request (dict): The request to add to the queue
-            forefront (bool, optional): Whether to add the request to the head or the end of the queue
+            request: The request object to be added to the queue. Must include at least the `url` key.
+                Optionally it can include the `method`, `payload` and `uniqueKey` keys.

-        Returns:
-            dict: Information about the queue operation with keys `requestId`, `uniqueKey`, `wasAlreadyPresent`, `wasAlreadyHandled`.
+            forefront: If True, adds the request to the forefront of the queue; otherwise, adds it to the end.
+
+            keep_url_fragment: Determines whether the URL fragment (the part of the URL after '#') should be retained
+                in the unique key computation.
+
+            use_extended_unique_key: Determines whether to use an extended unique key, incorporating the request's
+                method and payload into the unique key computation.
+
+        Returns: A dictionary containing information about the operation, including:
+            - `requestId` (str): The ID of the request.
+            - `uniqueKey` (str): The unique key associated with the request.
+            - `wasAlreadyPresent` (bool): Indicates whether the request was already in the queue.
+            - `wasAlreadyHandled` (bool): Indicates whether the request was already processed.
         """
         budget_ow(
             request,
@@ -159,9 +187,13 @@ class RequestQueue(BaseStorage):
         self._last_activity = datetime.now(timezone.utc)

         if request.get('uniqueKey') is None:
-            # TODO: Check Request class in crawlee and replicate uniqueKey generation logic...
-            # https://github.com/apify/apify-sdk-python/issues/141
-            request['uniqueKey'] = request['url']
+            request['uniqueKey'] = compute_unique_key(
+                url=request['url'],
+                method=request.get('method', 'GET'),
+                payload=request.get('payload'),
+                keep_url_fragment=keep_url_fragment,
+                use_extended_unique_key=use_extended_unique_key,
+            )

         cache_key = unique_key_to_request_id(request['uniqueKey'])
         cached_info = self._requests_cache.get(cache_key)
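
A minimal usage sketch of the new add_request parameters (not part of the diff; it assumes the standard Actor.open_request_queue() entry point and that extra request keys such as `payload` are passed through). With use_extended_unique_key=True, two POST requests to the same URL that differ only in payload get distinct unique keys and are therefore not deduplicated against each other:

    from apify import Actor


    async def main() -> None:
        async with Actor:
            rq = await Actor.open_request_queue()

            info_1 = await rq.add_request(
                {'url': 'https://example.com/api', 'method': 'POST', 'payload': b'{"page": 1}'},
                use_extended_unique_key=True,
            )
            info_2 = await rq.add_request(
                {'url': 'https://example.com/api', 'method': 'POST', 'payload': b'{"page": 2}'},
                use_extended_unique_key=True,
            )
            # Each call reports whether the request was already present in the queue.
            Actor.log.info(f'wasAlreadyPresent: {info_1["wasAlreadyPresent"]}, {info_2["wasAlreadyPresent"]}')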
{apify-1.6.1b1 → apify-1.7.0}/src/apify.egg-info/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: apify
-Version: 1.6.1b1
+Version: 1.7.0
 Summary: Apify SDK for Python
 Author-email: "Apify Technologies s.r.o." <support@apify.com>
 License: Apache Software License