apify 2.2.2b2__py3-none-any.whl → 2.3.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.


@@ -0,0 +1,122 @@
+from __future__ import annotations
+
+import asyncio
+import threading
+from concurrent import futures
+from datetime import timedelta
+from logging import getLogger
+from typing import TYPE_CHECKING, Any
+
+if TYPE_CHECKING:
+    from collections.abc import Coroutine
+
+logger = getLogger(__name__)
+
+
+class AsyncThread:
+    """Class for running an asyncio event loop in a separate thread.
+
+    This allows running asynchronous coroutines from synchronous code by executing them on an event loop
+    that runs in its own dedicated thread.
+    """
+
+    def __init__(self) -> None:
+        self._eventloop = asyncio.new_event_loop()
+
+        # Start the event loop in a dedicated daemon thread.
+        self._thread = threading.Thread(
+            target=self._start_event_loop,
+            daemon=True,
+        )
+        self._thread.start()
+
+    def run_coro(
+        self,
+        coro: Coroutine,
+        timeout: timedelta = timedelta(seconds=60),
+    ) -> Any:
+        """Run a coroutine on an event loop running in a separate thread.
+
+        This method schedules the coroutine to run on the event loop and blocks until the coroutine completes
+        or the specified timeout is reached.
+
+        Args:
+            coro: The coroutine to run.
+            timeout: The maximum number of seconds to wait for the coroutine to finish.
+
+        Returns:
+            The result returned by the coroutine.
+
+        Raises:
+            RuntimeError: If the event loop is not running.
+            TimeoutError: If the coroutine does not complete within the timeout.
+            Exception: Any exception raised during coroutine execution.
+        """
+        if not self._eventloop.is_running():
+            raise RuntimeError(f'The coroutine {coro} cannot be executed because the event loop is not running.')
+
+        # Submit the coroutine to the event loop running in the other thread.
+        future = asyncio.run_coroutine_threadsafe(coro, self._eventloop)
+        try:
+            # Wait for the coroutine's result until the specified timeout.
+            return future.result(timeout=timeout.total_seconds())
+        except futures.TimeoutError as exc:
+            logger.exception('Coroutine execution timed out.', exc_info=exc)
+            raise
+        except Exception as exc:
+            logger.exception('Coroutine execution raised an exception.', exc_info=exc)
+            raise
+
+    def close(self, timeout: timedelta = timedelta(seconds=60)) -> None:
+        """Close the event loop and its thread gracefully.
+
+        This method cancels all pending tasks, stops the event loop, and waits for the thread to exit.
+        If the thread does not exit within the given timeout, a forced shutdown is attempted.
+
+        Args:
+            timeout: The maximum number of seconds to wait for the event loop thread to exit.
+        """
+        if self._eventloop.is_running():
+            # Cancel all pending tasks in the event loop.
+            self.run_coro(self._shutdown_tasks())
+
+            # Schedule the event loop to stop.
+            self._eventloop.call_soon_threadsafe(self._eventloop.stop)
+
+        # Wait for the event loop thread to finish execution.
+        self._thread.join(timeout=timeout.total_seconds())
+
+        # If the thread is still running after the timeout, force a shutdown.
+        if self._thread.is_alive():
+            logger.warning('Event loop thread did not exit cleanly! Forcing shutdown...')
+            self._force_exit_event_loop()
+
+    def _start_event_loop(self) -> None:
+        """Set up and run the asyncio event loop in the dedicated thread."""
+        asyncio.set_event_loop(self._eventloop)
+        try:
+            self._eventloop.run_forever()
+        finally:
+            self._eventloop.close()
+            logger.debug('Asyncio event loop has been closed.')
+
+    async def _shutdown_tasks(self) -> None:
+        """Cancel all pending tasks in the event loop."""
+        # Retrieve all tasks for the event loop, excluding the current task.
+        tasks = [task for task in asyncio.all_tasks(self._eventloop) if task is not asyncio.current_task()]
+
+        # Cancel each pending task.
+        for task in tasks:
+            task.cancel()
+
+        # Wait until all tasks have been cancelled or finished.
+        await asyncio.gather(*tasks, return_exceptions=True)
+
+    def _force_exit_event_loop(self) -> None:
+        """Forcefully shut down the event loop and its thread."""
+        try:
+            logger.info('Forced shutdown of the event loop and its thread...')
+            self._eventloop.call_soon_threadsafe(self._eventloop.stop)
+            self._thread.join(timeout=5)
+        except Exception as exc:
+            logger.exception('Exception occurred during forced event loop shutdown.', exc_info=exc)
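
For orientation, the snippet below is a minimal usage sketch of the AsyncThread class added above, written as if it sat in the same module scope; the fetch_answer coroutine and the timeout values are illustrative and not part of the package.

import asyncio
from datetime import timedelta


async def fetch_answer() -> int:
    # Illustrative coroutine standing in for real asynchronous work.
    await asyncio.sleep(0.1)
    return 42


thread = AsyncThread()
try:
    # Blocks the calling synchronous code until the coroutine finishes or the timeout elapses.
    result = thread.run_coro(fetch_answer(), timeout=timedelta(seconds=5))
    print(result)  # 42
finally:
    # Cancels pending tasks, stops the loop, and joins the background thread.
    thread.close(timeout=timedelta(seconds=10))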
@@ -0,0 +1,55 @@
+from __future__ import annotations
+
+import logging
+from typing import Any
+
+from scrapy.utils import log as scrapy_logging
+from scrapy.utils.project import get_project_settings
+
+from apify.log import ActorLogFormatter
+
+# Define logger names.
+_PRIMARY_LOGGERS = ['apify', 'apify_client', 'scrapy']
+_SUPPLEMENTAL_LOGGERS = ['filelock', 'hpack', 'httpcore', 'httpx', 'protego', 'twisted']
+_ALL_LOGGERS = _PRIMARY_LOGGERS + _SUPPLEMENTAL_LOGGERS
+
+
+def _configure_logger(name: str | None, logging_level: str, handler: logging.Handler) -> None:
+    """Clear and reconfigure the logger."""
+    logger = logging.getLogger(name)
+    logger.handlers.clear()
+    logger.setLevel(logging_level)
+
+    if name is None:  # Root logger.
+        logger.addHandler(handler)
+        logger.propagate = False
+    else:
+        logger.propagate = True
+
+
+def initialize_logging() -> None:
+    """Configure logging for Apify Actors and adjust Scrapy's logging settings."""
+    # Retrieve Scrapy project settings and determine the logging level.
+    settings = get_project_settings()
+    logging_level = settings.get('LOG_LEVEL', 'INFO')  # Default to INFO.
+
+    # Create a custom handler with the Apify log formatter.
+    handler = logging.StreamHandler()
+    handler.setFormatter(ActorLogFormatter(include_logger_name=True))
+
+    # Configure the root logger and all other defined loggers.
+    for logger_name in [None, *_ALL_LOGGERS]:
+        _configure_logger(logger_name, logging_level, handler)
+
+    # Set the 'httpx' logger to a less verbose level.
+    logging.getLogger('httpx').setLevel('WARNING')
+
+    # Monkey-patch Scrapy's logging configuration to re-apply our settings.
+    original_configure_logging = scrapy_logging.configure_logging
+
+    def new_configure_logging(*args: Any, **kwargs: Any) -> None:
+        original_configure_logging(*args, **kwargs)
+        for logger_name in [None, *_ALL_LOGGERS]:
+            _configure_logger(logger_name, logging_level, handler)
+
+    scrapy_logging.configure_logging = new_configure_logging
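
The sketch below shows where a call to the initialize_logging function defined above would typically sit in an Actor's entry point; this is an assumption about usage rather than code from this release, and the spider class is a placeholder.

from scrapy.crawler import CrawlerProcess
from scrapy.utils.project import get_project_settings

# Call before Scrapy sets up its own logging: the monkey-patched
# scrapy_logging.configure_logging re-applies the Apify handler and levels
# even if Scrapy reconfigures logging later.
initialize_logging()

process = CrawlerProcess(get_project_settings(), install_root_handler=False)
# process.crawl(TitleSpider)  # TitleSpider is a hypothetical spider class.
# process.start()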
@@ -3,19 +3,15 @@ from __future__ import annotations
 from typing import TYPE_CHECKING
 from urllib.parse import ParseResult, urlparse
 
-try:
-    if TYPE_CHECKING:
-        from scrapy import Request, Spider
-        from scrapy.crawler import Crawler
-    from scrapy.core.downloader.handlers.http11 import TunnelError
-    from scrapy.exceptions import NotConfigured
-except ImportError as exc:
-    raise ImportError(
-        'To use this module, you need to install the "scrapy" extra. Run "pip install apify[scrapy]".',
-    ) from exc
+from scrapy.core.downloader.handlers.http11 import TunnelError
+from scrapy.exceptions import NotConfigured
 
 from apify import Actor, ProxyConfiguration
-from apify.scrapy.utils import get_basic_auth_header
+from apify.scrapy import get_basic_auth_header
+
+if TYPE_CHECKING:
+    from scrapy import Request, Spider
+    from scrapy.crawler import Crawler
 
 
 class ApifyHttpProxyMiddleware:
@@ -51,7 +47,7 @@ class ApifyHttpProxyMiddleware:
         proxy_settings: dict | None = crawler.settings.get('APIFY_PROXY_SETTINGS')
 
         if proxy_settings is None:
-            Actor.log.warning(
+            Actor.log.info(
                 'ApifyHttpProxyMiddleware is not going to be used. Object "proxyConfiguration" is probably missing '
                 ' in the Actor input.'
            )
@@ -60,7 +56,7 @@ class ApifyHttpProxyMiddleware:
         use_apify_proxy = proxy_settings.get('useApifyProxy', False)
 
         if use_apify_proxy is not True:
-            Actor.log.warning(
+            Actor.log.info(
                 'ApifyHttpProxyMiddleware is not going to be used. Actor input field '
                 '"proxyConfiguration.useApifyProxy" is set to False.'
            )
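
For reference, a sketch of the APIFY_PROXY_SETTINGS object the middleware reads from Scrapy settings; the values mirror the Actor input's proxyConfiguration, and the apifyProxyGroups key is an assumption shown only for illustration.

# settings.py (illustrative)
APIFY_PROXY_SETTINGS = {
    'useApifyProxy': True,                # With False or a missing object, the middleware now logs at INFO and is skipped.
    'apifyProxyGroups': ['RESIDENTIAL'],  # Optional proxy groups (assumed key, not shown in this diff).
}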
@@ -1,19 +1,17 @@
 from __future__ import annotations
 
+from logging import getLogger
 from typing import TYPE_CHECKING
 
 from itemadapter.adapter import ItemAdapter
 
-try:
-    if TYPE_CHECKING:
-        from scrapy import Item, Spider
-except ImportError as exc:
-    raise ImportError(
-        'To use this module, you need to install the "scrapy" extra. Run "pip install apify[scrapy]".',
-    ) from exc
-
 from apify import Actor
 
+if TYPE_CHECKING:
+    from scrapy import Item, Spider
+
+logger = getLogger(__name__)
+
 
 class ActorDatasetPushPipeline:
     """A Scrapy pipeline for pushing items to an Actor's default dataset.
@@ -28,6 +26,6 @@ class ActorDatasetPushPipeline:
     ) -> Item:
         """Pushes the provided Scrapy item to the Actor's default dataset."""
         item_dict = ItemAdapter(item).asdict()
-        Actor.log.debug(f'Pushing item={item_dict} produced by spider={spider} to the dataset.')
+        logger.debug(f'Pushing item={item_dict} produced by spider={spider} to the dataset.')
         await Actor.push_data(item_dict)
         return item
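
A sketch of how a pipeline like ActorDatasetPushPipeline is typically enabled in a Scrapy project; the import path and priority value are assumptions based on the package's public Scrapy integration, not taken from this diff.

# settings.py (illustrative)
ITEM_PIPELINES = {
    'apify.scrapy.pipelines.ActorDatasetPushPipeline': 1000,  # Assumed path; a high value runs it last, so fully processed items reach the dataset.
}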
apify/scrapy/requests.py CHANGED
@@ -2,37 +2,21 @@ from __future__ import annotations
 
 import codecs
 import pickle
+from logging import getLogger
 from typing import Any, cast
 
-from apify_shared.utils import ignore_docs
+from scrapy import Request as ScrapyRequest
+from scrapy import Spider
+from scrapy.http.headers import Headers
+from scrapy.utils.request import request_from_dict
 
-try:
-    from scrapy import Request, Spider
-    from scrapy.http.headers import Headers
-    from scrapy.utils.request import request_from_dict
-except ImportError as exc:
-    raise ImportError(
-        'To use this module, you need to install the "scrapy" extra. Run "pip install apify[scrapy]".',
-    ) from exc
-
-from crawlee import Request as CrawleeRequest
+from crawlee import Request as ApifyRequest
 from crawlee._types import HttpHeaders
-from crawlee._utils.crypto import crypto_random_object_id
-from crawlee._utils.requests import compute_unique_key, unique_key_to_request_id
-
-from apify import Actor
-
 
-def _is_request_produced_by_middleware(scrapy_request: Request) -> bool:
-    """Returns True if the Scrapy request was produced by a downloader middleware, otherwise False.
-
-    Works for RetryMiddleware and RedirectMiddleware.
-    """
-    return bool(scrapy_request.meta.get('redirect_times')) or bool(scrapy_request.meta.get('retry_times'))
+logger = getLogger(__name__)
 
 
-@ignore_docs
-def to_apify_request(scrapy_request: Request, spider: Spider) -> CrawleeRequest | None:
+def to_apify_request(scrapy_request: ScrapyRequest, spider: Spider) -> ApifyRequest | None:
     """Convert a Scrapy request to an Apify request.
 
     Args:
@@ -42,54 +26,45 @@ def to_apify_request(scrapy_request: Request, spider: Spider) -> CrawleeRequest
     Returns:
         The converted Apify request if the conversion was successful, otherwise None.
     """
-    if not isinstance(scrapy_request, Request):
-        Actor.log.warning(  # type: ignore[unreachable]
-            'Failed to convert to Apify request: Scrapy request must be a Request instance.'
-        )
+    if not isinstance(scrapy_request, ScrapyRequest):
+        logger.warning('Failed to convert to Apify request: Scrapy request must be a ScrapyRequest instance.')  # type: ignore[unreachable]
         return None
 
-    call_id = crypto_random_object_id(8)
-    Actor.log.debug(f'[{call_id}]: to_apify_request was called (scrapy_request={scrapy_request})...')
+    logger.debug(f'to_apify_request was called (scrapy_request={scrapy_request})...')
+
+    # Configuration to behave as similarly as possible to Scrapy's default RFPDupeFilter.
+    request_kwargs: dict[str, Any] = {
+        'url': scrapy_request.url,
+        'method': scrapy_request.method,
+        'payload': scrapy_request.body,
+        'use_extended_unique_key': True,
+        'keep_url_fragment': False,
+    }
 
     try:
-        if _is_request_produced_by_middleware(scrapy_request):
-            unique_key = compute_unique_key(
-                url=scrapy_request.url,
-                method=scrapy_request.method,  # type: ignore[arg-type]  # str vs literal
-                payload=scrapy_request.body,
-                use_extended_unique_key=True,
-            )
-        elif scrapy_request.dont_filter:
-            unique_key = crypto_random_object_id(8)
-        elif scrapy_request.meta.get('apify_request_unique_key'):
-            unique_key = scrapy_request.meta['apify_request_unique_key']
+        if scrapy_request.dont_filter:
+            request_kwargs['always_enqueue'] = True
         else:
-            unique_key = crypto_random_object_id(8)
+            if scrapy_request.meta.get('apify_request_unique_key'):
+                request_kwargs['unique_key'] = scrapy_request.meta['apify_request_unique_key']
 
-        if scrapy_request.meta.get('apify_request_id'):
-            request_id = scrapy_request.meta['apify_request_id']
-        else:
-            request_id = unique_key_to_request_id(unique_key)
-
-        apify_request = CrawleeRequest(
-            url=scrapy_request.url,
-            method=scrapy_request.method,
-            payload=scrapy_request.body,
-            user_data=scrapy_request.meta.get('userData', {}),
-            unique_key=unique_key,
-            id=request_id,
-        )
+            if scrapy_request.meta.get('apify_request_id'):
+                request_kwargs['id'] = scrapy_request.meta['apify_request_id']
+
+        request_kwargs['user_data'] = scrapy_request.meta.get('userData', {})
 
         # Convert Scrapy's headers to a HttpHeaders and store them in the apify_request
         if isinstance(scrapy_request.headers, Headers):
-            apify_request.headers = HttpHeaders(dict(scrapy_request.headers.to_unicode_dict()))
+            request_kwargs['headers'] = HttpHeaders(dict(scrapy_request.headers.to_unicode_dict()))
         else:
-            Actor.log.warning(  # type: ignore[unreachable]
+            logger.warning(  # type: ignore[unreachable]
                 f'Invalid scrapy_request.headers type, not scrapy.http.headers.Headers: {scrapy_request.headers}'
            )
 
-        # Serialize the Scrapy Request and store it in the apify_request.
-        # - This process involves converting the Scrapy Request object into a dictionary, encoding it to base64,
+        apify_request = ApifyRequest.from_url(**request_kwargs)
+
+        # Serialize the Scrapy request and store it in the apify_request.
+        # - This process involves converting the Scrapy request object into a dictionary, encoding it to base64,
         #   and storing it as 'scrapy_request' within the 'userData' dictionary of the apify_request.
         # - The serialization process can be referenced at: https://stackoverflow.com/questions/30469575/.
         scrapy_request_dict = scrapy_request.to_dict(spider=spider)
@@ -97,15 +72,14 @@ def to_apify_request(scrapy_request: Request, spider: Spider) -> CrawleeRequest
         apify_request.user_data['scrapy_request'] = scrapy_request_dict_encoded
 
     except Exception as exc:
-        Actor.log.warning(f'Conversion of Scrapy request {scrapy_request} to Apify request failed; {exc}')
+        logger.warning(f'Conversion of Scrapy request {scrapy_request} to Apify request failed; {exc}')
         return None
 
-    Actor.log.debug(f'[{call_id}]: scrapy_request was converted to the apify_request={apify_request}')
+    logger.debug(f'scrapy_request was converted to the apify_request={apify_request}')
     return apify_request
 
 
-@ignore_docs
-def to_scrapy_request(apify_request: CrawleeRequest, spider: Spider) -> Request:
+def to_scrapy_request(apify_request: ApifyRequest, spider: Spider) -> ScrapyRequest:
     """Convert an Apify request to a Scrapy request.
 
     Args:
@@ -113,24 +87,23 @@ def to_scrapy_request(apify_request: CrawleeRequest, spider: Spider) -> Request:
         spider: The Scrapy spider that the request is associated with.
 
     Raises:
-        TypeError: If the apify_request is not a crawlee request.
-        ValueError: If the apify_request does not contain the required keys.
+        TypeError: If the Apify request is not an instance of the `ApifyRequest` class.
+        ValueError: If the Apify request does not contain the required keys.
 
     Returns:
         The converted Scrapy request.
     """
-    if not isinstance(cast(Any, apify_request), CrawleeRequest):
-        raise TypeError('apify_request must be a crawlee.Request instance')
+    if not isinstance(cast(Any, apify_request), ApifyRequest):
+        raise TypeError('apify_request must be a crawlee.Request instance')
 
-    call_id = crypto_random_object_id(8)
-    Actor.log.debug(f'[{call_id}]: to_scrapy_request was called (apify_request={apify_request})...')
+    logger.debug(f'to_scrapy_request was called (apify_request={apify_request})...')
 
     # If the apify_request comes from Scrapy
     if 'scrapy_request' in apify_request.user_data:
-        # Deserialize the Scrapy Request from the apify_request.
+        # Deserialize the Scrapy request from the apify_request.
         # - This process involves decoding the base64-encoded request data and reconstructing
-        #   the Scrapy Request object from its dictionary representation.
-        Actor.log.debug(f'[{call_id}]: Restoring the Scrapy Request from the apify_request...')
+        #   the Scrapy request object from its dictionary representation.
+        logger.debug('Restoring the Scrapy request from the apify_request...')
 
         scrapy_request_dict_encoded = apify_request.user_data['scrapy_request']
         if not isinstance(scrapy_request_dict_encoded, str):
@@ -141,10 +114,10 @@ def to_scrapy_request(apify_request: CrawleeRequest, spider: Spider) -> Request:
             raise TypeError('scrapy_request_dict must be a dictionary')
 
         scrapy_request = request_from_dict(scrapy_request_dict, spider=spider)
-        if not isinstance(scrapy_request, Request):
-            raise TypeError('scrapy_request must be an instance of the Request class')
+        if not isinstance(scrapy_request, ScrapyRequest):
+            raise TypeError('scrapy_request must be an instance of the ScrapyRequest class')
 
-        Actor.log.debug(f'[{call_id}]: Scrapy Request successfully reconstructed (scrapy_request={scrapy_request})...')
+        logger.debug(f'Scrapy request successfully reconstructed (scrapy_request={scrapy_request})...')
 
         # Update the meta field with the meta field from the apify_request
         meta = scrapy_request.meta or {}
@@ -152,11 +125,11 @@ def to_scrapy_request(apify_request: CrawleeRequest, spider: Spider) -> Request:
         # scrapy_request.meta is a property, so we have to set it like this
         scrapy_request._meta = meta  # noqa: SLF001
 
-    # If the apify_request comes directly from the Request Queue, typically start URLs
+    # If the apify_request does not come from Scrapy, typically start URLs.
     else:
-        Actor.log.debug(f'[{call_id}]: gonna create a new Scrapy Request (cannot be restored)')
+        logger.debug('Gonna create a new Scrapy request (cannot be restored)')
 
-        scrapy_request = Request(
+        scrapy_request = ScrapyRequest(
             url=apify_request.url,
             method=apify_request.method,
             meta={
@@ -173,5 +146,5 @@ def to_scrapy_request(apify_request: CrawleeRequest, spider: Spider) -> Request:
     if apify_request.user_data:
         scrapy_request.meta['userData'] = apify_request.user_data
 
-    Actor.log.debug(f'[{call_id}]: an apify_request was converted to the scrapy_request={scrapy_request}')
+    logger.debug(f'an apify_request was converted to the scrapy_request={scrapy_request}')
     return scrapy_request
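
To tie the two helpers in apify/scrapy/requests.py together, here is a minimal round-trip sketch; the spider and URL are placeholders, and no Actor context is needed because the converters now log through the module logger instead of Actor.log.

from scrapy import Request, Spider

from apify.scrapy.requests import to_apify_request, to_scrapy_request


class DummySpider(Spider):
    # Placeholder spider; to_dict()/request_from_dict() only need it to resolve callbacks.
    name = 'dummy'


spider = DummySpider()
scrapy_request = Request(url='https://example.com', meta={'userData': {'label': 'start'}})

# Scrapy -> Apify: builds a crawlee Request via Request.from_url(**request_kwargs) and stores
# the serialized Scrapy request under user_data['scrapy_request'].
apify_request = to_apify_request(scrapy_request, spider=spider)
assert apify_request is not None

# Apify -> Scrapy: restores the original request (URL, method, headers, meta) from user_data.
restored = to_scrapy_request(apify_request, spider=spider)
assert restored.url == scrapy_request.url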