crawlee 1.1.1b1__py3-none-any.whl → 1.2.1b7__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of crawlee might be problematic.

Files changed (37)
  1. crawlee/__init__.py +2 -1
  2. crawlee/_request.py +29 -10
  3. crawlee/_types.py +42 -2
  4. crawlee/_utils/context.py +2 -2
  5. crawlee/_utils/file.py +7 -0
  6. crawlee/_utils/recurring_task.py +2 -1
  7. crawlee/_utils/time.py +41 -1
  8. crawlee/crawlers/__init__.py +2 -1
  9. crawlee/crawlers/_abstract_http/__init__.py +2 -1
  10. crawlee/crawlers/_abstract_http/_abstract_http_crawler.py +52 -14
  11. crawlee/crawlers/_adaptive_playwright/_adaptive_playwright_crawler.py +10 -33
  12. crawlee/crawlers/_adaptive_playwright/_adaptive_playwright_crawling_context.py +6 -2
  13. crawlee/crawlers/_basic/_basic_crawler.py +135 -118
  14. crawlee/crawlers/_basic/_context_utils.py +24 -0
  15. crawlee/crawlers/_basic/_logging_utils.py +23 -4
  16. crawlee/crawlers/_beautifulsoup/_beautifulsoup_crawler.py +2 -2
  17. crawlee/crawlers/_parsel/_parsel_crawler.py +2 -2
  18. crawlee/crawlers/_playwright/_playwright_crawler.py +58 -17
  19. crawlee/crawlers/_playwright/_playwright_http_client.py +7 -1
  20. crawlee/crawlers/_playwright/_playwright_pre_nav_crawling_context.py +4 -1
  21. crawlee/crawlers/_playwright/_types.py +12 -2
  22. crawlee/errors.py +4 -0
  23. crawlee/events/_event_manager.py +1 -3
  24. crawlee/http_clients/_base.py +4 -0
  25. crawlee/http_clients/_curl_impersonate.py +12 -0
  26. crawlee/http_clients/_httpx.py +16 -6
  27. crawlee/http_clients/_impit.py +25 -10
  28. crawlee/router.py +13 -3
  29. crawlee/storage_clients/_file_system/_dataset_client.py +2 -2
  30. crawlee/storage_clients/_file_system/_key_value_store_client.py +3 -3
  31. crawlee/storage_clients/_file_system/_request_queue_client.py +3 -3
  32. crawlee/storage_clients/_sql/_storage_client.py +0 -9
  33. {crawlee-1.1.1b1.dist-info → crawlee-1.2.1b7.dist-info}/METADATA +10 -16
  34. {crawlee-1.1.1b1.dist-info → crawlee-1.2.1b7.dist-info}/RECORD +37 -36
  35. {crawlee-1.1.1b1.dist-info → crawlee-1.2.1b7.dist-info}/WHEEL +1 -1
  36. {crawlee-1.1.1b1.dist-info → crawlee-1.2.1b7.dist-info}/entry_points.txt +0 -0
  37. {crawlee-1.1.1b1.dist-info → crawlee-1.2.1b7.dist-info}/licenses/LICENSE +0 -0
crawlee/crawlers/_playwright/_playwright_crawler.py CHANGED
@@ -3,19 +3,25 @@ from __future__ import annotations
  import asyncio
  import logging
  import warnings
+ from datetime import timedelta
  from functools import partial
  from typing import TYPE_CHECKING, Any, Generic, Literal

+ import playwright.async_api
  from more_itertools import partition
  from pydantic import ValidationError
  from typing_extensions import NotRequired, TypedDict, TypeVar

  from crawlee import service_locator
- from crawlee._request import Request, RequestOptions
- from crawlee._types import ConcurrencySettings
+ from crawlee._request import Request, RequestOptions, RequestState
+ from crawlee._types import (
+     BasicCrawlingContext,
+     ConcurrencySettings,
+ )
  from crawlee._utils.blocked import RETRY_CSS_SELECTORS
  from crawlee._utils.docs import docs_group
  from crawlee._utils.robots import RobotsTxtFile
+ from crawlee._utils.time import SharedTimeout
  from crawlee._utils.urls import to_absolute_url_iterator
  from crawlee.browsers import BrowserPool
  from crawlee.crawlers._basic import BasicCrawler, BasicCrawlerOptions, ContextPipeline
@@ -29,6 +35,7 @@ from crawlee.statistics import StatisticsState
  from ._playwright_crawling_context import PlaywrightCrawlingContext
  from ._playwright_http_client import PlaywrightHttpClient, browser_page_context
  from ._playwright_pre_nav_crawling_context import PlaywrightPreNavCrawlingContext
+ from ._types import GotoOptions
  from ._utils import block_requests, infinite_scroll

  TCrawlingContext = TypeVar('TCrawlingContext', bound=PlaywrightCrawlingContext)
@@ -44,7 +51,6 @@ if TYPE_CHECKING:

      from crawlee import RequestTransformAction
      from crawlee._types import (
-         BasicCrawlingContext,
          EnqueueLinksKwargs,
          ExtractLinksFunction,
          HttpHeaders,
@@ -103,9 +109,11 @@ class PlaywrightCrawler(BasicCrawler[PlaywrightCrawlingContext, StatisticsState]
          user_data_dir: str | Path | None = None,
          browser_launch_options: Mapping[str, Any] | None = None,
          browser_new_context_options: Mapping[str, Any] | None = None,
+         goto_options: GotoOptions | None = None,
          fingerprint_generator: FingerprintGenerator | None | Literal['default'] = 'default',
          headless: bool | None = None,
          use_incognito_pages: bool | None = None,
+         navigation_timeout: timedelta | None = None,
          **kwargs: Unpack[BasicCrawlerOptions[PlaywrightCrawlingContext, StatisticsState]],
      ) -> None:
          """Initialize a new instance.
@@ -134,12 +142,18 @@ class PlaywrightCrawler(BasicCrawler[PlaywrightCrawlingContext, StatisticsState]
              use_incognito_pages: By default pages share the same browser context. If set to True each page uses its
                  own context that is destroyed once the page is closed or crashes.
                  This option should not be used if `browser_pool` is provided.
+             navigation_timeout: Timeout for navigation (the process between opening a Playwright page and calling
+                 the request handler)
+             goto_options: Additional options to pass to Playwright's `Page.goto()` method. The `timeout` option is
+                 not supported, use `navigation_timeout` instead.
              kwargs: Additional keyword arguments to pass to the underlying `BasicCrawler`.
          """
          configuration = kwargs.pop('configuration', None)
          if configuration is not None:
              service_locator.set_configuration(configuration)

+         self._shared_navigation_timeouts: dict[int, SharedTimeout] = {}
+
          if browser_pool:
              # Raise an exception if browser_pool is provided together with other browser-related arguments.
              if any(
@@ -202,6 +216,9 @@ class PlaywrightCrawler(BasicCrawler[PlaywrightCrawlingContext, StatisticsState]
          if 'concurrency_settings' not in kwargs or kwargs['concurrency_settings'] is None:
              kwargs['concurrency_settings'] = ConcurrencySettings(desired_concurrency=1)

+         self._navigation_timeout = navigation_timeout or timedelta(minutes=1)
+         self._goto_options = goto_options or GotoOptions()
+
          super().__init__(**kwargs)

      async def _open_page(
@@ -226,12 +243,21 @@ class PlaywrightCrawler(BasicCrawler[PlaywrightCrawlingContext, StatisticsState]
              log=context.log,
              page=crawlee_page.page,
              block_requests=partial(block_requests, page=crawlee_page.page),
+             goto_options=GotoOptions(**self._goto_options),
          )

-         async with browser_page_context(crawlee_page.page):
-             for hook in self._pre_navigation_hooks:
-                 await hook(pre_navigation_context)
-             yield pre_navigation_context
+         context_id = id(pre_navigation_context)
+         self._shared_navigation_timeouts[context_id] = SharedTimeout(self._navigation_timeout)
+
+         try:
+             async with browser_page_context(crawlee_page.page):
+                 for hook in self._pre_navigation_hooks:
+                     async with self._shared_navigation_timeouts[context_id]:
+                         await hook(pre_navigation_context)
+
+                 yield pre_navigation_context
+         finally:
+             self._shared_navigation_timeouts.pop(context_id, None)

      def _prepare_request_interceptor(
          self,
@@ -266,6 +292,7 @@ class PlaywrightCrawler(BasicCrawler[PlaywrightCrawlingContext, StatisticsState]
          Raises:
              ValueError: If the browser pool is not initialized.
              SessionError: If the URL cannot be loaded by the browser.
+             TimeoutError: If navigation does not succeed within the navigation timeout.

          Yields:
              The enhanced crawling context with the Playwright-specific features (page, response, enqueue_links,
@@ -297,7 +324,14 @@ class PlaywrightCrawler(BasicCrawler[PlaywrightCrawlingContext, StatisticsState]
          # Set route_handler only for current request
          await context.page.route(context.request.url, route_handler)

-         response = await context.page.goto(context.request.url)
+         try:
+             async with self._shared_navigation_timeouts[id(context)] as remaining_timeout:
+                 response = await context.page.goto(
+                     context.request.url, timeout=remaining_timeout.total_seconds() * 1000, **context.goto_options
+                 )
+             context.request.state = RequestState.AFTER_NAV
+         except playwright.async_api.TimeoutError as exc:
+             raise asyncio.TimeoutError from exc

          if response is None:
              raise SessionError(f'Failed to load the URL: {context.request.url}')
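The `SharedTimeout` helper used above comes from the new `crawlee/_utils/time.py` code (+41 lines, not expanded in this diff). From these two call sites it behaves as a reusable async context manager that meters one navigation budget across the pre-navigation hooks and the `goto()` call, handing back the remaining time on entry. A rough, illustrative sketch of that contract (not the shipped implementation):

    import asyncio
    from datetime import timedelta

    class SharedTimeoutSketch:
        """Illustrative only: one countdown shared by several `async with` sections."""

        def __init__(self, budget: timedelta) -> None:
            self._remaining = budget.total_seconds()
            self._entered_at: float | None = None

        async def __aenter__(self) -> timedelta:
            if self._remaining <= 0:
                raise asyncio.TimeoutError  # budget already spent by earlier sections
            self._entered_at = asyncio.get_running_loop().time()
            return timedelta(seconds=self._remaining)  # what `remaining_timeout` receives above

        async def __aexit__(self, *exc_info: object) -> None:
            # Subtract whatever time this section consumed from the shared budget.
            assert self._entered_at is not None
            self._remaining -= asyncio.get_running_loop().time() - self._entered_at

The real helper presumably also cancels the wrapped awaitable once the budget is exhausted (the pre-navigation hooks receive no explicit timeout argument), which this sketch omits.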
@@ -324,6 +358,7 @@ class PlaywrightCrawler(BasicCrawler[PlaywrightCrawlingContext, StatisticsState]
              extract_links=extract_links,
              enqueue_links=self._create_enqueue_links_function(context, extract_links),
              block_requests=partial(block_requests, page=context.page),
+             goto_options=context.goto_options,
          )

          if context.session:
@@ -364,14 +399,18 @@ class PlaywrightCrawler(BasicCrawler[PlaywrightCrawlingContext, StatisticsState]
          robots_txt_file = await self._get_robots_txt_file_for_url(context.request.url)

          kwargs.setdefault('strategy', 'same-hostname')
+         strategy = kwargs.get('strategy', 'same-hostname')

          elements = await context.page.query_selector_all(selector)
          links_iterator: Iterator[str] = iter(
              [url for element in elements if (url := await element.get_attribute('href')) is not None]
          )
-         links_iterator = to_absolute_url_iterator(
-             context.request.loaded_url or context.request.url, links_iterator, logger=context.log
-         )
+
+         # Get base URL from <base> tag if present
+         extracted_base_url = await context.page.evaluate('document.baseURI')
+         base_url: str = extracted_base_url or context.request.loaded_url or context.request.url
+
+         links_iterator = to_absolute_url_iterator(base_url, links_iterator, logger=context.log)

          if robots_txt_file:
              skipped, links_iterator = partition(lambda url: robots_txt_file.is_allowed(url), links_iterator)
@@ -379,17 +418,19 @@ class PlaywrightCrawler(BasicCrawler[PlaywrightCrawlingContext, StatisticsState]
              skipped = iter([])

          for url in self._enqueue_links_filter_iterator(links_iterator, context.request.url, **kwargs):
-             request_option = RequestOptions({'url': url, 'user_data': {**base_user_data}, 'label': label})
+             request_options = RequestOptions(
+                 url=url, user_data={**base_user_data}, label=label, enqueue_strategy=strategy
+             )

              if transform_request_function:
-                 transform_request_option = transform_request_function(request_option)
-                 if transform_request_option == 'skip':
+                 transform_request_options = transform_request_function(request_options)
+                 if transform_request_options == 'skip':
                      continue
-                 if transform_request_option != 'unchanged':
-                     request_option = transform_request_option
+                 if transform_request_options != 'unchanged':
+                     request_options = transform_request_options

              try:
-                 request = Request.from_url(**request_option)
+                 request = Request.from_url(**request_options)
              except ValidationError as exc:
                  context.log.debug(
                      f'Skipping URL "{url}" due to invalid format: {exc}. '
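Taken together, the new constructor arguments are meant to be used like this; a minimal sketch (the URL and handler body are placeholders, and both arguments fall back to the defaults shown above when omitted):

    import asyncio
    from datetime import timedelta

    from crawlee.crawlers import PlaywrightCrawler, PlaywrightCrawlingContext

    async def main() -> None:
        crawler = PlaywrightCrawler(
            # One shared budget for the whole navigation phase (pre-navigation hooks
            # plus page.goto); defaults to one minute when omitted.
            navigation_timeout=timedelta(seconds=30),
            # GotoOptions is a TypedDict, so a plain dict literal is accepted here;
            # `timeout` itself is deliberately not a supported key.
            goto_options={'wait_until': 'domcontentloaded'},
        )

        @crawler.router.default_handler
        async def handler(context: PlaywrightCrawlingContext) -> None:
            context.log.info(f'Visited {context.request.url}')

        await crawler.run(['https://example.com'])

    if __name__ == '__main__':
        asyncio.run(main())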
crawlee/crawlers/_playwright/_playwright_http_client.py CHANGED
@@ -59,6 +59,7 @@ class PlaywrightHttpClient(HttpClient):
          session: Session | None = None,
          proxy_info: ProxyInfo | None = None,
          statistics: Statistics | None = None,
+         timeout: timedelta | None = None,
      ) -> HttpCrawlingResult:
          raise NotImplementedError('The `crawl` method should not be used for `PlaywrightHttpClient`')

@@ -72,6 +73,7 @@ class PlaywrightHttpClient(HttpClient):
          payload: HttpPayload | None = None,
          session: Session | None = None,
          proxy_info: ProxyInfo | None = None,
+         timeout: timedelta | None = None,
      ) -> HttpResponse:
          # `proxy_info` are not used because `APIRequestContext` inherits the proxy from `BrowserContext`
          # TODO: Use `session` to restore all the fingerprint headers according to the `BrowserContext`, after resolved
@@ -87,7 +89,11 @@ class PlaywrightHttpClient(HttpClient):

          # Proxies appropriate to the browser context are used
          response = await browser_context.request.fetch(
-             url_or_request=url, method=method.lower(), headers=dict(headers) if headers else None, data=payload
+             url_or_request=url,
+             method=method.lower(),
+             headers=dict(headers) if headers else None,
+             data=payload,
+             timeout=timeout.total_seconds() if timeout else None,
          )

          return await PlaywrightHttpResponse.from_playwright_response(response, protocol='')
crawlee/crawlers/_playwright/_playwright_pre_nav_crawling_context.py CHANGED
@@ -9,7 +9,7 @@ from crawlee._utils.docs import docs_group
  if TYPE_CHECKING:
      from playwright.async_api import Page

-     from ._types import BlockRequestsFunction
+     from ._types import BlockRequestsFunction, GotoOptions


  @dataclass(frozen=True)
@@ -26,6 +26,9 @@ class PlaywrightPreNavCrawlingContext(BasicCrawlingContext):
      block_requests: BlockRequestsFunction
      """Blocks network requests matching specified URL patterns."""

+     goto_options: GotoOptions
+     """Additional options to pass to Playwright's `Page.goto()` method. The `timeout` option is not supported."""
+
      async def get_snapshot(self) -> PageSnapshot:
          """Get snapshot of crawled page."""
          html = None
crawlee/crawlers/_playwright/_types.py CHANGED
@@ -1,7 +1,7 @@
  from __future__ import annotations

  from dataclasses import dataclass
- from typing import TYPE_CHECKING, Protocol
+ from typing import TYPE_CHECKING, Literal, Protocol, TypedDict

  from crawlee import HttpHeaders
  from crawlee._utils.docs import docs_group
@@ -10,7 +10,7 @@ if TYPE_CHECKING:
      from collections.abc import AsyncGenerator

      from playwright.async_api import APIResponse, Response
-     from typing_extensions import Self
+     from typing_extensions import NotRequired, Self


  @docs_group('Functions')
@@ -58,3 +58,13 @@ class PlaywrightHttpResponse:
          _content = await response.body()

          return cls(http_version=http_version, status_code=status_code, headers=headers, _content=_content)
+
+
+ class GotoOptions(TypedDict):
+     """Keyword arguments for Playwright's `Page.goto()` method."""
+
+     wait_until: NotRequired[Literal['domcontentloaded', 'load', 'networkidle', 'commit']]
+     """When to consider operation succeeded, defaults to 'load' event."""
+
+     referer: NotRequired[str]
+     """Referer header value."""
crawlee/errors.py CHANGED
@@ -29,6 +29,10 @@ class UserDefinedErrorHandlerError(Exception):
      """Wraps an exception thrown from an user-defined error handler."""


+ class UserHandlerTimeoutError(UserDefinedErrorHandlerError):
+     """Raised when a router fails due to user raised timeout. This is different from user-defined handler timing out."""
+
+
  @docs_group('Errors')
  class SessionError(Exception):
      """Errors of `SessionError` type will trigger a session rotation.
crawlee/events/_event_manager.py CHANGED
@@ -174,11 +174,9 @@ class EventManager:
              # to avoid blocking the event loop
              coro = (
                  listener(*bound_args.args, **bound_args.kwargs)
-                 if asyncio.iscoroutinefunction(listener)
+                 if inspect.iscoroutinefunction(listener)
                  else asyncio.to_thread(cast('Callable[..., None]', listener), *bound_args.args, **bound_args.kwargs)
              )
-             # Note: use `asyncio.iscoroutinefunction` rather then `inspect.iscoroutinefunction` since it works with
-             # unittests.mock.AsyncMock. See https://github.com/python/cpython/issues/84753.

              listener_task = asyncio.create_task(coro, name=f'Task-{event.value}-{listener.__name__}')
              self._listener_tasks.add(listener_task)
crawlee/http_clients/_base.py CHANGED
@@ -104,6 +104,7 @@ class HttpClient(ABC):
          session: Session | None = None,
          proxy_info: ProxyInfo | None = None,
          statistics: Statistics | None = None,
+         timeout: timedelta | None = None,
      ) -> HttpCrawlingResult:
          """Perform the crawling for a given request.

@@ -114,6 +115,7 @@ class HttpClient(ABC):
              session: The session associated with the request.
              proxy_info: The information about the proxy to be used.
              statistics: The statistics object to register status codes.
+             timeout: Maximum time allowed to process the request.

          Raises:
              ProxyError: Raised if a proxy-related error occurs.
@@ -132,6 +134,7 @@ class HttpClient(ABC):
          payload: HttpPayload | None = None,
          session: Session | None = None,
          proxy_info: ProxyInfo | None = None,
+         timeout: timedelta | None = None,
      ) -> HttpResponse:
          """Send an HTTP request via the client.

@@ -144,6 +147,7 @@ class HttpClient(ABC):
              payload: The data to be sent as the request body.
              session: The session associated with the request.
              proxy_info: The information about the proxy to be used.
+             timeout: Maximum time allowed to process the request.

          Raises:
              ProxyError: Raised if a proxy-related error occurs.
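Both abstract methods now take the same optional `timeout`, and the per-client hunks below translate each backend's native timeout exception into `asyncio.TimeoutError`, so callers handle one exception type regardless of the client in use. A hedged sketch (the client choice and URL are placeholders):

    import asyncio
    from datetime import timedelta

    from crawlee.http_clients import ImpitHttpClient

    async def fetch_with_budget() -> None:
        client = ImpitHttpClient()
        try:
            response = await client.send_request(
                'https://example.com',
                timeout=timedelta(seconds=10),
            )
        except asyncio.TimeoutError:
            # Raised uniformly whether the backend is impit, httpx or curl_cffi.
            print('request timed out')
        else:
            print(response.status_code)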
crawlee/http_clients/_curl_impersonate.py CHANGED
@@ -1,5 +1,6 @@
  from __future__ import annotations

+ import asyncio
  from contextlib import asynccontextmanager
  from typing import TYPE_CHECKING, Any

@@ -10,6 +11,7 @@ from curl_cffi.requests.cookies import Cookies as CurlCookies
  from curl_cffi.requests.cookies import CurlMorsel
  from curl_cffi.requests.exceptions import ProxyError as CurlProxyError
  from curl_cffi.requests.exceptions import RequestException as CurlRequestError
+ from curl_cffi.requests.exceptions import Timeout
  from curl_cffi.requests.impersonate import DEFAULT_CHROME as CURL_DEFAULT_CHROME
  from typing_extensions import override

@@ -147,6 +149,7 @@ class CurlImpersonateHttpClient(HttpClient):
          session: Session | None = None,
          proxy_info: ProxyInfo | None = None,
          statistics: Statistics | None = None,
+         timeout: timedelta | None = None,
      ) -> HttpCrawlingResult:
          client = self._get_client(proxy_info.url if proxy_info else None)

@@ -157,7 +160,10 @@ class CurlImpersonateHttpClient(HttpClient):
                  headers=request.headers,
                  data=request.payload,
                  cookies=session.cookies.jar if session else None,
+                 timeout=timeout.total_seconds() if timeout else None,
              )
+         except Timeout as exc:
+             raise asyncio.TimeoutError from exc
          except CurlRequestError as exc:
              if self._is_proxy_error(exc):
                  raise ProxyError from exc
@@ -186,6 +192,7 @@ class CurlImpersonateHttpClient(HttpClient):
          payload: HttpPayload | None = None,
          session: Session | None = None,
          proxy_info: ProxyInfo | None = None,
+         timeout: timedelta | None = None,
      ) -> HttpResponse:
          if isinstance(headers, dict) or headers is None:
              headers = HttpHeaders(headers or {})
@@ -200,7 +207,10 @@ class CurlImpersonateHttpClient(HttpClient):
                  headers=dict(headers) if headers else None,
                  data=payload,
                  cookies=session.cookies.jar if session else None,
+                 timeout=timeout.total_seconds() if timeout else None,
              )
+         except Timeout as exc:
+             raise asyncio.TimeoutError from exc
          except CurlRequestError as exc:
              if self._is_proxy_error(exc):
                  raise ProxyError from exc
@@ -241,6 +251,8 @@ class CurlImpersonateHttpClient(HttpClient):
                  stream=True,
                  timeout=timeout.total_seconds() if timeout else None,
              )
+         except Timeout as exc:
+             raise asyncio.TimeoutError from exc
          except CurlRequestError as exc:
              if self._is_proxy_error(exc):
                  raise ProxyError from exc
crawlee/http_clients/_httpx.py CHANGED
@@ -1,5 +1,6 @@
  from __future__ import annotations

+ import asyncio
  from contextlib import asynccontextmanager
  from logging import getLogger
  from typing import TYPE_CHECKING, Any, cast
@@ -146,6 +147,7 @@ class HttpxHttpClient(HttpClient):
          session: Session | None = None,
          proxy_info: ProxyInfo | None = None,
          statistics: Statistics | None = None,
+         timeout: timedelta | None = None,
      ) -> HttpCrawlingResult:
          client = self._get_client(proxy_info.url if proxy_info else None)
          headers = self._combine_headers(request.headers)
@@ -157,10 +159,13 @@ class HttpxHttpClient(HttpClient):
              content=request.payload,
              cookies=session.cookies.jar if session else None,
              extensions={'crawlee_session': session if self._persist_cookies_per_session else None},
+             timeout=timeout.total_seconds() if timeout is not None else httpx.USE_CLIENT_DEFAULT,
          )

          try:
              response = await client.send(http_request)
+         except httpx.TimeoutException as exc:
+             raise asyncio.TimeoutError from exc
          except httpx.TransportError as exc:
              if self._is_proxy_error(exc):
                  raise ProxyError from exc
@@ -185,6 +190,7 @@ class HttpxHttpClient(HttpClient):
          payload: HttpPayload | None = None,
          session: Session | None = None,
          proxy_info: ProxyInfo | None = None,
+         timeout: timedelta | None = None,
      ) -> HttpResponse:
          client = self._get_client(proxy_info.url if proxy_info else None)

@@ -195,10 +201,13 @@ class HttpxHttpClient(HttpClient):
              headers=headers,
              payload=payload,
              session=session,
+             timeout=httpx.Timeout(timeout.total_seconds()) if timeout is not None else None,
          )

          try:
              response = await client.send(http_request)
+         except httpx.TimeoutException as exc:
+             raise asyncio.TimeoutError from exc
          except httpx.TransportError as exc:
              if self._is_proxy_error(exc):
                  raise ProxyError from exc
@@ -228,10 +237,13 @@ class HttpxHttpClient(HttpClient):
              headers=headers,
              payload=payload,
              session=session,
-             timeout=timeout,
+             timeout=httpx.Timeout(None, connect=timeout.total_seconds()) if timeout else None,
          )

-         response = await client.send(http_request, stream=True)
+         try:
+             response = await client.send(http_request, stream=True)
+         except httpx.TimeoutException as exc:
+             raise asyncio.TimeoutError from exc

          try:
              yield _HttpxResponse(response)
@@ -246,7 +258,7 @@ class HttpxHttpClient(HttpClient):
          headers: HttpHeaders | dict[str, str] | None,
          payload: HttpPayload | None,
          session: Session | None = None,
-         timeout: timedelta | None = None,
+         timeout: httpx.Timeout | None = None,
      ) -> httpx.Request:
          """Build an `httpx.Request` using the provided parameters."""
          if isinstance(headers, dict) or headers is None:
@@ -254,15 +266,13 @@ class HttpxHttpClient(HttpClient):

          headers = self._combine_headers(headers)

-         httpx_timeout = httpx.Timeout(None, connect=timeout.total_seconds()) if timeout else None
-
          return client.build_request(
              url=url,
              method=method,
              headers=dict(headers) if headers else None,
              content=payload,
              extensions={'crawlee_session': session if self._persist_cookies_per_session else None},
-             timeout=httpx_timeout,
+             timeout=timeout if timeout else httpx.USE_CLIENT_DEFAULT,
          )

      def _get_client(self, proxy_url: str | None) -> httpx.AsyncClient:
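Note the asymmetry in how the budget maps onto `httpx.Timeout` in this file: `send_request` and `crawl` bound the whole operation, while the streaming path only bounds the connect phase so a long body download is not cut off mid-stream. Roughly:

    from datetime import timedelta

    import httpx

    budget = timedelta(seconds=10)

    # send_request / crawl: one limit covering connect, read and write.
    whole_operation = httpx.Timeout(budget.total_seconds())

    # streaming: only establishing the connection is bounded; reads may run longer.
    connect_only = httpx.Timeout(None, connect=budget.total_seconds())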
crawlee/http_clients/_impit.py CHANGED
@@ -6,7 +6,7 @@ from logging import getLogger
  from typing import TYPE_CHECKING, Any, TypedDict

  from cachetools import LRUCache
- from impit import AsyncClient, Browser, HTTPError, Response, TransportError
+ from impit import AsyncClient, Browser, HTTPError, Response, TimeoutException, TransportError
  from impit import ProxyError as ImpitProxyError
  from typing_extensions import override

@@ -125,6 +125,7 @@ class ImpitHttpClient(HttpClient):
          session: Session | None = None,
          proxy_info: ProxyInfo | None = None,
          statistics: Statistics | None = None,
+         timeout: timedelta | None = None,
      ) -> HttpCrawlingResult:
          client = self._get_client(proxy_info.url if proxy_info else None, session.cookies.jar if session else None)

@@ -134,7 +135,10 @@ class ImpitHttpClient(HttpClient):
                  method=request.method,
                  content=request.payload,
                  headers=dict(request.headers) if request.headers else None,
+                 timeout=timeout.total_seconds() if timeout else None,
              )
+         except TimeoutException as exc:
+             raise asyncio.TimeoutError from exc
          except (TransportError, HTTPError) as exc:
              if self._is_proxy_error(exc):
                  raise ProxyError from exc
@@ -157,6 +161,7 @@ class ImpitHttpClient(HttpClient):
          payload: HttpPayload | None = None,
          session: Session | None = None,
          proxy_info: ProxyInfo | None = None,
+         timeout: timedelta | None = None,
      ) -> HttpResponse:
          if isinstance(headers, dict) or headers is None:
              headers = HttpHeaders(headers or {})
@@ -165,8 +170,14 @@ class ImpitHttpClient(HttpClient):

          try:
              response = await client.request(
-                 method=method, url=url, content=payload, headers=dict(headers) if headers else None
+                 method=method,
+                 url=url,
+                 content=payload,
+                 headers=dict(headers) if headers else None,
+                 timeout=timeout.total_seconds() if timeout else None,
              )
+         except TimeoutException as exc:
+             raise asyncio.TimeoutError from exc
          except (TransportError, HTTPError) as exc:
              if self._is_proxy_error(exc):
                  raise ProxyError from exc
@@ -189,14 +200,18 @@ class ImpitHttpClient(HttpClient):
      ) -> AsyncGenerator[HttpResponse]:
          client = self._get_client(proxy_info.url if proxy_info else None, session.cookies.jar if session else None)

-         response = await client.request(
-             method=method,
-             url=url,
-             content=payload,
-             headers=dict(headers) if headers else None,
-             timeout=timeout.total_seconds() if timeout else None,
-             stream=True,
-         )
+         try:
+             response = await client.request(
+                 method=method,
+                 url=url,
+                 content=payload,
+                 headers=dict(headers) if headers else None,
+                 timeout=timeout.total_seconds() if timeout else None,
+                 stream=True,
+             )
+         except TimeoutException as exc:
+             raise asyncio.TimeoutError from exc
+
          try:
              yield _ImpitResponse(response)
          finally:
crawlee/router.py CHANGED
@@ -1,13 +1,17 @@
  from __future__ import annotations

+ import asyncio
  from collections.abc import Awaitable, Callable
  from typing import Generic, TypeVar

+ from crawlee._request import RequestState
  from crawlee._types import BasicCrawlingContext
  from crawlee._utils.docs import docs_group

  __all__ = ['Router']

+ from crawlee.errors import UserHandlerTimeoutError
+
  TCrawlingContext = TypeVar('TCrawlingContext', bound=BasicCrawlingContext)
  RequestHandler = Callable[[TCrawlingContext], Awaitable[None]]

@@ -89,13 +93,19 @@ class Router(Generic[TCrawlingContext]):

      async def __call__(self, context: TCrawlingContext) -> None:
          """Invoke a request handler that matches the request label (or the default)."""
+         context.request.state = RequestState.REQUEST_HANDLER
          if context.request.label is None or context.request.label not in self._handlers_by_label:
              if self._default_handler is None:
                  raise RuntimeError(
                      f'No handler matches label `{context.request.label}` and no default handler is configured'
                  )

-             return await self._default_handler(context)
+             user_defined_handler = self._default_handler
+         else:
+             user_defined_handler = self._handlers_by_label[context.request.label]

-         handler = self._handlers_by_label[context.request.label]
-         return await handler(context)
+         try:
+             return await user_defined_handler(context)
+         except asyncio.TimeoutError as e:
+             # Timeout in handler, but not timeout of handler.
+             raise UserHandlerTimeoutError('Timeout raised by user defined handler') from e
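With this change, an `asyncio.TimeoutError` escaping a user handler is re-raised as `UserHandlerTimeoutError` (a subclass of `UserDefinedErrorHandlerError`), so it stays distinguishable from the crawler's own enforcement of the request-handler timeout. A sketch of a handler that would trigger it (crawler type, URL and the 5-second limit are illustrative):

    import asyncio

    from crawlee.crawlers import ParselCrawler, ParselCrawlingContext

    crawler = ParselCrawler()

    @crawler.router.default_handler
    async def handler(context: ParselCrawlingContext) -> None:
        # If this inner deadline expires, the escaping asyncio.TimeoutError is wrapped
        # by the router as UserHandlerTimeoutError ("timeout in the handler"), rather
        # than being treated as the crawler's own handler timeout ("timeout of the handler").
        await asyncio.wait_for(context.enqueue_links(), timeout=5)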
crawlee/storage_clients/_file_system/_dataset_client.py CHANGED
@@ -134,7 +134,7 @@ class FileSystemDatasetClient(DatasetClient):
                  continue

              try:
-                 file = await asyncio.to_thread(path_to_metadata.open)
+                 file = await asyncio.to_thread(path_to_metadata.open, 'r', encoding='utf-8')
                  try:
                      file_content = json.load(file)
                      metadata = DatasetMetadata(**file_content)
@@ -163,7 +163,7 @@ class FileSystemDatasetClient(DatasetClient):

          # If the dataset directory exists, reconstruct the client from the metadata file.
          if path_to_dataset.exists() and path_to_metadata.exists():
-             file = await asyncio.to_thread(open, path_to_metadata)
+             file = await asyncio.to_thread(open, path_to_metadata, 'r', encoding='utf-8')
              try:
                  file_content = json.load(file)
              finally:
crawlee/storage_clients/_file_system/_key_value_store_client.py CHANGED
@@ -133,7 +133,7 @@ class FileSystemKeyValueStoreClient(KeyValueStoreClient):
                  continue

              try:
-                 file = await asyncio.to_thread(path_to_metadata.open)
+                 file = await asyncio.to_thread(path_to_metadata.open, 'r', encoding='utf-8')
                  try:
                      file_content = json.load(file)
                      metadata = KeyValueStoreMetadata(**file_content)
@@ -162,7 +162,7 @@ class FileSystemKeyValueStoreClient(KeyValueStoreClient):

          # If the key-value store directory exists, reconstruct the client from the metadata file.
          if path_to_kvs.exists() and path_to_metadata.exists():
-             file = await asyncio.to_thread(open, path_to_metadata)
+             file = await asyncio.to_thread(open, path_to_metadata, 'r', encoding='utf-8')
              try:
                  file_content = json.load(file)
              finally:
@@ -239,7 +239,7 @@ class FileSystemKeyValueStoreClient(KeyValueStoreClient):
          # Read the metadata file
          async with self._lock:
              try:
-                 file = await asyncio.to_thread(open, record_metadata_filepath)
+                 file = await asyncio.to_thread(open, record_metadata_filepath, 'r', encoding='utf-8')
              except FileNotFoundError:
                  logger.warning(f'Metadata file disappeared for key "{key}", aborting get_value')
                  return None