apify 2.7.1b7__py3-none-any.whl → 2.7.1b9__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

Files changed (43)
  1. apify/_actor.py +6 -7
  2. apify/_configuration.py +33 -0
  3. apify/_proxy_configuration.py +8 -5
  4. apify/_utils.py +9 -1
  5. apify/events/__init__.py +5 -0
  6. apify/events/_apify_event_manager.py +140 -0
  7. apify/events/_types.py +102 -0
  8. apify/log.py +0 -7
  9. apify/request_loaders/__init__.py +18 -0
  10. apify/{storages/_request_list.py → request_loaders/_apify_request_list.py} +22 -15
  11. apify/request_loaders/py.typed +0 -0
  12. apify/scrapy/_logging_config.py +1 -4
  13. apify/scrapy/extensions/_httpcache.py +9 -5
  14. apify/scrapy/requests.py +3 -3
  15. apify/scrapy/scheduler.py +8 -5
  16. apify/storage_clients/__init__.py +10 -0
  17. apify/storage_clients/_apify/__init__.py +11 -0
  18. apify/storage_clients/_apify/_dataset_client.py +304 -0
  19. apify/storage_clients/_apify/_key_value_store_client.py +241 -0
  20. apify/storage_clients/_apify/_models.py +107 -0
  21. apify/storage_clients/_apify/_request_queue_client.py +785 -0
  22. apify/storage_clients/_apify/_storage_client.py +80 -0
  23. apify/storage_clients/_apify/py.typed +0 -0
  24. apify/storage_clients/_file_system/__init__.py +2 -0
  25. apify/storage_clients/_file_system/_key_value_store_client.py +36 -0
  26. apify/storage_clients/_file_system/_storage_client.py +35 -0
  27. apify/storage_clients/py.typed +0 -0
  28. apify/storages/__init__.py +1 -3
  29. {apify-2.7.1b7.dist-info → apify-2.7.1b9.dist-info}/METADATA +7 -5
  30. apify-2.7.1b9.dist-info/RECORD +52 -0
  31. apify/_platform_event_manager.py +0 -215
  32. apify/apify_storage_client/__init__.py +0 -3
  33. apify/apify_storage_client/_apify_storage_client.py +0 -72
  34. apify/apify_storage_client/_dataset_client.py +0 -190
  35. apify/apify_storage_client/_dataset_collection_client.py +0 -51
  36. apify/apify_storage_client/_key_value_store_client.py +0 -109
  37. apify/apify_storage_client/_key_value_store_collection_client.py +0 -51
  38. apify/apify_storage_client/_request_queue_client.py +0 -176
  39. apify/apify_storage_client/_request_queue_collection_client.py +0 -51
  40. apify-2.7.1b7.dist-info/RECORD +0 -44
  41. /apify/{apify_storage_client → events}/py.typed +0 -0
  42. {apify-2.7.1b7.dist-info → apify-2.7.1b9.dist-info}/WHEEL +0 -0
  43. {apify-2.7.1b7.dist-info → apify-2.7.1b9.dist-info}/licenses/LICENSE +0 -0
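
The file list shows a structural reorganization: the removed `apify.apify_storage_client` and `apify._platform_event_manager` modules are superseded by the new `apify.storage_clients`, `apify.events`, and `apify.request_loaders` subpackages. Migrating code would adjust imports roughly as follows (a hedged sketch; the re-exported names are inferred from the file names above, not confirmed by this diff):

    # Before (2.7.1b7), name assumed from the removed module:
    from apify.apify_storage_client import ApifyStorageClient

    # After (2.7.1b9), names assumed from the added modules:
    from apify.storage_clients import ApifyStorageClient
    from apify.request_loaders import ApifyRequestList

The largest single addition is the new Apify request queue client (file 21), whose full diff follows.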
apify/storage_clients/_apify/_request_queue_client.py
@@ -0,0 +1,785 @@
+ from __future__ import annotations
+
+ import asyncio
+ import re
+ from base64 import b64encode
+ from collections import deque
+ from datetime import datetime, timedelta, timezone
+ from hashlib import sha256
+ from logging import getLogger
+ from typing import TYPE_CHECKING, Final
+
+ from cachetools import LRUCache
+ from typing_extensions import override
+
+ from apify_client import ApifyClientAsync
+ from crawlee._utils.crypto import crypto_random_object_id
+ from crawlee.storage_clients._base import RequestQueueClient
+ from crawlee.storage_clients.models import AddRequestsResponse, ProcessedRequest, RequestQueueMetadata
+
+ from ._models import CachedRequest, ProlongRequestLockResponse, RequestQueueHead
+ from apify import Request
+
+ if TYPE_CHECKING:
+     from collections.abc import Sequence
+
+     from apify_client.clients import RequestQueueClientAsync
+
+     from apify import Configuration
+
+ logger = getLogger(__name__)
+
+
+ def unique_key_to_request_id(unique_key: str, *, request_id_length: int = 15) -> str:
+     """Generate a deterministic request ID based on a unique key.
+
+     Args:
+         unique_key: The unique key to convert into a request ID.
+         request_id_length: The length of the request ID.
+
+     Returns:
+         A URL-safe, truncated request ID based on the unique key.
+     """
+     # Encode the unique key and compute its SHA-256 hash.
+     hashed_key = sha256(unique_key.encode('utf-8')).digest()
+
+     # Encode the hash in base64 and decode it to get a string.
+     base64_encoded = b64encode(hashed_key).decode('utf-8')
+
+     # Remove characters that are not URL-safe ('+', '/', or '=').
+     url_safe_key = re.sub(r'(\+|\/|=)', '', base64_encoded)
+
+     # Truncate the key to the desired length.
+     return url_safe_key[:request_id_length]
+
+
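A quick sanity check of the helper above (hypothetical snippet, not part of the diff): the derivation is deterministic, URL-safe, and 15 characters long by default.

    request_id = unique_key_to_request_id('https://example.com')
    assert request_id == unique_key_to_request_id('https://example.com')  # deterministic
    assert len(request_id) == 15 and request_id.isalnum()  # truncated and URL-safe
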
+ class ApifyRequestQueueClient(RequestQueueClient):
+     """An Apify platform implementation of the request queue client."""
+
+     _DEFAULT_LOCK_TIME: Final[timedelta] = timedelta(minutes=3)
+     """The default lock time for requests in the queue."""
+
+     _MAX_CACHED_REQUESTS: Final[int] = 1_000_000
+     """Maximum number of requests that can be cached."""
+
+     def __init__(
+         self,
+         *,
+         api_client: RequestQueueClientAsync,
+         metadata: RequestQueueMetadata,
+     ) -> None:
+         """Initialize a new instance.
+
+         Preferably use the `ApifyRequestQueueClient.open` class method to create a new instance.
+         """
+         self._api_client = api_client
+         """The Apify request queue client for API operations."""
+
+         self._metadata = metadata
+         """Additional data related to the RequestQueue."""
+
+         self._queue_head = deque[str]()
+         """A deque to store request unique keys in the queue head."""
+
+         self._requests_cache: LRUCache[str, CachedRequest] = LRUCache(maxsize=self._MAX_CACHED_REQUESTS)
+         """A cache to store request objects. The request unique key is used as the cache key."""
+
+         self._queue_has_locked_requests: bool | None = None
+         """Whether the queue has requests locked by another client."""
+
+         self._should_check_for_forefront_requests = False
+         """Whether to check for forefront requests in the next list_head call."""
+
+         self._fetch_lock = asyncio.Lock()
+         """Fetch lock to minimize race conditions when communicating with the API."""
+
+     async def _get_metadata_estimate(self) -> RequestQueueMetadata:
+         """Return the locally cached metadata, falling back to the API if the queue has multiple clients.
+
+         This is used internally to avoid an unnecessary API call unless one is needed (multiple clients).
+         Unlike metadata fetched from the API, the local estimate has no delay, so in situations where
+         there is only one client, it is the better choice.
+         """
+         if self._metadata.had_multiple_clients:
+             return await self.get_metadata()
+         # Return the local estimate (it will not include changes done by another client).
+         return self._metadata
+
+     @override
+     async def get_metadata(self) -> RequestQueueMetadata:
+         """Get metadata about the request queue.
+
+         Returns:
+             Metadata from the API, merged with the local estimate, because in some cases the data from
+             the API can be delayed.
+         """
+         response = await self._api_client.get()
+         if response is None:
+             raise ValueError('Failed to fetch request queue metadata from the API.')
+         # Enhance the API response with local estimates (the API can be delayed by a few seconds,
+         # while the local estimate is not).
+         return RequestQueueMetadata(
+             id=response['id'],
+             name=response['name'],
+             total_request_count=max(response['totalRequestCount'], self._metadata.total_request_count),
+             handled_request_count=max(response['handledRequestCount'], self._metadata.handled_request_count),
+             pending_request_count=response['pendingRequestCount'],
+             created_at=min(response['createdAt'], self._metadata.created_at),
+             modified_at=max(response['modifiedAt'], self._metadata.modified_at),
+             accessed_at=max(response['accessedAt'], self._metadata.accessed_at),
+             had_multiple_clients=response['hadMultipleClients'] or self._metadata.had_multiple_clients,
+         )
+
+     @classmethod
+     async def open(
+         cls,
+         *,
+         id: str | None,
+         name: str | None,
+         configuration: Configuration,
+     ) -> ApifyRequestQueueClient:
+         """Open an Apify request queue client.
+
+         This method creates and initializes a new instance of the Apify request queue client. It handles
+         authentication, storage lookup/creation, and metadata retrieval, and sets up internal caching and
+         queue management structures.
+
+         Args:
+             id: The ID of an existing request queue to open. If provided, the client will connect to this
+                 specific storage. Cannot be used together with `name`.
+             name: The name of a request queue to get or create. If a storage with this name exists, it will
+                 be opened; otherwise, a new one will be created. Cannot be used together with `id`.
+             configuration: The configuration object containing API credentials and settings. Must include
+                 a valid `token` and `api_base_url`. May also contain a `default_request_queue_id` for
+                 fallback when neither `id` nor `name` is provided.
+
+         Returns:
+             An instance for the opened or created storage client.
+
+         Raises:
+             ValueError: If the configuration is missing required fields (token, api_base_url), if both `id`
+                 and `name` are provided, or if neither `id` nor `name` is provided and no default storage ID
+                 is available in the configuration.
+         """
+         token = configuration.token
+         if not token:
+             raise ValueError(f'Apify storage client requires a valid token in Configuration (token={token}).')
+
+         api_url = configuration.api_base_url
+         if not api_url:
+             raise ValueError(f'Apify storage client requires a valid API URL in Configuration (api_url={api_url}).')
+
+         api_public_base_url = configuration.api_public_base_url
+         if not api_public_base_url:
+             raise ValueError(
+                 'Apify storage client requires a valid API public base URL in Configuration '
+                 f'(api_public_base_url={api_public_base_url}).'
+             )
+
+         # Create an Apify client with the provided token and API URL.
+         apify_client_async = ApifyClientAsync(
+             token=token,
+             api_url=api_url,
+             max_retries=8,
+             min_delay_between_retries_millis=500,
+             timeout_secs=360,
+         )
+         apify_rqs_client = apify_client_async.request_queues()
+
+         match (id, name):
+             case (None, None):
+                 # If both id and name are None, try to get the default storage ID from environment variables.
+                 # The default storage ID environment variable is set by the Apify platform. It also contains
+                 # a new storage ID after an Actor's reboot or migration.
+                 id = configuration.default_request_queue_id
+             case (None, name):
+                 # If only name is provided, get or create the storage by name.
+                 id = RequestQueueMetadata.model_validate(
+                     await apify_rqs_client.get_or_create(name=name),
+                 ).id
+             case (_, None):
+                 # If only id is provided, use it.
+                 pass
+             case (_, _):
+                 # If both id and name are provided, raise an error.
+                 raise ValueError('Only one of "id" or "name" can be specified, not both.')
+         if id is None:
+             raise RuntimeError('Unreachable code')
+
+         # Use a suitable client_key to make the `hadMultipleClients` response of the Apify API useful.
+         # It should persist across migrated or resurrected Actor runs on the Apify platform.
+         _api_max_client_key_length = 32
+         client_key = (configuration.actor_run_id or crypto_random_object_id(length=_api_max_client_key_length))[
+             :_api_max_client_key_length
+         ]
+
+         apify_rq_client = apify_client_async.request_queue(request_queue_id=id, client_key=client_key)
+
+         # Fetch its metadata.
+         metadata = await apify_rq_client.get()
+
+         # If metadata is None, it means the storage does not exist, so we create it.
+         if metadata is None:
+             id = RequestQueueMetadata.model_validate(
+                 await apify_rqs_client.get_or_create(),
+             ).id
+             apify_rq_client = apify_client_async.request_queue(request_queue_id=id, client_key=client_key)
+
+         # Verify that the storage exists by fetching its metadata again.
+         metadata = await apify_rq_client.get()
+         if metadata is None:
+             raise ValueError(f'Opening request queue with id={id} and name={name} failed.')
+
+         metadata_model = RequestQueueMetadata.model_validate(metadata)
+
+         return cls(
+             api_client=apify_rq_client,
+             metadata=metadata_model,
+         )
+
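For orientation, a minimal sketch of a call site for the factory above (hypothetical; `get_global_configuration` is assumed to be available on the SDK's `Configuration`):

    # Hypothetical usage of ApifyRequestQueueClient.open():
    config = Configuration.get_global_configuration()  # must carry token and api_base_url
    client = await ApifyRequestQueueClient.open(id=None, name='my-queue', configuration=config)
    # With both id=None and name=None, configuration.default_request_queue_id is used instead.
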
+     @override
+     async def purge(self) -> None:
+         raise NotImplementedError(
+             'Purging the request queue is not supported on the Apify platform. '
+             'Use the `drop` method to delete the request queue instead.'
+         )
+
+     @override
+     async def drop(self) -> None:
+         await self._api_client.delete()
+
+     @override
+     async def add_batch_of_requests(
+         self,
+         requests: Sequence[Request],
+         *,
+         forefront: bool = False,
+     ) -> AddRequestsResponse:
+         """Add a batch of requests to the queue.
+
+         Args:
+             requests: The requests to add.
+             forefront: Whether to add the requests to the beginning of the queue.
+
+         Returns:
+             Response containing information about the added requests.
+         """
+         # Do not try to add previously added requests, to avoid pointless, expensive API calls.
+
+         new_requests: list[Request] = []
+         already_present_requests: list[ProcessedRequest] = []
+
+         for request in requests:
+             if self._requests_cache.get(request.unique_key):
+                 # We are not sure whether the request was already handled at this point, and it is not worth
+                 # calling the API to find out. It could have been handled by another client in the meantime,
+                 # so the cached `request.was_already_handled` value is not reliable.
+                 already_present_requests.append(
+                     ProcessedRequest.model_validate(
+                         {
+                             'uniqueKey': request.unique_key,
+                             'wasAlreadyPresent': True,
+                             'wasAlreadyHandled': request.was_already_handled,
+                         }
+                     )
+                 )
+             else:
+                 # Add the new request to the cache.
+                 processed_request = ProcessedRequest.model_validate(
+                     {
+                         'uniqueKey': request.unique_key,
+                         'wasAlreadyPresent': True,
+                         'wasAlreadyHandled': request.was_already_handled,
+                     }
+                 )
+                 self._cache_request(
+                     request.unique_key,
+                     processed_request,
+                 )
+                 new_requests.append(request)
+
+         if new_requests:
+             # Prepare requests for the API by converting them to dictionaries.
+             requests_dict = [
+                 request.model_dump(
+                     by_alias=True,
+                     exclude={'id'},  # Exclude the ID field from requests since the API doesn't accept it.
+                 )
+                 for request in new_requests
+             ]
+
+             # Send the requests to the API.
+             api_response = AddRequestsResponse.model_validate(
+                 await self._api_client.batch_add_requests(requests=requests_dict, forefront=forefront)
+             )
+
+             # Add the locally known, already present processed requests based on the local cache.
+             api_response.processed_requests.extend(already_present_requests)
+
+             # Remove unprocessed requests from the cache.
+             for unprocessed_request in api_response.unprocessed_requests:
+                 self._requests_cache.pop(unprocessed_request.unique_key, None)
+         else:
+             api_response = AddRequestsResponse.model_validate(
+                 {'unprocessedRequests': [], 'processedRequests': already_present_requests}
+             )
+
+         logger.debug(
+             f'Tried to add new requests: {len(new_requests)}, '
+             f'succeeded to add new requests: {len(api_response.processed_requests) - len(already_present_requests)}, '
+             f'skipped already present requests: {len(already_present_requests)}'
+         )
+
+         # Update the assumed total count for newly added requests.
+         new_request_count = 0
+         for processed_request in api_response.processed_requests:
+             if not processed_request.was_already_present and not processed_request.was_already_handled:
+                 new_request_count += 1
+
+         self._metadata.total_request_count += new_request_count
+
+         return api_response
+
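A small illustration of the dedup behavior documented above (hypothetical snippet; `Request.from_url` is crawlee's request constructor): a request whose unique key is already in the local cache is short-circuited and never re-sent to the API.

    # Hypothetical: the second call skips the API and reports the request as already present.
    first = await client.add_batch_of_requests([Request.from_url('https://example.com')])
    second = await client.add_batch_of_requests([Request.from_url('https://example.com')])
    assert second.processed_requests[0].was_already_present
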
+     @override
+     async def get_request(self, unique_key: str) -> Request | None:
+         """Get a request by unique key.
+
+         Args:
+             unique_key: Unique key of the request to get.
+
+         Returns:
+             The request, or None if not found.
+         """
+         response = await self._api_client.get_request(unique_key_to_request_id(unique_key))
+
+         if response is None:
+             return None
+
+         return Request.model_validate(response)
+
+     @override
+     async def fetch_next_request(self) -> Request | None:
+         """Return the next request in the queue to be processed.
+
+         Once you successfully finish processing the request, you need to call `mark_request_as_handled`
+         to mark the request as handled in the queue. If there was an error in processing the request,
+         call `reclaim_request` instead, so that the queue will give the request to some other consumer
+         in another call to the `fetch_next_request` method.
+
+         Returns:
+             The request or `None` if there are no more pending requests.
+         """
+         # Ensure the queue head has requests if any are available. Fetch the head under the lock
+         # to prevent race conditions.
+         async with self._fetch_lock:
+             await self._ensure_head_is_non_empty()
+
+             # If the queue head is empty after ensuring, there are no requests.
+             if not self._queue_head:
+                 return None
+
+             # Get the next request unique key from the queue head.
+             next_unique_key = self._queue_head.popleft()
+
+         request = await self._get_or_hydrate_request(next_unique_key)
+
+         # Handle a potential inconsistency where the request might not be in the main table yet.
+         if request is None:
+             logger.debug(
+                 'Cannot find a request from the beginning of queue, will be retried later',
+                 extra={'nextRequestUniqueKey': next_unique_key},
+             )
+             return None
+
+         # If the request was already handled, skip it.
+         if request.handled_at is not None:
+             logger.debug(
+                 'Request fetched from the beginning of queue was already handled',
+                 extra={'nextRequestUniqueKey': next_unique_key},
+             )
+             return None
+
+         # Use get_request to ensure we have the full request object.
+         request = await self.get_request(request.unique_key)
+         if request is None:
+             logger.debug(
+                 'Request fetched from the beginning of queue was not found in the RQ',
+                 extra={'nextRequestUniqueKey': next_unique_key},
+             )
+             return None
+
+         return request
+
+     @override
+     async def mark_request_as_handled(self, request: Request) -> ProcessedRequest | None:
+         """Mark a request as handled after successful processing.
+
+         Handled requests will never again be returned by the `fetch_next_request` method.
+
+         Args:
+             request: The request to mark as handled.
+
+         Returns:
+             Information about the queue operation. `None` if the given request was not in progress.
+         """
+         # Set the handled_at timestamp if not already set.
+         if request.handled_at is None:
+             request.handled_at = datetime.now(tz=timezone.utc)
+
+         # Using `.get()` avoids a KeyError when the request has been evicted from the LRU cache.
+         if cached_request := self._requests_cache.get(request.unique_key):
+             cached_request.was_already_handled = request.was_already_handled
+
+         try:
+             # Update the request in the API.
+             processed_request = await self._update_request(request)
+             processed_request.unique_key = request.unique_key
+
+             # Update the assumed handled count if this request wasn't already handled.
+             if not processed_request.was_already_handled:
+                 self._metadata.handled_request_count += 1
+
+             # Update the cache with the handled request.
+             cache_key = request.unique_key
+             self._cache_request(
+                 cache_key,
+                 processed_request,
+                 hydrated_request=request,
+             )
+         except Exception as exc:
+             logger.debug(f'Error marking request {request.unique_key} as handled: {exc!s}')
+             return None
+         else:
+             return processed_request
+
+     @override
+     async def reclaim_request(
+         self,
+         request: Request,
+         *,
+         forefront: bool = False,
+     ) -> ProcessedRequest | None:
+         """Reclaim a failed request back to the queue.
+
+         The request will be returned for processing later again by another call to `fetch_next_request`.
+
+         Args:
+             request: The request to return to the queue.
+             forefront: Whether to add the request to the head or the end of the queue.
+
+         Returns:
+             Information about the queue operation. `None` if the given request was not in progress.
+         """
+         # If the request was marked as handled, clear the timestamp: when reclaiming,
+         # we want to put the request back for processing.
+         if request.was_already_handled:
+             request.handled_at = None
+
+         # Reclaim under the lock to prevent race conditions that could lead to double processing
+         # of the same request.
+         async with self._fetch_lock:
+             try:
+                 # Update the request in the API.
+                 processed_request = await self._update_request(request, forefront=forefront)
+                 processed_request.unique_key = request.unique_key
+
+                 # If the request was previously handled, decrement our handled count since
+                 # we're putting it back for processing.
+                 if request.was_already_handled and not processed_request.was_already_handled:
+                     self._metadata.handled_request_count -= 1
+
+                 # Update the cache.
+                 cache_key = request.unique_key
+                 self._cache_request(
+                     cache_key,
+                     processed_request,
+                     hydrated_request=request,
+                 )
+
+                 # If we're adding to the forefront, we need to check for forefront requests
+                 # in the next list_head call.
+                 if forefront:
+                     self._should_check_for_forefront_requests = True
+
+                 # Try to release the lock on the request.
+                 try:
+                     await self._delete_request_lock(request.unique_key, forefront=forefront)
+                 except Exception as err:
+                     logger.debug(f'Failed to delete request lock for request {request.unique_key}', exc_info=err)
+             except Exception as exc:
+                 logger.debug(f'Error reclaiming request {request.unique_key}: {exc!s}')
+                 return None
+             else:
+                 return processed_request
+
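Together, `fetch_next_request`, `mark_request_as_handled`, and `reclaim_request` form the usual consumer loop; a minimal sketch, assuming `handle` is user-supplied processing code:

    # Hypothetical worker loop over the queue client:
    while request := await client.fetch_next_request():
        try:
            await handle(request)  # user code; any exception sends the request back
        except Exception:
            await client.reclaim_request(request)  # retried by a later fetch
        else:
            await client.mark_request_as_handled(request)
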
+     @override
+     async def is_empty(self) -> bool:
+         """Check if the queue is empty.
+
+         Returns:
+             True if the queue is empty, False otherwise.
+         """
+         # Check _list_head and self._queue_has_locked_requests under the lock to make sure they are
+         # consistent. Without the lock, `is_empty` could falsely report True due to a low-probability
+         # race condition.
+         async with self._fetch_lock:
+             head = await self._list_head(limit=1, lock_time=None)
+             return len(head.items) == 0 and not self._queue_has_locked_requests
+
+     async def _ensure_head_is_non_empty(self) -> None:
+         """Ensure that the queue head has requests if they are available in the queue."""
+         # If the queue head has enough requests, skip fetching more.
+         if len(self._queue_head) > 1 and not self._should_check_for_forefront_requests:
+             return
+
+         # Fetch requests from the API and populate the queue head.
+         await self._list_head(lock_time=self._DEFAULT_LOCK_TIME)
+
+     async def _get_or_hydrate_request(self, unique_key: str) -> Request | None:
+         """Get a request by unique key, either from the cache or by fetching it from the API.
+
+         Args:
+             unique_key: Unique key of the request to get.
+
+         Returns:
+             The request if found and valid, otherwise None.
+         """
+         # First, check if the request is in our cache.
+         cached_entry = self._requests_cache.get(unique_key)
+
+         if cached_entry and cached_entry.hydrated:
+             # If we have the request hydrated in the cache, check whether its lock has expired.
+             if cached_entry.lock_expires_at and cached_entry.lock_expires_at < datetime.now(tz=timezone.utc):
+                 # Try to prolong the lock if it's expired.
+                 try:
+                     lock_secs = int(self._DEFAULT_LOCK_TIME.total_seconds())
+                     response = await self._prolong_request_lock(unique_key, lock_secs=lock_secs)
+                     cached_entry.lock_expires_at = response.lock_expires_at
+                 except Exception:
+                     # If prolonging the lock fails, we lost the request.
+                     logger.debug(f'Failed to prolong lock for request {unique_key}, returning None')
+                     return None
+
+             return cached_entry.hydrated
+
+         # If the request is not in the cache or not hydrated, fetch it.
+         try:
+             # Try to acquire or prolong the lock.
+             lock_secs = int(self._DEFAULT_LOCK_TIME.total_seconds())
+             await self._prolong_request_lock(unique_key, lock_secs=lock_secs)
+
+             # Fetch the request data.
+             request = await self.get_request(unique_key)
+
+             # If the request is not found, release the lock and return None.
+             if not request:
+                 await self._delete_request_lock(unique_key)
+                 return None
+
+             # Update the cache with the hydrated request.
+             cache_key = request.unique_key
+             self._cache_request(
+                 cache_key,
+                 ProcessedRequest(
+                     unique_key=request.unique_key,
+                     was_already_present=True,
+                     was_already_handled=request.handled_at is not None,
+                 ),
+                 hydrated_request=request,
+             )
+         except Exception as exc:
+             logger.debug(f'Error fetching or locking request {unique_key}: {exc!s}')
+             return None
+         else:
+             return request
+
+     async def _update_request(
+         self,
+         request: Request,
+         *,
+         forefront: bool = False,
+     ) -> ProcessedRequest:
+         """Update a request in the queue.
+
+         Args:
+             request: The updated request.
+             forefront: Whether to put the updated request at the beginning or the end of the queue.
+
+         Returns:
+             Information about the processed request.
+         """
+         request_dict = request.model_dump(by_alias=True)
+         request_dict['id'] = unique_key_to_request_id(request.unique_key)
+         response = await self._api_client.update_request(
+             request=request_dict,
+             forefront=forefront,
+         )
+
+         return ProcessedRequest.model_validate(
+             {'uniqueKey': request.unique_key} | response,
+         )
+
+     async def _list_head(
+         self,
+         *,
+         lock_time: timedelta | None = None,
+         limit: int = 25,
+     ) -> RequestQueueHead:
+         """Retrieve requests from the beginning of the queue.
+
+         Args:
+             lock_time: Duration for which to lock the retrieved requests.
+                 If None, requests will not be locked.
+             limit: Maximum number of requests to retrieve.
+
+         Returns:
+             A collection of requests from the beginning of the queue.
+         """
+         # Return from the cache if it is available and we're not checking for new forefront requests.
+         if self._queue_head and not self._should_check_for_forefront_requests:
+             logger.debug(f'Using cached queue head with {len(self._queue_head)} requests')
+             # Create a list of requests from the cached queue head.
+             items = []
+             for unique_key in list(self._queue_head)[:limit]:
+                 cached_request = self._requests_cache.get(unique_key)
+                 if cached_request and cached_request.hydrated:
+                     items.append(cached_request.hydrated)
+
+             metadata = await self._get_metadata_estimate()
+
+             return RequestQueueHead(
+                 limit=limit,
+                 had_multiple_clients=metadata.had_multiple_clients,
+                 queue_modified_at=metadata.modified_at,
+                 items=items,
+                 queue_has_locked_requests=self._queue_has_locked_requests,
+                 lock_time=lock_time,
+             )
+
+         leftover_buffer = list[str]()
+         if self._should_check_for_forefront_requests:
+             leftover_buffer = list(self._queue_head)
+             self._queue_head.clear()
+             self._should_check_for_forefront_requests = False
+
+         # Otherwise, fetch from the API.
+         lock_time = lock_time or self._DEFAULT_LOCK_TIME
+         lock_secs = int(lock_time.total_seconds())
+
+         response = await self._api_client.list_and_lock_head(
+             lock_secs=lock_secs,
+             limit=limit,
+         )
+
+         # Update the queue head cache.
+         self._queue_has_locked_requests = response.get('queueHasLockedRequests', False)
+         # Check if there is another client working with the RequestQueue.
+         self._metadata.had_multiple_clients = response.get('hadMultipleClients', False)
+
+         for request_data in response.get('items', []):
+             request = Request.model_validate(request_data)
+
+             # Skip requests without a unique key.
+             if not request.unique_key:
+                 logger.debug(
+                     'Skipping request from queue head, missing unique key',
+                     extra={
+                         'unique_key': request.unique_key,
+                     },
+                 )
+                 continue
+
+             # Cache the request.
+             self._cache_request(
+                 request.unique_key,
+                 ProcessedRequest(
+                     unique_key=request.unique_key,
+                     was_already_present=True,
+                     was_already_handled=False,
+                 ),
+                 hydrated_request=request,
+             )
+             self._queue_head.append(request.unique_key)
+
+         # After adding new requests to the forefront, any leftover locked requests are kept at the end.
+         for leftover_unique_key in leftover_buffer:
+             self._queue_head.append(leftover_unique_key)
+
+         return RequestQueueHead.model_validate(response)
+
+     async def _prolong_request_lock(
+         self,
+         unique_key: str,
+         *,
+         lock_secs: int,
+     ) -> ProlongRequestLockResponse:
+         """Prolong the lock on a specific request in the queue.
+
+         Args:
+             unique_key: Unique key of the request whose lock is to be prolonged.
+             lock_secs: The additional amount of time, in seconds, that the request will remain locked.
+
+         Returns:
+             A response containing the time at which the lock will expire.
+         """
+         response = await self._api_client.prolong_request_lock(
+             request_id=unique_key_to_request_id(unique_key),
+             # All requests reaching this code were at the tip of the queue when they were fetched,
+             # so if their lock expires, they should be put back to the forefront, as their handling
+             # is long overdue.
+             forefront=True,
+             lock_secs=lock_secs,
+         )
+
+         result = ProlongRequestLockResponse(
+             lock_expires_at=datetime.fromisoformat(response['lockExpiresAt'].replace('Z', '+00:00'))
+         )
+
+         # Update the cache with the new lock expiration.
+         for cached_request in self._requests_cache.values():
+             if cached_request.unique_key == unique_key:
+                 cached_request.lock_expires_at = result.lock_expires_at
+                 break
+
+         return result
+
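A note on the `lockExpiresAt` parsing above: `datetime.fromisoformat` only accepts a trailing 'Z' from Python 3.11 onward, hence the `replace('Z', '+00:00')` shim. For example:

    from datetime import datetime

    # 'Z' is normalized to an explicit UTC offset before parsing:
    datetime.fromisoformat('2024-01-01T12:00:00.000Z'.replace('Z', '+00:00'))
    # datetime.datetime(2024, 1, 1, 12, 0, tzinfo=datetime.timezone.utc)
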
+     async def _delete_request_lock(
+         self,
+         unique_key: str,
+         *,
+         forefront: bool = False,
+     ) -> None:
+         """Delete the lock on a specific request in the queue.
+
+         Args:
+             unique_key: Unique key of the request whose lock is to be deleted.
+             forefront: Whether to put the request at the beginning or the end of the queue after the lock
+                 is deleted.
+         """
+         try:
+             await self._api_client.delete_request_lock(
+                 request_id=unique_key_to_request_id(unique_key),
+                 forefront=forefront,
+             )
+
+             # Update the cache to remove the lock.
+             for cached_request in self._requests_cache.values():
+                 if cached_request.unique_key == unique_key:
+                     cached_request.lock_expires_at = None
+                     break
+         except Exception as err:
+             logger.debug(f'Failed to delete request lock for request {unique_key}', exc_info=err)
+
+     def _cache_request(
+         self,
+         cache_key: str,
+         processed_request: ProcessedRequest,
+         *,
+         hydrated_request: Request | None = None,
+     ) -> None:
+         """Cache a request for future use.
+
+         Args:
+             cache_key: The key to use for caching the request; it should be the request's unique key.
+             processed_request: The processed request information.
+             hydrated_request: The hydrated request object, if available.
+         """
+         self._requests_cache[cache_key] = CachedRequest(
+             unique_key=processed_request.unique_key,
+             was_already_handled=processed_request.was_already_handled,
+             hydrated=hydrated_request,
+             lock_expires_at=None,
+         )