apify 2.7.1b20__py3-none-any.whl → 2.7.1b21__py3-none-any.whl
This diff shows the content of publicly available package versions as released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their public registries.
Potentially problematic release.
This version of apify might be problematic.
- apify/_actor.py +37 -66
- apify/storage_clients/__init__.py +2 -0
- apify/storage_clients/_apify/_request_queue_client.py +118 -605
- apify/storage_clients/_apify/_request_queue_shared_client.py +527 -0
- apify/storage_clients/_apify/_request_queue_single_client.py +399 -0
- apify/storage_clients/_apify/_storage_client.py +26 -4
- apify/storage_clients/_apify/_utils.py +27 -1
- apify/storage_clients/_smart_apify/__init__.py +1 -0
- apify/storage_clients/_smart_apify/_storage_client.py +117 -0
- {apify-2.7.1b20.dist-info → apify-2.7.1b21.dist-info}/METADATA +2 -2
- {apify-2.7.1b20.dist-info → apify-2.7.1b21.dist-info}/RECORD +13 -9
- {apify-2.7.1b20.dist-info → apify-2.7.1b21.dist-info}/WHEEL +0 -0
- {apify-2.7.1b20.dist-info → apify-2.7.1b21.dist-info}/licenses/LICENSE +0 -0
apify/storage_clients/_apify/_request_queue_client.py

@@ -1,15 +1,8 @@
 from __future__ import annotations

-import asyncio
-import re
-from base64 import b64encode
-from collections import deque
-from datetime import datetime, timedelta, timezone
-from hashlib import sha256
 from logging import getLogger
-from typing import TYPE_CHECKING, Final
+from typing import TYPE_CHECKING, Final, Literal

-from cachetools import LRUCache
 from typing_extensions import override

 from apify_client import ApifyClientAsync
@@ -18,54 +11,24 @@ from crawlee.storage_clients._base import RequestQueueClient
 from crawlee.storage_clients.models import AddRequestsResponse, ProcessedRequest, RequestQueueMetadata
 from crawlee.storages import RequestQueue

-from ._models import
-
-
-    ProlongRequestLockResponse,
-    RequestQueueHead,
-    RequestQueueStats,
-)
+from ._models import ApifyRequestQueueMetadata, RequestQueueStats
+from ._request_queue_shared_client import _ApifyRequestQueueSharedClient
+from ._request_queue_single_client import _ApifyRequestQueueSingleClient
 from ._utils import AliasResolver
-from apify import Request

 if TYPE_CHECKING:
     from collections.abc import Sequence

     from apify_client.clients import RequestQueueClientAsync
+    from crawlee import Request

     from apify import Configuration

 logger = getLogger(__name__)


-def unique_key_to_request_id(unique_key: str, *, request_id_length: int = 15) -> str:
-    """Generate a deterministic request ID based on a unique key.
-
-    Args:
-        unique_key: The unique key to convert into a request ID.
-        request_id_length: The length of the request ID.
-
-    Returns:
-        A URL-safe, truncated request ID based on the unique key.
-    """
-    # Encode the unique key and compute its SHA-256 hash
-    hashed_key = sha256(unique_key.encode('utf-8')).digest()
-
-    # Encode the hash in base64 and decode it to get a string
-    base64_encoded = b64encode(hashed_key).decode('utf-8')
-
-    # Remove characters that are not URL-safe ('+', '/', or '=')
-    url_safe_key = re.sub(r'(\+|\/|=)', '', base64_encoded)
-
-    # Truncate the key to the desired length
-    return url_safe_key[:request_id_length]
-
-
 class ApifyRequestQueueClient(RequestQueueClient):
-    """
-
-    _DEFAULT_LOCK_TIME: Final[timedelta] = timedelta(minutes=3)
-    """The default lock time for requests in the queue."""
+    """Base class for Apify platform implementations of the request queue client."""

     _MAX_CACHED_REQUESTS: Final[int] = 1_000_000
     """Maximum number of requests that can be cached."""
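The removed `unique_key_to_request_id` helper above derives request IDs deterministically from the unique key: SHA-256, base64-encode, strip the non-URL-safe characters `+`, `/`, `=`, then truncate to 15 characters. A minimal standalone property check of that algorithm (illustrative only; the example key is made up, and the helper no longer exists in this release):

```python
# Standalone re-statement of the removed helper, for illustration only.
import re
from base64 import b64encode
from hashlib import sha256


def unique_key_to_request_id(unique_key: str, *, request_id_length: int = 15) -> str:
    hashed_key = sha256(unique_key.encode('utf-8')).digest()
    base64_encoded = b64encode(hashed_key).decode('utf-8')
    return re.sub(r'(\+|\/|=)', '', base64_encoded)[:request_id_length]


request_id = unique_key_to_request_id('https://example.com/page-1')  # made-up key
assert len(request_id) <= 15                  # truncated to the requested length
assert not set(request_id) & set('+/=')       # URL-safe: '+', '/', '=' stripped
assert request_id == unique_key_to_request_id('https://example.com/page-1')  # deterministic
```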
@@ -75,6 +38,7 @@ class ApifyRequestQueueClient(RequestQueueClient):
         *,
         api_client: RequestQueueClientAsync,
         metadata: RequestQueueMetadata,
+        access: Literal['single', 'shared'] = 'single',
     ) -> None:
         """Initialize a new instance.

@@ -83,35 +47,112 @@ class ApifyRequestQueueClient(RequestQueueClient):
         self._api_client = api_client
         """The Apify request queue client for API operations."""

-        self.
-        """
+        self._implementation: _ApifyRequestQueueSingleClient | _ApifyRequestQueueSharedClient
+        """Internal implementation used to communicate with the Apify platform based Request Queue."""
+        if access == 'single':
+            self._implementation = _ApifyRequestQueueSingleClient(
+                api_client=self._api_client, metadata=metadata, cache_size=self._MAX_CACHED_REQUESTS
+            )
+        elif access == 'shared':
+            self._implementation = _ApifyRequestQueueSharedClient(
+                api_client=self._api_client,
+                metadata=metadata,
+                cache_size=self._MAX_CACHED_REQUESTS,
+                metadata_getter=self.get_metadata,
+            )
+        else:
+            raise RuntimeError(f"Unsupported access type: {access}. Allowed values are 'single' or 'shared'.")
+
+    @property
+    def _metadata(self) -> RequestQueueMetadata:
+        return self._implementation.metadata
+
+    @override
+    async def add_batch_of_requests(
+        self,
+        requests: Sequence[Request],
+        *,
+        forefront: bool = False,
+    ) -> AddRequestsResponse:
+        """Add a batch of requests to the queue.
+
+        Args:
+            requests: The requests to add.
+            forefront: Whether to add the requests to the beginning of the queue.
+
+        Returns:
+            Response containing information about the added requests.
+        """
+        return await self._implementation.add_batch_of_requests(requests, forefront=forefront)
+
+    @override
+    async def fetch_next_request(self) -> Request | None:
+        """Return the next request in the queue to be processed.
+
+        Once you successfully finish processing of the request, you need to call `mark_request_as_handled`
+        to mark the request as handled in the queue. If there was some error in processing the request, call
+        `reclaim_request` instead, so that the queue will give the request to some other consumer
+        in another call to the `fetch_next_request` method.

-
-
+        Returns:
+            The request or `None` if there are no more pending requests.
+        """
+        return await self._implementation.fetch_next_request()

-
-
+    @override
+    async def mark_request_as_handled(self, request: Request) -> ProcessedRequest | None:
+        """Mark a request as handled after successful processing.
+
+        Handled requests will never again be returned by the `fetch_next_request` method.

-
-
+        Args:
+            request: The request to mark as handled.

-
-
+        Returns:
+            Information about the queue operation. `None` if the given request was not in progress.
+        """
+        return await self._implementation.mark_request_as_handled(request)

-
-
+    @override
+    async def get_request(self, unique_key: str) -> Request | None:
+        """Get a request by unique key.

-
-
+        Args:
+            unique_key: Unique key of the request to get.

-
-
-        client, it is the better choice.
+        Returns:
+            The request or None if not found.
         """
-
-
-
-
+        return await self._implementation.get_request(unique_key)
+
+    @override
+    async def reclaim_request(
+        self,
+        request: Request,
+        *,
+        forefront: bool = False,
+    ) -> ProcessedRequest | None:
+        """Reclaim a failed request back to the queue.
+
+        The request will be returned for processing later again by another call to `fetch_next_request`.
+
+        Args:
+            request: The request to return to the queue.
+            forefront: Whether to add the request to the head or the end of the queue.
+
+        Returns:
+            Information about the queue operation. `None` if the given request was not in progress.
+        """
+        return await self._implementation.reclaim_request(request, forefront=forefront)
+
+    @override
+    async def is_empty(self) -> bool:
+        """Check if the queue is empty.
+
+        Returns:
+            True if the queue is empty, False otherwise.
+        """
+        return await self._implementation.is_empty()

     @override
     async def get_metadata(self) -> ApifyRequestQueueMetadata:
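The constructor above turns `ApifyRequestQueueClient` into a thin facade: every public coroutine is forwarded to whichever implementation the `access` argument selected. For that to work, both `_ApifyRequestQueueSingleClient` and `_ApifyRequestQueueSharedClient` have to expose the same surface; spelled out as a structural type it would look roughly like the sketch below (illustrative only, types collapsed to `object`; no such Protocol exists in the package):

```python
# Illustrative Protocol only - not code from the apify package.
from __future__ import annotations

from collections.abc import Sequence
from typing import Protocol


class RequestQueueImplementation(Protocol):
    """Surface that the facade's forwarding methods rely on."""

    metadata: object  # RequestQueueMetadata in the real code

    async def add_batch_of_requests(self, requests: Sequence[object], *, forefront: bool = False) -> object: ...
    async def fetch_next_request(self) -> object | None: ...
    async def mark_request_as_handled(self, request: object) -> object | None: ...
    async def get_request(self, unique_key: str) -> object | None: ...
    async def reclaim_request(self, request: object, *, forefront: bool = False) -> object | None: ...
    async def is_empty(self) -> bool: ...
```

The shared variant additionally receives `metadata_getter=self.get_metadata`, presumably so it can re-read queue metadata on demand rather than trusting a locally cached copy.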
@@ -146,6 +187,7 @@ class ApifyRequestQueueClient(RequestQueueClient):
         name: str | None,
         alias: str | None,
         configuration: Configuration,
+        access: Literal['single', 'shared'] = 'single',
     ) -> ApifyRequestQueueClient:
         """Open an Apify request queue client.

@@ -163,6 +205,17 @@
             configuration: The configuration object containing API credentials and settings. Must include a valid
                 `token` and `api_base_url`. May also contain a `default_request_queue_id` for fallback when neither
                 `id`, `name`, nor `alias` is provided.
+            access: Controls the implementation of the request queue client based on expected scenario:
+                - 'single' is suitable for single consumer scenarios. It makes less API calls, is cheaper and faster.
+                - 'shared' is suitable for multiple consumers scenarios at the cost of higher API usage.
+                Detailed constraints for the 'single' access type:
+                - Only one client is consuming the request queue at the time.
+                - Multiple producers can put requests to the queue, but their forefront requests are not guaranteed to
+                  be handled so quickly as this client does not aggressively fetch the forefront and relies on local
+                  head estimation.
+                - Requests are only added to the queue, never deleted by other clients. (Marking as handled is ok.)
+                - Other producers can add new requests, but not modify existing ones.
+                  (Modifications would not be included in local cache)

         Returns:
             An instance for the opened or created storage client.
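A rough usage sketch of the new `access` option follows. The classmethod name (`open`), the exact keyword set, and `Configuration.get_global_configuration()` are assumptions not confirmed by this diff; only `name`, `alias`, `configuration`, and `access` appear in the signature fragment above:

```python
# Hedged sketch, not verified against the released package.
from apify import Configuration
from apify.storage_clients._apify._request_queue_client import ApifyRequestQueueClient


async def consume_shared_queue() -> None:
    # 'shared' tolerates multiple concurrent consumers at the cost of extra API calls;
    # 'single' (the default) is cheaper when only one consumer reads the queue.
    client = await ApifyRequestQueueClient.open(  # method name assumed
        name='my-queue',
        alias=None,
        configuration=Configuration.get_global_configuration(),
        access='shared',
    )

    while not await client.is_empty():
        request = await client.fetch_next_request()
        if request is None:
            continue
        # ... process the request ...
        await client.mark_request_as_handled(request)
```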
@@ -260,10 +313,7 @@

         metadata_model = RequestQueueMetadata.model_validate(metadata)

-        return cls(
-            api_client=apify_rq_client,
-            metadata=metadata_model,
-        )
+        return cls(api_client=apify_rq_client, metadata=metadata_model, access=access)

     @override
     async def purge(self) -> None:
@@ -275,540 +325,3 @@
     @override
     async def drop(self) -> None:
         await self._api_client.delete()
-
-    @override
-    async def add_batch_of_requests(
-        self,
-        requests: Sequence[Request],
-        *,
-        forefront: bool = False,
-    ) -> AddRequestsResponse:
-        """Add a batch of requests to the queue.
-
-        Args:
-            requests: The requests to add.
-            forefront: Whether to add the requests to the beginning of the queue.
-
-        Returns:
-            Response containing information about the added requests.
-        """
-        # Do not try to add previously added requests to avoid pointless expensive calls to API
-
-        new_requests: list[Request] = []
-        already_present_requests: list[ProcessedRequest] = []
-
-        for request in requests:
-            if self._requests_cache.get(request.unique_key):
-                # We are not sure if it was already handled at this point, and it is not worth calling API for it.
-                # It could have been handled by another client in the meantime, so cached information about
-                # `request.was_already_handled` is not reliable.
-                already_present_requests.append(
-                    ProcessedRequest.model_validate(
-                        {
-                            'uniqueKey': request.unique_key,
-                            'wasAlreadyPresent': True,
-                            'wasAlreadyHandled': request.was_already_handled,
-                        }
-                    )
-                )
-
-            else:
-                # Add new request to the cache.
-                processed_request = ProcessedRequest.model_validate(
-                    {
-                        'uniqueKey': request.unique_key,
-                        'wasAlreadyPresent': True,
-                        'wasAlreadyHandled': request.was_already_handled,
-                    }
-                )
-                self._cache_request(
-                    request.unique_key,
-                    processed_request,
-                )
-                new_requests.append(request)
-
-        if new_requests:
-            # Prepare requests for API by converting to dictionaries.
-            requests_dict = [
-                request.model_dump(
-                    by_alias=True,
-                    exclude={'id'},  # Exclude ID fields from requests since the API doesn't accept them.
-                )
-                for request in new_requests
-            ]
-
-            # Send requests to API.
-            api_response = AddRequestsResponse.model_validate(
-                await self._api_client.batch_add_requests(requests=requests_dict, forefront=forefront)
-            )
-
-            # Add the locally known already present processed requests based on the local cache.
-            api_response.processed_requests.extend(already_present_requests)
-
-            # Remove unprocessed requests from the cache
-            for unprocessed_request in api_response.unprocessed_requests:
-                self._requests_cache.pop(unprocessed_request.unique_key, None)
-
-        else:
-            api_response = AddRequestsResponse.model_validate(
-                {'unprocessedRequests': [], 'processedRequests': already_present_requests}
-            )
-
-        logger.debug(
-            f'Tried to add new requests: {len(new_requests)}, '
-            f'succeeded to add new requests: {len(api_response.processed_requests) - len(already_present_requests)}, '
-            f'skipped already present requests: {len(already_present_requests)}'
-        )
-
-        # Update assumed total count for newly added requests.
-        new_request_count = 0
-        for processed_request in api_response.processed_requests:
-            if not processed_request.was_already_present and not processed_request.was_already_handled:
-                new_request_count += 1
-
-        self._metadata.total_request_count += new_request_count
-
-        return api_response
-
-    @override
-    async def get_request(self, unique_key: str) -> Request | None:
-        """Get a request by unique key.
-
-        Args:
-            unique_key: Unique key of the request to get.
-
-        Returns:
-            The request or None if not found.
-        """
-        response = await self._api_client.get_request(unique_key_to_request_id(unique_key))
-
-        if response is None:
-            return None
-
-        return Request.model_validate(response)
-
-    @override
-    async def fetch_next_request(self) -> Request | None:
-        """Return the next request in the queue to be processed.
-
-        Once you successfully finish processing of the request, you need to call `mark_request_as_handled`
-        to mark the request as handled in the queue. If there was some error in processing the request, call
-        `reclaim_request` instead, so that the queue will give the request to some other consumer
-        in another call to the `fetch_next_request` method.
-
-        Returns:
-            The request or `None` if there are no more pending requests.
-        """
-        # Ensure the queue head has requests if available. Fetching the head with lock to prevent race conditions.
-        async with self._fetch_lock:
-            await self._ensure_head_is_non_empty()
-
-            # If queue head is empty after ensuring, there are no requests
-            if not self._queue_head:
-                return None
-
-            # Get the next request ID from the queue head
-            next_unique_key = self._queue_head.popleft()
-
-        request = await self._get_or_hydrate_request(next_unique_key)
-
-        # Handle potential inconsistency where request might not be in the main table yet
-        if request is None:
-            logger.debug(
-                'Cannot find a request from the beginning of queue, will be retried later',
-                extra={'nextRequestUniqueKey': next_unique_key},
-            )
-            return None
-
-        # If the request was already handled, skip it
-        if request.handled_at is not None:
-            logger.debug(
-                'Request fetched from the beginning of queue was already handled',
-                extra={'nextRequestUniqueKey': next_unique_key},
-            )
-            return None
-
-        # Use get request to ensure we have the full request object.
-        request = await self.get_request(request.unique_key)
-        if request is None:
-            logger.debug(
-                'Request fetched from the beginning of queue was not found in the RQ',
-                extra={'nextRequestUniqueKey': next_unique_key},
-            )
-            return None
-
-        return request
-
-    @override
-    async def mark_request_as_handled(self, request: Request) -> ProcessedRequest | None:
-        """Mark a request as handled after successful processing.
-
-        Handled requests will never again be returned by the `fetch_next_request` method.
-
-        Args:
-            request: The request to mark as handled.
-
-        Returns:
-            Information about the queue operation. `None` if the given request was not in progress.
-        """
-        # Set the handled_at timestamp if not already set
-        if request.handled_at is None:
-            request.handled_at = datetime.now(tz=timezone.utc)
-
-        if cached_request := self._requests_cache[request.unique_key]:
-            cached_request.was_already_handled = request.was_already_handled
-        try:
-            # Update the request in the API
-            processed_request = await self._update_request(request)
-            processed_request.unique_key = request.unique_key
-
-            # Update assumed handled count if this wasn't already handled
-            if not processed_request.was_already_handled:
-                self._metadata.handled_request_count += 1
-
-            # Update the cache with the handled request
-            cache_key = request.unique_key
-            self._cache_request(
-                cache_key,
-                processed_request,
-                hydrated_request=request,
-            )
-        except Exception as exc:
-            logger.debug(f'Error marking request {request.unique_key} as handled: {exc!s}')
-            return None
-        else:
-            return processed_request
-
-    @override
-    async def reclaim_request(
-        self,
-        request: Request,
-        *,
-        forefront: bool = False,
-    ) -> ProcessedRequest | None:
-        """Reclaim a failed request back to the queue.
-
-        The request will be returned for processing later again by another call to `fetch_next_request`.
-
-        Args:
-            request: The request to return to the queue.
-            forefront: Whether to add the request to the head or the end of the queue.
-
-        Returns:
-            Information about the queue operation. `None` if the given request was not in progress.
-        """
-        # Check if the request was marked as handled and clear it. When reclaiming,
-        # we want to put the request back for processing.
-        if request.was_already_handled:
-            request.handled_at = None
-
-        # Reclaim with lock to prevent race conditions that could lead to double processing of the same request.
-        async with self._fetch_lock:
-            try:
-                # Update the request in the API.
-                processed_request = await self._update_request(request, forefront=forefront)
-                processed_request.unique_key = request.unique_key
-
-                # If the request was previously handled, decrement our handled count since
-                # we're putting it back for processing.
-                if request.was_already_handled and not processed_request.was_already_handled:
-                    self._metadata.handled_request_count -= 1
-
-                # Update the cache
-                cache_key = request.unique_key
-                self._cache_request(
-                    cache_key,
-                    processed_request,
-                    hydrated_request=request,
-                )
-
-                # If we're adding to the forefront, we need to check for forefront requests
-                # in the next list_head call
-                if forefront:
-                    self._should_check_for_forefront_requests = True
-
-                # Try to release the lock on the request
-                try:
-                    await self._delete_request_lock(request.unique_key, forefront=forefront)
-                except Exception as err:
-                    logger.debug(f'Failed to delete request lock for request {request.unique_key}', exc_info=err)
-            except Exception as exc:
-                logger.debug(f'Error reclaiming request {request.unique_key}: {exc!s}')
-                return None
-            else:
-                return processed_request
-
-    @override
-    async def is_empty(self) -> bool:
-        """Check if the queue is empty.
-
-        Returns:
-            True if the queue is empty, False otherwise.
-        """
-        # Check _list_head and self._queue_has_locked_requests with lock to make sure they are consistent.
-        # Without the lock the `is_empty` is prone to falsely report True with some low probability race condition.
-        async with self._fetch_lock:
-            head = await self._list_head(limit=1, lock_time=None)
-            return len(head.items) == 0 and not self._queue_has_locked_requests
-
-    async def _ensure_head_is_non_empty(self) -> None:
-        """Ensure that the queue head has requests if they are available in the queue."""
-        # If queue head has adequate requests, skip fetching more
-        if len(self._queue_head) > 1 and not self._should_check_for_forefront_requests:
-            return
-
-        # Fetch requests from the API and populate the queue head
-        await self._list_head(lock_time=self._DEFAULT_LOCK_TIME)
-
-    async def _get_or_hydrate_request(self, unique_key: str) -> Request | None:
-        """Get a request by unique key, either from cache or by fetching from API.
-
-        Args:
-            unique_key: Unique key of the request to get.
-
-        Returns:
-            The request if found and valid, otherwise None.
-        """
-        # First check if the request is in our cache
-        cached_entry = self._requests_cache.get(unique_key)
-
-        if cached_entry and cached_entry.hydrated:
-            # If we have the request hydrated in cache, check if lock is expired
-            if cached_entry.lock_expires_at and cached_entry.lock_expires_at < datetime.now(tz=timezone.utc):
-                # Try to prolong the lock if it's expired
-                try:
-                    lock_secs = int(self._DEFAULT_LOCK_TIME.total_seconds())
-                    response = await self._prolong_request_lock(unique_key, lock_secs=lock_secs)
-                    cached_entry.lock_expires_at = response.lock_expires_at
-                except Exception:
-                    # If prolonging the lock fails, we lost the request
-                    logger.debug(f'Failed to prolong lock for request {unique_key}, returning None')
-                    return None
-
-            return cached_entry.hydrated
-
-        # If not in cache or not hydrated, fetch the request
-        try:
-            # Try to acquire or prolong the lock
-            lock_secs = int(self._DEFAULT_LOCK_TIME.total_seconds())
-            await self._prolong_request_lock(unique_key, lock_secs=lock_secs)
-
-            # Fetch the request data
-            request = await self.get_request(unique_key)
-
-            # If request is not found, release lock and return None
-            if not request:
-                await self._delete_request_lock(unique_key)
-                return None
-
-            # Update cache with hydrated request
-            cache_key = request.unique_key
-            self._cache_request(
-                cache_key,
-                ProcessedRequest(
-                    unique_key=request.unique_key,
-                    was_already_present=True,
-                    was_already_handled=request.handled_at is not None,
-                ),
-                hydrated_request=request,
-            )
-        except Exception as exc:
-            logger.debug(f'Error fetching or locking request {unique_key}: {exc!s}')
-            return None
-        else:
-            return request
-
-    async def _update_request(
-        self,
-        request: Request,
-        *,
-        forefront: bool = False,
-    ) -> ProcessedRequest:
-        """Update a request in the queue.
-
-        Args:
-            request: The updated request.
-            forefront: Whether to put the updated request in the beginning or the end of the queue.
-
-        Returns:
-            The updated request
-        """
-        request_dict = request.model_dump(by_alias=True)
-        request_dict['id'] = unique_key_to_request_id(request.unique_key)
-        response = await self._api_client.update_request(
-            request=request_dict,
-            forefront=forefront,
-        )
-
-        return ProcessedRequest.model_validate(
-            {'uniqueKey': request.unique_key} | response,
-        )
-
-    async def _list_head(
-        self,
-        *,
-        lock_time: timedelta | None = None,
-        limit: int = 25,
-    ) -> RequestQueueHead:
-        """Retrieve requests from the beginning of the queue.
-
-        Args:
-            lock_time: Duration for which to lock the retrieved requests.
-                If None, requests will not be locked.
-            limit: Maximum number of requests to retrieve.
-
-        Returns:
-            A collection of requests from the beginning of the queue.
-        """
-        # Return from cache if available and we're not checking for new forefront requests
-        if self._queue_head and not self._should_check_for_forefront_requests:
-            logger.debug(f'Using cached queue head with {len(self._queue_head)} requests')
-            # Create a list of requests from the cached queue head
-            items = []
-            for unique_key in list(self._queue_head)[:limit]:
-                cached_request = self._requests_cache.get(unique_key)
-                if cached_request and cached_request.hydrated:
-                    items.append(cached_request.hydrated)
-
-            metadata = await self._get_metadata_estimate()
-
-            return RequestQueueHead(
-                limit=limit,
-                had_multiple_clients=metadata.had_multiple_clients,
-                queue_modified_at=metadata.modified_at,
-                items=items,
-                queue_has_locked_requests=self._queue_has_locked_requests,
-                lock_time=lock_time,
-            )
-        leftover_buffer = list[str]()
-        if self._should_check_for_forefront_requests:
-            leftover_buffer = list(self._queue_head)
-            self._queue_head.clear()
-            self._should_check_for_forefront_requests = False
-
-        # Otherwise fetch from API
-        lock_time = lock_time or self._DEFAULT_LOCK_TIME
-        lock_secs = int(lock_time.total_seconds())
-
-        response = await self._api_client.list_and_lock_head(
-            lock_secs=lock_secs,
-            limit=limit,
-        )
-
-        # Update the queue head cache
-        self._queue_has_locked_requests = response.get('queueHasLockedRequests', False)
-        # Check if there is another client working with the RequestQueue
-        self._metadata.had_multiple_clients = response.get('hadMultipleClients', False)
-
-        for request_data in response.get('items', []):
-            request = Request.model_validate(request_data)
-
-            # Skip requests without ID or unique key
-            if not request.unique_key:
-                logger.debug(
-                    'Skipping request from queue head, missing ID or unique key',
-                    extra={
-                        'unique_key': request.unique_key,
-                    },
-                )
-                continue
-
-            # Cache the request
-            self._cache_request(
-                request.unique_key,
-                ProcessedRequest(
-                    unique_key=request.unique_key,
-                    was_already_present=True,
-                    was_already_handled=False,
-                ),
-                hydrated_request=request,
-            )
-            self._queue_head.append(request.unique_key)
-
-        for leftover_unique_key in leftover_buffer:
-            # After adding new requests to the forefront, any existing leftover locked request is kept in the end.
-            self._queue_head.append(leftover_unique_key)
-        return RequestQueueHead.model_validate(response)
-
-    async def _prolong_request_lock(
-        self,
-        unique_key: str,
-        *,
-        lock_secs: int,
-    ) -> ProlongRequestLockResponse:
-        """Prolong the lock on a specific request in the queue.
-
-        Args:
-            unique_key: Unique key of the request whose lock is to be prolonged.
-            lock_secs: The additional amount of time, in seconds, that the request will remain locked.
-
-        Returns:
-            A response containing the time at which the lock will expire.
-        """
-        response = await self._api_client.prolong_request_lock(
-            request_id=unique_key_to_request_id(unique_key),
-            # All requests reaching this code were the tip of the queue at the moment when they were fetched,
-            # so if their lock expires, they should be put back to the forefront as their handling is long overdue.
-            forefront=True,
-            lock_secs=lock_secs,
-        )
-
-        result = ProlongRequestLockResponse(
-            lock_expires_at=datetime.fromisoformat(response['lockExpiresAt'].replace('Z', '+00:00'))
-        )
-
-        # Update the cache with the new lock expiration
-        for cached_request in self._requests_cache.values():
-            if cached_request.unique_key == unique_key:
-                cached_request.lock_expires_at = result.lock_expires_at
-                break
-
-        return result
-
-    async def _delete_request_lock(
-        self,
-        unique_key: str,
-        *,
-        forefront: bool = False,
-    ) -> None:
-        """Delete the lock on a specific request in the queue.
-
-        Args:
-            unique_key: Unique key of the request to delete the lock.
-            forefront: Whether to put the request in the beginning or the end of the queue after the lock is deleted.
-        """
-        try:
-            await self._api_client.delete_request_lock(
-                request_id=unique_key_to_request_id(unique_key),
-                forefront=forefront,
-            )
-
-            # Update the cache to remove the lock
-            for cached_request in self._requests_cache.values():
-                if cached_request.unique_key == unique_key:
-                    cached_request.lock_expires_at = None
-                    break
-        except Exception as err:
-            logger.debug(f'Failed to delete request lock for request {unique_key}', exc_info=err)
-
-    def _cache_request(
-        self,
-        cache_key: str,
-        processed_request: ProcessedRequest,
-        *,
-        hydrated_request: Request | None = None,
-    ) -> None:
-        """Cache a request for future use.
-
-        Args:
-            cache_key: The key to use for caching the request. It should be request ID.
-            processed_request: The processed request information.
-            forefront: Whether the request was added to the forefront of the queue.
-            hydrated_request: The hydrated request object, if available.
-        """
-        self._requests_cache[cache_key] = CachedRequest(
-            unique_key=processed_request.unique_key,
-            was_already_handled=processed_request.was_already_handled,
-            hydrated=hydrated_request,
-            lock_expires_at=None,
-        )