crawlee 1.0.2b3__py3-none-any.whl → 1.1.2b7__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
This version of crawlee has been flagged as potentially problematic.
- crawlee/_request.py +32 -21
- crawlee/_service_locator.py +4 -4
- crawlee/_types.py +30 -17
- crawlee/_utils/context.py +2 -2
- crawlee/_utils/file.py +7 -0
- crawlee/_utils/recoverable_state.py +32 -8
- crawlee/_utils/recurring_task.py +17 -1
- crawlee/_utils/robots.py +17 -5
- crawlee/_utils/sitemap.py +1 -1
- crawlee/_utils/time.py +41 -1
- crawlee/_utils/urls.py +9 -2
- crawlee/browsers/_browser_pool.py +4 -1
- crawlee/browsers/_playwright_browser_controller.py +1 -1
- crawlee/browsers/_playwright_browser_plugin.py +17 -3
- crawlee/browsers/_types.py +1 -1
- crawlee/configuration.py +3 -1
- crawlee/crawlers/__init__.py +2 -1
- crawlee/crawlers/_abstract_http/__init__.py +2 -1
- crawlee/crawlers/_abstract_http/_abstract_http_crawler.py +47 -11
- crawlee/crawlers/_adaptive_playwright/_adaptive_playwright_crawler.py +33 -13
- crawlee/crawlers/_adaptive_playwright/_adaptive_playwright_crawling_context.py +6 -2
- crawlee/crawlers/_basic/_basic_crawler.py +126 -112
- crawlee/crawlers/_beautifulsoup/_beautifulsoup_crawler.py +2 -2
- crawlee/crawlers/_parsel/_parsel_crawler.py +2 -2
- crawlee/crawlers/_playwright/_playwright_crawler.py +55 -11
- crawlee/crawlers/_playwright/_playwright_http_client.py +7 -1
- crawlee/crawlers/_playwright/_playwright_pre_nav_crawling_context.py +4 -1
- crawlee/crawlers/_playwright/_types.py +12 -2
- crawlee/events/_event_manager.py +4 -4
- crawlee/fingerprint_suite/_header_generator.py +2 -2
- crawlee/http_clients/_base.py +4 -0
- crawlee/http_clients/_curl_impersonate.py +12 -0
- crawlee/http_clients/_httpx.py +16 -6
- crawlee/http_clients/_impit.py +25 -10
- crawlee/otel/crawler_instrumentor.py +3 -3
- crawlee/request_loaders/_sitemap_request_loader.py +22 -4
- crawlee/sessions/_session_pool.py +1 -1
- crawlee/statistics/_error_snapshotter.py +1 -1
- crawlee/statistics/_models.py +32 -1
- crawlee/statistics/_statistics.py +24 -33
- crawlee/storage_clients/__init__.py +4 -0
- crawlee/storage_clients/_file_system/_dataset_client.py +2 -2
- crawlee/storage_clients/_file_system/_key_value_store_client.py +3 -3
- crawlee/storage_clients/_file_system/_request_queue_client.py +27 -9
- crawlee/storage_clients/_redis/__init__.py +6 -0
- crawlee/storage_clients/_redis/_client_mixin.py +295 -0
- crawlee/storage_clients/_redis/_dataset_client.py +325 -0
- crawlee/storage_clients/_redis/_key_value_store_client.py +264 -0
- crawlee/storage_clients/_redis/_request_queue_client.py +586 -0
- crawlee/storage_clients/_redis/_storage_client.py +146 -0
- crawlee/storage_clients/_redis/_utils.py +23 -0
- crawlee/storage_clients/_redis/lua_scripts/atomic_bloom_add_requests.lua +36 -0
- crawlee/storage_clients/_redis/lua_scripts/atomic_fetch_request.lua +49 -0
- crawlee/storage_clients/_redis/lua_scripts/atomic_set_add_requests.lua +37 -0
- crawlee/storage_clients/_redis/lua_scripts/reclaim_stale_requests.lua +34 -0
- crawlee/storage_clients/_redis/py.typed +0 -0
- crawlee/storage_clients/_sql/_db_models.py +1 -2
- crawlee/storage_clients/_sql/_key_value_store_client.py +3 -2
- crawlee/storage_clients/_sql/_request_queue_client.py +18 -4
- crawlee/storage_clients/_sql/_storage_client.py +1 -1
- crawlee/storages/_key_value_store.py +5 -2
- {crawlee-1.0.2b3.dist-info → crawlee-1.1.2b7.dist-info}/METADATA +8 -3
- {crawlee-1.0.2b3.dist-info → crawlee-1.1.2b7.dist-info}/RECORD +66 -54
- {crawlee-1.0.2b3.dist-info → crawlee-1.1.2b7.dist-info}/WHEEL +1 -1
- {crawlee-1.0.2b3.dist-info → crawlee-1.1.2b7.dist-info}/entry_points.txt +0 -0
- {crawlee-1.0.2b3.dist-info → crawlee-1.1.2b7.dist-info}/licenses/LICENSE +0 -0
crawlee/storage_clients/_redis/_storage_client.py
@@ -0,0 +1,146 @@
+from __future__ import annotations
+
+import warnings
+from typing import Literal
+
+from redis.asyncio import Redis
+from typing_extensions import override
+
+from crawlee._utils.docs import docs_group
+from crawlee.configuration import Configuration
+from crawlee.storage_clients._base import StorageClient
+
+from ._dataset_client import RedisDatasetClient
+from ._key_value_store_client import RedisKeyValueStoreClient
+from ._request_queue_client import RedisRequestQueueClient
+
+
+@docs_group('Storage clients')
+class RedisStorageClient(StorageClient):
+    """Redis implementation of the storage client.
+
+    This storage client provides access to datasets, key-value stores, and request queues that persist data
+    to a Redis database v8.0+. Each storage type uses Redis-specific data structures and key patterns for
+    efficient storage and retrieval.
+
+    The client accepts either a Redis connection string or a pre-configured Redis client instance.
+    Exactly one of these parameters must be provided during initialization.
+
+    Storage types use the following Redis data structures:
+    - **Datasets**: Redis JSON arrays for item storage with metadata in JSON objects
+    - **Key-value stores**: Redis hashes for key-value pairs with separate metadata storage
+    - **Request queues**: Redis lists for FIFO queuing, hashes for request data and in-progress tracking,
+      and Bloom filters for request deduplication
+
+    Warning:
+        This is an experimental feature. The behavior and interface may change in future versions.
+    """
+
+    def __init__(
+        self,
+        *,
+        connection_string: str | None = None,
+        redis: Redis | None = None,
+        queue_dedup_strategy: Literal['default', 'bloom'] = 'default',
+        queue_bloom_error_rate: float = 1e-7,
+    ) -> None:
+        """Initialize the Redis storage client.
+
+        Args:
+            connection_string: Redis connection string (e.g., "redis://localhost:6379").
+                Supports standard Redis URL format with optional database selection.
+            redis: Pre-configured Redis client instance.
+            queue_dedup_strategy: Strategy for request queue deduplication. Options are:
+                - 'default': Uses Redis sets for exact deduplication.
+                - 'bloom': Uses Redis Bloom filters for probabilistic deduplication with lower memory usage. With
+                  this approach, roughly 1 in 10 million requests is falsely considered a duplicate (error rate 1e-7).
+            queue_bloom_error_rate: Desired false positive rate for Bloom filter deduplication. Only relevant if
+                `queue_dedup_strategy` is set to 'bloom'.
+        """
+        match (redis, connection_string):
+            case (None, None):
+                raise ValueError('Either redis or connection_string must be provided.')
+            case (Redis(), None):
+                self._redis = redis
+            case (None, str()):
+                self._redis = Redis.from_url(connection_string)
+            case (Redis(), str()):
+                raise ValueError('Either redis or connection_string must be provided, not both.')
+
+        self._queue_dedup_strategy = queue_dedup_strategy
+        self._queue_bloom_error_rate = queue_bloom_error_rate
+
+        # Call the notification only once
+        warnings.warn(
+            (
+                'RedisStorageClient is experimental and its API, behavior, and key structure may change in future '
+                'releases.'
+            ),
+            category=UserWarning,
+            stacklevel=2,
+        )
+
+    @override
+    async def create_dataset_client(
+        self,
+        *,
+        id: str | None = None,
+        name: str | None = None,
+        alias: str | None = None,
+        configuration: Configuration | None = None,
+    ) -> RedisDatasetClient:
+        configuration = configuration or Configuration.get_global_configuration()
+
+        client = await RedisDatasetClient.open(
+            id=id,
+            name=name,
+            alias=alias,
+            redis=self._redis,
+        )
+
+        await self._purge_if_needed(client, configuration)
+        return client
+
+    @override
+    async def create_kvs_client(
+        self,
+        *,
+        id: str | None = None,
+        name: str | None = None,
+        alias: str | None = None,
+        configuration: Configuration | None = None,
+    ) -> RedisKeyValueStoreClient:
+        configuration = configuration or Configuration.get_global_configuration()
+
+        client = await RedisKeyValueStoreClient.open(
+            id=id,
+            name=name,
+            alias=alias,
+            redis=self._redis,
+        )
+
+        await self._purge_if_needed(client, configuration)
+        return client
+
+    @override
+    async def create_rq_client(
+        self,
+        *,
+        id: str | None = None,
+        name: str | None = None,
+        alias: str | None = None,
+        configuration: Configuration | None = None,
+    ) -> RedisRequestQueueClient:
+        configuration = configuration or Configuration.get_global_configuration()
+
+        client = await RedisRequestQueueClient.open(
+            id=id,
+            name=name,
+            alias=alias,
+            redis=self._redis,
+            dedup_strategy=self._queue_dedup_strategy,
+            bloom_error_rate=self._queue_bloom_error_rate,
+        )
+
+        await self._purge_if_needed(client, configuration)
+        return client
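For orientation, a minimal usage sketch of the new client. The `storage_client=` wiring follows Crawlee's existing storage-client convention, and the export from `crawlee.storage_clients` is consistent with the +4 lines in `storage_clients/__init__.py` listed above; values shown are illustrative:

    # Requires the new extra: pip install 'crawlee[redis]'
    from crawlee.crawlers import ParselCrawler
    from crawlee.storage_clients import RedisStorageClient

    # Per __init__ above, pass exactly one of connection_string / redis.
    storage_client = RedisStorageClient(
        connection_string='redis://localhost:6379',
        queue_dedup_strategy='bloom',  # probabilistic dedup, lower memory
        queue_bloom_error_rate=1e-7,
    )
    crawler = ParselCrawler(storage_client=storage_client)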
crawlee/storage_clients/_redis/_utils.py
@@ -0,0 +1,23 @@
+from collections.abc import Awaitable
+from pathlib import Path
+from typing import TypeVar, overload
+
+T = TypeVar('T')
+
+
+@overload
+async def await_redis_response(response: Awaitable[T]) -> T: ...
+@overload
+async def await_redis_response(response: T) -> T: ...
+
+
+async def await_redis_response(response: Awaitable[T] | T) -> T:
+    """Resolve the ambiguous return typing of redis-py commands."""
+    return await response if isinstance(response, Awaitable) else response
+
+
+def read_lua_script(script_name: str) -> str:
+    """Read a Lua script from a file."""
+    file_path = Path(__file__).parent / 'lua_scripts' / script_name
+    with file_path.open('r', encoding='utf-8') as file:
+        return file.read()
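redis-py annotates many command methods as returning `Union[Awaitable[T], T]` so the same signatures can serve both the sync and async clients; `await_redis_response` normalizes that at call sites. A hedged illustration of the resulting pattern (the function name is hypothetical):

    from redis.asyncio import Redis

    # await_redis_response as defined in _utils.py above.
    async def field_exists(redis: Redis, hash_key: str, field: str) -> bool:
        # hexists() is typed as Union[Awaitable[bool], bool]; the helper
        # awaits it only when it is actually awaitable.
        return bool(await await_redis_response(redis.hexists(hash_key, field)))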
crawlee/storage_clients/_redis/lua_scripts/atomic_bloom_add_requests.lua
@@ -0,0 +1,36 @@
+local added_filter_key = KEYS[1]
+local queue_key = KEYS[2]
+local data_key = KEYS[3]
+
+local forefront = ARGV[1] == '1'
+local unique_keys = cjson.decode(ARGV[2])
+local requests_data = cjson.decode(ARGV[3])
+
+-- Add and check which unique keys are actually new using Bloom filter
+local bf_results = redis.call('bf.madd', added_filter_key, unpack(unique_keys))
+
+local actually_added = {}
+local hset_args = {}
+
+-- Process the results
+for i, unique_key in ipairs(unique_keys) do
+    if bf_results[i] == 1 then
+        -- This key was added by us (did not exist before)
+        table.insert(hset_args, unique_key)
+        table.insert(hset_args, requests_data[unique_key])
+        table.insert(actually_added, unique_key)
+    end
+end
+
+-- Add only those that are actually new
+if #actually_added > 0 then
+    redis.call('hset', data_key, unpack(hset_args))
+
+    if forefront then
+        redis.call('lpush', queue_key, unpack(actually_added))
+    else
+        redis.call('rpush', queue_key, unpack(actually_added))
+    end
+end
+
+return cjson.encode(actually_added)
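A script like this is presumably loaded via `read_lua_script` and registered with redis-py, whose `register_script` returns a callable taking KEYS and ARGV. Note that `bf.madd` requires the Bloom filter data type, which matches the Redis v8.0+ requirement stated in the RedisStorageClient docstring. A sketch with illustrative key names:

    import json
    from redis.asyncio import Redis

    async def add_requests_bloom(redis: Redis, requests: dict[str, str]) -> list[str]:
        # requests maps unique_key -> JSON-serialized request payload.
        script = redis.register_script(read_lua_script('atomic_bloom_add_requests.lua'))
        result = await script(
            keys=['rq:added-bloom', 'rq:queue', 'rq:data'],                 # KEYS[1..3]
            args=['0', json.dumps(list(requests)), json.dumps(requests)],  # ARGV[1..3]
        )
        return json.loads(result)  # unique keys that were actually new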
crawlee/storage_clients/_redis/lua_scripts/atomic_fetch_request.lua
@@ -0,0 +1,49 @@
+local queue_key = KEYS[1]
+local in_progress_key = KEYS[2]
+local data_key = KEYS[3]
+local client_id = ARGV[1]
+local blocked_until_timestamp = ARGV[2]
+local batch_size = tonumber(ARGV[3])
+
+-- Pop a batch of unique keys from the queue
+local batch_result = redis.call('LMPOP', 1, queue_key, 'LEFT', 'COUNT', batch_size)
+if not batch_result then
+    return nil
+end
+local unique_keys = batch_result[2]
+
+-- Get requests data
+local requests_data = redis.call('HMGET', data_key, unpack(unique_keys))
+if not requests_data then
+    -- Data missing, skip this request
+    return nil
+end
+
+-- Prepare results and update in_progress
+local final_result = {}
+local in_progress_hmset = {}
+local pending_decrement = 0
+local in_progress_data = cjson.encode({
+    client_id = client_id,
+    blocked_until_timestamp = tonumber(blocked_until_timestamp)
+})
+for i = 1, #unique_keys do
+    local unique_key = unique_keys[i]
+    local request_data = requests_data[i]
+
+    if request_data then
+        -- Add to in_progress hash
+        table.insert(in_progress_hmset, unique_key)
+        table.insert(in_progress_hmset, in_progress_data)
+
+        table.insert(final_result, request_data)
+    end
+end
+
+-- Update in_progress hash
+if #in_progress_hmset > 0 then
+    redis.call('HMSET', in_progress_key, unpack(in_progress_hmset))
+end
+
+-- Return result with requests data
+return final_result
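A fetched request is leased rather than deleted: it moves into the in-progress hash together with the client id and a `blocked_until_timestamp`, and stays invisible to other clients until that deadline passes. A caller-side sketch, assuming a registered script object; the lease length and key names are assumptions:

    import json
    import time

    LEASE_SECONDS = 300  # assumed; the real block time is a queue-client constant

    async def fetch_batch(fetch_script, client_id: str, batch_size: int = 10) -> list[dict]:
        raw = await fetch_script(
            keys=['rq:queue', 'rq:in-progress', 'rq:data'],
            args=[client_id, str(time.time() + LEASE_SECONDS), str(batch_size)],
        )
        return [json.loads(item) for item in raw or []]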
crawlee/storage_clients/_redis/lua_scripts/atomic_set_add_requests.lua
@@ -0,0 +1,37 @@
+local added_filter_key = KEYS[1]
+local queue_key = KEYS[2]
+local data_key = KEYS[3]
+
+local forefront = ARGV[1] == '1'
+local unique_keys = cjson.decode(ARGV[2])
+local requests_data = cjson.decode(ARGV[3])
+
+-- Add and check which unique keys are actually new using Redis set
+local actually_added = {}
+local hset_args = {}
+
+-- Process each unique key
+for _, unique_key in ipairs(unique_keys) do
+    -- Try to add the key to the set, returns 1 if added, 0 if already existed
+    local set_result = redis.call('sadd', added_filter_key, unique_key)
+
+    if set_result == 1 then
+        -- This key was added by us (did not exist before)
+        table.insert(hset_args, unique_key)
+        table.insert(hset_args, requests_data[unique_key])
+        table.insert(actually_added, unique_key)
+    end
+end
+
+-- Add only those that are actually new
+if #actually_added > 0 then
+    redis.call('hset', data_key, unpack(hset_args))
+
+    if forefront then
+        redis.call('lpush', queue_key, unpack(actually_added))
+    else
+        redis.call('rpush', queue_key, unpack(actually_added))
+    end
+end
+
+return cjson.encode(actually_added)
crawlee/storage_clients/_redis/lua_scripts/reclaim_stale_requests.lua
@@ -0,0 +1,34 @@
+local in_progress_key = KEYS[1]
+local queue_key = KEYS[2]
+local data_key = KEYS[3]
+local current_time = tonumber(ARGV[1])
+
+local max_reclaim = 1000
+
+local cursor = "0"
+local count = 0
+
+repeat
+    local result = redis.call('hscan', in_progress_key, cursor, 'COUNT', 100)
+    cursor = result[1]
+    local entries = result[2]
+
+    for i = 1, #entries, 2 do
+        if count >= max_reclaim then
+            break
+        end
+
+        local unique_key = entries[i]
+        local data = cjson.decode(entries[i + 1])
+
+        -- Check if timed out
+        if current_time > data.blocked_until_timestamp then
+            -- Atomically remove from in_progress and add back to queue
+            redis.call('hdel', in_progress_key, unique_key)
+            redis.call('rpush', queue_key, unique_key)
+            count = count + 1
+        end
+    end
+until cursor == "0" or count >= max_reclaim
+
+return count
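Each invocation scans the in-progress hash and re-queues at most 1000 expired leases, so recovery is incremental and presumably driven by a recurring task (compare the `crawlee/_utils/recurring_task.py` change in the file list above). A plain asyncio sketch of such a driver, with assumed key names:

    import asyncio
    import time

    async def reclaim_periodically(reclaim_script, interval: float = 5.0) -> None:
        while True:
            reclaimed = await reclaim_script(
                keys=['rq:in-progress', 'rq:queue', 'rq:data'],
                args=[str(time.time())],
            )
            # If the 1000-entry cap was hit, run again immediately.
            await asyncio.sleep(0 if reclaimed >= 1000 else interval)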
crawlee/storage_clients/_redis/py.typed
(file without changes)
crawlee/storage_clients/_sql/_db_models.py
@@ -205,9 +205,8 @@ class RequestDb(Base):
             'idx_fetch_available',
             'request_queue_id',
             'is_handled',
-            'time_blocked_until',
             'sequence_number',
-            postgresql_where=text('is_handled
+            postgresql_where=text('is_handled is false'),
         ),
     )
 
crawlee/storage_clients/_sql/_key_value_store_client.py
@@ -2,9 +2,9 @@ from __future__ import annotations
 
 import json
 from logging import getLogger
-from typing import TYPE_CHECKING, Any
+from typing import TYPE_CHECKING, Any, cast
 
-from sqlalchemy import delete, select
+from sqlalchemy import CursorResult, delete, select
 from typing_extensions import Self, override
 
 from crawlee._utils.file import infer_mime_type
@@ -227,6 +227,7 @@ class SqlKeyValueStoreClient(KeyValueStoreClient, SqlClientMixin):
         async with self.get_session(with_simple_commit=True) as session:
             # Delete the record if it exists
             result = await session.execute(stmt)
+            result = cast('CursorResult', result) if not isinstance(result, CursorResult) else result
 
             # Update metadata if we actually deleted something
             if result.rowcount > 0:
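The repeated `cast('CursorResult', ...)` lines in this and the following hunks only narrow the static type: `session.execute()` is annotated as returning a generic `Result`, `.rowcount` is defined on `CursorResult`, and `cast()` is a no-op at runtime, so behavior is unchanged. Reduced to its essence (helper name hypothetical):

    from typing import cast

    from sqlalchemy import CursorResult, Result

    def as_cursor_result(result: Result) -> CursorResult:
        # isinstance keeps the runtime honest; cast() merely satisfies the type checker.
        return result if isinstance(result, CursorResult) else cast('CursorResult', result)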
crawlee/storage_clients/_sql/_request_queue_client.py
@@ -5,9 +5,9 @@ from datetime import datetime, timedelta, timezone
 from functools import lru_cache
 from hashlib import sha256
 from logging import getLogger
-from typing import TYPE_CHECKING, Any
+from typing import TYPE_CHECKING, Any, cast
 
-from sqlalchemy import func, or_, select, update
+from sqlalchemy import CursorResult, func, or_, select, update
 from sqlalchemy.exc import SQLAlchemyError
 from sqlalchemy.orm import load_only
 from typing_extensions import NotRequired, Self, override
@@ -231,6 +231,7 @@ class SqlRequestQueueClient(RequestQueueClient, SqlClientMixin):
 
         async with self.get_session() as session:
             result = await session.execute(stmt)
+            result = cast('CursorResult', result) if not isinstance(result, CursorResult) else result
             existing_requests = {req.request_id: req for req in result.scalars()}
             state = await self._get_state(session)
             insert_values: list[dict] = []
@@ -498,9 +499,12 @@
         )
         async with self.get_session() as session:
             result = await session.execute(stmt)
+            result = cast('CursorResult', result) if not isinstance(result, CursorResult) else result
+
             if result.rowcount == 0:
                 logger.warning(f'Request {request.unique_key} not found in database.')
                 return None
+
             await self._update_metadata(
                 session,
                 **_QueueMetadataUpdateParams(
@@ -542,14 +546,24 @@
             block_until = now + timedelta(seconds=self._BLOCK_REQUEST_TIME)
             # Extend blocking for forefront request, it is considered blocked by the current client.
             stmt = stmt.values(
-                sequence_number=new_sequence,
+                sequence_number=new_sequence,
+                time_blocked_until=block_until,
+                client_key=self.client_key,
+                data=request.model_dump_json(),
             )
         else:
             new_sequence = state.sequence_counter
             state.sequence_counter += 1
-            stmt = stmt.values(
+            stmt = stmt.values(
+                sequence_number=new_sequence,
+                time_blocked_until=None,
+                client_key=None,
+                data=request.model_dump_json(),
+            )
 
         result = await session.execute(stmt)
+        result = cast('CursorResult', result) if not isinstance(result, CursorResult) else result
+
         if result.rowcount == 0:
             logger.warning(f'Request {request.unique_key} not found in database.')
             return None
crawlee/storage_clients/_sql/_storage_client.py
@@ -149,7 +149,7 @@ class SqlStorageClient(StorageClient):
         # Raise an error if the new version creates breaking changes in the database schema.
         if db_version and db_version != __version__:
             warnings.warn(
-                f'Database version {db_version
+                f'Database version {db_version} does not match library version {__version__}. '
                 'This may lead to unexpected behavior. Drop the db if you want to make sure that '
                 'everything will work fine.',
                 category=UserWarning,
crawlee/storages/_key_value_store.py
@@ -281,11 +281,14 @@ class KeyValueStore(Storage):
         if key in cache:
             return cache[key].current_value.root
 
+        async def kvs_factory() -> KeyValueStore:
+            return self
+
         cache[key] = recoverable_state = RecoverableState(
             default_state=AutosavedValue(default_value),
-            persistence_enabled=True,
-            persist_state_kvs_id=self.id,
             persist_state_key=key,
+            persistence_enabled=True,
+            persist_state_kvs_factory=kvs_factory,
             logger=logger,
         )
 
{crawlee-1.0.2b3.dist-info → crawlee-1.1.2b7.dist-info}/METADATA
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: crawlee
-Version: 1.0.2b3
+Version: 1.1.2b7
 Summary: Crawlee for Python
 Project-URL: Apify Homepage, https://apify.com
 Project-URL: Changelog, https://crawlee.dev/python/docs/changelog
@@ -223,15 +223,17 @@ Classifier: Programming Language :: Python :: 3.10
 Classifier: Programming Language :: Python :: 3.11
 Classifier: Programming Language :: Python :: 3.12
 Classifier: Programming Language :: Python :: 3.13
+Classifier: Programming Language :: Python :: 3.14
 Classifier: Topic :: Software Development :: Libraries
 Requires-Python: >=3.10
+Requires-Dist: async-timeout>=5.0.1
 Requires-Dist: cachetools>=5.5.0
 Requires-Dist: colorama>=0.4.0
-Requires-Dist: impit>=0.
+Requires-Dist: impit>=0.8.0
 Requires-Dist: more-itertools>=10.2.0
 Requires-Dist: protego>=0.5.0
 Requires-Dist: psutil>=6.0.0
-Requires-Dist: pydantic-settings
+Requires-Dist: pydantic-settings>=2.12.0
 Requires-Dist: pydantic>=2.11.0
 Requires-Dist: pyee>=9.0.0
 Requires-Dist: tldextract>=5.1.0
@@ -263,6 +265,7 @@ Requires-Dist: opentelemetry-sdk>=1.34.1; extra == 'all'
 Requires-Dist: opentelemetry-semantic-conventions>=0.54; extra == 'all'
 Requires-Dist: parsel>=1.10.0; extra == 'all'
 Requires-Dist: playwright>=1.27.0; extra == 'all'
+Requires-Dist: redis[hiredis]>=7.0.0; extra == 'all'
 Requires-Dist: rich>=13.9.0; extra == 'all'
 Requires-Dist: scikit-learn>=1.6.0; extra == 'all'
 Requires-Dist: sqlalchemy[asyncio]<3.0.0,>=2.0.0; extra == 'all'
@@ -296,6 +299,8 @@ Provides-Extra: playwright
 Requires-Dist: apify-fingerprint-datapoints>=0.0.2; extra == 'playwright'
 Requires-Dist: browserforge>=1.2.3; extra == 'playwright'
 Requires-Dist: playwright>=1.27.0; extra == 'playwright'
+Provides-Extra: redis
+Requires-Dist: redis[hiredis]>=7.0.0; extra == 'redis'
 Provides-Extra: sql-postgres
 Requires-Dist: asyncpg>=0.24.0; extra == 'sql-postgres'
 Requires-Dist: sqlalchemy[asyncio]<3.0.0,>=2.0.0; extra == 'sql-postgres'