pydocket 0.8.0__tar.gz → 0.9.1__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {pydocket-0.8.0 → pydocket-0.9.1}/PKG-INFO +1 -1
- {pydocket-0.8.0 → pydocket-0.9.1}/src/docket/docket.py +151 -35
- {pydocket-0.8.0 → pydocket-0.9.1}/src/docket/worker.py +2 -2
- {pydocket-0.8.0 → pydocket-0.9.1}/tests/test_fundamentals.py +24 -3
- {pydocket-0.8.0 → pydocket-0.9.1}/tests/test_worker.py +317 -13
- {pydocket-0.8.0 → pydocket-0.9.1}/.cursor/rules/general.mdc +0 -0
- {pydocket-0.8.0 → pydocket-0.9.1}/.cursor/rules/python-style.mdc +0 -0
- {pydocket-0.8.0 → pydocket-0.9.1}/.github/codecov.yml +0 -0
- {pydocket-0.8.0 → pydocket-0.9.1}/.github/workflows/chaos.yml +0 -0
- {pydocket-0.8.0 → pydocket-0.9.1}/.github/workflows/ci.yml +0 -0
- {pydocket-0.8.0 → pydocket-0.9.1}/.github/workflows/docs.yml +0 -0
- {pydocket-0.8.0 → pydocket-0.9.1}/.github/workflows/publish.yml +0 -0
- {pydocket-0.8.0 → pydocket-0.9.1}/.gitignore +0 -0
- {pydocket-0.8.0 → pydocket-0.9.1}/.pre-commit-config.yaml +0 -0
- {pydocket-0.8.0 → pydocket-0.9.1}/CLAUDE.md +0 -0
- {pydocket-0.8.0 → pydocket-0.9.1}/LICENSE +0 -0
- {pydocket-0.8.0 → pydocket-0.9.1}/README.md +0 -0
- {pydocket-0.8.0 → pydocket-0.9.1}/chaos/README.md +0 -0
- {pydocket-0.8.0 → pydocket-0.9.1}/chaos/__init__.py +0 -0
- {pydocket-0.8.0 → pydocket-0.9.1}/chaos/driver.py +0 -0
- {pydocket-0.8.0 → pydocket-0.9.1}/chaos/producer.py +0 -0
- {pydocket-0.8.0 → pydocket-0.9.1}/chaos/run +0 -0
- {pydocket-0.8.0 → pydocket-0.9.1}/chaos/tasks.py +0 -0
- {pydocket-0.8.0 → pydocket-0.9.1}/docs/advanced-patterns.md +0 -0
- {pydocket-0.8.0 → pydocket-0.9.1}/docs/api-reference.md +0 -0
- {pydocket-0.8.0 → pydocket-0.9.1}/docs/dependencies.md +0 -0
- {pydocket-0.8.0 → pydocket-0.9.1}/docs/getting-started.md +0 -0
- {pydocket-0.8.0 → pydocket-0.9.1}/docs/index.md +0 -0
- {pydocket-0.8.0 → pydocket-0.9.1}/docs/production.md +0 -0
- {pydocket-0.8.0 → pydocket-0.9.1}/docs/testing.md +0 -0
- {pydocket-0.8.0 → pydocket-0.9.1}/examples/__init__.py +0 -0
- {pydocket-0.8.0 → pydocket-0.9.1}/examples/common.py +0 -0
- {pydocket-0.8.0 → pydocket-0.9.1}/examples/concurrency_control.py +0 -0
- {pydocket-0.8.0 → pydocket-0.9.1}/examples/find_and_flood.py +0 -0
- {pydocket-0.8.0 → pydocket-0.9.1}/examples/self_perpetuating.py +0 -0
- {pydocket-0.8.0 → pydocket-0.9.1}/mkdocs.yml +0 -0
- {pydocket-0.8.0 → pydocket-0.9.1}/pyproject.toml +0 -0
- {pydocket-0.8.0 → pydocket-0.9.1}/src/docket/__init__.py +0 -0
- {pydocket-0.8.0 → pydocket-0.9.1}/src/docket/__main__.py +0 -0
- {pydocket-0.8.0 → pydocket-0.9.1}/src/docket/annotations.py +0 -0
- {pydocket-0.8.0 → pydocket-0.9.1}/src/docket/cli.py +0 -0
- {pydocket-0.8.0 → pydocket-0.9.1}/src/docket/dependencies.py +0 -0
- {pydocket-0.8.0 → pydocket-0.9.1}/src/docket/execution.py +0 -0
- {pydocket-0.8.0 → pydocket-0.9.1}/src/docket/instrumentation.py +0 -0
- {pydocket-0.8.0 → pydocket-0.9.1}/src/docket/py.typed +0 -0
- {pydocket-0.8.0 → pydocket-0.9.1}/src/docket/tasks.py +0 -0
- {pydocket-0.8.0 → pydocket-0.9.1}/telemetry/.gitignore +0 -0
- {pydocket-0.8.0 → pydocket-0.9.1}/telemetry/start +0 -0
- {pydocket-0.8.0 → pydocket-0.9.1}/telemetry/stop +0 -0
- {pydocket-0.8.0 → pydocket-0.9.1}/tests/__init__.py +0 -0
- {pydocket-0.8.0 → pydocket-0.9.1}/tests/cli/__init__.py +0 -0
- {pydocket-0.8.0 → pydocket-0.9.1}/tests/cli/conftest.py +0 -0
- {pydocket-0.8.0 → pydocket-0.9.1}/tests/cli/test_clear.py +0 -0
- {pydocket-0.8.0 → pydocket-0.9.1}/tests/cli/test_module.py +0 -0
- {pydocket-0.8.0 → pydocket-0.9.1}/tests/cli/test_parsing.py +0 -0
- {pydocket-0.8.0 → pydocket-0.9.1}/tests/cli/test_snapshot.py +0 -0
- {pydocket-0.8.0 → pydocket-0.9.1}/tests/cli/test_striking.py +0 -0
- {pydocket-0.8.0 → pydocket-0.9.1}/tests/cli/test_tasks.py +0 -0
- {pydocket-0.8.0 → pydocket-0.9.1}/tests/cli/test_version.py +0 -0
- {pydocket-0.8.0 → pydocket-0.9.1}/tests/cli/test_worker.py +0 -0
- {pydocket-0.8.0 → pydocket-0.9.1}/tests/cli/test_workers.py +0 -0
- {pydocket-0.8.0 → pydocket-0.9.1}/tests/conftest.py +0 -0
- {pydocket-0.8.0 → pydocket-0.9.1}/tests/test_concurrency_basic.py +0 -0
- {pydocket-0.8.0 → pydocket-0.9.1}/tests/test_concurrency_control.py +0 -0
- {pydocket-0.8.0 → pydocket-0.9.1}/tests/test_concurrency_refresh.py +0 -0
- {pydocket-0.8.0 → pydocket-0.9.1}/tests/test_dependencies.py +0 -0
- {pydocket-0.8.0 → pydocket-0.9.1}/tests/test_docket.py +0 -0
- {pydocket-0.8.0 → pydocket-0.9.1}/tests/test_execution.py +0 -0
- {pydocket-0.8.0 → pydocket-0.9.1}/tests/test_instrumentation.py +0 -0
- {pydocket-0.8.0 → pydocket-0.9.1}/tests/test_striking.py +0 -0
- {pydocket-0.8.0 → pydocket-0.9.1}/uv.lock +0 -0
{pydocket-0.8.0 → pydocket-0.9.1}/PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: pydocket
-Version: 0.8.0
+Version: 0.9.1
 Summary: A distributed background task system for Python functions
 Project-URL: Homepage, https://github.com/chrisguidry/docket
 Project-URL: Bug Tracker, https://github.com/chrisguidry/docket/issues
{pydocket-0.8.0 → pydocket-0.9.1}/src/docket/docket.py

@@ -16,6 +16,7 @@ from typing import (
     Mapping,
     NoReturn,
     ParamSpec,
+    Protocol,
     Self,
     Sequence,
     TypedDict,
@@ -27,7 +28,6 @@ from typing import (
 import redis.exceptions
 from opentelemetry import propagate, trace
 from redis.asyncio import ConnectionPool, Redis
-from redis.asyncio.client import Pipeline
 from uuid_extensions import uuid7
 
 from .execution import (
@@ -55,6 +55,18 @@ logger: logging.Logger = logging.getLogger(__name__)
 tracer: trace.Tracer = trace.get_tracer(__name__)
 
 
+class _schedule_task(Protocol):
+    async def __call__(
+        self, keys: list[str], args: list[str | float | bytes]
+    ) -> str: ...  # pragma: no cover
+
+
+class _cancel_task(Protocol):
+    async def __call__(
+        self, keys: list[str], args: list[str]
+    ) -> str: ...  # pragma: no cover
+
+
 P = ParamSpec("P")
 R = TypeVar("R")
 
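These Protocols give a static type to the script objects returned by redis-py's register_script, which are otherwise loosely typed. A minimal sketch of that pattern, independent of docket (assumes a Redis server reachable on localhost:6379):

    import asyncio
    from redis.asyncio import Redis

    async def main() -> None:
        redis = Redis()
        # register_script returns a script object backed by EVALSHA; awaiting
        # it executes the Lua source on the server
        echo = redis.register_script("return ARGV[1]")
        print(await echo(keys=[], args=["hello"]))  # b'hello'

    asyncio.run(main())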
@@ -131,6 +143,8 @@ class Docket:
 
     _monitor_strikes_task: asyncio.Task[None]
     _connection_pool: ConnectionPool
+    _schedule_task_script: _schedule_task | None
+    _cancel_task_script: _cancel_task | None
 
     def __init__(
         self,
@@ -156,6 +170,8 @@ class Docket:
         self.url = url
         self.heartbeat_interval = heartbeat_interval
         self.missed_heartbeats = missed_heartbeats
+        self._schedule_task_script = None
+        self._cancel_task_script = None
 
     @property
     def worker_group_name(self) -> str:
@@ -300,9 +316,7 @@ class Docket:
         execution = Execution(function, args, kwargs, when, key, attempt=1)
 
         async with self.redis() as redis:
-
-            await self._schedule(redis, pipeline, execution, replace=False)
-            await pipeline.execute()
+            await self._schedule(redis, execution, replace=False)
 
         TASKS_ADDED.add(1, {**self.labels(), **execution.general_labels()})
         TASKS_SCHEDULED.add(1, {**self.labels(), **execution.general_labels()})
@@ -361,9 +375,7 @@ class Docket:
         execution = Execution(function, args, kwargs, when, key, attempt=1)
 
         async with self.redis() as redis:
-
-            await self._schedule(redis, pipeline, execution, replace=True)
-            await pipeline.execute()
+            await self._schedule(redis, execution, replace=True)
 
         TASKS_REPLACED.add(1, {**self.labels(), **execution.general_labels()})
         TASKS_CANCELLED.add(1, {**self.labels(), **execution.general_labels()})
@@ -383,9 +395,7 @@ class Docket:
             },
         ):
             async with self.redis() as redis:
-
-                await self._schedule(redis, pipeline, execution, replace=False)
-                await pipeline.execute()
+                await self._schedule(redis, execution, replace=False)
 
             TASKS_SCHEDULED.add(1, {**self.labels(), **execution.general_labels()})
 
@@ -400,9 +410,7 @@ class Docket:
             attributes={**self.labels(), "docket.key": key},
         ):
             async with self.redis() as redis:
-
-                await self._cancel(pipeline, key)
-                await pipeline.execute()
+                await self._cancel(redis, key)
 
         TASKS_CANCELLED.add(1, self.labels())
 
@@ -423,10 +431,17 @@ class Docket:
     async def _schedule(
         self,
         redis: Redis,
-        pipeline: Pipeline,
         execution: Execution,
         replace: bool = False,
     ) -> None:
+        """Schedule a task atomically.
+
+        Handles:
+        - Checking for task existence
+        - Cancelling existing tasks when replacing
+        - Adding tasks to stream (immediate) or queue (future)
+        - Tracking stream message IDs for later cancellation
+        """
         if self.strike_list.is_stricken(execution):
             logger.warning(
                 "%r is stricken, skipping schedule of %r",
@@ -449,32 +464,133 @@ class Docket:
         key = execution.key
         when = execution.when
         known_task_key = self.known_task_key(key)
+        is_immediate = when <= datetime.now(timezone.utc)
 
+        # Lock per task key to prevent race conditions between concurrent operations
         async with redis.lock(f"{known_task_key}:lock", timeout=10):
-            if
-
-
-
-
-
-            "
-
-
-
-
+            if self._schedule_task_script is None:
+                self._schedule_task_script = cast(
+                    _schedule_task,
+                    redis.register_script(
+                        # KEYS: stream_key, known_key, parked_key, queue_key
+                        # ARGV: task_key, when_timestamp, is_immediate, replace, ...message_fields
+                        """
+                        local stream_key = KEYS[1]
+                        local known_key = KEYS[2]
+                        local parked_key = KEYS[3]
+                        local queue_key = KEYS[4]
+
+                        local task_key = ARGV[1]
+                        local when_timestamp = ARGV[2]
+                        local is_immediate = ARGV[3] == '1'
+                        local replace = ARGV[4] == '1'
+
+                        -- Extract message fields from ARGV[5] onwards
+                        local message = {}
+                        for i = 5, #ARGV, 2 do
+                            message[#message + 1] = ARGV[i]      -- field name
+                            message[#message + 1] = ARGV[i + 1]  -- field value
+                        end
+
+                        -- Handle replacement: cancel existing task if needed
+                        if replace then
+                            local existing_message_id = redis.call('HGET', known_key, 'stream_message_id')
+                            if existing_message_id then
+                                redis.call('XDEL', stream_key, existing_message_id)
+                            end
+                            redis.call('DEL', known_key, parked_key)
+                            redis.call('ZREM', queue_key, task_key)
+                        else
+                            -- Check if task already exists
+                            if redis.call('EXISTS', known_key) == 1 then
+                                return 'EXISTS'
+                            end
+                        end
+
+                        if is_immediate then
+                            -- Add to stream and store message ID for later cancellation
+                            local message_id = redis.call('XADD', stream_key, '*', unpack(message))
+                            redis.call('HSET', known_key, 'when', when_timestamp, 'stream_message_id', message_id)
+                            return message_id
+                        else
+                            -- Add to queue with task data in parked hash
+                            redis.call('HSET', known_key, 'when', when_timestamp)
+                            redis.call('HSET', parked_key, unpack(message))
+                            redis.call('ZADD', queue_key, when_timestamp, task_key)
+                            return 'QUEUED'
+                        end
+                        """
+                    ),
+                )
+            schedule_task = self._schedule_task_script
 
-
+            await schedule_task(
+                keys=[
+                    self.stream_key,
+                    known_task_key,
+                    self.parked_task_key(key),
+                    self.queue_key,
+                ],
+                args=[
+                    key,
+                    str(when.timestamp()),
+                    "1" if is_immediate else "0",
+                    "1" if replace else "0",
+                    *[
+                        item
+                        for field, value in message.items()
+                        for item in (field, value)
+                    ],
+                ],
+            )
 
-
-
-            else:
-                pipeline.hset(self.parked_task_key(key), mapping=message)  # type: ignore[arg-type]
-                pipeline.zadd(self.queue_key, {key: when.timestamp()})
+    async def _cancel(self, redis: Redis, key: str) -> None:
+        """Cancel a task atomically.
 
-
-
-
-
+        Handles cancellation regardless of task location:
+        - From the stream (using stored message ID)
+        - From the queue (scheduled tasks)
+        - Cleans up all associated metadata keys
+        """
+        if self._cancel_task_script is None:
+            self._cancel_task_script = cast(
+                _cancel_task,
+                redis.register_script(
+                    # KEYS: stream_key, known_key, parked_key, queue_key
+                    # ARGV: task_key
+                    """
+                    local stream_key = KEYS[1]
+                    local known_key = KEYS[2]
+                    local parked_key = KEYS[3]
+                    local queue_key = KEYS[4]
+                    local task_key = ARGV[1]
+
+                    -- Delete from stream if message ID exists
+                    local message_id = redis.call('HGET', known_key, 'stream_message_id')
+                    if message_id then
+                        redis.call('XDEL', stream_key, message_id)
+                    end
+
+                    -- Clean up all task-related keys
+                    redis.call('DEL', known_key, parked_key)
+                    redis.call('ZREM', queue_key, task_key)
+
+                    return 'OK'
+                    """
+                ),
+            )
+        cancel_task = self._cancel_task_script
+
+        # Execute the cancellation script
+        await cancel_task(
+            keys=[
+                self.stream_key,
+                self.known_task_key(key),
+                self.parked_task_key(key),
+                self.queue_key,
+            ],
+            args=[key],
+        )
 
     @property
     def strike_key(self) -> str:
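The flattened args list above mirrors the Lua side's field-extraction loop. A small standalone illustration of the ARGV layout (the field names and values here are hypothetical, not docket's actual wire format):

    # Hypothetical field/value pairs for one task message
    message = {"function": "send_email", "attempt": "1"}

    args = [
        "task-123",     # ARGV[1]: task_key
        "1716000000",   # ARGV[2]: when_timestamp
        "1",            # ARGV[3]: is_immediate
        "0",            # ARGV[4]: replace
        # ARGV[5:] alternates field, value; the Lua side rebuilds the pairs
        # with `for i = 5, #ARGV, 2 do ... end`
        *[item for field, value in message.items() for item in (field, value)],
    ]
    assert args[4:] == ["function", "send_email", "attempt", "1"]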
{pydocket-0.8.0 → pydocket-0.9.1}/src/docket/worker.py

@@ -286,7 +286,7 @@ class Worker:
                 count=available_slots,
             )
 
-
+        def start_task(message_id: RedisMessageID, message: RedisMessage) -> bool:
             function_name = message[b"function"].decode()
             if not (function := self.docket.tasks.get(function_name)):
                 logger.warning(
@@ -347,7 +347,7 @@ class Worker:
                 if not message:  # pragma: no cover
                     continue
 
-                task_started =
+                task_started = start_task(message_id, message)
                 if not task_started:
                     # Other errors - delete and ack
                     await self._delete_known_task(redis, message)
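start_task decodes message[b"function"] because redis-py returns stream fields and values as bytes by default. A hedged sketch of reading such messages outside the worker (stream, group, and consumer names here are illustrative, not docket's):

    # Fragment: runs inside an async function, with `redis: Redis` connected
    # and the stream plus consumer group already created
    entries = await redis.xreadgroup(
        groupname="workers",
        consumername="worker-1",
        streams={"some-docket:stream": ">"},
        count=10,
    )
    for _stream, messages in entries:
        for message_id, message in messages:
            function_name = message[b"function"].decode()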
{pydocket-0.8.0 → pydocket-0.9.1}/tests/test_fundamentals.py

@@ -250,10 +250,10 @@ async def test_cancelling_future_task(
     the_task.assert_not_called()
 
 
-async def
+async def test_cancelling_immediate_task(
     docket: Docket, worker: Worker, the_task: AsyncMock, now: Callable[[], datetime]
 ):
-    """docket
+    """docket can cancel a task that is scheduled immediately"""
 
     execution = await docket.add(the_task, now())("a", "b", c="c")
 
@@ -261,7 +261,28 @@ async def test_cancelling_current_task_not_supported(
 
     await worker.run_until_finished()
 
-    the_task.
+    the_task.assert_not_called()
+
+
+async def test_cancellation_is_idempotent(
+    docket: Docket, worker: Worker, the_task: AsyncMock, now: Callable[[], datetime]
+):
+    """Test that canceling the same task twice doesn't error."""
+    key = f"test-task:{uuid4()}"
+
+    # Schedule a task
+    later = now() + timedelta(seconds=1)
+    await docket.add(the_task, later, key=key)("test")
+
+    # Cancel it twice - both should succeed without error
+    await docket.cancel(key)
+    await docket.cancel(key)  # Should be idempotent
+
+    # Run worker to ensure the task was actually cancelled
+    await worker.run_until_finished()
+
+    # Task should not have been executed since it was cancelled
+    the_task.assert_not_called()
 
 
 async def test_errors_are_logged(
{pydocket-0.8.0 → pydocket-0.9.1}/tests/test_worker.py

@@ -1,10 +1,14 @@
 import asyncio
 import logging
+import time
 from contextlib import asynccontextmanager
+from contextvars import ContextVar
 from datetime import datetime, timedelta, timezone
-from typing import AsyncGenerator
+from typing import AsyncGenerator, Callable, Iterable
 from unittest.mock import AsyncMock, patch
+from uuid import uuid4
 
+import cloudpickle  # type: ignore[import]
 import pytest
 from redis.asyncio import Redis
 from redis.exceptions import ConnectionError
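The new cloudpickle import is used by the legacy-state test below to seed Redis with serialized task arguments, the same shape docket parks. For reference, a tiny round-trip using cloudpickle's public dumps/loads API:

    import cloudpickle

    payload = cloudpickle.dumps(["legacy task test"])  # bytes, storable as a Redis hash field
    assert cloudpickle.loads(payload) == ["legacy task test"]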
@@ -17,6 +21,8 @@ from docket import (
     Perpetual,
     Worker,
 )
+from docket.dependencies import Timeout
+from docket.execution import Execution
 from docket.tasks import standard_tasks
 from docket.worker import ms
 
@@ -174,7 +180,6 @@ async def test_redeliveries_respect_concurrency_limits(docket: Docket):
         nonlocal failure_count
 
         # Record when this task runs
-        import time
 
         task_executions.append((customer_id, time.time()))
 
@@ -555,7 +560,6 @@ async def test_worker_can_be_told_to_skip_automatic_tasks(docket: Docket):
 
 async def test_worker_concurrency_limits_task_queuing_behavior(docket: Docket):
     """Test that concurrency limits control task execution properly"""
-    from contextvars import ContextVar
 
     # Use contextvar for reliable tracking across async execution
     execution_log: ContextVar[list[tuple[str, int]]] = ContextVar("execution_log")
@@ -1171,7 +1175,6 @@ async def test_worker_concurrency_edge_cases(docket: Docket):
 
 async def test_worker_timeout_exceeds_redelivery_timeout(docket: Docket):
     """Test worker handles user timeout longer than redelivery timeout."""
-    from docket.dependencies import Timeout
 
     task_executed = False
 
@@ -1250,8 +1253,6 @@ async def test_worker_concurrency_missing_argument_early_return(docket: Docket):
 
 async def test_worker_no_concurrency_dependency_in_function(docket: Docket):
     """Test _can_start_task with function that has no concurrency dependency."""
-    from docket.execution import Execution
-    from datetime import datetime, timezone
 
     async def task_without_concurrency_dependency():
         await asyncio.sleep(0.001)
@@ -1277,8 +1278,6 @@ async def test_worker_no_concurrency_dependency_in_function(docket: Docket):
 
 async def test_worker_no_concurrency_dependency_in_release(docket: Docket):
     """Test _release_concurrency_slot with function that has no concurrency dependency."""
-    from docket.execution import Execution
-    from datetime import datetime, timezone
 
     async def task_without_concurrency_dependency():
         await asyncio.sleep(0.001)
@@ -1303,8 +1302,6 @@ async def test_worker_no_concurrency_dependency_in_release(docket: Docket):
 
 async def test_worker_missing_concurrency_argument_in_release(docket: Docket):
     """Test _release_concurrency_slot when concurrency argument is missing."""
-    from docket.execution import Execution
-    from datetime import datetime, timezone
 
     async def task_with_missing_arg(
         concurrency: ConcurrencyLimit = ConcurrencyLimit(
@@ -1333,8 +1330,6 @@ async def test_worker_missing_concurrency_argument_in_release(docket: Docket):
 
 async def test_worker_concurrency_missing_argument_in_can_start(docket: Docket):
     """Test _can_start_task with missing concurrency argument during execution."""
-    from docket.execution import Execution
-    from datetime import datetime, timezone
 
     async def task_with_missing_concurrency_arg(
         concurrency: ConcurrencyLimit = ConcurrencyLimit(
@@ -1383,7 +1378,6 @@ async def test_worker_exception_before_dependencies(docket: Docket):
     task_failed = False
 
     # Mock resolved_dependencies to fail before setting dependencies
-    from unittest.mock import patch, AsyncMock
 
     await docket.add(task_that_will_fail)()
 
@@ -1427,3 +1421,313 @@ async def test_finally_block_releases_concurrency_on_success(docket: Docket):
 
     # If both tasks completed, the finally block successfully released slots
     assert task_completed
+
+
+async def test_replacement_race_condition_stream_tasks(
+    docket: Docket, worker: Worker, the_task: AsyncMock, now: Callable[[], datetime]
+):
+    """Test that replace() properly cancels tasks already in the stream.
+
+    This reproduces the race condition where:
+    1. Task is scheduled for immediate execution
+    2. Scheduler moves it to stream
+    3. replace() tries to cancel but only checks queue/hash, not stream
+    4. Both original and replacement tasks execute
+    """
+    key = f"my-cool-task:{uuid4()}"
+
+    # Schedule a task immediately (will be moved to stream quickly)
+    await docket.add(the_task, now(), key=key)("a", "b", c="c")
+
+    # Let the scheduler move the task to the stream
+    # The scheduler runs every 250ms by default
+    await asyncio.sleep(0.3)
+
+    # Now replace the task - this should cancel the one in the stream
+    later = now() + timedelta(milliseconds=100)
+    await docket.replace(the_task, later, key=key)("b", "c", c="d")
+
+    # Run the worker to completion
+    await worker.run_until_finished()
+
+    # Should only execute the replacement task, not both
+    the_task.assert_awaited_once_with("b", "c", c="d")
+    assert the_task.await_count == 1, (
+        f"Task was called {the_task.await_count} times, expected 1"
+    )
+
+
+async def test_replace_task_in_queue_before_stream(
+    docket: Docket, worker: Worker, the_task: AsyncMock, now: Callable[[], datetime]
+):
+    """Test that replace() works correctly when task is still in queue."""
+    key = f"my-cool-task:{uuid4()}"
+
+    # Schedule a task slightly in the future (stays in queue)
+    soon = now() + timedelta(seconds=1)
+    await docket.add(the_task, soon, key=key)("a", "b", c="c")
+
+    # Replace immediately (before scheduler can move it)
+    later = now() + timedelta(milliseconds=100)
+    await docket.replace(the_task, later, key=key)("b", "c", c="d")
+
+    await worker.run_until_finished()
+
+    # Should only execute the replacement
+    the_task.assert_awaited_once_with("b", "c", c="d")
+    assert the_task.await_count == 1
+
+
+async def test_rapid_replace_operations(
+    docket: Docket, worker: Worker, the_task: AsyncMock, now: Callable[[], datetime]
+):
+    """Test multiple rapid replace operations."""
+    key = f"my-cool-task:{uuid4()}"
+
+    # Schedule initial task
+    await docket.add(the_task, now(), key=key)("a", "b", c="c")
+
+    # Rapid replacements
+    for i in range(5):
+        when = now() + timedelta(milliseconds=50 + i * 10)
+        await docket.replace(the_task, when, key=key)(f"arg{i}", b=f"b{i}")
+
+    await worker.run_until_finished()
+
+    # Should only execute the last replacement
+    the_task.assert_awaited_once_with("arg4", b="b4")
+    assert the_task.await_count == 1
+
+
+async def test_wrongtype_error_with_legacy_known_task_key(
+    docket: Docket,
+    worker: Worker,
+    the_task: AsyncMock,
+    now: Callable[[], datetime],
+    caplog: pytest.LogCaptureFixture,
+) -> None:
+    """Test graceful handling when known task keys exist as strings from legacy implementations.
+
+    Regression test for issue where worker scheduler would get WRONGTYPE errors when trying to
+    HSET on known task keys that existed as string values from older docket versions.
+
+    The original error occurred when:
+    1. A legacy docket created known task keys as simple string values (timestamps)
+    2. The new scheduler tried to HSET stream_message_id on these keys
+    3. Redis threw WRONGTYPE error because you can't HSET on a string key
+    4. This caused scheduler loop failures in production
+
+    This test reproduces that scenario by manually setting up the legacy state,
+    then verifies the new code handles it gracefully without errors.
+    """
+    key = f"legacy-task:{uuid4()}"
+
+    # Simulate legacy behavior: create the known task key as a string
+    # This is what older versions of docket would have done
+    async with docket.redis() as redis:
+        known_task_key = docket.known_task_key(key)
+        when = now() + timedelta(seconds=1)
+
+        # Set up legacy state: known key as string, task in queue with parked data
+        await redis.set(known_task_key, str(when.timestamp()))
+        await redis.zadd(docket.queue_key, {key: when.timestamp()})
+
+        await redis.hset(  # type: ignore
+            docket.parked_task_key(key),
+            mapping={
+                "key": key,
+                "when": when.isoformat(),
+                "function": "trace",
+                "args": cloudpickle.dumps(["legacy task test"]),  # type: ignore[arg-type]
+                "kwargs": cloudpickle.dumps({}),  # type: ignore[arg-type]
+                "attempt": "1",
+            },
+        )
+
+    # Capture logs to ensure no errors occur and see task execution
+    with caplog.at_level(logging.INFO):
+        await worker.run_until_finished()
+
+    # Should not have any ERROR logs now that the issue is fixed
+    error_logs = [record for record in caplog.records if record.levelname == "ERROR"]
+    assert len(error_logs) == 0, (
+        f"Expected no error logs, but got: {[r.message for r in error_logs]}"
+    )
+
+    # The task should execute successfully
+    # Since we used trace, we should see an INFO log with the message
+    info_logs = [record for record in caplog.records if record.levelname == "INFO"]
+    trace_logs = [
+        record for record in info_logs if "legacy task test" in record.message
+    ]
+    assert len(trace_logs) > 0, (
+        f"Expected to see trace log with 'legacy task test', got: {[r.message for r in info_logs]}"
+    )
+
+
+async def count_redis_keys_by_type(redis: Redis, prefix: str) -> dict[str, int]:
+    """Count Redis keys by type for a given prefix."""
+    pattern = f"{prefix}*"
+    keys: Iterable[str] = await redis.keys(pattern)  # type: ignore
+    counts: dict[str, int] = {}
+
+    for key in keys:
+        key_type = await redis.type(key)
+        key_type_str = (
+            key_type.decode() if isinstance(key_type, bytes) else str(key_type)
+        )
+        counts[key_type_str] = counts.get(key_type_str, 0) + 1
+
+    return counts
+
+
+class KeyCountChecker:
+    """Helper to verify Redis key counts remain consistent across operations."""
+
+    def __init__(self, docket: Docket, redis: Redis) -> None:
+        self.docket = docket
+        self.redis = redis
+        self.baseline_counts: dict[str, int] = {}
+
+    async def capture_baseline(self) -> None:
+        """Capture baseline key counts after worker priming."""
+        self.baseline_counts = await count_redis_keys_by_type(
+            self.redis, self.docket.name
+        )
+        print(f"Baseline key counts: {self.baseline_counts}")
+
+    async def verify_keys_increased(self, operation: str) -> None:
+        """Verify that key counts increased after scheduling operation."""
+        current_counts = await count_redis_keys_by_type(self.redis, self.docket.name)
+        print(f"After {operation} key counts: {current_counts}")
+
+        total_current = sum(current_counts.values())
+        total_baseline = sum(self.baseline_counts.values())
+        assert total_current > total_baseline, (
+            f"Expected more keys after {operation}, but got {total_current} vs {total_baseline}"
+        )
+
+    async def verify_keys_returned_to_baseline(self, operation: str) -> None:
+        """Verify that key counts returned to baseline after operation completion."""
+        final_counts = await count_redis_keys_by_type(self.redis, self.docket.name)
+        print(f"Final key counts: {final_counts}")
+
+        # Check each key type matches baseline
+        all_key_types = set(self.baseline_counts.keys()) | set(final_counts.keys())
+        for key_type in all_key_types:
+            baseline_count = self.baseline_counts.get(key_type, 0)
+            final_count = final_counts.get(key_type, 0)
+            assert final_count == baseline_count, (
+                f"Memory leak detected after {operation}: {key_type} keys not cleaned up properly. "
+                f"Baseline: {baseline_count}, Final: {final_count}"
+            )
+
+
+async def test_redis_key_cleanup_successful_task(
+    docket: Docket, worker: Worker
+) -> None:
+    """Test that Redis keys are properly cleaned up after successful task execution.
+
+    This test systematically counts Redis keys before and after task operations to detect
+    memory leaks where keys are not properly cleaned up.
+    """
+    # Prime the worker (run once with no tasks to establish baseline)
+    await worker.run_until_finished()
+
+    # Create and register a simple task
+    task_executed = False
+
+    async def successful_task():
+        nonlocal task_executed
+        task_executed = True
+        await asyncio.sleep(0.01)  # Small delay to ensure proper execution flow
+
+    docket.register(successful_task)
+
+    async with docket.redis() as redis:
+        checker = KeyCountChecker(docket, redis)
+        await checker.capture_baseline()
+
+        # Schedule the task
+        await docket.add(successful_task)()
+        await checker.verify_keys_increased("scheduling")
+
+        # Execute the task
+        await worker.run_until_finished()
+
+        # Verify task executed successfully
+        assert task_executed, "Task should have executed successfully"
+
+        # Verify cleanup
+        await checker.verify_keys_returned_to_baseline("successful task execution")
+
+
+async def test_redis_key_cleanup_failed_task(docket: Docket, worker: Worker) -> None:
+    """Test that Redis keys are properly cleaned up after failed task execution."""
+    # Prime the worker
+    await worker.run_until_finished()
+
+    # Create a task that will fail
+    task_attempted = False
+
+    async def failing_task():
+        nonlocal task_attempted
+        task_attempted = True
+        raise ValueError("Intentional test failure")
+
+    docket.register(failing_task)
+
+    async with docket.redis() as redis:
+        checker = KeyCountChecker(docket, redis)
+        await checker.capture_baseline()
+
+        # Schedule the task
+        await docket.add(failing_task)()
+        await checker.verify_keys_increased("scheduling")
+
+        # Execute the task (should fail)
+        await worker.run_until_finished()
+
+        # Verify task was attempted
+        assert task_attempted, "Task should have been attempted"
+
+        # Verify cleanup despite failure
+        await checker.verify_keys_returned_to_baseline("failed task execution")
+
+
+async def test_redis_key_cleanup_cancelled_task(docket: Docket, worker: Worker) -> None:
+    """Test that Redis keys are properly cleaned up after task cancellation."""
+    # Prime the worker
+    await worker.run_until_finished()
+
+    # Create a task that won't be executed
+    task_executed = False
+
+    async def task_to_cancel():
+        nonlocal task_executed
+        task_executed = True  # pragma: no cover
+
+    docket.register(task_to_cancel)
+
+    async with docket.redis() as redis:
+        checker = KeyCountChecker(docket, redis)
+        await checker.capture_baseline()
+
+        # Schedule the task for future execution
+        future_time = datetime.now(timezone.utc) + timedelta(seconds=10)
+        execution = await docket.add(task_to_cancel, future_time)()
+        await checker.verify_keys_increased("scheduling")
+
+        # Cancel the task
+        await docket.cancel(execution.key)
+
+        # Run worker to process any cleanup
+        await worker.run_until_finished()
+
+        # Verify task was not executed
+        assert not task_executed, (
+            "Task should not have been executed after cancellation"
+        )
+
+        # Verify cleanup after cancellation
+        await checker.verify_keys_returned_to_baseline("task cancellation")
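The legacy-key regression tested above hinges on a basic Redis rule: a key holds exactly one data type, and hash commands against a string key fail. A minimal reproduction of the WRONGTYPE error the old scheduler hit (assumes a local Redis; the key name is arbitrary):

    import asyncio
    from redis.asyncio import Redis
    from redis.exceptions import ResponseError

    async def main() -> None:
        redis = Redis()
        await redis.set("demo:known-task", "1716000000.0")  # legacy string value
        try:
            await redis.hset("demo:known-task", "stream_message_id", "1-0")
        except ResponseError as e:
            # WRONGTYPE Operation against a key holding the wrong kind of value
            print(e)

    asyncio.run(main())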