rrq 0.2.5__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- rrq/__init__.py +0 -0
- rrq/client.py +159 -0
- rrq/constants.py +42 -0
- rrq/exc.py +46 -0
- rrq/job.py +133 -0
- rrq/registry.py +77 -0
- rrq/rrq.py +328 -0
- rrq/settings.py +107 -0
- rrq/store.py +568 -0
- rrq/worker.py +897 -0
- rrq-0.2.5.dist-info/METADATA +201 -0
- rrq-0.2.5.dist-info/RECORD +15 -0
- rrq-0.2.5.dist-info/WHEEL +4 -0
- rrq-0.2.5.dist-info/entry_points.txt +2 -0
- rrq-0.2.5.dist-info/licenses/LICENSE +13 -0
rrq/__init__.py
ADDED
|
File without changes
|
rrq/client.py
ADDED
|
@@ -0,0 +1,159 @@
|
|
|
1
|
+
"""This module defines the RRQClient, used for enqueuing jobs into the RRQ system."""
|
|
2
|
+
|
|
3
|
+
import logging
|
|
4
|
+
import uuid
|
|
5
|
+
from datetime import UTC, datetime, timedelta
|
|
6
|
+
from typing import Any, Optional
|
|
7
|
+
|
|
8
|
+
from .job import Job, JobStatus
|
|
9
|
+
from .settings import RRQSettings
|
|
10
|
+
from .store import JobStore
|
|
11
|
+
|
|
12
|
+
logger = logging.getLogger(__name__)
|
|
13
|
+
|
|
14
|
+
|
|
15
|
+
class RRQClient:
    """Client interface for interacting with the RRQ (Reliable Redis Queue) system.

    Provides methods primarily for enqueuing jobs.
    """

    def __init__(self, settings: RRQSettings, job_store: Optional[JobStore] = None):
        """Initializes the RRQClient.

        Args:
            settings: The RRQSettings instance containing configuration.
            job_store: Optional JobStore instance. If not provided, a new one
                will be created based on the settings. This allows sharing
                a JobStore instance across multiple components.
        """
        self.settings = settings
        # Track ownership of the JobStore: close() only tears down a store this
        # client created itself; an externally supplied store remains the
        # caller's responsibility. Compare against None (not truthiness) so a
        # valid-but-falsy store object is still honored.
        if job_store is not None:
            self.job_store = job_store
            self._created_store_internally = False
        else:
            self.job_store = JobStore(settings=self.settings)
            self._created_store_internally = True

    async def close(self) -> None:
        """Closes the underlying JobStore's Redis connection if it was created internally by this client."""
        if self._created_store_internally:
            await self.job_store.aclose()

    async def enqueue(
        self,
        function_name: str,
        *args: Any,
        _queue_name: Optional[str] = None,
        _job_id: Optional[str] = None,
        _unique_key: Optional[str] = None,
        _max_retries: Optional[int] = None,
        _job_timeout_seconds: Optional[int] = None,
        _defer_until: Optional[datetime] = None,
        _defer_by: Optional[timedelta] = None,
        _result_ttl_seconds: Optional[int] = None,
        **kwargs: Any,
    ) -> Optional[Job]:
        """Enqueues a job to be processed by RRQ workers.

        Args:
            function_name: The registered name of the handler function to execute.
            *args: Positional arguments to pass to the handler function.
            _queue_name: Specific queue to enqueue the job to. Defaults to `RRQSettings.default_queue_name`.
            _job_id: User-provided job ID for idempotency or tracking. If None, a UUID is generated.
            _unique_key: If provided, ensures that only one job with this key is active or recently completed.
                Uses a Redis lock with `default_unique_job_lock_ttl_seconds`.
            _max_retries: Maximum number of retries for this specific job. Overrides `RRQSettings.default_max_retries`.
            _job_timeout_seconds: Timeout (in seconds) for this specific job. Overrides `RRQSettings.default_job_timeout_seconds`.
            _defer_until: A specific datetime (UTC recommended) when the job should become available for processing.
            _defer_by: A timedelta relative to now, specifying when the job should become available.
            _result_ttl_seconds: Time-to-live (in seconds) for the result of this specific job. Overrides `RRQSettings.default_result_ttl_seconds`.
            **kwargs: Keyword arguments to pass to the handler function.

        Returns:
            The created Job object if successfully enqueued, or None if enqueueing was denied
            (e.g., due to a unique key conflict).
        """
        job_id_to_use = _job_id or str(uuid.uuid4())

        # Uniqueness gate: if the lock is already held, an equivalent job is
        # active or recently ran, so this enqueue is refused.
        if _unique_key:
            lock_acquired = await self.job_store.acquire_unique_job_lock(
                unique_key=_unique_key,
                job_id=job_id_to_use,  # Store current job_id in lock for traceability
                lock_ttl_seconds=self.settings.default_unique_job_lock_ttl_seconds,
            )
            if not lock_acquired:
                logger.info(
                    f"Job with unique key '{_unique_key}' already active or recently run. Enqueue denied."
                )
                return None

        queue_name_to_use = _queue_name or self.settings.default_queue_name
        enqueue_time_utc = datetime.now(UTC)

        # Build the full job definition, falling back to settings defaults for
        # any per-job override left as None.
        job = Job(
            id=job_id_to_use,
            function_name=function_name,
            job_args=list(args),
            job_kwargs=kwargs,
            enqueue_time=enqueue_time_utc,
            status=JobStatus.PENDING,
            current_retries=0,
            max_retries=(
                _max_retries
                if _max_retries is not None
                else self.settings.default_max_retries
            ),
            job_timeout_seconds=(
                _job_timeout_seconds
                if _job_timeout_seconds is not None
                else self.settings.default_job_timeout_seconds
            ),
            result_ttl_seconds=(
                _result_ttl_seconds
                if _result_ttl_seconds is not None
                else self.settings.default_result_ttl_seconds
            ),
            job_unique_key=_unique_key,
            queue_name=queue_name_to_use,  # Store the target queue name
        )

        # Persist the full job definition before queueing its ID.
        await self.job_store.save_job_definition(job)

        # The queue (a sorted set) is scored by the millisecond timestamp at
        # which the job becomes eligible to run; defaults to "now" unless
        # deferred via _defer_until / _defer_by.
        score_dt = enqueue_time_utc
        if _defer_until:
            score_dt = _defer_until
        elif _defer_by:
            score_dt = enqueue_time_utc + _defer_by

        # Normalize to aware UTC: naive user-supplied datetimes are assumed to
        # already be UTC; aware non-UTC datetimes are converted.
        if score_dt.tzinfo is None:
            score_dt = score_dt.replace(tzinfo=UTC)
        elif score_dt.tzinfo != UTC:
            score_dt = score_dt.astimezone(UTC)

        score_timestamp_ms = int(score_dt.timestamp() * 1000)

        # Add the job ID to the processing queue.
        await self.job_store.add_job_to_queue(
            queue_name_to_use,
            job.id,
            float(score_timestamp_ms),  # Redis ZADD score must be float
        )

        logger.debug(
            f"Enqueued job {job.id} ('{job.function_name}') to queue '{queue_name_to_use}' with score {score_timestamp_ms}"
        )
        return job
|
rrq/constants.py
ADDED
|
@@ -0,0 +1,42 @@
|
|
|
1
|
+
"""This module defines constants used throughout the RRQ (Reliable Redis Queue) system.
|
|
2
|
+
|
|
3
|
+
These constants include Redis key prefixes, default queue names, and default
|
|
4
|
+
configuration values for job processing and worker behavior.
|
|
5
|
+
"""
|
|
6
|
+
|
|
7
|
+
# RRQ Constants
|
|
8
|
+
|
|
9
|
+
# Default queue name if not specified
|
|
10
|
+
DEFAULT_QUEUE_NAME: str = "rrq:queue:default"
|
|
11
|
+
|
|
12
|
+
# Default Dead Letter Queue name
|
|
13
|
+
DEFAULT_DLQ_NAME: str = "rrq:dlq:default"
|
|
14
|
+
|
|
15
|
+
# Redis key prefixes
|
|
16
|
+
JOB_KEY_PREFIX: str = "rrq:job:"
|
|
17
|
+
QUEUE_KEY_PREFIX: str = "rrq:queue:" # For ZSETs holding job IDs
|
|
18
|
+
ACTIVE_JOBS_PREFIX: str = (
|
|
19
|
+
"rrq:active:" # For lists of active jobs per worker (optional, for recovery)
|
|
20
|
+
)
|
|
21
|
+
LOCK_KEY_PREFIX: str = "rrq:lock:job:" # For job processing locks
|
|
22
|
+
UNIQUE_JOB_LOCK_PREFIX: str = "rrq:lock:unique:" # For user-defined unique job keys
|
|
23
|
+
HEALTH_KEY_PREFIX: str = "rrq:health:worker:"
|
|
24
|
+
RETRY_COUNTER_PREFIX: str = (
|
|
25
|
+
"rrq:retry_count:" # Potentially, if not stored directly in job hash
|
|
26
|
+
)
|
|
27
|
+
|
|
28
|
+
# Default job settings (can be overridden by RRQSettings or per job)
|
|
29
|
+
DEFAULT_MAX_RETRIES: int = 5
|
|
30
|
+
DEFAULT_JOB_TIMEOUT_SECONDS: int = 300 # 5 minutes
|
|
31
|
+
DEFAULT_LOCK_TIMEOUT_EXTENSION_SECONDS: int = (
|
|
32
|
+
60 # How much longer lock should live than job_timeout
|
|
33
|
+
)
|
|
34
|
+
DEFAULT_RESULT_TTL_SECONDS: int = 3600 * 24 # 1 day
|
|
35
|
+
DEFAULT_DLQ_RESULT_TTL_SECONDS: int = 3600 * 24 * 7 # 7 days for DLQ job details
|
|
36
|
+
DEFAULT_UNIQUE_JOB_LOCK_TTL_SECONDS: int = 3600 * 6 # 6 hours for unique job lock
|
|
37
|
+
|
|
38
|
+
# Poll delay for worker
|
|
39
|
+
DEFAULT_POLL_DELAY_SECONDS: float = 0.1
|
|
40
|
+
|
|
41
|
+
# Default worker ID if not specified
|
|
42
|
+
DEFAULT_WORKER_ID_PREFIX: str = "rrq_worker_"
|
rrq/exc.py
ADDED
|
@@ -0,0 +1,46 @@
|
|
|
1
|
+
"""This module defines custom exceptions for the RRQ (Reliable Redis Queue) system."""
|
|
2
|
+
|
|
3
|
+
from typing import Optional
|
|
4
|
+
|
|
5
|
+
|
|
6
|
+
class RRQError(Exception):
    """Root of the RRQ exception hierarchy; all package-specific errors derive from this."""
|
|
10
|
+
|
|
11
|
+
|
|
12
|
+
class RetryJob(RRQError):
    """Raised from within a job handler to explicitly request a retry.

    Lets a handler opt into a retry — optionally with its own delay — instead
    of depending on the worker's automatic retry behavior for generic
    exceptions.
    """

    def __init__(
        self,
        message: str = "Job requested retry",
        defer_seconds: Optional[float] = None,
    ):
        """
        Args:
            message: Optional message describing why the retry is requested.
            defer_seconds: Optional custom delay in seconds before the job is re-queued.
                If None, the worker will use its default backoff strategy.
        """
        # Stash the requested delay first, then delegate message handling to
        # the base Exception.
        self.defer_seconds = defer_seconds
        super().__init__(message)
|
|
32
|
+
|
|
33
|
+
|
|
34
|
+
class JobNotFound(RRQError):
    """Raised when a job definition is missing from the JobStore."""
|
|
38
|
+
|
|
39
|
+
|
|
40
|
+
class MaxRetriesExceeded(RRQError):
    """Raised when a job fails after reaching its maximum retry limit.

    Derives from RRQError (rather than bare Exception) so callers catching the
    package-wide base class also see this failure, consistent with the other
    exceptions in this module. Existing ``except MaxRetriesExceeded`` handlers
    are unaffected.
    """
|
|
44
|
+
|
|
45
|
+
|
|
46
|
+
# Add other RRQ-specific exceptions here as needed
|
rrq/job.py
ADDED
|
@@ -0,0 +1,133 @@
|
|
|
1
|
+
"""This module defines the core data structures for jobs in the RRQ system,
|
|
2
|
+
including the Job model and JobStatus enumeration.
|
|
3
|
+
"""
|
|
4
|
+
|
|
5
|
+
import uuid
|
|
6
|
+
from datetime import UTC, datetime
|
|
7
|
+
from enum import Enum
|
|
8
|
+
from typing import Any, Optional
|
|
9
|
+
|
|
10
|
+
from pydantic import BaseModel, Field
|
|
11
|
+
|
|
12
|
+
|
|
13
|
+
class JobStatus(str, Enum):
    """Lifecycle states a job moves through in the RRQ system."""

    # Enqueued and waiting for a worker to pick it up.
    PENDING = "PENDING"
    # Picked up by a worker and currently executing.
    ACTIVE = "ACTIVE"
    # Finished successfully.
    COMPLETED = "COMPLETED"
    # Exhausted all retries, or hit a non-retryable failure.
    FAILED = "FAILED"
    # Failed, but will be re-attempted after a delay.
    RETRYING = "RETRYING"
    # NOTE: a NOT_FOUND state could serve query results, but is not stored on
    # the job itself.
|
|
24
|
+
|
|
25
|
+
|
|
26
|
+
def new_job_id() -> str:
    """Return a fresh, globally unique job identifier (a random UUID4 string)."""
    return f"{uuid.uuid4()}"
|
|
29
|
+
|
|
30
|
+
|
|
31
|
+
class Job(BaseModel):
    """Represents a job to be processed by an RRQ worker.

    This model encapsulates all the information related to a job, including its
    identity, execution parameters, status, and results.
    """

    id: str = Field(
        default_factory=new_job_id, description="Unique identifier for the job."
    )
    function_name: str = Field(
        description="Name of the handler function to execute for this job."
    )
    job_args: list[Any] = Field(
        default_factory=list,
        description="Positional arguments for the handler function.",
    )
    job_kwargs: dict[str, Any] = Field(
        default_factory=dict, description="Keyword arguments for the handler function."
    )

    enqueue_time: datetime = Field(
        default_factory=lambda: datetime.now(UTC),
        description="Timestamp (UTC) when the job was initially enqueued.",
    )
    # The queue ZSET score (the moment the job becomes runnable) is derived at
    # enqueue time from defer options and is intentionally NOT a field here.

    status: JobStatus = Field(
        default=JobStatus.PENDING, description="Current status of the job."
    )
    current_retries: int = Field(
        default=0, description="Number of retry attempts made so far."
    )
    next_scheduled_run_time: Optional[datetime] = Field(
        default=None,
        description="Timestamp (UTC) when the job is next scheduled to run (for retries/deferrals).",
    )

    # Execution control parameters, can be overridden from worker defaults.
    # NOTE(review): the model default of 3 differs from
    # constants.DEFAULT_MAX_RETRIES (5). Client enqueue paths always pass an
    # explicit value from settings, but direct Job() construction falls back
    # to 3 — confirm which default is intended.
    max_retries: int = Field(
        default=3, description="Maximum number of retry attempts allowed for this job."
    )
    job_timeout_seconds: Optional[int] = Field(
        default=None,
        description="Optional per-job execution timeout in seconds. Overrides worker default if set.",
    )
    result_ttl_seconds: Optional[int] = Field(
        default=None,
        description="Optional Time-To-Live (in seconds) for the job's result. Overrides worker default if set.",
    )

    # Optional key for ensuring job uniqueness if provided during enqueue.
    job_unique_key: Optional[str] = Field(
        default=None, description="Optional key for ensuring job uniqueness."
    )

    # Fields populated upon job completion or failure.
    completion_time: Optional[datetime] = Field(
        default=None,
        description="Timestamp (UTC) when the job finished (completed or failed permanently).",
    )
    result: Optional[Any] = Field(
        default=None,
        description="Result of the job if successful, or error details if failed.",
    )
    last_error: Optional[str] = Field(
        default=None,
        description="String representation of the last error encountered during processing.",
    )

    # Optional routing hints (currently informational, could be used for advanced routing).
    queue_name: Optional[str] = Field(
        default=None, description="The name of the queue this job was last enqueued on."
    )
    dlq_name: Optional[str] = Field(
        default=None,
        description="The name of the Dead Letter Queue this job will be moved to if it fails permanently.",
    )
|
rrq/registry.py
ADDED
|
@@ -0,0 +1,77 @@
|
|
|
1
|
+
"""This module provides the JobRegistry class for managing and retrieving job handler functions."""
|
|
2
|
+
|
|
3
|
+
from typing import Any, Callable, Optional
|
|
4
|
+
|
|
5
|
+
# Potentially: from collections.abc import Callable if more specific async callable needed
|
|
6
|
+
|
|
7
|
+
|
|
8
|
+
class JobRegistry:
    """Registry mapping handler names to the callables that execute jobs.

    Handlers are asynchronous functions that do the actual work of a job. Each
    is stored under a unique name: RRQClient uses that name when enqueuing, and
    RRQWorker uses it to look up the handler to run.
    """

    def __init__(self) -> None:
        """Create a registry with no handlers."""
        self._handlers: dict[str, Callable[..., Any]] = {}

    def register(
        self, name: str, handler: Callable[..., Any], replace: bool = False
    ) -> None:
        """Register a job handler under a unique name.

        Args:
            name: The unique name for this handler. Used when enqueuing jobs.
            handler: The asynchronous callable that will execute the job. It
                should typically accept a context dictionary first, followed by
                job-specific positional and keyword arguments.
            replace: When True, silently overwrite any handler already stored
                under `name`; when False (the default), a duplicate name raises.

        Raises:
            ValueError: If `handler` is not callable, or `name` is already
                registered and `replace` is False.
        """
        if not callable(handler):
            raise ValueError(f"Handler for '{name}' must be a callable.")
        if not replace and name in self._handlers:
            raise ValueError(
                f"Handler with name '{name}' already registered. Set replace=True to override."
            )
        self._handlers[name] = handler

    def unregister(self, name: str) -> None:
        """Remove the handler stored under `name`; a missing name is a no-op.

        Args:
            name: The name of the handler to unregister.
        """
        self._handlers.pop(name, None)

    def get_handler(self, name: str) -> Optional[Callable[..., Any]]:
        """Look up a handler by name.

        Args:
            name: The name of the handler to retrieve.

        Returns:
            The callable handler function if found, otherwise None.
        """
        return self._handlers.get(name)

    def get_registered_functions(self) -> list[str]:
        """Returns a list of names of all registered handler functions."""
        return [*self._handlers]

    def clear(self) -> None:
        """Drop every registered handler."""
        self._handlers.clear()
|
|
74
|
+
|
|
75
|
+
|
|
76
|
+
# Global instance for convenience, though applications might manage their own.
|
|
77
|
+
# job_registry = JobRegistry()
|