oban 0.5.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- oban/__init__.py +22 -0
- oban/__main__.py +12 -0
- oban/_backoff.py +87 -0
- oban/_config.py +171 -0
- oban/_executor.py +188 -0
- oban/_extensions.py +16 -0
- oban/_leader.py +118 -0
- oban/_lifeline.py +77 -0
- oban/_notifier.py +324 -0
- oban/_producer.py +334 -0
- oban/_pruner.py +93 -0
- oban/_query.py +409 -0
- oban/_recorded.py +34 -0
- oban/_refresher.py +88 -0
- oban/_scheduler.py +359 -0
- oban/_stager.py +115 -0
- oban/_worker.py +78 -0
- oban/cli.py +436 -0
- oban/decorators.py +218 -0
- oban/job.py +315 -0
- oban/oban.py +1084 -0
- oban/py.typed +0 -0
- oban/queries/__init__.py +0 -0
- oban/queries/ack_job.sql +11 -0
- oban/queries/all_jobs.sql +25 -0
- oban/queries/cancel_many_jobs.sql +37 -0
- oban/queries/cleanup_expired_leaders.sql +4 -0
- oban/queries/cleanup_expired_producers.sql +2 -0
- oban/queries/delete_many_jobs.sql +5 -0
- oban/queries/delete_producer.sql +2 -0
- oban/queries/elect_leader.sql +10 -0
- oban/queries/fetch_jobs.sql +44 -0
- oban/queries/get_job.sql +23 -0
- oban/queries/insert_job.sql +28 -0
- oban/queries/insert_producer.sql +2 -0
- oban/queries/install.sql +113 -0
- oban/queries/prune_jobs.sql +18 -0
- oban/queries/reelect_leader.sql +12 -0
- oban/queries/refresh_producers.sql +3 -0
- oban/queries/rescue_jobs.sql +18 -0
- oban/queries/reset.sql +5 -0
- oban/queries/resign_leader.sql +4 -0
- oban/queries/retry_many_jobs.sql +13 -0
- oban/queries/stage_jobs.sql +34 -0
- oban/queries/uninstall.sql +4 -0
- oban/queries/update_job.sql +54 -0
- oban/queries/update_producer.sql +3 -0
- oban/queries/verify_structure.sql +9 -0
- oban/schema.py +115 -0
- oban/telemetry/__init__.py +10 -0
- oban/telemetry/core.py +170 -0
- oban/telemetry/logger.py +147 -0
- oban/testing.py +439 -0
- oban-0.5.0.dist-info/METADATA +290 -0
- oban-0.5.0.dist-info/RECORD +59 -0
- oban-0.5.0.dist-info/WHEEL +5 -0
- oban-0.5.0.dist-info/entry_points.txt +2 -0
- oban-0.5.0.dist-info/licenses/LICENSE.txt +201 -0
- oban-0.5.0.dist-info/top_level.txt +1 -0
oban/oban.py
ADDED
|
@@ -0,0 +1,1084 @@
|
|
|
1
|
+
"""Core class for enqueueing and processing jobs.
|
|
2
|
+
|
|
3
|
+
This module provides the Oban class for managing queues and processing jobs. Oban can
|
|
4
|
+
run in client mode (enqueueing only) or server mode (enqueueing and processing).
|
|
5
|
+
"""
|
|
6
|
+
|
|
7
|
+
from __future__ import annotations
|
|
8
|
+
|
|
9
|
+
import asyncio
|
|
10
|
+
import socket
|
|
11
|
+
from collections.abc import Iterable
|
|
12
|
+
from typing import Any, Callable
|
|
13
|
+
|
|
14
|
+
from psycopg_pool import AsyncConnectionPool
|
|
15
|
+
|
|
16
|
+
from .job import Job
|
|
17
|
+
from ._leader import Leader
|
|
18
|
+
from ._lifeline import Lifeline
|
|
19
|
+
from ._notifier import Notifier, PostgresNotifier
|
|
20
|
+
from ._producer import Producer, QueueInfo
|
|
21
|
+
from ._pruner import Pruner
|
|
22
|
+
from ._query import Query
|
|
23
|
+
from ._refresher import Refresher
|
|
24
|
+
from ._scheduler import Scheduler
|
|
25
|
+
from ._stager import Stager
|
|
26
|
+
|
|
27
|
+
QueueConfig = int | dict[str, Any]
|
|
28
|
+
|
|
29
|
+
_instances: dict[str, Oban] = {}
|
|
30
|
+
|
|
31
|
+
|
|
32
|
+
class Oban:
|
|
33
|
+
def __init__(
    self,
    *,
    pool: Any,
    dispatcher: Any = None,
    leadership: bool | None = None,
    lifeline: dict[str, Any] | None = None,
    name: str | None = None,
    node: str | None = None,
    notifier: Notifier | None = None,
    prefix: str | None = None,
    pruner: dict[str, Any] | None = None,
    queues: dict[str, QueueConfig] | None = None,
    refresher: dict[str, Any] | None = None,
    scheduler: dict[str, Any] | None = None,
    stager: dict[str, Any] | None = None,
) -> None:
    """Initialize an Oban instance.

    Oban can run in two modes:

    - Server mode: When queues are configured, this instance processes jobs.
      Leadership is enabled by default to coordinate cluster-wide operations.
    - Client mode: When no queues are configured, this instance only enqueues
      jobs. Leadership is disabled by default.

    Args:
        pool: Database connection pool (e.g., AsyncConnectionPool)
        dispatcher: Optional dispatcher passed through to queue producers
        leadership: Enable leadership election (default: True if queues
            configured, False otherwise)
        lifeline: Lifeline config options: interval (default: 60.0)
        name: Name for this instance in the registry (default: "oban")
        node: Node identifier for this instance (default: socket.gethostname())
        notifier: Notifier instance for pub/sub (default: PostgresNotifier
            with default config)
        prefix: PostgreSQL schema where Oban tables are located (default: "public")
        pruner: Pruning config options: max_age in seconds (default: 86_400.0,
            1 day), interval (default: 60.0), limit (default: 20_000)
        queues: Queue names mapped to worker limits (default: {})
        refresher: Refresher config options: interval (default: 15.0),
            max_age (default: 60.0)
        scheduler: Scheduler config options: timezone (default: "UTC")
        stager: Stager config options: interval (default: 1.0), limit
            (default: 20_000)
    """
    # Fix: the config options previously used mutable dict defaults
    # (`lifeline: dict = {}` etc.), a classic shared-default pitfall.
    # None defaults are normalized to fresh dicts here instead.
    lifeline = {} if lifeline is None else lifeline
    pruner = {} if pruner is None else pruner
    refresher = {} if refresher is None else refresher
    scheduler = {} if scheduler is None else scheduler
    stager = {} if stager is None else stager
    queues = queues or {}

    # Server mode (queues configured) enables leadership by default.
    if leadership is None:
        leadership = bool(queues)

    self._dispatcher = dispatcher
    self._name = name or "oban"
    self._node = node or socket.gethostname()
    self._prefix = prefix or "public"
    self._query = Query(pool, self._prefix)

    self._notifier = notifier or PostgresNotifier(
        query=self._query, prefix=self._prefix
    )

    # One producer per configured queue; an int config is shorthand for
    # {"limit": n} (see _parse_queue_config).
    self._producers = {
        queue: Producer(
            dispatcher=self._dispatcher,
            query=self._query,
            name=self._name,
            node=self._node,
            notifier=self._notifier,
            queue=queue,
            **self._parse_queue_config(queue, config),
        )
        for queue, config in queues.items()
    }

    self._leader = Leader(
        query=self._query,
        node=self._node,
        name=self._name,
        enabled=leadership,
        notifier=self._notifier,
    )

    self._stager = Stager(
        query=self._query,
        notifier=self._notifier,
        producers=self._producers,
        leader=self._leader,
        **stager,
    )

    self._lifeline = Lifeline(leader=self._leader, query=self._query, **lifeline)
    self._pruner = Pruner(leader=self._leader, query=self._query, **pruner)

    self._refresher = Refresher(
        leader=self._leader,
        producers=self._producers,
        query=self._query,
        **refresher,
    )

    self._scheduler = Scheduler(
        leader=self._leader,
        notifier=self._notifier,
        query=self._query,
        **scheduler,
    )

    # Set by start(); also doubles as the "was started" marker in stop().
    self._signal_token = None

    # Register in the module-level registry for lookup by name.
    _instances[self._name] = self
|
|
138
|
+
|
|
139
|
+
@staticmethod
def _parse_queue_config(queue: str, config: QueueConfig) -> dict[str, Any]:
    """Normalize a queue's configuration into Producer keyword options.

    A bare integer is shorthand for ``{"limit": n}``; a dict is passed
    through unchanged.
    """
    return {"limit": config} if isinstance(config, int) else config
|
|
145
|
+
|
|
146
|
+
@staticmethod
async def create_pool(
    dsn: str | None = None,
    *,
    min_size: int | None = None,
    max_size: int | None = None,
    timeout: float | None = None,
) -> AsyncConnectionPool:
    """Create and open an AsyncConnectionPool for use with Oban.

    Configuration is merged from three sources, lowest to highest
    precedence: an ``oban.toml`` in the current directory, environment
    variables (``OBAN_DSN``, ``OBAN_POOL_MIN_SIZE``, etc.), and the
    explicit arguments passed here. The caller owns the returned pool and
    is responsible for closing it.

    Args:
        dsn: PostgreSQL connection string (e.g., "postgresql://localhost/mydb")
        min_size: Minimum number of connections to keep open (default: 1)
        max_size: Maximum number of connections in the pool (default: 10)
        timeout: Timeout in seconds for acquiring a connection (default: 30.0)

    Returns:
        An open AsyncConnectionPool ready for use

    Example:
        Using configuration from oban.toml or the environment:

        >>> pool = await Oban.create_pool()
        >>> async with Oban(pool=pool) as oban:
        ...     await oban.enqueue(SomeWorker.new({"key": "value"}))
        >>> await pool.close()

        Overriding the dsn:

        >>> pool = await Oban.create_pool("postgresql://localhost/mydb")
    """
    # Imported lazily so config parsing stays off the module import path.
    from ._config import Config

    settings = Config.load(
        dsn=dsn,
        pool_min_size=min_size,
        pool_max_size=max_size,
        pool_timeout=timeout,
    )

    return await settings.create_pool()
|
|
197
|
+
|
|
198
|
+
async def __aenter__(self) -> Oban:
    """Enter the async context by starting this instance."""
    started = await self.start()
    return started
|
|
200
|
+
|
|
201
|
+
async def __aexit__(self, _exc_type, _exc_val, _exc_tb) -> None:
    """Exit the async context, stopping the instance.

    Exception details are ignored; shutdown always runs.
    """
    await self.stop()
|
|
203
|
+
|
|
204
|
+
@property
def is_leader(self) -> bool:
    """Whether this node currently holds cluster leadership.

    Always False when leadership is not enabled for this instance.

    Example:
        >>> async with Oban(pool=pool, leadership=True) as oban:
        ...     if oban.is_leader:
        ...         # Perform leader-only operation
    """
    return self._leader.is_leader
|
|
217
|
+
|
|
218
|
+
def _connection(self):
    """Return a connection (context) from the underlying query layer."""
    return self._query.connection()
|
|
220
|
+
|
|
221
|
+
async def start(self) -> Oban:
    """Start the Oban instance and begin processing jobs.

    Boots every internal process: the notifier, leader election, job
    staging, lifeline, pruning, refreshing, scheduling, and one producer
    per configured queue. When queues are configured, the required
    database tables are verified first.

    Returns:
        The started Oban instance for method chaining

    Raises:
        RuntimeError: If required database tables are missing (run
            migrations first)

    Example:
        Prefer the async context manager for most use cases:

        >>> async with Oban(pool=pool, queues={"default": 10}) as oban:
        ...     await oban.enqueue(MyWorker.new({"id": 1}))

        Or manage the lifecycle explicitly:

        >>> oban = Oban(pool=pool, queues={"default": 10})
        >>> await oban.start()
        >>> # ... application runs ...
        >>> await oban.stop()
    """
    # Only server mode (with producers) requires the schema to exist.
    if self._producers:
        await self._verify_structure()

    if self._dispatcher:
        await self._dispatcher.start()

    runners = [
        self._notifier.start(),
        self._leader.start(),
        self._stager.start(),
        self._lifeline.start(),
        self._pruner.start(),
        self._refresher.start(),
        self._scheduler.start(),
    ]
    runners.extend(producer.start() for producer in self._producers.values())

    await asyncio.gather(*runners)

    # The listener token also marks this instance as started (see stop()).
    self._signal_token = await self._notifier.listen(
        "signal", self._on_signal, wait=False
    )

    return self
|
|
273
|
+
|
|
274
|
+
async def stop(self) -> None:
    """Stop the Oban instance and gracefully shut down all processes.

    Stops every internal process: queue producers, the notifier, leader
    election, and background tasks. Running jobs are allowed to complete
    before producers fully stop.

    Calling stop on an instance that was never started is safe and
    returns immediately.

    Example:
        Prefer the async context manager for most use cases:

        >>> async with Oban(pool=pool, queues={"default": 10}) as oban:
        ...     await oban.enqueue(MyWorker.new({"id": 1}))

        Or manage the lifecycle explicitly:

        >>> oban = Oban(pool=pool, queues={"default": 10})
        >>> await oban.start()
        >>> # ... application runs ...
        >>> await oban.stop()
    """
    # The signal token is only assigned by start(); its absence means
    # there is nothing to shut down.
    if not self._signal_token:
        return

    await self._notifier.unlisten(self._signal_token)

    stoppers = [
        self._leader.stop(),
        self._stager.stop(),
        self._lifeline.stop(),
        self._pruner.stop(),
        self._refresher.stop(),
        self._scheduler.stop(),
        self._notifier.stop(),
    ]
    stoppers.extend(producer.stop() for producer in self._producers.values())

    await asyncio.gather(*stoppers)

    if self._dispatcher:
        await self._dispatcher.stop()
|
|
319
|
+
|
|
320
|
+
async def _verify_structure(self) -> None:
    """Verify the required Oban tables exist in the configured schema.

    Raises:
        RuntimeError: If any of the required tables is missing.
    """
    existing = await self._query.verify_structure()

    for table in ["oban_jobs", "oban_leaders", "oban_producers"]:
        if table not in existing:
            # Fix: the message previously read "The 'oban_jobs' is
            # missing" — name the missing object as a table.
            raise RuntimeError(
                f"The '{table}' table is missing, run schema installation first."
            )
|
|
328
|
+
|
|
329
|
+
async def enqueue(self, job: Job) -> Job:
    """Enqueue a single job in the database for processing.

    Args:
        job: A Job instance created via Worker.new()

    Returns:
        The inserted job with database-assigned values (id, timestamps, state)

    Example:
        >>> job = EmailWorker.new({"to": "user@example.com", "subject": "Welcome"})
        >>> await oban.enqueue(job)

        For convenience, you can also use Worker.enqueue() directly:

        >>> await EmailWorker.enqueue({"to": "user@example.com", "subject": "Welcome"})
    """
    # Delegate to the batch path and unwrap the single result.
    inserted = await self.enqueue_many(job)
    return inserted[0]
|
|
349
|
+
|
|
350
|
+
async def enqueue_many(
    self, jobs_or_first: Iterable[Job] | Job, /, *rest: Job
) -> list[Job]:
    """Insert multiple jobs into the database in a single operation.

    More efficient than repeated enqueue() calls because all jobs are
    written with one database query.

    Args:
        jobs_or_first: Either an iterable of jobs, or the first job when
            using variadic arguments
        *rest: Additional jobs when using variadic arguments

    Returns:
        The inserted jobs with database-assigned values (id, timestamps, state)

    Example:
        >>> await oban.enqueue_many(job1, job2, job3)
        >>> await oban.enqueue_many([job1, job2, job3])
        >>> await oban.enqueue_many(Worker.new({"id": id}) for id in range(10))
    """
    from .testing import _get_mode

    # Accept both calling conventions: a single iterable, or variadic jobs.
    if isinstance(jobs_or_first, Job):
        jobs = [jobs_or_first, *rest]
    else:
        jobs = list(jobs_or_first)

    # Inline testing mode executes jobs immediately instead of persisting
    # them, so the inputs come back without database-assigned values.
    if _get_mode() == "inline":
        await self._execute_inline(jobs)
        return jobs

    inserted = await self._query.insert_jobs(jobs)

    # Wake producers only for queues that received immediately-runnable jobs.
    ready_queues = {job.queue for job in inserted if job.state == "available"}
    await self._notifier.notify("insert", [{"queue": queue} for queue in ready_queues])

    return inserted
|
|
394
|
+
|
|
395
|
+
async def _execute_inline(self, jobs):
    """Execute jobs immediately in-process (inline testing mode)."""
    from .testing import process_job

    for job in jobs:
        outcome = process_job(job)

        # process_job may return a coroutine for async workers.
        if asyncio.iscoroutine(outcome):
            await outcome
|
|
403
|
+
|
|
404
|
+
async def get_job(self, job_id: int) -> Job | None:
    """Fetch a single job by its ID.

    Args:
        job_id: The ID of the job to fetch

    Returns:
        The Job if found, None otherwise

    Example:
        >>> job = await oban.get_job(123)
        >>> if job:
        ...     print(f"Job state: {job.state}")
    """
    return await self._query.get_job(job_id)
|
|
419
|
+
|
|
420
|
+
async def retry_job(self, job: Job | int) -> None:
    """Retry a single job by setting it as available for execution.

    Jobs currently `available` or `executing` are ignored. The job is
    scheduled for immediate execution, with `max_attempts` increased if
    already maxed out.

    Args:
        job: A Job instance or job ID to retry

    Raises:
        ValueError: If the job has no ID (it was never enqueued)

    Example:
        >>> await oban.retry_job(123)
        >>> await oban.retry_job(job)
    """
    if isinstance(job, Job):
        job_id = job.id
    else:
        job_id = job

    if not job_id:
        raise ValueError("Cannot retry a job that has not been enqueued")

    await self.retry_many_jobs([job_id])
|
|
444
|
+
|
|
445
|
+
async def retry_many_jobs(self, jobs: list[Job | int]) -> int:
    """Retry multiple jobs by setting them as available for execution.

    Jobs currently `available` or `executing` are ignored. Jobs are
    scheduled for immediate execution, with max_attempts increased if
    already maxed out.

    Args:
        jobs: List of Job instances or job IDs to retry

    Returns:
        The number of jobs updated

    Example:
        >>> count = await oban.retry_many_jobs([123, 456, 789])
        >>> count = await oban.retry_many_jobs([job_1, job_2, job_3])
    """
    ids = [self._extract_id(entry) for entry in jobs]

    return await self._query.retry_many_jobs(ids)
|
|
469
|
+
|
|
470
|
+
async def delete_job(self, job: Job | int) -> None:
    """Delete a single job from the database.

    Jobs in the `executing` state cannot be deleted and are ignored.

    Args:
        job: A Job instance or job ID to delete

    Raises:
        ValueError: If the job has no ID (it was never enqueued)

    Example:
        >>> await oban.delete_job(123)
        >>> await oban.delete_job(job)
    """
    if isinstance(job, Job):
        job_id = job.id
    else:
        job_id = job

    if not job_id:
        raise ValueError("Cannot delete a job that has not been enqueued")

    await self.delete_many_jobs([job_id])
|
|
493
|
+
|
|
494
|
+
async def delete_many_jobs(self, jobs: list[Job | int]) -> int:
    """Delete multiple jobs from the database.

    Jobs in the `executing` state cannot be deleted and are ignored.

    Args:
        jobs: List of Job instances or job IDs to delete

    Returns:
        The number of jobs deleted

    Example:
        >>> count = await oban.delete_many_jobs([123, 456, 789])
        >>> count = await oban.delete_many_jobs([job_1, job_2, job_3])
    """
    ids = [self._extract_id(entry) for entry in jobs]

    return await self._query.delete_many_jobs(ids)
|
|
517
|
+
|
|
518
|
+
async def cancel_job(self, job: Job | int) -> None:
    """Cancel a single job to prevent it from running.

    The job is marked as `cancelled`. Only jobs with the statuses
    `executing`, `available`, `scheduled`, or `retryable` can be
    cancelled.

    For `executing` jobs the database state is updated immediately, but
    the running task is not forcefully terminated. Workers should check
    for cancellation at safe points and stop gracefully by calling
    `job.cancelled()`:

    >>> async def process(self, job):
    ...     for item in dataset:
    ...         if job.cancelled():
    ...             return Cancel("Job was cancelled")
    ...         await process_item(item)

    Args:
        job: A Job instance or job ID to cancel

    Raises:
        ValueError: If the job has no ID (it was never enqueued)

    Example:
        >>> await oban.cancel_job(123)
        >>> await oban.cancel_job(job)
    """
    if isinstance(job, Job):
        job_id = job.id
    else:
        job_id = job

    if not job_id:
        raise ValueError("Cannot cancel a job that has not been enqueued")

    await self.cancel_many_jobs([job_id])
|
|
552
|
+
|
|
553
|
+
async def cancel_many_jobs(self, jobs: list[Job | int]) -> int:
    """Cancel multiple jobs to prevent them from running.

    Jobs are marked as `cancelled`. Only jobs with the statuses
    `executing`, `available`, `scheduled`, or `retryable` can be
    cancelled.

    For `executing` jobs the database state is updated immediately, but
    running tasks are not forcefully terminated. Workers should check for
    cancellation at safe points and stop gracefully by calling
    `job.cancelled()`:

    >>> async def process(self, job):
    ...     for item in dataset:
    ...         if job.cancelled():
    ...             return Cancel("Job was cancelled")
    ...         await process_item(item)

    Args:
        jobs: List of Job instances or job IDs to cancel

    Returns:
        The number of jobs cancelled

    Example:
        >>> count = await oban.cancel_many_jobs([123, 456, 789])
        >>> count = await oban.cancel_many_jobs([job_1, job_2, job_3])
    """
    ids = [self._extract_id(entry) for entry in jobs]

    count, executing_ids = await self._query.cancel_many_jobs(ids)

    # Executing jobs also need a "pkill" signal so whichever node is
    # running them can react.
    if executing_ids:
        signals = [{"action": "pkill", "job_id": job_id} for job_id in executing_ids]

        await self._notifier.notify("signal", signals)

    return count
|
|
594
|
+
|
|
595
|
+
async def update_job(
    self, job: Job | int, changes: dict[str, Any] | Callable[[Job], dict[str, Any]]
) -> Job:
    """Update a single job with the given changes.

    Accepts either a job instance or id, along with either a dict of
    changes or a callable that receives the job and returns a dict of
    changes. The update runs in a transaction with a locking clause to
    prevent concurrent modifications.

    Fields and Validations:
        All changes are validated using the same validations as
        `Job.new()`. Only args, max_attempts, meta, priority, queue,
        scheduled_at, tags, and worker can be updated.

    Warning:
        Use caution when updating jobs that are currently executing —
        modifying args, queue, or worker mid-run may lead to inconsistent
        state. Consider cancelling first or deferring the update.

    Args:
        job: A Job instance or job ID to update
        changes: Either a dict of field changes, or a callable that takes
            the job and returns a dict of changes

    Returns:
        The updated job with all current field values

    Example:
        >>> await oban.update_job(job, {"tags": ["urgent"], "priority": 0})
        >>> await oban.update_job(123, {"meta": {"batch_id": 456}})
        >>> await oban.update_job(job, lambda j: {"tags": ["retry"] + j.tags})
        >>> await oban.update_job(job, {"schedule_in": 300})  # 5 minutes from now
    """
    # Delegate to the batch path and unwrap the single result.
    updated = await self.update_many_jobs([job], changes)
    return updated[0]
|
|
658
|
+
|
|
659
|
+
async def update_many_jobs(
    self,
    jobs: list[Job | int],
    changes: dict[str, Any] | Callable[[Job], dict[str, Any]],
) -> list[Job]:
    """Update multiple jobs with the given changes.

    Accepts a list of job instances or ids, along with either a dict of
    changes or a callable that receives each job and returns a dict of
    changes. The update runs in a transaction with a locking clause to
    prevent concurrent modifications.

    Fields and Validations:
        All changes are validated using the same validations as Job.new().
        Only args, max_attempts, meta, priority, queue, scheduled_at,
        tags, and worker can be updated.

    Warning:
        Use caution when updating jobs that are currently executing —
        modifying args, queue, or worker mid-run may lead to inconsistent
        state. Consider cancelling first or deferring the update.

    Args:
        jobs: List of Job instances or job IDs to update
        changes: Either a dict of field changes, or a callable that takes
            a job and returns a dict of changes

    Returns:
        The updated jobs with all current field values

    Example:
        >>> await oban.update_many_jobs([job1, job2], {"priority": 0})
        >>> await oban.update_many_jobs([123, 456], {"tags": ["processed"]})
        >>> await oban.update_many_jobs([job1, job2], lambda job: {"tags": ["retry"] + job.tags})
    """
    # Resolve ids into full instances; callables and validation both need
    # the complete job.
    instances = []

    for entry in jobs:
        if isinstance(entry, Job):
            instances.append(entry)
            continue

        fetched = await self._query.get_job(entry)

        if fetched is None:
            raise ValueError(f"Job with id {entry} not found")

        instances.append(fetched)

    # This isn't optimized for small updates, as we're re-sending values
    # that haven't changed and not combining fetching and updating in a
    # single transaction. Works well enough for an infrequently used method.
    updated = []
    for instance in instances:
        delta = changes.copy() if isinstance(changes, dict) else changes(instance)
        updated.append(instance.update(delta))

    return await self._query.update_many_jobs(updated)
|
|
737
|
+
|
|
738
|
+
async def pause_queue(self, queue: str, *, node: str | None = None) -> None:
    """Pause a queue, preventing it from executing new jobs.

    All running jobs will remain running until they are finished.

    Args:
        queue: The name of the queue to pause
        node: Specific node name to pause. If not provided, pauses across
            all nodes.

    Example:
        >>> await oban.pause_queue("default")
        >>> await oban.pause_queue("default", node="worker.1")
    """
    # Pause locally when targeting this node or broadcasting to all.
    if not node or node == self._node:
        local = self._producers.get(queue)

        if local:
            await local.pause()

    # Any target other than exactly this node also goes over pub/sub.
    if node != self._node:
        ident = self._scope_signal(node)

        await self._notifier.notify(
            "signal", {"action": "pause", "queue": queue, "ident": ident}
        )
|
|
768
|
+
|
|
769
|
+
async def resume_queue(self, queue: str, *, node: str | None = None) -> None:
|
|
770
|
+
"""Resume a paused queue, allowing it to execute jobs again.
|
|
771
|
+
|
|
772
|
+
Args:
|
|
773
|
+
queue: The name of the queue to resume
|
|
774
|
+
node: Specific node name to resume. If not provided, resumes across all nodes.
|
|
775
|
+
|
|
776
|
+
Example:
|
|
777
|
+
Resume the default queue across all nodes:
|
|
778
|
+
|
|
779
|
+
>>> await oban.resume_queue("default")
|
|
780
|
+
|
|
781
|
+
Resume the default queue only on a particular node:
|
|
782
|
+
|
|
783
|
+
>>> await oban.resume_queue("default", node="worker.1")
|
|
784
|
+
"""
|
|
785
|
+
if not node or node == self._node:
|
|
786
|
+
producer = self._producers.get(queue)
|
|
787
|
+
|
|
788
|
+
if producer:
|
|
789
|
+
await producer.resume()
|
|
790
|
+
|
|
791
|
+
if node != self._node:
|
|
792
|
+
ident = self._scope_signal(node)
|
|
793
|
+
|
|
794
|
+
await self._notifier.notify(
|
|
795
|
+
"signal", {"action": "resume", "queue": queue, "ident": ident}
|
|
796
|
+
)
|
|
797
|
+
|
|
798
|
+
async def pause_all_queues(self, *, node: str | None = None) -> None:
|
|
799
|
+
"""Pause all queues, preventing them from executing new jobs.
|
|
800
|
+
|
|
801
|
+
All running jobs will remain running until they are finished.
|
|
802
|
+
|
|
803
|
+
Args:
|
|
804
|
+
node: Specific node name to pause. If not provided, pauses across all nodes.
|
|
805
|
+
|
|
806
|
+
Example:
|
|
807
|
+
Pause all queues across all nodes:
|
|
808
|
+
|
|
809
|
+
>>> await oban.pause_all_queues()
|
|
810
|
+
|
|
811
|
+
Pause all queues only on a particular node:
|
|
812
|
+
|
|
813
|
+
>>> await oban.pause_all_queues(node="worker.1")
|
|
814
|
+
"""
|
|
815
|
+
if not node or node == self._node:
|
|
816
|
+
for producer in self._producers.values():
|
|
817
|
+
await producer.pause()
|
|
818
|
+
|
|
819
|
+
if node != self._node:
|
|
820
|
+
ident = self._scope_signal(node)
|
|
821
|
+
|
|
822
|
+
await self._notifier.notify(
|
|
823
|
+
"signal", {"action": "pause", "queue": "*", "ident": ident}
|
|
824
|
+
)
|
|
825
|
+
|
|
826
|
+
async def resume_all_queues(self, *, node: str | None = None) -> None:
|
|
827
|
+
"""Resume all paused queues, allowing them to execute jobs again.
|
|
828
|
+
|
|
829
|
+
Args:
|
|
830
|
+
node: Specific node name to resume. If not provided, resumes across all nodes.
|
|
831
|
+
|
|
832
|
+
Example:
|
|
833
|
+
Resume all queues across all nodes:
|
|
834
|
+
|
|
835
|
+
>>> await oban.resume_all_queues()
|
|
836
|
+
|
|
837
|
+
Resume all queues only on a particular node:
|
|
838
|
+
|
|
839
|
+
>>> await oban.resume_all_queues(node="worker.1")
|
|
840
|
+
"""
|
|
841
|
+
if not node or node == self._node:
|
|
842
|
+
for producer in self._producers.values():
|
|
843
|
+
await producer.resume()
|
|
844
|
+
|
|
845
|
+
if node != self._node:
|
|
846
|
+
ident = self._scope_signal(node)
|
|
847
|
+
|
|
848
|
+
await self._notifier.notify(
|
|
849
|
+
"signal", {"action": "resume", "queue": "*", "ident": ident}
|
|
850
|
+
)
|
|
851
|
+
|
|
852
|
+
def check_queue(self, queue: str) -> QueueInfo | None:
|
|
853
|
+
"""Check the current state of a queue.
|
|
854
|
+
|
|
855
|
+
This allows you to introspect on a queue's health by retrieving key attributes
|
|
856
|
+
of the producer's state, such as the current limit, running job IDs, and when
|
|
857
|
+
the producer was started.
|
|
858
|
+
|
|
859
|
+
Args:
|
|
860
|
+
queue: The name of the queue to check
|
|
861
|
+
|
|
862
|
+
Returns:
|
|
863
|
+
A QueueInfo instance with the producer's state, or None if the queue
|
|
864
|
+
isn't running on this node.
|
|
865
|
+
|
|
866
|
+
Example:
|
|
867
|
+
Get details about the default queue:
|
|
868
|
+
|
|
869
|
+
>>> state = oban.check_queue("default")
|
|
870
|
+
... print(f"Queue {state.queue} has {len(state.running)} jobs running")
|
|
871
|
+
|
|
872
|
+
Attempt to check a queue that isn't running locally:
|
|
873
|
+
|
|
874
|
+
>>> state = oban.check_queue("not_running")
|
|
875
|
+
>>> print(state) # None
|
|
876
|
+
"""
|
|
877
|
+
producer = self._producers.get(queue)
|
|
878
|
+
|
|
879
|
+
if producer:
|
|
880
|
+
return producer.check()
|
|
881
|
+
|
|
882
|
+
def check_all_queues(self) -> list[QueueInfo]:
|
|
883
|
+
"""Check the current state of all queues running on this node.
|
|
884
|
+
|
|
885
|
+
Returns:
|
|
886
|
+
A list of QueueInfo instances, one for each queue running locally.
|
|
887
|
+
Returns an empty list if no queues are running on this node.
|
|
888
|
+
|
|
889
|
+
Example:
|
|
890
|
+
Get details about all local queues:
|
|
891
|
+
|
|
892
|
+
>>> states = oban.check_all_queues()
|
|
893
|
+
>>> for state in states:
|
|
894
|
+
... print(f"{state.queue}: {len(state.running)} running, paused={state.paused}")
|
|
895
|
+
"""
|
|
896
|
+
return [producer.check() for producer in self._producers.values()]
|
|
897
|
+
|
|
898
|
+
async def start_queue(
|
|
899
|
+
self, *, queue: str, limit: int, paused: bool = False, node: str | None = None
|
|
900
|
+
) -> None:
|
|
901
|
+
"""Start a new supervised queue.
|
|
902
|
+
|
|
903
|
+
By default, this starts a new supervised queue across all nodes running Oban on the
|
|
904
|
+
same database and prefix.
|
|
905
|
+
|
|
906
|
+
Args:
|
|
907
|
+
queue: The name of the queue to start
|
|
908
|
+
limit: The concurrency limit for the queue
|
|
909
|
+
paused: Whether the queue starts in a paused state (default: False)
|
|
910
|
+
node: Specific node name to start the queue on. If not provided, starts across all nodes.
|
|
911
|
+
|
|
912
|
+
Example:
|
|
913
|
+
Start the priority queue with a concurrency limit of 10 across all nodes:
|
|
914
|
+
|
|
915
|
+
>>> await oban.start_queue(queue="priority", limit=10)
|
|
916
|
+
|
|
917
|
+
Start the media queue on a particular node:
|
|
918
|
+
|
|
919
|
+
>>> await oban.start_queue(queue="media", limit=5, node="worker.1")
|
|
920
|
+
|
|
921
|
+
Start the media queue in a paused state:
|
|
922
|
+
|
|
923
|
+
>>> await oban.start_queue(queue="media", limit=5, paused=True)
|
|
924
|
+
"""
|
|
925
|
+
if not node or node == self._node:
|
|
926
|
+
await self._start_queue_local(queue=queue, limit=limit, paused=paused)
|
|
927
|
+
else:
|
|
928
|
+
ident = self._scope_signal(node)
|
|
929
|
+
|
|
930
|
+
await self._notifier.notify(
|
|
931
|
+
"signal",
|
|
932
|
+
{
|
|
933
|
+
"action": "start",
|
|
934
|
+
"queue": queue,
|
|
935
|
+
"limit": limit,
|
|
936
|
+
"paused": paused,
|
|
937
|
+
"ident": ident,
|
|
938
|
+
},
|
|
939
|
+
)
|
|
940
|
+
|
|
941
|
+
async def _start_queue_local(self, **params) -> None:
|
|
942
|
+
queue = params["queue"]
|
|
943
|
+
|
|
944
|
+
if queue in self._producers:
|
|
945
|
+
return
|
|
946
|
+
|
|
947
|
+
producer = Producer(
|
|
948
|
+
dispatcher=self._dispatcher,
|
|
949
|
+
query=self._query,
|
|
950
|
+
name=self._name,
|
|
951
|
+
node=self._node,
|
|
952
|
+
notifier=self._notifier,
|
|
953
|
+
**params,
|
|
954
|
+
)
|
|
955
|
+
|
|
956
|
+
self._producers[queue] = producer
|
|
957
|
+
|
|
958
|
+
await producer.start()
|
|
959
|
+
|
|
960
|
+
async def stop_queue(self, queue: str, *, node: str | None = None) -> None:
|
|
961
|
+
"""Stop a supervised queue.
|
|
962
|
+
|
|
963
|
+
By default, this stops the queue across all connected nodes.
|
|
964
|
+
|
|
965
|
+
Args:
|
|
966
|
+
queue: The name of the queue to stop
|
|
967
|
+
node: Specific node name to stop the queue on. If not provided, stops across all nodes.
|
|
968
|
+
|
|
969
|
+
Example:
|
|
970
|
+
Stop the priority queue across all nodes:
|
|
971
|
+
|
|
972
|
+
>>> await oban.stop_queue("priority")
|
|
973
|
+
|
|
974
|
+
Stop the media queue on a particular node:
|
|
975
|
+
|
|
976
|
+
>>> await oban.stop_queue("media", node="worker.1")
|
|
977
|
+
"""
|
|
978
|
+
if not node or node == self._node:
|
|
979
|
+
await self._stop_queue_local(queue)
|
|
980
|
+
else:
|
|
981
|
+
ident = self._scope_signal(node)
|
|
982
|
+
|
|
983
|
+
await self._notifier.notify(
|
|
984
|
+
"signal", {"action": "stop", "queue": queue, "ident": ident}
|
|
985
|
+
)
|
|
986
|
+
|
|
987
|
+
async def _stop_queue_local(self, queue: str) -> None:
|
|
988
|
+
producer = self._producers.pop(queue, None)
|
|
989
|
+
|
|
990
|
+
if producer:
|
|
991
|
+
await producer.stop()
|
|
992
|
+
|
|
993
|
+
async def scale_queue(
|
|
994
|
+
self, *, queue: str, node: str | None = None, **kwargs: Any
|
|
995
|
+
) -> None:
|
|
996
|
+
"""Scale the concurrency for a queue.
|
|
997
|
+
|
|
998
|
+
By default, this scales the queue across all connected nodes.
|
|
999
|
+
|
|
1000
|
+
Args:
|
|
1001
|
+
queue: The name of the queue to scale
|
|
1002
|
+
node: Specific node name to scale the queue on. If not provided, scales across all nodes.
|
|
1003
|
+
**kwargs: Options passed through transparently, including:
|
|
1004
|
+
limit: The new concurrency limit
|
|
1005
|
+
|
|
1006
|
+
Example:
|
|
1007
|
+
Scale a queue up, triggering immediate execution of queued jobs:
|
|
1008
|
+
|
|
1009
|
+
>>> await oban.scale_queue(queue="default", limit=50)
|
|
1010
|
+
|
|
1011
|
+
Scale the queue back down, allowing executing jobs to finish:
|
|
1012
|
+
|
|
1013
|
+
>>> await oban.scale_queue(queue="default", limit=5)
|
|
1014
|
+
|
|
1015
|
+
Scale the queue on a particular node:
|
|
1016
|
+
|
|
1017
|
+
>>> await oban.scale_queue(queue="default", limit=10, node="worker.1")
|
|
1018
|
+
"""
|
|
1019
|
+
if not node or node == self._node:
|
|
1020
|
+
await self._scale_queue_local(queue, **kwargs)
|
|
1021
|
+
|
|
1022
|
+
if node != self._node:
|
|
1023
|
+
ident = self._scope_signal(node)
|
|
1024
|
+
|
|
1025
|
+
await self._notifier.notify(
|
|
1026
|
+
"signal",
|
|
1027
|
+
{"action": "scale", "queue": queue, "ident": ident, **kwargs},
|
|
1028
|
+
)
|
|
1029
|
+
|
|
1030
|
+
@staticmethod
|
|
1031
|
+
def _extract_id(job: Job | int) -> int:
|
|
1032
|
+
match job:
|
|
1033
|
+
case Job(id=int(id)):
|
|
1034
|
+
return id
|
|
1035
|
+
case int(id):
|
|
1036
|
+
return id
|
|
1037
|
+
case _:
|
|
1038
|
+
raise ValueError("Cannot retry a job that has not been enqueued")
|
|
1039
|
+
|
|
1040
|
+
async def _scale_queue_local(self, queue: str, **kwargs: Any) -> None:
|
|
1041
|
+
producer = self._producers.get(queue)
|
|
1042
|
+
|
|
1043
|
+
if producer:
|
|
1044
|
+
await producer.scale(**kwargs)
|
|
1045
|
+
|
|
1046
|
+
def _scope_signal(self, node: str | None) -> str:
|
|
1047
|
+
if node is not None:
|
|
1048
|
+
return f"{self._name}.{node}"
|
|
1049
|
+
else:
|
|
1050
|
+
return "any"
|
|
1051
|
+
|
|
1052
|
+
async def _on_signal(self, _channel: str, payload: dict) -> None:
|
|
1053
|
+
ident = payload.pop("ident")
|
|
1054
|
+
|
|
1055
|
+
if ident != "any" and ident != f"{self._name}.{self._node}":
|
|
1056
|
+
return
|
|
1057
|
+
|
|
1058
|
+
match payload.pop("action"):
|
|
1059
|
+
case "start":
|
|
1060
|
+
await self._start_queue_local(**payload)
|
|
1061
|
+
case "stop":
|
|
1062
|
+
await self._stop_queue_local(**payload)
|
|
1063
|
+
case "scale":
|
|
1064
|
+
await self._scale_queue_local(**payload)
|
|
1065
|
+
|
|
1066
|
+
|
|
1067
|
+
def get_instance(name: str = "oban") -> Oban:
    """Look up a named Oban instance in the module-level registry.

    Args:
        name: Name of the instance to retrieve (default: "oban")

    Returns:
        The Oban instance

    Raises:
        RuntimeError: If no instance with the given name exists
    """
    if (instance := _instances.get(name)) is None:
        raise RuntimeError(f"Oban instance '{name}' not found in registry")

    return instance
|