oban 0.5.0 (py3-none-any.whl)
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- oban/__init__.py +22 -0
- oban/__main__.py +12 -0
- oban/_backoff.py +87 -0
- oban/_config.py +171 -0
- oban/_executor.py +188 -0
- oban/_extensions.py +16 -0
- oban/_leader.py +118 -0
- oban/_lifeline.py +77 -0
- oban/_notifier.py +324 -0
- oban/_producer.py +334 -0
- oban/_pruner.py +93 -0
- oban/_query.py +409 -0
- oban/_recorded.py +34 -0
- oban/_refresher.py +88 -0
- oban/_scheduler.py +359 -0
- oban/_stager.py +115 -0
- oban/_worker.py +78 -0
- oban/cli.py +436 -0
- oban/decorators.py +218 -0
- oban/job.py +315 -0
- oban/oban.py +1084 -0
- oban/py.typed +0 -0
- oban/queries/__init__.py +0 -0
- oban/queries/ack_job.sql +11 -0
- oban/queries/all_jobs.sql +25 -0
- oban/queries/cancel_many_jobs.sql +37 -0
- oban/queries/cleanup_expired_leaders.sql +4 -0
- oban/queries/cleanup_expired_producers.sql +2 -0
- oban/queries/delete_many_jobs.sql +5 -0
- oban/queries/delete_producer.sql +2 -0
- oban/queries/elect_leader.sql +10 -0
- oban/queries/fetch_jobs.sql +44 -0
- oban/queries/get_job.sql +23 -0
- oban/queries/insert_job.sql +28 -0
- oban/queries/insert_producer.sql +2 -0
- oban/queries/install.sql +113 -0
- oban/queries/prune_jobs.sql +18 -0
- oban/queries/reelect_leader.sql +12 -0
- oban/queries/refresh_producers.sql +3 -0
- oban/queries/rescue_jobs.sql +18 -0
- oban/queries/reset.sql +5 -0
- oban/queries/resign_leader.sql +4 -0
- oban/queries/retry_many_jobs.sql +13 -0
- oban/queries/stage_jobs.sql +34 -0
- oban/queries/uninstall.sql +4 -0
- oban/queries/update_job.sql +54 -0
- oban/queries/update_producer.sql +3 -0
- oban/queries/verify_structure.sql +9 -0
- oban/schema.py +115 -0
- oban/telemetry/__init__.py +10 -0
- oban/telemetry/core.py +170 -0
- oban/telemetry/logger.py +147 -0
- oban/testing.py +439 -0
- oban-0.5.0.dist-info/METADATA +290 -0
- oban-0.5.0.dist-info/RECORD +59 -0
- oban-0.5.0.dist-info/WHEEL +5 -0
- oban-0.5.0.dist-info/entry_points.txt +2 -0
- oban-0.5.0.dist-info/licenses/LICENSE.txt +201 -0
- oban-0.5.0.dist-info/top_level.txt +1 -0
oban/testing.py
ADDED
@@ -0,0 +1,439 @@
"""Testing helpers for Oban workers and queues.

This module provides utilities for unit testing workers without database interaction.
"""

from __future__ import annotations

import asyncio
import json

from contextlib import contextmanager
from contextvars import ContextVar
from datetime import datetime, timedelta, timezone
from typing import TYPE_CHECKING

from ._executor import Executor
from .job import Job
from .oban import get_instance
from ._worker import worker_name

if TYPE_CHECKING:
    from .oban import Oban

_testing_mode = ContextVar[str | None]("oban_testing_mode", default=None)

FAR_FUTURE = timedelta(365 * 100)


@contextmanager
def mode(testing_mode: str):
    """Temporarily set the testing mode for Oban instances.

    This context manager allows you to override the testing mode for all Oban
    instances within a specific context. Useful for switching modes in individual
    tests without affecting the entire test suite.

    Args:
        testing_mode: The mode to set ("inline" or "manual")

    Yields:
        None

    Example:
        >>> import oban.testing
        >>>
        >>> oban.testing.set_mode("manual")
        >>>
        >>> def test_inline_execution():
        ...     with oban.testing.mode("inline"):
        ...         # Jobs execute immediately in this context
        ...         await EmailWorker.enqueue({"to": "user@example.com"})
    """
    token = _testing_mode.set(testing_mode)

    try:
        yield
    finally:
        _testing_mode.reset(token)


def _get_mode() -> str | None:
    return _testing_mode.get()


async def reset_oban(oban: str | Oban = "oban"):
    """Reset Oban tables between tests.

    Truncates all oban related tables with CASCADE and RESTART IDENTITY. Useful
    for cleaning up between tests when using manual testing mode.

    Args:
        oban: Oban instance name (default: "oban") or Oban instance

    Example:
        >>> from oban.testing import reset_oban
        >>> import pytest
        >>>
        >>> # In your conftest.py
        >>> @pytest.fixture(autouse=True)
        >>> async def _reset_oban_after_test():
        ...     yield
        ...     await reset_oban()
        >>>
        >>> # Or call directly in tests
        >>> async def test_something(oban):
        ...     await oban.enqueue(SomeWorker.new({}))
        ...     # ... test assertions ...
        ...     await reset_oban()
    """
    if isinstance(oban, str):
        oban = get_instance(oban)

    await oban._query.reset()


async def all_enqueued(*, oban: str | Oban = "oban", **filters) -> list[Job]:
    """Retrieve all currently enqueued jobs matching a set of filters.

    Only jobs matching all of the provided filters will be returned. Additionally,
    jobs are returned in descending order where the most recently enqueued job will be
    listed first.

    Args:
        oban: Oban instance name (default: "oban") or Oban instance
        **filters: Job fields to match (e.g., worker=EmailWorker, args={"to": "..."},
            queue="mailers", priority=5). Args supports partial matching.

    Returns:
        List of Job instances matching the filters, in descending order by ID

    Example:
        >>> from oban.testing import all_enqueued
        >>> from app.workers import EmailWorker
        >>>
        >>> # Assert based on only some of a job's args
        >>> jobs = await all_enqueued(worker=EmailWorker)
        >>> assert len(jobs) == 1
        >>> assert jobs[0].args["id"] == 1
        >>>
        >>> # Assert that exactly one job was inserted for a queue
        >>> jobs = await all_enqueued(queue="alpha")
        >>> assert len(jobs) == 1
        >>>
        >>> # Assert that there aren't any jobs enqueued
        >>> assert await all_enqueued() == []
    """
    if isinstance(oban, str):
        oban = get_instance(oban)

    if "worker" in filters and not isinstance(filters["worker"], str):
        filters["worker"] = worker_name(filters["worker"])

    jobs = await oban._query.all_jobs(["available", "scheduled"])

    return [job for job in jobs if _match_filters(job, filters)]


def _match_filters(job: Job, filters: dict) -> bool:
    for key, value in filters.items():
        if key == "args":
            if not _args_match(value, job.args):
                return False
        elif getattr(job, key, None) != value:
            return False

    return True


def _args_match(expected: dict, actual: dict) -> bool:
    for key, value in expected.items():
        if key not in actual or actual[key] != value:
            return False

    return True


async def assert_enqueued(*, oban: str | Oban = "oban", timeout: float = 0, **filters):
    """Assert that a job matching the given criteria was enqueued.

    This helper queries the database for jobs in 'available' or 'scheduled' state
    that match the provided filters. With a timeout, it will poll repeatedly until
    a matching job is found or the timeout expires.

    Args:
        oban: Oban instance name (default: "oban") or Oban instance
        timeout: Maximum time to wait for a matching job (in seconds). Default: 0 (no wait)
        **filters: Job fields to match (e.g., worker=EmailWorker, args={"to": "..."},
            queue="mailers", priority=5). Args supports partial matching.

    Raises:
        AssertionError: If no matching job is found within the timeout

    Example:
        >>> from oban.testing import assert_enqueued
        >>> from app.workers import EmailWorker
        >>>
        >>> # Assert job was enqueued with specific worker and args
        >>> async def test_signup_sends_email(app):
        ...     await app.post("/signup", json={"email": "user@example.com"})
        ...     await assert_enqueued(worker=EmailWorker, args={"to": "user@example.com"})
        >>>
        >>> # Wait up to 0.2 seconds for an async job to be enqueued
        >>> await assert_enqueued(worker=EmailWorker, timeout=0.2)
        >>>
        >>> # Match on queue alone
        >>> await assert_enqueued(queue="mailers")
        >>>
        >>> # Partial args matching
        >>> await assert_enqueued(worker=EmailWorker, args={"to": "user@example.com"})
        >>>
        >>> # Filter by queue and priority
        >>> await assert_enqueued(worker=EmailWorker, queue="mailers", priority=5)
        >>>
        >>> # Use an alternate oban instance
        >>> await assert_enqueued(worker=BatchWorker, oban="batch")
    """

    async def has_matching_jobs():
        jobs = await all_enqueued(oban=oban, **filters)

        return bool(jobs)

    if not await _poll_until(has_matching_jobs, timeout):
        all_jobs = await all_enqueued(oban=oban)
        formatted = "\n".join(f" {job}" for job in all_jobs)
        timeout_msg = f" within {timeout}s" if timeout > 0 else ""

        raise AssertionError(
            f"Expected a job matching: {filters} to be enqueued{timeout_msg}. Instead found:\n\n{formatted}"
        )


async def refute_enqueued(*, oban: str | Oban = "oban", timeout: float = 0, **filters):
    """Assert that no job matching the given criteria was enqueued.

    This helper queries the database for jobs in 'available' or 'scheduled' state
    that match the provided filters and asserts that none are found. With a timeout,
    it will poll repeatedly during the timeout period to ensure no matching job appears.

    Args:
        oban: Oban instance name (default: "oban") or Oban instance
        timeout: Time to monitor for matching jobs (in seconds). Default: 0 (check once)
        **filters: Job fields to match (e.g., worker=EmailWorker, args={"to": "..."},
            queue="mailers", priority=5). Args supports partial matching.

    Raises:
        AssertionError: If any matching jobs are found

    Example:
        >>> from oban.testing import refute_enqueued
        >>> from app.workers import EmailWorker
        >>>
        >>> # Assert no email jobs were enqueued
        >>> async def test_no_email_on_invalid_signup(app):
        ...     await app.post("/signup", json={"email": "invalid"})
        ...     await refute_enqueued(worker=EmailWorker)
        >>>
        >>> # Monitor for 0.2 seconds to ensure no async job is enqueued
        >>> await refute_enqueued(worker=EmailWorker, timeout=0.2)
        >>>
        >>> # Refute specific args
        >>> await refute_enqueued(worker=EmailWorker, args={"to": "blocked@example.com"})
        >>>
        >>> # Refute on queue
        >>> await refute_enqueued(queue="mailers")
    """

    async def has_matching_jobs():
        jobs = await all_enqueued(oban=oban, **filters)

        return bool(jobs)

    if await _poll_until(has_matching_jobs, timeout):
        matching = await all_enqueued(oban=oban, **filters)
        formatted = "\n".join(f" {job}" for job in matching)
        timeout_msg = f" within {timeout}s" if timeout > 0 else ""

        raise AssertionError(
            f"Expected no jobs matching: {filters} to be enqueued{timeout_msg}. Instead found:\n\n{formatted}"
        )


async def drain_queue(
    queue: str = "default",
    oban: str | Oban = "oban",
    with_recursion: bool = True,
    with_safety: bool = False,
    with_scheduled: bool = True,
) -> dict[str, int]:
    """Synchronously execute all available jobs in a queue.

    All execution happens within the current process. Draining a queue from within
    the current process is especially useful for testing, where jobs enqueued by a
    process in sandbox mode are only visible to that process.

    Args:
        queue: Name of the queue to drain
        oban: Oban instance name (default: "oban") or Oban instance
        with_recursion: Whether to drain jobs recursively, or all in a single pass.
            Either way, jobs are processed sequentially, one at a time.
            Recursion is required when jobs insert other jobs or depend
            on the execution of other jobs. Defaults to True.
        with_scheduled: Whether to include scheduled or retryable jobs when draining.
            In recursive mode, which is the default, this will include snoozed
            jobs, and may lead to an infinite loop if the job snoozes repeatedly.
            Defaults to True.
        with_safety: Whether to silently catch and record errors when draining. When
            False, raised exceptions are immediately propagated to the caller.
            Defaults to False.

    Returns:
        Dict with counts for each terminal job state (completed, discarded, cancelled,
        scheduled, retryable)

    Example:
        >>> from oban.testing import drain_queue
        >>> from oban import worker
        >>>
        >>> # Drain a queue with jobs
        >>> result = await drain_queue(queue="default")
        >>> # {'completed': 2, 'discarded': 1, 'cancelled': 0, ...}
        >>>
        >>> # Drain without scheduled jobs
        >>> await drain_queue(queue="default", with_scheduled=False)
        >>>
        >>> # Drain without safety and assert an error is raised
        >>> import pytest
        >>> with pytest.raises(RuntimeError):
        ...     await drain_queue(queue="risky", with_safety=False)
        >>>
        >>> # Drain without recursion (jobs that enqueue other jobs)
        >>> await drain_queue(queue="default", with_recursion=False)
    """
    if isinstance(oban, str):
        oban = get_instance(oban)

    summary = {
        "cancelled": 0,
        "completed": 0,
        "discarded": 0,
        "retryable": 0,
        "scheduled": 0,
    }

    while True:
        if with_scheduled:
            before = datetime.now(timezone.utc) + FAR_FUTURE
            await oban._query.stage_jobs(limit=1000, queues=[queue], before=before)

        match await oban._query.fetch_jobs(
            demand=1, queue=queue, node="drain", uuid="drain"
        ):
            case []:
                break
            case [job]:
                executor = await Executor(job=job, safe=with_safety).execute()

                if executor.action is not None:
                    await oban._query.ack_jobs([executor.action])

                summary[executor.status] += 1

                if not with_recursion:
                    break

    return summary


async def _poll_until(condition, timeout: float, interval: float = 0.01) -> bool:
    if timeout <= 0:
        return await condition()

    elapsed = 0.0

    while elapsed < timeout:
        elapsed += interval

        if await condition():
            return True

        await asyncio.sleep(interval)

    return await condition()


def process_job(job: Job):
    """Execute a worker's process method with the given job.

    This helper is designed for unit testing workers in isolation without
    requiring database interaction.

    Args:
        job: A Job instance to process

    Returns:
        The result from the worker's process method (any value is accepted)

    Raises:
        Any exception raised by the worker if it fails

    Example:
        >>> from oban import worker
        >>> from oban.testing import process_job
        >>>
        >>> @worker()
        ... class EmailWorker:
        ...     async def process(self, job):
        ...         return {"sent": True, "to": job.args["to"]}
        >>>
        >>> def test_email_worker():
        ...     job = EmailWorker.new({"to": "user@example.com", "subject": "Hello"})
        ...     result = process_job(job)
        ...     assert result["sent"] is True
        ...     assert result["to"] == "user@example.com"

    You can also test function-based workers using the @job decorator:

        >>> from oban import job
        >>> from oban.testing import process_job
        >>>
        >>> @job()
        ... def send_notification(user_id: int, message: str):
        ...     return f"Sent '{message}' to user {user_id}"
        >>>
        >>> def test_send_notification():
        ...     job = send_notification.new(123, "Hello World")
        ...     result = process_job(job)
        ...     assert result == "Sent 'Hello World' to user 123"
    """
    now = datetime.now(timezone.utc)

    job.args = json.loads(json.dumps(job.args))
    job.meta = json.loads(json.dumps(job.meta))

    if job.id is None:
        job.id = id(job)

    if job.attempt == 0:
        job.attempt = 1

    if job.attempted_at is None:
        job.attempted_at = now

    if job.scheduled_at is None:
        job.scheduled_at = now

    if job.inserted_at is None:
        job.inserted_at = now

    async def _execute():
        executor = await Executor(job, safe=False).execute()

        return executor.result

    try:
        asyncio.get_running_loop()
        return _execute()
    except RuntimeError:
        return asyncio.run(_execute())