oban-0.5.0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (59)
  1. oban/__init__.py +22 -0
  2. oban/__main__.py +12 -0
  3. oban/_backoff.py +87 -0
  4. oban/_config.py +171 -0
  5. oban/_executor.py +188 -0
  6. oban/_extensions.py +16 -0
  7. oban/_leader.py +118 -0
  8. oban/_lifeline.py +77 -0
  9. oban/_notifier.py +324 -0
  10. oban/_producer.py +334 -0
  11. oban/_pruner.py +93 -0
  12. oban/_query.py +409 -0
  13. oban/_recorded.py +34 -0
  14. oban/_refresher.py +88 -0
  15. oban/_scheduler.py +359 -0
  16. oban/_stager.py +115 -0
  17. oban/_worker.py +78 -0
  18. oban/cli.py +436 -0
  19. oban/decorators.py +218 -0
  20. oban/job.py +315 -0
  21. oban/oban.py +1084 -0
  22. oban/py.typed +0 -0
  23. oban/queries/__init__.py +0 -0
  24. oban/queries/ack_job.sql +11 -0
  25. oban/queries/all_jobs.sql +25 -0
  26. oban/queries/cancel_many_jobs.sql +37 -0
  27. oban/queries/cleanup_expired_leaders.sql +4 -0
  28. oban/queries/cleanup_expired_producers.sql +2 -0
  29. oban/queries/delete_many_jobs.sql +5 -0
  30. oban/queries/delete_producer.sql +2 -0
  31. oban/queries/elect_leader.sql +10 -0
  32. oban/queries/fetch_jobs.sql +44 -0
  33. oban/queries/get_job.sql +23 -0
  34. oban/queries/insert_job.sql +28 -0
  35. oban/queries/insert_producer.sql +2 -0
  36. oban/queries/install.sql +113 -0
  37. oban/queries/prune_jobs.sql +18 -0
  38. oban/queries/reelect_leader.sql +12 -0
  39. oban/queries/refresh_producers.sql +3 -0
  40. oban/queries/rescue_jobs.sql +18 -0
  41. oban/queries/reset.sql +5 -0
  42. oban/queries/resign_leader.sql +4 -0
  43. oban/queries/retry_many_jobs.sql +13 -0
  44. oban/queries/stage_jobs.sql +34 -0
  45. oban/queries/uninstall.sql +4 -0
  46. oban/queries/update_job.sql +54 -0
  47. oban/queries/update_producer.sql +3 -0
  48. oban/queries/verify_structure.sql +9 -0
  49. oban/schema.py +115 -0
  50. oban/telemetry/__init__.py +10 -0
  51. oban/telemetry/core.py +170 -0
  52. oban/telemetry/logger.py +147 -0
  53. oban/testing.py +439 -0
  54. oban-0.5.0.dist-info/METADATA +290 -0
  55. oban-0.5.0.dist-info/RECORD +59 -0
  56. oban-0.5.0.dist-info/WHEEL +5 -0
  57. oban-0.5.0.dist-info/entry_points.txt +2 -0
  58. oban-0.5.0.dist-info/licenses/LICENSE.txt +201 -0
  59. oban-0.5.0.dist-info/top_level.txt +1 -0
oban/_pruner.py ADDED
@@ -0,0 +1,93 @@
+ from __future__ import annotations
+
+ import asyncio
+ from typing import TYPE_CHECKING
+
+ from . import telemetry
+
+ if TYPE_CHECKING:
+     from ._leader import Leader
+     from ._query import Query
+
+
+ class Pruner:
+     """Manages periodic deletion of completed, cancelled, and discarded jobs.
+
+     This class is managed internally by Oban and shouldn't be constructed directly.
+     Instead, configure pruning via the Oban constructor:
+
+     >>> async with Oban(
+     ...     conn=conn,
+     ...     queues={"default": 10},
+     ...     pruner={"max_age": 86_400, "interval": 60.0, "limit": 20_000}
+     ... ) as oban:
+     ...     # Pruner runs automatically in the background
+     """
+
+     def __init__(
+         self,
+         *,
+         query: Query,
+         leader: Leader,
+         max_age: int = 86_400,
+         interval: float = 60.0,
+         limit: int = 20_000,
+     ) -> None:
+         self._leader = leader
+         self._max_age = max_age
+         self._interval = interval
+         self._limit = limit
+         self._query = query
+
+         self._loop_task = None
+
+         self._validate(max_age=max_age, interval=interval, limit=limit)
+
+     @staticmethod
+     def _validate(*, max_age: int, interval: float, limit: int) -> None:
+         if not isinstance(max_age, int):
+             raise TypeError(f"max_age must be an integer, got {max_age}")
+         if max_age <= 0:
+             raise ValueError(f"max_age must be positive, got {max_age}")
+
+         if not isinstance(interval, (int, float)):
+             raise TypeError(f"interval must be a number, got {interval}")
+         if interval <= 0:
+             raise ValueError(f"interval must be positive, got {interval}")
+
+         if not isinstance(limit, int):
+             raise TypeError(f"limit must be an integer, got {limit}")
+         if limit <= 0:
+             raise ValueError(f"limit must be positive, got {limit}")
+
+     async def start(self) -> None:
+         self._loop_task = asyncio.create_task(self._loop(), name="oban-pruner")
+
+     async def stop(self) -> None:
+         if not self._loop_task:
+             return
+
+         self._loop_task.cancel()
+
+         try:
+             await self._loop_task
+         except asyncio.CancelledError:
+             pass
+
+     async def _loop(self) -> None:
+         while True:
+             try:
+                 await asyncio.sleep(self._interval)
+
+                 if self._leader.is_leader:
+                     await self._prune()
+             except asyncio.CancelledError:
+                 break
+             except Exception:
+                 pass
+
+     async def _prune(self) -> None:
+         with telemetry.span("oban.pruner.prune", {}) as context:
+             pruned = await self._query.prune_jobs(self._max_age, self._limit)
+
+             context.add({"pruned_count": pruned})
oban/_query.py ADDED
@@ -0,0 +1,409 @@
+ from __future__ import annotations
+
+ import re
+ from collections import defaultdict
+ from contextlib import asynccontextmanager
+ from dataclasses import replace
+ from datetime import datetime, timezone
+ from functools import cache
+ from importlib.resources import files
+ from typing import Any
+
+ from psycopg.rows import class_row
+ from psycopg.types.json import Jsonb
+ from psycopg_pool import AsyncConnectionPool
+
+ from ._executor import AckAction
+ from ._extensions import use_ext
+ from .job import Job, TIMESTAMP_FIELDS
+
+ ACKABLE_FIELDS = [
+     "id",
+     "state",
+     "attempt_change",
+     "schedule_in",
+     "error",
+     "meta",
+ ]
+
+
+ INSERTABLE_FIELDS = [
+     "args",
+     "inserted_at",
+     "max_attempts",
+     "meta",
+     "priority",
+     "queue",
+     "scheduled_at",
+     "state",
+     "tags",
+     "worker",
+ ]
+
+ # The `Job` class has an `errors` field, but we only insert a single `error` at a time.
+ JSON_FIELDS = ["args", "error", "errors", "meta", "tags"]
+
+
+ UPDATABLE_FIELDS = [
+     "args",
+     "max_attempts",
+     "meta",
+     "priority",
+     "queue",
+     "scheduled_at",
+     "tags",
+     "worker",
+ ]
+
+
+ async def _ack_jobs(query: Query, acks: list[AckAction]) -> list[int]:
+     async with query._pool.connection() as conn:
+         async with conn.transaction():
+             stmt = Query._load_file("ack_job.sql", query._prefix)
+             acked_ids = []
+
+             for ack in acks:
+                 args = {
+                     field: Query._cast_type(field, getattr(ack, field))
+                     for field in ACKABLE_FIELDS
+                 }
+
+                 result = await conn.execute(stmt, args)
+                 row = await result.fetchone()
+
+                 if row:
+                     acked_ids.append(row[0])
+
+             return acked_ids
+
+
+ async def _insert_jobs(query: Query, jobs: list[Job]) -> list[Job]:
+     async with query._pool.connection() as conn:
+         stmt = Query._load_file("insert_job.sql", query._prefix)
+         inserted = []
+
+         for job in jobs:
+             args = {
+                 key: Query._cast_type(key, getattr(job, key))
+                 for key in INSERTABLE_FIELDS
+             }
+
+             result = await conn.execute(stmt, args)
+             row = await result.fetchone()
+
+             inserted.append(
+                 replace(
+                     job,
+                     id=row[0],
+                     inserted_at=row[1],
+                     queue=row[2],
+                     scheduled_at=row[3],
+                     state=row[4],
+                 )
+             )
+
+         return inserted
+
+
+ class Query:
+     @staticmethod
+     @cache
+     def _load_file(
+         path: str,
+         prefix: str = "public",
+         package: str = "oban.queries",
+         apply_prefix: bool = True,
+     ) -> str:
+         sql = files(package).joinpath(path).read_text(encoding="utf-8")
+
+         if apply_prefix:
+             return re.sub(
+                 r"\b(oban_jobs|oban_leaders|oban_producers|oban_job_state|oban_state_to_bit)\b",
+                 rf"{prefix}.\1",
+                 sql,
+             )
+         else:
+             return sql
+
+     @staticmethod
+     def _cast_type(field: str, value: Any) -> Any:
+         if field in JSON_FIELDS and value is not None:
+             return Jsonb(value)
+
+         # Ensure timestamps are written as UTC rather than being implicitly cast to the current
+         # timezone. The database uses `TIMESTAMP WITHOUT TIME ZONE` and the value is automatically
+         # shifted when the zone is present.
+         if field in TIMESTAMP_FIELDS and value is not None:
+             return value.astimezone(timezone.utc).replace(tzinfo=None)
+
+         return value
+
+     def __init__(self, pool: AsyncConnectionPool, prefix: str = "public") -> None:
+         if not isinstance(pool, AsyncConnectionPool):
+             raise TypeError(f"Expected AsyncConnectionPool, got {type(pool).__name__}")
+
+         self._pool = pool
+         self._prefix = prefix
+
+     @property
+     def dsn(self) -> str:
+         return self._pool.conninfo
+
+     @asynccontextmanager
+     async def connection(self):
+         async with self._pool.connection() as conn:
+             yield conn
+
+     # Jobs
+
+     async def ack_jobs(self, acks: list[AckAction]) -> list[int]:
+         return await use_ext("query.ack_jobs", _ack_jobs, self, acks)
+
+     async def all_jobs(self, states: list[str]) -> list[Job]:
+         async with self._pool.connection() as conn:
+             stmt = self._load_file("all_jobs.sql", self._prefix)
+
+             async with conn.cursor(row_factory=class_row(Job)) as cur:
+                 await cur.execute(stmt, {"states": states})
+                 return await cur.fetchall()
+
+     async def cancel_many_jobs(self, ids: list[int]) -> tuple[int, list[int]]:
+         async with self._pool.connection() as conn:
+             async with conn.transaction():
+                 stmt = self._load_file("cancel_many_jobs.sql", self._prefix)
+                 args = {"ids": ids}
+
+                 result = await conn.execute(stmt, args)
+                 rows = await result.fetchall()
+
+                 executing_ids = [row[0] for row in rows if row[1] == "executing"]
+
+                 return len(rows), executing_ids
+
+     async def delete_many_jobs(self, ids: list[int]) -> int:
+         async with self._pool.connection() as conn:
+             async with conn.transaction():
+                 stmt = self._load_file("delete_many_jobs.sql", self._prefix)
+                 args = {"ids": ids}
+
+                 result = await conn.execute(stmt, args)
+
+                 return result.rowcount
+
+     async def get_job(self, job_id: int) -> Job:
+         async with self._pool.connection() as conn:
+             stmt = self._load_file("get_job.sql", self._prefix)
+
+             async with conn.cursor(row_factory=class_row(Job)) as cur:
+                 await cur.execute(stmt, (job_id,))
+
+                 return await cur.fetchone()
+
+     async def fetch_jobs(
+         self, demand: int, queue: str, node: str, uuid: str
+     ) -> list[Job]:
+         async with self._pool.connection() as conn:
+             async with conn.transaction():
+                 stmt = self._load_file("fetch_jobs.sql", self._prefix)
+                 args = {"queue": queue, "demand": demand, "attempted_by": [node, uuid]}
+
+                 async with conn.cursor(row_factory=class_row(Job)) as cur:
+                     await cur.execute(stmt, args)
+
+                     return await cur.fetchall()
+
+     async def insert_jobs(self, jobs: list[Job]) -> list[Job]:
+         return await use_ext("query.insert_jobs", _insert_jobs, self, jobs)
+
+     async def prune_jobs(self, max_age: int, limit: int) -> int:
+         async with self._pool.connection() as conn:
+             async with conn.transaction():
+                 stmt = self._load_file("prune_jobs.sql", self._prefix)
+                 args = {"max_age": max_age, "limit": limit}
+
+                 result = await conn.execute(stmt, args)
+
+                 return result.rowcount
+
+     async def rescue_jobs(self, rescue_after: float) -> int:
+         async with self._pool.connection() as conn:
+             async with conn.transaction():
+                 stmt = self._load_file("rescue_jobs.sql", self._prefix)
+                 args = {"rescue_after": rescue_after}
+
+                 result = await conn.execute(stmt, args)
+
+                 return result.rowcount
+
+     async def retry_many_jobs(self, ids: list[int]) -> int:
+         async with self._pool.connection() as conn:
+             async with conn.transaction():
+                 stmt = self._load_file("retry_many_jobs.sql", self._prefix)
+                 args = {"ids": ids}
+
+                 result = await conn.execute(stmt, args)
+
+                 return result.rowcount
+
+     async def stage_jobs(
+         self, limit: int, queues: list[str], before: datetime | None = None
+     ) -> tuple[int, list[str]]:
+         async with self._pool.connection() as conn:
+             async with conn.transaction():
+                 stmt = self._load_file("stage_jobs.sql", self._prefix)
+                 args = {"limit": limit, "queues": queues, "before": before}
+
+                 result = await conn.execute(stmt, args)
+                 rows = await result.fetchall()
+                 queues = [queue for (queue,) in rows]
+
+                 return (len(rows), queues)
+
+     async def update_many_jobs(self, jobs: list[Job]) -> list[Job]:
+         async with self._pool.connection() as conn:
+             async with conn.transaction():
+                 stmt = self._load_file("update_job.sql", self._prefix)
+                 args = defaultdict(list)
+
+                 for job in jobs:
+                     args["ids"].append(job.id)
+
+                     for key in UPDATABLE_FIELDS:
+                         args[key].append(self._cast_type(key, getattr(job, key)))
+
+                 result = await conn.execute(stmt, dict(args))
+                 rows = await result.fetchall()
+
+                 return [
+                     replace(
+                         job,
+                         args=row[0],
+                         max_attempts=row[1],
+                         meta=row[2],
+                         priority=row[3],
+                         queue=row[4],
+                         scheduled_at=row[5],
+                         state=row[6],
+                         tags=row[7],
+                         worker=row[8],
+                     )
+                     for job, row in zip(jobs, rows)
+                 ]
+
+     # Leadership
+
+     async def attempt_leadership(
+         self, name: str, node: str, ttl: int, is_leader: bool
+     ) -> bool:
+         async with self._pool.connection() as conn:
+             async with conn.transaction():
+                 cleanup_stmt = self._load_file(
+                     "cleanup_expired_leaders.sql", self._prefix
+                 )
+
+                 await conn.execute(cleanup_stmt)
+
+                 if is_leader:
+                     elect_stmt = self._load_file("reelect_leader.sql", self._prefix)
+                 else:
+                     elect_stmt = self._load_file("elect_leader.sql", self._prefix)
+
+                 args = {"name": name, "node": node, "ttl": ttl}
+                 result = await conn.execute(elect_stmt, args)
+                 rows = await result.fetchone()
+
+                 return rows is not None and rows[0] == node
+
+     async def resign_leader(self, name: str, node: str) -> None:
+         async with self._pool.connection() as conn:
+             stmt = self._load_file("resign_leader.sql", self._prefix)
+             args = {"name": name, "node": node}
+
+             await conn.execute(stmt, args)
+
+     # Schema
+
+     async def install(self) -> None:
+         async with self._pool.connection() as conn:
+             stmt = self._load_file("install.sql", self._prefix)
+
+             await conn.execute(stmt)
+
+     async def reset(self) -> None:
+         async with self._pool.connection() as conn:
+             stmt = self._load_file("reset.sql", self._prefix)
+
+             await conn.execute(stmt)
+
+     async def uninstall(self) -> None:
+         async with self._pool.connection() as conn:
+             stmt = self._load_file("uninstall.sql", self._prefix)
+
+             await conn.execute(stmt)
+
+     async def verify_structure(self) -> list[str]:
+         async with self._pool.connection() as conn:
+             stmt = self._load_file("verify_structure.sql", apply_prefix=False)
+             args = {"prefix": self._prefix}
+             rows = await conn.execute(stmt, args)
+             results = await rows.fetchall()
+
+             return [table for (table,) in results]
+
+     # Producer
+
+     async def cleanup_expired_producers(self, max_age: float) -> int:
+         async with self._pool.connection() as conn:
+             stmt = self._load_file("cleanup_expired_producers.sql", self._prefix)
+             args = {"max_age": max_age}
+
+             result = await conn.execute(stmt, args)
+
+             return result.rowcount
+
+     async def delete_producer(self, uuid: str) -> None:
+         async with self._pool.connection() as conn:
+             stmt = self._load_file("delete_producer.sql", self._prefix)
+             args = {"uuid": uuid}
+
+             await conn.execute(stmt, args)
+
+     async def insert_producer(
+         self, uuid: str, name: str, node: str, queue: str, meta: dict[str, Any]
+     ) -> None:
+         async with self._pool.connection() as conn:
+             stmt = self._load_file("insert_producer.sql", self._prefix)
+             args = {
+                 "uuid": uuid,
+                 "name": name,
+                 "node": node,
+                 "queue": queue,
+                 "meta": Jsonb(meta),
+             }
+
+             await conn.execute(stmt, args)
+
+     async def refresh_producers(self, uuids: list[str]) -> int:
+         async with self._pool.connection() as conn:
+             stmt = self._load_file("refresh_producers.sql", self._prefix)
+             args = {"uuids": uuids}
+
+             result = await conn.execute(stmt, args)
+
+             return result.rowcount
+
+     async def update_producer(self, uuid: str, meta: dict[str, Any]) -> None:
+         async with self._pool.connection() as conn:
+             stmt = self._load_file("update_producer.sql", self._prefix)
+             args = {"uuid": uuid, "meta": Jsonb(meta)}
+
+             await conn.execute(stmt, args)
+
+     # Notifier
+
+     async def notify(self, channel: str, payloads: list[str]) -> None:
+         async with self._pool.connection() as conn:
+             await conn.execute(
+                 "SELECT pg_notify(%s, payload) FROM json_array_elements_text(%s::json) AS payload",
+                 (channel, Jsonb(payloads)),
+             )
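
Note: the timestamp handling described in `_cast_type` above relies only on standard library `datetime` behaviour, converting an aware value to UTC and then dropping its tzinfo before it reaches the `TIMESTAMP WITHOUT TIME ZONE` columns. A minimal standalone sketch (illustrative only, not part of the package):

    from datetime import datetime, timedelta, timezone

    aware = datetime(2024, 1, 1, 12, 0, tzinfo=timezone(timedelta(hours=2)))

    # Normalize to UTC, then drop tzinfo so the driver stores the UTC wall-clock value as-is.
    naive_utc = aware.astimezone(timezone.utc).replace(tzinfo=None)

    print(naive_utc)  # 2024-01-01 10:00:00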
oban/_recorded.py ADDED
@@ -0,0 +1,34 @@
+ from __future__ import annotations
+
+ import base64
+ from typing import Any
+
+ import erlpack
+
+
+ def _convert_bytes(value: Any) -> Any:
+     if isinstance(value, bytes):
+         return value.decode("utf-8")
+     elif isinstance(value, dict):
+         return {_convert_bytes(key): _convert_bytes(val) for key, val in value.items()}
+     elif isinstance(value, list):
+         return [_convert_bytes(item) for item in value]
+     return value
+
+
+ def encode_recorded(value: Any) -> str:
+     binary = erlpack.pack(value)
+     encoded = base64.b64encode(binary).decode("ascii")
+
+     return encoded.rstrip("=")
+
+
+ def decode_recorded(encoded: str) -> Any:
+     padding = 4 - (len(encoded) % 4)
+
+     if padding != 4:
+         encoded = encoded + ("=" * padding)
+
+     binary = base64.b64decode(encoded)
+
+     return _convert_bytes(erlpack.unpack(binary))
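
These helpers pack a value with `erlpack` and encode it as base64 with the trailing "=" padding stripped; the `4 - (len(encoded) % 4)` step in `decode_recorded` restores that padding before decoding. A rough round-trip sketch, assuming the `erlpack` dependency is installed and the module is imported as it ships in this wheel:

    from oban._recorded import decode_recorded, encode_recorded

    token = encode_recorded({"status": "ok", "attempts": 3})  # unpadded base64 string

    # Binary terms come back as bytes from erlpack and are decoded to str by _convert_bytes.
    value = decode_recorded(token)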
oban/_refresher.py ADDED
@@ -0,0 +1,88 @@
+ from __future__ import annotations
+
+ import asyncio
+ from typing import TYPE_CHECKING
+
+ from . import telemetry
+
+ if TYPE_CHECKING:
+     from ._producer import Producer
+     from ._leader import Leader
+     from ._query import Query
+
+
+ class Refresher:
+     def __init__(
+         self,
+         *,
+         query: Query,
+         leader: Leader,
+         producers: dict[str, Producer],
+         interval: float = 15.0,
+         max_age: float = 60.0,
+     ) -> None:
+         self._leader = leader
+         self._interval = interval
+         self._max_age = max_age
+         self._producers = producers
+         self._query = query
+
+         self._loop_task = None
+
+         self._validate(interval=interval, max_age=max_age)
+
+     @staticmethod
+     def _validate(*, interval: float, max_age: float) -> None:
+         if not isinstance(interval, (int, float)):
+             raise TypeError(f"interval must be a number, got {interval}")
+         if interval <= 0:
+             raise ValueError(f"interval must be positive, got {interval}")
+
+         if not isinstance(max_age, (int, float)):
+             raise TypeError(f"max_age must be a number, got {max_age}")
+         if max_age <= 0:
+             raise ValueError(f"max_age must be positive, got {max_age}")
+
+     async def start(self) -> None:
+         self._loop_task = asyncio.create_task(self._loop(), name="oban-refresher")
+
+     async def stop(self) -> None:
+         if self._loop_task:
+             self._loop_task.cancel()
+
+             try:
+                 await self._loop_task
+             except asyncio.CancelledError:
+                 pass
+
+     async def _loop(self) -> None:
+         while True:
+             try:
+                 await asyncio.sleep(self._interval)
+
+                 await self._refresh()
+                 await self._cleanup()
+             except asyncio.CancelledError:
+                 break
+             except Exception:
+                 pass
+
+     async def _refresh(self) -> None:
+         with telemetry.span("oban.refresher.refresh", {}) as context:
+             uuids = [producer._uuid for producer in self._producers.values()]
+
+             if uuids:
+                 refreshed = await self._query.refresh_producers(uuids)
+             else:
+                 refreshed = 0
+
+             context.add({"refreshed_count": refreshed})
+
+     async def _cleanup(self) -> None:
+         if not self._leader.is_leader:
+             return
+
+         with telemetry.span("oban.refresher.cleanup", {}) as context:
+             cleaned_up = await self._query.cleanup_expired_producers(self._max_age)
+
+             context.add({"cleanup_count": cleaned_up})