oban 0.5.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (59)
  1. oban/__init__.py +22 -0
  2. oban/__main__.py +12 -0
  3. oban/_backoff.py +87 -0
  4. oban/_config.py +171 -0
  5. oban/_executor.py +188 -0
  6. oban/_extensions.py +16 -0
  7. oban/_leader.py +118 -0
  8. oban/_lifeline.py +77 -0
  9. oban/_notifier.py +324 -0
  10. oban/_producer.py +334 -0
  11. oban/_pruner.py +93 -0
  12. oban/_query.py +409 -0
  13. oban/_recorded.py +34 -0
  14. oban/_refresher.py +88 -0
  15. oban/_scheduler.py +359 -0
  16. oban/_stager.py +115 -0
  17. oban/_worker.py +78 -0
  18. oban/cli.py +436 -0
  19. oban/decorators.py +218 -0
  20. oban/job.py +315 -0
  21. oban/oban.py +1084 -0
  22. oban/py.typed +0 -0
  23. oban/queries/__init__.py +0 -0
  24. oban/queries/ack_job.sql +11 -0
  25. oban/queries/all_jobs.sql +25 -0
  26. oban/queries/cancel_many_jobs.sql +37 -0
  27. oban/queries/cleanup_expired_leaders.sql +4 -0
  28. oban/queries/cleanup_expired_producers.sql +2 -0
  29. oban/queries/delete_many_jobs.sql +5 -0
  30. oban/queries/delete_producer.sql +2 -0
  31. oban/queries/elect_leader.sql +10 -0
  32. oban/queries/fetch_jobs.sql +44 -0
  33. oban/queries/get_job.sql +23 -0
  34. oban/queries/insert_job.sql +28 -0
  35. oban/queries/insert_producer.sql +2 -0
  36. oban/queries/install.sql +113 -0
  37. oban/queries/prune_jobs.sql +18 -0
  38. oban/queries/reelect_leader.sql +12 -0
  39. oban/queries/refresh_producers.sql +3 -0
  40. oban/queries/rescue_jobs.sql +18 -0
  41. oban/queries/reset.sql +5 -0
  42. oban/queries/resign_leader.sql +4 -0
  43. oban/queries/retry_many_jobs.sql +13 -0
  44. oban/queries/stage_jobs.sql +34 -0
  45. oban/queries/uninstall.sql +4 -0
  46. oban/queries/update_job.sql +54 -0
  47. oban/queries/update_producer.sql +3 -0
  48. oban/queries/verify_structure.sql +9 -0
  49. oban/schema.py +115 -0
  50. oban/telemetry/__init__.py +10 -0
  51. oban/telemetry/core.py +170 -0
  52. oban/telemetry/logger.py +147 -0
  53. oban/testing.py +439 -0
  54. oban-0.5.0.dist-info/METADATA +290 -0
  55. oban-0.5.0.dist-info/RECORD +59 -0
  56. oban-0.5.0.dist-info/WHEEL +5 -0
  57. oban-0.5.0.dist-info/entry_points.txt +2 -0
  58. oban-0.5.0.dist-info/licenses/LICENSE.txt +201 -0
  59. oban-0.5.0.dist-info/top_level.txt +1 -0
oban/__init__.py ADDED
@@ -0,0 +1,22 @@
1
from importlib.metadata import version

from .decorators import job, worker
from .job import Cancel, Job, Record, Snooze
from .oban import Oban

# Importing oban_pro (when installed) registers Pro extensions as a side
# effect; its absence is expected and silently ignored.
try:
    import oban_pro  # noqa: F401 # ty: ignore[unresolved-import]
except ImportError:
    pass

# Public API re-exported at the package root.
__all__ = [
    "Cancel",
    "Job",
    "Oban",
    "Record",
    "Snooze",
    "job",
    "worker",
]

# Version resolved from installed package metadata rather than hard-coded.
__version__ = version("oban")
oban/__main__.py ADDED
@@ -0,0 +1,12 @@
1
import contextlib

from oban.cli import main


def safe_main() -> None:
    """Run the CLI, exiting quietly on Ctrl-C instead of printing a traceback."""
    with contextlib.suppress(KeyboardInterrupt):
        main()


if __name__ == "__main__":
    safe_main()
oban/_backoff.py ADDED
@@ -0,0 +1,87 @@
1
+ import random
2
+ from typing import Literal
3
+
4
+
5
def exponential(
    attempt: int, *, max_pow: int = 10, min_pad: int = 0, mult: int = 1
) -> int:
    """Calculate exponential backoff delay in seconds.

    Args:
        attempt: The retry attempt number
        max_pow: Maximum power of 2 (default: 10)
        min_pad: Minimum padding in seconds (default: 0)
        mult: Multiplier for the exponential value (default: 1)

    Returns:
        Backoff delay in seconds
    """
    # Cap the exponent so the delay plateaus after max_pow attempts.
    capped = min(attempt, max_pow)

    return min_pad + mult * 2**capped
20
+
21
+
22
+ def jitter(
23
+ time: int, *, mode: Literal["inc", "dec", "both"] = "both", mult: float = 0.1
24
+ ) -> int:
25
+ """Add jitter to a backoff time.
26
+
27
+ Args:
28
+ time: Base time in seconds
29
+ mode: Jitter mode - "inc" (increase only), "dec" (decrease only), or "both" (default: "both")
30
+ mult: Jitter multiplier (default: 0.1)
31
+
32
+ Returns:
33
+ Time with jitter applied
34
+ """
35
+ rand = random.random()
36
+ diff = int(rand * mult * time)
37
+
38
+ match mode:
39
+ case "inc":
40
+ return time + diff
41
+ case "dec":
42
+ return time - diff
43
+ case "both":
44
+ return time + diff if rand >= 0.5 else time - diff
45
+
46
+
47
def jittery_exponential(
    attempt: int, *, max_pow: int = 10, min_pad: int = 0, mult: int = 1
) -> int:
    """Calculate exponential backoff with jitter.

    Args:
        attempt: The retry attempt number
        max_pow: Maximum power of 2 (default: 10, caps at ~1024 seconds)
        min_pad: Minimum padding in seconds (default: 0)
        mult: Multiplier for the exponential value (default: 1)

    Returns:
        Backoff delay in seconds with jitter applied
    """
    base = exponential(attempt, max_pow=max_pow, min_pad=min_pad, mult=mult)

    return jitter(base, mode="both")
64
+
65
+
66
def jittery_clamped(attempt: int, max_attempts: int, *, clamped_max: int = 20) -> int:
    """Calculate jittery clamped backoff for job retries.

    Clamps the attempt number proportionally to max_attempts, then applies
    exponential backoff with a minimum padding and jitter that only increases.

    Args:
        attempt: The retry attempt number
        max_attempts: Maximum attempts allowed
        clamped_max: Maximum value to clamp attempts to (default: 20)

    Returns:
        Backoff delay in seconds with exponential backoff and jitter
    """
    if max_attempts > clamped_max:
        # Scale the attempt down proportionally so large max_attempts still
        # map onto the 0..clamped_max exponent range.
        scaled = round(attempt / max_attempts * clamped_max)
    else:
        scaled = attempt

    base = exponential(scaled, mult=1, max_pow=100, min_pad=15)

    return jitter(base, mode="inc")
oban/_config.py ADDED
@@ -0,0 +1,171 @@
1
+ from __future__ import annotations
2
+
3
+ import os
4
+ import tomllib
5
+ from dataclasses import dataclass, field, fields
6
+ from pathlib import Path
7
+ from typing import Any
8
+
9
+ from psycopg_pool import AsyncConnectionPool
10
+
11
+ from oban import Oban
12
+
13
+
14
@dataclass
class Config:
    """Configuration for Oban instances.

    Can be used by both CLI and programmatic usage to create Oban instances
    with consistent configuration.
    """

    # Database connection string; required before create_pool is called.
    dsn: str | None = None
    # Queue name -> concurrency limit.
    queues: dict[str, int] = field(default_factory=dict)
    name: str | None = None
    node: str | None = None
    prefix: str | None = None
    leadership: bool | None = None

    # Core loop configurations
    lifeline: dict[str, Any] | None = None
    pruner: dict[str, Any] | None = None
    refresher: dict[str, Any] | None = None
    scheduler: dict[str, Any] | None = None
    stager: dict[str, Any] | None = None

    # Connection pool options
    pool_min_size: int = 1
    pool_max_size: int = 10
    pool_timeout: float = 30.0

    @staticmethod
    def _parse_queues(input: str) -> dict[str, int]:
        """Parse a "name:limit,name:limit" string into a queues mapping.

        Entries without a ":" separator are silently skipped.
        """
        if not input:
            return {}

        return {
            name.strip(): int(limit.strip())
            for line in input.split(",")
            if line.strip() and ":" in line
            for name, limit in [line.split(":", 1)]
        }

    @classmethod
    def from_env(cls) -> Config:
        """Load configuration from environment variables.

        Supported environment variables:

        - OBAN_DSN: Database connection string (required)
        - OBAN_QUEUES: Comma-separated queue:limit pairs (e.g., "default:10,mailers:5")
        - OBAN_PREFIX: Schema prefix (default: "public")
        - OBAN_NODE: Node identifier (default: hostname)
        - OBAN_POOL_MIN_SIZE: Minimum connection pool size (default: 1)
        - OBAN_POOL_MAX_SIZE: Maximum connection pool size (default: 10)
        """
        return cls(
            dsn=os.getenv("OBAN_DSN"),
            queues=cls._parse_queues(os.getenv("OBAN_QUEUES", "")),
            node=os.getenv("OBAN_NODE"),
            prefix=os.getenv("OBAN_PREFIX", "public"),
            pool_min_size=int(os.getenv("OBAN_POOL_MIN_SIZE", "1")),
            pool_max_size=int(os.getenv("OBAN_POOL_MAX_SIZE", "10")),
        )

    @classmethod
    def from_cli(cls, params: dict[str, Any]) -> Config:
        """Build a Config from parsed CLI params (queues arrive as a string)."""
        if queues := params.pop("queues", None):
            params["queues"] = cls._parse_queues(queues)

        return cls(**params)

    @classmethod
    def from_toml(cls, path: str | None = None) -> Config:
        """Load configuration from a TOML file (defaults to ./oban.toml).

        A missing file yields an all-default Config rather than an error.
        """
        params = {}
        path_obj = Path(path or "oban.toml")

        if path_obj.exists():
            with open(path_obj, "rb") as file:
                params = tomllib.load(file)

        return cls(**params)

    @classmethod
    def load(cls, path: str | None = None, **overrides: Any) -> Config:
        """Combine TOML, environment, and explicit overrides into one Config.

        Precedence (lowest to highest): TOML file, environment variables,
        keyword overrides.
        """
        tml_conf = cls.from_toml(path)
        env_conf = cls.from_env()
        cli_conf = cls(**overrides)

        return tml_conf.merge(env_conf).merge(cli_conf)

    def merge(self, other: Config) -> Config:
        """Return a new Config where other's non-None fields take precedence.

        Dict-valued fields are merged key-wise (other's keys win); any other
        field is replaced only when other provides a non-None value.
        """
        def merge_dicts(this, that) -> dict | None:
            # If either side is missing, keep this side unchanged (possibly None).
            if that is None or this is None:
                return this

            merged = this.copy()
            merged.update(that)

            return merged

        merged = {}

        for field_ref in fields(self):
            name = field_ref.name
            this_val = getattr(self, name)
            that_val = getattr(other, name)

            if isinstance(that_val, dict):
                merged[name] = merge_dicts(this_val, that_val)
            elif that_val is not None:
                merged[name] = that_val
            else:
                merged[name] = this_val

        return Config(**merged)

    async def create_pool(self) -> AsyncConnectionPool:
        """Open and warm an async connection pool from this config.

        Raises:
            ValueError: If no dsn is configured.
        """
        if not self.dsn:
            raise ValueError("dsn is required to create a connection pool")

        pool = AsyncConnectionPool(
            conninfo=self.dsn,
            min_size=self.pool_min_size,
            max_size=self.pool_max_size,
            timeout=self.pool_timeout,
            open=False,
        )

        # Open explicitly and wait until min_size connections are ready.
        await pool.open()
        await pool.wait()

        return pool

    async def create_oban(
        self, pool: AsyncConnectionPool | None = None, dispatcher: Any = None
    ) -> Oban:
        """Instantiate an Oban from this config, creating a pool if needed.

        Optional fields are only forwarded when explicitly configured
        (non-None), letting Oban apply its own defaults for the rest.
        """
        pool = pool or await self.create_pool()

        params: dict[str, Any] = {
            "pool": pool,
            "dispatcher": dispatcher,
            "name": self.name,
            "prefix": self.prefix,
            "queues": self.queues,
        }

        extras = {
            key: getattr(self, key)
            for key in [
                "leadership",
                "lifeline",
                "node",
                "pruner",
                "refresher",
                "scheduler",
                "stager",
            ]
            if getattr(self, key) is not None
        }

        return Oban(**params, **extras)
oban/_executor.py ADDED
@@ -0,0 +1,188 @@
1
+ from __future__ import annotations
2
+
3
+ import time
4
+ import traceback
5
+
6
+ from contextvars import ContextVar
7
+ from dataclasses import dataclass
8
+ from datetime import datetime, timezone
9
+ from typing import TYPE_CHECKING
10
+
11
+ from . import telemetry
12
+ from ._backoff import jittery_clamped
13
+ from ._extensions import use_ext
14
+ from ._worker import resolve_worker
15
+ from .job import Cancel, Record, Snooze
16
+
17
+ if TYPE_CHECKING:
18
+ from .job import Job
19
+
20
# Tracks the job being executed on the current task; exposed through
# Executor.current_job() so worker code can introspect its own job.
_current_job: ContextVar[Job | None] = ContextVar("oban_current_job", default=None)
21
+
22
+
23
@dataclass(frozen=True, slots=True)
class AckAction:
    """Immutable description of how a finished job should be acknowledged.

    Captures the target state plus any adjustments (error details, meta
    updates, attempt correction, rescheduling delay) to persist.
    """

    job: Job
    state: str
    attempt_change: int | None = None
    error: dict | None = None
    meta: dict | None = None
    schedule_in: int | None = None

    @property
    def id(self) -> int | None:
        """The acknowledged job's id (None when the job was never persisted)."""
        return self.job.id
35
+
36
+
37
class Executor:
    """Runs a single job through its worker and derives the resulting ack action.

    Reports telemetry around execution, converts the worker's return value
    (or raised exception) into an AckAction, and optionally re-raises errors
    when running in unsafe mode.
    """

    def __init__(self, job: Job, safe: bool = True):
        # When safe is False, exceptions are re-raised after processing
        # instead of only being recorded on the resulting action.
        self.job = job
        self.safe = safe

        self.action = None
        self.result = None
        self.worker = None

        self._start_time = time.monotonic_ns()
        self._traceback = None

    @staticmethod
    def current_job() -> Job | None:
        """Return the job executing on the current task, if any."""
        return _current_job.get()

    async def execute(self) -> Executor:
        """Process the job end-to-end and return self for chaining."""
        self._report_started()
        await self._process()
        self._record_stopped()
        self._report_stopped()
        self._reraise_unsafe()

        return self

    @property
    def status(self) -> str | None:
        """The resulting job state, or None if execution hasn't finished."""
        if self.action:
            return self.action.state

    def _report_started(self) -> None:
        telemetry.execute(
            "oban.job.start",
            {"job": self.job, "monotonic_time": self._start_time},
        )

    async def _process(self) -> None:
        # Expose the job via the context var for the duration of processing
        # so Executor.current_job() works inside the worker.
        token = _current_job.set(self.job)

        try:
            self.worker = resolve_worker(self.job.worker)()
            self.result = await self.worker.process(self.job)
        except Exception as error:
            # Capture the error and traceback; interpretation happens later
            # in _record_stopped.
            self.result = error
            self._traceback = traceback.format_exc()
        finally:
            _current_job.reset(token)

    def _record_stopped(self) -> None:
        """Translate the processing result into an AckAction."""
        # This is largely for type checking, as executing an unpersisted job wouldn't happen
        # during actual job processing.
        if self.job.id is None:
            self.job.id = 0

        # Extensions may rewrite the raw result before it is interpreted.
        result = use_ext(
            "executor.wrap_result", lambda _job, res: res, self.job, self.result
        )

        match result:
            case Exception() as error:
                # Out of attempts -> discard; otherwise schedule a retry.
                if self.job.attempt >= self.job.max_attempts:
                    self.action = AckAction(
                        job=self.job,
                        state="discarded",
                        error=self._format_error(error),
                    )
                else:
                    self.action = AckAction(
                        job=self.job,
                        state="retryable",
                        error=self._format_error(error),
                        schedule_in=self._retry_backoff(),
                    )

            case Cancel(reason=reason):
                self.action = AckAction(
                    job=self.job, state="cancelled", error=self._format_error(reason)
                )

            case Snooze(seconds=seconds):
                # Snoozing doesn't consume an attempt (hence the -1 change)
                # and tracks how often the job has been snoozed in meta.
                self.action = AckAction(
                    job=self.job,
                    attempt_change=-1,
                    state="scheduled",
                    schedule_in=seconds,
                    meta={"snoozed": self.job.meta.get("snoozed", 0) + 1},
                )

            case Record(encoded=encoded):
                # Completed with a recorded return value persisted in meta.
                self.action = AckAction(
                    job=self.job,
                    state="completed",
                    meta={"recorded": True, "return": encoded},
                )

            case _:
                self.action = AckAction(job=self.job, state="completed")

    def _report_stopped(self) -> None:
        stop_time = time.monotonic_ns()

        meta = {
            "monotonic_time": stop_time,
            "duration": stop_time - self._start_time,
            "queue_time": self._queue_time(),
            "job": self.job,
            "state": self.status,
        }

        # Failures emit an exception event with error details; everything
        # else (completed/cancelled/scheduled) emits a plain stop event.
        if self.status in ("retryable", "discarded"):
            error_meta = {
                "error_message": str(self.result),
                "error_type": type(self.result).__name__,
                "traceback": self._traceback,
            }

            telemetry.execute("oban.job.exception", {**meta, **error_meta})
        else:
            telemetry.execute("oban.job.stop", meta)

    def _reraise_unsafe(self) -> None:
        if not self.safe and isinstance(self.result, BaseException):
            raise self.result

    def _retry_backoff(self) -> int:
        # Workers may customize retry timing by defining a backoff(job) method.
        if hasattr(self.worker, "backoff"):
            return self.worker.backoff(self.job)
        else:
            return jittery_clamped(self.job.attempt, self.job.max_attempts)

    def _queue_time(self) -> int:
        """Nanoseconds the job waited between scheduled_at and attempted_at."""
        attempted_at = self.job.attempted_at
        scheduled_at = self.job.scheduled_at

        if attempted_at and scheduled_at:
            delta = (attempted_at - scheduled_at).total_seconds()

            return int(delta * 1_000_000_000)
        else:
            return 0

    def _format_error(self, error: Exception | str) -> dict:
        """Normalize an exception or reason string into the stored error shape."""
        if isinstance(error, str):
            error_str = error
        else:
            error_str = repr(error)

        return {
            "attempt": self.job.attempt,
            "at": datetime.now(timezone.utc).isoformat(),
            "error": error_str,
        }
oban/_extensions.py ADDED
@@ -0,0 +1,16 @@
1
from typing import Any, Callable

# Process-wide registry of extension hooks, keyed by a well-known name.
# Packages such as oban_pro override behavior by registering callables here.
_extensions: dict[str, Callable] = {}


def get_ext(name: str, default: Callable) -> Callable:
    """Return the registered hook for name, or default when none exists."""
    return _extensions.get(name, default)


def put_ext(name: str, func: Callable) -> None:
    """Register func as the extension hook for name."""
    _extensions[name] = func


def use_ext(name: str, default: Callable, *args, **kwargs) -> Any:
    """Resolve the hook for name and invoke it with the given arguments."""
    hook = get_ext(name, default)

    return hook(*args, **kwargs)
oban/_leader.py ADDED
@@ -0,0 +1,118 @@
1
+ from __future__ import annotations
2
+
3
+ import asyncio
4
+ from typing import TYPE_CHECKING
5
+
6
+ from . import telemetry
7
+
8
+ if TYPE_CHECKING:
9
+ from ._notifier import Notifier
10
+ from ._query import Query
11
+
12
+
13
class Leader:
    """Manages leadership election and coordination across Oban nodes.

    This class is managed internally by Oban and shouldn't be constructed directly.
    Instead, check leadership status via the Oban.is_leader property:

    >>> async with Oban(pool=pool, queues={"default": 10}) as oban:
    ...     if oban.is_leader:
    ...         # Perform leader-only operations
    """

    def __init__(
        self,
        *,
        enabled: bool = True,
        interval: float = 30.0,
        name: str = "oban",
        node: str,
        notifier: Notifier,
        query: Query,
    ) -> None:
        self._enabled = enabled
        self._interval = interval
        self._name = name
        self._node = node
        self._notifier = notifier
        self._query = query

        self._is_leader = False
        self._listen_token = None
        self._loop_task = None
        # Set once the first election attempt finishes (or leadership is
        # disabled), so start() doesn't return before status is known.
        self._started = asyncio.Event()

        self._validate(interval=interval)

    @staticmethod
    def _validate(*, interval: float) -> None:
        """Raise TypeError/ValueError unless interval is a positive number."""
        if not isinstance(interval, (int, float)):
            raise TypeError(f"interval must be a number, got {interval}")
        if interval <= 0:
            raise ValueError(f"interval must be positive, got {interval}")

    @property
    def is_leader(self) -> bool:
        """Whether this node currently holds leadership."""
        return self._is_leader

    async def start(self) -> None:
        """Begin the election loop and wait for the first attempt to complete.

        When leadership is disabled the node never campaigns and this returns
        immediately.
        """
        if not self._enabled:
            self._started.set()
            return

        # Listen for "leader" notifications (e.g. a peer resigning) so an
        # election can run without waiting for the next interval.
        self._listen_token = await self._notifier.listen(
            "leader", self._on_notification, wait=False
        )
        self._loop_task = asyncio.create_task(self._loop(), name="oban-leader")

        await self._started.wait()

    async def stop(self) -> None:
        """Stop campaigning; resign and notify peers when currently leader."""
        if self._listen_token:
            await self._notifier.unlisten(self._listen_token)

        if self._loop_task:
            self._loop_task.cancel()

            try:
                await self._loop_task
            except asyncio.CancelledError:
                pass

        if self._is_leader:
            # Tell peers immediately so a new election happens right away,
            # then clear the leadership record.
            payload = {"action": "resign", "node": self._node, "name": self._name}

            await self._notifier.notify("leader", payload)
            await self._query.resign_leader(self._name, self._node)

    async def _loop(self) -> None:
        while True:
            try:
                await self._election()
            except asyncio.CancelledError:
                break
            except Exception:
                # Best effort: a failed election simply retries next cycle.
                pass
            finally:
                if not self._started.is_set():
                    self._started.set()

            # Sleep for half interval if leader (to boost their refresh interval and allow them to
            # retain leadership), full interval otherwise
            sleep_duration = self._interval / 2 if self._is_leader else self._interval

            await asyncio.sleep(sleep_duration)

    async def _election(self) -> None:
        """Attempt to acquire or retain leadership, recording the outcome."""
        meta = {"leader": self._is_leader}

        with telemetry.span("oban.leader.election", meta) as context:
            self._is_leader = await self._query.attempt_leadership(
                self._name, self._node, int(self._interval), self._is_leader
            )

            context.add({"leader": self._is_leader})

    async def _on_notification(self, _channel: str, _payload: dict) -> None:
        # Any message on the "leader" channel (such as a resignation) prompts
        # an immediate election rather than waiting a full interval.
        await self._election()
oban/_lifeline.py ADDED
@@ -0,0 +1,77 @@
1
+ from __future__ import annotations
2
+
3
+ import asyncio
4
+ from typing import TYPE_CHECKING
5
+
6
+ from . import telemetry
7
+ from ._extensions import use_ext
8
+
9
+ if TYPE_CHECKING:
10
+ from ._leader import Leader
11
+ from ._query import Query
12
+
13
+
14
async def _rescue(query: Query, rescue_after: float) -> None:
    """Default rescue implementation: recover jobs stuck beyond rescue_after.

    Wrapped in a telemetry span so listeners can observe how many jobs were
    rescued. May be replaced via the "lifeline.rescue" extension point.
    """
    with telemetry.span("oban.lifeline.rescue", {}) as context:
        rescued = await query.rescue_jobs(rescue_after)

        context.add({"rescued_count": rescued})
19
+
20
+
21
class Lifeline:
    """Periodically rescues jobs left stuck in an executing state.

    Only the cluster leader performs rescues, preventing the same jobs from
    being rescued by multiple nodes at once.
    """

    def __init__(
        self,
        *,
        query: Query,
        leader: Leader,
        interval: float = 60.0,
        rescue_after: float = 300.0,
    ) -> None:
        self._leader = leader
        self._interval = interval
        self._rescue_after = rescue_after
        self._query = query

        self._loop_task = None

        self._validate(interval=interval, rescue_after=rescue_after)

    @staticmethod
    def _validate(*, interval: float, rescue_after: float) -> None:
        """Raise TypeError/ValueError unless both values are positive numbers."""
        if not isinstance(interval, (int, float)):
            raise TypeError(f"interval must be a number, got {interval}")
        if interval <= 0:
            raise ValueError(f"interval must be positive, got {interval}")
        if not isinstance(rescue_after, (int, float)):
            raise TypeError(f"rescue_after must be a number, got {rescue_after}")
        if rescue_after <= 0:
            raise ValueError(f"rescue_after must be positive, got {rescue_after}")

    async def start(self) -> None:
        """Launch the background rescue loop."""
        self._loop_task = asyncio.create_task(self._loop(), name="oban-lifeline")

    async def stop(self) -> None:
        """Cancel the rescue loop and wait for it to finish."""
        if self._loop_task:
            self._loop_task.cancel()

            try:
                await self._loop_task
            except asyncio.CancelledError:
                pass

    async def _loop(self) -> None:
        while True:
            try:
                await asyncio.sleep(self._interval)

                await self._rescue()
            except asyncio.CancelledError:
                break
            except Exception:
                # Best effort: failures are retried on the next interval.
                pass

    async def _rescue(self) -> None:
        # Non-leaders stay idle; the leader delegates to the module-level
        # default (or an override registered under "lifeline.rescue").
        if not self._leader.is_leader:
            return

        await use_ext("lifeline.rescue", _rescue, self._query, self._rescue_after)