horsies 0.1.0a1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- horsies/__init__.py +115 -0
- horsies/core/__init__.py +0 -0
- horsies/core/app.py +552 -0
- horsies/core/banner.py +144 -0
- horsies/core/brokers/__init__.py +5 -0
- horsies/core/brokers/listener.py +444 -0
- horsies/core/brokers/postgres.py +864 -0
- horsies/core/cli.py +624 -0
- horsies/core/codec/serde.py +575 -0
- horsies/core/errors.py +535 -0
- horsies/core/logging.py +90 -0
- horsies/core/models/__init__.py +0 -0
- horsies/core/models/app.py +268 -0
- horsies/core/models/broker.py +79 -0
- horsies/core/models/queues.py +23 -0
- horsies/core/models/recovery.py +101 -0
- horsies/core/models/schedule.py +229 -0
- horsies/core/models/task_pg.py +307 -0
- horsies/core/models/tasks.py +332 -0
- horsies/core/models/workflow.py +1988 -0
- horsies/core/models/workflow_pg.py +245 -0
- horsies/core/registry/tasks.py +101 -0
- horsies/core/scheduler/__init__.py +26 -0
- horsies/core/scheduler/calculator.py +267 -0
- horsies/core/scheduler/service.py +569 -0
- horsies/core/scheduler/state.py +260 -0
- horsies/core/task_decorator.py +615 -0
- horsies/core/types/status.py +38 -0
- horsies/core/utils/imports.py +203 -0
- horsies/core/utils/loop_runner.py +44 -0
- horsies/core/worker/current.py +17 -0
- horsies/core/worker/worker.py +1967 -0
- horsies/core/workflows/__init__.py +23 -0
- horsies/core/workflows/engine.py +2344 -0
- horsies/core/workflows/recovery.py +501 -0
- horsies/core/workflows/registry.py +97 -0
- horsies/py.typed +0 -0
- horsies-0.1.0a1.dist-info/METADATA +31 -0
- horsies-0.1.0a1.dist-info/RECORD +42 -0
- horsies-0.1.0a1.dist-info/WHEEL +5 -0
- horsies-0.1.0a1.dist-info/entry_points.txt +2 -0
- horsies-0.1.0a1.dist-info/top_level.txt +1 -0
|
@@ -0,0 +1,332 @@
|
|
|
1
|
+
# app/core/models/tasks.py
|
|
2
|
+
from __future__ import annotations
|
|
3
|
+
import datetime
|
|
4
|
+
from typing import (
|
|
5
|
+
TYPE_CHECKING,
|
|
6
|
+
Any,
|
|
7
|
+
Generic,
|
|
8
|
+
TypeVar,
|
|
9
|
+
Optional,
|
|
10
|
+
Literal,
|
|
11
|
+
Self,
|
|
12
|
+
Annotated,
|
|
13
|
+
Union,
|
|
14
|
+
overload,
|
|
15
|
+
)
|
|
16
|
+
from pydantic import BaseModel, model_validator, Field
|
|
17
|
+
from pydantic.types import PositiveInt
|
|
18
|
+
from enum import Enum
|
|
19
|
+
|
|
20
|
+
if TYPE_CHECKING:
|
|
21
|
+
from horsies.core.models.workflow import SubWorkflowSummary
|
|
22
|
+
|
|
23
|
+
# Generic parameters used by TaskResult below.
T = TypeVar('T')  # success payload type
E = TypeVar('E')  # error payload type (typically TaskError)
|
26
|
+
|
|
27
|
+
class _Unset:
    """Sentinel type for distinguishing 'not provided' from None."""

    # No per-instance state: instances exist only for isinstance() checks.
    __slots__ = ()
|
33
|
+
class LibraryErrorCode(str, Enum):
    """
    Library-defined error codes for infrastructure/runtime failures.

    These enumerate errors produced by the library runtime itself. User code
    should define domain-specific error codes as strings (e.g., "TOO_LARGE")
    or custom Enums for their own error categories.

    Categories:
    - Execution errors: UNHANDLED_EXCEPTION, TASK_EXCEPTION, WORKER_CRASHED
    - Retrieval errors: WAIT_TIMEOUT, TASK_NOT_FOUND, TASK_CANCELLED,
      RESULT_NOT_AVAILABLE, RESULT_NOT_READY
    - Broker errors: BROKER_ERROR
    - Worker errors: WORKER_RESOLUTION_ERROR, WORKER_SERIALIZATION_ERROR
    - Validation errors: RETURN_TYPE_MISMATCH, PYDANTIC_HYDRATION_ERROR
    - Lifecycle errors: SEND_SUPPRESSED
    - Workflow errors: UPSTREAM_SKIPPED, WORKFLOW_CTX_MISSING_ID,
      WORKFLOW_SUCCESS_CASE_NOT_MET
    """

    # Execution errors
    UNHANDLED_EXCEPTION = 'UNHANDLED_EXCEPTION'
    TASK_EXCEPTION = 'TASK_EXCEPTION'
    WORKER_CRASHED = 'WORKER_CRASHED'

    # Retrieval errors (from handle.get() / get_async() / result_for())
    WAIT_TIMEOUT = 'WAIT_TIMEOUT'
    TASK_NOT_FOUND = 'TASK_NOT_FOUND'
    TASK_CANCELLED = 'TASK_CANCELLED'
    RESULT_NOT_AVAILABLE = 'RESULT_NOT_AVAILABLE'
    RESULT_NOT_READY = 'RESULT_NOT_READY'

    # Broker errors
    BROKER_ERROR = 'BROKER_ERROR'

    # Worker errors
    WORKER_RESOLUTION_ERROR = 'WORKER_RESOLUTION_ERROR'
    WORKER_SERIALIZATION_ERROR = 'WORKER_SERIALIZATION_ERROR'

    # Validation errors
    RETURN_TYPE_MISMATCH = 'RETURN_TYPE_MISMATCH'
    PYDANTIC_HYDRATION_ERROR = 'PYDANTIC_HYDRATION_ERROR'

    # Lifecycle errors
    SEND_SUPPRESSED = 'SEND_SUPPRESSED'

    # Workflow errors
    UPSTREAM_SKIPPED = 'UPSTREAM_SKIPPED'
    WORKFLOW_CTX_MISSING_ID = 'WORKFLOW_CTX_MISSING_ID'
    WORKFLOW_SUCCESS_CASE_NOT_MET = 'WORKFLOW_SUCCESS_CASE_NOT_MET'
+
|
|
83
|
+
class TaskError(BaseModel):
    """
    The error payload for a TaskResult.

    A task error can be returned by:
    - a task function (e.g. `return TaskResult(err=TaskError(...))`)
    - library failure (e.g. execution error, serialization error, etc.)
    """

    # BaseException instances are not pydantic-native types; allow them.
    model_config = {'arbitrary_types_allowed': True}

    # Either an already-serialized exception dict or a live exception object.
    exception: Optional[dict[str, Any] | BaseException] = None
    # Library internal errors use LibraryErrorCode; user errors use str.
    error_code: Optional[Union[LibraryErrorCode, str]] = None
    # Arbitrary caller-supplied payload attached to the error.
    data: Optional[Any] = None
    # Human-readable description of the failure.
    message: Optional[str] = None
|
99
|
+
|
|
100
|
+
class SubWorkflowError(TaskError):
    """
    Error representing a failed subworkflow.

    Allows parent tasks to distinguish subworkflow failures from regular
    task failures via pattern matching:

        match result.err:
            case SubWorkflowError() as e:
                print(f"Subworkflow {e.sub_workflow_id} failed")
            case TaskError() as e:
                print(f"Task error: {e.message}")
    """

    # Identifier of the subworkflow that failed.
    sub_workflow_id: str
    # String forward reference: SubWorkflowSummary lives in workflow.py and is
    # only imported under TYPE_CHECKING; resolved at runtime by
    # _rebuild_subworkflow_error() at the bottom of this module.
    sub_workflow_summary: 'SubWorkflowSummary[Any]'
|
+
|
|
118
|
+
# Module-level sentinel instance shared by TaskResult's keyword defaults.
_UNSET: _Unset = _Unset()
|
|
119
|
+
|
|
120
|
+
|
|
121
|
+
class TaskResult(Generic[T, E]):
    """
    Result type holding exactly one of a success value or an error value.

    ``None`` is a legitimate success payload (e.g. TaskResult[None, TaskError]).

    State is stored as a two-element tuple acting as a discriminated union:
    ``(True, value)`` for success, ``(False, error)`` for failure, which lets
    type checkers narrow the payload from the boolean tag.
    """

    __slots__ = ('_data',)
    _data: tuple[Literal[True], T] | tuple[Literal[False], E]

    @overload
    def __init__(self, *, ok: T) -> None: ...

    @overload
    def __init__(self, *, err: E) -> None: ...

    def __init__(
        self,
        *,
        ok: T | _Unset = _UNSET,
        err: E | _Unset = _UNSET,
    ) -> None:
        # Reject the over-specified case up front; the under-specified case
        # falls through to the final else below.
        if not isinstance(ok, _Unset) and not isinstance(err, _Unset):
            raise ValueError('TaskResult cannot have both ok and err')

        # isinstance() checks (rather than a flag) so the assignment narrows.
        if not isinstance(ok, _Unset):
            self._data = (True, ok)
        elif not isinstance(err, _Unset):
            self._data = (False, err)
        else:
            raise ValueError('TaskResult must have exactly one of ok / err')

    # helpers
    def is_ok(self) -> bool:
        """True when this result holds a success value."""
        return self._data[0]

    def is_err(self) -> bool:
        """True when this result holds an error value."""
        return not self._data[0]

    @property
    def ok(self) -> T | None:
        """Access the success value, or None if this is an error result."""
        tag, payload = self._data
        return payload if tag else None

    @property
    def err(self) -> E | None:
        """Access the error value, or None if this is a success result."""
        tag, payload = self._data
        return None if tag else payload

    def unwrap(self) -> T:
        """Get the success value. Raises if result is error."""
        tag, payload = self._data
        if tag:
            return payload
        raise ValueError('Result is not ok - check is_ok() first')

    def unwrap_err(self) -> E:
        """Get the error value. Raises if result is success."""
        tag, payload = self._data
        if not tag:
            return payload
        raise ValueError('Result is not error - check is_err() first')

    @property
    def ok_value(self) -> T:
        """Get the success value. Raises if result is error."""
        # Same contract (and same exception message) as unwrap().
        return self.unwrap()

    @property
    def err_value(self) -> E:
        """Get the error value. Raises if result is success."""
        # Same contract (and same exception message) as unwrap_err().
        return self.unwrap_err()
|
+
|
|
222
|
+
class RetryPolicy(BaseModel):
|
|
223
|
+
"""
|
|
224
|
+
Retry policy configuration for tasks.
|
|
225
|
+
|
|
226
|
+
Two strategies supported:
|
|
227
|
+
1. Fixed: Uses intervals list exactly as specified
|
|
228
|
+
2. Exponential: Uses intervals[0] as base, exponentially increases
|
|
229
|
+
|
|
230
|
+
- max_retries: maximum number of retry attempts (initial send not counted)
|
|
231
|
+
- intervals: delay intervals in seconds between retry attempts
|
|
232
|
+
- backoff_strategy: 'fixed' uses intervals as-is, 'exponential' uses intervals[0] as base
|
|
233
|
+
- jitter: whether to add ±25% randomization to delays
|
|
234
|
+
"""
|
|
235
|
+
|
|
236
|
+
max_retries: Annotated[
|
|
237
|
+
int, Field(ge=1, le=20, description='Number of retry attempts (1-20)')
|
|
238
|
+
] = 3
|
|
239
|
+
intervals: Annotated[
|
|
240
|
+
list[
|
|
241
|
+
Annotated[
|
|
242
|
+
PositiveInt,
|
|
243
|
+
Field(le=86400, description='Retry interval in seconds (1-86400)'),
|
|
244
|
+
]
|
|
245
|
+
],
|
|
246
|
+
Field(min_length=1, max_length=20, description='List of retry intervals'),
|
|
247
|
+
] = [60, 300, 900] # seconds: 1min, 5min, 15min
|
|
248
|
+
backoff_strategy: Literal['fixed', 'exponential'] = 'fixed'
|
|
249
|
+
jitter: bool = True
|
|
250
|
+
|
|
251
|
+
@model_validator(mode='after')
|
|
252
|
+
def validate_strategy_consistency(self) -> Self:
|
|
253
|
+
"""Validate that backoff strategy is consistent with intervals configuration."""
|
|
254
|
+
|
|
255
|
+
if self.backoff_strategy == 'fixed':
|
|
256
|
+
# Fixed strategy: intervals length should match max_retries
|
|
257
|
+
if len(self.intervals) != self.max_retries:
|
|
258
|
+
raise ValueError(
|
|
259
|
+
f'Fixed backoff strategy requires intervals length ({len(self.intervals)}) '
|
|
260
|
+
f'to match max_retries ({self.max_retries}). '
|
|
261
|
+
f'Either adjust intervals list or use exponential strategy.'
|
|
262
|
+
)
|
|
263
|
+
|
|
264
|
+
elif self.backoff_strategy == 'exponential':
|
|
265
|
+
# Exponential strategy: should have exactly one base interval
|
|
266
|
+
if len(self.intervals) != 1:
|
|
267
|
+
raise ValueError(
|
|
268
|
+
f'Exponential backoff strategy requires exactly one base interval, '
|
|
269
|
+
f'got {len(self.intervals)} intervals. Use intervals=[base_seconds] for exponential backoff.'
|
|
270
|
+
)
|
|
271
|
+
|
|
272
|
+
return self
|
|
273
|
+
|
|
274
|
+
# Convenience constructors to prevent misconfiguration at call sites
|
|
275
|
+
@classmethod
|
|
276
|
+
def fixed(cls, intervals: list[int], *, jitter: bool = True) -> 'RetryPolicy':
|
|
277
|
+
"""Create a fixed backoff policy where intervals length defines max_retries."""
|
|
278
|
+
return cls(
|
|
279
|
+
max_retries=len(intervals),
|
|
280
|
+
intervals=intervals,
|
|
281
|
+
backoff_strategy='fixed',
|
|
282
|
+
jitter=jitter,
|
|
283
|
+
)
|
|
284
|
+
|
|
285
|
+
@classmethod
|
|
286
|
+
def exponential(
|
|
287
|
+
cls,
|
|
288
|
+
base_seconds: int,
|
|
289
|
+
*,
|
|
290
|
+
max_retries: int,
|
|
291
|
+
jitter: bool = True,
|
|
292
|
+
) -> 'RetryPolicy':
|
|
293
|
+
"""Create an exponential backoff policy using a single base interval.
|
|
294
|
+
|
|
295
|
+
The policy uses base_seconds * 2**(attempt-1) per attempt.
|
|
296
|
+
"""
|
|
297
|
+
return cls(
|
|
298
|
+
max_retries=max_retries,
|
|
299
|
+
intervals=[base_seconds],
|
|
300
|
+
backoff_strategy='exponential',
|
|
301
|
+
jitter=jitter,
|
|
302
|
+
)
|
|
303
|
+
|
|
304
|
+
|
|
305
|
+
class TaskOptions(BaseModel):
    """
    Options for a task.

    Fields:
    task_name: Unique task identifier (mandatory - decoupled from function names)
    queue_name: Target queue name (validated against app config at definition time)
    good_until: Task expiry deadline (task skipped if not claimed by this time)
    auto_retry_for: Error codes or exception types that trigger automatic retries
    retry_policy: Retry timing and backoff configuration
    """

    # Mandatory unique identifier; not derived from the decorated function name.
    task_name: str
    # None presumably falls back to an app-level default queue — confirm in app config.
    queue_name: Optional[str] = None
    # Absolute deadline; a task not claimed by this time is skipped.
    good_until: Optional[datetime.datetime] = None
    # Mix of user-defined string codes and LibraryErrorCode values that trigger retries.
    auto_retry_for: Optional[list[Union[str, LibraryErrorCode]]] = None
    # None means no retry policy attached here; worker-side defaulting not visible in this module.
    retry_policy: Optional[RetryPolicy] = None
323
|
+
|
|
324
|
+
# Rebuild SubWorkflowError to resolve forward reference to SubWorkflowSummary
def _rebuild_subworkflow_error() -> None:
    """Rebuild SubWorkflowError after SubWorkflowSummary is importable."""
    # The import looks unused but is load-bearing: pydantic's model_rebuild()
    # inspects the calling frame's namespace to resolve string annotations,
    # so 'SubWorkflowSummary' must be a local here. NOTE(review): deferring the
    # import into this function presumably also avoids a circular import with
    # horsies.core.models.workflow at module load — confirm the import graph.
    from horsies.core.models.workflow import SubWorkflowSummary

    SubWorkflowError.model_rebuild()


# Executed at import time, after all classes above are defined.
_rebuild_subworkflow_error()