horsies 0.1.0a4__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (42) hide show
  1. horsies/__init__.py +117 -0
  2. horsies/core/__init__.py +0 -0
  3. horsies/core/app.py +552 -0
  4. horsies/core/banner.py +144 -0
  5. horsies/core/brokers/__init__.py +5 -0
  6. horsies/core/brokers/listener.py +444 -0
  7. horsies/core/brokers/postgres.py +993 -0
  8. horsies/core/cli.py +624 -0
  9. horsies/core/codec/serde.py +596 -0
  10. horsies/core/errors.py +535 -0
  11. horsies/core/logging.py +90 -0
  12. horsies/core/models/__init__.py +0 -0
  13. horsies/core/models/app.py +268 -0
  14. horsies/core/models/broker.py +79 -0
  15. horsies/core/models/queues.py +23 -0
  16. horsies/core/models/recovery.py +101 -0
  17. horsies/core/models/schedule.py +229 -0
  18. horsies/core/models/task_pg.py +307 -0
  19. horsies/core/models/tasks.py +358 -0
  20. horsies/core/models/workflow.py +1990 -0
  21. horsies/core/models/workflow_pg.py +245 -0
  22. horsies/core/registry/tasks.py +101 -0
  23. horsies/core/scheduler/__init__.py +26 -0
  24. horsies/core/scheduler/calculator.py +267 -0
  25. horsies/core/scheduler/service.py +569 -0
  26. horsies/core/scheduler/state.py +260 -0
  27. horsies/core/task_decorator.py +656 -0
  28. horsies/core/types/status.py +38 -0
  29. horsies/core/utils/imports.py +203 -0
  30. horsies/core/utils/loop_runner.py +44 -0
  31. horsies/core/worker/current.py +17 -0
  32. horsies/core/worker/worker.py +1967 -0
  33. horsies/core/workflows/__init__.py +23 -0
  34. horsies/core/workflows/engine.py +2344 -0
  35. horsies/core/workflows/recovery.py +501 -0
  36. horsies/core/workflows/registry.py +97 -0
  37. horsies/py.typed +0 -0
  38. horsies-0.1.0a4.dist-info/METADATA +35 -0
  39. horsies-0.1.0a4.dist-info/RECORD +42 -0
  40. horsies-0.1.0a4.dist-info/WHEEL +5 -0
  41. horsies-0.1.0a4.dist-info/entry_points.txt +2 -0
  42. horsies-0.1.0a4.dist-info/top_level.txt +1 -0
@@ -0,0 +1,358 @@
1
+ # app/core/models/tasks.py
2
+ from __future__ import annotations
3
+ import datetime
4
+ from dataclasses import dataclass
5
+ from typing import (
6
+ TYPE_CHECKING,
7
+ Any,
8
+ Generic,
9
+ TypeVar,
10
+ Optional,
11
+ Literal,
12
+ Self,
13
+ Annotated,
14
+ Union,
15
+ overload,
16
+ )
17
+ from pydantic import BaseModel, model_validator, Field
18
+ from pydantic.types import PositiveInt
19
+ from enum import Enum
20
+
21
+ if TYPE_CHECKING:
22
+ from horsies.core.models.workflow import SubWorkflowSummary
23
+
24
+ from horsies.core.types.status import TaskStatus
25
# Generic parameters used by TaskResult below:
#   T - type of the success payload
#   E - type of the error payload (typically TaskError or a subclass)
T = TypeVar('T')
E = TypeVar('E')
27
+
28
+
29
+ class _Unset:
30
+ """Sentinel type for distinguishing 'not provided' from None."""
31
+
32
+ __slots__ = ()
33
+
34
+
35
class LibraryErrorCode(str, Enum):
    """
    Library-defined error codes for infrastructure/runtime failures.

    These enumerate errors produced by the library runtime itself. User code
    should define domain-specific error codes as strings (e.g., "TOO_LARGE")
    or custom Enums for their own error categories.

    Inherits from str so members compare equal to their string values and
    serialize as plain strings.

    Categories:
    - Execution errors: UNHANDLED_EXCEPTION, TASK_EXCEPTION, WORKER_CRASHED
    - Retrieval errors: WAIT_TIMEOUT, TASK_NOT_FOUND, TASK_CANCELLED, RESULT_NOT_AVAILABLE, RESULT_NOT_READY
    - Broker errors: BROKER_ERROR
    - Worker errors: WORKER_RESOLUTION_ERROR, WORKER_SERIALIZATION_ERROR
    - Validation errors: RETURN_TYPE_MISMATCH, PYDANTIC_HYDRATION_ERROR
    - Lifecycle errors: SEND_SUPPRESSED
    - Workflow errors: UPSTREAM_SKIPPED, WORKFLOW_CTX_MISSING_ID, WORKFLOW_SUCCESS_CASE_NOT_MET
    """

    # Execution errors
    UNHANDLED_EXCEPTION = 'UNHANDLED_EXCEPTION'
    TASK_EXCEPTION = 'TASK_EXCEPTION'
    WORKER_CRASHED = 'WORKER_CRASHED'

    # Retrieval errors (from handle.get() / get_async() / result_for())
    WAIT_TIMEOUT = 'WAIT_TIMEOUT'
    TASK_NOT_FOUND = 'TASK_NOT_FOUND'
    TASK_CANCELLED = 'TASK_CANCELLED'
    RESULT_NOT_AVAILABLE = 'RESULT_NOT_AVAILABLE'
    RESULT_NOT_READY = 'RESULT_NOT_READY'

    # Broker errors
    BROKER_ERROR = 'BROKER_ERROR'

    # Worker errors
    WORKER_RESOLUTION_ERROR = 'WORKER_RESOLUTION_ERROR'
    WORKER_SERIALIZATION_ERROR = 'WORKER_SERIALIZATION_ERROR'

    # Validation errors
    RETURN_TYPE_MISMATCH = 'RETURN_TYPE_MISMATCH'
    PYDANTIC_HYDRATION_ERROR = 'PYDANTIC_HYDRATION_ERROR'

    # Lifecycle errors
    SEND_SUPPRESSED = 'SEND_SUPPRESSED'

    # Workflow errors
    UPSTREAM_SKIPPED = 'UPSTREAM_SKIPPED'
    WORKFLOW_CTX_MISSING_ID = 'WORKFLOW_CTX_MISSING_ID'
    WORKFLOW_SUCCESS_CASE_NOT_MET = 'WORKFLOW_SUCCESS_CASE_NOT_MET'
83
+
84
+
85
class TaskError(BaseModel):
    """
    The error payload for a TaskResult.
    A task error can be returned by:
    - a task function (e.g. `return TaskResult(err=TaskError(...))`)
    - library failure (e.g. execution error, serialization error, etc.)
    """

    # BaseException is not a pydantic-native type, so arbitrary types must
    # be allowed for the `exception` field below.
    model_config = {'arbitrary_types_allowed': True}

    # The originating exception: either a live BaseException or a dict
    # (its already-serialized representation).
    exception: Optional[dict[str, Any] | BaseException] = None
    # Library internal errors use LibraryErrorCode; user errors use str.
    error_code: Optional[Union[LibraryErrorCode, str]] = None
    # Arbitrary structured context attached by whoever produced the error.
    data: Optional[Any] = None
    # Human-readable description of the failure.
    message: Optional[str] = None
100
+
101
+
102
class SubWorkflowError(TaskError):
    """
    Error representing a failed subworkflow.

    Allows parent tasks to distinguish subworkflow failures from regular
    task failures via pattern matching:

        match result.err:
            case SubWorkflowError() as e:
                print(f"Subworkflow {e.sub_workflow_id} failed")
            case TaskError() as e:
                print(f"Task error: {e.message}")
    """

    # ID of the subworkflow run that failed.
    sub_workflow_id: str
    # Summary of the failed subworkflow. Declared as a string forward
    # reference (SubWorkflowSummary is only imported under TYPE_CHECKING);
    # resolved at import time by _rebuild_subworkflow_error() below.
    sub_workflow_summary: 'SubWorkflowSummary[Any]'
118
+
119
+
120
# Module-level singleton sentinel; used as the default for TaskResult's
# ok/err keyword arguments so that None remains a valid payload value.
_UNSET: _Unset = _Unset()
121
+
122
+
123
+ class TaskResult(Generic[T, E]):
124
+ """
125
+ Discriminated union style result: exactly one of ok / err is set.
126
+ Supports None as a valid success value (e.g., TaskResult[None, TaskError]).
127
+
128
+ Internally uses tuple-based discriminated union for type narrowing:
129
+ - (True, value) for success
130
+ - (False, error) for failure
131
+ """
132
+
133
+ __slots__ = ('_data',)
134
+ _data: tuple[Literal[True], T] | tuple[Literal[False], E]
135
+
136
+ @overload
137
+ def __init__(self, *, ok: T) -> None: ...
138
+
139
+ @overload
140
+ def __init__(self, *, err: E) -> None: ...
141
+
142
+ def __init__(
143
+ self,
144
+ *,
145
+ ok: T | _Unset = _UNSET,
146
+ err: E | _Unset = _UNSET,
147
+ ) -> None:
148
+ ok_provided = not isinstance(ok, _Unset)
149
+ err_provided = not isinstance(err, _Unset)
150
+
151
+ if ok_provided and err_provided:
152
+ raise ValueError('TaskResult cannot have both ok and err')
153
+ if not ok_provided and not err_provided:
154
+ raise ValueError('TaskResult must have exactly one of ok / err')
155
+
156
+ # isinstance narrowing for assignment
157
+ if not isinstance(ok, _Unset):
158
+ self._data = (True, ok)
159
+ elif not isinstance(err, _Unset):
160
+ self._data = (False, err)
161
+ else:
162
+ raise ValueError('TaskResult must have exactly one of ok / err')
163
+
164
+ # helpers
165
+ def is_ok(self) -> bool:
166
+ return self._data[0]
167
+
168
+ def is_err(self) -> bool:
169
+ return not self._data[0]
170
+
171
+ @property
172
+ def ok(self) -> T | None:
173
+ """Access the success value, or None if this is an error result."""
174
+ match self._data:
175
+ case (True, value):
176
+ return value
177
+ case (False, _):
178
+ return None
179
+
180
+ @property
181
+ def err(self) -> E | None:
182
+ """Access the error value, or None if this is a success result."""
183
+ match self._data:
184
+ case (False, error):
185
+ return error
186
+ case (True, _):
187
+ return None
188
+
189
+ def unwrap(self) -> T:
190
+ """Get the success value. Raises if result is error."""
191
+ match self._data:
192
+ case (True, value):
193
+ return value
194
+ case (False, _):
195
+ raise ValueError('Result is not ok - check is_ok() first')
196
+
197
+ def unwrap_err(self) -> E:
198
+ """Get the error value. Raises if result is success."""
199
+ match self._data:
200
+ case (False, error):
201
+ return error
202
+ case (True, _):
203
+ raise ValueError('Result is not error - check is_err() first')
204
+
205
+ @property
206
+ def ok_value(self) -> T:
207
+ """Get the success value. Raises if result is error."""
208
+ match self._data:
209
+ case (True, value):
210
+ return value
211
+ case (False, _):
212
+ raise ValueError('Result is not ok - check is_ok() first')
213
+
214
+ @property
215
+ def err_value(self) -> E:
216
+ """Get the error value. Raises if result is success."""
217
+ match self._data:
218
+ case (False, error):
219
+ return error
220
+ case (True, _):
221
+ raise ValueError('Result is not error - check is_err() first')
222
+
223
+
224
@dataclass
class TaskInfo:
    """Metadata for a broker-backed task."""

    # Identity
    task_id: str
    task_name: str
    # Current lifecycle state (see horsies.core.types.status.TaskStatus).
    status: TaskStatus
    # Routing
    queue_name: str
    priority: int
    # Retry bookkeeping
    retry_count: int
    max_retries: int
    next_retry_at: datetime.datetime | None
    # Lifecycle timestamps; each is None until the corresponding event occurs.
    sent_at: datetime.datetime | None
    claimed_at: datetime.datetime | None
    started_at: datetime.datetime | None
    completed_at: datetime.datetime | None
    failed_at: datetime.datetime | None
    # Identity of the worker that claimed the task (None if unclaimed).
    worker_hostname: str | None
    worker_pid: int | None
    worker_process_name: str | None
    # Final outcome; None while the task has not produced a result yet.
    result: TaskResult[Any, TaskError] | None = None
    # Free-text failure explanation recorded by the broker/worker, if any.
    failed_reason: str | None = None
246
+
247
+
248
class RetryPolicy(BaseModel):
    """
    Retry policy configuration for tasks.

    Two strategies supported:
    1. Fixed: Uses intervals list exactly as specified
    2. Exponential: Uses intervals[0] as base, exponentially increases

    - max_retries: maximum number of retry attempts (initial send not counted)
    - intervals: delay intervals in seconds between retry attempts
    - backoff_strategy: 'fixed' uses intervals as-is, 'exponential' uses intervals[0] as base
    - jitter: whether to add ±25% randomization to delays
    """

    max_retries: Annotated[
        int, Field(ge=1, le=20, description='Number of retry attempts (1-20)')
    ] = 3
    # Each interval is a positive number of seconds, capped at one day.
    # Mutable default is safe here: pydantic deep-copies field defaults
    # per instance.
    intervals: Annotated[
        list[
            Annotated[
                PositiveInt,
                Field(le=86400, description='Retry interval in seconds (1-86400)'),
            ]
        ],
        Field(min_length=1, max_length=20, description='List of retry intervals'),
    ] = [60, 300, 900]  # seconds: 1min, 5min, 15min
    backoff_strategy: Literal['fixed', 'exponential'] = 'fixed'
    jitter: bool = True

    @model_validator(mode='after')
    def validate_strategy_consistency(self) -> Self:
        """Validate that backoff strategy is consistent with intervals configuration."""

        if self.backoff_strategy == 'fixed':
            # Fixed strategy: intervals length should match max_retries
            if len(self.intervals) != self.max_retries:
                raise ValueError(
                    f'Fixed backoff strategy requires intervals length ({len(self.intervals)}) '
                    f'to match max_retries ({self.max_retries}). '
                    f'Either adjust intervals list or use exponential strategy.'
                )

        elif self.backoff_strategy == 'exponential':
            # Exponential strategy: should have exactly one base interval
            if len(self.intervals) != 1:
                raise ValueError(
                    f'Exponential backoff strategy requires exactly one base interval, '
                    f'got {len(self.intervals)} intervals. Use intervals=[base_seconds] for exponential backoff.'
                )

        return self

    # Convenience constructors to prevent misconfiguration at call sites
    @classmethod
    def fixed(cls, intervals: list[int], *, jitter: bool = True) -> 'RetryPolicy':
        """Create a fixed backoff policy where intervals length defines max_retries.

        Deriving max_retries from len(intervals) guarantees the validator's
        length-match requirement holds by construction.
        """
        return cls(
            max_retries=len(intervals),
            intervals=intervals,
            backoff_strategy='fixed',
            jitter=jitter,
        )

    @classmethod
    def exponential(
        cls,
        base_seconds: int,
        *,
        max_retries: int,
        jitter: bool = True,
    ) -> 'RetryPolicy':
        """Create an exponential backoff policy using a single base interval.

        The policy uses base_seconds * 2**(attempt-1) per attempt.
        """
        return cls(
            max_retries=max_retries,
            intervals=[base_seconds],
            backoff_strategy='exponential',
            jitter=jitter,
        )
329
+
330
+
331
class TaskOptions(BaseModel):
    """
    Options for a task.

    Fields:
        task_name: Unique task identifier (mandatory - decoupled from function names)
        queue_name: Target queue name (validated against app config at definition time)
        good_until: Task expiry deadline (task skipped if not claimed by this time)
        auto_retry_for: Error codes or exception types that trigger automatic retries
        retry_policy: Retry timing and backoff configuration
    """

    # The only mandatory field; all others fall back to app-level defaults.
    task_name: str
    queue_name: Optional[str] = None
    good_until: Optional[datetime.datetime] = None
    # Accepts user-defined string codes as well as LibraryErrorCode members.
    auto_retry_for: Optional[list[Union[str, LibraryErrorCode]]] = None
    retry_policy: Optional[RetryPolicy] = None
348
+
349
+
350
# Rebuild SubWorkflowError to resolve forward reference to SubWorkflowSummary
def _rebuild_subworkflow_error() -> None:
    """Rebuild SubWorkflowError after SubWorkflowSummary is importable.

    The import happens at call time rather than module top level —
    presumably to avoid a circular import with
    horsies.core.models.workflow (which is only imported under
    TYPE_CHECKING above); confirm before restructuring.
    """
    # NOTE(review): this local import looks unused, but pydantic's
    # model_rebuild() resolves outstanding forward references using the
    # caller's local namespace, so the binding of SubWorkflowSummary here
    # is load-bearing — do not remove it.
    from horsies.core.models.workflow import SubWorkflowSummary

    SubWorkflowError.model_rebuild()


# Run at import time so SubWorkflowError is fully usable immediately.
_rebuild_subworkflow_error()