horsies 0.1.0a1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (42) hide show
  1. horsies/__init__.py +115 -0
  2. horsies/core/__init__.py +0 -0
  3. horsies/core/app.py +552 -0
  4. horsies/core/banner.py +144 -0
  5. horsies/core/brokers/__init__.py +5 -0
  6. horsies/core/brokers/listener.py +444 -0
  7. horsies/core/brokers/postgres.py +864 -0
  8. horsies/core/cli.py +624 -0
  9. horsies/core/codec/serde.py +575 -0
  10. horsies/core/errors.py +535 -0
  11. horsies/core/logging.py +90 -0
  12. horsies/core/models/__init__.py +0 -0
  13. horsies/core/models/app.py +268 -0
  14. horsies/core/models/broker.py +79 -0
  15. horsies/core/models/queues.py +23 -0
  16. horsies/core/models/recovery.py +101 -0
  17. horsies/core/models/schedule.py +229 -0
  18. horsies/core/models/task_pg.py +307 -0
  19. horsies/core/models/tasks.py +332 -0
  20. horsies/core/models/workflow.py +1988 -0
  21. horsies/core/models/workflow_pg.py +245 -0
  22. horsies/core/registry/tasks.py +101 -0
  23. horsies/core/scheduler/__init__.py +26 -0
  24. horsies/core/scheduler/calculator.py +267 -0
  25. horsies/core/scheduler/service.py +569 -0
  26. horsies/core/scheduler/state.py +260 -0
  27. horsies/core/task_decorator.py +615 -0
  28. horsies/core/types/status.py +38 -0
  29. horsies/core/utils/imports.py +203 -0
  30. horsies/core/utils/loop_runner.py +44 -0
  31. horsies/core/worker/current.py +17 -0
  32. horsies/core/worker/worker.py +1967 -0
  33. horsies/core/workflows/__init__.py +23 -0
  34. horsies/core/workflows/engine.py +2344 -0
  35. horsies/core/workflows/recovery.py +501 -0
  36. horsies/core/workflows/registry.py +97 -0
  37. horsies/py.typed +0 -0
  38. horsies-0.1.0a1.dist-info/METADATA +31 -0
  39. horsies-0.1.0a1.dist-info/RECORD +42 -0
  40. horsies-0.1.0a1.dist-info/WHEEL +5 -0
  41. horsies-0.1.0a1.dist-info/entry_points.txt +2 -0
  42. horsies-0.1.0a1.dist-info/top_level.txt +1 -0
@@ -0,0 +1,229 @@
1
+ # horsies/core/models/schedule.py
2
+ from __future__ import annotations
3
+ from datetime import time as datetime_time
4
+ from typing import Literal, Union, Optional, Any
5
+ from pydantic import BaseModel, Field, model_validator
6
+ from typing_extensions import Self
7
+ from enum import Enum
8
+ from horsies.core.errors import ConfigurationError, ErrorCode, ValidationReport, raise_collected
9
+
10
+
11
class Weekday(str, Enum):
    """Days of the week as lowercase string values (e.g. ``'monday'``)."""

    MONDAY = 'monday'
    TUESDAY = 'tuesday'
    WEDNESDAY = 'wednesday'
    THURSDAY = 'thursday'
    FRIDAY = 'friday'
    SATURDAY = 'saturday'
    SUNDAY = 'sunday'
21
+
22
+
23
class IntervalSchedule(BaseModel):
    """
    Run task every N seconds/minutes/hours/days.

    At least one time unit must be given; the effective interval is the
    sum of all provided units.

    Examples:
        - Every 30 seconds: IntervalSchedule(seconds=30)
        - Every 5 minutes: IntervalSchedule(minutes=5)
        - Every 6 hours: IntervalSchedule(hours=6)
        - Every 2 days: IntervalSchedule(days=2)
        - Every 1.5 hours: IntervalSchedule(hours=1, minutes=30)
    """

    type: Literal['interval'] = 'interval'
    seconds: Optional[int] = Field(
        default=None, ge=1, le=86400, description='Seconds component (1-86400)'
    )
    minutes: Optional[int] = Field(
        default=None, ge=1, le=1440, description='Minutes component (1-1440)'
    )
    hours: Optional[int] = Field(
        default=None, ge=1, le=168, description='Hours component (1-168)'
    )
    days: Optional[int] = Field(
        default=None, ge=1, le=365, description='Days component (1-365)'
    )

    @model_validator(mode='after')
    def validate_at_least_one_unit(self) -> Self:
        """Reject instances where every time unit was left unset."""
        report = ValidationReport('schedule')
        # Field constraints (ge=1) already rule out 0, so "unset" == None.
        units = (self.seconds, self.minutes, self.hours, self.days)
        if all(unit is None for unit in units):
            report.add(ConfigurationError(
                message='IntervalSchedule requires at least one time unit',
                code=ErrorCode.CONFIG_INVALID_SCHEDULE,
                notes=['all time units (seconds, minutes, hours, days) are None'],
                help_text='specify at least one: seconds, minutes, hours, or days',
            ))
        raise_collected(report)
        return self

    def total_seconds(self) -> int:
        """Return the combined interval length in seconds."""
        unit_factors = (
            (self.seconds, 1),
            (self.minutes, 60),
            (self.hours, 3600),
            (self.days, 86400),
        )
        return sum(value * factor for value, factor in unit_factors if value)
78
+
79
+
80
class HourlySchedule(BaseModel):
    """
    Run task once per hour, at the given minute and second.

    Examples:
        - Every hour at XX:30:00 -> HourlySchedule(minute=30)
        - Every hour at XX:15:30 -> HourlySchedule(minute=15, second=30)
    """

    type: Literal['hourly'] = 'hourly'
    # `minute` is required; `second` defaults to the top of the minute.
    minute: int = Field(ge=0, le=59, description='Minute of the hour (0-59)')
    second: int = Field(
        default=0, ge=0, le=59, description='Second of the minute (0-59)'
    )
94
+
95
+
96
class DailySchedule(BaseModel):
    """
    Run task once per day, at a fixed time of day.

    Examples:
        - Daily at 3:00 AM -> DailySchedule(time=time(3, 0, 0))
        - Daily at 15:30:00 -> DailySchedule(time=time(15, 30, 0))
    """

    type: Literal['daily'] = 'daily'
    time: datetime_time = Field(description='Time of day to run (HH:MM:SS)')
107
+
108
+
109
class WeeklySchedule(BaseModel):
    """
    Run task at a fixed time of day, on selected days of the week.

    Examples:
        - Monday and Friday at 9 AM:
          WeeklySchedule(days=[Weekday.MONDAY, Weekday.FRIDAY], time=time(9, 0, 0))
        - Weekdays at 5 PM:
          WeeklySchedule(days=[...weekdays...], time=time(17, 0, 0))
    """

    type: Literal['weekly'] = 'weekly'
    days: list[Weekday] = Field(min_length=1, description='Days of week to run')
    time: datetime_time = Field(description='Time of day to run (HH:MM:SS)')

    @model_validator(mode='after')
    def validate_unique_days(self) -> Self:
        """Reject day lists that mention the same weekday more than once."""
        report = ValidationReport('schedule')
        if len(set(self.days)) != len(self.days):
            report.add(ConfigurationError(
                message='WeeklySchedule has duplicate days',
                code=ErrorCode.CONFIG_INVALID_SCHEDULE,
                notes=[f'days: {[d.value for d in self.days]}'],
                help_text='each day should appear only once in the list',
            ))
        raise_collected(report)
        return self
137
+
138
+
139
class MonthlySchedule(BaseModel):
    """
    Run task once per month, on a fixed day at a fixed time.

    Note: If day > days in month (e.g., day=31 in February),
    the schedule will be skipped for that month.

    Examples:
        - First day of month at midnight:
          MonthlySchedule(day=1, time=time(0, 0, 0))
        - 15th of each month at 3 PM:
          MonthlySchedule(day=15, time=time(15, 0, 0))
    """

    type: Literal['monthly'] = 'monthly'
    day: int = Field(ge=1, le=31, description='Day of month (1-31)')
    time: datetime_time = Field(description='Time of day to run (HH:MM:SS)')
156
+
157
+
158
# Union of every supported schedule pattern. Each member declares its own
# `type: Literal[...]` field, so serialized patterns can be distinguished.
# NOTE(review): member order is kept as-is — pydantic union resolution can
# depend on it.
SchedulePattern = Union[
    IntervalSchedule,
    HourlySchedule,
    DailySchedule,
    WeeklySchedule,
    MonthlySchedule,
]
165
+
166
+
167
class TaskSchedule(BaseModel):
    """
    A single scheduled-task definition.

    Attributes:
        name: Unique identifier for this schedule (used as DB primary key).
        task_name: Name of the task to execute (must be registered via @app.task).
        pattern: Schedule pattern defining when the task runs.
        args: Positional arguments passed to the task.
        kwargs: Keyword arguments passed to the task.
        queue_name: Target queue (None = default queue).
        enabled: Whether this schedule is active.
        timezone: Timezone used when evaluating the pattern (default: UTC).
        catch_up_missed: Whether runs missed while the scheduler was down
            should be executed on restart.
    """

    name: str = Field(description='Unique schedule identifier')
    task_name: str = Field(description='Task to execute (must be registered)')
    pattern: SchedulePattern = Field(description='Schedule pattern')
    args: tuple[Any, ...] = Field(default=(), description='Task positional arguments')
    kwargs: dict[str, Any] = Field(
        default_factory=dict, description='Task keyword arguments'
    )
    queue_name: Optional[str] = Field(default=None, description='Target queue name')
    enabled: bool = Field(default=True, description='Whether schedule is active')
    timezone: str = Field(default='UTC', description='Timezone for schedule evaluation')
    catch_up_missed: bool = Field(
        default=False, description='Execute missed runs if scheduler was down'
    )
196
+
197
+
198
class ScheduleConfig(BaseModel):
    """
    Scheduler section of AppConfig.

    Attributes:
        enabled: Master switch for the scheduler (default: True).
        schedules: The scheduled tasks to run.
        check_interval_seconds: How often to poll for due schedules (1-60 s).
    """

    enabled: bool = Field(default=True, description='Master scheduler enable switch')
    schedules: list[TaskSchedule] = Field(
        default_factory=list, description='List of scheduled tasks'
    )
    check_interval_seconds: int = Field(
        default=1, ge=1, le=60, description='Scheduler check interval (1-60 seconds)'
    )

    @model_validator(mode='after')
    def validate_unique_schedule_names(self) -> Self:
        """Reject configurations in which two schedules share a name."""
        report = ValidationReport('schedule')
        names: list[str] = [s.name for s in self.schedules]
        if len(set(names)) < len(names):
            report.add(ConfigurationError(
                message='duplicate schedule names',
                code=ErrorCode.CONFIG_INVALID_SCHEDULE,
                notes=[f'schedule names: {names}'],
                help_text='each schedule must have a unique name',
            ))
        raise_collected(report)
        return self
@@ -0,0 +1,307 @@
1
+ from __future__ import annotations
2
+ from datetime import datetime, timezone
3
+ from typing import Any, Optional
4
+ from sqlalchemy import (
5
+ String,
6
+ Text,
7
+ Boolean,
8
+ DateTime,
9
+ Integer,
10
+ Enum as SQLAlchemyEnum,
11
+ Float,
12
+ ARRAY,
13
+ )
14
+ from sqlalchemy.orm import DeclarativeBase, Mapped, mapped_column
15
+ from sqlalchemy.dialects.postgresql import JSONB
16
+
17
+ from horsies.core.types.status import TaskStatus
18
+
19
+
20
class Base(DeclarativeBase):
    """Shared SQLAlchemy declarative base for all PostgreSQL models."""
24
+
25
+
26
class TaskModel(Base):
    """
    SQLAlchemy model for storing tasks in the database.

    - id: str # uuid4
    - task_name: str # name of the task, coming from the task decorator
    - queue_name: str # name of the queue, coming from the task decorator, defaulting to "default"
    - priority: int # 1..100, set by the queue's priority value, defaulting to 100, least important
    - args: str # task arguments, serialized as json
    - kwargs: str # task keyword arguments, serialized as json
    - status: TaskStatus # PENDING, CLAIMED, RUNNING, COMPLETED, FAILED
    - sent_at: datetime # when .send() was called on task
    - claimed_at: datetime # when task was claimed by a worker
    - started_at: datetime # when task actually started running in the worker's process
    - completed_at: datetime # when task was completed, set by the process
    - failed_at: datetime # when task failed, set by the process
    - result: str # task result, serialized as json
    - failed_reason: str # reason for task failure, serialized as json
    - claimed: bool # whether the task is claimed by a worker
    - claimed_by_worker_id: str # identifier of the worker holding the claim
    - claim_expires_at: datetime # when a prefetched claim expires and can be reclaimed
    - good_until: datetime # when the task will be considered expired and retried
    - retry_count: int # current number of retry attempts
    - max_retries: int # maximum number of retry attempts allowed
    - next_retry_at: datetime # when the task should be retried next
    - task_options: str # serialized TaskOptions configuration for retry policies
    - worker_pid: int # process ID of the worker executing this task
    - worker_hostname: str # hostname of the machine running the worker
    - worker_process_name: str # name/identifier of the worker process
    - created_at: datetime # when the task was created
    - updated_at: datetime # when the task was last updated
    """

    __tablename__ = 'horsies_tasks'

    # Basic task information
    id: Mapped[str] = mapped_column(String(36), primary_key=True)
    task_name: Mapped[str] = mapped_column(String(255), nullable=False)
    queue_name: Mapped[str] = mapped_column(String(100), nullable=False, index=True)
    priority: Mapped[int] = mapped_column(nullable=False, default=100)  # 1..100

    # Function arguments (stored as JSON)
    args: Mapped[Optional[str]] = mapped_column(Text, nullable=True)
    kwargs: Mapped[Optional[str]] = mapped_column(Text, nullable=True)

    # Task status and execution tracking
    status: Mapped[TaskStatus] = mapped_column(
        SQLAlchemyEnum(TaskStatus, native_enum=False),
        nullable=False,
        default=TaskStatus.PENDING,
        index=True,
    )

    # Timestamps
    sent_at: Mapped[Optional[datetime]] = mapped_column(
        DateTime(timezone=True), nullable=True
    )  # when .send() was called on task
    claimed_at: Mapped[Optional[datetime]] = mapped_column(
        DateTime(timezone=True), nullable=True
    )  # when the task was claimed by a worker
    started_at: Mapped[Optional[datetime]] = mapped_column(
        DateTime(timezone=True), nullable=True
    )
    completed_at: Mapped[Optional[datetime]] = mapped_column(
        DateTime(timezone=True), nullable=True
    )
    failed_at: Mapped[Optional[datetime]] = mapped_column(
        DateTime(timezone=True), nullable=True
    )

    # Results and error handling
    result: Mapped[Optional[str]] = mapped_column(Text, nullable=True)
    failed_reason: Mapped[Optional[str]] = mapped_column(Text, nullable=True)

    # Task claiming and lifecycle
    claimed: Mapped[bool] = mapped_column(Boolean, default=False, index=True)
    claimed_by_worker_id: Mapped[Optional[str]] = mapped_column(
        String(255), nullable=True
    )
    # Claim lease expiry: prefetched claims expire after this time and can be reclaimed.
    # NULL means no expiry (task is within running capacity, not prefetched).
    claim_expires_at: Mapped[Optional[datetime]] = mapped_column(
        DateTime(timezone=True), nullable=True, index=True
    )
    good_until: Mapped[Optional[datetime]] = mapped_column(
        DateTime(timezone=True), nullable=True, index=True
    )

    # Retry configuration and tracking
    retry_count: Mapped[int] = mapped_column(Integer, default=0, nullable=False)
    max_retries: Mapped[int] = mapped_column(Integer, default=0, nullable=False)
    next_retry_at: Mapped[Optional[datetime]] = mapped_column(
        DateTime(timezone=True), nullable=True, index=True
    )
    task_options: Mapped[Optional[str]] = mapped_column(Text, nullable=True)

    # Worker process tracking
    worker_pid: Mapped[Optional[int]] = mapped_column(Integer, nullable=True)
    worker_hostname: Mapped[Optional[str]] = mapped_column(String(255), nullable=True)
    worker_process_name: Mapped[Optional[str]] = mapped_column(
        String(255), nullable=True
    )

    # Metadata.
    # BUG FIX: defaults must be callables. `default=datetime.now(timezone.utc)`
    # evaluates once at import time, stamping every row with the process start
    # time; a lambda is re-evaluated per INSERT/UPDATE.
    created_at: Mapped[datetime] = mapped_column(
        DateTime(timezone=True), default=lambda: datetime.now(timezone.utc)
    )
    updated_at: Mapped[datetime] = mapped_column(
        DateTime(timezone=True),
        default=lambda: datetime.now(timezone.utc),
        onupdate=lambda: datetime.now(timezone.utc),
    )
137
+
138
+
139
class TaskHeartbeatModel(Base):
    """Normalized heartbeat entries for task liveness tracking.

    One row per heartbeat; `sender_id`/`role` identify who emitted it.
    """

    __tablename__ = 'horsies_heartbeats'

    id: Mapped[int] = mapped_column(Integer, primary_key=True, autoincrement=True)
    task_id: Mapped[str] = mapped_column(String(36), nullable=False)
    sender_id: Mapped[str] = mapped_column(String(255), nullable=False)
    role: Mapped[str] = mapped_column(String(20), nullable=False)
    # BUG FIX: default must be a callable — the original evaluated
    # datetime.now(timezone.utc) once at import time, so every heartbeat
    # row would carry the process-start timestamp.
    sent_at: Mapped[datetime] = mapped_column(
        DateTime(timezone=True), default=lambda: datetime.now(timezone.utc), nullable=False
    )
    hostname: Mapped[Optional[str]] = mapped_column(String(255), nullable=True)
    pid: Mapped[Optional[int]] = mapped_column(Integer, nullable=True)
153
+
154
+
155
class WorkerStateModel(Base):
    """Worker instance state tracking for monitoring and cluster management.

    Timeseries table - each row is a snapshot taken every 5 seconds.
    Enables historical analysis and trend visualization for TUI/web monitoring.

    Retention: Recommend deleting snapshots older than 7-30 days to prevent unbounded growth.
    """

    __tablename__ = 'horsies_worker_states'

    # Timeseries primary key
    id: Mapped[int] = mapped_column(Integer, primary_key=True, autoincrement=True)
    worker_id: Mapped[str] = mapped_column(String(255), nullable=False, index=True)
    # BUG FIX: default must be a callable; the original bound a single
    # import-time datetime.now(timezone.utc) value to every snapshot row.
    snapshot_at: Mapped[datetime] = mapped_column(
        DateTime(timezone=True),
        nullable=False,
        index=True,
        default=lambda: datetime.now(timezone.utc),
    )

    # Identity
    hostname: Mapped[str] = mapped_column(String(255), nullable=False)
    pid: Mapped[int] = mapped_column(Integer, nullable=False)

    # Configuration snapshot (from WorkerConfig)
    processes: Mapped[int] = mapped_column(Integer, nullable=False)
    max_claim_batch: Mapped[int] = mapped_column(Integer, nullable=False)
    max_claim_per_worker: Mapped[int] = mapped_column(Integer, nullable=False)
    cluster_wide_cap: Mapped[Optional[int]] = mapped_column(Integer, nullable=True)
    queues: Mapped[list[str]] = mapped_column(ARRAY(String), nullable=False)

    # Queue configuration (CUSTOM mode)
    queue_priorities: Mapped[Optional[dict[str, int]]] = mapped_column(
        JSONB, nullable=True
    )
    queue_max_concurrency: Mapped[Optional[dict[str, int]]] = mapped_column(
        JSONB, nullable=True
    )

    # Recovery configuration snapshot
    recovery_config: Mapped[Optional[dict[str, Any]]] = mapped_column(
        JSONB, nullable=True
    )

    # Current load at snapshot time
    tasks_running: Mapped[int] = mapped_column(
        Integer, default=0, nullable=False, index=True
    )
    tasks_claimed: Mapped[int] = mapped_column(
        Integer, default=0, nullable=False, index=True
    )

    # System metrics (via psutil)
    memory_usage_mb: Mapped[Optional[float]] = mapped_column(Float, nullable=True)
    memory_percent: Mapped[Optional[float]] = mapped_column(Float, nullable=True)
    cpu_percent: Mapped[Optional[float]] = mapped_column(Float, nullable=True)

    # Worker lifecycle metadata
    worker_started_at: Mapped[datetime] = mapped_column(
        DateTime(timezone=True), nullable=False
    )
217
+
218
+
219
class ScheduleStateModel(Base):
    """Schedule execution state tracking for scheduler service.

    Tracks the last and next execution times for each scheduled task.
    Ensures schedules don't run multiple times and enables catch-up logic.

    Fields:
    - schedule_name: Unique schedule identifier (from TaskSchedule.name)
    - last_run_at: When the schedule last executed successfully
    - next_run_at: When the schedule should run next (calculated by scheduler)
    - last_task_id: Task ID of the most recently enqueued task
    - run_count: Total number of times this schedule has executed
    - config_hash: Hash of schedule configuration (pattern + timezone) for change detection
    - updated_at: Last state update timestamp
    """

    __tablename__ = 'horsies_schedule_state'

    schedule_name: Mapped[str] = mapped_column(String(255), primary_key=True)
    last_run_at: Mapped[Optional[datetime]] = mapped_column(
        DateTime(timezone=True), nullable=True
    )
    next_run_at: Mapped[Optional[datetime]] = mapped_column(
        DateTime(timezone=True), nullable=True, index=True
    )
    last_task_id: Mapped[Optional[str]] = mapped_column(String(36), nullable=True)
    run_count: Mapped[int] = mapped_column(Integer, default=0, nullable=False)
    config_hash: Mapped[Optional[str]] = mapped_column(String(64), nullable=True)
    # BUG FIX: default/onupdate must be callables — the original evaluated
    # datetime.now(timezone.utc) once at import, freezing the timestamp.
    updated_at: Mapped[datetime] = mapped_column(
        DateTime(timezone=True),
        default=lambda: datetime.now(timezone.utc),
        onupdate=lambda: datetime.now(timezone.utc),
        nullable=False,
    )
253
+
254
+
255
+ """
256
+ NOTE:
257
+ - When a task returns a `TaskError`, the task is not necessarily failed.
258
+ It could be that running the task itself was successful from the library's perspective,
259
+ but the task function itself wanted to return an error for a given condition.
260
+
261
+ Scenario 1:
262
+ ```python
263
+ @app.task()
264
+ def task_function(input: int) -> TaskResult[int, TaskError]:
265
+ if input < 0:
266
+ return TaskResult(err=TaskError(
267
+ error_code="NEGATIVE_INPUT",
268
+ message="Input cannot be negative",
269
+ ))
270
+ return TaskResult(ok=input * 2)
271
+
272
+ # In this case, task did not fail, rather returns an error as expected.
273
+ handle = task_function.send(-1) # This will return a TaskError
274
+ result = handle.get()
275
+ ```
276
+
277
+ Scenario 2:
278
+ ```python
279
+ class Dog:
280
+ def __init__(self, name: str):
281
+ self.name = name
282
+
283
+ def return_dog(input: str) -> Dog:
284
+ return Dog(input)
285
+
286
+
287
+ @app.task()
288
+ def example_task(input: str) -> TaskResult[Dog, TaskError]:
289
+ try:
290
+ unserializable_return = return_dog(input)
291
+ return TaskResult(ok=unserializable_return)
292
+ except Exception as e:
293
+ return TaskResult(err=TaskError(
294
+ error_code="YOUR_ERROR_CODE", # this is still the case you as the developer, catch in the task function
295
+ message="Your error message",
296
+ data={"error": str(e)},
297
+ ))
298
+
299
+ handle = example_task.send("Rex")
300
+ result = handle.get()
301
+ if result.is_err():
302
+ print(f"✓ Task execution failed: {result.unwrap_err()}") # Since a python class of Dog is not serializable by the library,
303
+ # this will return a TaskError with error_code="WORKER_SERIALIZATION_ERROR" and message="Cannot serialize value of type Dog"
304
+ # And the task will be marked as failed.
305
+ ```
306
+
307
+ """