hatchet-sdk 1.0.0__py3-none-any.whl → 1.0.0a1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of hatchet-sdk might be problematic.

Files changed (65)
  1. hatchet_sdk/__init__.py +27 -16
  2. hatchet_sdk/client.py +13 -63
  3. hatchet_sdk/clients/admin.py +203 -124
  4. hatchet_sdk/clients/dispatcher/action_listener.py +42 -42
  5. hatchet_sdk/clients/dispatcher/dispatcher.py +18 -16
  6. hatchet_sdk/clients/durable_event_listener.py +327 -0
  7. hatchet_sdk/clients/rest/__init__.py +12 -1
  8. hatchet_sdk/clients/rest/api/log_api.py +258 -0
  9. hatchet_sdk/clients/rest/api/task_api.py +32 -6
  10. hatchet_sdk/clients/rest/api/workflow_runs_api.py +626 -0
  11. hatchet_sdk/clients/rest/models/__init__.py +12 -1
  12. hatchet_sdk/clients/rest/models/v1_log_line.py +94 -0
  13. hatchet_sdk/clients/rest/models/v1_log_line_level.py +39 -0
  14. hatchet_sdk/clients/rest/models/v1_log_line_list.py +110 -0
  15. hatchet_sdk/clients/rest/models/v1_task_summary.py +80 -64
  16. hatchet_sdk/clients/rest/models/v1_trigger_workflow_run_request.py +95 -0
  17. hatchet_sdk/clients/rest/models/v1_workflow_run_display_name.py +98 -0
  18. hatchet_sdk/clients/rest/models/v1_workflow_run_display_name_list.py +114 -0
  19. hatchet_sdk/clients/rest/models/workflow_run_shape_item_for_workflow_run_details.py +9 -4
  20. hatchet_sdk/clients/rest_client.py +21 -0
  21. hatchet_sdk/clients/run_event_listener.py +0 -1
  22. hatchet_sdk/context/context.py +85 -147
  23. hatchet_sdk/contracts/dispatcher_pb2_grpc.py +1 -1
  24. hatchet_sdk/contracts/events_pb2.py +2 -2
  25. hatchet_sdk/contracts/events_pb2_grpc.py +1 -1
  26. hatchet_sdk/contracts/v1/dispatcher_pb2.py +36 -0
  27. hatchet_sdk/contracts/v1/dispatcher_pb2.pyi +38 -0
  28. hatchet_sdk/contracts/v1/dispatcher_pb2_grpc.py +145 -0
  29. hatchet_sdk/contracts/v1/shared/condition_pb2.py +39 -0
  30. hatchet_sdk/contracts/v1/shared/condition_pb2.pyi +72 -0
  31. hatchet_sdk/contracts/v1/shared/condition_pb2_grpc.py +29 -0
  32. hatchet_sdk/contracts/v1/workflows_pb2.py +67 -0
  33. hatchet_sdk/contracts/v1/workflows_pb2.pyi +228 -0
  34. hatchet_sdk/contracts/v1/workflows_pb2_grpc.py +234 -0
  35. hatchet_sdk/contracts/workflows_pb2_grpc.py +1 -1
  36. hatchet_sdk/features/cron.py +3 -3
  37. hatchet_sdk/features/scheduled.py +2 -2
  38. hatchet_sdk/hatchet.py +427 -151
  39. hatchet_sdk/opentelemetry/instrumentor.py +8 -13
  40. hatchet_sdk/rate_limit.py +33 -39
  41. hatchet_sdk/runnables/contextvars.py +12 -0
  42. hatchet_sdk/runnables/standalone.py +194 -0
  43. hatchet_sdk/runnables/task.py +144 -0
  44. hatchet_sdk/runnables/types.py +138 -0
  45. hatchet_sdk/runnables/workflow.py +764 -0
  46. hatchet_sdk/utils/aio_utils.py +0 -79
  47. hatchet_sdk/utils/proto_enums.py +0 -7
  48. hatchet_sdk/utils/timedelta_to_expression.py +23 -0
  49. hatchet_sdk/utils/typing.py +2 -2
  50. hatchet_sdk/v0/clients/rest_client.py +9 -0
  51. hatchet_sdk/v0/worker/action_listener_process.py +18 -2
  52. hatchet_sdk/waits.py +120 -0
  53. hatchet_sdk/worker/action_listener_process.py +64 -30
  54. hatchet_sdk/worker/runner/run_loop_manager.py +35 -25
  55. hatchet_sdk/worker/runner/runner.py +72 -49
  56. hatchet_sdk/worker/runner/utils/capture_logs.py +3 -11
  57. hatchet_sdk/worker/worker.py +155 -118
  58. hatchet_sdk/workflow_run.py +4 -5
  59. {hatchet_sdk-1.0.0.dist-info → hatchet_sdk-1.0.0a1.dist-info}/METADATA +1 -2
  60. {hatchet_sdk-1.0.0.dist-info → hatchet_sdk-1.0.0a1.dist-info}/RECORD +62 -42
  61. {hatchet_sdk-1.0.0.dist-info → hatchet_sdk-1.0.0a1.dist-info}/entry_points.txt +2 -0
  62. hatchet_sdk/semver.py +0 -30
  63. hatchet_sdk/worker/runner/utils/error_with_traceback.py +0 -6
  64. hatchet_sdk/workflow.py +0 -527
  65. {hatchet_sdk-1.0.0.dist-info → hatchet_sdk-1.0.0a1.dist-info}/WHEEL +0 -0
hatchet_sdk/runnables/workflow.py (new file)
@@ -0,0 +1,764 @@
+ import asyncio
+ from datetime import datetime
+ from typing import TYPE_CHECKING, Any, Callable, Generic, Union, cast, overload
+
+ from google.protobuf import timestamp_pb2
+ from pydantic import BaseModel
+
+ from hatchet_sdk.clients.admin import (
+     ScheduleTriggerWorkflowOptions,
+     TriggerWorkflowOptions,
+     WorkflowRunTriggerConfig,
+ )
+ from hatchet_sdk.clients.rest.models.cron_workflows import CronWorkflows
+ from hatchet_sdk.context.context import Context, DurableContext
+ from hatchet_sdk.contracts.v1.shared.condition_pb2 import TaskConditions
+ from hatchet_sdk.contracts.v1.workflows_pb2 import (
+     Concurrency,
+     CreateTaskOpts,
+     CreateWorkflowVersionRequest,
+     DesiredWorkerLabels,
+ )
+ from hatchet_sdk.contracts.v1.workflows_pb2 import StickyStrategy as StickyStrategyProto
+ from hatchet_sdk.contracts.workflows_pb2 import WorkflowVersion
+ from hatchet_sdk.labels import DesiredWorkerLabel
+ from hatchet_sdk.logger import logger
+ from hatchet_sdk.rate_limit import RateLimit
+ from hatchet_sdk.runnables.task import Task
+ from hatchet_sdk.runnables.types import (
+     DEFAULT_EXECUTION_TIMEOUT,
+     DEFAULT_SCHEDULE_TIMEOUT,
+     ConcurrencyExpression,
+     R,
+     StepType,
+     TWorkflowInput,
+     WorkflowConfig,
+ )
+ from hatchet_sdk.utils.proto_enums import convert_python_enum_to_proto
+ from hatchet_sdk.utils.timedelta_to_expression import Duration, timedelta_to_expr
+ from hatchet_sdk.utils.typing import JSONSerializableMapping
+ from hatchet_sdk.waits import (
+     Action,
+     Condition,
+     OrGroup,
+     ParentCondition,
+     SleepCondition,
+     UserEventCondition,
+ )
+ from hatchet_sdk.workflow_run import WorkflowRunRef
+
+ if TYPE_CHECKING:
+     from hatchet_sdk import Hatchet
+     from hatchet_sdk.runnables.standalone import Standalone
+
+
+ def transform_desired_worker_label(d: DesiredWorkerLabel) -> DesiredWorkerLabels:
+     value = d.value
+     return DesiredWorkerLabels(
+         strValue=value if not isinstance(value, int) else None,
+         intValue=value if isinstance(value, int) else None,
+         required=d.required,
+         weight=d.weight,
+         comparator=d.comparator,  # type: ignore[arg-type]
+     )
+
+
+ class TypedTriggerWorkflowRunConfig(BaseModel, Generic[TWorkflowInput]):
+     input: TWorkflowInput
+     options: TriggerWorkflowOptions
+
+
+ class BaseWorkflow(Generic[TWorkflowInput]):
+     def __init__(self, config: WorkflowConfig, client: "Hatchet") -> None:
+         self.config = config
+         self._default_tasks: list[Task[TWorkflowInput, Any]] = []
+         self._durable_tasks: list[Task[TWorkflowInput, Any]] = []
+         self._on_failure_task: Task[TWorkflowInput, Any] | None = None
+         self._on_success_task: Task[TWorkflowInput, Any] | None = None
+         self.client = client
+
+     def _get_service_name(self, namespace: str) -> str:
+         return f"{namespace}{self.config.name.lower()}"
+
+     def _create_action_name(
+         self, namespace: str, step: Task[TWorkflowInput, Any]
+     ) -> str:
+         return self._get_service_name(namespace) + ":" + step.name
+
+     def _get_name(self, namespace: str) -> str:
+         return namespace + self.config.name
+
+     def _raise_for_invalid_concurrency(
+         self, concurrency: ConcurrencyExpression
+     ) -> bool:
+         expr = concurrency.expression
+
+         if not expr.startswith("input."):
+             return True
+
+         # maxsplit=1 so nested expressions like "input.a.b" unpack cleanly
+         # and fail the field check below instead of raising on unpacking.
+         _, field = expr.split(".", maxsplit=1)
+
+         if field not in self.config.input_validator.model_fields.keys():
+             raise ValueError(
+                 f"The concurrency expression provided relies on the `{field}` field, which was not present in `{self.config.input_validator.__name__}`."
+             )
+
+         return True
+
+     @overload
+     def _concurrency_to_proto(self, concurrency: None) -> None: ...
+
+     @overload
+     def _concurrency_to_proto(
+         self, concurrency: ConcurrencyExpression
+     ) -> Concurrency: ...
+
+     def _concurrency_to_proto(
+         self, concurrency: ConcurrencyExpression | None
+     ) -> Concurrency | None:
+         if not concurrency:
+             return None
+
+         self._raise_for_invalid_concurrency(concurrency)
+
+         return Concurrency(
+             expression=concurrency.expression,
+             max_runs=concurrency.max_runs,
+             limit_strategy=concurrency.limit_strategy,
+         )
+
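`_raise_for_invalid_concurrency` only validates expressions that start with `input.` against the input model's fields; any other CEL expression passes through unchecked. A minimal sketch of a valid expression follows, assuming `ConcurrencyExpression` is re-exported from the SDK root and a `ConcurrencyLimitStrategy` enum with a `GROUP_ROUND_ROBIN` member exists (both are assumptions; this diff only shows `hatchet_sdk.runnables.types`):

```python
from pydantic import BaseModel

# Assumed import paths, not confirmed by this diff.
from hatchet_sdk import ConcurrencyExpression, ConcurrencyLimitStrategy


class OrderInput(BaseModel):
    customer_id: str


# "customer_id" is a field of OrderInput, so _raise_for_invalid_concurrency accepts it.
concurrency = ConcurrencyExpression(
    expression="input.customer_id",
    max_runs=1,
    limit_strategy=ConcurrencyLimitStrategy.GROUP_ROUND_ROBIN,
)
```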
+     @overload
+     def _validate_task(
+         self, task: "Task[TWorkflowInput, R]", service_name: str
+     ) -> CreateTaskOpts: ...
+
+     @overload
+     def _validate_task(self, task: None, service_name: str) -> None: ...
+
+     def _validate_task(
+         self, task: Union["Task[TWorkflowInput, R]", None], service_name: str
+     ) -> CreateTaskOpts | None:
+         if not task:
+             return None
+
+         return CreateTaskOpts(
+             readable_id=task.name,
+             action=service_name + ":" + task.name,
+             timeout=timedelta_to_expr(task.execution_timeout),
+             inputs="{}",
+             parents=[p.name for p in task.parents],
+             retries=task.retries,
+             rate_limits=task.rate_limits,
+             worker_labels=task.desired_worker_labels,
+             backoff_factor=task.backoff_factor,
+             backoff_max_seconds=task.backoff_max_seconds,
+             concurrency=[self._concurrency_to_proto(t) for t in task.concurrency],
+             conditions=self._conditions_to_proto(task),
+             schedule_timeout=timedelta_to_expr(task.schedule_timeout),
+         )
+
+     def _validate_priority(self, default_priority: int | None) -> int | None:
+         validated_priority = (
+             max(1, min(3, default_priority)) if default_priority else None
+         )
+         if validated_priority != default_priority:
+             logger.warning(
+                 "Default priority must be between 1 and 3 (inclusive); adjusted to be within the range."
+             )
+
+         return validated_priority
+
+     def _assign_action(self, condition: Condition, action: Action) -> Condition:
+         condition.base.action = action
+
+         return condition
+
+     def _conditions_to_proto(self, task: Task[TWorkflowInput, Any]) -> TaskConditions:
+         wait_for_conditions = [
+             self._assign_action(w, Action.QUEUE) for w in task.wait_for
+         ]
+
+         cancel_if_conditions = [
+             self._assign_action(c, Action.CANCEL) for c in task.cancel_if
+         ]
+         skip_if_conditions = [self._assign_action(s, Action.SKIP) for s in task.skip_if]
+
+         conditions = wait_for_conditions + cancel_if_conditions + skip_if_conditions
+
+         if len({c.base.readable_data_key for c in conditions}) != len(
+             [c.base.readable_data_key for c in conditions]
+         ):
+             raise ValueError("Conditions must have unique readable data keys.")
+
+         user_events = [
+             c.to_pb() for c in conditions if isinstance(c, UserEventCondition)
+         ]
+         parent_overrides = [
+             c.to_pb() for c in conditions if isinstance(c, ParentCondition)
+         ]
+         sleep_conditions = [
+             c.to_pb() for c in conditions if isinstance(c, SleepCondition)
+         ]
+
+         return TaskConditions(
+             parent_override_conditions=parent_overrides,
+             sleep_conditions=sleep_conditions,
+             user_event_conditions=user_events,
+         )
+
+     def _is_leaf_task(self, task: Task[TWorkflowInput, Any]) -> bool:
+         return not any(task in t.parents for t in self.tasks if task != t)
+
+     def _get_create_opts(self, namespace: str) -> CreateWorkflowVersionRequest:
+         service_name = self._get_service_name(namespace)
+
+         name = self._get_name(namespace)
+         event_triggers = [namespace + event for event in self.config.on_events]
+
+         if self._on_success_task:
+             self._on_success_task.parents = [
+                 task
+                 for task in self.tasks
+                 if task.type == StepType.DEFAULT and self._is_leaf_task(task)
+             ]
+
+         on_success_task = self._validate_task(self._on_success_task, service_name)
+
+         tasks = [
+             self._validate_task(task, service_name)
+             for task in self.tasks
+             if task.type == StepType.DEFAULT
+         ]
+
+         if on_success_task:
+             tasks += [on_success_task]
+
+         on_failure_task = self._validate_task(self._on_failure_task, service_name)
+
+         return CreateWorkflowVersionRequest(
+             name=name,
+             description=self.config.description,
+             version=self.config.version,
+             event_triggers=event_triggers,
+             cron_triggers=self.config.on_crons,
+             tasks=tasks,
+             concurrency=self._concurrency_to_proto(self.config.concurrency),
+             ## TODO: Fix this
+             cron_input=None,
+             on_failure_task=on_failure_task,
+             sticky=convert_python_enum_to_proto(self.config.sticky, StickyStrategyProto),  # type: ignore[arg-type]
+         )
+
+     def _get_workflow_input(self, ctx: Context) -> TWorkflowInput:
+         return cast(
+             TWorkflowInput,
+             self.config.input_validator.model_validate(ctx.workflow_input),
+         )
+
+     @property
+     def tasks(self) -> list[Task[TWorkflowInput, Any]]:
+         tasks = self._default_tasks + self._durable_tasks
+
+         if self._on_failure_task:
+             tasks += [self._on_failure_task]
+
+         if self._on_success_task:
+             tasks += [self._on_success_task]
+
+         return tasks
+
+     @property
+     def is_durable(self) -> bool:
+         return any(task.is_durable for task in self.tasks)
+
+     def create_run_workflow_config(
+         self,
+         input: TWorkflowInput | None = None,
+         key: str | None = None,
+         options: TriggerWorkflowOptions = TriggerWorkflowOptions(),
+     ) -> WorkflowRunTriggerConfig:
+         return WorkflowRunTriggerConfig(
+             workflow_name=self.config.name,
+             input=input.model_dump() if input else {},
+             options=options,
+             key=key,
+         )
+
+
+ class Workflow(BaseWorkflow[TWorkflowInput]):
+     """
+     A Hatchet workflow, which allows you to define tasks to be run and to perform actions
+     on the workflow, such as running / spawning children and scheduling future runs.
+     """
+
+     def run_no_wait(
+         self,
+         input: TWorkflowInput | None = None,
+         options: TriggerWorkflowOptions = TriggerWorkflowOptions(),
+     ) -> WorkflowRunRef:
+         return self.client.admin.run_workflow(
+             workflow_name=self.config.name,
+             input=input.model_dump() if input else {},
+             options=options,
+         )
+
+     def run(
+         self,
+         input: TWorkflowInput | None = None,
+         options: TriggerWorkflowOptions = TriggerWorkflowOptions(),
+     ) -> dict[str, Any]:
+         ref = self.client.admin.run_workflow(
+             workflow_name=self.config.name,
+             input=input.model_dump() if input else {},
+             options=options,
+         )
+
+         return ref.result()
+
+     async def aio_run_no_wait(
+         self,
+         input: TWorkflowInput | None = None,
+         options: TriggerWorkflowOptions = TriggerWorkflowOptions(),
+     ) -> WorkflowRunRef:
+         return await self.client.admin.aio_run_workflow(
+             workflow_name=self.config.name,
+             input=input.model_dump() if input else {},
+             options=options,
+         )
+
+     async def aio_run(
+         self,
+         input: TWorkflowInput | None = None,
+         options: TriggerWorkflowOptions = TriggerWorkflowOptions(),
+     ) -> dict[str, Any]:
+         ref = await self.client.admin.aio_run_workflow(
+             workflow_name=self.config.name,
+             input=input.model_dump() if input else {},
+             options=options,
+         )
+
+         return await ref.aio_result()
+
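The four methods above are the blocking / fire-and-forget and sync / async variants of the same trigger. A usage sketch, assuming a configured `Hatchet` client whose `workflow(...)` factory accepts a name and an `input_validator` (the factory signature is an assumption; only `run` and `run_no_wait` come from this file):

```python
from pydantic import BaseModel


class GreetInput(BaseModel):
    name: str


# `hatchet` is assumed to be a configured Hatchet client.
wf = hatchet.workflow(name="greeter", input_validator=GreetInput)

result = wf.run(GreetInput(name="ada"))         # blocks until the run completes
ref = wf.run_no_wait(GreetInput(name="grace"))  # returns a WorkflowRunRef immediately
print(ref.result())                             # block later, when the result is needed
```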
+     def run_many(
+         self,
+         workflows: list[WorkflowRunTriggerConfig],
+     ) -> list[dict[str, Any]]:
+         refs = self.client.admin.run_workflows(
+             workflows=workflows,
+         )
+
+         return [ref.result() for ref in refs]
+
+     async def aio_run_many(
+         self,
+         workflows: list[WorkflowRunTriggerConfig],
+     ) -> list[dict[str, Any]]:
+         refs = await self.client.admin.aio_run_workflows(
+             workflows=workflows,
+         )
+
+         return await asyncio.gather(*[ref.aio_result() for ref in refs])
+
+     def run_many_no_wait(
+         self,
+         workflows: list[WorkflowRunTriggerConfig],
+     ) -> list[WorkflowRunRef]:
+         return self.client.admin.run_workflows(
+             workflows=workflows,
+         )
+
+     async def aio_run_many_no_wait(
+         self,
+         workflows: list[WorkflowRunTriggerConfig],
+     ) -> list[WorkflowRunRef]:
+         return await self.client.admin.aio_run_workflows(
+             workflows=workflows,
+         )
+
+     def schedule(
+         self,
+         schedules: list[datetime],
+         input: TWorkflowInput | None = None,
+         options: ScheduleTriggerWorkflowOptions = ScheduleTriggerWorkflowOptions(),
+     ) -> WorkflowVersion:
+         return self.client.admin.schedule_workflow(
+             name=self.config.name,
+             schedules=cast(list[datetime | timestamp_pb2.Timestamp], schedules),
+             input=input.model_dump() if input else {},
+             options=options,
+         )
+
+     async def aio_schedule(
+         self,
+         schedules: list[datetime | timestamp_pb2.Timestamp],
+         input: TWorkflowInput,
+         options: ScheduleTriggerWorkflowOptions = ScheduleTriggerWorkflowOptions(),
+     ) -> WorkflowVersion:
+         return await self.client.admin.aio_schedule_workflow(
+             name=self.config.name,
+             schedules=schedules,
+             input=input.model_dump(),
+             options=options,
+         )
+
+     def create_cron(
+         self,
+         cron_name: str,
+         expression: str,
+         input: TWorkflowInput,
+         additional_metadata: JSONSerializableMapping,
+     ) -> CronWorkflows:
+         return self.client.cron.create(
+             workflow_name=self.config.name,
+             cron_name=cron_name,
+             expression=expression,
+             input=input.model_dump(),
+             additional_metadata=additional_metadata,
+         )
+
+     async def aio_create_cron(
+         self,
+         cron_name: str,
+         expression: str,
+         input: TWorkflowInput,
+         additional_metadata: JSONSerializableMapping,
+     ) -> CronWorkflows:
+         return await self.client.cron.aio_create(
+             workflow_name=self.config.name,
+             cron_name=cron_name,
+             expression=expression,
+             input=input.model_dump(),
+             additional_metadata=additional_metadata,
+         )
+
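`schedule` and `create_cron` pass the workflow's own name through to the admin and cron clients, so callers only supply the trigger details. A sketch, reusing the hypothetical `wf` and `GreetInput` from the earlier example:

```python
from datetime import datetime, timedelta, timezone

# One-off run an hour from now.
wf.schedule(
    schedules=[datetime.now(timezone.utc) + timedelta(hours=1)],
    input=GreetInput(name="ada"),
)

# Recurring run every day at midnight UTC.
wf.create_cron(
    cron_name="nightly-greeting",
    expression="0 0 * * *",
    input=GreetInput(name="ada"),
    additional_metadata={},
)
```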
+     def _parse_task_name(
+         self,
+         name: str | None,
+         func: (
+             Callable[[TWorkflowInput, Context], R]
+             | Callable[[TWorkflowInput, DurableContext], R]
+         ),
+     ) -> str:
+         non_null_name = name or func.__name__
+
+         return non_null_name.lower()
+
+     def task(
+         self,
+         name: str | None = None,
+         schedule_timeout: Duration = DEFAULT_SCHEDULE_TIMEOUT,
+         execution_timeout: Duration = DEFAULT_EXECUTION_TIMEOUT,
+         parents: list[Task[TWorkflowInput, Any]] = [],
+         retries: int = 0,
+         rate_limits: list[RateLimit] = [],
+         desired_worker_labels: dict[str, DesiredWorkerLabel] = {},
+         backoff_factor: float | None = None,
+         backoff_max_seconds: int | None = None,
+         concurrency: list[ConcurrencyExpression] = [],
+         wait_for: list[Condition | OrGroup] = [],
+         skip_if: list[Condition | OrGroup] = [],
+         cancel_if: list[Condition | OrGroup] = [],
+     ) -> Callable[[Callable[[TWorkflowInput, Context], R]], Task[TWorkflowInput, R]]:
+         """
+         A decorator to transform a function into a Hatchet task that runs as part of a workflow.
+
+         :param name: The name of the task. If not specified, defaults to the name of the function being wrapped by the `task` decorator.
+         :type name: str | None
+
+         :param execution_timeout: The execution timeout of the task. Defaults to 60 minutes.
+         :type execution_timeout: datetime.timedelta | str
+
+         :param parents: A list of tasks that are parents of the task. Note: Parents must be defined before their children. Defaults to an empty list (no parents).
+         :type parents: list[Task]
+
+         :param retries: The number of times to retry the task before failing. Default: `0`
+         :type retries: int
+
+         :param rate_limits: A list of rate limit configurations for the task. Defaults to an empty list (no rate limits).
+         :type rate_limits: list[RateLimit]
+
+         :param desired_worker_labels: A dictionary of desired worker labels that determine to which worker the task should be assigned. See documentation and examples on affinity and worker labels for more details. Defaults to an empty dictionary (no desired worker labels).
+         :type desired_worker_labels: dict[str, DesiredWorkerLabel]
+
+         :param backoff_factor: The backoff factor for controlling exponential backoff in retries. Default: `None`
+         :type backoff_factor: float | None
+
+         :param backoff_max_seconds: The maximum number of seconds to allow retries with exponential backoff to continue. Default: `None`
+         :type backoff_max_seconds: int | None
+
+         :returns: A decorator which creates a `Task` object.
+         :rtype: Callable[[Callable[[Type[BaseModel], Context], R]], Task[Type[BaseModel], R]]
+         """
+
+         def inner(
+             func: Callable[[TWorkflowInput, Context], R]
+         ) -> Task[TWorkflowInput, R]:
+             task = Task(
+                 _fn=func,
+                 is_durable=False,
+                 workflow=self,
+                 type=StepType.DEFAULT,
+                 name=self._parse_task_name(name, func),
+                 execution_timeout=execution_timeout,
+                 schedule_timeout=schedule_timeout,
+                 parents=parents,
+                 retries=retries,
+                 rate_limits=[r.to_proto() for r in rate_limits],
+                 desired_worker_labels={
+                     key: transform_desired_worker_label(d)
+                     for key, d in desired_worker_labels.items()
+                 },
+                 backoff_factor=backoff_factor,
+                 backoff_max_seconds=backoff_max_seconds,
+                 concurrency=concurrency,
+                 wait_for=wait_for,
+                 skip_if=skip_if,
+                 cancel_if=cancel_if,
+             )
+
+             self._default_tasks.append(task)
+
+             return task
+
+         return inner
+
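Because the decorator returns the `Task` itself, parent tasks can be referenced directly, which is why the docstring insists parents be defined before their children. A sketch against the hypothetical `wf` above (`Context` is imported from `hatchet_sdk.context.context` in this file; the root-level re-export is an assumption):

```python
from datetime import timedelta
from typing import Any

from hatchet_sdk import Context  # assumed re-export


@wf.task()
def fetch(input: GreetInput, ctx: Context) -> dict[str, Any]:
    return {"greeting": f"Hello, {input.name}!"}


# `fetch` is now a Task object, so it can be passed as a parent.
@wf.task(parents=[fetch], retries=3, execution_timeout=timedelta(minutes=5))
def send(input: GreetInput, ctx: Context) -> None:
    ...
```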
+     def durable_task(
+         self,
+         name: str | None = None,
+         schedule_timeout: Duration = DEFAULT_SCHEDULE_TIMEOUT,
+         execution_timeout: Duration = DEFAULT_EXECUTION_TIMEOUT,
+         parents: list[Task[TWorkflowInput, Any]] = [],
+         retries: int = 0,
+         rate_limits: list[RateLimit] = [],
+         desired_worker_labels: dict[str, DesiredWorkerLabel] = {},
+         backoff_factor: float | None = None,
+         backoff_max_seconds: int | None = None,
+         concurrency: list[ConcurrencyExpression] = [],
+         wait_for: list[Condition | OrGroup] = [],
+         skip_if: list[Condition | OrGroup] = [],
+         cancel_if: list[Condition | OrGroup] = [],
+     ) -> Callable[
+         [Callable[[TWorkflowInput, DurableContext], R]], Task[TWorkflowInput, R]
+     ]:
+         """
+         A decorator to transform a function into a durable Hatchet task that runs as part of a workflow.
+
+         **IMPORTANT:** This decorator creates a _durable_ task, which works using Hatchet's durable execution capabilities. This is an advanced feature of Hatchet.
+
+         See the Hatchet docs for more information on durable execution to decide if this is right for you.
+
+         :param name: The name of the task. If not specified, defaults to the name of the function being wrapped by the `durable_task` decorator.
+         :type name: str | None
+
+         :param execution_timeout: The execution timeout of the task. Defaults to 60 minutes.
+         :type execution_timeout: datetime.timedelta | str
+
+         :param parents: A list of tasks that are parents of the task. Note: Parents must be defined before their children. Defaults to an empty list (no parents).
+         :type parents: list[Task]
+
+         :param retries: The number of times to retry the task before failing. Default: `0`
+         :type retries: int
+
+         :param rate_limits: A list of rate limit configurations for the task. Defaults to an empty list (no rate limits).
+         :type rate_limits: list[RateLimit]
+
+         :param desired_worker_labels: A dictionary of desired worker labels that determine to which worker the task should be assigned. See documentation and examples on affinity and worker labels for more details. Defaults to an empty dictionary (no desired worker labels).
+         :type desired_worker_labels: dict[str, DesiredWorkerLabel]
+
+         :param backoff_factor: The backoff factor for controlling exponential backoff in retries. Default: `None`
+         :type backoff_factor: float | None
+
+         :param backoff_max_seconds: The maximum number of seconds to allow retries with exponential backoff to continue. Default: `None`
+         :type backoff_max_seconds: int | None
+
+         :returns: A decorator which creates a `Task` object.
+         :rtype: Callable[[Callable[[Type[BaseModel], DurableContext], R]], Task[Type[BaseModel], R]]
+         """
+
+         def inner(
+             func: Callable[[TWorkflowInput, DurableContext], R]
+         ) -> Task[TWorkflowInput, R]:
+             task = Task(
+                 _fn=func,
+                 is_durable=True,
+                 workflow=self,
+                 type=StepType.DEFAULT,
+                 name=self._parse_task_name(name, func),
+                 execution_timeout=execution_timeout,
+                 schedule_timeout=schedule_timeout,
+                 parents=parents,
+                 retries=retries,
+                 rate_limits=[r.to_proto() for r in rate_limits],
+                 desired_worker_labels={
+                     key: transform_desired_worker_label(d)
+                     for key, d in desired_worker_labels.items()
+                 },
+                 backoff_factor=backoff_factor,
+                 backoff_max_seconds=backoff_max_seconds,
+                 concurrency=concurrency,
+                 wait_for=wait_for,
+                 skip_if=skip_if,
+                 cancel_if=cancel_if,
+             )
+
+             self._durable_tasks.append(task)
+
+             return task
+
+         return inner
+
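Declaring a durable task looks identical except for the `DurableContext` parameter; within this file the only differences are `is_durable=True` and the `_durable_tasks` bucket. Sketch:

```python
from hatchet_sdk import DurableContext  # assumed re-export


@wf.durable_task()
def long_running(input: GreetInput, ctx: DurableContext) -> None:
    # Durable-execution primitives (durable sleeps, waits on events, etc.)
    # live on DurableContext; consult the Hatchet docs before using them.
    ...
```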
+     def on_failure_task(
+         self,
+         name: str | None = None,
+         schedule_timeout: Duration = DEFAULT_SCHEDULE_TIMEOUT,
+         execution_timeout: Duration = DEFAULT_EXECUTION_TIMEOUT,
+         retries: int = 0,
+         rate_limits: list[RateLimit] = [],
+         backoff_factor: float | None = None,
+         backoff_max_seconds: int | None = None,
+         concurrency: list[ConcurrencyExpression] = [],
+     ) -> Callable[[Callable[[TWorkflowInput, Context], R]], Task[TWorkflowInput, R]]:
+         """
+         A decorator to transform a function into a Hatchet on-failure task that runs as the last step in a workflow in which at least one task failed.
+
+         :param name: The name of the on-failure task. If not specified, defaults to the name of the function being wrapped by the `on_failure_task` decorator.
+         :type name: str | None
+
+         :param execution_timeout: The execution timeout of the on-failure task. Defaults to 60 minutes.
+         :type execution_timeout: datetime.timedelta | str
+
+         :param retries: The number of times to retry the on-failure task before failing. Default: `0`
+         :type retries: int
+
+         :param rate_limits: A list of rate limit configurations for the on-failure task. Defaults to an empty list (no rate limits).
+         :type rate_limits: list[RateLimit]
+
+         :param backoff_factor: The backoff factor for controlling exponential backoff in retries. Default: `None`
+         :type backoff_factor: float | None
+
+         :param backoff_max_seconds: The maximum number of seconds to allow retries with exponential backoff to continue. Default: `None`
+         :type backoff_max_seconds: int | None
+
+         :returns: A decorator which creates a `Task` object.
+         :rtype: Callable[[Callable[[Type[BaseModel], Context], R]], Task[Type[BaseModel], R]]
+         """
+
+         def inner(
+             func: Callable[[TWorkflowInput, Context], R]
+         ) -> Task[TWorkflowInput, R]:
+             task = Task(
+                 is_durable=False,
+                 _fn=func,
+                 workflow=self,
+                 type=StepType.ON_FAILURE,
+                 name=self._parse_task_name(name, func) + "-on-failure",
+                 execution_timeout=execution_timeout,
+                 schedule_timeout=schedule_timeout,
+                 retries=retries,
+                 rate_limits=[r.to_proto() for r in rate_limits],
+                 backoff_factor=backoff_factor,
+                 backoff_max_seconds=backoff_max_seconds,
+                 concurrency=concurrency,
+             )
+
+             self._on_failure_task = task
+
+             return task
+
+         return inner
+
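Note that the registered name gets `-on-failure` appended, and `add_task` below enforces at most one on-failure task per workflow. Sketch:

```python
@wf.on_failure_task()
def cleanup(input: GreetInput, ctx: Context) -> None:
    # Runs last, only if at least one task in the workflow failed.
    ...
```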
+     def on_success_task(
+         self,
+         name: str | None = None,
+         schedule_timeout: Duration = DEFAULT_SCHEDULE_TIMEOUT,
+         execution_timeout: Duration = DEFAULT_EXECUTION_TIMEOUT,
+         retries: int = 0,
+         rate_limits: list[RateLimit] = [],
+         backoff_factor: float | None = None,
+         backoff_max_seconds: int | None = None,
+         concurrency: list[ConcurrencyExpression] = [],
+     ) -> Callable[[Callable[[TWorkflowInput, Context], R]], Task[TWorkflowInput, R]]:
+         """
+         A decorator to transform a function into a Hatchet on-success task that runs as the last step in a workflow in which all upstream tasks succeeded.
+
+         :param name: The name of the on-success task. If not specified, defaults to the name of the function being wrapped by the `on_success_task` decorator.
+         :type name: str | None
+
+         :param execution_timeout: The execution timeout of the on-success task. Defaults to 60 minutes.
+         :type execution_timeout: datetime.timedelta | str
+
+         :param retries: The number of times to retry the on-success task before failing. Default: `0`
+         :type retries: int
+
+         :param rate_limits: A list of rate limit configurations for the on-success task. Defaults to an empty list (no rate limits).
+         :type rate_limits: list[RateLimit]
+
+         :param backoff_factor: The backoff factor for controlling exponential backoff in retries. Default: `None`
+         :type backoff_factor: float | None
+
+         :param backoff_max_seconds: The maximum number of seconds to allow retries with exponential backoff to continue. Default: `None`
+         :type backoff_max_seconds: int | None
+
+         :returns: A decorator which creates a `Task` object.
+         :rtype: Callable[[Callable[[Type[BaseModel], Context], R]], Task[Type[BaseModel], R]]
+         """
+
+         def inner(
+             func: Callable[[TWorkflowInput, Context], R]
+         ) -> Task[TWorkflowInput, R]:
+             task = Task(
+                 is_durable=False,
+                 _fn=func,
+                 workflow=self,
+                 type=StepType.ON_SUCCESS,
+                 name=self._parse_task_name(name, func) + "-on-success",
+                 execution_timeout=execution_timeout,
+                 schedule_timeout=schedule_timeout,
+                 retries=retries,
+                 rate_limits=[r.to_proto() for r in rate_limits],
+                 backoff_factor=backoff_factor,
+                 backoff_max_seconds=backoff_max_seconds,
+                 concurrency=concurrency,
+                 parents=[],
+             )
+
+             self._on_success_task = task
+
+             return task
+
+         return inner
+
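`on_success_task` mirrors `on_failure_task`; its parents are left empty here and are wired to the workflow's leaf `DEFAULT` tasks in `_get_create_opts` at registration time. Sketch:

```python
@wf.on_success_task()
def notify(input: GreetInput, ctx: Context) -> None:
    # Runs last, after every upstream DEFAULT task succeeded.
    ...
```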
+     def add_task(self, task: "Standalone[TWorkflowInput, Any]") -> None:
+         """
+         Add a task to a workflow. Intended to be used with a previously existing task (a `Standalone`),
+         such as one created with `@hatchet.task()`, which has been converted to a `Task` object using `to_task`.
+
+         For example:
+
+         ```python
+         @hatchet.task()
+         def my_task(input, ctx) -> None:
+             pass
+
+         wf = hatchet.workflow()
+
+         wf.add_task(my_task.to_task())
+         ```
+         """
+         _task = task._task
+
+         match _task.type:
+             case StepType.DEFAULT:
+                 self._default_tasks.append(_task)
+             case StepType.ON_FAILURE:
+                 if self._on_failure_task:
+                     raise ValueError("Only one on-failure task is allowed")
+
+                 self._on_failure_task = _task
+             case StepType.ON_SUCCESS:
+                 if self._on_success_task:
+                     raise ValueError("Only one on-success task is allowed")
+
+                 self._on_success_task = _task
+             case _:
+                 raise ValueError("Invalid task type")