hatchet-sdk 1.0.0__py3-none-any.whl → 1.0.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of hatchet-sdk might be problematic.

Files changed (73)
  1. hatchet_sdk/__init__.py +32 -16
  2. hatchet_sdk/client.py +25 -63
  3. hatchet_sdk/clients/admin.py +203 -142
  4. hatchet_sdk/clients/dispatcher/action_listener.py +42 -42
  5. hatchet_sdk/clients/dispatcher/dispatcher.py +18 -16
  6. hatchet_sdk/clients/durable_event_listener.py +327 -0
  7. hatchet_sdk/clients/rest/__init__.py +12 -1
  8. hatchet_sdk/clients/rest/api/log_api.py +258 -0
  9. hatchet_sdk/clients/rest/api/task_api.py +32 -6
  10. hatchet_sdk/clients/rest/api/workflow_runs_api.py +626 -0
  11. hatchet_sdk/clients/rest/models/__init__.py +12 -1
  12. hatchet_sdk/clients/rest/models/v1_log_line.py +94 -0
  13. hatchet_sdk/clients/rest/models/v1_log_line_level.py +39 -0
  14. hatchet_sdk/clients/rest/models/v1_log_line_list.py +110 -0
  15. hatchet_sdk/clients/rest/models/v1_task_summary.py +80 -64
  16. hatchet_sdk/clients/rest/models/v1_trigger_workflow_run_request.py +95 -0
  17. hatchet_sdk/clients/rest/models/v1_workflow_run_display_name.py +98 -0
  18. hatchet_sdk/clients/rest/models/v1_workflow_run_display_name_list.py +114 -0
  19. hatchet_sdk/clients/rest/models/workflow_run_shape_item_for_workflow_run_details.py +9 -4
  20. hatchet_sdk/clients/rest/models/workflow_runs_metrics.py +5 -1
  21. hatchet_sdk/clients/run_event_listener.py +0 -1
  22. hatchet_sdk/clients/v1/api_client.py +81 -0
  23. hatchet_sdk/context/context.py +86 -159
  24. hatchet_sdk/contracts/dispatcher_pb2_grpc.py +1 -1
  25. hatchet_sdk/contracts/events_pb2.py +2 -2
  26. hatchet_sdk/contracts/events_pb2_grpc.py +1 -1
  27. hatchet_sdk/contracts/v1/dispatcher_pb2.py +36 -0
  28. hatchet_sdk/contracts/v1/dispatcher_pb2.pyi +38 -0
  29. hatchet_sdk/contracts/v1/dispatcher_pb2_grpc.py +145 -0
  30. hatchet_sdk/contracts/v1/shared/condition_pb2.py +39 -0
  31. hatchet_sdk/contracts/v1/shared/condition_pb2.pyi +72 -0
  32. hatchet_sdk/contracts/v1/shared/condition_pb2_grpc.py +29 -0
  33. hatchet_sdk/contracts/v1/workflows_pb2.py +67 -0
  34. hatchet_sdk/contracts/v1/workflows_pb2.pyi +228 -0
  35. hatchet_sdk/contracts/v1/workflows_pb2_grpc.py +234 -0
  36. hatchet_sdk/contracts/workflows_pb2_grpc.py +1 -1
  37. hatchet_sdk/features/cron.py +91 -121
  38. hatchet_sdk/features/logs.py +16 -0
  39. hatchet_sdk/features/metrics.py +75 -0
  40. hatchet_sdk/features/rate_limits.py +45 -0
  41. hatchet_sdk/features/runs.py +221 -0
  42. hatchet_sdk/features/scheduled.py +114 -131
  43. hatchet_sdk/features/workers.py +41 -0
  44. hatchet_sdk/features/workflows.py +55 -0
  45. hatchet_sdk/hatchet.py +463 -165
  46. hatchet_sdk/opentelemetry/instrumentor.py +8 -13
  47. hatchet_sdk/rate_limit.py +33 -39
  48. hatchet_sdk/runnables/contextvars.py +12 -0
  49. hatchet_sdk/runnables/standalone.py +192 -0
  50. hatchet_sdk/runnables/task.py +144 -0
  51. hatchet_sdk/runnables/types.py +138 -0
  52. hatchet_sdk/runnables/workflow.py +771 -0
  53. hatchet_sdk/utils/aio_utils.py +0 -79
  54. hatchet_sdk/utils/proto_enums.py +0 -7
  55. hatchet_sdk/utils/timedelta_to_expression.py +23 -0
  56. hatchet_sdk/utils/typing.py +2 -2
  57. hatchet_sdk/v0/clients/rest_client.py +9 -0
  58. hatchet_sdk/v0/worker/action_listener_process.py +18 -2
  59. hatchet_sdk/waits.py +120 -0
  60. hatchet_sdk/worker/action_listener_process.py +64 -30
  61. hatchet_sdk/worker/runner/run_loop_manager.py +35 -26
  62. hatchet_sdk/worker/runner/runner.py +72 -55
  63. hatchet_sdk/worker/runner/utils/capture_logs.py +3 -11
  64. hatchet_sdk/worker/worker.py +155 -118
  65. hatchet_sdk/workflow_run.py +4 -5
  66. {hatchet_sdk-1.0.0.dist-info → hatchet_sdk-1.0.1.dist-info}/METADATA +1 -2
  67. {hatchet_sdk-1.0.0.dist-info → hatchet_sdk-1.0.1.dist-info}/RECORD +69 -43
  68. {hatchet_sdk-1.0.0.dist-info → hatchet_sdk-1.0.1.dist-info}/entry_points.txt +2 -0
  69. hatchet_sdk/clients/rest_client.py +0 -636
  70. hatchet_sdk/semver.py +0 -30
  71. hatchet_sdk/worker/runner/utils/error_with_traceback.py +0 -6
  72. hatchet_sdk/workflow.py +0 -527
  73. {hatchet_sdk-1.0.0.dist-info → hatchet_sdk-1.0.1.dist-info}/WHEEL +0 -0
hatchet_sdk/runnables/workflow.py (new file)
@@ -0,0 +1,771 @@
+import asyncio
+from datetime import datetime
+from typing import TYPE_CHECKING, Any, Callable, Generic, Union, cast, overload
+
+from google.protobuf import timestamp_pb2
+from pydantic import BaseModel
+
+from hatchet_sdk.clients.admin import (
+    ScheduleTriggerWorkflowOptions,
+    TriggerWorkflowOptions,
+    WorkflowRunTriggerConfig,
+)
+from hatchet_sdk.clients.rest.models.cron_workflows import CronWorkflows
+from hatchet_sdk.context.context import Context, DurableContext
+from hatchet_sdk.contracts.v1.shared.condition_pb2 import TaskConditions
+from hatchet_sdk.contracts.v1.workflows_pb2 import (
+    Concurrency,
+    CreateTaskOpts,
+    CreateWorkflowVersionRequest,
+    DesiredWorkerLabels,
+)
+from hatchet_sdk.contracts.v1.workflows_pb2 import StickyStrategy as StickyStrategyProto
+from hatchet_sdk.contracts.workflows_pb2 import WorkflowVersion
+from hatchet_sdk.labels import DesiredWorkerLabel
+from hatchet_sdk.logger import logger
+from hatchet_sdk.rate_limit import RateLimit
+from hatchet_sdk.runnables.task import Task
+from hatchet_sdk.runnables.types import (
+    DEFAULT_EXECUTION_TIMEOUT,
+    DEFAULT_SCHEDULE_TIMEOUT,
+    ConcurrencyExpression,
+    EmptyModel,
+    R,
+    StepType,
+    TWorkflowInput,
+    WorkflowConfig,
+)
+from hatchet_sdk.utils.proto_enums import convert_python_enum_to_proto
+from hatchet_sdk.utils.timedelta_to_expression import Duration, timedelta_to_expr
+from hatchet_sdk.utils.typing import JSONSerializableMapping
+from hatchet_sdk.waits import (
+    Action,
+    Condition,
+    OrGroup,
+    ParentCondition,
+    SleepCondition,
+    UserEventCondition,
+)
+from hatchet_sdk.workflow_run import WorkflowRunRef
+
+if TYPE_CHECKING:
+    from hatchet_sdk import Hatchet
+    from hatchet_sdk.runnables.standalone import Standalone
+
+
+def transform_desired_worker_label(d: DesiredWorkerLabel) -> DesiredWorkerLabels:
+    value = d.value
+    return DesiredWorkerLabels(
+        strValue=value if not isinstance(value, int) else None,
+        intValue=value if isinstance(value, int) else None,
+        required=d.required,
+        weight=d.weight,
+        comparator=d.comparator,  # type: ignore[arg-type]
+    )
+
+
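Usage sketch (editor's illustration, not part of the diff): `transform_desired_worker_label` routes an integer label value into `intValue` and any other value into `strValue` on the proto message. This assumes `DesiredWorkerLabel` accepts its fields as keyword arguments:

```python
from hatchet_sdk.labels import DesiredWorkerLabel

# An int value populates intValue on the proto; strValue stays unset.
cores = transform_desired_worker_label(DesiredWorkerLabel(value=8, required=True))

# A string value populates strValue instead.
region = transform_desired_worker_label(DesiredWorkerLabel(value="us-east-1", weight=10))
```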
+class TypedTriggerWorkflowRunConfig(BaseModel, Generic[TWorkflowInput]):
+    input: TWorkflowInput
+    options: TriggerWorkflowOptions
+
+
+class BaseWorkflow(Generic[TWorkflowInput]):
+    def __init__(self, config: WorkflowConfig, client: "Hatchet") -> None:
+        self.config = config
+        self._default_tasks: list[Task[TWorkflowInput, Any]] = []
+        self._durable_tasks: list[Task[TWorkflowInput, Any]] = []
+        self._on_failure_task: Task[TWorkflowInput, Any] | None = None
+        self._on_success_task: Task[TWorkflowInput, Any] | None = None
+        self.client = client
+
+    def _get_service_name(self, namespace: str) -> str:
+        return f"{namespace}{self.config.name.lower()}"
+
+    def _create_action_name(
+        self, namespace: str, step: Task[TWorkflowInput, Any]
+    ) -> str:
+        return self._get_service_name(namespace) + ":" + step.name
+
+    def _get_name(self, namespace: str) -> str:
+        return namespace + self.config.name
+
+    def _raise_for_invalid_concurrency(
+        self, concurrency: ConcurrencyExpression
+    ) -> bool:
+        expr = concurrency.expression
+
+        if not expr.startswith("input."):
+            return True
+
+        # maxsplit=1 so a nested expression like `input.a.b` does not raise an
+        # unpacking error before the field check below can report it.
+        _, field = expr.split(".", maxsplit=1)
+
+        if field not in self.config.input_validator.model_fields.keys():
+            raise ValueError(
+                f"The concurrency expression provided relies on the `{field}` field, which was not present in `{self.config.input_validator.__name__}`."
+            )
+
+        return True
+
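To illustrate the check above (editor's sketch; `MyInput` is hypothetical): expressions that do not start with `input.` are accepted as-is, while `input.<field>` expressions must name a field on the workflow's input model.

```python
from pydantic import BaseModel

class MyInput(BaseModel):
    user_id: str

# "input.user_id" validates: MyInput has a `user_id` field.
# "input.tenant_id" raises ValueError: MyInput has no `tenant_id` field.
# "additional_metadata.group" is not checked, since it doesn't start with "input.".
```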
+    @overload
+    def _concurrency_to_proto(self, concurrency: None) -> None: ...
+
+    @overload
+    def _concurrency_to_proto(
+        self, concurrency: ConcurrencyExpression
+    ) -> Concurrency: ...
+
+    def _concurrency_to_proto(
+        self, concurrency: ConcurrencyExpression | None
+    ) -> Concurrency | None:
+        if not concurrency:
+            return None
+
+        self._raise_for_invalid_concurrency(concurrency)
+
+        return Concurrency(
+            expression=concurrency.expression,
+            max_runs=concurrency.max_runs,
+            limit_strategy=concurrency.limit_strategy,
+        )
+
+    @overload
+    def _validate_task(
+        self, task: "Task[TWorkflowInput, R]", service_name: str
+    ) -> CreateTaskOpts: ...
+
+    @overload
+    def _validate_task(self, task: None, service_name: str) -> None: ...
+
+    def _validate_task(
+        self, task: Union["Task[TWorkflowInput, R]", None], service_name: str
+    ) -> CreateTaskOpts | None:
+        if not task:
+            return None
+
+        return CreateTaskOpts(
+            readable_id=task.name,
+            action=service_name + ":" + task.name,
+            timeout=timedelta_to_expr(task.execution_timeout),
+            inputs="{}",
+            parents=[p.name for p in task.parents],
+            retries=task.retries,
+            rate_limits=task.rate_limits,
+            worker_labels=task.desired_worker_labels,
+            backoff_factor=task.backoff_factor,
+            backoff_max_seconds=task.backoff_max_seconds,
+            concurrency=[self._concurrency_to_proto(t) for t in task.concurrency],
+            conditions=self._conditions_to_proto(task),
+            schedule_timeout=timedelta_to_expr(task.schedule_timeout),
+        )
+
+    def _validate_priority(self, default_priority: int | None) -> int | None:
+        validated_priority = (
+            max(1, min(3, default_priority)) if default_priority else None
+        )
+        if validated_priority != default_priority:
+            logger.warning(
+                "Default priority must be between 1 and 3, inclusive. Adjusted to be within the range."
+            )
+
+        return validated_priority
+
+    def _assign_action(self, condition: Condition, action: Action) -> Condition:
+        condition.base.action = action
+
+        return condition
+
+    def _conditions_to_proto(self, task: Task[TWorkflowInput, Any]) -> TaskConditions:
+        wait_for_conditions = [
+            self._assign_action(w, Action.QUEUE) for w in task.wait_for
+        ]
+
+        cancel_if_conditions = [
+            self._assign_action(c, Action.CANCEL) for c in task.cancel_if
+        ]
+        skip_if_conditions = [self._assign_action(s, Action.SKIP) for s in task.skip_if]
+
+        conditions = wait_for_conditions + cancel_if_conditions + skip_if_conditions
+
+        if len({c.base.readable_data_key for c in conditions}) != len(
+            [c.base.readable_data_key for c in conditions]
+        ):
+            raise ValueError("Conditions must have unique readable data keys.")
+
+        user_events = [
+            c.to_pb() for c in conditions if isinstance(c, UserEventCondition)
+        ]
+        parent_overrides = [
+            c.to_pb() for c in conditions if isinstance(c, ParentCondition)
+        ]
+        sleep_conditions = [
+            c.to_pb() for c in conditions if isinstance(c, SleepCondition)
+        ]
+
+        return TaskConditions(
+            parent_override_conditions=parent_overrides,
+            sleep_conditions=sleep_conditions,
+            user_event_conditions=user_events,
+        )
+
+    def _is_leaf_task(self, task: Task[TWorkflowInput, Any]) -> bool:
+        return not any(task in t.parents for t in self.tasks if task != t)
+
+    def _get_create_opts(self, namespace: str) -> CreateWorkflowVersionRequest:
+        service_name = self._get_service_name(namespace)
+
+        name = self._get_name(namespace)
+        event_triggers = [namespace + event for event in self.config.on_events]
+
+        if self._on_success_task:
+            self._on_success_task.parents = [
+                task
+                for task in self.tasks
+                if task.type == StepType.DEFAULT and self._is_leaf_task(task)
+            ]
+
+        on_success_task = self._validate_task(self._on_success_task, service_name)
+
+        tasks = [
+            self._validate_task(task, service_name)
+            for task in self.tasks
+            if task.type == StepType.DEFAULT
+        ]
+
+        if on_success_task:
+            tasks += [on_success_task]
+
+        on_failure_task = self._validate_task(self._on_failure_task, service_name)
+
+        return CreateWorkflowVersionRequest(
+            name=name,
+            description=self.config.description,
+            version=self.config.version,
+            event_triggers=event_triggers,
+            cron_triggers=self.config.on_crons,
+            tasks=tasks,
+            concurrency=self._concurrency_to_proto(self.config.concurrency),
+            ## TODO: Fix this
+            cron_input=None,
+            on_failure_task=on_failure_task,
+            sticky=convert_python_enum_to_proto(self.config.sticky, StickyStrategyProto),  # type: ignore[arg-type]
+        )
+
+    def _get_workflow_input(self, ctx: Context) -> TWorkflowInput:
+        return cast(
+            TWorkflowInput,
+            self.config.input_validator.model_validate(ctx.workflow_input),
+        )
+
+    @property
+    def tasks(self) -> list[Task[TWorkflowInput, Any]]:
+        tasks = self._default_tasks + self._durable_tasks
+
+        if self._on_failure_task:
+            tasks += [self._on_failure_task]
+
+        if self._on_success_task:
+            tasks += [self._on_success_task]
+
+        return tasks
+
+    @property
+    def is_durable(self) -> bool:
+        return any(task.is_durable for task in self.tasks)
+
+    def create_bulk_run_item(
+        self,
+        input: TWorkflowInput | None = None,
+        key: str | None = None,
+        options: TriggerWorkflowOptions = TriggerWorkflowOptions(),
+    ) -> WorkflowRunTriggerConfig:
+        return WorkflowRunTriggerConfig(
+            workflow_name=self.config.name,
+            input=input.model_dump() if input else {},
+            options=options,
+            key=key,
+        )
+
+
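A sketch of how `create_bulk_run_item` pairs with the bulk-run methods on `Workflow` below (editor's illustration; `wf` and `MyInput` are hypothetical):

```python
# `wf` is a Workflow[MyInput] registered elsewhere.
items = [wf.create_bulk_run_item(input=MyInput(user_id=str(i))) for i in range(5)]
results = wf.run_many(items)  # blocks until every run completes
```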
+class Workflow(BaseWorkflow[TWorkflowInput]):
+    """
+    A Hatchet workflow, which allows you to define tasks to be run and perform actions on the workflow, such as
+    running / spawning children and scheduling future runs.
+    """
+
+    def run_no_wait(
+        self,
+        input: TWorkflowInput = cast(TWorkflowInput, EmptyModel()),
+        options: TriggerWorkflowOptions = TriggerWorkflowOptions(),
+    ) -> WorkflowRunRef:
+        return self.client._client.admin.run_workflow(
+            workflow_name=self.config.name,
+            input=input.model_dump() if input else {},
+            options=options,
+        )
+
+    def run(
+        self,
+        input: TWorkflowInput = cast(TWorkflowInput, EmptyModel()),
+        options: TriggerWorkflowOptions = TriggerWorkflowOptions(),
+    ) -> dict[str, Any]:
+        ref = self.client._client.admin.run_workflow(
+            workflow_name=self.config.name,
+            input=input.model_dump() if input else {},
+            options=options,
+        )
+
+        return ref.result()
+
+    async def aio_run_no_wait(
+        self,
+        input: TWorkflowInput = cast(TWorkflowInput, EmptyModel()),
+        options: TriggerWorkflowOptions = TriggerWorkflowOptions(),
+    ) -> WorkflowRunRef:
+        return await self.client._client.admin.aio_run_workflow(
+            workflow_name=self.config.name,
+            input=input.model_dump() if input else {},
+            options=options,
+        )
+
+    async def aio_run(
+        self,
+        input: TWorkflowInput = cast(TWorkflowInput, EmptyModel()),
+        options: TriggerWorkflowOptions = TriggerWorkflowOptions(),
+    ) -> dict[str, Any]:
+        ref = await self.client._client.admin.aio_run_workflow(
+            workflow_name=self.config.name,
+            input=input.model_dump() if input else {},
+            options=options,
+        )
+
+        return await ref.aio_result()
+
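The four run methods above differ only along two axes, sync/async and wait/no-wait. A minimal sketch (editor's illustration; `wf` and `MyInput` are hypothetical):

```python
result = wf.run(MyInput(user_id="abc"))       # synchronous, blocks for the result
ref = wf.run_no_wait(MyInput(user_id="abc"))  # returns a WorkflowRunRef immediately
result = ref.result()                         # join on the run later

# In async code:
# result = await wf.aio_run(MyInput(user_id="abc"))
# ref = await wf.aio_run_no_wait(MyInput(user_id="abc"))
```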
+    def run_many(
+        self,
+        workflows: list[WorkflowRunTriggerConfig],
+    ) -> list[dict[str, Any]]:
+        refs = self.client._client.admin.run_workflows(
+            workflows=workflows,
+        )
+
+        return [ref.result() for ref in refs]
+
+    async def aio_run_many(
+        self,
+        workflows: list[WorkflowRunTriggerConfig],
+    ) -> list[dict[str, Any]]:
+        refs = await self.client._client.admin.aio_run_workflows(
+            workflows=workflows,
+        )
+
+        return await asyncio.gather(*[ref.aio_result() for ref in refs])
+
+    def run_many_no_wait(
+        self,
+        workflows: list[WorkflowRunTriggerConfig],
+    ) -> list[WorkflowRunRef]:
+        return self.client._client.admin.run_workflows(
+            workflows=workflows,
+        )
+
+    async def aio_run_many_no_wait(
+        self,
+        workflows: list[WorkflowRunTriggerConfig],
+    ) -> list[WorkflowRunRef]:
+        return await self.client._client.admin.aio_run_workflows(
+            workflows=workflows,
+        )
+
+    def schedule(
+        self,
+        run_at: datetime,
+        input: TWorkflowInput | None = None,
+        options: ScheduleTriggerWorkflowOptions = ScheduleTriggerWorkflowOptions(),
+    ) -> WorkflowVersion:
+        return self.client._client.admin.schedule_workflow(
+            name=self.config.name,
+            schedules=cast(list[datetime | timestamp_pb2.Timestamp], [run_at]),
+            input=input.model_dump() if input else {},
+            options=options,
+        )
+
+    async def aio_schedule(
+        self,
+        run_at: datetime,
+        input: TWorkflowInput,
+        options: ScheduleTriggerWorkflowOptions = ScheduleTriggerWorkflowOptions(),
+    ) -> WorkflowVersion:
+        return await self.client._client.admin.aio_schedule_workflow(
+            name=self.config.name,
+            schedules=cast(list[datetime | timestamp_pb2.Timestamp], [run_at]),
+            input=input.model_dump(),
+            options=options,
+        )
+
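A sketch of scheduling a one-off future run with the methods above (editor's illustration; `wf` and `MyInput` are hypothetical):

```python
from datetime import datetime, timedelta, timezone

wf.schedule(
    run_at=datetime.now(timezone.utc) + timedelta(minutes=10),
    input=MyInput(user_id="abc"),
)
```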
+    def create_cron(
+        self,
+        cron_name: str,
+        expression: str,
+        input: TWorkflowInput,
+        additional_metadata: JSONSerializableMapping,
+    ) -> CronWorkflows:
+        return self.client.cron.create(
+            workflow_name=self.config.name,
+            cron_name=cron_name,
+            expression=expression,
+            input=input.model_dump(),
+            additional_metadata=additional_metadata,
+        )
+
+    async def aio_create_cron(
+        self,
+        cron_name: str,
+        expression: str,
+        input: TWorkflowInput,
+        additional_metadata: JSONSerializableMapping,
+    ) -> CronWorkflows:
+        return await self.client.cron.aio_create(
+            workflow_name=self.config.name,
+            cron_name=cron_name,
+            expression=expression,
+            input=input.model_dump(),
+            additional_metadata=additional_metadata,
+        )
+
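A sketch of registering a cron trigger via the methods above (editor's illustration; `wf`, `MyInput`, and the metadata are hypothetical):

```python
wf.create_cron(
    cron_name="nightly-report",
    expression="0 0 * * *",  # every day at midnight
    input=MyInput(user_id="abc"),
    additional_metadata={"team": "data"},
)
```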
+    def _parse_task_name(
+        self,
+        name: str | None,
+        func: (
+            Callable[[TWorkflowInput, Context], R]
+            | Callable[[TWorkflowInput, DurableContext], R]
+        ),
+    ) -> str:
+        non_null_name = name or func.__name__
+
+        return non_null_name.lower()
+
+    def task(
+        self,
+        name: str | None = None,
+        schedule_timeout: Duration = DEFAULT_SCHEDULE_TIMEOUT,
+        execution_timeout: Duration = DEFAULT_EXECUTION_TIMEOUT,
+        parents: list[Task[TWorkflowInput, Any]] = [],
+        retries: int = 0,
+        rate_limits: list[RateLimit] = [],
+        desired_worker_labels: dict[str, DesiredWorkerLabel] = {},
+        backoff_factor: float | None = None,
+        backoff_max_seconds: int | None = None,
+        concurrency: list[ConcurrencyExpression] = [],
+        wait_for: list[Condition | OrGroup] = [],
+        skip_if: list[Condition | OrGroup] = [],
+        cancel_if: list[Condition | OrGroup] = [],
+    ) -> Callable[[Callable[[TWorkflowInput, Context], R]], Task[TWorkflowInput, R]]:
+        """
+        A decorator to transform a function into a Hatchet task that runs as part of a workflow.
+
+        :param name: The name of the task. If not specified, defaults to the name of the function being wrapped by the `task` decorator.
+        :type name: str | None
+
+        :param execution_timeout: The execution timeout of the task. Defaults to 60 minutes.
+        :type execution_timeout: datetime.timedelta | str
+
+        :param parents: A list of tasks that are parents of the task. Note: parents must be defined before their children. Defaults to an empty list (no parents).
+        :type parents: list[Task]
+
+        :param retries: The number of times to retry the task before failing. Default: `0`
+        :type retries: int
+
+        :param rate_limits: A list of rate limit configurations for the task. Defaults to an empty list (no rate limits).
+        :type rate_limits: list[RateLimit]
+
+        :param desired_worker_labels: A dictionary of desired worker labels that determine to which worker the task should be assigned. See the documentation and examples on affinity and worker labels for more details. Defaults to an empty dictionary (no desired worker labels).
+        :type desired_worker_labels: dict[str, DesiredWorkerLabel]
+
+        :param backoff_factor: The backoff factor for controlling exponential backoff in retries. Default: `None`
+        :type backoff_factor: float | None
+
+        :param backoff_max_seconds: The maximum number of seconds to allow retries with exponential backoff to continue. Default: `None`
+        :type backoff_max_seconds: int | None
+
+        :returns: A decorator which creates a `Task` object.
+        :rtype: Callable[[Callable[[Type[BaseModel], Context], R]], Task[Type[BaseModel], R]]
+        """
+
+        def inner(
+            func: Callable[[TWorkflowInput, Context], R]
+        ) -> Task[TWorkflowInput, R]:
+            task = Task(
+                _fn=func,
+                is_durable=False,
+                workflow=self,
+                type=StepType.DEFAULT,
+                name=self._parse_task_name(name, func),
+                execution_timeout=execution_timeout,
+                schedule_timeout=schedule_timeout,
+                parents=parents,
+                retries=retries,
+                rate_limits=[r.to_proto() for r in rate_limits],
+                desired_worker_labels={
+                    key: transform_desired_worker_label(d)
+                    for key, d in desired_worker_labels.items()
+                },
+                backoff_factor=backoff_factor,
+                backoff_max_seconds=backoff_max_seconds,
+                concurrency=concurrency,
+                wait_for=wait_for,
+                skip_if=skip_if,
+                cancel_if=cancel_if,
+            )
+
+            self._default_tasks.append(task)
+
+            return task
+
+        return inner
+
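A sketch of the decorator in use, wiring a two-task DAG (editor's illustration; `wf`, `MyInput`, and the task bodies are hypothetical):

```python
@wf.task()
def fetch(input: MyInput, ctx: Context) -> dict:
    return {"rows": 42}

@wf.task(parents=[fetch], retries=3)
def report(input: MyInput, ctx: Context) -> None:
    # Runs only after `fetch` completes; retried up to 3 times on failure.
    ...
```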
+    def durable_task(
+        self,
+        name: str | None = None,
+        schedule_timeout: Duration = DEFAULT_SCHEDULE_TIMEOUT,
+        execution_timeout: Duration = DEFAULT_EXECUTION_TIMEOUT,
+        parents: list[Task[TWorkflowInput, Any]] = [],
+        retries: int = 0,
+        rate_limits: list[RateLimit] = [],
+        desired_worker_labels: dict[str, DesiredWorkerLabel] = {},
+        backoff_factor: float | None = None,
+        backoff_max_seconds: int | None = None,
+        concurrency: list[ConcurrencyExpression] = [],
+        wait_for: list[Condition | OrGroup] = [],
+        skip_if: list[Condition | OrGroup] = [],
+        cancel_if: list[Condition | OrGroup] = [],
+    ) -> Callable[
+        [Callable[[TWorkflowInput, DurableContext], R]], Task[TWorkflowInput, R]
+    ]:
+        """
+        A decorator to transform a function into a durable Hatchet task that runs as part of a workflow.
+
+        **IMPORTANT:** This decorator creates a _durable_ task, which works using Hatchet's durable execution capabilities. This is an advanced feature of Hatchet.
+
+        See the Hatchet docs for more information on durable execution to decide if it is right for you.
+
+        :param name: The name of the task. If not specified, defaults to the name of the function being wrapped by the `durable_task` decorator.
+        :type name: str | None
+
+        :param execution_timeout: The execution timeout of the task. Defaults to 60 minutes.
+        :type execution_timeout: datetime.timedelta | str
+
+        :param parents: A list of tasks that are parents of the task. Note: parents must be defined before their children. Defaults to an empty list (no parents).
+        :type parents: list[Task]
+
+        :param retries: The number of times to retry the task before failing. Default: `0`
+        :type retries: int
+
+        :param rate_limits: A list of rate limit configurations for the task. Defaults to an empty list (no rate limits).
+        :type rate_limits: list[RateLimit]
+
+        :param desired_worker_labels: A dictionary of desired worker labels that determine to which worker the task should be assigned. See the documentation and examples on affinity and worker labels for more details. Defaults to an empty dictionary (no desired worker labels).
+        :type desired_worker_labels: dict[str, DesiredWorkerLabel]
+
+        :param backoff_factor: The backoff factor for controlling exponential backoff in retries. Default: `None`
+        :type backoff_factor: float | None
+
+        :param backoff_max_seconds: The maximum number of seconds to allow retries with exponential backoff to continue. Default: `None`
+        :type backoff_max_seconds: int | None
+
+        :returns: A decorator which creates a `Task` object.
+        :rtype: Callable[[Callable[[Type[BaseModel], DurableContext], R]], Task[Type[BaseModel], R]]
+        """
+
+        def inner(
+            func: Callable[[TWorkflowInput, DurableContext], R]
+        ) -> Task[TWorkflowInput, R]:
+            task = Task(
+                _fn=func,
+                is_durable=True,
+                workflow=self,
+                type=StepType.DEFAULT,
+                name=self._parse_task_name(name, func),
+                execution_timeout=execution_timeout,
+                schedule_timeout=schedule_timeout,
+                parents=parents,
+                retries=retries,
+                rate_limits=[r.to_proto() for r in rate_limits],
+                desired_worker_labels={
+                    key: transform_desired_worker_label(d)
+                    for key, d in desired_worker_labels.items()
+                },
+                backoff_factor=backoff_factor,
+                backoff_max_seconds=backoff_max_seconds,
+                concurrency=concurrency,
+                wait_for=wait_for,
+                skip_if=skip_if,
+                cancel_if=cancel_if,
+            )
+
+            self._durable_tasks.append(task)
+
+            return task
+
+        return inner
+
+    def on_failure_task(
+        self,
+        name: str | None = None,
+        schedule_timeout: Duration = DEFAULT_SCHEDULE_TIMEOUT,
+        execution_timeout: Duration = DEFAULT_EXECUTION_TIMEOUT,
+        retries: int = 0,
+        rate_limits: list[RateLimit] = [],
+        backoff_factor: float | None = None,
+        backoff_max_seconds: int | None = None,
+        concurrency: list[ConcurrencyExpression] = [],
+    ) -> Callable[[Callable[[TWorkflowInput, Context], R]], Task[TWorkflowInput, R]]:
+        """
+        A decorator to transform a function into a Hatchet on-failure task that runs as the last step in a workflow in which at least one task failed.
+
+        :param name: The name of the on-failure task. If not specified, defaults to the name of the function being wrapped by the `on_failure_task` decorator.
+        :type name: str | None
+
+        :param execution_timeout: The execution timeout of the on-failure task. Defaults to 60 minutes.
+        :type execution_timeout: datetime.timedelta | str
+
+        :param retries: The number of times to retry the on-failure task before failing. Default: `0`
+        :type retries: int
+
+        :param rate_limits: A list of rate limit configurations for the on-failure task. Defaults to an empty list (no rate limits).
+        :type rate_limits: list[RateLimit]
+
+        :param backoff_factor: The backoff factor for controlling exponential backoff in retries. Default: `None`
+        :type backoff_factor: float | None
+
+        :param backoff_max_seconds: The maximum number of seconds to allow retries with exponential backoff to continue. Default: `None`
+        :type backoff_max_seconds: int | None
+
+        :returns: A decorator which creates a `Task` object.
+        :rtype: Callable[[Callable[[Type[BaseModel], Context], R]], Task[Type[BaseModel], R]]
+        """
+
+        def inner(
+            func: Callable[[TWorkflowInput, Context], R]
+        ) -> Task[TWorkflowInput, R]:
+            task = Task(
+                is_durable=False,
+                _fn=func,
+                workflow=self,
+                type=StepType.ON_FAILURE,
+                name=self._parse_task_name(name, func) + "-on-failure",
+                execution_timeout=execution_timeout,
+                schedule_timeout=schedule_timeout,
+                retries=retries,
+                rate_limits=[r.to_proto() for r in rate_limits],
+                backoff_factor=backoff_factor,
+                backoff_max_seconds=backoff_max_seconds,
+                concurrency=concurrency,
+            )
+
+            if self._on_failure_task:
+                raise ValueError("Only one on-failure task is allowed")
+
+            self._on_failure_task = task
+
+            return task
+
+        return inner
+
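A sketch of an on-failure hook (editor's illustration; `wf` and `MyInput` are hypothetical):

```python
@wf.on_failure_task()
def notify(input: MyInput, ctx: Context) -> None:
    # Runs once, as the final step, if any task in the workflow failed.
    print("workflow failed")
```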
+    def on_success_task(
+        self,
+        name: str | None = None,
+        schedule_timeout: Duration = DEFAULT_SCHEDULE_TIMEOUT,
+        execution_timeout: Duration = DEFAULT_EXECUTION_TIMEOUT,
+        retries: int = 0,
+        rate_limits: list[RateLimit] = [],
+        backoff_factor: float | None = None,
+        backoff_max_seconds: int | None = None,
+        concurrency: list[ConcurrencyExpression] = [],
+    ) -> Callable[[Callable[[TWorkflowInput, Context], R]], Task[TWorkflowInput, R]]:
+        """
+        A decorator to transform a function into a Hatchet on-success task that runs as the last step in a workflow in which all upstream tasks succeeded.
+
+        :param name: The name of the on-success task. If not specified, defaults to the name of the function being wrapped by the `on_success_task` decorator.
+        :type name: str | None
+
+        :param execution_timeout: The execution timeout of the on-success task. Defaults to 60 minutes.
+        :type execution_timeout: datetime.timedelta | str
+
+        :param retries: The number of times to retry the on-success task before failing. Default: `0`
+        :type retries: int
+
+        :param rate_limits: A list of rate limit configurations for the on-success task. Defaults to an empty list (no rate limits).
+        :type rate_limits: list[RateLimit]
+
+        :param backoff_factor: The backoff factor for controlling exponential backoff in retries. Default: `None`
+        :type backoff_factor: float | None
+
+        :param backoff_max_seconds: The maximum number of seconds to allow retries with exponential backoff to continue. Default: `None`
+        :type backoff_max_seconds: int | None
+
+        :returns: A decorator which creates a `Task` object.
+        :rtype: Callable[[Callable[[Type[BaseModel], Context], R]], Task[Type[BaseModel], R]]
+        """
+
+        def inner(
+            func: Callable[[TWorkflowInput, Context], R]
+        ) -> Task[TWorkflowInput, R]:
+            task = Task(
+                is_durable=False,
+                _fn=func,
+                workflow=self,
+                type=StepType.ON_SUCCESS,
+                name=self._parse_task_name(name, func) + "-on-success",
+                execution_timeout=execution_timeout,
+                schedule_timeout=schedule_timeout,
+                retries=retries,
+                rate_limits=[r.to_proto() for r in rate_limits],
+                backoff_factor=backoff_factor,
+                backoff_max_seconds=backoff_max_seconds,
+                concurrency=concurrency,
+                parents=[],
+            )
+
+            # Only one on-success task may be registered per workflow.
+            if self._on_success_task:
+                raise ValueError("Only one on-success task is allowed")
+
+            self._on_success_task = task
+
+            return task
+
+        return inner
+
+    def add_task(self, task: "Standalone[TWorkflowInput, Any]") -> None:
+        """
+        Add a task to a workflow. Intended to be used with a previously existing task (a `Standalone`),
+        such as one created with `@hatchet.task()`, which has been converted to a `Task` object using `to_task`.
+
+        For example:
+
+        ```python
+        @hatchet.task()
+        def my_task(input, ctx) -> None:
+            pass
+
+        wf = hatchet.workflow()
+
+        wf.add_task(my_task.to_task())
+        ```
+        """
+        _task = task._task
+
+        match _task.type:
+            case StepType.DEFAULT:
+                self._default_tasks.append(_task)
+            case StepType.ON_FAILURE:
+                if self._on_failure_task:
+                    raise ValueError("Only one on-failure task is allowed")
+
+                self._on_failure_task = _task
+            case StepType.ON_SUCCESS:
+                if self._on_success_task:
+                    raise ValueError("Only one on-success task is allowed")
+
+                self._on_success_task = _task
+            case _:
+                raise ValueError("Invalid task type")