hatchet-sdk 1.12.3__py3-none-any.whl → 1.13.0__py3-none-any.whl
This diff shows the content of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of hatchet-sdk might be problematic.
- hatchet_sdk/__init__.py +46 -40
- hatchet_sdk/clients/admin.py +18 -23
- hatchet_sdk/clients/dispatcher/action_listener.py +4 -3
- hatchet_sdk/clients/dispatcher/dispatcher.py +1 -4
- hatchet_sdk/clients/event_ts.py +2 -1
- hatchet_sdk/clients/events.py +16 -12
- hatchet_sdk/clients/listeners/durable_event_listener.py +4 -2
- hatchet_sdk/clients/listeners/pooled_listener.py +2 -2
- hatchet_sdk/clients/listeners/run_event_listener.py +7 -8
- hatchet_sdk/clients/listeners/workflow_listener.py +14 -6
- hatchet_sdk/clients/rest/api_response.py +3 -2
- hatchet_sdk/clients/rest/tenacity_utils.py +6 -8
- hatchet_sdk/config.py +2 -0
- hatchet_sdk/connection.py +10 -4
- hatchet_sdk/context/context.py +170 -46
- hatchet_sdk/context/worker_context.py +4 -7
- hatchet_sdk/contracts/dispatcher_pb2.py +38 -38
- hatchet_sdk/contracts/dispatcher_pb2.pyi +4 -2
- hatchet_sdk/contracts/events_pb2.py +13 -13
- hatchet_sdk/contracts/events_pb2.pyi +4 -2
- hatchet_sdk/contracts/v1/workflows_pb2.py +1 -1
- hatchet_sdk/contracts/v1/workflows_pb2.pyi +2 -2
- hatchet_sdk/exceptions.py +99 -1
- hatchet_sdk/features/cron.py +2 -2
- hatchet_sdk/features/filters.py +3 -3
- hatchet_sdk/features/runs.py +4 -4
- hatchet_sdk/features/scheduled.py +8 -9
- hatchet_sdk/hatchet.py +65 -64
- hatchet_sdk/opentelemetry/instrumentor.py +20 -20
- hatchet_sdk/runnables/action.py +1 -2
- hatchet_sdk/runnables/contextvars.py +19 -0
- hatchet_sdk/runnables/task.py +37 -29
- hatchet_sdk/runnables/types.py +9 -8
- hatchet_sdk/runnables/workflow.py +57 -42
- hatchet_sdk/utils/proto_enums.py +4 -4
- hatchet_sdk/utils/timedelta_to_expression.py +2 -3
- hatchet_sdk/utils/typing.py +11 -17
- hatchet_sdk/waits.py +6 -5
- hatchet_sdk/worker/action_listener_process.py +33 -13
- hatchet_sdk/worker/runner/run_loop_manager.py +15 -11
- hatchet_sdk/worker/runner/runner.py +102 -92
- hatchet_sdk/worker/runner/utils/capture_logs.py +72 -31
- hatchet_sdk/worker/worker.py +29 -25
- hatchet_sdk/workflow_run.py +4 -2
- {hatchet_sdk-1.12.3.dist-info → hatchet_sdk-1.13.0.dist-info}/METADATA +1 -1
- {hatchet_sdk-1.12.3.dist-info → hatchet_sdk-1.13.0.dist-info}/RECORD +48 -48
- {hatchet_sdk-1.12.3.dist-info → hatchet_sdk-1.13.0.dist-info}/WHEEL +0 -0
- {hatchet_sdk-1.12.3.dist-info → hatchet_sdk-1.13.0.dist-info}/entry_points.txt +0 -0
hatchet_sdk/opentelemetry/instrumentor.py
CHANGED

@@ -1,6 +1,7 @@
 import json
+from collections.abc import Callable, Collection, Coroutine
 from importlib.metadata import version
-from typing import Any,
+from typing import Any, cast

 from hatchet_sdk.contracts import workflows_pb2 as v0_workflow_protos
 from hatchet_sdk.utils.typing import JSONSerializableMapping

@@ -23,10 +24,10 @@ try:
         TraceContextTextMapPropagator,
     )
     from wrapt import wrap_function_wrapper  # type: ignore[import-untyped]
-except (RuntimeError, ImportError, ModuleNotFoundError):
+except (RuntimeError, ImportError, ModuleNotFoundError) as e:
     raise ModuleNotFoundError(
         "To use the HatchetInstrumentor, you must install Hatchet's `otel` extra using (e.g.) `pip install hatchet-sdk[otel]`"
-    )
+    ) from e

 import inspect
 from datetime import datetime
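For context, `raise ... from e` is standard Python exception chaining; a minimal sketch of the pattern this hunk adopts (module name hypothetical):

try:
    import optional_dependency  # hypothetical stand-in for the otel extra
except ModuleNotFoundError as e:
    # Chaining with `from e` preserves the original error as __cause__,
    # so the traceback shows both the hint and the underlying import failure.
    raise ModuleNotFoundError("install the extra to use this feature") from e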
@@ -204,7 +205,7 @@ class HatchetInstrumentor(BaseInstrumentor): # type: ignore[misc]
         super().__init__()

     def instrumentation_dependencies(self) -> Collection[str]:
-        return
+        return ()

     def _instrument(self, **kwargs: InstrumentKwargs) -> None:
         self._tracer = get_tracer(__name__, hatchet_sdk_version, self.tracer_provider)
@@ -394,11 +395,11 @@ class HatchetInstrumentor(BaseInstrumentor): # type: ignore[misc]
             options = PushEventOptions(
                 **options.model_dump(exclude={"additional_metadata"}),
                 additional_metadata=_inject_traceparent_into_metadata(
-
+                    options.additional_metadata,
                 ),
             )

-            return wrapped(event_key,
+            return wrapped(event_key, payload, options)

     ## IMPORTANT: Keep these types in sync with the wrapped method's signature
     def _wrap_bulk_push_event(

@@ -432,7 +433,7 @@ class HatchetInstrumentor(BaseInstrumentor): # type: ignore[misc]
                 BulkPushEventWithMetadata(
                     **event.model_dump(exclude={"additional_metadata"}),
                     additional_metadata=_inject_traceparent_into_metadata(
-
+                        event.additional_metadata,
                     ),
                 )
                 for event in bulk_events

@@ -494,7 +495,7 @@ class HatchetInstrumentor(BaseInstrumentor): # type: ignore[misc]
             options = TriggerWorkflowOptions(
                 **options.model_dump(exclude={"additional_metadata"}),
                 additional_metadata=_inject_traceparent_into_metadata(
-
+                    options.additional_metadata,
                 ),
             )
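These hunks all pass the existing metadata into `_inject_traceparent_into_metadata` instead of an empty argument list. For context, a minimal sketch of the usual W3C trace-context injection such a helper performs (names illustrative, not the SDK's internals):

from opentelemetry.trace.propagation.tracecontext import TraceContextTextMapPropagator

def inject_traceparent(metadata: dict | None) -> dict:
    # Copy the caller's metadata and add the current span's `traceparent`
    # header so downstream workflow runs can be linked to this trace.
    carrier = dict(metadata or {})
    TraceContextTextMapPropagator().inject(carrier)
    return carrier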
@@ -551,19 +552,18 @@ class HatchetInstrumentor(BaseInstrumentor): # type: ignore[misc]
             options = TriggerWorkflowOptions(
                 **options.model_dump(exclude={"additional_metadata"}),
                 additional_metadata=_inject_traceparent_into_metadata(
-
+                    options.additional_metadata,
                 ),
             )

             return await wrapped(workflow_name, payload, options)

-    def _ts_to_iso(self, ts:
+    def _ts_to_iso(self, ts: datetime | timestamp_pb2.Timestamp) -> str:
         if isinstance(ts, datetime):
             return ts.isoformat()
-
+        if isinstance(ts, timestamp_pb2.Timestamp):
             return ts.ToJsonString()
-
-        raise TypeError(f"Unsupported type for timestamp conversion: {type(ts)}")
+        raise TypeError(f"Unsupported type for timestamp conversion: {type(ts)}")

     ## IMPORTANT: Keep these types in sync with the wrapped method's signature
     def _wrap_schedule_workflow(
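`_ts_to_iso` now handles both accepted types explicitly; a quick illustration of the two conversions, using the real protobuf API:

from datetime import datetime, timezone
from google.protobuf import timestamp_pb2

dt = datetime(2025, 1, 1, tzinfo=timezone.utc)
print(dt.isoformat())     # 2025-01-01T00:00:00+00:00

ts = timestamp_pb2.Timestamp()
ts.FromDatetime(dt)       # populate the proto Timestamp from a datetime
print(ts.ToJsonString())  # 2025-01-01T00:00:00Z (RFC 3339)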
@@ -571,7 +571,7 @@ class HatchetInstrumentor(BaseInstrumentor): # type: ignore[misc]
         wrapped: Callable[
             [
                 str,
-                list[
+                list[datetime | timestamp_pb2.Timestamp],
                 JSONSerializableMapping,
                 ScheduleTriggerWorkflowOptions,
             ],

@@ -580,14 +580,14 @@ class HatchetInstrumentor(BaseInstrumentor): # type: ignore[misc]
         instance: AdminClient,
         args: tuple[
             str,
-            list[
+            list[datetime | timestamp_pb2.Timestamp],
             JSONSerializableMapping,
             ScheduleTriggerWorkflowOptions,
         ],
         kwargs: dict[
             str,
             str
-            | list[
+            | list[datetime | timestamp_pb2.Timestamp]
             | JSONSerializableMapping
             | ScheduleTriggerWorkflowOptions,
         ],

@@ -595,7 +595,7 @@ class HatchetInstrumentor(BaseInstrumentor): # type: ignore[misc]
         params = self.extract_bound_args(wrapped, args, kwargs)

         workflow_name = cast(str, params[0])
-        schedules = cast(list[
+        schedules = cast(list[datetime | timestamp_pb2.Timestamp], params[1])
         input = cast(JSONSerializableMapping, params[2])
         options = cast(
             ScheduleTriggerWorkflowOptions,

@@ -633,7 +633,7 @@ class HatchetInstrumentor(BaseInstrumentor): # type: ignore[misc]
             options = ScheduleTriggerWorkflowOptions(
                 **options.model_dump(exclude={"additional_metadata"}),
                 additional_metadata=_inject_traceparent_into_metadata(
-
+                    options.additional_metadata,
                 ),
             )

@@ -673,7 +673,7 @@ class HatchetInstrumentor(BaseInstrumentor): # type: ignore[misc]
                 options=TriggerWorkflowOptions(
                     **config.options.model_dump(exclude={"additional_metadata"}),
                     additional_metadata=_inject_traceparent_into_metadata(
-
+                        config.options.additional_metadata,
                     ),
                 ),
             )

@@ -705,7 +705,7 @@ class HatchetInstrumentor(BaseInstrumentor): # type: ignore[misc]
                 options=TriggerWorkflowOptions(
                     **config.options.model_dump(exclude={"additional_metadata"}),
                     additional_metadata=_inject_traceparent_into_metadata(
-
+                        config.options.additional_metadata,
                     ),
                 ),
             )
hatchet_sdk/runnables/action.py
CHANGED
@@ -121,5 +121,4 @@ class Action(BaseModel):
         """
         if self.action_type == ActionType.START_GET_GROUP_KEY:
             return f"{self.get_group_key_run_id}/{self.retry_count}"
-
-        return f"{self.step_run_id}/{self.retry_count}"
+        return f"{self.step_run_id}/{self.retry_count}"
hatchet_sdk/runnables/contextvars.py
CHANGED

@@ -1,4 +1,5 @@
 import asyncio
+import threading
 from collections import Counter
 from contextvars import ContextVar

@@ -15,3 +16,21 @@ ctx_worker_id: ContextVar[str | None] = ContextVar("ctx_worker_id", default=None

 workflow_spawn_indices = Counter[ActionKey]()
 spawn_index_lock = asyncio.Lock()
+
+
+class TaskCounter:
+    def __init__(self) -> None:
+        self._count = 0
+        self._lock = threading.Lock()
+
+    def increment(self) -> int:
+        with self._lock:
+            self._count += 1
+            return self._count
+
+    @property
+    def value(self) -> int:
+        return self._count
+
+
+task_count = TaskCounter()
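The new `TaskCounter` serializes increments behind a `threading.Lock`, so concurrent threads always observe unique, monotonically increasing values; a short usage sketch of the class added above:

from hatchet_sdk.runnables.contextvars import TaskCounter

counter = TaskCounter()
# increment() is a locked read-modify-write, so two threads can never
# receive the same value; `value` is an unlocked read for monitoring.
assert counter.increment() == 1
assert counter.increment() == 2
assert counter.value == 2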
hatchet_sdk/runnables/task.py
CHANGED
@@ -1,5 +1,5 @@
-from
-from typing import TYPE_CHECKING, Any,
+from collections.abc import Callable
+from typing import TYPE_CHECKING, Any, Generic, cast, get_type_hints

 from hatchet_sdk.context.context import Context, DurableContext
 from hatchet_sdk.contracts.v1.shared.condition_pb2 import TaskConditions
@@ -40,28 +40,30 @@ if TYPE_CHECKING:
 class Task(Generic[TWorkflowInput, R]):
     def __init__(
         self,
-        _fn:
+        _fn: (
             Callable[[TWorkflowInput, Context], R | CoroutineLike[R]]
-            | Callable[[TWorkflowInput, Context], AwaitableLike[R]]
-
-
-
+            | Callable[[TWorkflowInput, Context], AwaitableLike[R]]
+            | (
+                Callable[[TWorkflowInput, DurableContext], R | CoroutineLike[R]]
+                | Callable[[TWorkflowInput, DurableContext], AwaitableLike[R]]
+            )
+        ),
         is_durable: bool,
         type: StepType,
         workflow: "Workflow[TWorkflowInput]",
         name: str,
-        execution_timeout: Duration
-        schedule_timeout: Duration
-        parents: "list[Task[TWorkflowInput, Any]]
-        retries: int
-        rate_limits: list[CreateTaskRateLimit]
-        desired_worker_labels: dict[str, DesiredWorkerLabels]
-        backoff_factor: float | None
-        backoff_max_seconds: int | None
-        concurrency: list[ConcurrencyExpression]
-        wait_for: list[Condition | OrGroup]
-        skip_if: list[Condition | OrGroup]
-        cancel_if: list[Condition | OrGroup]
+        execution_timeout: Duration,
+        schedule_timeout: Duration,
+        parents: "list[Task[TWorkflowInput, Any]] | None",
+        retries: int,
+        rate_limits: list[CreateTaskRateLimit] | None,
+        desired_worker_labels: dict[str, DesiredWorkerLabels] | None,
+        backoff_factor: float | None,
+        backoff_max_seconds: int | None,
+        concurrency: list[ConcurrencyExpression] | None,
+        wait_for: list[Condition | OrGroup] | None,
+        skip_if: list[Condition | OrGroup] | None,
+        cancel_if: list[Condition | OrGroup] | None,
     ) -> None:
         self.is_durable = is_durable
@@ -74,17 +76,17 @@ class Task(Generic[TWorkflowInput, R]):
         self.execution_timeout = execution_timeout
         self.schedule_timeout = schedule_timeout
         self.name = name
-        self.parents = parents
+        self.parents = parents or []
         self.retries = retries
-        self.rate_limits = rate_limits
-        self.desired_worker_labels = desired_worker_labels
+        self.rate_limits = rate_limits or []
+        self.desired_worker_labels = desired_worker_labels or {}
         self.backoff_factor = backoff_factor
         self.backoff_max_seconds = backoff_max_seconds
-        self.concurrency = concurrency
+        self.concurrency = concurrency or []

-        self.wait_for = self._flatten_conditions(wait_for)
-        self.skip_if = self._flatten_conditions(skip_if)
-        self.cancel_if = self._flatten_conditions(cancel_if)
+        self.wait_for = self._flatten_conditions(wait_for or [])
+        self.skip_if = self._flatten_conditions(skip_if or [])
+        self.cancel_if = self._flatten_conditions(cancel_if or [])

         return_type = get_type_hints(_fn).get("return")
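The `or []` / `or {}` normalization pairs with the `| None = None` signature change above: `None` sentinels avoid Python's shared-mutable-default pitfall. A minimal standalone sketch of why:

def bad(items: list[int] = []) -> list[int]:
    items.append(1)   # the SAME list object persists across calls
    return items

def good(items: list[int] | None = None) -> list[int]:
    items = items or []   # a fresh list per call when none is passed
    items.append(1)
    return items

bad(); print(bad())    # [1, 1]  <- state leaked between calls
good(); print(good())  # [1]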
@@ -179,13 +181,19 @@ class Task(Generic[TWorkflowInput, R]):
             raise ValueError("Conditions must have unique readable data keys.")

         user_events = [
-            c.to_proto()
+            c.to_proto(self.workflow.client.config)
+            for c in conditions
+            if isinstance(c, UserEventCondition)
         ]
         parent_overrides = [
-            c.to_proto()
+            c.to_proto(self.workflow.client.config)
+            for c in conditions
+            if isinstance(c, ParentCondition)
         ]
         sleep_conditions = [
-            c.to_proto()
+            c.to_proto(self.workflow.client.config)
+            for c in conditions
+            if isinstance(c, SleepCondition)
         ]

         return TaskConditions(
hatchet_sdk/runnables/types.py
CHANGED
@@ -1,7 +1,8 @@
 import asyncio
 import json
+from collections.abc import Callable
 from enum import Enum
-from typing import Any,
+from typing import Any, ParamSpec, TypeGuard, TypeVar

 from pydantic import BaseModel, ConfigDict, Field

@@ -11,7 +12,7 @@ from hatchet_sdk.contracts.v1.workflows_pb2 import DefaultFilter as DefaultFilte
 from hatchet_sdk.utils.timedelta_to_expression import Duration
 from hatchet_sdk.utils.typing import AwaitableLike, JSONSerializableMapping

-ValidTaskReturnType =
+ValidTaskReturnType = BaseModel | JSONSerializableMapping | None

 R = TypeVar("R", bound=ValidTaskReturnType)
 P = ParamSpec("P")

@@ -78,7 +79,7 @@ class DefaultFilter(BaseModel):
         return DefaultFilterProto(
             expression=self.expression,
             scope=self.scope,
-            payload=payload_json,
+            payload=payload_json.encode("utf-8"),
         )
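The `payload` change reflects that protobuf `bytes` fields require `bytes`, not `str`; a quick illustration:

import json

payload_json = json.dumps({"key": "value"})   # str
payload_bytes = payload_json.encode("utf-8")  # bytes, as the proto field expects

# The encoded payload round-trips cleanly:
assert payload_bytes.decode("utf-8") == payload_json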
@@ -92,7 +93,7 @@ class WorkflowConfig(BaseModel):
     on_crons: list[str] = Field(default_factory=list)
     sticky: StickyStrategy | None = None
     concurrency: ConcurrencyExpression | list[ConcurrencyExpression] | None = None
-    input_validator:
+    input_validator: type[BaseModel] = EmptyModel
     default_priority: int | None = None

     task_defaults: TaskDefaults = TaskDefaults()

@@ -107,7 +108,7 @@ class StepType(str, Enum):

 AsyncFunc = Callable[[TWorkflowInput, Context], AwaitableLike[R]]
 SyncFunc = Callable[[TWorkflowInput, Context], R]
-TaskFunc =
+TaskFunc = AsyncFunc[TWorkflowInput, R] | SyncFunc[TWorkflowInput, R]


 def is_async_fn(

@@ -124,9 +125,9 @@ def is_sync_fn(

 DurableAsyncFunc = Callable[[TWorkflowInput, DurableContext], AwaitableLike[R]]
 DurableSyncFunc = Callable[[TWorkflowInput, DurableContext], R]
-DurableTaskFunc =
-    DurableAsyncFunc[TWorkflowInput, R]
-
+DurableTaskFunc = (
+    DurableAsyncFunc[TWorkflowInput, R] | DurableSyncFunc[TWorkflowInput, R]
+)


 def is_durable_async_fn(
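The `TypeGuard` import above backs helpers like `is_async_fn`; a minimal sketch of the pattern with simplified signatures (not the SDK's exact ones):

import inspect
from collections.abc import Awaitable, Callable
from typing import Any, TypeGuard

def is_async_fn(fn: Callable[..., Any]) -> TypeGuard[Callable[..., Awaitable[Any]]]:
    # When this returns True, type checkers narrow `fn` to the
    # awaitable-returning callable type.
    return inspect.iscoroutinefunction(fn)

async def handler() -> None: ...

if is_async_fn(handler):
    pass  # `handler` is treated as Callable[..., Awaitable[Any]] here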
hatchet_sdk/runnables/workflow.py
CHANGED

@@ -1,7 +1,8 @@
 import asyncio
+from collections.abc import Callable
 from datetime import datetime, timedelta, timezone
 from functools import cached_property
-from typing import TYPE_CHECKING, Any,
+from typing import TYPE_CHECKING, Any, Generic, TypeVar, cast, get_type_hints

 from google.protobuf import timestamp_pb2
 from pydantic import BaseModel, model_validator

@@ -11,6 +12,7 @@ from hatchet_sdk.clients.admin import (
     TriggerWorkflowOptions,
     WorkflowRunTriggerConfig,
 )
+from hatchet_sdk.clients.listeners.run_event_listener import RunEventListener
 from hatchet_sdk.clients.rest.models.cron_workflows import CronWorkflows
 from hatchet_sdk.clients.rest.models.v1_filter import V1Filter
 from hatchet_sdk.clients.rest.models.v1_task_status import V1TaskStatus
@@ -371,7 +373,7 @@ class BaseWorkflow(Generic[TWorkflowInput]):
         self,
         expression: str,
         scope: str,
-        payload: JSONSerializableMapping =
+        payload: JSONSerializableMapping | None = None,
     ) -> V1Filter:
         """
         Create a new filter.

@@ -393,7 +395,7 @@ class BaseWorkflow(Generic[TWorkflowInput]):
         self,
         expression: str,
         scope: str,
-        payload: JSONSerializableMapping =
+        payload: JSONSerializableMapping | None = None,
     ) -> V1Filter:
         """
         Create a new filter.

@@ -458,7 +460,7 @@ class BaseWorkflow(Generic[TWorkflowInput]):
         cron_name: str,
         expression: str,
         input: TWorkflowInput = cast(TWorkflowInput, EmptyModel()),
-        additional_metadata: JSONSerializableMapping =
+        additional_metadata: JSONSerializableMapping | None = None,
         priority: int | None = None,
     ) -> CronWorkflows:
         """

@@ -477,7 +479,7 @@ class BaseWorkflow(Generic[TWorkflowInput]):
             cron_name=cron_name,
             expression=expression,
             input=self._serialize_input(input),
-            additional_metadata=additional_metadata,
+            additional_metadata=additional_metadata or {},
             priority=priority,
         )

@@ -486,7 +488,7 @@ class BaseWorkflow(Generic[TWorkflowInput]):
         cron_name: str,
         expression: str,
         input: TWorkflowInput = cast(TWorkflowInput, EmptyModel()),
-        additional_metadata: JSONSerializableMapping =
+        additional_metadata: JSONSerializableMapping | None = None,
         priority: int | None = None,
     ) -> CronWorkflows:
         """

@@ -505,7 +507,7 @@ class BaseWorkflow(Generic[TWorkflowInput]):
             cron_name=cron_name,
             expression=expression,
             input=self._serialize_input(input),
-            additional_metadata=additional_metadata,
+            additional_metadata=additional_metadata or {},
             priority=priority,
         )
@@ -620,7 +622,7 @@ class Workflow(BaseWorkflow[TWorkflowInput]):
         """
         Run the workflow asynchronously and wait for it to complete.

-        This method triggers a workflow run,
+        This method triggers a workflow run, awaits until completion, and returns the final result.

         :param input: The input data for the workflow, must match the workflow's input type.
         :param options: Additional options for workflow execution like metadata and parent workflow ID.
@@ -716,16 +718,16 @@ class Workflow(BaseWorkflow[TWorkflowInput]):
         name: str | None = None,
         schedule_timeout: Duration = timedelta(minutes=5),
         execution_timeout: Duration = timedelta(seconds=60),
-        parents: list[Task[TWorkflowInput, Any]] =
+        parents: list[Task[TWorkflowInput, Any]] | None = None,
         retries: int = 0,
-        rate_limits: list[RateLimit] =
-        desired_worker_labels: dict[str, DesiredWorkerLabel] =
+        rate_limits: list[RateLimit] | None = None,
+        desired_worker_labels: dict[str, DesiredWorkerLabel] | None = None,
         backoff_factor: float | None = None,
         backoff_max_seconds: int | None = None,
-        concurrency: list[ConcurrencyExpression] =
-        wait_for: list[Condition | OrGroup] =
-        skip_if: list[Condition | OrGroup] =
-        cancel_if: list[Condition | OrGroup] =
+        concurrency: list[ConcurrencyExpression] | None = None,
+        wait_for: list[Condition | OrGroup] | None = None,
+        skip_if: list[Condition | OrGroup] | None = None,
+        cancel_if: list[Condition | OrGroup] | None = None,
     ) -> Callable[
         [Callable[[TWorkflowInput, Context], R | CoroutineLike[R]]],
         Task[TWorkflowInput, R],

@@ -784,10 +786,10 @@ class Workflow(BaseWorkflow[TWorkflowInput]):
             schedule_timeout=computed_params.schedule_timeout,
             parents=parents,
             retries=computed_params.retries,
-            rate_limits=[r.to_proto() for r in rate_limits],
+            rate_limits=[r.to_proto() for r in rate_limits or []],
             desired_worker_labels={
                 key: transform_desired_worker_label(d)
-                for key, d in desired_worker_labels.items()
+                for key, d in (desired_worker_labels or {}).items()
             },
             backoff_factor=computed_params.backoff_factor,
             backoff_max_seconds=computed_params.backoff_max_seconds,
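With these signatures, every collection parameter of the `task` decorator is optional and normalized internally; a hedged usage sketch (workflow and model names are illustrative):

from pydantic import BaseModel
from hatchet_sdk import Context, Hatchet

class MyInput(BaseModel):
    name: str

hatchet = Hatchet()
wf = hatchet.workflow(name="example", input_validator=MyInput)

# parents, rate_limits, wait_for, etc. can simply be omitted; they now
# default to None instead of sharing mutable default lists.
@wf.task(retries=2)
def greet(input: MyInput, ctx: Context) -> dict:
    return {"greeting": f"hello, {input.name}"}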
@@ -808,16 +810,16 @@ class Workflow(BaseWorkflow[TWorkflowInput]):
         name: str | None = None,
         schedule_timeout: Duration = timedelta(minutes=5),
         execution_timeout: Duration = timedelta(seconds=60),
-        parents: list[Task[TWorkflowInput, Any]] =
+        parents: list[Task[TWorkflowInput, Any]] | None = None,
         retries: int = 0,
-        rate_limits: list[RateLimit] =
-        desired_worker_labels: dict[str, DesiredWorkerLabel] =
+        rate_limits: list[RateLimit] | None = None,
+        desired_worker_labels: dict[str, DesiredWorkerLabel] | None = None,
         backoff_factor: float | None = None,
         backoff_max_seconds: int | None = None,
-        concurrency: list[ConcurrencyExpression] =
-        wait_for: list[Condition | OrGroup] =
-        skip_if: list[Condition | OrGroup] =
-        cancel_if: list[Condition | OrGroup] =
+        concurrency: list[ConcurrencyExpression] | None = None,
+        wait_for: list[Condition | OrGroup] | None = None,
+        skip_if: list[Condition | OrGroup] | None = None,
+        cancel_if: list[Condition | OrGroup] | None = None,
     ) -> Callable[
         [Callable[[TWorkflowInput, DurableContext], R | CoroutineLike[R]]],
         Task[TWorkflowInput, R],

@@ -880,10 +882,10 @@ class Workflow(BaseWorkflow[TWorkflowInput]):
             schedule_timeout=computed_params.schedule_timeout,
             parents=parents,
             retries=computed_params.retries,
-            rate_limits=[r.to_proto() for r in rate_limits],
+            rate_limits=[r.to_proto() for r in rate_limits or []],
             desired_worker_labels={
                 key: transform_desired_worker_label(d)
-                for key, d in desired_worker_labels.items()
+                for key, d in (desired_worker_labels or {}).items()
             },
             backoff_factor=computed_params.backoff_factor,
             backoff_max_seconds=computed_params.backoff_max_seconds,

@@ -905,10 +907,10 @@ class Workflow(BaseWorkflow[TWorkflowInput]):
         schedule_timeout: Duration = timedelta(minutes=5),
         execution_timeout: Duration = timedelta(seconds=60),
         retries: int = 0,
-        rate_limits: list[RateLimit] =
+        rate_limits: list[RateLimit] | None = None,
         backoff_factor: float | None = None,
         backoff_max_seconds: int | None = None,
-        concurrency: list[ConcurrencyExpression] =
+        concurrency: list[ConcurrencyExpression] | None = None,
     ) -> Callable[
         [Callable[[TWorkflowInput, Context], R | CoroutineLike[R]]],
         Task[TWorkflowInput, R],
@@ -947,10 +949,15 @@ class Workflow(BaseWorkflow[TWorkflowInput]):
             execution_timeout=execution_timeout,
             schedule_timeout=schedule_timeout,
             retries=retries,
-            rate_limits=[r.to_proto() for r in rate_limits],
+            rate_limits=[r.to_proto() for r in rate_limits or []],
             backoff_factor=backoff_factor,
             backoff_max_seconds=backoff_max_seconds,
             concurrency=concurrency,
+            desired_worker_labels=None,
+            parents=None,
+            wait_for=None,
+            skip_if=None,
+            cancel_if=None,
         )

         if self._on_failure_task:

@@ -968,10 +975,10 @@ class Workflow(BaseWorkflow[TWorkflowInput]):
         schedule_timeout: Duration = timedelta(minutes=5),
         execution_timeout: Duration = timedelta(seconds=60),
         retries: int = 0,
-        rate_limits: list[RateLimit] =
+        rate_limits: list[RateLimit] | None = None,
         backoff_factor: float | None = None,
         backoff_max_seconds: int | None = None,
-        concurrency: list[ConcurrencyExpression] =
+        concurrency: list[ConcurrencyExpression] | None = None,
     ) -> Callable[
         [Callable[[TWorkflowInput, Context], R | CoroutineLike[R]]],
         Task[TWorkflowInput, R],

@@ -1010,11 +1017,15 @@ class Workflow(BaseWorkflow[TWorkflowInput]):
             execution_timeout=execution_timeout,
             schedule_timeout=schedule_timeout,
             retries=retries,
-            rate_limits=[r.to_proto() for r in rate_limits],
+            rate_limits=[r.to_proto() for r in rate_limits or []],
             backoff_factor=backoff_factor,
             backoff_max_seconds=backoff_max_seconds,
             concurrency=concurrency,
-            parents=
+            parents=None,
+            desired_worker_labels=None,
+            wait_for=None,
+            skip_if=None,
+            cancel_if=None,
         )

         if self._on_success_task:
@@ -1087,6 +1098,9 @@ class TaskRunRef(Generic[TWorkflowInput, R]):

         return self._s._extract_result(result)

+    def stream(self) -> RunEventListener:
+        return self._wrr.stream()
+

 class Standalone(BaseWorkflow[TWorkflowInput], Generic[TWorkflowInput, R]):
     def __init__(
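`TaskRunRef.stream()` simply delegates to the underlying workflow-run reference. A hedged usage sketch, assuming `RunEventListener` is async-iterable as elsewhere in the SDK's listener API:

# `my_task` is a hypothetical Standalone task; run_no_wait returns a TaskRunRef
ref = my_task.run_no_wait(MyInput(name="world"))

async def follow() -> None:
    # Consume run events (progress, logs, completion) as they arrive.
    async for event in ref.stream():
        print(event)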
@@ -1123,13 +1137,14 @@ class Standalone(BaseWorkflow[TWorkflowInput], Generic[TWorkflowInput, R]):
         options: TriggerWorkflowOptions = TriggerWorkflowOptions(),
     ) -> R:
         """
-
-
+        Run the workflow synchronously and wait for it to complete.
+
+        This method triggers a workflow run, blocks until completion, and returns the extracted result.

         :param input: The input data for the workflow.
         :param options: Additional options for workflow execution.

-        :returns:
+        :returns: The extracted result of the workflow execution.
         """
         return self._extract_result(self._workflow.run(input, options))

@@ -1141,12 +1156,12 @@ class Standalone(BaseWorkflow[TWorkflowInput], Generic[TWorkflowInput, R]):
         """
         Run the workflow asynchronously and wait for it to complete.

-        This method triggers a workflow run,
+        This method triggers a workflow run, awaits until completion, and returns the extracted result.

         :param input: The input data for the workflow, must match the workflow's input type.
         :param options: Additional options for workflow execution like metadata and parent workflow ID.

-        :returns: The result of the workflow execution
+        :returns: The extracted result of the workflow execution.
         """
         result = await self._workflow.aio_run(input, options)
         return self._extract_result(result)

@@ -1157,14 +1172,14 @@ class Standalone(BaseWorkflow[TWorkflowInput], Generic[TWorkflowInput, R]):
         options: TriggerWorkflowOptions = TriggerWorkflowOptions(),
     ) -> TaskRunRef[TWorkflowInput, R]:
         """
-
+        Trigger a workflow run without waiting for it to complete.

-        This method triggers a workflow run
+        This method triggers a workflow run and immediately returns a reference to the run without blocking while the workflow runs.

         :param input: The input data for the workflow, must match the workflow's input type.
         :param options: Additional options for workflow execution like metadata and parent workflow ID.

-        :returns:
+        :returns: A `TaskRunRef` object representing the reference to the workflow run.
         """
         ref = self._workflow.run_no_wait(input, options)

@@ -1182,7 +1197,7 @@ class Standalone(BaseWorkflow[TWorkflowInput], Generic[TWorkflowInput, R]):
         :param input: The input data for the workflow.
         :param options: Additional options for workflow execution.

-        :returns: A `
+        :returns: A `TaskRunRef` object representing the reference to the workflow run.
         """
         ref = await self._workflow.aio_run_no_wait(input, options)
hatchet_sdk/utils/proto_enums.py
CHANGED
@@ -1,5 +1,5 @@
 from enum import Enum
-from typing import
+from typing import TypeVar, overload

 from google.protobuf.internal.enum_type_wrapper import EnumTypeWrapper

@@ -26,19 +26,19 @@ def convert_python_enum_to_proto(

 @overload
 def convert_proto_enum_to_python(
-    value: TProtoEnumValue, python_enum_class:
+    value: TProtoEnumValue, python_enum_class: type[TPythonEnum], proto_enum: TProtoEnum
 ) -> TPythonEnum: ...


 @overload
 def convert_proto_enum_to_python(
-    value: None, python_enum_class:
+    value: None, python_enum_class: type[TPythonEnum], proto_enum: TProtoEnum
 ) -> None: ...


 def convert_proto_enum_to_python(
     value: TProtoEnumValue | None,
-    python_enum_class:
+    python_enum_class: type[TPythonEnum],
     proto_enum: TProtoEnum,
 ) -> TPythonEnum | None:
     if value is None:
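The `@overload` pair gives callers precise result types: a real proto value maps to the Python enum, while `None` maps straight to `None`. The same pattern in miniature (enums hypothetical):

from enum import Enum
from typing import overload

class Color(Enum):
    RED = 1

@overload
def to_python(value: int) -> Color: ...
@overload
def to_python(value: None) -> None: ...
def to_python(value: int | None) -> Color | None:
    # One runtime implementation; the overloads let type checkers see
    # that to_python(1) is Color while to_python(None) is None.
    return None if value is None else Color(value)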
hatchet_sdk/utils/timedelta_to_expression.py
CHANGED

@@ -18,7 +18,6 @@ def timedelta_to_expr(td: Duration) -> str:
     ## IMPORTANT: We only support hours, minutes, and seconds on the engine
     if seconds % HOUR == 0:
         return f"{seconds // HOUR}h"
-
+    if seconds % MINUTE == 0:
         return f"{seconds // MINUTE}m"
-
-    return f"{seconds}s"
+    return f"{seconds}s"
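For illustration, the branches select the coarsest unit that evenly divides the total seconds; expected behavior of the function above:

from datetime import timedelta
from hatchet_sdk.utils.timedelta_to_expression import timedelta_to_expr

assert timedelta_to_expr(timedelta(hours=2)) == "2h"      # 7200 % 3600 == 0
assert timedelta_to_expr(timedelta(minutes=90)) == "90m"  # 5400 % 60 == 0
assert timedelta_to_expr(timedelta(seconds=45)) == "45s"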
|