hatchet-sdk 1.2.6__py3-none-any.whl → 1.3.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- hatchet_sdk/__init__.py +7 -5
- hatchet_sdk/client.py +14 -6
- hatchet_sdk/clients/admin.py +57 -15
- hatchet_sdk/clients/dispatcher/action_listener.py +2 -2
- hatchet_sdk/clients/dispatcher/dispatcher.py +20 -7
- hatchet_sdk/clients/event_ts.py +25 -5
- hatchet_sdk/clients/listeners/durable_event_listener.py +125 -0
- hatchet_sdk/clients/listeners/pooled_listener.py +255 -0
- hatchet_sdk/clients/listeners/workflow_listener.py +62 -0
- hatchet_sdk/clients/rest/api/api_token_api.py +24 -24
- hatchet_sdk/clients/rest/api/default_api.py +64 -64
- hatchet_sdk/clients/rest/api/event_api.py +64 -64
- hatchet_sdk/clients/rest/api/github_api.py +8 -8
- hatchet_sdk/clients/rest/api/healthcheck_api.py +16 -16
- hatchet_sdk/clients/rest/api/log_api.py +16 -16
- hatchet_sdk/clients/rest/api/metadata_api.py +24 -24
- hatchet_sdk/clients/rest/api/rate_limits_api.py +8 -8
- hatchet_sdk/clients/rest/api/slack_api.py +16 -16
- hatchet_sdk/clients/rest/api/sns_api.py +24 -24
- hatchet_sdk/clients/rest/api/step_run_api.py +56 -56
- hatchet_sdk/clients/rest/api/task_api.py +56 -56
- hatchet_sdk/clients/rest/api/tenant_api.py +128 -128
- hatchet_sdk/clients/rest/api/user_api.py +96 -96
- hatchet_sdk/clients/rest/api/worker_api.py +24 -24
- hatchet_sdk/clients/rest/api/workflow_api.py +144 -144
- hatchet_sdk/clients/rest/api/workflow_run_api.py +48 -48
- hatchet_sdk/clients/rest/api/workflow_runs_api.py +40 -40
- hatchet_sdk/clients/rest/api_client.py +5 -8
- hatchet_sdk/clients/rest/configuration.py +7 -3
- hatchet_sdk/clients/rest/models/tenant_step_run_queue_metrics.py +2 -2
- hatchet_sdk/clients/rest/models/v1_task_summary.py +5 -0
- hatchet_sdk/clients/rest/models/v1_workflow_run.py +5 -0
- hatchet_sdk/clients/rest/rest.py +160 -111
- hatchet_sdk/clients/v1/api_client.py +2 -2
- hatchet_sdk/context/context.py +22 -21
- hatchet_sdk/features/cron.py +41 -40
- hatchet_sdk/features/logs.py +7 -6
- hatchet_sdk/features/metrics.py +19 -18
- hatchet_sdk/features/runs.py +88 -68
- hatchet_sdk/features/scheduled.py +42 -42
- hatchet_sdk/features/workers.py +17 -16
- hatchet_sdk/features/workflows.py +15 -14
- hatchet_sdk/hatchet.py +1 -1
- hatchet_sdk/runnables/standalone.py +12 -9
- hatchet_sdk/runnables/task.py +66 -2
- hatchet_sdk/runnables/types.py +8 -0
- hatchet_sdk/runnables/workflow.py +26 -125
- hatchet_sdk/waits.py +8 -8
- hatchet_sdk/worker/runner/run_loop_manager.py +4 -4
- hatchet_sdk/worker/runner/runner.py +22 -11
- hatchet_sdk/worker/worker.py +29 -25
- hatchet_sdk/workflow_run.py +58 -9
- {hatchet_sdk-1.2.6.dist-info → hatchet_sdk-1.3.1.dist-info}/METADATA +1 -1
- {hatchet_sdk-1.2.6.dist-info → hatchet_sdk-1.3.1.dist-info}/RECORD +57 -57
- hatchet_sdk/clients/durable_event_listener.py +0 -329
- hatchet_sdk/clients/workflow_listener.py +0 -288
- hatchet_sdk/utils/aio.py +0 -43
- /hatchet_sdk/clients/{run_event_listener.py → listeners/run_event_listener.py} +0 -0
- {hatchet_sdk-1.2.6.dist-info → hatchet_sdk-1.3.1.dist-info}/WHEEL +0 -0
- {hatchet_sdk-1.2.6.dist-info → hatchet_sdk-1.3.1.dist-info}/entry_points.txt +0 -0
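
Much of the churn in this release is mechanical: the listener clients moved from hatchet_sdk.clients into the new hatchet_sdk.clients.listeners package (durable_event_listener.py and workflow_listener.py are deleted from their old locations and rewritten under clients/listeners/, and run_event_listener.py is moved verbatim). A version-tolerant import shim might look like the sketch below; the try/except structure is illustrative, not part of the SDK.

    # Hypothetical compatibility shim for the 1.3.x module layout.
    try:
        # 1.3.x layout
        from hatchet_sdk.clients.listeners.run_event_listener import RunEventListenerClient
        from hatchet_sdk.clients.listeners.workflow_listener import PooledWorkflowRunListener
    except ImportError:
        # 1.2.x layout
        from hatchet_sdk.clients.run_event_listener import RunEventListenerClient
        from hatchet_sdk.clients.workflow_listener import PooledWorkflowRunListener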
hatchet_sdk/runnables/workflow.py
CHANGED

@@ -1,6 +1,6 @@
 import asyncio
 from datetime import datetime
-from typing import TYPE_CHECKING, Any, Callable, Generic,
+from typing import TYPE_CHECKING, Any, Callable, Generic, cast
 
 from google.protobuf import timestamp_pb2
 from pydantic import BaseModel
@@ -12,10 +12,7 @@ from hatchet_sdk.clients.admin import (
 )
 from hatchet_sdk.clients.rest.models.cron_workflows import CronWorkflows
 from hatchet_sdk.context.context import Context, DurableContext
-from hatchet_sdk.contracts.v1.shared.condition_pb2 import TaskConditions
 from hatchet_sdk.contracts.v1.workflows_pb2 import (
-    Concurrency,
-    CreateTaskOpts,
     CreateWorkflowVersionRequest,
     DesiredWorkerLabels,
 )
@@ -36,16 +33,9 @@ from hatchet_sdk.runnables.types import (
     WorkflowConfig,
 )
 from hatchet_sdk.utils.proto_enums import convert_python_enum_to_proto
-from hatchet_sdk.utils.timedelta_to_expression import Duration
+from hatchet_sdk.utils.timedelta_to_expression import Duration
 from hatchet_sdk.utils.typing import JSONSerializableMapping
-from hatchet_sdk.waits import (
-    Action,
-    Condition,
-    OrGroup,
-    ParentCondition,
-    SleepCondition,
-    UserEventCondition,
-)
+from hatchet_sdk.waits import Condition, OrGroup
 from hatchet_sdk.workflow_run import WorkflowRunRef
 
 if TYPE_CHECKING:
@@ -78,16 +68,12 @@ class BaseWorkflow(Generic[TWorkflowInput]):
         self._on_success_task: Task[TWorkflowInput, Any] | None = None
         self.client = client
 
-
-
-
-    def _create_action_name(
-        self, namespace: str, step: Task[TWorkflowInput, Any]
-    ) -> str:
-        return self._get_service_name(namespace) + ":" + step.name
+    @property
+    def service_name(self) -> str:
+        return f"{self.client.config.namespace}{self.config.name.lower()}"
 
-    def _get_service_name(self, namespace: str) -> str:
-        return f"{namespace}{self.config.name.lower()}"
+    def _create_action_name(self, step: Task[TWorkflowInput, Any]) -> str:
+        return self.service_name + ":" + step.name
 
     def _raise_for_invalid_concurrency(
         self, concurrency: ConcurrencyExpression
@@ -106,58 +92,6 @@ class BaseWorkflow(Generic[TWorkflowInput]):
 
         return True
 
-    @overload
-    def _concurrency_to_proto(self, concurrency: None) -> None: ...
-
-    @overload
-    def _concurrency_to_proto(
-        self, concurrency: ConcurrencyExpression
-    ) -> Concurrency: ...
-
-    def _concurrency_to_proto(
-        self, concurrency: ConcurrencyExpression | None
-    ) -> Concurrency | None:
-        if not concurrency:
-            return None
-
-        self._raise_for_invalid_concurrency(concurrency)
-
-        return Concurrency(
-            expression=concurrency.expression,
-            max_runs=concurrency.max_runs,
-            limit_strategy=concurrency.limit_strategy,
-        )
-
-    @overload
-    def _validate_task(
-        self, task: "Task[TWorkflowInput, R]", service_name: str
-    ) -> CreateTaskOpts: ...
-
-    @overload
-    def _validate_task(self, task: None, service_name: str) -> None: ...
-
-    def _validate_task(
-        self, task: Union["Task[TWorkflowInput, R]", None], service_name: str
-    ) -> CreateTaskOpts | None:
-        if not task:
-            return None
-
-        return CreateTaskOpts(
-            readable_id=task.name,
-            action=service_name + ":" + task.name,
-            timeout=timedelta_to_expr(task.execution_timeout),
-            inputs="{}",
-            parents=[p.name for p in task.parents],
-            retries=task.retries,
-            rate_limits=task.rate_limits,
-            worker_labels=task.desired_worker_labels,
-            backoff_factor=task.backoff_factor,
-            backoff_max_seconds=task.backoff_max_seconds,
-            concurrency=[self._concurrency_to_proto(t) for t in task.concurrency],
-            conditions=self._conditions_to_proto(task),
-            schedule_timeout=timedelta_to_expr(task.schedule_timeout),
-        )
-
     def _validate_priority(self, default_priority: int | None) -> int | None:
         validated_priority = (
             max(1, min(3, default_priority)) if default_priority else None
@@ -169,51 +103,14 @@ class BaseWorkflow(Generic[TWorkflowInput]):
 
         return validated_priority
 
-    def _assign_action(self, condition: Condition, action: Action) -> Condition:
-        condition.base.action = action
-
-        return condition
-
-    def _conditions_to_proto(self, task: Task[TWorkflowInput, Any]) -> TaskConditions:
-        wait_for_conditions = [
-            self._assign_action(w, Action.QUEUE) for w in task.wait_for
-        ]
-
-        cancel_if_conditions = [
-            self._assign_action(c, Action.CANCEL) for c in task.cancel_if
-        ]
-        skip_if_conditions = [self._assign_action(s, Action.SKIP) for s in task.skip_if]
-
-        conditions = wait_for_conditions + cancel_if_conditions + skip_if_conditions
-
-        if len({c.base.readable_data_key for c in conditions}) != len(
-            [c.base.readable_data_key for c in conditions]
-        ):
-            raise ValueError("Conditions must have unique readable data keys.")
-
-        user_events = [
-            c.to_pb() for c in conditions if isinstance(c, UserEventCondition)
-        ]
-        parent_overrides = [
-            c.to_pb() for c in conditions if isinstance(c, ParentCondition)
-        ]
-        sleep_conditions = [
-            c.to_pb() for c in conditions if isinstance(c, SleepCondition)
-        ]
-
-        return TaskConditions(
-            parent_override_conditions=parent_overrides,
-            sleep_conditions=sleep_conditions,
-            user_event_conditions=user_events,
-        )
-
     def _is_leaf_task(self, task: Task[TWorkflowInput, Any]) -> bool:
         return not any(task in t.parents for t in self.tasks if task != t)
 
-    def _get_create_opts(self, namespace: str) -> CreateWorkflowVersionRequest:
-        service_name = self._get_service_name(namespace)
+    def to_proto(self) -> CreateWorkflowVersionRequest:
+        namespace = self.client.config.namespace
+        service_name = self.service_name
 
-        name = self._get_name(namespace)
+        name = self.name
         event_triggers = [namespace + event for event in self.config.on_events]
 
         if self._on_success_task:
@@ -223,10 +120,12 @@ class BaseWorkflow(Generic[TWorkflowInput]):
             if task.type == StepType.DEFAULT and self._is_leaf_task(task)
         ]
 
-        on_success_task = self._validate_task(self._on_success_task, service_name)
+        on_success_task = (
+            t.to_proto(service_name) if (t := self._on_success_task) else None
+        )
 
         tasks = [
-            self._validate_task(task, service_name)
+            task.to_proto(service_name)
            for task in self.tasks
            if task.type == StepType.DEFAULT
        ]
@@ -234,7 +133,9 @@ class BaseWorkflow(Generic[TWorkflowInput]):
         if on_success_task:
             tasks += [on_success_task]
 
-        on_failure_task = self._validate_task(self._on_failure_task, service_name)
+        on_failure_task = (
+            t.to_proto(service_name) if (t := self._on_failure_task) else None
+        )
 
         return CreateWorkflowVersionRequest(
             name=name,
@@ -243,7 +144,7 @@ class BaseWorkflow(Generic[TWorkflowInput]):
             event_triggers=event_triggers,
             cron_triggers=self.config.on_crons,
             tasks=tasks,
-            concurrency=self._concurrency_to_proto(self.config.concurrency),
+            concurrency=(c.to_proto() if (c := self.config.concurrency) else None),
             ## TODO: Fix this
             cron_input=None,
             on_failure_task=on_failure_task,
@@ -274,11 +175,11 @@ class BaseWorkflow(Generic[TWorkflowInput]):
 
     @property
     def name(self) -> str:
-        return self.
+        return self.client.config.namespace + self.config.name
 
     def create_bulk_run_item(
         self,
-        input: TWorkflowInput,
+        input: TWorkflowInput = cast(TWorkflowInput, EmptyModel()),
         key: str | None = None,
         options: TriggerWorkflowOptions = TriggerWorkflowOptions(),
     ) -> WorkflowRunTriggerConfig:
@@ -394,7 +295,7 @@ class Workflow(BaseWorkflow[TWorkflowInput]):
     def schedule(
         self,
         run_at: datetime,
-        input: TWorkflowInput,
+        input: TWorkflowInput = cast(TWorkflowInput, EmptyModel()),
         options: ScheduleTriggerWorkflowOptions = ScheduleTriggerWorkflowOptions(),
     ) -> WorkflowVersion:
         return self.client._client.admin.schedule_workflow(
@@ -407,7 +308,7 @@ class Workflow(BaseWorkflow[TWorkflowInput]):
     async def aio_schedule(
         self,
         run_at: datetime,
-        input: TWorkflowInput,
+        input: TWorkflowInput = cast(TWorkflowInput, EmptyModel()),
         options: ScheduleTriggerWorkflowOptions = ScheduleTriggerWorkflowOptions(),
     ) -> WorkflowVersion:
         return await self.client._client.admin.aio_schedule_workflow(
@@ -421,7 +322,7 @@ class Workflow(BaseWorkflow[TWorkflowInput]):
         self,
         cron_name: str,
         expression: str,
-        input: TWorkflowInput,
+        input: TWorkflowInput = cast(TWorkflowInput, EmptyModel()),
         additional_metadata: JSONSerializableMapping = {},
     ) -> CronWorkflows:
         return self.client.cron.create(
@@ -436,7 +337,7 @@ class Workflow(BaseWorkflow[TWorkflowInput]):
         self,
         cron_name: str,
         expression: str,
-        input: TWorkflowInput,
+        input: TWorkflowInput = cast(TWorkflowInput, EmptyModel()),
         additional_metadata: JSONSerializableMapping = {},
     ) -> CronWorkflows:
         return await self.client.cron.aio_create(
hatchet_sdk/waits.py
CHANGED

@@ -37,7 +37,7 @@ class BaseCondition(BaseModel):
     or_group_id: str = Field(default_factory=generate_or_group_id)
     expression: str | None = None
 
-    def to_pb(self) -> BaseMatchCondition:
+    def to_proto(self) -> BaseMatchCondition:
         return BaseMatchCondition(
             readable_data_key=self.readable_data_key,
             action=convert_python_enum_to_proto(self.action, ProtoAction),  # type: ignore[arg-type]
@@ -51,7 +51,7 @@ class Condition(ABC):
         self.base = base
 
     @abstractmethod
-    def to_pb(
+    def to_proto(
         self,
     ) -> UserEventMatchCondition | ParentOverrideMatchCondition | SleepMatchCondition:
         pass
@@ -67,9 +67,9 @@ class SleepCondition(Condition):
 
         self.duration = duration
 
-    def to_pb(self) -> SleepMatchCondition:
+    def to_proto(self) -> SleepMatchCondition:
         return SleepMatchCondition(
-            base=self.base.to_pb(),
+            base=self.base.to_proto(),
             sleep_for=timedelta_to_expr(self.duration),
         )
 
@@ -86,9 +86,9 @@ class UserEventCondition(Condition):
         self.event_key = event_key
         self.expression = expression
 
-    def to_pb(self) -> UserEventMatchCondition:
+    def to_proto(self) -> UserEventMatchCondition:
         return UserEventMatchCondition(
-            base=self.base.to_pb(),
+            base=self.base.to_proto(),
             user_event_key=self.event_key,
         )
 
@@ -103,9 +103,9 @@ class ParentCondition(Condition):
 
         self.parent = parent
 
-    def to_pb(self) -> ParentOverrideMatchCondition:
+    def to_proto(self) -> ParentOverrideMatchCondition:
         return ParentOverrideMatchCondition(
-            base=self.base.to_pb(),
+            base=self.base.to_proto(),
             parent_readable_id=self.parent.name,
         )
 
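The waits.py change is a pure rename: each condition's to_pb conversion becomes to_proto, matching the to_proto naming introduced in workflow.py above. Caller impact, sketched under the assumption that SleepCondition takes a duration argument as its __init__ suggests:

    from datetime import timedelta

    from hatchet_sdk.waits import SleepCondition

    cond = SleepCondition(duration=timedelta(seconds=30))  # assumed constructor

    proto = cond.to_proto()   # 1.3.1
    # proto = cond.to_pb()    # 1.2.6 spelling, removed in this release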
hatchet_sdk/worker/runner/run_loop_manager.py
CHANGED

@@ -55,17 +55,17 @@ class WorkerActionRunLoopManager:
         self.client = Client(config=self.config, debug=self.debug)
         self.start()
 
-    def start(self
-        k = self.loop.create_task(self.aio_start(
+    def start(self) -> None:
+        k = self.loop.create_task(self.aio_start())  # noqa: F841
 
     async def aio_start(self, retry_count: int = 1) -> None:
         await capture_logs(
             self.client.log_interceptor,
             self.client.event,
             self._async_start,
-        )(
+        )()
 
-    async def _async_start(self
+    async def _async_start(self) -> None:
         logger.info("starting runner...")
         self.loop = asyncio.get_running_loop()
         # needed for graceful termination
hatchet_sdk/worker/runner/runner.py
CHANGED

@@ -16,9 +16,10 @@ from hatchet_sdk.client import Client
 from hatchet_sdk.clients.admin import AdminClient
 from hatchet_sdk.clients.dispatcher.action_listener import Action, ActionType
 from hatchet_sdk.clients.dispatcher.dispatcher import DispatcherClient
-from hatchet_sdk.clients.durable_event_listener import DurableEventListener
-from hatchet_sdk.clients.run_event_listener import RunEventListenerClient
-from hatchet_sdk.clients.workflow_listener import PooledWorkflowRunListener
+from hatchet_sdk.clients.events import EventClient
+from hatchet_sdk.clients.listeners.durable_event_listener import DurableEventListener
+from hatchet_sdk.clients.listeners.run_event_listener import RunEventListenerClient
+from hatchet_sdk.clients.listeners.workflow_listener import PooledWorkflowRunListener
 from hatchet_sdk.config import ClientConfig
 from hatchet_sdk.context.context import Context, DurableContext
 from hatchet_sdk.context.worker_context import WorkerContext
@@ -31,6 +32,7 @@ from hatchet_sdk.contracts.dispatcher_pb2 import (
     STEP_EVENT_TYPE_STARTED,
 )
 from hatchet_sdk.exceptions import NonRetryableException
+from hatchet_sdk.features.runs import RunsClient
 from hatchet_sdk.logger import logger
 from hatchet_sdk.runnables.contextvars import (
     ctx_step_run_id,
@@ -66,7 +68,7 @@ class Runner:
     ):
         # We store the config so we can dynamically create clients for the dispatcher client.
         self.config = config
-
+
         self.slots = slots
         self.tasks: dict[str, asyncio.Task[Any]] = {}  # Store run ids and futures
         self.contexts: dict[str, Context] = {}  # Store run ids and contexts
@@ -82,12 +84,21 @@ class Runner:
         self.killing = False
         self.handle_kill = handle_kill
 
-        # We need to initialize a new admin and dispatcher client *after* we've started the event loop,
-        # otherwise the grpc.aio methods will use a different event loop and we'll get a bunch of errors.
         self.dispatcher_client = DispatcherClient(self.config)
-        self.admin_client = AdminClient(self.config)
         self.workflow_run_event_listener = RunEventListenerClient(self.config)
-        self.
+        self.workflow_listener = PooledWorkflowRunListener(self.config)
+        self.runs_client = RunsClient(
+            config=self.config,
+            workflow_run_event_listener=self.workflow_run_event_listener,
+            workflow_run_listener=self.workflow_listener,
+        )
+        self.admin_client = AdminClient(
+            self.config,
+            self.workflow_listener,
+            self.workflow_run_event_listener,
+            self.runs_client,
+        )
+        self.event_client = EventClient(self.config)
         self.durable_event_listener = DurableEventListener(self.config)
 
         self.worker_context = WorkerContext(
@@ -291,11 +302,11 @@ class Runner:
             action=action,
             dispatcher_client=self.dispatcher_client,
             admin_client=self.admin_client,
-            event_client=self.
+            event_client=self.event_client,
             durable_event_listener=self.durable_event_listener,
             worker=self.worker_context,
             validator_registry=self.validator_registry,
-            runs_client=self.
+            runs_client=self.runs_client,
         )
 
     ## IMPORTANT: Keep this method's signature in sync with the wrapper in the OTel instrumentor
@@ -430,7 +441,7 @@ class Runner:
         # check if thread is still running, if so, print a warning
         if run_id in self.threads:
             thread = self.threads.get(run_id)
-            if thread and self.
+            if thread and self.config.enable_force_kill_sync_threads:
                 self.force_kill_thread(thread)
                 await asyncio.sleep(1)
 
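The Runner now builds one RunEventListenerClient/PooledWorkflowRunListener pair and shares it between the RunsClient and the AdminClient instead of letting each construct its own. A sketch of the equivalent wiring, assuming a ClientConfig can be built from environment-based defaults:

    from hatchet_sdk.clients.admin import AdminClient
    from hatchet_sdk.clients.listeners.run_event_listener import RunEventListenerClient
    from hatchet_sdk.clients.listeners.workflow_listener import PooledWorkflowRunListener
    from hatchet_sdk.config import ClientConfig
    from hatchet_sdk.features.runs import RunsClient

    config = ClientConfig()  # assumption: env-based defaults suffice

    # One shared listener pair, mirroring the Runner's new wiring.
    event_listener = RunEventListenerClient(config)
    workflow_listener = PooledWorkflowRunListener(config)

    runs = RunsClient(
        config=config,
        workflow_run_event_listener=event_listener,
        workflow_run_listener=workflow_listener,
    )
    admin = AdminClient(config, workflow_listener, event_listener, runs)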
hatchet_sdk/worker/worker.py
CHANGED

@@ -11,6 +11,7 @@ from multiprocessing import Queue
 from multiprocessing.process import BaseProcess
 from types import FrameType
 from typing import Any, TypeVar, get_type_hints
+from warnings import warn
 
 from aiohttp import web
 from aiohttp.web_request import Request
@@ -38,6 +39,10 @@ from hatchet_sdk.worker.runner.run_loop_manager import (
 T = TypeVar("T")
 
 
+class LoopAlreadyRunningException(Exception):
+    pass
+
+
 class WorkerStatus(Enum):
     INITIALIZED = 1
     STARTING = 2
@@ -128,24 +133,20 @@ class Worker:
         sys.exit(1)
 
     def register_workflow(self, workflow: BaseWorkflow[Any]) -> None:
-        namespace = self.client.config.namespace
-
-        opts = workflow._get_create_opts(namespace)
-        name = workflow._get_name(namespace)
+        opts = workflow.to_proto()
+        name = workflow.name
 
         try:
             self.client.admin.put_workflow(name, opts)
         except Exception as e:
-            logger.error(
-                f"failed to register workflow: {workflow._get_name(namespace)}"
-            )
+            logger.error(f"failed to register workflow: {workflow.name}")
             logger.error(e)
             sys.exit(1)
 
         for step in workflow.tasks:
-            action_name = workflow._create_action_name(namespace, step)
+            action_name = workflow._create_action_name(step)
 
-            if
+            if step.is_durable:
                 self.has_any_durable = True
                 self.durable_action_registry[action_name] = step
             else:
@@ -167,22 +168,20 @@ class Worker:
     def status(self) -> WorkerStatus:
         return self._status
 
-    def _setup_loop(self
+    def _setup_loop(self) -> None:
         try:
-
-
-
-
+            asyncio.get_running_loop()
+            raise LoopAlreadyRunningException(
+                "An event loop is already running. This worker requires its own dedicated event loop. "
+                "Make sure you're not using asyncio.run() or other loop-creating functions in the main thread."
+            )
         except RuntimeError:
-
-
-            logger.debug("creating new event loop")
-            created_loop = True
+            pass
 
+        logger.debug("Creating new event loop")
+        self.loop = asyncio.new_event_loop()
         asyncio.set_event_loop(self.loop)
 
-        return created_loop
-
     async def _health_check_handler(self, request: Request) -> Response:
         response = HealthCheckResponse(
             status=self.status.name,
@@ -224,7 +223,13 @@ class Worker:
         logger.info(f"healthcheck server running on port {port}")
 
     def start(self, options: WorkerStartOptions = WorkerStartOptions()) -> None:
-
+        if options.loop is not None:
+            warn(
+                "Passing a custom event loop is deprecated and will be removed in the future. This option no longer has any effect",
+                DeprecationWarning,
+            )
+
+        self._setup_loop()
 
         if not self.loop:
             raise RuntimeError("event loop not set, cannot start worker")
@@ -232,11 +237,10 @@ class Worker:
         asyncio.run_coroutine_threadsafe(self._aio_start(), self.loop)
 
         # start the loop and wait until its closed
-
-        self.loop.run_forever()
+        self.loop.run_forever()
 
-
-
+        if self.handle_kill:
+            sys.exit(0)
 
     async def _aio_start(self) -> None:
         main_pid = os.getpid()
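Worker.start() now always creates and owns its event loop: _setup_loop() raises the new LoopAlreadyRunningException when a loop is already running, and the deprecated loop option on WorkerStartOptions is ignored with a DeprecationWarning. A sketch of the resulting contract (worker name is hypothetical):

    from hatchet_sdk import Hatchet

    hatchet = Hatchet()
    worker = hatchet.worker("example-worker")  # hypothetical name

    # Correct in 1.3.1: call start() from a plain synchronous context. The
    # worker creates a fresh loop, runs it until it is stopped, and exits
    # the process afterwards when handle_kill is set.
    worker.start()

    # Starting inside a running loop (e.g. within asyncio.run(...)) now
    # raises LoopAlreadyRunningException instead of reusing that loop.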
hatchet_sdk/workflow_run.py
CHANGED

@@ -1,23 +1,27 @@
+import time
 from typing import Any
 
-from hatchet_sdk.clients.run_event_listener import (
+from hatchet_sdk.clients.listeners.run_event_listener import (
     RunEventListener,
     RunEventListenerClient,
 )
-from hatchet_sdk.clients.workflow_listener import PooledWorkflowRunListener
-from hatchet_sdk.
-from hatchet_sdk.
+from hatchet_sdk.clients.listeners.workflow_listener import PooledWorkflowRunListener
+from hatchet_sdk.clients.rest.models.v1_task_status import V1TaskStatus
+from hatchet_sdk.features.runs import RunsClient
 
 
 class WorkflowRunRef:
     def __init__(
         self,
         workflow_run_id: str,
-
+        workflow_run_listener: PooledWorkflowRunListener,
+        workflow_run_event_listener: RunEventListenerClient,
+        runs_client: RunsClient,
     ):
         self.workflow_run_id = workflow_run_id
-        self.
-        self.workflow_run_event_listener =
+        self.workflow_run_listener = workflow_run_listener
+        self.workflow_run_event_listener = workflow_run_event_listener
+        self.runs_client = runs_client
 
     def __str__(self) -> str:
         return self.workflow_run_id
@@ -26,7 +30,52 @@ class WorkflowRunRef:
         return self.workflow_run_event_listener.stream(self.workflow_run_id)
 
     async def aio_result(self) -> dict[str, Any]:
-        return await self.
+        return await self.workflow_run_listener.aio_result(self.workflow_run_id)
+
+    def _safely_get_action_name(self, action_id: str | None) -> str | None:
+        if not action_id:
+            return None
+
+        try:
+            return action_id.split(":", maxsplit=1)[1]
+        except IndexError:
+            return None
 
     def result(self) -> dict[str, Any]:
-
+        retries = 0
+
+        while True:
+            try:
+                details = self.runs_client.get(self.workflow_run_id)
+            except Exception:
+                retries += 1
+
+                if retries > 10:
+                    raise ValueError(f"Workflow run {self.workflow_run_id} not found")
+
+                time.sleep(1)
+                continue
+
+            match details.run.status:
+                case V1TaskStatus.RUNNING:
+                    time.sleep(1)
+                case V1TaskStatus.FAILED:
+                    raise ValueError(
+                        f"Workflow run failed: {details.run.error_message}"
+                    )
+                case V1TaskStatus.COMPLETED:
+                    return {
+                        name: t.output
+                        for t in details.tasks
+                        if (name := self._safely_get_action_name(t.action_id))
+                    }
+                case V1TaskStatus.QUEUED:
+                    time.sleep(1)
+                case V1TaskStatus.CANCELLED:
+                    raise ValueError(
+                        f"Workflow run cancelled: {details.run.error_message}"
+                    )
+                case _:
+                    raise ValueError(
+                        f"Unknown workflow run status: {details.run.status}"
                    )