hatchet-sdk 1.0.0__py3-none-any.whl → 1.0.1__py3-none-any.whl

This diff shows the changes between publicly released versions of the package as they appear in their respective public registries, and is provided for informational purposes only.

Potentially problematic release: this version of hatchet-sdk might be problematic.

Files changed (73)
  1. hatchet_sdk/__init__.py +32 -16
  2. hatchet_sdk/client.py +25 -63
  3. hatchet_sdk/clients/admin.py +203 -142
  4. hatchet_sdk/clients/dispatcher/action_listener.py +42 -42
  5. hatchet_sdk/clients/dispatcher/dispatcher.py +18 -16
  6. hatchet_sdk/clients/durable_event_listener.py +327 -0
  7. hatchet_sdk/clients/rest/__init__.py +12 -1
  8. hatchet_sdk/clients/rest/api/log_api.py +258 -0
  9. hatchet_sdk/clients/rest/api/task_api.py +32 -6
  10. hatchet_sdk/clients/rest/api/workflow_runs_api.py +626 -0
  11. hatchet_sdk/clients/rest/models/__init__.py +12 -1
  12. hatchet_sdk/clients/rest/models/v1_log_line.py +94 -0
  13. hatchet_sdk/clients/rest/models/v1_log_line_level.py +39 -0
  14. hatchet_sdk/clients/rest/models/v1_log_line_list.py +110 -0
  15. hatchet_sdk/clients/rest/models/v1_task_summary.py +80 -64
  16. hatchet_sdk/clients/rest/models/v1_trigger_workflow_run_request.py +95 -0
  17. hatchet_sdk/clients/rest/models/v1_workflow_run_display_name.py +98 -0
  18. hatchet_sdk/clients/rest/models/v1_workflow_run_display_name_list.py +114 -0
  19. hatchet_sdk/clients/rest/models/workflow_run_shape_item_for_workflow_run_details.py +9 -4
  20. hatchet_sdk/clients/rest/models/workflow_runs_metrics.py +5 -1
  21. hatchet_sdk/clients/run_event_listener.py +0 -1
  22. hatchet_sdk/clients/v1/api_client.py +81 -0
  23. hatchet_sdk/context/context.py +86 -159
  24. hatchet_sdk/contracts/dispatcher_pb2_grpc.py +1 -1
  25. hatchet_sdk/contracts/events_pb2.py +2 -2
  26. hatchet_sdk/contracts/events_pb2_grpc.py +1 -1
  27. hatchet_sdk/contracts/v1/dispatcher_pb2.py +36 -0
  28. hatchet_sdk/contracts/v1/dispatcher_pb2.pyi +38 -0
  29. hatchet_sdk/contracts/v1/dispatcher_pb2_grpc.py +145 -0
  30. hatchet_sdk/contracts/v1/shared/condition_pb2.py +39 -0
  31. hatchet_sdk/contracts/v1/shared/condition_pb2.pyi +72 -0
  32. hatchet_sdk/contracts/v1/shared/condition_pb2_grpc.py +29 -0
  33. hatchet_sdk/contracts/v1/workflows_pb2.py +67 -0
  34. hatchet_sdk/contracts/v1/workflows_pb2.pyi +228 -0
  35. hatchet_sdk/contracts/v1/workflows_pb2_grpc.py +234 -0
  36. hatchet_sdk/contracts/workflows_pb2_grpc.py +1 -1
  37. hatchet_sdk/features/cron.py +91 -121
  38. hatchet_sdk/features/logs.py +16 -0
  39. hatchet_sdk/features/metrics.py +75 -0
  40. hatchet_sdk/features/rate_limits.py +45 -0
  41. hatchet_sdk/features/runs.py +221 -0
  42. hatchet_sdk/features/scheduled.py +114 -131
  43. hatchet_sdk/features/workers.py +41 -0
  44. hatchet_sdk/features/workflows.py +55 -0
  45. hatchet_sdk/hatchet.py +463 -165
  46. hatchet_sdk/opentelemetry/instrumentor.py +8 -13
  47. hatchet_sdk/rate_limit.py +33 -39
  48. hatchet_sdk/runnables/contextvars.py +12 -0
  49. hatchet_sdk/runnables/standalone.py +192 -0
  50. hatchet_sdk/runnables/task.py +144 -0
  51. hatchet_sdk/runnables/types.py +138 -0
  52. hatchet_sdk/runnables/workflow.py +771 -0
  53. hatchet_sdk/utils/aio_utils.py +0 -79
  54. hatchet_sdk/utils/proto_enums.py +0 -7
  55. hatchet_sdk/utils/timedelta_to_expression.py +23 -0
  56. hatchet_sdk/utils/typing.py +2 -2
  57. hatchet_sdk/v0/clients/rest_client.py +9 -0
  58. hatchet_sdk/v0/worker/action_listener_process.py +18 -2
  59. hatchet_sdk/waits.py +120 -0
  60. hatchet_sdk/worker/action_listener_process.py +64 -30
  61. hatchet_sdk/worker/runner/run_loop_manager.py +35 -26
  62. hatchet_sdk/worker/runner/runner.py +72 -55
  63. hatchet_sdk/worker/runner/utils/capture_logs.py +3 -11
  64. hatchet_sdk/worker/worker.py +155 -118
  65. hatchet_sdk/workflow_run.py +4 -5
  66. {hatchet_sdk-1.0.0.dist-info → hatchet_sdk-1.0.1.dist-info}/METADATA +1 -2
  67. {hatchet_sdk-1.0.0.dist-info → hatchet_sdk-1.0.1.dist-info}/RECORD +69 -43
  68. {hatchet_sdk-1.0.0.dist-info → hatchet_sdk-1.0.1.dist-info}/entry_points.txt +2 -0
  69. hatchet_sdk/clients/rest_client.py +0 -636
  70. hatchet_sdk/semver.py +0 -30
  71. hatchet_sdk/worker/runner/utils/error_with_traceback.py +0 -6
  72. hatchet_sdk/workflow.py +0 -527
  73. {hatchet_sdk-1.0.0.dist-info → hatchet_sdk-1.0.1.dist-info}/WHEEL +0 -0
hatchet_sdk/worker/worker.py

@@ -2,6 +2,7 @@ import asyncio
 import multiprocessing
 import multiprocessing.context
 import os
+import re
 import signal
 import sys
 from dataclasses import dataclass, field
@@ -15,12 +16,15 @@ from aiohttp import web
 from aiohttp.web_request import Request
 from aiohttp.web_response import Response
 from prometheus_client import Gauge, generate_latest
+from pydantic import BaseModel

-from hatchet_sdk.client import Client, new_client_raw
+from hatchet_sdk.client import Client
 from hatchet_sdk.clients.dispatcher.action_listener import Action
 from hatchet_sdk.config import ClientConfig
-from hatchet_sdk.contracts.workflows_pb2 import CreateWorkflowVersionOpts
+from hatchet_sdk.contracts.v1.workflows_pb2 import CreateWorkflowVersionRequest
 from hatchet_sdk.logger import logger
+from hatchet_sdk.runnables.task import Task
+from hatchet_sdk.runnables.workflow import BaseWorkflow
 from hatchet_sdk.utils.typing import WorkflowValidator, is_basemodel_subclass
 from hatchet_sdk.worker.action_listener_process import (
     ActionEvent,
@@ -30,10 +34,8 @@ from hatchet_sdk.worker.runner.run_loop_manager import (
     STOP_LOOP_TYPE,
     WorkerActionRunLoopManager,
 )
-from hatchet_sdk.workflow import BaseWorkflow, Step, StepType, Task

 T = TypeVar("T")
-TBaseWorkflow = TypeVar("TBaseWorkflow", bound=BaseWorkflow)


 class WorkerStatus(Enum):
@@ -48,56 +50,76 @@ class WorkerStartOptions:
     loop: asyncio.AbstractEventLoop | None = field(default=None)


+class HealthCheckResponse(BaseModel):
+    status: str
+    name: str
+    slots: int
+    actions: list[str]
+    labels: dict[str, str | int]
+    python_version: str
+
+
 class Worker:
     def __init__(
         self,
         name: str,
-        config: ClientConfig = ClientConfig(),
-        max_runs: int | None = None,
+        config: ClientConfig,
+        slots: int | None = None,
         labels: dict[str, str | int] = {},
         debug: bool = False,
         owned_loop: bool = True,
         handle_kill: bool = True,
+        workflows: list[BaseWorkflow[Any]] = [],
     ) -> None:
-        self.name = name
         self.config = config
-        self.max_runs = max_runs
+        self.name = self.config.namespace + name
+        self.slots = slots
        self.debug = debug
         self.labels = labels
         self.handle_kill = handle_kill
         self.owned_loop = owned_loop

-        self.client: Client
+        self.action_registry: dict[str, Task[Any, Any]] = {}
+        self.durable_action_registry: dict[str, Task[Any, Any]] = {}

-        self.action_registry: dict[str, Step[Any]] = {}
         self.validator_registry: dict[str, WorkflowValidator] = {}

         self.killing: bool = False
         self._status: WorkerStatus

-        self.action_listener_process: BaseProcess
+        self.action_listener_process: BaseProcess | None = None
+        self.durable_action_listener_process: BaseProcess | None = None
+
         self.action_listener_health_check: asyncio.Task[None]
-        self.action_runner: WorkerActionRunLoopManager
+
+        self.action_runner: WorkerActionRunLoopManager | None = None
+        self.durable_action_runner: WorkerActionRunLoopManager | None = None

         self.ctx = multiprocessing.get_context("spawn")

         self.action_queue: "Queue[Action | STOP_LOOP_TYPE]" = self.ctx.Queue()
         self.event_queue: "Queue[ActionEvent]" = self.ctx.Queue()

+        self.durable_action_queue: "Queue[Action | STOP_LOOP_TYPE]" = self.ctx.Queue()
+        self.durable_event_queue: "Queue[ActionEvent]" = self.ctx.Queue()
+
         self.loop: asyncio.AbstractEventLoop

-        self.client = new_client_raw(self.config, self.debug)
-        self.name = self.client.config.namespace + self.name
+        self.client = Client(config=self.config, debug=self.debug)

         self._setup_signal_handlers()

         self.worker_status_gauge = Gauge(
-            "hatchet_worker_status", "Current status of the Hatchet worker"
+            "hatchet_worker_status_" + re.sub(r"\W+", "", name),
+            "Current status of the Hatchet worker",
         )

-    def register_workflow_from_opts(
-        self, name: str, opts: CreateWorkflowVersionOpts
-    ) -> None:
+        self.has_any_durable = False
+        self.has_any_non_durable = False
+
+        self.register_workflows(workflows)
+
+    def register_workflow_from_opts(self, opts: CreateWorkflowVersionRequest) -> None:
         try:
             self.client.admin.put_workflow(opts.name, opts)
         except Exception as e:
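
The constructor is the most visible API change in this hunk: `config` is now required (the implicit `ClientConfig()` default is gone), `max_runs` is renamed to `slots`, the worker name is prefixed with `config.namespace`, and workflows can be passed directly via the new `workflows` parameter, which `__init__` registers immediately. A minimal sketch of constructing a worker against the new signature; it assumes `ClientConfig` can still be built from environment settings and uses a hypothetical `my_workflow` object defined elsewhere:

    from hatchet_sdk.config import ClientConfig
    from hatchet_sdk.worker.worker import Worker

    # `my_workflow` is a placeholder for a BaseWorkflow instance defined elsewhere.
    worker = Worker(
        name="example-worker",
        config=ClientConfig(),      # 1.0.1: required, no longer defaulted
        slots=5,                    # 1.0.1: renamed from max_runs
        workflows=[my_workflow],    # 1.0.1: registered eagerly in __init__
    )
    worker.start()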
@@ -105,21 +127,31 @@ class Worker:
             logger.error(e)
             sys.exit(1)

-    def register_workflow(self, workflow: TBaseWorkflow) -> None:
+    def register_workflow(self, workflow: BaseWorkflow[Any]) -> None:
         namespace = self.client.config.namespace

+        opts = workflow._get_create_opts(namespace)
+        name = workflow._get_name(namespace)
+
         try:
-            self.client.admin.put_workflow(
-                workflow.get_name(namespace), workflow.get_create_opts(namespace)
-            )
+            self.client.admin.put_workflow(name, opts)
         except Exception as e:
-            logger.error(f"failed to register workflow: {workflow.get_name(namespace)}")
+            logger.error(
+                f"failed to register workflow: {workflow._get_name(namespace)}"
+            )
             logger.error(e)
             sys.exit(1)

-        for step in workflow.steps:
-            action_name = workflow.create_action_name(namespace, step)
-            self.action_registry[action_name] = step
+        for step in workflow.tasks:
+            action_name = workflow._create_action_name(namespace, step)
+
+            if workflow.is_durable:
+                self.has_any_durable = True
+                self.durable_action_registry[action_name] = step
+            else:
+                self.has_any_non_durable = True
+                self.action_registry[action_name] = step
+
             return_type = get_type_hints(step.fn).get("return")

             self.validator_registry[action_name] = WorkflowValidator(
@@ -127,67 +159,55 @@
                 step_output=return_type if is_basemodel_subclass(return_type) else None,
             )

-    def register_function(self, function: Task[Any, Any]) -> None:
-        from hatchet_sdk.workflow import BaseWorkflow
-
-        declaration = function.hatchet.declare_workflow(
-            **function.workflow_config.model_dump()
-        )
-
-        class Workflow(BaseWorkflow):
-            config = declaration.config
-
-            @property
-            def default_steps(self) -> list[Step[Any]]:
-                return [function.step]
-
-            @property
-            def on_failure_steps(self) -> list[Step[Any]]:
-                if not function.on_failure_step:
-                    return []
-
-                step = function.on_failure_step.step
-                step.type = StepType.ON_FAILURE
-
-                return [step]
-
-        self.register_workflow(Workflow())
+    def register_workflows(self, workflows: list[BaseWorkflow[Any]]) -> None:
+        for workflow in workflows:
+            self.register_workflow(workflow)

+    @property
     def status(self) -> WorkerStatus:
         return self._status

-    def setup_loop(self, loop: asyncio.AbstractEventLoop | None = None) -> bool:
+    def _setup_loop(self, loop: asyncio.AbstractEventLoop | None = None) -> bool:
         try:
-            loop = loop or asyncio.get_running_loop()
-            self.loop = loop
-            created_loop = False
+            self.loop = loop or asyncio.get_running_loop()
             logger.debug("using existing event loop")
-            return created_loop
+
+            created_loop = False
         except RuntimeError:
             self.loop = asyncio.new_event_loop()
+
             logger.debug("creating new event loop")
-            asyncio.set_event_loop(self.loop)
             created_loop = True
-            return created_loop

-    async def health_check_handler(self, request: Request) -> Response:
-        status = self.status()
+        asyncio.set_event_loop(self.loop)
+
+        return created_loop
+
+    async def _health_check_handler(self, request: Request) -> Response:
+        response = HealthCheckResponse(
+            status=self.status.name,
+            name=self.name,
+            slots=self.slots or 0,
+            actions=list(self.action_registry.keys()),
+            labels=self.labels,
+            python_version=sys.version,
+        ).model_dump()

-        return web.json_response({"status": status.name})
+        return web.json_response(response)

-    async def metrics_handler(self, request: Request) -> Response:
-        self.worker_status_gauge.set(1 if self.status() == WorkerStatus.HEALTHY else 0)
+    async def _metrics_handler(self, request: Request) -> Response:
+        self.worker_status_gauge.set(1 if self.status == WorkerStatus.HEALTHY else 0)

         return web.Response(body=generate_latest(), content_type="text/plain")

-    async def start_health_server(self) -> None:
+    async def _start_health_server(self) -> None:
         port = self.config.healthcheck.port

         app = web.Application()
         app.add_routes(
             [
-                web.get("/health", self.health_check_handler),
-                web.get("/metrics", self.metrics_handler),
+                web.get("/health", self._health_check_handler),
+                web.get("/metrics", self._metrics_handler),
             ]
         )

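With the handler change above, `/health` now returns the full `HealthCheckResponse` payload rather than a bare status string. A sketch of polling it, assuming the healthcheck server is enabled and listening on port 8001 (the actual port comes from `config.healthcheck.port`, which is not part of this diff):

    import json
    import urllib.request

    # Port 8001 is an assumption for illustration; use config.healthcheck.port.
    with urllib.request.urlopen("http://localhost:8001/health") as resp:
        payload = json.load(resp)

    # Expected keys, per the HealthCheckResponse model introduced in this release:
    # status, name, slots, actions, labels, python_version
    print(payload["status"], payload["slots"], payload["actions"])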
@@ -204,11 +224,9 @@ class Worker:
         logger.info(f"healthcheck server running on port {port}")

     def start(self, options: WorkerStartOptions = WorkerStartOptions()) -> None:
-        self.owned_loop = self.setup_loop(options.loop)
+        self.owned_loop = self._setup_loop(options.loop)

-        asyncio.run_coroutine_threadsafe(
-            self.aio_start(options, _from_start=True), self.loop
-        )
+        asyncio.run_coroutine_threadsafe(self._aio_start(), self.loop)

         # start the loop and wait until its closed
         if self.owned_loop:
@@ -217,35 +235,35 @@
         if self.handle_kill:
             sys.exit(0)

-    ## Start methods
-    async def aio_start(
-        self,
-        options: WorkerStartOptions = WorkerStartOptions(),
-        _from_start: bool = False,
-    ) -> None:
+    async def _aio_start(self) -> None:
         main_pid = os.getpid()
+
         logger.info("------------------------------------------")
         logger.info("STARTING HATCHET...")
         logger.debug(f"worker runtime starting on PID: {main_pid}")

         self._status = WorkerStatus.STARTING

-        if len(self.action_registry.keys()) == 0:
-            logger.error(
+        if (
+            len(self.action_registry.keys()) == 0
+            and len(self.durable_action_registry.keys()) == 0
+        ):
+            raise ValueError(
                 "no actions registered, register workflows or actions before starting worker"
             )
-            return None
-
-        # non blocking setup
-        if not _from_start:
-            self.setup_loop(options.loop)

         if self.config.healthcheck.enabled:
-            await self.start_health_server()
+            await self._start_health_server()

-        self.action_listener_process = self._start_listener()
+        if self.has_any_non_durable:
+            self.action_listener_process = self._start_action_listener(is_durable=False)
+            self.action_runner = self._run_action_runner(is_durable=False)

-        self.action_runner = self._run_action_runner()
+        if self.has_any_durable:
+            self.durable_action_listener_process = self._start_action_listener(
+                is_durable=True
+            )
+            self.durable_action_runner = self._run_action_runner(is_durable=True)

         self.action_listener_health_check = self.loop.create_task(
             self._check_listener_health()
@@ -253,35 +271,39 @@

         await self.action_listener_health_check

-    def _run_action_runner(self) -> WorkerActionRunLoopManager:
+    def _run_action_runner(self, is_durable: bool) -> WorkerActionRunLoopManager:
         # Retrieve the shared queue
         return WorkerActionRunLoopManager(
-            self.name,
-            self.action_registry,
+            self.name + ("_durable" if is_durable else ""),
+            self.durable_action_registry if is_durable else self.action_registry,
             self.validator_registry,
-            self.max_runs,
+            1_000 if is_durable else self.slots,
             self.config,
-            self.action_queue,
-            self.event_queue,
+            self.durable_action_queue if is_durable else self.action_queue,
+            self.durable_event_queue if is_durable else self.event_queue,
             self.loop,
             self.handle_kill,
             self.client.debug,
             self.labels,
         )

-    def _start_listener(self) -> multiprocessing.context.SpawnProcess:
-        action_list = [str(key) for key in self.action_registry.keys()]
-
+    def _start_action_listener(
+        self, is_durable: bool
+    ) -> multiprocessing.context.SpawnProcess:
         try:
             process = self.ctx.Process(
                 target=worker_action_listener_process,
                 args=(
-                    self.name,
-                    action_list,
-                    self.max_runs,
+                    self.name + ("_durable" if is_durable else ""),
+                    (
+                        list(self.durable_action_registry.keys())
+                        if is_durable
+                        else list(self.action_registry.keys())
+                    ),
+                    1_000 if is_durable else self.slots,
                     self.config,
-                    self.action_queue,
-                    self.event_queue,
+                    self.durable_action_queue if is_durable else self.action_queue,
+                    self.durable_event_queue if is_durable else self.event_queue,
                     self.handle_kill,
                     self.client.debug,
                     self.labels,
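
The listener process and run-loop manager are now created per lane: durable tasks get a pair whose name carries a `_durable` suffix and whose slot count is pinned at 1,000, while non-durable tasks keep the user-configured `slots` and the original queues. A small sketch restating that selection logic outside the class, purely for illustration (not a public API):

    def runner_identity(
        name: str, slots: int | None, is_durable: bool
    ) -> tuple[str, int | None]:
        """Mirror the per-lane naming and slot selection shown in this hunk."""
        suffix = "_durable" if is_durable else ""
        return name + suffix, 1_000 if is_durable else slots

    assert runner_identity("my-worker", 5, is_durable=False) == ("my-worker", 5)
    assert runner_identity("my-worker", 5, is_durable=True) == ("my-worker_durable", 1_000)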
@@ -300,8 +322,13 @@
         try:
             while not self.killing:
                 if (
-                    self.action_listener_process is None
-                    or not self.action_listener_process.is_alive()
+                    not self.action_listener_process
+                    and not self.durable_action_listener_process
+                ) or (
+                    self.action_listener_process
+                    and self.durable_action_listener_process
+                    and not self.action_listener_process.is_alive()
+                    and not self.durable_action_listener_process.is_alive()
                 ):
                     logger.debug("child action listener process killed...")
                     self._status = WorkerStatus.UNHEALTHY
@@ -314,7 +341,6 @@
         except Exception as e:
             logger.error(f"error checking listener health: {e}")

-    ## Cleanup methods
     def _setup_signal_handlers(self) -> None:
         signal.signal(signal.SIGTERM, self._handle_exit_signal)
         signal.signal(signal.SIGINT, self._handle_exit_signal)
@@ -327,52 +353,63 @@

     def _handle_force_quit_signal(self, signum: int, frame: FrameType | None) -> None:
         logger.info("received SIGQUIT...")
-        self.exit_forcefully()
+        self.loop.create_task(self._exit_forcefully())

-    async def close(self) -> None:
+    async def _close(self) -> None:
         logger.info(f"closing worker '{self.name}'...")
         self.killing = True
-        # self.action_queue.close()
-        # self.event_queue.close()

         if self.action_runner is not None:
             self.action_runner.cleanup()

+        if self.durable_action_runner is not None:
+            self.durable_action_runner.cleanup()
+
         await self.action_listener_health_check

     async def exit_gracefully(self) -> None:
         logger.debug(f"gracefully stopping worker: {self.name}")

         if self.killing:
-            return self.exit_forcefully()
+            return await self._exit_forcefully()

         self.killing = True

-        await self.action_runner.wait_for_tasks()
+        if self.action_runner:
+            await self.action_runner.wait_for_tasks()
+            await self.action_runner.exit_gracefully()

-        await self.action_runner.exit_gracefully()
+        if self.durable_action_runner:
+            await self.durable_action_runner.wait_for_tasks()
+            await self.durable_action_runner.exit_gracefully()

         if self.action_listener_process and self.action_listener_process.is_alive():
             self.action_listener_process.kill()

-        await self.close()
+        if (
+            self.durable_action_listener_process
+            and self.durable_action_listener_process.is_alive()
+        ):
+            self.durable_action_listener_process.kill()
+
+        await self._close()
         if self.loop and self.owned_loop:
             self.loop.stop()

         logger.info("👋")

-    def exit_forcefully(self) -> None:
+    async def _exit_forcefully(self) -> None:
         self.killing = True

         logger.debug(f"forcefully stopping worker: {self.name}")

-        ## TODO: `self.close` needs to be awaited / used
-        self.close()  # type: ignore[unused-coroutine]
+        await self._close()

         if self.action_listener_process:
-            self.action_listener_process.kill()  # Forcefully kill the process
+            self.action_listener_process.kill()
+
+        if self.durable_action_listener_process:
+            self.durable_action_listener_process.kill()

         logger.info("👋")
-        sys.exit(
-            1
-        )  # Exit immediately TODO - should we exit with 1 here, there may be other workers to cleanup
+        sys.exit(1)
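
The forceful-exit path also fixes the bug flagged by the removed TODO: in 1.0.0, `exit_forcefully` called the async `close()` without awaiting it, so the coroutine was created but never ran and cleanup was silently skipped. In 1.0.1 the method is itself a coroutine and awaits `_close()`. A minimal, generic illustration of the pitfall (not hatchet-sdk code):

    import asyncio

    class Example:
        async def close(self) -> None:
            print("cleaning up")

    def old_style(ex: Example) -> None:
        # Creates a coroutine object but never runs it; cleanup is skipped and
        # Python warns "RuntimeWarning: coroutine ... was never awaited".
        ex.close()

    async def new_style(ex: Example) -> None:
        await ex.close()  # 1.0.1 behaviour: cleanup actually runs

    asyncio.run(new_style(Example()))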
hatchet_sdk/workflow_run.py

@@ -1,5 +1,5 @@
 import asyncio
-from typing import Any, Coroutine
+from typing import Any

 from hatchet_sdk.clients.run_event_listener import (
     RunEventListener,
@@ -10,8 +10,6 @@ from hatchet_sdk.utils.aio_utils import get_active_event_loop


 class WorkflowRunRef:
-    workflow_run_id: str
-
     def __init__(
         self,
         workflow_run_id: str,
@@ -28,11 +26,12 @@
     def stream(self) -> RunEventListener:
         return self.workflow_run_event_listener.stream(self.workflow_run_id)

-    def aio_result(self) -> Coroutine[None, None, dict[str, Any]]:
-        return self.workflow_listener.result(self.workflow_run_id)
+    async def aio_result(self) -> dict[str, Any]:
+        return await self.workflow_listener.result(self.workflow_run_id)

     def result(self) -> dict[str, Any]:
         coro = self.workflow_listener.result(self.workflow_run_id)
+
         loop = get_active_event_loop()

         if loop is None:
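
`aio_result` is now a real coroutine function (`async def`) instead of a plain method returning a `Coroutine`; call sites that already awaited it are unchanged, but the method now behaves as expected under `inspect.iscoroutinefunction` and type checkers. A sketch of the two ways to consume a run reference, assuming `ref` is a `WorkflowRunRef` obtained from triggering a run elsewhere:

    from typing import Any

    from hatchet_sdk.workflow_run import WorkflowRunRef

    async def wait_for_run(ref: WorkflowRunRef) -> dict[str, Any]:
        # Async callers: await the result as before.
        return await ref.aio_result()

    def wait_for_run_blocking(ref: WorkflowRunRef) -> dict[str, Any]:
        # Sync callers: result() still blocks, driving the listener coroutine internally.
        return ref.result()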
{hatchet_sdk-1.0.0.dist-info → hatchet_sdk-1.0.1.dist-info}/METADATA

@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: hatchet-sdk
-Version: 1.0.0
+Version: 1.0.1
 Summary:
 Author: Alexander Belanger
 Author-email: alexander@hatchet.run
@@ -30,7 +30,6 @@ Requires-Dist: protobuf (>=5.29.1,<6.0.0)
 Requires-Dist: pydantic (>=2.6.3,<3.0.0)
 Requires-Dist: pydantic-settings (>=2.7.1,<3.0.0)
 Requires-Dist: python-dateutil (>=2.9.0.post0,<3.0.0)
-Requires-Dist: python-dotenv (>=1.0.0,<2.0.0)
 Requires-Dist: pyyaml (>=6.0.1,<7.0.0)
 Requires-Dist: tenacity (>=8.4.1)
 Requires-Dist: urllib3 (>=1.26.20)