hatchet-sdk 1.0.1__py3-none-any.whl → 1.0.3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

This release has been flagged as potentially problematic.


This version of hatchet-sdk might be problematic; see the registry's advisory page for more details.

@@ -36,6 +36,7 @@ class ScheduleTriggerWorkflowOptions(BaseModel):
36
36
  child_index: int | None = None
37
37
  child_key: str | None = None
38
38
  namespace: str | None = None
39
+ additional_metadata: JSONSerializableMapping = Field(default_factory=dict)
39
40
 
40
41
 
41
42
  class TriggerWorkflowOptions(ScheduleTriggerWorkflowOptions):
@@ -153,7 +154,11 @@ class AdminClient:
153
154
  name=name,
154
155
  schedules=[self._parse_schedule(schedule) for schedule in schedules],
155
156
  input=json.dumps(input),
156
- **options.model_dump(),
157
+ parent_id=options.parent_id,
158
+ parent_step_run_id=options.parent_step_run_id,
159
+ child_index=options.child_index,
160
+ child_key=options.child_key,
161
+ additional_metadata=json.dumps(options.additional_metadata),
157
162
  )
158
163
 
159
164
  @tenacity_retry
@@ -316,6 +321,7 @@ class AdminClient:
316
321
  except (grpc.RpcError, grpc.aio.AioRpcError) as e:
317
322
  if e.code() == grpc.StatusCode.ALREADY_EXISTS:
318
323
  raise DedupeViolationErr(e.details())
324
+ raise e
319
325
 
320
326
  return WorkflowRunRef(
321
327
  workflow_run_id=resp.workflow_run_id,
@@ -277,9 +277,7 @@ class ActionListener:
277
277
  )
278
278
 
279
279
  t.cancel()
280
-
281
- if listener:
282
- listener.cancel()
280
+ listener.cancel()
283
281
 
284
282
  break
285
283
 
@@ -300,7 +298,9 @@ class ActionListener:
300
298
  )
301
299
  )
302
300
  except (ValueError, json.JSONDecodeError) as e:
303
- raise ValueError(f"Error decoding payload: {e}")
301
+ logger.error(f"Error decoding payload: {e}")
302
+
303
+ action_payload = ActionPayload()
304
304
 
305
305
  action = Action(
306
306
  tenant_id=assigned_action.tenantId,
@@ -421,7 +421,7 @@ class ActionListener:
421
421
  except Exception as e:
422
422
  logger.error(f"failed to unregister: {e}")
423
423
 
424
- if self.interrupt:
424
+ if self.interrupt: # type: ignore[truthy-bool]
425
425
  self.interrupt.set()
426
426
 
427
427
  def unregister(self) -> WorkerUnsubscribeRequest:
@@ -159,8 +159,8 @@ class DurableEventListener:
159
159
  )
160
160
 
161
161
  t.cancel()
162
- if self.listener:
163
- self.listener.cancel()
162
+ self.listener.cancel()
163
+
164
164
  await asyncio.sleep(
165
165
  DEFAULT_DURABLE_EVENT_LISTENER_RETRY_INTERVAL
166
166
  )
@@ -237,6 +237,8 @@ class DurableEventListener:
237
237
  del self.events[subscription_id]
238
238
 
239
239
  async def subscribe(self, task_id: str, signal_key: str) -> DurableEvent:
240
+ subscription_id: int | None = None
241
+
240
242
  try:
241
243
  # create a new subscription id, place a mutex on the counter
242
244
  async with self.subscription_counter_lock:
@@ -275,7 +277,8 @@ class DurableEventListener:
275
277
  except asyncio.CancelledError:
276
278
  raise
277
279
  finally:
278
- self.cleanup_subscription(subscription_id)
280
+ if subscription_id:
281
+ self.cleanup_subscription(subscription_id)
279
282
 
280
283
  async def _retry_subscribe(
281
284
  self,
@@ -86,8 +86,7 @@ class EventClient:
86
86
  namespaced_event_key = namespace + event_key
87
87
 
88
88
  try:
89
- meta = options.additional_metadata
90
- meta_bytes = None if meta is None else json.dumps(meta)
89
+ meta_bytes = json.dumps(options.additional_metadata)
91
90
  except Exception as e:
92
91
  raise ValueError(f"Error encoding meta: {e}")
93
92
 
@@ -1,5 +1,4 @@
1
1
  import asyncio
2
- import json
3
2
  from enum import Enum
4
3
  from typing import Any, AsyncGenerator, Callable, Generator, cast
5
4
 
@@ -128,18 +127,10 @@ class RunEventListener:
128
127
  raise Exception(
129
128
  f"Unknown event type: {workflow_event.eventType}"
130
129
  )
131
- payload = None
132
130
 
133
- try:
134
- if workflow_event.eventPayload:
135
- payload = json.loads(workflow_event.eventPayload)
136
- except Exception:
137
- payload = workflow_event.eventPayload
138
- pass
139
-
140
- assert isinstance(payload, str)
141
-
142
- yield StepRunEvent(type=eventType, payload=payload)
131
+ yield StepRunEvent(
132
+ type=eventType, payload=workflow_event.eventPayload
133
+ )
143
134
  elif workflow_event.resourceType == RESOURCE_TYPE_WORKFLOW_RUN:
144
135
  if workflow_event.eventType in step_run_event_type_mapping:
145
136
  workflowRunEventType = step_run_event_type_mapping[
@@ -150,17 +141,10 @@ class RunEventListener:
150
141
  f"Unknown event type: {workflow_event.eventType}"
151
142
  )
152
143
 
153
- payload = None
154
-
155
- try:
156
- if workflow_event.eventPayload:
157
- payload = json.loads(workflow_event.eventPayload)
158
- except Exception:
159
- pass
160
-
161
- assert isinstance(payload, str)
162
-
163
- yield StepRunEvent(type=workflowRunEventType, payload=payload)
144
+ yield StepRunEvent(
145
+ type=workflowRunEventType,
146
+ payload=workflow_event.eventPayload,
147
+ )
164
148
 
165
149
  if workflow_event.hangup:
166
150
  listener = None
@@ -236,9 +220,6 @@ class RunEventListenerClient:
236
220
  return self.stream(workflow_run_id)
237
221
 
238
222
  def stream(self, workflow_run_id: str) -> RunEventListener:
239
- if not isinstance(workflow_run_id, str) and hasattr(workflow_run_id, "__str__"):
240
- workflow_run_id = str(workflow_run_id)
241
-
242
223
  if not self.client:
243
224
  aio_conn = new_conn(self.config, True)
244
225
  self.client = DispatcherStub(aio_conn) # type: ignore[no-untyped-call]
@@ -132,8 +132,8 @@ class PooledWorkflowRunListener:
132
132
  )
133
133
 
134
134
  t.cancel()
135
- if self.listener:
136
- self.listener.cancel()
135
+ self.listener.cancel()
136
+
137
137
  await asyncio.sleep(
138
138
  DEFAULT_WORKFLOW_LISTENER_RETRY_INTERVAL
139
139
  )
@@ -203,6 +203,8 @@ class PooledWorkflowRunListener:
203
203
  del self.events[subscription_id]
204
204
 
205
205
  async def subscribe(self, workflow_run_id: str) -> WorkflowRunEvent:
206
+ subscription_id: int | None = None
207
+
206
208
  try:
207
209
  # create a new subscription id, place a mutex on the counter
208
210
  await self.subscription_counter_lock.acquire()
@@ -234,7 +236,8 @@ class PooledWorkflowRunListener:
234
236
  except asyncio.CancelledError:
235
237
  raise
236
238
  finally:
237
- self.cleanup_subscription(subscription_id)
239
+ if subscription_id:
240
+ self.cleanup_subscription(subscription_id)
238
241
 
239
242
  async def result(self, workflow_run_id: str) -> dict[str, Any]:
240
243
  from hatchet_sdk.clients.admin import DedupeViolationErr
@@ -113,6 +113,14 @@ class Context:
113
113
 
114
114
  return parent_step_data
115
115
 
116
+ def aio_task_output(self, task: "Task[TWorkflowInput, R]") -> "R":
117
+ if task.is_async_function:
118
+ return self.task_output(task)
119
+
120
+ raise ValueError(
121
+ f"Task '{task.name}' is not an async function. Use `task_output` instead."
122
+ )
123
+
116
124
  @property
117
125
  def was_triggered_by_event(self) -> bool:
118
126
  return self.data.triggered_by == "event"
@@ -157,6 +165,7 @@ class Context:
157
165
 
158
166
  def handle_result(future: Future[tuple[bool, Exception | None]]) -> None:
159
167
  success, exception = future.result()
168
+
160
169
  if not success and exception:
161
170
  if raise_on_error:
162
171
  raise exception
@@ -123,6 +123,7 @@ class Runner:
123
123
 
124
124
  errored = False
125
125
  cancelled = task.cancelled()
126
+ output = None
126
127
 
127
128
  # Get the output from the future
128
129
  try:
@@ -167,6 +168,7 @@ class Runner:
167
168
 
168
169
  errored = False
169
170
  cancelled = task.cancelled()
171
+ output = None
170
172
 
171
173
  # Get the output from the future
172
174
  try:
@@ -204,12 +206,9 @@ class Runner:
204
206
  def thread_action_func(
205
207
  self, ctx: Context, task: Task[TWorkflowInput, R], action: Action
206
208
  ) -> R:
207
- if action.step_run_id is not None and action.step_run_id != "":
209
+ if action.step_run_id:
208
210
  self.threads[action.step_run_id] = current_thread()
209
- elif (
210
- action.get_group_key_run_id is not None
211
- and action.get_group_key_run_id != ""
212
- ):
211
+ elif action.get_group_key_run_id:
213
212
  self.threads[action.get_group_key_run_id] = current_thread()
214
213
 
215
214
  return task.call(ctx)
@@ -63,9 +63,6 @@ def capture_logs(
63
63
  ) -> Callable[P, Awaitable[T]]:
64
64
  @functools.wraps(func)
65
65
  async def wrapper(*args: P.args, **kwargs: P.kwargs) -> T:
66
- if not logger:
67
- raise Exception("No logger configured on client")
68
-
69
66
  log_stream = StringIO()
70
67
  custom_handler = CustomLogHandler(event_client, log_stream)
71
68
  custom_handler.setLevel(logging.INFO)
@@ -103,7 +103,7 @@ class Worker:
103
103
  self.durable_action_queue: "Queue[Action | STOP_LOOP_TYPE]" = self.ctx.Queue()
104
104
  self.durable_event_queue: "Queue[ActionEvent]" = self.ctx.Queue()
105
105
 
106
- self.loop: asyncio.AbstractEventLoop
106
+ self.loop: asyncio.AbstractEventLoop | None
107
107
 
108
108
  self.client = Client(config=self.config, debug=self.debug)
109
109
 
@@ -226,6 +226,9 @@ class Worker:
226
226
  def start(self, options: WorkerStartOptions = WorkerStartOptions()) -> None:
227
227
  self.owned_loop = self._setup_loop(options.loop)
228
228
 
229
+ if not self.loop:
230
+ raise RuntimeError("event loop not set, cannot start worker")
231
+
229
232
  asyncio.run_coroutine_threadsafe(self._aio_start(), self.loop)
230
233
 
231
234
  # start the loop and wait until its closed
@@ -265,27 +268,31 @@ class Worker:
265
268
  )
266
269
  self.durable_action_runner = self._run_action_runner(is_durable=True)
267
270
 
268
- self.action_listener_health_check = self.loop.create_task(
269
- self._check_listener_health()
270
- )
271
+ if self.loop:
272
+ self.action_listener_health_check = self.loop.create_task(
273
+ self._check_listener_health()
274
+ )
271
275
 
272
- await self.action_listener_health_check
276
+ await self.action_listener_health_check
273
277
 
274
278
  def _run_action_runner(self, is_durable: bool) -> WorkerActionRunLoopManager:
275
279
  # Retrieve the shared queue
276
- return WorkerActionRunLoopManager(
277
- self.name + ("_durable" if is_durable else ""),
278
- self.durable_action_registry if is_durable else self.action_registry,
279
- self.validator_registry,
280
- 1_000 if is_durable else self.slots,
281
- self.config,
282
- self.durable_action_queue if is_durable else self.action_queue,
283
- self.durable_event_queue if is_durable else self.event_queue,
284
- self.loop,
285
- self.handle_kill,
286
- self.client.debug,
287
- self.labels,
288
- )
280
+ if self.loop:
281
+ return WorkerActionRunLoopManager(
282
+ self.name + ("_durable" if is_durable else ""),
283
+ self.durable_action_registry if is_durable else self.action_registry,
284
+ self.validator_registry,
285
+ 1_000 if is_durable else self.slots,
286
+ self.config,
287
+ self.durable_action_queue if is_durable else self.action_queue,
288
+ self.durable_event_queue if is_durable else self.event_queue,
289
+ self.loop,
290
+ self.handle_kill,
291
+ self.client.debug,
292
+ self.labels,
293
+ )
294
+
295
+ raise RuntimeError("event loop not set, cannot start action runner")
289
296
 
290
297
  def _start_action_listener(
291
298
  self, is_durable: bool
@@ -332,7 +339,7 @@ class Worker:
332
339
  ):
333
340
  logger.debug("child action listener process killed...")
334
341
  self._status = WorkerStatus.UNHEALTHY
335
- if not self.killing:
342
+ if self.loop:
336
343
  self.loop.create_task(self.exit_gracefully())
337
344
  break
338
345
  else:
@@ -349,11 +356,13 @@ class Worker:
349
356
  def _handle_exit_signal(self, signum: int, frame: FrameType | None) -> None:
350
357
  sig_name = "SIGTERM" if signum == signal.SIGTERM else "SIGINT"
351
358
  logger.info(f"received signal {sig_name}...")
352
- self.loop.create_task(self.exit_gracefully())
359
+ if self.loop:
360
+ self.loop.create_task(self.exit_gracefully())
353
361
 
354
362
  def _handle_force_quit_signal(self, signum: int, frame: FrameType | None) -> None:
355
363
  logger.info("received SIGQUIT...")
356
- self.loop.create_task(self._exit_forcefully())
364
+ if self.loop:
365
+ self.loop.create_task(self._exit_forcefully())
357
366
 
358
367
  async def _close(self) -> None:
359
368
  logger.info(f"closing worker '{self.name}'...")
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.1
2
2
  Name: hatchet-sdk
3
- Version: 1.0.1
3
+ Version: 1.0.3
4
4
  Summary:
5
5
  Author: Alexander Belanger
6
6
  Author-email: alexander@hatchet.run
@@ -29,6 +29,7 @@ Requires-Dist: prometheus-client (>=0.21.1,<0.22.0)
29
29
  Requires-Dist: protobuf (>=5.29.1,<6.0.0)
30
30
  Requires-Dist: pydantic (>=2.6.3,<3.0.0)
31
31
  Requires-Dist: pydantic-settings (>=2.7.1,<3.0.0)
32
+ Requires-Dist: pytest-timeout (>=2.3.1,<3.0.0)
32
33
  Requires-Dist: python-dateutil (>=2.9.0.post0,<3.0.0)
33
34
  Requires-Dist: pyyaml (>=6.0.1,<7.0.0)
34
35
  Requires-Dist: tenacity (>=8.4.1)
@@ -1,11 +1,11 @@
1
1
  hatchet_sdk/__init__.py,sha256=o_06wLLKCKRq4uQuCF62yDRb8hTQYYcqPC3FIDNHxuQ,10002
2
2
  hatchet_sdk/client.py,sha256=nfLv2jzv7XlL9VzQSnfyCdtK4ew0zanUgsoXC0KEtY0,2255
3
- hatchet_sdk/clients/admin.py,sha256=LtSv6y4PZ2Tkz8Z-JlaDd55wSLTNBTa7rUhPx6DBsp8,16316
4
- hatchet_sdk/clients/dispatcher/action_listener.py,sha256=mQI2qQI_tZLEezTO1wbGcAsjPRQYRe-KSHurJUESKeA,16554
3
+ hatchet_sdk/clients/admin.py,sha256=C-a1kkF2OCR4LOj489uIg_vHAEPiiG3LjyKxU7Y085w,16638
4
+ hatchet_sdk/clients/dispatcher/action_listener.py,sha256=8enip982Fkb_8blco1ixahmuaKwxsahx06wDWte_4MU,16595
5
5
  hatchet_sdk/clients/dispatcher/dispatcher.py,sha256=GMb4ljE-gSTf5RkpmRboPXCMncJKAJ6KKERGcf1nz48,6993
6
- hatchet_sdk/clients/durable_event_listener.py,sha256=ZiJOGlI7NKmN6Oev1UGF7wETTtvGxpmgO7YS3tDjYMk,11823
6
+ hatchet_sdk/clients/durable_event_listener.py,sha256=XzXECjulUWSsu6wiWYKogqKPGa1gwcrKFtr2SJ4xSok,11850
7
7
  hatchet_sdk/clients/event_ts.py,sha256=tbWLz3NXrwMyIoEm0Q2TfitF5cNEpo3k42jWKciOK8A,1082
8
- hatchet_sdk/clients/events.py,sha256=tZ-xuS9wAcT4H8oVk9dLOzww_UeHbgMJQUmSFuh3VPM,5615
8
+ hatchet_sdk/clients/events.py,sha256=yw8Goyh0KUk9bSSfTXjaGhF4gtxOG1T0wZOmYBbqPpI,5565
9
9
  hatchet_sdk/clients/rest/__init__.py,sha256=Bee4HPFiMGDHx5xbHkxxVbLBz_mDgSZUqh-nIhvsD1k,16511
10
10
  hatchet_sdk/clients/rest/api/__init__.py,sha256=XWlkH9iwpQvJHDqKe7kWl3MUzcTOaH-JiFZbki_fg_U,1200
11
11
  hatchet_sdk/clients/rest/api/api_token_api.py,sha256=C10FEIHHGBpwq-bIKkrBhvPlg6az4aHlREWEUlJHWl0,33577
@@ -214,13 +214,13 @@ hatchet_sdk/clients/rest/models/workflow_version_meta.py,sha256=TW4R7bAuYAg_LraN
214
214
  hatchet_sdk/clients/rest/models/workflow_workers_count.py,sha256=qhzqfvjjIDyARkiiLGluMIqEmqO-diHTsjlu0Doi0yg,2875
215
215
  hatchet_sdk/clients/rest/rest.py,sha256=NbmK_NvoL3-g6Oul6dsZgJO3XvCWtw2V0qAbr8pGfQE,6967
216
216
  hatchet_sdk/clients/rest/tenacity_utils.py,sha256=n6QvwuGwinLQpiWNU5GxrDNhFBE8_wZdg3WNur21rJ0,1055
217
- hatchet_sdk/clients/run_event_listener.py,sha256=t58Scw9CypYXegU7ZWLAUeSFcse2l4G3d8WvIoEPlBI,10689
217
+ hatchet_sdk/clients/run_event_listener.py,sha256=12o2P8bi3NUGhzPi7UM9d9D-FX94cr5qyNIEWIweFD0,9960
218
218
  hatchet_sdk/clients/v1/api_client.py,sha256=0FmhJIjN5Y4CWEsIWt0XzoOmIFUjPwFOAG0TI-fVqHI,2412
219
- hatchet_sdk/clients/workflow_listener.py,sha256=oqvoJYw9rCSPv-_V-ipX_yEC6ipLG8ZRUVwkaM2SEjc,10443
219
+ hatchet_sdk/clients/workflow_listener.py,sha256=sdeCN3z1kMurG_5rLoT3-rVUJKrd3XJerVPnMfR6U4U,10470
220
220
  hatchet_sdk/config.py,sha256=piNrTA4EuYNNl0FpsFWceOuIOps-6R95PWZomQWOMBA,3426
221
221
  hatchet_sdk/connection.py,sha256=B5gT5NL9BBB5-l9U_cN6pMlraQk880rEYMnqaK_dgL0,2590
222
222
  hatchet_sdk/context/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
223
- hatchet_sdk/context/context.py,sha256=8vfNMu-SEC5BDaJmvbo42SxuKH6UwLdX4-q0j0zN0IU,8728
223
+ hatchet_sdk/context/context.py,sha256=ViVd-aiKy-dIcCmjVegNFb_vEJhqQ-Q3Zh9llU7nlXY,9002
224
224
  hatchet_sdk/context/worker_context.py,sha256=OVcEWvdT_Kpd0nlg61VAPUgIPSFzSLs0aSrXWj-1GX4,974
225
225
  hatchet_sdk/contracts/dispatcher_pb2.py,sha256=B35F3XQQkk05UA84nuZOIFtiydgPbB8gA5FhvNvSqb0,14414
226
226
  hatchet_sdk/contracts/dispatcher_pb2.pyi,sha256=JLtc615N9vNDRtQoUVynclPBbgIsRhbikcrT8b7Z-TM,18336
@@ -501,11 +501,11 @@ hatchet_sdk/waits.py,sha256=mBJVOjvTJfhXCngyIfNccYFtg7eiFM2B2n7lcg90S3A,3327
501
501
  hatchet_sdk/worker/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
502
502
  hatchet_sdk/worker/action_listener_process.py,sha256=OslEFdj0VZvC65yPB8vNqrLQXQYeRxrfjz7XPzZ8AcA,11455
503
503
  hatchet_sdk/worker/runner/run_loop_manager.py,sha256=uLzNCKy0yHEX8IosDCQvA8TqkIOd14BaAFFArOaPxzA,3970
504
- hatchet_sdk/worker/runner/runner.py,sha256=35bBFrBAamirOkfNgjNAbewP2eKTPBLbmPG19By-nFg,16509
505
- hatchet_sdk/worker/runner/utils/capture_logs.py,sha256=_3W5pqT-lirfhsSjOEBkD9cgCm1OEhn6Wk5So8nhib8,2795
506
- hatchet_sdk/worker/worker.py,sha256=sHqirnXNwVcw24LBzEGyUgEgYN5T1430tUs6WHNZZhg,14147
504
+ hatchet_sdk/worker/runner/runner.py,sha256=IIvjrE1sJlF1oEdMTk7s15-CjTyCu8om6yEFia2XcdE,16434
505
+ hatchet_sdk/worker/runner/utils/capture_logs.py,sha256=nHRPSiDBqzhObM7i2X7t03OupVFnE7kQBdR2Ckgg-2w,2709
506
+ hatchet_sdk/worker/worker.py,sha256=qyHs64H-grF9HR1CgH7MlnoDmTQ8mm4d8basx-ZDyWc,14490
507
507
  hatchet_sdk/workflow_run.py,sha256=JTLOuGyEat4OvMM3h55WrX0aFFpqs5YtK7YJxTMC92I,1428
508
- hatchet_sdk-1.0.1.dist-info/METADATA,sha256=GZShTmxvV7xHlFjrOgEb_9AnrY1UTzmTNHq7CDt7CWA,1829
509
- hatchet_sdk-1.0.1.dist-info/WHEEL,sha256=FMvqSimYX_P7y0a7UY-_Mc83r5zkBZsCYPm7Lr0Bsq4,88
510
- hatchet_sdk-1.0.1.dist-info/entry_points.txt,sha256=g_3isHLTk-_oUZ6iVAN0iuFQV8vL1zvAsswQ32OqyeU,1194
511
- hatchet_sdk-1.0.1.dist-info/RECORD,,
508
+ hatchet_sdk-1.0.3.dist-info/METADATA,sha256=Ltt_FG3crdnhRIB2CZLjTSKSpM6ca06QiylFWgBHQfM,1876
509
+ hatchet_sdk-1.0.3.dist-info/WHEEL,sha256=FMvqSimYX_P7y0a7UY-_Mc83r5zkBZsCYPm7Lr0Bsq4,88
510
+ hatchet_sdk-1.0.3.dist-info/entry_points.txt,sha256=5mTp_AsCWK5raiVxP_MU9eBCgkRGl4OsN6chpHcvm7o,1235
511
+ hatchet_sdk-1.0.3.dist-info/RECORD,,
@@ -23,6 +23,7 @@ pydantic=examples.pydantic.worker:main
23
23
  rate_limit=examples.rate_limit.worker:main
24
24
  retries_with_backoff=examples.retries_with_backoff.worker:main
25
25
  simple=examples.simple.worker:main
26
+ streaming=examples.streaming.worker:main
26
27
  timeout=examples.timeout.worker:main
27
28
  v2_simple=examples.v2.simple.worker:main
28
29
  waits=examples.waits.worker:main