hatchet-sdk 1.16.5__py3-none-any.whl → 1.17.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

hatchet_sdk/__init__.py CHANGED
@@ -155,7 +155,7 @@ from hatchet_sdk.exceptions import (
 from hatchet_sdk.features.cel import CELEvaluationResult, CELFailure, CELSuccess
 from hatchet_sdk.features.runs import BulkCancelReplayOpts, RunFilter
 from hatchet_sdk.hatchet import Hatchet
-from hatchet_sdk.runnables.task import Task
+from hatchet_sdk.runnables.task import Depends, Task
 from hatchet_sdk.runnables.types import (
     ConcurrencyExpression,
     ConcurrencyLimitStrategy,
@@ -198,6 +198,7 @@ __all__ = [
     "CreateWorkflowVersionOpts",
     "DedupeViolationError",
     "DefaultFilter",
+    "Depends",
     "DurableContext",
     "EmptyModel",
     "Event",

hatchet_sdk/clients/events.py CHANGED
@@ -28,7 +28,7 @@ from hatchet_sdk.contracts.events_pb2 import (
 )
 from hatchet_sdk.contracts.events_pb2_grpc import EventsServiceStub
 from hatchet_sdk.metadata import get_metadata
-from hatchet_sdk.utils.typing import JSONSerializableMapping
+from hatchet_sdk.utils.typing import JSONSerializableMapping, LogLevel


 def proto_timestamp_now() -> timestamp_pb2.Timestamp:
@@ -180,11 +180,14 @@ class EventClient(BaseRestClient):
         )

     @tenacity_retry
-    def log(self, message: str, step_run_id: str) -> None:
+    def log(
+        self, message: str, step_run_id: str, level: LogLevel | None = None
+    ) -> None:
         request = PutLogRequest(
             stepRunId=step_run_id,
             createdAt=proto_timestamp_now(),
             message=message,
+            level=level.value if level else None,
         )

         self.events_service_client.PutLog(request, metadata=get_metadata(self.token))
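
The new `level` parameter threads a log level through to the PutLog request. A minimal sketch of calling it directly, assuming a configured Hatchet client whose `event` attribute is the EventClient shown above and a `step_run_id` obtained from the surrounding run (in normal operation the worker's AsyncLogSender makes this call for you):

    from hatchet_sdk.utils.typing import LogLevel

    # Forward an error-level log line for a specific step run.
    hatchet.event.log(
        message="payment provider returned a 5xx",
        step_run_id=step_run_id,
        level=LogLevel.ERROR,
    )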

hatchet_sdk/context/context.py CHANGED
@@ -21,10 +21,11 @@ from hatchet_sdk.conditions import (
     flatten_conditions,
 )
 from hatchet_sdk.context.worker_context import WorkerContext
+from hatchet_sdk.exceptions import TaskRunError
 from hatchet_sdk.features.runs import RunsClient
 from hatchet_sdk.logger import logger
 from hatchet_sdk.utils.timedelta_to_expression import Duration, timedelta_to_expr
-from hatchet_sdk.utils.typing import JSONSerializableMapping
+from hatchet_sdk.utils.typing import JSONSerializableMapping, LogLevel
 from hatchet_sdk.worker.runner.utils.capture_logs import AsyncLogSender, LogRecord

 if TYPE_CHECKING:
@@ -211,7 +212,9 @@ class Context:
             line = str(line)

         logger.info(line)
-        self.log_sender.publish(LogRecord(message=line, step_run_id=self.step_run_id))
+        self.log_sender.publish(
+            LogRecord(message=line, step_run_id=self.step_run_id, level=LogLevel.INFO)
+        )

     def release_slot(self) -> None:
         """
@@ -360,15 +363,41 @@ class Context:
         task: "Task[TWorkflowInput, R]",
     ) -> str | None:
         """
+        **DEPRECATED**: Use `get_task_run_error` instead.
+
         A helper intended to be used in an on-failure step to retrieve the error that occurred in a specific upstream task run.

         :param task: The task whose error you want to retrieve.
         :return: The error message of the task run, or None if no error occurred.
         """
+        warn(
+            "`fetch_task_run_error` is deprecated. Use `get_task_run_error` instead.",
+            DeprecationWarning,
+            stacklevel=2,
+        )
         errors = self.data.step_run_errors

         return errors.get(task.name)

+    def get_task_run_error(
+        self,
+        task: "Task[TWorkflowInput, R]",
+    ) -> TaskRunError | None:
+        """
+        A helper intended to be used in an on-failure step to retrieve the error that occurred in a specific upstream task run.
+
+        :param task: The task whose error you want to retrieve.
+        :return: The error message of the task run, or None if no error occurred.
+        """
+        errors = self.data.step_run_errors
+
+        error = errors.get(task.name)
+
+        if not error:
+            return None
+
+        return TaskRunError.deserialize(error)
+

 class DurableContext(Context):
     def __init__(
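
`get_task_run_error` returns a structured TaskRunError where the deprecated `fetch_task_run_error` returned a raw string. A sketch of the intended use in an on-failure task, assuming a `workflow` with an upstream `first_task` defined elsewhere:

    from hatchet_sdk import Context, EmptyModel

    @workflow.on_failure_task()
    def handle_failure(input: EmptyModel, ctx: Context) -> None:
        error = ctx.get_task_run_error(first_task)

        if error is not None:
            # Structured fields, including the external id of the failing
            # run (new in this release).
            print(error.exc_type, error.exc, error.task_run_external_id)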
hatchet_sdk/exceptions.py CHANGED
@@ -1,4 +1,10 @@
+import json
 import traceback
+from typing import cast
+
+
+class InvalidDependencyError(Exception):
+    pass


 class NonRetryableException(Exception):  # noqa: N818
@@ -9,28 +15,42 @@ class DedupeViolationError(Exception):
     """Raised by the Hatchet library to indicate that a workflow has already been run with this deduplication value."""


+TASK_RUN_ERROR_METADATA_KEY = "__hatchet_error_metadata__"
+
+
 class TaskRunError(Exception):
     def __init__(
         self,
         exc: str,
         exc_type: str,
         trace: str,
+        task_run_external_id: str | None,
     ) -> None:
         self.exc = exc
         self.exc_type = exc_type
         self.trace = trace
+        self.task_run_external_id = task_run_external_id

     def __str__(self) -> str:
-        return self.serialize()
+        return self.serialize(include_metadata=False)

     def __repr__(self) -> str:
         return str(self)

-    def serialize(self) -> str:
+    def serialize(self, include_metadata: bool) -> str:
         if not self.exc_type or not self.exc:
             return ""

-        return (
+        metadata = json.dumps(
+            {
+                TASK_RUN_ERROR_METADATA_KEY: {
+                    "task_run_external_id": self.task_run_external_id,
+                }
+            },
+            indent=None,
+        )
+
+        result = (
             self.exc_type.replace(": ", ":::")
             + ": "
             + self.exc.replace("\n", "\\\n")
@@ -38,6 +58,40 @@ class TaskRunError(Exception):
             + self.trace
         )

+        if include_metadata:
+            return result + "\n\n" + metadata
+
+        return result
+
+    @classmethod
+    def _extract_metadata(cls, serialized: str) -> tuple[str, dict[str, str | None]]:
+        metadata = serialized.split("\n")[-1]
+
+        try:
+            parsed = json.loads(metadata)
+
+            if (
+                TASK_RUN_ERROR_METADATA_KEY in parsed
+                and "task_run_external_id" in parsed[TASK_RUN_ERROR_METADATA_KEY]
+            ):
+                serialized = serialized.replace(metadata, "").strip()
+                return serialized, cast(
+                    dict[str, str | None], parsed[TASK_RUN_ERROR_METADATA_KEY]
+                )
+
+            return serialized, {}
+        except json.JSONDecodeError:
+            return serialized, {}
+
+    @classmethod
+    def _unpack_serialized_error(cls, serialized: str) -> tuple[str | None, str, str]:
+        serialized, metadata = cls._extract_metadata(serialized)
+
+        external_id = metadata.get("task_run_external_id", None)
+        header, trace = serialized.split("\n", 1)
+
+        return external_id, header, trace
+
     @classmethod
     def deserialize(cls, serialized: str) -> "TaskRunError":
         if not serialized:
@@ -45,10 +99,16 @@ class TaskRunError(Exception):
                 exc="",
                 exc_type="",
                 trace="",
+                task_run_external_id=None,
             )

+        task_run_external_id = None
+
         try:
-            header, trace = serialized.split("\n", 1)
+            task_run_external_id, header, trace = cls._unpack_serialized_error(
+                serialized
+            )
+
             exc_type, exc = header.split(": ", 1)
         except ValueError:
             ## If we get here, we saw an error that was not serialized how we expected,
@@ -57,6 +117,7 @@ class TaskRunError(Exception):
                 exc=serialized,
                 exc_type="HatchetError",
                 trace="",
+                task_run_external_id=task_run_external_id,
             )

         exc_type = exc_type.replace(":::", ": ")
@@ -66,16 +127,20 @@ class TaskRunError(Exception):
             exc=exc,
             exc_type=exc_type,
             trace=trace,
+            task_run_external_id=task_run_external_id,
         )

     @classmethod
-    def from_exception(cls, exc: Exception) -> "TaskRunError":
+    def from_exception(
+        cls, exc: Exception, task_run_external_id: str | None
+    ) -> "TaskRunError":
         return cls(
             exc=str(exc),
             exc_type=type(exc).__name__,
             trace="".join(
                 traceback.format_exception(type(exc), exc, exc.__traceback__)
             ),
+            task_run_external_id=task_run_external_id,
         )
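
`serialize(include_metadata=True)` now appends a single-line JSON trailer keyed by `__hatchet_error_metadata__`, which `deserialize` strips back out. A round-trip sketch (in production the runner, not user code, calls `from_exception` with the step run id):

    from hatchet_sdk.exceptions import TaskRunError

    err = TaskRunError.from_exception(ValueError("boom"), task_run_external_id="run-123")

    wire = err.serialize(include_metadata=True)  # ends with the metadata trailer

    restored = TaskRunError.deserialize(wire)
    assert restored.exc_type == "ValueError"
    assert restored.exc == "boom"
    assert restored.task_run_external_id == "run-123"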
hatchet_sdk/hatchet.py CHANGED
@@ -3,7 +3,7 @@ import logging
 from collections.abc import Callable
 from datetime import timedelta
 from functools import cached_property
-from typing import Any, cast, overload
+from typing import Any, Concatenate, ParamSpec, cast, overload

 from hatchet_sdk import Context, DurableContext
 from hatchet_sdk.client import Client
@@ -40,6 +40,8 @@ from hatchet_sdk.utils.timedelta_to_expression import Duration
 from hatchet_sdk.utils.typing import CoroutineLike
 from hatchet_sdk.worker.worker import LifespanFn, Worker

+P = ParamSpec("P")
+

 class Hatchet:
     """
@@ -346,7 +348,7 @@
         backoff_max_seconds: int | None = None,
         default_filters: list[DefaultFilter] | None = None,
     ) -> Callable[
-        [Callable[[EmptyModel, Context], R | CoroutineLike[R]]],
+        [Callable[Concatenate[EmptyModel, Context, P], R | CoroutineLike[R]]],
         Standalone[EmptyModel, R],
     ]: ...

@@ -372,7 +374,7 @@
         backoff_max_seconds: int | None = None,
         default_filters: list[DefaultFilter] | None = None,
     ) -> Callable[
-        [Callable[[TWorkflowInput, Context], R | CoroutineLike[R]]],
+        [Callable[Concatenate[TWorkflowInput, Context, P], R | CoroutineLike[R]]],
         Standalone[TWorkflowInput, R],
     ]: ...

@@ -398,11 +400,11 @@
         default_filters: list[DefaultFilter] | None = None,
     ) -> (
         Callable[
-            [Callable[[EmptyModel, Context], R | CoroutineLike[R]]],
+            [Callable[Concatenate[EmptyModel, Context, P], R | CoroutineLike[R]]],
             Standalone[EmptyModel, R],
         ]
         | Callable[
-            [Callable[[TWorkflowInput, Context], R | CoroutineLike[R]]],
+            [Callable[Concatenate[TWorkflowInput, Context, P], R | CoroutineLike[R]]],
             Standalone[TWorkflowInput, R],
         ]
     ):
@@ -447,7 +449,9 @@
         """

         def inner(
-            func: Callable[[TWorkflowInput, Context], R | CoroutineLike[R]],
+            func: Callable[
+                Concatenate[TWorkflowInput, Context, P], R | CoroutineLike[R]
+            ],
         ) -> Standalone[TWorkflowInput, R]:
             inferred_name = name or func.__name__

@@ -518,7 +522,7 @@
         backoff_max_seconds: int | None = None,
         default_filters: list[DefaultFilter] | None = None,
     ) -> Callable[
-        [Callable[[EmptyModel, DurableContext], R | CoroutineLike[R]]],
+        [Callable[Concatenate[EmptyModel, DurableContext, P], R | CoroutineLike[R]]],
         Standalone[EmptyModel, R],
     ]: ...

@@ -544,7 +548,11 @@
         backoff_max_seconds: int | None = None,
         default_filters: list[DefaultFilter] | None = None,
     ) -> Callable[
-        [Callable[[TWorkflowInput, DurableContext], R | CoroutineLike[R]]],
+        [
+            Callable[
+                Concatenate[TWorkflowInput, DurableContext, P], R | CoroutineLike[R]
+            ]
+        ],
         Standalone[TWorkflowInput, R],
     ]: ...

@@ -570,11 +578,19 @@
         default_filters: list[DefaultFilter] | None = None,
     ) -> (
         Callable[
-            [Callable[[EmptyModel, DurableContext], R | CoroutineLike[R]]],
+            [
+                Callable[
+                    Concatenate[EmptyModel, DurableContext, P], R | CoroutineLike[R]
+                ]
+            ],
             Standalone[EmptyModel, R],
         ]
         | Callable[
-            [Callable[[TWorkflowInput, DurableContext], R | CoroutineLike[R]]],
+            [
+                Callable[
+                    Concatenate[TWorkflowInput, DurableContext, P], R | CoroutineLike[R]
+                ]
+            ],
             Standalone[TWorkflowInput, R],
         ]
     ):
@@ -619,7 +635,9 @@
         """

         def inner(
-            func: Callable[[TWorkflowInput, DurableContext], R | CoroutineLike[R]],
+            func: Callable[
+                Concatenate[TWorkflowInput, DurableContext, P], R | CoroutineLike[R]
+            ],
         ) -> Standalone[TWorkflowInput, R]:
             inferred_name = name or func.__name__
             workflow = Workflow[TWorkflowInput](
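
Replacing the fixed two-parameter Callable types with `Concatenate[..., P]` is what lets task functions declare extra parameters after `(input, ctx)`, which the new dependency-injection machinery fills in. A sketch, assuming a configured `Hatchet()` client; `get_api_key` is a made-up provider, and providers must take exactly `(input, ctx)`:

    from typing import Annotated

    from hatchet_sdk import Context, Depends, EmptyModel, Hatchet

    hatchet = Hatchet()

    def get_api_key(input: EmptyModel, ctx: Context) -> str:
        return "secret-key"  # e.g. read from a vault or an env var

    @hatchet.task(name="fetch-data")
    def fetch_data(
        input: EmptyModel,
        ctx: Context,
        api_key: Annotated[str, Depends(get_api_key)],
    ) -> dict[str, str]:
        return {"used_key": api_key}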
hatchet_sdk/rate_limit.py CHANGED
@@ -1,20 +1,10 @@
 from enum import Enum

-from celpy import CELEvalError, Environment  # type: ignore
 from pydantic import BaseModel, model_validator

 from hatchet_sdk.contracts.v1.workflows_pb2 import CreateTaskRateLimit


-def validate_cel_expression(expr: str) -> bool:
-    env = Environment()
-    try:
-        env.compile(expr)
-        return True
-    except CELEvalError:
-        return False
-
-
 class RateLimitDuration(str, Enum):
     SECOND = "SECOND"
     MINUTE = "MINUTE"
@@ -72,17 +62,7 @@ class RateLimit(BaseModel):
         if self.dynamic_key and self.static_key:
             raise ValueError("Cannot have both static key and dynamic key set")

-        if self.dynamic_key and not validate_cel_expression(self.dynamic_key):
-            raise ValueError(f"Invalid CEL expression: {self.dynamic_key}")
-
-        if not isinstance(self.units, int) and not validate_cel_expression(self.units):
-            raise ValueError(f"Invalid CEL expression: {self.units}")
-
-        if (
-            self.limit
-            and not isinstance(self.limit, int)
-            and not validate_cel_expression(self.limit)
-        ):
+        if self.limit and not isinstance(self.limit, int):
             raise ValueError(f"Invalid CEL expression: {self.limit}")

         if self.dynamic_key and not self.limit:
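
Client-side CEL validation is gone along with the cel-python dependency, so a malformed `dynamic_key` or string `units` expression is now only rejected by the Hatchet engine at run time. Declaring a dynamic rate limit is otherwise unchanged; a sketch using the fields exercised by the validator above:

    from hatchet_sdk.rate_limit import RateLimit

    # Keyed per user via a CEL expression over the workflow input;
    # the expression is no longer checked locally before registration.
    limit = RateLimit(
        dynamic_key="input.user_id",
        units=1,
        limit=10,
    )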

hatchet_sdk/runnables/task.py CHANGED
@@ -1,5 +1,21 @@
+import asyncio
 from collections.abc import Callable
-from typing import TYPE_CHECKING, Any, Generic, cast, get_type_hints
+from inspect import Parameter, iscoroutinefunction, signature
+from typing import (
+    TYPE_CHECKING,
+    Annotated,
+    Any,
+    Concatenate,
+    Generic,
+    ParamSpec,
+    TypeVar,
+    cast,
+    get_args,
+    get_origin,
+    get_type_hints,
+)
+
+from pydantic import BaseModel, ConfigDict

 from hatchet_sdk.conditions import (
     Action,
@@ -18,6 +34,7 @@ from hatchet_sdk.contracts.v1.workflows_pb2 import (
     CreateTaskRateLimit,
     DesiredWorkerLabels,
 )
+from hatchet_sdk.exceptions import InvalidDependencyError
 from hatchet_sdk.runnables.types import (
     ConcurrencyExpression,
     EmptyModel,
@@ -25,7 +42,6 @@ from hatchet_sdk.runnables.types import (
     StepType,
     TWorkflowInput,
     is_async_fn,
-    is_durable_sync_fn,
     is_sync_fn,
 )
 from hatchet_sdk.utils.timedelta_to_expression import Duration, timedelta_to_expr
@@ -41,16 +57,45 @@ from hatchet_sdk.worker.runner.utils.capture_logs import AsyncLogSender
 if TYPE_CHECKING:
     from hatchet_sdk.runnables.workflow import Workflow

+T = TypeVar("T")
+P = ParamSpec("P")
+
+
+class Depends(Generic[T, TWorkflowInput]):
+    def __init__(
+        self, fn: Callable[[TWorkflowInput, Context], T | CoroutineLike[T]]
+    ) -> None:
+        sig = signature(fn)
+        params = list(sig.parameters.values())
+
+        if len(params) != 2:
+            raise InvalidDependencyError(
+                f"Dependency function {fn.__name__} must have exactly two parameters: input and ctx."
+            )
+
+        self.fn = fn
+
+
+class DependencyToInject(BaseModel):
+    model_config = ConfigDict(arbitrary_types_allowed=True)
+
+    name: str
+    value: Any
+

 class Task(Generic[TWorkflowInput, R]):
     def __init__(
         self,
         _fn: (
-            Callable[[TWorkflowInput, Context], R | CoroutineLike[R]]
-            | Callable[[TWorkflowInput, Context], AwaitableLike[R]]
+            Callable[Concatenate[TWorkflowInput, Context, P], R | CoroutineLike[R]]
+            | Callable[Concatenate[TWorkflowInput, Context, P], AwaitableLike[R]]
             | (
-                Callable[[TWorkflowInput, DurableContext], R | CoroutineLike[R]]
-                | Callable[[TWorkflowInput, DurableContext], AwaitableLike[R]]
+                Callable[
+                    Concatenate[TWorkflowInput, DurableContext, P], R | CoroutineLike[R]
+                ]
+                | Callable[
+                    Concatenate[TWorkflowInput, DurableContext, P], AwaitableLike[R]
+                ]
             )
         ),
         is_durable: bool,
@@ -100,33 +145,74 @@ class Task(Generic[TWorkflowInput, R]):
             step_output=return_type if is_basemodel_subclass(return_type) else None,
         )

-    def call(self, ctx: Context | DurableContext) -> R:
+    async def _parse_parameter(
+        self,
+        name: str,
+        param: Parameter,
+        input: TWorkflowInput,
+        ctx: Context | DurableContext,
+    ) -> DependencyToInject | None:
+        annotation = param.annotation
+
+        if get_origin(annotation) is Annotated:
+            args = get_args(annotation)
+
+            if len(args) < 2:
+                return None
+
+            metadata = args[1:]
+
+            for item in metadata:
+                if isinstance(item, Depends):
+                    if iscoroutinefunction(item.fn):
+                        return DependencyToInject(
+                            name=name, value=await item.fn(input, ctx)
+                        )
+
+                    return DependencyToInject(
+                        name=name, value=await asyncio.to_thread(item.fn, input, ctx)
+                    )
+
+        return None
+
+    async def _unpack_dependencies(
+        self, ctx: Context | DurableContext
+    ) -> dict[str, Any]:
+        sig = signature(self.fn)
+        input = self.workflow._get_workflow_input(ctx)
+        return {
+            parsed.name: parsed.value
+            for n, p in sig.parameters.items()
+            if (parsed := await self._parse_parameter(n, p, input, ctx)) is not None
+        }
+
+    def call(
+        self, ctx: Context | DurableContext, dependencies: dict[str, Any] | None = None
+    ) -> R:
         if self.is_async_function:
             raise TypeError(f"{self.name} is not a sync function. Use `acall` instead.")

         workflow_input = self.workflow._get_workflow_input(ctx)
+        dependencies = dependencies or {}

-        if self.is_durable:
-            fn = cast(Callable[[TWorkflowInput, DurableContext], R], self.fn)
-            if is_durable_sync_fn(fn):
-                return fn(workflow_input, cast(DurableContext, ctx))
-        else:
-            fn = cast(Callable[[TWorkflowInput, Context], R], self.fn)
-            if is_sync_fn(fn):
-                return fn(workflow_input, ctx)
+        if is_sync_fn(self.fn):  # type: ignore
+            return self.fn(workflow_input, cast(Context, ctx), **dependencies)  # type: ignore

         raise TypeError(f"{self.name} is not a sync function. Use `acall` instead.")

-    async def aio_call(self, ctx: Context | DurableContext) -> R:
+    async def aio_call(
+        self, ctx: Context | DurableContext, dependencies: dict[str, Any] | None = None
+    ) -> R:
         if not self.is_async_function:
             raise TypeError(
                 f"{self.name} is not an async function. Use `call` instead."
             )

         workflow_input = self.workflow._get_workflow_input(ctx)
+        dependencies = dependencies or {}

         if is_async_fn(self.fn):  # type: ignore
-            return await self.fn(workflow_input, cast(Context, ctx))  # type: ignore
+            return await self.fn(workflow_input, cast(Context, ctx), **dependencies)  # type: ignore

         raise TypeError(f"{self.name} is not an async function. Use `call` instead.")

@@ -255,6 +341,7 @@
         parent_outputs: dict[str, JSONSerializableMapping] | None = None,
         retry_count: int = 0,
         lifespan: Any = None,
+        dependencies: dict[str, Any] | None = None,
     ) -> R:
         """
         Mimic the execution of a task. This method is intended to be used to unit test
@@ -266,6 +353,7 @@
         :param parent_outputs: Outputs from parent tasks, if any. This is useful for mimicking DAG functionality. For instance, if you have a task `step_2` that has a `parent` which is `step_1`, you can pass `parent_outputs={"step_1": {"result": "Hello, world!"}}` to `step_2.mock_run()` to be able to access `ctx.task_output(step_1)` in `step_2`.
         :param retry_count: The number of times the task has been retried.
         :param lifespan: The lifespan to be used in the task, which is useful if one was set on the worker. This will allow you to access `ctx.lifespan` inside of your task.
+        :param dependencies: Dependencies to be injected into the task. This is useful for tasks that have dependencies defined using `Depends`. **IMPORTANT**: You must pass the dependencies _directly_, **not** the `Depends` objects themselves. For example, if you have a task that has a dependency `config: Annotated[str, Depends(get_config)]`, you should pass `dependencies={"config": "config_value"}` to `aio_mock_run`.

         :return: The output of the task.
         :raises TypeError: If the task is an async function and `mock_run` is called, or if the task is a sync function and `aio_mock_run` is called.
@@ -280,7 +368,7 @@
             input, additional_metadata, parent_outputs, retry_count, lifespan
         )

-        return self.call(ctx)
+        return self.call(ctx, dependencies)

     async def aio_mock_run(
         self,
@@ -289,6 +377,7 @@
         parent_outputs: dict[str, JSONSerializableMapping] | None = None,
         retry_count: int = 0,
         lifespan: Any = None,
+        dependencies: dict[str, Any] | None = None,
     ) -> R:
         """
         Mimic the execution of a task. This method is intended to be used to unit test
@@ -300,6 +389,7 @@
         :param parent_outputs: Outputs from parent tasks, if any. This is useful for mimicking DAG functionality. For instance, if you have a task `step_2` that has a `parent` which is `step_1`, you can pass `parent_outputs={"step_1": {"result": "Hello, world!"}}` to `step_2.mock_run()` to be able to access `ctx.task_output(step_1)` in `step_2`.
         :param retry_count: The number of times the task has been retried.
         :param lifespan: The lifespan to be used in the task, which is useful if one was set on the worker. This will allow you to access `ctx.lifespan` inside of your task.
+        :param dependencies: Dependencies to be injected into the task. This is useful for tasks that have dependencies defined using `Depends`. **IMPORTANT**: You must pass the dependencies _directly_, **not** the `Depends` objects themselves. For example, if you have a task that has a dependency `config: Annotated[str, Depends(get_config)]`, you should pass `dependencies={"config": "config_value"}` to `aio_mock_run`.

         :return: The output of the task.
         :raises TypeError: If the task is an async function and `mock_run` is called, or if the task is a sync function and `aio_mock_run` is called.
@@ -318,4 +408,4 @@
             lifespan,
         )

-        return await self.aio_call(ctx)
+        return await self.aio_call(ctx, dependencies)
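
`mock_run` and `aio_mock_run` grow a matching `dependencies` argument for unit tests; as the docstrings stress, you pass the resolved values, not the Depends objects. A sketch against the hypothetical `fetch_data` task from the hatchet.py section above:

    result = fetch_data.mock_run(
        input=EmptyModel(),
        dependencies={"api_key": "test-key"},
    )
    assert result == {"used_key": "test-key"}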

hatchet_sdk/runnables/workflow.py CHANGED
@@ -5,8 +5,10 @@ from functools import cached_property
 from typing import (
     TYPE_CHECKING,
     Any,
+    Concatenate,
     Generic,
     Literal,
+    ParamSpec,
     TypeVar,
     cast,
     get_type_hints,
@@ -60,6 +62,7 @@ if TYPE_CHECKING:


 T = TypeVar("T")
+P = ParamSpec("P")


 def fall_back_to_default(value: T, param_default: T, fallback_value: T | None) -> T:
@@ -800,7 +803,7 @@ class Workflow(BaseWorkflow[TWorkflowInput]):
         skip_if: list[Condition | OrGroup] | None = None,
         cancel_if: list[Condition | OrGroup] | None = None,
     ) -> Callable[
-        [Callable[[TWorkflowInput, Context], R | CoroutineLike[R]]],
+        [Callable[Concatenate[TWorkflowInput, Context, P], R | CoroutineLike[R]]],
         Task[TWorkflowInput, R],
     ]:
         """
@@ -845,7 +848,9 @@
         )

         def inner(
-            func: Callable[[TWorkflowInput, Context], R | CoroutineLike[R]],
+            func: Callable[
+                Concatenate[TWorkflowInput, Context, P], R | CoroutineLike[R]
+            ],
         ) -> Task[TWorkflowInput, R]:
             task = Task(
                 _fn=func,
@@ -892,7 +897,11 @@
         skip_if: list[Condition | OrGroup] | None = None,
         cancel_if: list[Condition | OrGroup] | None = None,
     ) -> Callable[
-        [Callable[[TWorkflowInput, DurableContext], R | CoroutineLike[R]]],
+        [
+            Callable[
+                Concatenate[TWorkflowInput, DurableContext, P], R | CoroutineLike[R]
+            ]
+        ],
         Task[TWorkflowInput, R],
     ]:
         """
@@ -941,7 +950,9 @@
         )

         def inner(
-            func: Callable[[TWorkflowInput, DurableContext], R | CoroutineLike[R]],
+            func: Callable[
+                Concatenate[TWorkflowInput, DurableContext, P], R | CoroutineLike[R]
+            ],
         ) -> Task[TWorkflowInput, R]:
             task = Task(
                 _fn=func,
@@ -983,7 +994,7 @@
         backoff_max_seconds: int | None = None,
         concurrency: list[ConcurrencyExpression] | None = None,
     ) -> Callable[
-        [Callable[[TWorkflowInput, Context], R | CoroutineLike[R]]],
+        [Callable[Concatenate[TWorkflowInput, Context, P], R | CoroutineLike[R]]],
         Task[TWorkflowInput, R],
     ]:
         """
@@ -1009,7 +1020,9 @@
         """

         def inner(
-            func: Callable[[TWorkflowInput, Context], R | CoroutineLike[R]],
+            func: Callable[
+                Concatenate[TWorkflowInput, Context, P], R | CoroutineLike[R]
+            ],
         ) -> Task[TWorkflowInput, R]:
             task = Task(
                 is_durable=False,
@@ -1051,7 +1064,7 @@
         backoff_max_seconds: int | None = None,
         concurrency: list[ConcurrencyExpression] | None = None,
     ) -> Callable[
-        [Callable[[TWorkflowInput, Context], R | CoroutineLike[R]]],
+        [Callable[Concatenate[TWorkflowInput, Context, P], R | CoroutineLike[R]]],
         Task[TWorkflowInput, R],
     ]:
         """
@@ -1077,7 +1090,9 @@
         """

         def inner(
-            func: Callable[[TWorkflowInput, Context], R | CoroutineLike[R]],
+            func: Callable[
+                Concatenate[TWorkflowInput, Context, P], R | CoroutineLike[R]
+            ],
         ) -> Task[TWorkflowInput, R]:
             task = Task(
                 is_durable=False,

hatchet_sdk/utils/typing.py CHANGED
@@ -1,5 +1,6 @@
 import sys
 from collections.abc import Awaitable, Coroutine, Generator
+from enum import Enum
 from typing import Any, Literal, TypeAlias, TypeGuard, TypeVar

 from pydantic import BaseModel
@@ -31,3 +32,29 @@ else:

 STOP_LOOP_TYPE = Literal["STOP_LOOP"]
 STOP_LOOP: STOP_LOOP_TYPE = "STOP_LOOP"  # Sentinel object to stop the loop
+
+
+class LogLevel(str, Enum):
+    DEBUG = "DEBUG"
+    INFO = "INFO"
+    WARN = "WARN"
+    ERROR = "ERROR"
+
+    @classmethod
+    def from_levelname(cls, levelname: str) -> "LogLevel":
+        levelname = levelname.upper()
+
+        if levelname == "DEBUG":
+            return cls.DEBUG
+
+        if levelname == "INFO":
+            return cls.INFO
+
+        if levelname in ["WARNING", "WARN"]:
+            return cls.WARN
+
+        if levelname == "ERROR":
+            return cls.ERROR
+
+        # fall back to INFO
+        return cls.INFO
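
`from_levelname` folds the standard library's level names onto the enum, mapping WARNING to WARN and defaulting anything unrecognized to INFO:

    from hatchet_sdk.utils.typing import LogLevel

    assert LogLevel.from_levelname("warning") is LogLevel.WARN
    assert LogLevel.from_levelname("CRITICAL") is LogLevel.INFO  # no CRITICAL member; falls back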

hatchet_sdk/worker/runner/runner.py CHANGED
@@ -166,22 +166,22 @@ class Runner:
             except Exception as e:
                 should_not_retry = isinstance(e, NonRetryableException)

-                exc = TaskRunError.from_exception(e)
+                exc = TaskRunError.from_exception(e, action.step_run_id)

                 # This except is coming from the application itself, so we want to send that to the Hatchet instance
                 self.event_queue.put(
                     ActionEvent(
                         action=action,
                         type=STEP_EVENT_TYPE_FAILED,
-                        payload=exc.serialize(),
+                        payload=exc.serialize(include_metadata=True),
                         should_not_retry=should_not_retry,
                     )
                 )

-                log_with_level = logger.info if should_not_retry else logger.error
+                log_with_level = logger.info if should_not_retry else logger.exception

                 log_with_level(
-                    f"failed step run: {action.action_id}/{action.step_run_id}\n{exc.serialize()}"
+                    f"failed step run: {action.action_id}/{action.step_run_id}\n{exc.serialize(include_metadata=False)}"
                 )

                 return
@@ -198,18 +198,18 @@ class Runner:
                 )
             )
         except IllegalTaskOutputError as e:
-            exc = TaskRunError.from_exception(e)
+            exc = TaskRunError.from_exception(e, action.step_run_id)
             self.event_queue.put(
                 ActionEvent(
                     action=action,
                     type=STEP_EVENT_TYPE_FAILED,
-                    payload=exc.serialize(),
+                    payload=exc.serialize(include_metadata=True),
                     should_not_retry=False,
                 )
            )

-            logger.error(
-                f"failed step run: {action.action_id}/{action.step_run_id}\n{exc.serialize()}"
+            logger.exception(
+                f"failed step run: {action.action_id}/{action.step_run_id}\n{exc.serialize(include_metadata=False)}"
             )

             return
@@ -230,19 +230,19 @@
         try:
             output = task.result()
         except Exception as e:
-            exc = TaskRunError.from_exception(e)
+            exc = TaskRunError.from_exception(e, action.step_run_id)

             self.event_queue.put(
                 ActionEvent(
                     action=action,
                     type=GROUP_KEY_EVENT_TYPE_FAILED,
-                    payload=exc.serialize(),
+                    payload=exc.serialize(include_metadata=True),
                     should_not_retry=False,
                 )
             )

-            logger.error(
-                f"failed step run: {action.action_id}/{action.step_run_id}\n{exc.serialize()}"
+            logger.exception(
+                f"failed step run: {action.action_id}/{action.step_run_id}\n{exc.serialize(include_metadata=False)}"
             )

             return
@@ -259,18 +259,18 @@
             )
         )
         except IllegalTaskOutputError as e:
-            exc = TaskRunError.from_exception(e)
+            exc = TaskRunError.from_exception(e, action.step_run_id)
            self.event_queue.put(
                ActionEvent(
                    action=action,
                    type=STEP_EVENT_TYPE_FAILED,
-                    payload=exc.serialize(),
+                    payload=exc.serialize(include_metadata=True),
                    should_not_retry=False,
                )
            )

-            logger.error(
-                f"failed step run: {action.action_id}/{action.step_run_id}\n{exc.serialize()}"
+            logger.exception(
+                f"failed step run: {action.action_id}/{action.step_run_id}\n{exc.serialize(include_metadata=False)}"
            )

            return
@@ -280,12 +280,16 @@
         return inner_callback

     def thread_action_func(
-        self, ctx: Context, task: Task[TWorkflowInput, R], action: Action
+        self,
+        ctx: Context,
+        task: Task[TWorkflowInput, R],
+        action: Action,
+        dependencies: dict[str, Any],
     ) -> R:
         if action.step_run_id or action.get_group_key_run_id:
             self.threads[action.key] = current_thread()

-        return task.call(ctx)
+        return task.call(ctx, dependencies)

     # We wrap all actions in an async func
     async def async_wrapped_action_func(
@@ -300,9 +304,12 @@
         ctx_action_key.set(action.key)
         ctx_additional_metadata.set(action.additional_metadata)

+        dependencies = await task._unpack_dependencies(ctx)
+
         try:
             if task.is_async_function:
-                return await task.aio_call(ctx)
+                return await task.aio_call(ctx, dependencies)
+
             pfunc = functools.partial(
                 # we must copy the context vars to the new thread, as only asyncio natively supports
                 # contextvars
@@ -343,6 +350,7 @@
                 ctx,
                 task,
                 action,
+                dependencies,
             )

             loop = asyncio.get_event_loop()
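
Every failure path now serializes with `include_metadata=True` for the payload sent to the engine while logging the metadata-free form, and tags the error with `action.step_run_id`. The `should_not_retry` flag is driven by NonRetryableException; a sketch of opting out of retries from user code, reusing the `hatchet` client assumed earlier:

    from hatchet_sdk import Context, EmptyModel
    from hatchet_sdk.exceptions import NonRetryableException

    @hatchet.task(name="validate-order", retries=3)
    def validate_order(input: EmptyModel, ctx: Context) -> None:
        # Fails the step without consuming the remaining retries.
        raise NonRetryableException("order failed validation")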

hatchet_sdk/worker/runner/utils/capture_logs.py CHANGED
@@ -16,7 +16,12 @@ from hatchet_sdk.runnables.contextvars import (
     ctx_worker_id,
     ctx_workflow_run_id,
 )
-from hatchet_sdk.utils.typing import STOP_LOOP, STOP_LOOP_TYPE, JSONSerializableMapping
+from hatchet_sdk.utils.typing import (
+    STOP_LOOP,
+    STOP_LOOP_TYPE,
+    JSONSerializableMapping,
+    LogLevel,
+)

 T = TypeVar("T")
 P = ParamSpec("P")
@@ -67,6 +72,7 @@ def copy_context_vars(
 class LogRecord(BaseModel):
     message: str
     step_run_id: str
+    level: LogLevel


 class AsyncLogSender:
@@ -86,6 +92,7 @@ class AsyncLogSender:
                     self.event_client.log,
                     message=record.message,
                     step_run_id=record.step_run_id,
+                    level=record.level,
                 )
             except Exception:
                 logger.exception("failed to send log to Hatchet")
@@ -97,7 +104,7 @@
             logger.warning("log queue is full, dropping log message")


-class CustomLogHandler(logging.StreamHandler):  # type: ignore[type-arg]
+class LogForwardingHandler(logging.StreamHandler):  # type: ignore[type-arg]
     def __init__(self, log_sender: AsyncLogSender, stream: StringIO):
         super().__init__(stream)
@@ -112,7 +119,13 @@ class CustomLogHandler(logging.StreamHandler):  # type: ignore[type-arg]
         if not step_run_id:
             return

-        self.log_sender.publish(LogRecord(message=log_entry, step_run_id=step_run_id))
+        self.log_sender.publish(
+            LogRecord(
+                message=log_entry,
+                step_run_id=step_run_id,
+                level=LogLevel.from_levelname(record.levelname),
+            )
+        )


 def capture_logs(
@@ -121,27 +134,27 @@ def capture_logs(
     @functools.wraps(func)
     async def wrapper(*args: P.args, **kwargs: P.kwargs) -> T:
         log_stream = StringIO()
-        custom_handler = CustomLogHandler(log_sender, log_stream)
-        custom_handler.setLevel(logger.level)
+        log_forwarder = LogForwardingHandler(log_sender, log_stream)
+        log_forwarder.setLevel(logger.level)

         if logger.handlers:
             for handler in logger.handlers:
                 if handler.formatter:
-                    custom_handler.setFormatter(handler.formatter)
+                    log_forwarder.setFormatter(handler.formatter)
                     break

         for handler in logger.handlers:
             for filter_obj in handler.filters:
-                custom_handler.addFilter(filter_obj)
+                log_forwarder.addFilter(filter_obj)

-        if not any(h for h in logger.handlers if isinstance(h, CustomLogHandler)):
-            logger.addHandler(custom_handler)
+        if not any(h for h in logger.handlers if isinstance(h, LogForwardingHandler)):
+            logger.addHandler(log_forwarder)

         try:
             result = await func(*args, **kwargs)
         finally:
-            custom_handler.flush()
-            logger.removeHandler(custom_handler)
+            log_forwarder.flush()
+            logger.removeHandler(log_forwarder)
             log_stream.close()

         return result
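
With the renamed LogForwardingHandler installed around a task run, records written to the captured logger are shipped to Hatchet with their levels mapped through `LogLevel.from_levelname`; previously every record was sent without a level. A sketch, assuming the worker is configured to capture this module's logger and reusing the `hatchet` client from earlier:

    import logging

    from hatchet_sdk import Context, EmptyModel

    logger = logging.getLogger(__name__)

    @hatchet.task(name="process-order")
    def process_order(input: EmptyModel, ctx: Context) -> None:
        logger.warning("inventory low")  # forwarded as LogLevel.WARN
        ctx.log("starting processing")   # ctx.log always sends LogLevel.INFO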

hatchet_sdk-1.16.5.dist-info/METADATA → hatchet_sdk-1.17.0.dist-info/METADATA RENAMED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: hatchet-sdk
-Version: 1.16.5
+Version: 1.17.0
 Summary:
 License: MIT
 Author: Alexander Belanger
@@ -15,7 +15,6 @@ Provides-Extra: otel
 Requires-Dist: aiohttp (>=3.10.5,<4.0.0)
 Requires-Dist: aiohttp-retry (>=2.8.3,<3.0.0)
 Requires-Dist: aiostream (>=0.5.2,<0.6.0)
-Requires-Dist: cel-python (>=0.2.0,<0.3.0)
 Requires-Dist: grpcio (>=1.64.1,!=1.68.*) ; python_version < "3.13"
 Requires-Dist: grpcio (>=1.69.0) ; python_version >= "3.13"
 Requires-Dist: grpcio-tools (>=1.64.1,!=1.68.*) ; python_version < "3.13"

hatchet_sdk-1.16.5.dist-info/RECORD → hatchet_sdk-1.17.0.dist-info/RECORD RENAMED
@@ -1,10 +1,10 @@
-hatchet_sdk/__init__.py,sha256=r51t5nLruYORKSpxdMJ5jbub6sWVANyWM-VOw2QQKuw,10887
+hatchet_sdk/__init__.py,sha256=ng-IkoknD8Xbq1q8Wc42tEsO0OhD74MI9FPbBor9ut8,10911
 hatchet_sdk/client.py,sha256=s0-0WXGTyLkD-JOJl68bsaInDKOzDHSM5NCB0ic46lw,2502
 hatchet_sdk/clients/admin.py,sha256=Blx1OYhPGcdbUVCNq7n5jygjTy--8l_RYpjV-a8DRjw,17058
 hatchet_sdk/clients/dispatcher/action_listener.py,sha256=FeIYd8HZoYX_ELdeu--Nc6kTn9OfjL7Tr7WCs2EgJbc,13822
 hatchet_sdk/clients/dispatcher/dispatcher.py,sha256=1d4_4DYdFskqXjmHPHgZ-jBx1l9fQ1e0SMIweDL0VsE,8559
 hatchet_sdk/clients/event_ts.py,sha256=JVfxZ-OJ-xv7xJgPWAjv-g7ChwnkVwMDbYNVcAF-XnE,2121
-hatchet_sdk/clients/events.py,sha256=wE36_Wyb8BLywpuc8epRj7ZZDN9UCmztn6g3wdgRivM,8849
+hatchet_sdk/clients/events.py,sha256=fcoC7OhLmAKMyeXJMY-zhPPcr129em9XO-gGPh10BBg,8954
 hatchet_sdk/clients/listeners/durable_event_listener.py,sha256=55WbVQpm65ccVSQtqz-Z_4EI8Gig-7MzH5F9Arh-rb0,4166
 hatchet_sdk/clients/listeners/pooled_listener.py,sha256=mBx9XTQZuFStyvuM93QPyhjnF7qF2XzWfuUR7bniHt8,8512
 hatchet_sdk/clients/listeners/run_event_listener.py,sha256=CNXG5a_MUoYnNVmfrXkW1w3v6UnImyeUFXHQ96n4ULM,10222
@@ -261,7 +261,7 @@ hatchet_sdk/conditions.py,sha256=CnhpkXgVXM3wc0kAX8KZQA6tp8NFAbdzAN2xFbw7Hb0,452
 hatchet_sdk/config.py,sha256=9yXDCBfVW5AHosiSRWiEScSFSsm7Lx7wi0urNLC11Ck,5266
 hatchet_sdk/connection.py,sha256=XCBY9-UxaN3blakgZ59AhDpjb1ilLOOlmNNM6QaDtMM,2961
 hatchet_sdk/context/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-hatchet_sdk/context/context.py,sha256=NZzFLu1hOZqsZNGSF8T_LrZOMfFDX-2DIyNMtZXs2ck,15359
+hatchet_sdk/context/context.py,sha256=KE-GsaosBar8USXSj1lkWHdsZXfXFf6tI9RwGTOF-aM,16278
 hatchet_sdk/context/worker_context.py,sha256=3lGkOYmDixeuSmqxXbsYav2gErcjP8cDa2m0t0iomjI,884
 hatchet_sdk/contracts/dispatcher_pb2.py,sha256=W9aGh-wctZhLjUXUdeQTxH4qArsw6D0kIAWM9SVCX5o,14786
 hatchet_sdk/contracts/dispatcher_pb2.pyi,sha256=9Qoz88G-btdlTuxvk4knqfnYdcIXy3oR9DTh6MwIdP4,18923
@@ -281,7 +281,7 @@ hatchet_sdk/contracts/v1/workflows_pb2_grpc.py,sha256=XytYpV2kJQZT8iAs14z4SWsv-9
 hatchet_sdk/contracts/workflows_pb2.py,sha256=daEsUwZnlDQ5GGLJ8WHgLdI1Tgr3lBXxGV1mJ6go0nE,11812
 hatchet_sdk/contracts/workflows_pb2.pyi,sha256=WJ3b45pWvoNmmWTWjBJt61IiAoVn61F62AG5OrRsnd8,15538
 hatchet_sdk/contracts/workflows_pb2_grpc.py,sha256=2V8E72DlJx5qlH2yiQpVCu5cQbKUba5X7T1yNrQDF_s,10819
-hatchet_sdk/exceptions.py,sha256=DG-mS0wZiB-4Pnyr-BgY-LRrAEAdgP2nqQlIheU99t4,2646
+hatchet_sdk/exceptions.py,sha256=4NC_3CgJhZVF4RYJ6zAb2i4mGqFUL4TiKGSTz99bV-w,4656
 hatchet_sdk/features/cel.py,sha256=Uefvm2Du3SJCHiHsp12-URPxXJLe40uv0wK7guFucsE,4002
 hatchet_sdk/features/cron.py,sha256=k6Y-JJBPaf2Dtx-fwvNA2j7lTzHLBZpwVMA_u-p6Lvw,9723
 hatchet_sdk/features/filters.py,sha256=n6PPeRiqd5SOFlcx8V2strUaCGma9JPRAOLx44XpC0o,6443
@@ -293,18 +293,18 @@ hatchet_sdk/features/scheduled.py,sha256=t7YA9CoJrzBhH82ChTSFWaTF_dyoC9i1O_wf9yw
 hatchet_sdk/features/tenant.py,sha256=xkhh5mRKCWbunk_S1iBmGR-DYR-F4mjxk8jLyYUyzNE,886
 hatchet_sdk/features/workers.py,sha256=DVdno28RmtlfhMJUkaPcOMHNKXCPV0RFrXtLqV6zWyE,2600
 hatchet_sdk/features/workflows.py,sha256=WTt58imAFRrEEB3M5hEEIBwNtrzdWbITFpgtsIqJNSM,4770
-hatchet_sdk/hatchet.py,sha256=weSL5A_Ea61ryFtGBe2CST51z9aI3HCzW0dSSJ6G9wA,25984
+hatchet_sdk/hatchet.py,sha256=rEqakbTENR__bars1GM42d6RWrsgu8YzenvnEnkEd5E,26416
 hatchet_sdk/labels.py,sha256=nATgxWE3lFxRTnfISEpoIRLGbMfAZsHF4lZTuG4Mfic,182
 hatchet_sdk/logger.py,sha256=5uOr52T4mImSQm1QvWT8HvZFK5WfPNh3Y1cBQZRFgUQ,333
 hatchet_sdk/metadata.py,sha256=XkRbhnghJJGCdVvF-uzyGBcNaTqpeQ3uiQvNNP1wyBc,107
 hatchet_sdk/opentelemetry/instrumentor.py,sha256=7-OM_6Wu_EJaOVyYECNj7H50YqX6SkokQe1hGtU1rFY,27028
 hatchet_sdk/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-hatchet_sdk/rate_limit.py,sha256=TwbCuggiZaWpYuo4mjVLlE-z1OfQ2mRBiVvCSaG3lv4,3919
+hatchet_sdk/rate_limit.py,sha256=ptFvHJU9rCzxfcITZEnRkKtJM-SY12WP84FzBwCKAPE,3277
 hatchet_sdk/runnables/action.py,sha256=zrVHpyzIQ9XZgWwY69b_6uhZd53An4trRoLd9b3os5E,4384
 hatchet_sdk/runnables/contextvars.py,sha256=jHrrewUlFPAT9f2u3VCsuSlDBtBoagEUtUzJOSmm4yk,1118
-hatchet_sdk/runnables/task.py,sha256=-5AeToJqIbpgGeyrol5VJaFGND4l_UY8k9VIhrUBXaw,12796
+hatchet_sdk/runnables/task.py,sha256=JsiDBkYQVJodyqtNDT9z8Pwz3ePL8GhY0Z1-ptPw9ms,16030
 hatchet_sdk/runnables/types.py,sha256=M23xSMTBPl12CXCCXZ0wbnqZ_sePB6CJKtOdipiNDlg,4362
-hatchet_sdk/runnables/workflow.py,sha256=_F5XIeokfQxMgrFtVjdM3ArtCwfKj3u51dinA7ouyWI,57513
+hatchet_sdk/runnables/workflow.py,sha256=Nm71LrY_EcPJUrlzRuBh6sp7bv-pMrt0gQgZNftue9g,57848
 hatchet_sdk/token.py,sha256=KjIiInwG5Kqd_FO4BSW1x_5Uc7PFbnzIVJqr50-ZldE,779
 hatchet_sdk/utils/aio.py,sha256=cu1rD_UZkShtfzi7iXMYwBBaCRdxJQTdUC0_mf8nU2E,499
 hatchet_sdk/utils/backoff.py,sha256=6B5Rb5nLKw_TqqgpJMYjIBV1PTTtbOMRZCveisVhg_I,353
@@ -313,7 +313,7 @@ hatchet_sdk/utils/opentelemetry.py,sha256=64TVwCLrUzEmcL2BUNPV_QubfiR5jajOZtVeGY
 hatchet_sdk/utils/proto_enums.py,sha256=v2gp_ZmIhPxURVXwz5lscllXwZXDl5XGXeL6gezw3o0,1241
 hatchet_sdk/utils/serde.py,sha256=5edZsFddc5KjfbBjHVizPKW6PGgzM5guaLQ5FAFrog8,1769
 hatchet_sdk/utils/timedelta_to_expression.py,sha256=YujnBnGn7lxtkUdKIeqmOiN_ZCGBpRPjCCSzcD3jxzA,644
-hatchet_sdk/utils/typing.py,sha256=FgYnZyJSoRjNVFodxlI9gn0X8ST1KFed7xfUynIxa2U,978
+hatchet_sdk/utils/typing.py,sha256=zyRsfF-HO_aVhNx_vun-BRCbMWYDBps8aV0NczGUcho,1534
 hatchet_sdk/v0/__init__.py,sha256=r3Q7l2RsLgdIkK2jjiz7-JJpD1T_Zy--Oa9MN5n_yEs,9654
 hatchet_sdk/v0/client.py,sha256=G1RDZln9Og7tRQulogXkZw8TsVlx7f0VvmtFI_VAe6E,3495
 hatchet_sdk/v0/clients/admin.py,sha256=0ZsBPLZ5ktn_oC2VZsJqqcqr2m8TnU07ogml46p_X58,18065
@@ -538,11 +538,11 @@ hatchet_sdk/v0/workflow_run.py,sha256=jsEZprXshrSV7i_TtL5uoCL03D18zQ3NeJCq7mp97D
 hatchet_sdk/worker/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 hatchet_sdk/worker/action_listener_process.py,sha256=CzXen-7tFG_rryvM2xWV2_KMUFC2-i_Ts643TB_Urd8,12878
 hatchet_sdk/worker/runner/run_loop_manager.py,sha256=BcdfxSvZdrxbeTZSUASwCTMKJe6pwLorHVKPTprkM2k,4176
-hatchet_sdk/worker/runner/runner.py,sha256=L5J_dbwpz2P0rbDzpxW1udByQJHii28KEvzx-1LxB_8,22406
-hatchet_sdk/worker/runner/utils/capture_logs.py,sha256=FBEcPTi6cxFsGPER51k-xeMUzVJhLIAq7NyKTHCM5-E,4386
+hatchet_sdk/worker/runner/runner.py,sha256=QULD00hEyW2dcHCcH46C1k7mxr5nHEwtusyAs33VOO0,22857
+hatchet_sdk/worker/runner/utils/capture_logs.py,sha256=Gpzcs1JVqZE0spFztyjcbcUTjDCrdIBI9c_POVhSIvk,4616
 hatchet_sdk/worker/worker.py,sha256=9EiESMMcS7voa4cAnmnHMx4rC-pqaTmP74bcTbFPqfQ,16435
 hatchet_sdk/workflow_run.py,sha256=KcylcqRwKADtnzOTjoiVr1vdr7qTZFtDeD5aRS6A4Y8,2823
-hatchet_sdk-1.16.5.dist-info/METADATA,sha256=UijjCLHKfTjcYSAAmG-cy0awxxfNPnuqlls_66QPWgc,3628
-hatchet_sdk-1.16.5.dist-info/WHEEL,sha256=FMvqSimYX_P7y0a7UY-_Mc83r5zkBZsCYPm7Lr0Bsq4,88
-hatchet_sdk-1.16.5.dist-info/entry_points.txt,sha256=Un_76pcLse-ZGBlwebhQpnTPyQrripeHW8J7qmEpGOk,1400
-hatchet_sdk-1.16.5.dist-info/RECORD,,
+hatchet_sdk-1.17.0.dist-info/METADATA,sha256=_ChzeGYdEXj9NRkokdcAU4aME768nOI09kILBnz2YUE,3585
+hatchet_sdk-1.17.0.dist-info/WHEEL,sha256=FMvqSimYX_P7y0a7UY-_Mc83r5zkBZsCYPm7Lr0Bsq4,88
+hatchet_sdk-1.17.0.dist-info/entry_points.txt,sha256=Un_76pcLse-ZGBlwebhQpnTPyQrripeHW8J7qmEpGOk,1400
+hatchet_sdk-1.17.0.dist-info/RECORD,,