hatchet-sdk 1.2.5__py3-none-any.whl → 1.3.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of hatchet-sdk was flagged as potentially problematic by the registry scanner.
Files changed (60)
  1. hatchet_sdk/__init__.py +7 -5
  2. hatchet_sdk/client.py +14 -6
  3. hatchet_sdk/clients/admin.py +57 -15
  4. hatchet_sdk/clients/dispatcher/action_listener.py +2 -2
  5. hatchet_sdk/clients/dispatcher/dispatcher.py +20 -7
  6. hatchet_sdk/clients/event_ts.py +25 -5
  7. hatchet_sdk/clients/listeners/durable_event_listener.py +125 -0
  8. hatchet_sdk/clients/listeners/pooled_listener.py +255 -0
  9. hatchet_sdk/clients/listeners/workflow_listener.py +62 -0
  10. hatchet_sdk/clients/rest/api/api_token_api.py +24 -24
  11. hatchet_sdk/clients/rest/api/default_api.py +64 -64
  12. hatchet_sdk/clients/rest/api/event_api.py +64 -64
  13. hatchet_sdk/clients/rest/api/github_api.py +8 -8
  14. hatchet_sdk/clients/rest/api/healthcheck_api.py +16 -16
  15. hatchet_sdk/clients/rest/api/log_api.py +16 -16
  16. hatchet_sdk/clients/rest/api/metadata_api.py +24 -24
  17. hatchet_sdk/clients/rest/api/rate_limits_api.py +8 -8
  18. hatchet_sdk/clients/rest/api/slack_api.py +16 -16
  19. hatchet_sdk/clients/rest/api/sns_api.py +24 -24
  20. hatchet_sdk/clients/rest/api/step_run_api.py +56 -56
  21. hatchet_sdk/clients/rest/api/task_api.py +56 -56
  22. hatchet_sdk/clients/rest/api/tenant_api.py +128 -128
  23. hatchet_sdk/clients/rest/api/user_api.py +96 -96
  24. hatchet_sdk/clients/rest/api/worker_api.py +24 -24
  25. hatchet_sdk/clients/rest/api/workflow_api.py +144 -144
  26. hatchet_sdk/clients/rest/api/workflow_run_api.py +48 -48
  27. hatchet_sdk/clients/rest/api/workflow_runs_api.py +40 -40
  28. hatchet_sdk/clients/rest/api_client.py +5 -8
  29. hatchet_sdk/clients/rest/configuration.py +7 -3
  30. hatchet_sdk/clients/rest/models/tenant_step_run_queue_metrics.py +2 -2
  31. hatchet_sdk/clients/rest/models/v1_task_summary.py +5 -0
  32. hatchet_sdk/clients/rest/models/workflow_runs_metrics.py +5 -1
  33. hatchet_sdk/clients/rest/rest.py +160 -111
  34. hatchet_sdk/clients/v1/api_client.py +2 -2
  35. hatchet_sdk/context/context.py +22 -21
  36. hatchet_sdk/features/cron.py +41 -40
  37. hatchet_sdk/features/logs.py +7 -6
  38. hatchet_sdk/features/metrics.py +19 -18
  39. hatchet_sdk/features/runs.py +88 -68
  40. hatchet_sdk/features/scheduled.py +42 -42
  41. hatchet_sdk/features/workers.py +17 -16
  42. hatchet_sdk/features/workflows.py +15 -14
  43. hatchet_sdk/hatchet.py +1 -1
  44. hatchet_sdk/runnables/standalone.py +12 -9
  45. hatchet_sdk/runnables/task.py +66 -2
  46. hatchet_sdk/runnables/types.py +8 -0
  47. hatchet_sdk/runnables/workflow.py +48 -136
  48. hatchet_sdk/waits.py +8 -8
  49. hatchet_sdk/worker/runner/run_loop_manager.py +4 -4
  50. hatchet_sdk/worker/runner/runner.py +22 -11
  51. hatchet_sdk/worker/worker.py +29 -25
  52. hatchet_sdk/workflow_run.py +55 -9
  53. {hatchet_sdk-1.2.5.dist-info → hatchet_sdk-1.3.0.dist-info}/METADATA +1 -1
  54. {hatchet_sdk-1.2.5.dist-info → hatchet_sdk-1.3.0.dist-info}/RECORD +57 -57
  55. hatchet_sdk/clients/durable_event_listener.py +0 -329
  56. hatchet_sdk/clients/workflow_listener.py +0 -288
  57. hatchet_sdk/utils/aio.py +0 -43
  58. /hatchet_sdk/clients/{run_event_listener.py → listeners/run_event_listener.py} +0 -0
  59. {hatchet_sdk-1.2.5.dist-info → hatchet_sdk-1.3.0.dist-info}/WHEEL +0 -0
  60. {hatchet_sdk-1.2.5.dist-info → hatchet_sdk-1.3.0.dist-info}/entry_points.txt +0 -0
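The core of this release is a listener refactor: the standalone `durable_event_listener.py` (-329 lines) and `workflow_listener.py` (-288 lines) modules are deleted (full diffs below), slimmer replacements appear under `hatchet_sdk/clients/listeners/` (+125 and +62 lines), and a new shared `pooled_listener.py` (+255 lines) is added alongside them. Both deleted modules hand-roll the same shape: one gRPC stream fanned out to many awaiting subscribers, keyed by workflow run id in one case and by (task id, signal key) pair in the other, which the new shared module presumably factors out. Below is a minimal sketch of that pooling shape; the names (`PooledListener`, `publish`) are hypothetical and are not the SDK's actual API.

```python
import asyncio
from typing import Generic, TypeVar

K = TypeVar("K")  # subscription key, e.g. a workflow run id or (task_id, signal_key)
E = TypeVar("E")  # event type delivered to subscribers


class PooledListener(Generic[K, E]):
    """Multiplexes one server stream across many awaiting subscribers."""

    def __init__(self) -> None:
        self.counter = 0
        self.counter_lock = asyncio.Lock()
        # subscription id -> per-subscriber queue of events
        self.queues: dict[int, asyncio.Queue[E]] = {}
        # key -> subscription ids interested in that key
        self.keys_to_subscriptions: dict[K, list[int]] = {}

    async def subscribe(self, key: K) -> E:
        # allocate a unique subscription id under a lock, as both deleted
        # listeners do with their subscription_counter
        async with self.counter_lock:
            self.counter += 1
            sub_id = self.counter
        self.queues[sub_id] = asyncio.Queue()
        self.keys_to_subscriptions.setdefault(key, []).append(sub_id)
        try:
            return await self.queues[sub_id].get()
        finally:
            self.keys_to_subscriptions[key].remove(sub_id)
            del self.queues[sub_id]

    async def publish(self, key: K, event: E) -> None:
        # fan a stream event out to every subscriber waiting on its key;
        # copy the list so cleanup during iteration is safe
        for sub_id in list(self.keys_to_subscriptions.get(key, [])):
            await self.queues[sub_id].put(event)


async def demo() -> None:
    pool: PooledListener[str, str] = PooledListener()
    waiter = asyncio.create_task(pool.subscribe("run-123"))
    await asyncio.sleep(0)  # let the subscriber register its queue
    await pool.publish("run-123", "completed")
    print(await waiter)  # completed


asyncio.run(demo())
```

Keeping one stream per process rather than one per waiter is what lets many concurrent awaits on workflow results share a single connection.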
hatchet_sdk/clients/durable_event_listener.py DELETED
@@ -1,329 +0,0 @@
- import asyncio
- import json
- from collections.abc import AsyncIterator
- from typing import Any, Literal, cast
-
- import grpc
- import grpc.aio
- from grpc._cython import cygrpc  # type: ignore[attr-defined]
- from pydantic import BaseModel, ConfigDict
-
- from hatchet_sdk.clients.event_ts import ThreadSafeEvent, read_with_interrupt
- from hatchet_sdk.clients.rest.tenacity_utils import tenacity_retry
- from hatchet_sdk.config import ClientConfig
- from hatchet_sdk.connection import new_conn
- from hatchet_sdk.contracts.v1.dispatcher_pb2 import (
-     DurableEvent,
-     ListenForDurableEventRequest,
- )
- from hatchet_sdk.contracts.v1.dispatcher_pb2 import (
-     RegisterDurableEventRequest as RegisterDurableEventRequestProto,
- )
- from hatchet_sdk.contracts.v1.dispatcher_pb2_grpc import V1DispatcherStub
- from hatchet_sdk.contracts.v1.shared.condition_pb2 import DurableEventListenerConditions
- from hatchet_sdk.logger import logger
- from hatchet_sdk.metadata import get_metadata
- from hatchet_sdk.waits import SleepCondition, UserEventCondition
-
- DEFAULT_DURABLE_EVENT_LISTENER_RETRY_INTERVAL = 3  # seconds
- DEFAULT_DURABLE_EVENT_LISTENER_RETRY_COUNT = 5
- DEFAULT_DURABLE_EVENT_LISTENER_INTERRUPT_INTERVAL = 1800  # 30 minutes
-
-
- class _Subscription:
-     def __init__(self, id: int, task_id: str, signal_key: str):
-         self.id = id
-         self.task_id = task_id
-         self.signal_key = signal_key
-         self.queue: asyncio.Queue[DurableEvent | None] = asyncio.Queue()
-
-     async def __aiter__(self) -> "_Subscription":
-         return self
-
-     async def __anext__(self) -> DurableEvent | None:
-         return await self.queue.get()
-
-     async def get(self) -> DurableEvent:
-         event = await self.queue.get()
-
-         if event is None:
-             raise StopAsyncIteration
-
-         return event
-
-     async def put(self, item: DurableEvent) -> None:
-         await self.queue.put(item)
-
-     async def close(self) -> None:
-         await self.queue.put(None)
-
-
- class RegisterDurableEventRequest(BaseModel):
-     model_config = ConfigDict(arbitrary_types_allowed=True)
-
-     task_id: str
-     signal_key: str
-     conditions: list[SleepCondition | UserEventCondition]
-
-     def to_proto(self) -> RegisterDurableEventRequestProto:
-         return RegisterDurableEventRequestProto(
-             task_id=self.task_id,
-             signal_key=self.signal_key,
-             conditions=DurableEventListenerConditions(
-                 sleep_conditions=[
-                     c.to_pb() for c in self.conditions if isinstance(c, SleepCondition)
-                 ],
-                 user_event_conditions=[
-                     c.to_pb()
-                     for c in self.conditions
-                     if isinstance(c, UserEventCondition)
-                 ],
-             ),
-         )
-
-
- class DurableEventListener:
-     def __init__(self, config: ClientConfig):
-         self.token = config.token
-         self.config = config
-
-         # list of all active subscriptions, mapping from a subscription id to a task id and signal key
-         self.subscriptions_to_task_id_signal_key: dict[int, tuple[str, str]] = {}
-
-         # task id-signal key tuples mapped to an array of subscription ids
-         self.task_id_signal_key_to_subscriptions: dict[tuple[str, str], list[int]] = {}
-
-         self.subscription_counter: int = 0
-         self.subscription_counter_lock: asyncio.Lock = asyncio.Lock()
-
-         self.requests: asyncio.Queue[ListenForDurableEventRequest | int] = (
-             asyncio.Queue()
-         )
-
-         self.listener: (
-             grpc.aio.UnaryStreamCall[ListenForDurableEventRequest, DurableEvent] | None
-         ) = None
-         self.listener_task: asyncio.Task[None] | None = None
-
-         self.curr_requester: int = 0
-
-         self.events: dict[int, _Subscription] = {}
-
-         self.interrupter: asyncio.Task[None] | None = None
-
-     async def _interrupter(self) -> None:
-         """
-         _interrupter runs in a separate thread and interrupts the listener according to a configurable duration.
-         """
-         await asyncio.sleep(DEFAULT_DURABLE_EVENT_LISTENER_INTERRUPT_INTERVAL)
-
-         if self.interrupt is not None:
-             self.interrupt.set()
-
-     async def _init_producer(self) -> None:
-         conn = new_conn(self.config, True)
-         client = V1DispatcherStub(conn)
-
-         try:
-             if not self.listener:
-                 while True:
-                     try:
-                         self.listener = await self._retry_subscribe(client)
-
-                         logger.debug("Workflow run listener connected.")
-
-                         # spawn an interrupter task
-                         if self.interrupter is not None and not self.interrupter.done():
-                             self.interrupter.cancel()
-
-                         self.interrupter = asyncio.create_task(self._interrupter())
-
-                         while True:
-                             self.interrupt = ThreadSafeEvent()
-                             if self.listener is None:
-                                 continue
-
-                             t = asyncio.create_task(
-                                 read_with_interrupt(self.listener, self.interrupt)
-                             )
-                             await self.interrupt.wait()
-
-                             if not t.done():
-                                 logger.warning(
-                                     "Interrupted read_with_interrupt task of durable event listener"
-                                 )
-
-                                 t.cancel()
-                                 self.listener.cancel()
-
-                                 await asyncio.sleep(
-                                     DEFAULT_DURABLE_EVENT_LISTENER_RETRY_INTERVAL
-                                 )
-                                 break
-
-                             event = t.result()
-
-                             if event is cygrpc.EOF:
-                                 break
-
-                             # get a list of subscriptions for this task-signal pair
-                             subscriptions = (
-                                 self.task_id_signal_key_to_subscriptions.get(
-                                     (event.task_id, event.signal_key), []
-                                 )
-                             )
-
-                             for subscription_id in subscriptions:
-                                 await self.events[subscription_id].put(event)
-
-                     except grpc.RpcError as e:
-                         logger.debug(f"grpc error in durable event listener: {e}")
-                         await asyncio.sleep(
-                             DEFAULT_DURABLE_EVENT_LISTENER_RETRY_INTERVAL
-                         )
-                         continue
-
-         except Exception as e:
-             logger.error(f"Error in durable event listener: {e}")
-
-             self.listener = None
-
-             # close all subscriptions
-             for subscription_id in self.events:
-                 await self.events[subscription_id].close()
-
-             raise e
-
-     async def _request(self) -> AsyncIterator[ListenForDurableEventRequest]:
-         self.curr_requester = self.curr_requester + 1
-
-         # replay all existing subscriptions
-         for task_id, signal_key in set(
-             self.subscriptions_to_task_id_signal_key.values()
-         ):
-             yield ListenForDurableEventRequest(
-                 task_id=task_id,
-                 signal_key=signal_key,
-             )
-
-         while True:
-             request = await self.requests.get()
-
-             # if the request is an int which matches the current requester, then we should stop
-             if request == self.curr_requester:
-                 break
-
-             # if we've gotten an int that doesn't match the current requester, then we should ignore it
-             if isinstance(request, int):
-                 continue
-
-             yield request
-             self.requests.task_done()
-
-     def cleanup_subscription(self, subscription_id: int) -> None:
-         task_id_signal_key = self.subscriptions_to_task_id_signal_key[subscription_id]
-
-         if task_id_signal_key in self.task_id_signal_key_to_subscriptions:
-             self.task_id_signal_key_to_subscriptions[task_id_signal_key].remove(
-                 subscription_id
-             )
-
-         del self.subscriptions_to_task_id_signal_key[subscription_id]
-         del self.events[subscription_id]
-
-     async def subscribe(self, task_id: str, signal_key: str) -> DurableEvent:
-         subscription_id: int | None = None
-
-         try:
-             # create a new subscription id, place a mutex on the counter
-             async with self.subscription_counter_lock:
-                 self.subscription_counter += 1
-                 subscription_id = self.subscription_counter
-
-             self.subscriptions_to_task_id_signal_key[subscription_id] = (
-                 task_id,
-                 signal_key,
-             )
-
-             if (task_id, signal_key) not in self.task_id_signal_key_to_subscriptions:
-                 self.task_id_signal_key_to_subscriptions[(task_id, signal_key)] = [
-                     subscription_id
-                 ]
-             else:
-                 self.task_id_signal_key_to_subscriptions[(task_id, signal_key)].append(
-                     subscription_id
-                 )
-
-             self.events[subscription_id] = _Subscription(
-                 subscription_id, task_id, signal_key
-             )
-
-             await self.requests.put(
-                 ListenForDurableEventRequest(
-                     task_id=task_id,
-                     signal_key=signal_key,
-                 )
-             )
-
-             if not self.listener_task or self.listener_task.done():
-                 self.listener_task = asyncio.create_task(self._init_producer())
-
-             return await self.events[subscription_id].get()
-         except asyncio.CancelledError:
-             raise
-         finally:
-             if subscription_id:
-                 self.cleanup_subscription(subscription_id)
-
-     async def _retry_subscribe(
-         self,
-         client: V1DispatcherStub,
-     ) -> grpc.aio.UnaryStreamCall[ListenForDurableEventRequest, DurableEvent]:
-         retries = 0
-
-         while retries < DEFAULT_DURABLE_EVENT_LISTENER_RETRY_COUNT:
-             try:
-                 if retries > 0:
-                     await asyncio.sleep(DEFAULT_DURABLE_EVENT_LISTENER_RETRY_INTERVAL)
-
-                 # signal previous async iterator to stop
-                 if self.curr_requester != 0:
-                     self.requests.put_nowait(self.curr_requester)
-
-                 return cast(
-                     grpc.aio.UnaryStreamCall[
-                         ListenForDurableEventRequest, DurableEvent
-                     ],
-                     client.ListenForDurableEvent(
-                         self._request(),  # type: ignore[arg-type]
-                         metadata=get_metadata(self.token),
-                     ),
-                 )
-             except grpc.RpcError as e:
-                 if e.code() == grpc.StatusCode.UNAVAILABLE:
-                     retries = retries + 1
-                 else:
-                     raise ValueError(f"gRPC error: {e}")
-
-         raise ValueError("Failed to connect to durable event listener")
-
-     @tenacity_retry
-     def register_durable_event(
-         self, request: RegisterDurableEventRequest
-     ) -> Literal[True]:
-         conn = new_conn(self.config, True)
-         client = V1DispatcherStub(conn)
-
-         client.RegisterDurableEvent(
-             request.to_proto(),
-             timeout=5,
-             metadata=get_metadata(self.token),
-         )
-
-         return True
-
-     @tenacity_retry
-     async def result(self, task_id: str, signal_key: str) -> dict[str, Any]:
-         event = await self.subscribe(task_id, signal_key)
-
-         return cast(dict[str, Any], json.loads(event.data.decode("utf-8")))
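Most of the deleted module is reconnection machinery: an interrupter task fires every 30 minutes (`DEFAULT_DURABLE_EVENT_LISTENER_INTERRUPT_INTERVAL`) to force the stream read to be abandoned and re-established, and `_retry_subscribe` retries `UNAVAILABLE` errors up to five times at three-second intervals. The core primitive is `read_with_interrupt` from `hatchet_sdk/clients/event_ts.py` (also touched in this release, +25 -5): race a stream read against an event so that either a completed read or an external interrupt wakes the loop. A self-contained sketch of that pattern follows; the helper body here is an assumption and differs from the SDK's real implementation.

```python
import asyncio
from typing import Any, Coroutine, TypeVar

T = TypeVar("T")


async def read_with_interrupt(
    read: Coroutine[Any, Any, T], interrupt: asyncio.Event
) -> T:
    # Await the read, then set the event so a caller blocked on
    # interrupt.wait() wakes up as soon as the read completes.
    result = await read
    interrupt.set()
    return result


async def demo() -> None:
    async def fake_stream_read() -> str:
        await asyncio.sleep(0.1)  # stands in for reading the gRPC stream
        return "event-payload"

    interrupt = asyncio.Event()
    t = asyncio.create_task(read_with_interrupt(fake_stream_read(), interrupt))

    # Wakes on either a completed read or an external interrupt.set(),
    # e.g. from the 30-minute interrupter task.
    await interrupt.wait()

    if t.done():
        print(t.result())  # event-payload
    else:
        t.cancel()  # interrupted mid-read: cancel the read and reconnect


asyncio.run(demo())
```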
hatchet_sdk/clients/workflow_listener.py DELETED
@@ -1,288 +0,0 @@
- import asyncio
- import json
- from collections.abc import AsyncIterator
- from typing import Any, cast
-
- import grpc
- import grpc.aio
- from grpc._cython import cygrpc  # type: ignore[attr-defined]
-
- from hatchet_sdk.clients.event_ts import ThreadSafeEvent, read_with_interrupt
- from hatchet_sdk.config import ClientConfig
- from hatchet_sdk.connection import new_conn
- from hatchet_sdk.contracts.dispatcher_pb2 import (
-     SubscribeToWorkflowRunsRequest,
-     WorkflowRunEvent,
- )
- from hatchet_sdk.contracts.dispatcher_pb2_grpc import DispatcherStub
- from hatchet_sdk.logger import logger
- from hatchet_sdk.metadata import get_metadata
-
- DEFAULT_WORKFLOW_LISTENER_RETRY_INTERVAL = 3  # seconds
- DEFAULT_WORKFLOW_LISTENER_RETRY_COUNT = 5
- DEFAULT_WORKFLOW_LISTENER_INTERRUPT_INTERVAL = 1800  # 30 minutes
-
- DEDUPE_MESSAGE = "DUPLICATE_WORKFLOW_RUN"
-
-
- class _Subscription:
-     def __init__(self, id: int, workflow_run_id: str):
-         self.id = id
-         self.workflow_run_id = workflow_run_id
-         self.queue: asyncio.Queue[WorkflowRunEvent | None] = asyncio.Queue()
-
-     async def __aiter__(self) -> "_Subscription":
-         return self
-
-     async def __anext__(self) -> WorkflowRunEvent | None:
-         return await self.queue.get()
-
-     async def get(self) -> WorkflowRunEvent:
-         event = await self.queue.get()
-
-         if event is None:
-             raise StopAsyncIteration
-
-         return event
-
-     async def put(self, item: WorkflowRunEvent) -> None:
-         await self.queue.put(item)
-
-     async def close(self) -> None:
-         await self.queue.put(None)
-
-
- class PooledWorkflowRunListener:
-     def __init__(self, config: ClientConfig):
-         self.token = config.token
-         self.config = config
-
-         # list of all active subscriptions, mapping from a subscription id to a workflow run id
-         self.subscriptions_to_workflows: dict[int, str] = {}
-
-         # list of workflow run ids mapped to an array of subscription ids
-         self.workflows_to_subscriptions: dict[str, list[int]] = {}
-
-         self.subscription_counter: int = 0
-         self.subscription_counter_lock: asyncio.Lock = asyncio.Lock()
-
-         self.requests: asyncio.Queue[SubscribeToWorkflowRunsRequest | int] = (
-             asyncio.Queue()
-         )
-
-         self.listener: (
-             grpc.aio.UnaryStreamCall[SubscribeToWorkflowRunsRequest, WorkflowRunEvent]
-             | None
-         ) = None
-         self.listener_task: asyncio.Task[None] | None = None
-
-         self.curr_requester: int = 0
-
-         # events have keys of the format workflow_run_id + subscription_id
-         self.events: dict[int, _Subscription] = {}
-
-         self.interrupter: asyncio.Task[None] | None = None
-
-         ## IMPORTANT: This needs to be created lazily so we don't require
-         ## an event loop to instantiate the client.
-         self.client: DispatcherStub | None = None
-
-     async def _interrupter(self) -> None:
-         """
-         _interrupter runs in a separate thread and interrupts the listener according to a configurable duration.
-         """
-         await asyncio.sleep(DEFAULT_WORKFLOW_LISTENER_INTERRUPT_INTERVAL)
-
-         if self.interrupt is not None:
-             self.interrupt.set()
-
-     async def _init_producer(self) -> None:
-         try:
-             if not self.listener:
-                 while True:
-                     try:
-                         self.listener = await self._retry_subscribe()
-
-                         logger.debug("Workflow run listener connected.")
-
-                         # spawn an interrupter task
-                         if self.interrupter is not None and not self.interrupter.done():
-                             self.interrupter.cancel()
-
-                         self.interrupter = asyncio.create_task(self._interrupter())
-
-                         while True:
-                             self.interrupt = ThreadSafeEvent()
-                             if self.listener is None:
-                                 continue
-
-                             t = asyncio.create_task(
-                                 read_with_interrupt(self.listener, self.interrupt)
-                             )
-                             await self.interrupt.wait()
-
-                             if not t.done():
-                                 # print a warning
-                                 logger.warning(
-                                     "Interrupted read_with_interrupt task of workflow run listener"
-                                 )
-
-                                 t.cancel()
-                                 self.listener.cancel()
-
-                                 await asyncio.sleep(
-                                     DEFAULT_WORKFLOW_LISTENER_RETRY_INTERVAL
-                                 )
-                                 break
-
-                             workflow_event: WorkflowRunEvent = t.result()
-
-                             if workflow_event is cygrpc.EOF:
-                                 break
-
-                             # get a list of subscriptions for this workflow
-                             subscriptions = self.workflows_to_subscriptions.get(
-                                 workflow_event.workflowRunId, []
-                             )
-
-                             for subscription_id in subscriptions:
-                                 await self.events[subscription_id].put(workflow_event)
-
-                     except grpc.RpcError as e:
-                         logger.debug(f"grpc error in workflow run listener: {e}")
-                         await asyncio.sleep(DEFAULT_WORKFLOW_LISTENER_RETRY_INTERVAL)
-                         continue
-
-         except Exception as e:
-             logger.error(f"Error in workflow run listener: {e}")
-
-             self.listener = None
-
-             # close all subscriptions
-             for subscription_id in self.events:
-                 await self.events[subscription_id].close()
-
-             raise e
-
-     async def _request(self) -> AsyncIterator[SubscribeToWorkflowRunsRequest]:
-         self.curr_requester = self.curr_requester + 1
-
-         # replay all existing subscriptions
-         workflow_run_set = set(self.subscriptions_to_workflows.values())
-
-         for workflow_run_id in workflow_run_set:
-             yield SubscribeToWorkflowRunsRequest(
-                 workflowRunId=workflow_run_id,
-             )
-
-         while True:
-             request = await self.requests.get()
-
-             # if the request is an int which matches the current requester, then we should stop
-             if request == self.curr_requester:
-                 break
-
-             # if we've gotten an int that doesn't match the current requester, then we should ignore it
-             if isinstance(request, int):
-                 continue
-
-             yield request
-             self.requests.task_done()
-
-     def cleanup_subscription(self, subscription_id: int) -> None:
-         workflow_run_id = self.subscriptions_to_workflows[subscription_id]
-
-         if workflow_run_id in self.workflows_to_subscriptions:
-             self.workflows_to_subscriptions[workflow_run_id].remove(subscription_id)
-
-         del self.subscriptions_to_workflows[subscription_id]
-         del self.events[subscription_id]
-
-     async def subscribe(self, workflow_run_id: str) -> WorkflowRunEvent:
-         subscription_id: int | None = None
-
-         try:
-             # create a new subscription id, place a mutex on the counter
-             await self.subscription_counter_lock.acquire()
-             self.subscription_counter += 1
-             subscription_id = self.subscription_counter
-             self.subscription_counter_lock.release()
-
-             self.subscriptions_to_workflows[subscription_id] = workflow_run_id
-
-             if workflow_run_id not in self.workflows_to_subscriptions:
-                 self.workflows_to_subscriptions[workflow_run_id] = [subscription_id]
-             else:
-                 self.workflows_to_subscriptions[workflow_run_id].append(subscription_id)
-
-             self.events[subscription_id] = _Subscription(
-                 subscription_id, workflow_run_id
-             )
-
-             await self.requests.put(
-                 SubscribeToWorkflowRunsRequest(
-                     workflowRunId=workflow_run_id,
-                 )
-             )
-
-             if not self.listener_task or self.listener_task.done():
-                 self.listener_task = asyncio.create_task(self._init_producer())
-
-             return await self.events[subscription_id].get()
-         except asyncio.CancelledError:
-             raise
-         finally:
-             if subscription_id:
-                 self.cleanup_subscription(subscription_id)
-
-     async def aio_result(self, workflow_run_id: str) -> dict[str, Any]:
-         from hatchet_sdk.clients.admin import DedupeViolationErr
-
-         event = await self.subscribe(workflow_run_id)
-         errors = [result.error for result in event.results if result.error]
-
-         if errors:
-             if DEDUPE_MESSAGE in errors[0]:
-                 raise DedupeViolationErr(errors[0])
-             else:
-                 raise Exception(f"Workflow Errors: {errors}")
-
-         return {
-             result.stepReadableId: json.loads(result.output)
-             for result in event.results
-             if result.output
-         }
-
-     async def _retry_subscribe(
-         self,
-     ) -> grpc.aio.UnaryStreamCall[SubscribeToWorkflowRunsRequest, WorkflowRunEvent]:
-         retries = 0
-         if self.client is None:
-             conn = new_conn(self.config, True)
-             self.client = DispatcherStub(conn)
-
-         while retries < DEFAULT_WORKFLOW_LISTENER_RETRY_COUNT:
-             try:
-                 if retries > 0:
-                     await asyncio.sleep(DEFAULT_WORKFLOW_LISTENER_RETRY_INTERVAL)
-
-                 # signal previous async iterator to stop
-                 if self.curr_requester != 0:
-                     self.requests.put_nowait(self.curr_requester)
-
-                 return cast(
-                     grpc.aio.UnaryStreamCall[
-                         SubscribeToWorkflowRunsRequest, WorkflowRunEvent
-                     ],
-                     self.client.SubscribeToWorkflowRuns(
-                         self._request(),  # type: ignore[arg-type]
-                         metadata=get_metadata(self.token),
-                     ),
-                 )
-             except grpc.RpcError as e:
-                 if e.code() == grpc.StatusCode.UNAVAILABLE:
-                     retries = retries + 1
-                 else:
-                     raise ValueError(f"gRPC error: {e}")
-
-         raise ValueError("Failed to connect to workflow run listener")
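`aio_result` is the consumer-facing end of this listener: it blocks until the terminal `WorkflowRunEvent` arrives, raises `DedupeViolationErr` when the first error contains `DUPLICATE_WORKFLOW_RUN`, and otherwise returns step outputs keyed by `stepReadableId`. A sketch of that shaping logic with plain dicts standing in for the protobuf results; the step names and payloads here are invented.

```python
import json

DEDUPE_MESSAGE = "DUPLICATE_WORKFLOW_RUN"

# Stand-ins for WorkflowRunEvent.results; field names mirror the protobuf,
# but the step ids and payloads are made up for illustration.
results = [
    {"stepReadableId": "fetch", "error": "", "output": '{"rows": 3}'},
    {"stepReadableId": "train", "error": "", "output": '{"loss": 0.12}'},
]

errors = [r["error"] for r in results if r["error"]]
if errors:
    if DEDUPE_MESSAGE in errors[0]:
        # the SDK raises DedupeViolationErr here
        raise Exception(f"dedupe violation: {errors[0]}")
    raise Exception(f"Workflow Errors: {errors}")

outputs = {
    r["stepReadableId"]: json.loads(r["output"]) for r in results if r["output"]
}
print(outputs)  # {'fetch': {'rows': 3}, 'train': {'loss': 0.12}}
```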
hatchet_sdk/utils/aio.py DELETED
@@ -1,43 +0,0 @@
- import asyncio
- from concurrent.futures import ThreadPoolExecutor
- from typing import Callable, Coroutine, ParamSpec, TypeVar
-
- P = ParamSpec("P")
- R = TypeVar("R")
- Y = TypeVar("Y")
- S = TypeVar("S")
-
-
- def _run_async_function_do_not_use_directly(
-     async_func: Callable[P, Coroutine[Y, S, R]],
-     *args: P.args,
-     **kwargs: P.kwargs,
- ) -> R:
-     loop = asyncio.new_event_loop()
-     asyncio.set_event_loop(loop)
-     try:
-         return loop.run_until_complete(async_func(*args, **kwargs))
-     finally:
-         loop.close()
-
-
- def run_async_from_sync(
-     async_func: Callable[P, Coroutine[Y, S, R]],
-     *args: P.args,
-     **kwargs: P.kwargs,
- ) -> R:
-     try:
-         loop = asyncio.get_event_loop()
-     except RuntimeError:
-         loop = None
-
-     if loop and loop.is_running():
-         return loop.run_until_complete(async_func(*args, **kwargs))
-     else:
-         with ThreadPoolExecutor() as executor:
-             future = executor.submit(
-                 lambda: _run_async_function_do_not_use_directly(
-                     async_func, *args, **kwargs
-                 )
-             )
-             return future.result()
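One detail worth flagging in this deleted helper: `run_async_from_sync` calls `loop.run_until_complete` on a loop it has just confirmed is running, and in CPython that raises `RuntimeError: This event loop is already running`, so the running-loop branch could never succeed as written. That is a plausible reason the module was dropped entirely rather than moved. A safer bridge, shown here as an assumption rather than what 1.3.0 actually does internally, always runs the coroutine on a fresh loop in a worker thread:

```python
import asyncio
from concurrent.futures import ThreadPoolExecutor
from typing import Callable, Coroutine, ParamSpec, TypeVar

P = ParamSpec("P")
R = TypeVar("R")


def run_async_from_sync(
    async_func: Callable[P, Coroutine[object, object, R]],
    *args: P.args,
    **kwargs: P.kwargs,
) -> R:
    # asyncio.run creates, runs, and closes a fresh event loop in the worker
    # thread, so this works whether or not the calling thread has a running loop.
    with ThreadPoolExecutor(max_workers=1) as executor:
        return executor.submit(asyncio.run, async_func(*args, **kwargs)).result()
```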