hatchet-sdk 1.0.0__py3-none-any.whl → 1.0.0a1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of hatchet-sdk might be problematic.
- hatchet_sdk/__init__.py +27 -16
- hatchet_sdk/client.py +13 -63
- hatchet_sdk/clients/admin.py +203 -124
- hatchet_sdk/clients/dispatcher/action_listener.py +42 -42
- hatchet_sdk/clients/dispatcher/dispatcher.py +18 -16
- hatchet_sdk/clients/durable_event_listener.py +327 -0
- hatchet_sdk/clients/rest/__init__.py +12 -1
- hatchet_sdk/clients/rest/api/log_api.py +258 -0
- hatchet_sdk/clients/rest/api/task_api.py +32 -6
- hatchet_sdk/clients/rest/api/workflow_runs_api.py +626 -0
- hatchet_sdk/clients/rest/models/__init__.py +12 -1
- hatchet_sdk/clients/rest/models/v1_log_line.py +94 -0
- hatchet_sdk/clients/rest/models/v1_log_line_level.py +39 -0
- hatchet_sdk/clients/rest/models/v1_log_line_list.py +110 -0
- hatchet_sdk/clients/rest/models/v1_task_summary.py +80 -64
- hatchet_sdk/clients/rest/models/v1_trigger_workflow_run_request.py +95 -0
- hatchet_sdk/clients/rest/models/v1_workflow_run_display_name.py +98 -0
- hatchet_sdk/clients/rest/models/v1_workflow_run_display_name_list.py +114 -0
- hatchet_sdk/clients/rest/models/workflow_run_shape_item_for_workflow_run_details.py +9 -4
- hatchet_sdk/clients/rest_client.py +21 -0
- hatchet_sdk/clients/run_event_listener.py +0 -1
- hatchet_sdk/context/context.py +85 -147
- hatchet_sdk/contracts/dispatcher_pb2_grpc.py +1 -1
- hatchet_sdk/contracts/events_pb2.py +2 -2
- hatchet_sdk/contracts/events_pb2_grpc.py +1 -1
- hatchet_sdk/contracts/v1/dispatcher_pb2.py +36 -0
- hatchet_sdk/contracts/v1/dispatcher_pb2.pyi +38 -0
- hatchet_sdk/contracts/v1/dispatcher_pb2_grpc.py +145 -0
- hatchet_sdk/contracts/v1/shared/condition_pb2.py +39 -0
- hatchet_sdk/contracts/v1/shared/condition_pb2.pyi +72 -0
- hatchet_sdk/contracts/v1/shared/condition_pb2_grpc.py +29 -0
- hatchet_sdk/contracts/v1/workflows_pb2.py +67 -0
- hatchet_sdk/contracts/v1/workflows_pb2.pyi +228 -0
- hatchet_sdk/contracts/v1/workflows_pb2_grpc.py +234 -0
- hatchet_sdk/contracts/workflows_pb2_grpc.py +1 -1
- hatchet_sdk/features/cron.py +3 -3
- hatchet_sdk/features/scheduled.py +2 -2
- hatchet_sdk/hatchet.py +427 -151
- hatchet_sdk/opentelemetry/instrumentor.py +8 -13
- hatchet_sdk/rate_limit.py +33 -39
- hatchet_sdk/runnables/contextvars.py +12 -0
- hatchet_sdk/runnables/standalone.py +194 -0
- hatchet_sdk/runnables/task.py +144 -0
- hatchet_sdk/runnables/types.py +138 -0
- hatchet_sdk/runnables/workflow.py +764 -0
- hatchet_sdk/utils/aio_utils.py +0 -79
- hatchet_sdk/utils/proto_enums.py +0 -7
- hatchet_sdk/utils/timedelta_to_expression.py +23 -0
- hatchet_sdk/utils/typing.py +2 -2
- hatchet_sdk/v0/clients/rest_client.py +9 -0
- hatchet_sdk/v0/worker/action_listener_process.py +18 -2
- hatchet_sdk/waits.py +120 -0
- hatchet_sdk/worker/action_listener_process.py +64 -30
- hatchet_sdk/worker/runner/run_loop_manager.py +35 -25
- hatchet_sdk/worker/runner/runner.py +72 -49
- hatchet_sdk/worker/runner/utils/capture_logs.py +3 -11
- hatchet_sdk/worker/worker.py +155 -118
- hatchet_sdk/workflow_run.py +4 -5
- {hatchet_sdk-1.0.0.dist-info → hatchet_sdk-1.0.0a1.dist-info}/METADATA +1 -2
- {hatchet_sdk-1.0.0.dist-info → hatchet_sdk-1.0.0a1.dist-info}/RECORD +62 -42
- {hatchet_sdk-1.0.0.dist-info → hatchet_sdk-1.0.0a1.dist-info}/entry_points.txt +2 -0
- hatchet_sdk/semver.py +0 -30
- hatchet_sdk/worker/runner/utils/error_with_traceback.py +0 -6
- hatchet_sdk/workflow.py +0 -527
- {hatchet_sdk-1.0.0.dist-info → hatchet_sdk-1.0.0a1.dist-info}/WHEEL +0 -0
@@ -1,14 +1,14 @@
 import asyncio
 import json
 import time
-from dataclasses import
+from dataclasses import field
 from enum import Enum
-from typing import Any, AsyncGenerator,
+from typing import Any, AsyncGenerator, cast

 import grpc
 import grpc.aio
 from grpc._cython import cygrpc  # type: ignore[attr-defined]
-from pydantic import BaseModel, ConfigDict, Field, field_validator
+from pydantic import BaseModel, ConfigDict, Field, field_validator, model_validator

 from hatchet_sdk.clients.event_ts import ThreadSafeEvent, read_with_interrupt
 from hatchet_sdk.clients.events import proto_timestamp_now
@@ -36,25 +36,29 @@ DEFAULT_ACTION_TIMEOUT = 600 # seconds
 DEFAULT_ACTION_LISTENER_RETRY_COUNT = 15


-
-
+class GetActionListenerRequest(BaseModel):
+    model_config = ConfigDict(arbitrary_types_allowed=True)
+
     worker_name: str
     services: list[str]
     actions: list[str]
-
-
+    slots: int = 100
+    raw_labels: dict[str, str | int] = Field(default_factory=dict)

-    labels: dict[str, WorkerLabels] =
+    labels: dict[str, WorkerLabels] = Field(default_factory=dict)

-
+    @model_validator(mode="after")
+    def validate_labels(self) -> "GetActionListenerRequest":
         self.labels = {}

-        for key, value in self.
+        for key, value in self.raw_labels.items():
             if isinstance(value, int):
                 self.labels[key] = WorkerLabels(intValue=value)
             else:
                 self.labels[key] = WorkerLabels(strValue=str(value))

+        return self
+

 class ActionPayload(BaseModel):
     model_config = ConfigDict(extra="allow")
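For orientation, a minimal sketch (not taken from the package) of how a caller might build the reworked request model: raw_labels accepts plain strings and ints, and the model_validator converts them into WorkerLabels after validation. The values below are hypothetical; the field names and the conversion follow the hunk above.

    from hatchet_sdk.clients.dispatcher.action_listener import GetActionListenerRequest

    req = GetActionListenerRequest(
        worker_name="example-worker",   # hypothetical worker name
        services=["default"],
        actions=["example:step_one"],   # hypothetical action id
        slots=50,
        raw_labels={"region": "us-east-1", "cpu": 4},
    )
    # After validation, req.labels maps "region" to WorkerLabels(strValue="us-east-1")
    # and "cpu" to WorkerLabels(intValue=4).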
@@ -65,6 +69,7 @@ class ActionPayload(BaseModel):
     user_data: JSONSerializableMapping = Field(default_factory=dict)
     step_run_errors: dict[str, str] = Field(default_factory=dict)
     triggered_by: str | None = None
+    triggers: JSONSerializableMapping = Field(default_factory=dict)

     @field_validator(
         "input", "parents", "overrides", "user_data", "step_run_errors", mode="before"
@@ -142,30 +147,24 @@ def parse_additional_metadata(additional_metadata: str) -> JSONSerializableMappi
         return {}


-@dataclass
 class ActionListener:
-    config: ClientConfig
-
+    def __init__(self, config: ClientConfig, worker_id: str) -> None:
+        self.config = config
+        self.worker_id = worker_id

-    client: DispatcherStub = field(init=False)
-    aio_client: DispatcherStub = field(init=False)
-    token: str = field(init=False)
-    retries: int = field(default=0, init=False)
-    last_connection_attempt: float = field(default=0, init=False)
-    last_heartbeat_succeeded: bool = field(default=True, init=False)
-    time_last_hb_succeeded: float = field(default=9999999999999, init=False)
-    heartbeat_task: Optional[asyncio.Task[None]] = field(default=None, init=False)
-    run_heartbeat: bool = field(default=True, init=False)
-    listen_strategy: str = field(default="v2", init=False)
-    stop_signal: bool = field(default=False, init=False)
-
-    missed_heartbeats: int = field(default=0, init=False)
-
-    def __post_init__(self) -> None:
-        self.client = DispatcherStub(new_conn(self.config, False))  # type: ignore[no-untyped-call]
         self.aio_client = DispatcherStub(new_conn(self.config, True))  # type: ignore[no-untyped-call]
         self.token = self.config.token

+        self.retries = 0
+        self.last_heartbeat_succeeded = True
+        self.time_last_hb_succeeded = 9999999999999.0
+        self.last_connection_attempt = 0.0
+        self.heartbeat_task: asyncio.Task[None] | None = None
+        self.run_heartbeat = True
+        self.listen_strategy = "v2"
+        self.stop_signal = False
+        self.missed_heartbeats = 0
+
     def is_healthy(self) -> bool:
         return self.last_heartbeat_succeeded

@@ -292,11 +291,16 @@ class ActionListener:

        self.retries = 0

-
-
-
-
-
+        try:
+            action_payload = (
+                ActionPayload()
+                if not assigned_action.actionPayload
+                else ActionPayload.model_validate_json(
+                    assigned_action.actionPayload
+                )
+            )
+        except (ValueError, json.JSONDecodeError) as e:
+            raise ValueError(f"Error decoding payload: {e}")

         action = Action(
             tenant_id=assigned_action.tenantId,
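Payload parsing now happens inline via pydantic: an empty actionPayload becomes a default ActionPayload, anything else goes through ActionPayload.model_validate_json, and parse failures are re-raised as ValueError. A small sketch of that behaviour with a hypothetical payload string (ActionPayload is the model defined earlier in this file; extra="allow" means unknown keys are kept):

    raw = '{"input": {"message": "hi"}, "parents": {}, "triggers": {}}'  # hypothetical payload
    payload = ActionPayload() if not raw else ActionPayload.model_validate_json(raw)
    print(payload.input)  # {'message': 'hi'}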
@@ -309,7 +313,7 @@ class ActionListener:
             step_id=assigned_action.stepId,
             step_run_id=assigned_action.stepRunId,
             action_id=assigned_action.actionId,
-            action_payload=
+            action_payload=action_payload,
             action_type=convert_proto_enum_to_python(
                 assigned_action.actionType,
                 ActionType,
@@ -352,16 +356,10 @@ class ActionListener:

         self.retries = self.retries + 1

-    def parse_action_payload(self, payload: str) -> JSONSerializableMapping:
-        try:
-            return cast(JSONSerializableMapping, json.loads(payload))
-        except json.JSONDecodeError as e:
-            raise ValueError(f"Error decoding payload: {e}")
-
     async def get_listen_client(
         self,
     ) -> grpc.aio.UnaryStreamCall[WorkerListenRequest, AssignedAction]:
-        current_time =
+        current_time = time.time()

         if (
             current_time - self.last_connection_attempt
@@ -438,8 +436,10 @@ class ActionListener:
                 timeout=5,
                 metadata=get_metadata(self.token),
             )
+
             if self.interrupt is not None:
                 self.interrupt.set()
+
             return cast(WorkerUnsubscribeRequest, req)
         except grpc.RpcError as e:
             raise Exception(f"Failed to unsubscribe: {e}")
@@ -55,17 +55,19 @@ class DispatcherClient:
         for key, value in preset_labels.items():
             req.labels[key] = WorkerLabels(strValue=str(value))

-
-
-
-
-
-
-
-
+        response = cast(
+            WorkerRegisterResponse,
+            await self.aio_client.Register(
+                WorkerRegisterRequest(
+                    workerName=req.worker_name,
+                    actions=req.actions,
+                    services=req.services,
+                    maxRuns=req.slots,
+                    labels=req.labels,
+                ),
+                timeout=DEFAULT_REGISTER_TIMEOUT,
+                metadata=get_metadata(self.token),
             ),
-            timeout=DEFAULT_REGISTER_TIMEOUT,
-            metadata=get_metadata(self.token),
         )

         return ActionListener(self.config, response.workerId)
@@ -93,8 +95,8 @@ class DispatcherClient:
     async def _try_send_step_action_event(
         self, action: Action, event_type: StepActionEventType, payload: str
     ) -> grpc.aio.UnaryUnaryCall[StepActionEvent, ActionEventResponse]:
-
-
+        event_timestamp = Timestamp()
+        event_timestamp.GetCurrentTime()

         event = StepActionEvent(
             workerId=action.worker_id,
@@ -103,7 +105,7 @@ class DispatcherClient:
             stepId=action.step_id,
             stepRunId=action.step_run_id,
             actionId=action.action_id,
-            eventTimestamp=
+            eventTimestamp=event_timestamp,
             eventType=event_type,
             eventPayload=payload,
             retryCount=action.retry_count,
@@ -120,15 +122,15 @@ class DispatcherClient:
     async def send_group_key_action_event(
         self, action: Action, event_type: GroupKeyActionEventType, payload: str
     ) -> grpc.aio.UnaryUnaryCall[GroupKeyActionEvent, ActionEventResponse]:
-
-
+        event_timestamp = Timestamp()
+        event_timestamp.GetCurrentTime()

         event = GroupKeyActionEvent(
             workerId=action.worker_id,
             workflowRunId=action.workflow_run_id,
             getGroupKeyRunId=action.get_group_key_run_id,
             actionId=action.action_id,
-            eventTimestamp=
+            eventTimestamp=event_timestamp,
             eventType=event_type,
             eventPayload=payload,
         )
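Both event senders now stamp events inline instead of delegating to a helper. For context, this is the standard protobuf pattern rather than anything package-specific, assuming Timestamp here is google.protobuf.timestamp_pb2.Timestamp:

    from google.protobuf.timestamp_pb2 import Timestamp

    event_timestamp = Timestamp()
    event_timestamp.GetCurrentTime()  # fills seconds/nanos with the current wall-clock time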
@@ -0,0 +1,327 @@
+import asyncio
+import json
+from collections.abc import AsyncIterator
+from typing import Any, Literal, cast
+
+import grpc
+import grpc.aio
+from grpc._cython import cygrpc  # type: ignore[attr-defined]
+from pydantic import BaseModel, ConfigDict
+
+from hatchet_sdk.clients.event_ts import ThreadSafeEvent, read_with_interrupt
+from hatchet_sdk.clients.rest.tenacity_utils import tenacity_retry
+from hatchet_sdk.config import ClientConfig
+from hatchet_sdk.connection import new_conn
+from hatchet_sdk.contracts.v1.dispatcher_pb2 import (
+    DurableEvent,
+    ListenForDurableEventRequest,
+)
+from hatchet_sdk.contracts.v1.dispatcher_pb2 import (
+    RegisterDurableEventRequest as RegisterDurableEventRequestProto,
+)
+from hatchet_sdk.contracts.v1.dispatcher_pb2_grpc import V1DispatcherStub
+from hatchet_sdk.contracts.v1.shared.condition_pb2 import DurableEventListenerConditions
+from hatchet_sdk.logger import logger
+from hatchet_sdk.metadata import get_metadata
+from hatchet_sdk.waits import SleepCondition, UserEventCondition
+
+DEFAULT_DURABLE_EVENT_LISTENER_RETRY_INTERVAL = 3  # seconds
+DEFAULT_DURABLE_EVENT_LISTENER_RETRY_COUNT = 5
+DEFAULT_DURABLE_EVENT_LISTENER_INTERRUPT_INTERVAL = 1800  # 30 minutes
+
+
+class _Subscription:
+    def __init__(self, id: int, task_id: str, signal_key: str):
+        self.id = id
+        self.task_id = task_id
+        self.signal_key = signal_key
+        self.queue: asyncio.Queue[DurableEvent | None] = asyncio.Queue()
+
+    async def __aiter__(self) -> "_Subscription":
+        return self
+
+    async def __anext__(self) -> DurableEvent | None:
+        return await self.queue.get()
+
+    async def get(self) -> DurableEvent:
+        event = await self.queue.get()
+
+        if event is None:
+            raise StopAsyncIteration
+
+        return event
+
+    async def put(self, item: DurableEvent) -> None:
+        await self.queue.put(item)
+
+    async def close(self) -> None:
+        await self.queue.put(None)
+
+
+class RegisterDurableEventRequest(BaseModel):
+    model_config = ConfigDict(arbitrary_types_allowed=True)
+
+    task_id: str
+    signal_key: str
+    conditions: list[SleepCondition | UserEventCondition]
+
+    def to_proto(self) -> RegisterDurableEventRequestProto:
+        return RegisterDurableEventRequestProto(
+            task_id=self.task_id,
+            signal_key=self.signal_key,
+            conditions=DurableEventListenerConditions(
+                sleep_conditions=[
+                    c.to_pb() for c in self.conditions if isinstance(c, SleepCondition)
+                ],
+                user_event_conditions=[
+                    c.to_pb()
+                    for c in self.conditions
+                    if isinstance(c, UserEventCondition)
+                ],
+            ),
+        )
+
+
+class DurableEventListener:
+    def __init__(self, config: ClientConfig):
+        try:
+            asyncio.get_running_loop()
+        except RuntimeError:
+            loop = asyncio.new_event_loop()
+            asyncio.set_event_loop(loop)
+
+        conn = new_conn(config, True)
+        self.client = V1DispatcherStub(conn)  # type: ignore[no-untyped-call]
+        self.token = config.token
+        self.config = config
+
+        # list of all active subscriptions, mapping from a subscription id to a task id and signal key
+        self.subscriptions_to_task_id_signal_key: dict[int, tuple[str, str]] = {}
+
+        # task id-signal key tuples mapped to an array of subscription ids
+        self.task_id_signal_key_to_subscriptions: dict[tuple[str, str], list[int]] = {}
+
+        self.subscription_counter: int = 0
+        self.subscription_counter_lock: asyncio.Lock = asyncio.Lock()
+
+        self.requests: asyncio.Queue[ListenForDurableEventRequest | int] = (
+            asyncio.Queue()
+        )
+
+        self.listener: (
+            grpc.aio.UnaryStreamCall[ListenForDurableEventRequest, DurableEvent] | None
+        ) = None
+        self.listener_task: asyncio.Task[None] | None = None
+
+        self.curr_requester: int = 0
+
+        self.events: dict[int, _Subscription] = {}
+
+        self.interrupter: asyncio.Task[None] | None = None
+
+    async def _interrupter(self) -> None:
+        """
+        _interrupter runs in a separate thread and interrupts the listener according to a configurable duration.
+        """
+        await asyncio.sleep(DEFAULT_DURABLE_EVENT_LISTENER_INTERRUPT_INTERVAL)
+
+        if self.interrupt is not None:
+            self.interrupt.set()
+
+    async def _init_producer(self) -> None:
+        try:
+            if not self.listener:
+                while True:
+                    try:
+                        self.listener = await self._retry_subscribe()
+
+                        logger.debug("Workflow run listener connected.")
+
+                        # spawn an interrupter task
+                        if self.interrupter is not None and not self.interrupter.done():
+                            self.interrupter.cancel()
+
+                        self.interrupter = asyncio.create_task(self._interrupter())
+
+                        while True:
+                            self.interrupt = ThreadSafeEvent()
+                            if self.listener is None:
+                                continue
+
+                            t = asyncio.create_task(
+                                read_with_interrupt(self.listener, self.interrupt)
+                            )
+                            await self.interrupt.wait()
+
+                            if not t.done():
+                                logger.warning(
+                                    "Interrupted read_with_interrupt task of durable event listener"
+                                )
+
+                                t.cancel()
+                                if self.listener:
+                                    self.listener.cancel()
+                                await asyncio.sleep(
+                                    DEFAULT_DURABLE_EVENT_LISTENER_RETRY_INTERVAL
+                                )
+                                break
+
+                            event = t.result()
+
+                            if event is cygrpc.EOF:
+                                break
+
+                            # get a list of subscriptions for this task-signal pair
+                            subscriptions = (
+                                self.task_id_signal_key_to_subscriptions.get(
+                                    (event.task_id, event.signal_key), []
+                                )
+                            )
+
+                            for subscription_id in subscriptions:
+                                await self.events[subscription_id].put(event)
+
+                    except grpc.RpcError as e:
+                        logger.debug(f"grpc error in durable event listener: {e}")
+                        await asyncio.sleep(
+                            DEFAULT_DURABLE_EVENT_LISTENER_RETRY_INTERVAL
+                        )
+                        continue
+
+        except Exception as e:
+            logger.error(f"Error in durable event listener: {e}")
+
+            self.listener = None
+
+            # close all subscriptions
+            for subscription_id in self.events:
+                await self.events[subscription_id].close()
+
+            raise e
+
+    async def _request(self) -> AsyncIterator[ListenForDurableEventRequest]:
+        self.curr_requester = self.curr_requester + 1
+
+        # replay all existing subscriptions
+        for task_id, signal_key in set(
+            self.subscriptions_to_task_id_signal_key.values()
+        ):
+            yield ListenForDurableEventRequest(
+                task_id=task_id,
+                signal_key=signal_key,
+            )
+
+        while True:
+            request = await self.requests.get()
+
+            # if the request is an int which matches the current requester, then we should stop
+            if request == self.curr_requester:
+                break
+
+            # if we've gotten an int that doesn't match the current requester, then we should ignore it
+            if isinstance(request, int):
+                continue
+
+            yield request
+            self.requests.task_done()
+
+    def cleanup_subscription(self, subscription_id: int) -> None:
+        task_id_signal_key = self.subscriptions_to_task_id_signal_key[subscription_id]
+
+        if task_id_signal_key in self.task_id_signal_key_to_subscriptions:
+            self.task_id_signal_key_to_subscriptions[task_id_signal_key].remove(
+                subscription_id
+            )
+
+        del self.subscriptions_to_task_id_signal_key[subscription_id]
+        del self.events[subscription_id]
+
+    async def subscribe(self, task_id: str, signal_key: str) -> DurableEvent:
+        try:
+            # create a new subscription id, place a mutex on the counter
+            async with self.subscription_counter_lock:
+                self.subscription_counter += 1
+                subscription_id = self.subscription_counter
+
+            self.subscriptions_to_task_id_signal_key[subscription_id] = (
+                task_id,
+                signal_key,
+            )
+
+            if (task_id, signal_key) not in self.task_id_signal_key_to_subscriptions:
+                self.task_id_signal_key_to_subscriptions[(task_id, signal_key)] = [
+                    subscription_id
+                ]
+            else:
+                self.task_id_signal_key_to_subscriptions[(task_id, signal_key)].append(
+                    subscription_id
+                )
+
+            self.events[subscription_id] = _Subscription(
+                subscription_id, task_id, signal_key
+            )
+
+            await self.requests.put(
+                ListenForDurableEventRequest(
+                    task_id=task_id,
+                    signal_key=signal_key,
+                )
+            )
+
+            if not self.listener_task or self.listener_task.done():
+                self.listener_task = asyncio.create_task(self._init_producer())
+
+            return await self.events[subscription_id].get()
+        except asyncio.CancelledError:
+            raise
+        finally:
+            self.cleanup_subscription(subscription_id)
+
+    async def _retry_subscribe(
+        self,
+    ) -> grpc.aio.UnaryStreamCall[ListenForDurableEventRequest, DurableEvent]:
+        retries = 0
+
+        while retries < DEFAULT_DURABLE_EVENT_LISTENER_RETRY_COUNT:
+            try:
+                if retries > 0:
+                    await asyncio.sleep(DEFAULT_DURABLE_EVENT_LISTENER_RETRY_INTERVAL)
+
+                # signal previous async iterator to stop
+                if self.curr_requester != 0:
+                    self.requests.put_nowait(self.curr_requester)
+
+                return cast(
+                    grpc.aio.UnaryStreamCall[
+                        ListenForDurableEventRequest, DurableEvent
+                    ],
+                    self.client.ListenForDurableEvent(
+                        self._request(),
+                        metadata=get_metadata(self.token),
+                    ),
+                )
+            except grpc.RpcError as e:
+                if e.code() == grpc.StatusCode.UNAVAILABLE:
+                    retries = retries + 1
+                else:
+                    raise ValueError(f"gRPC error: {e}")
+
+        raise ValueError("Failed to connect to durable event listener")
+
+    @tenacity_retry
+    def register_durable_event(
+        self, request: RegisterDurableEventRequest
+    ) -> Literal[True]:
+        self.client.RegisterDurableEvent(
+            request.to_proto(),
+            timeout=5,
+            metadata=get_metadata(self.token),
+        )
+
+        return True
+
+    @tenacity_retry
+    async def result(self, task_id: str, signal_key: str) -> dict[str, Any]:
+        event = await self.subscribe(task_id, signal_key)
+
+        return cast(dict[str, Any], json.loads(event.data.decode("utf-8")))
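Taken together, the new listener exposes a small surface: register_durable_event registers conditions for a task/signal pair, and result blocks until a matching DurableEvent arrives and decodes its JSON payload. A hedged usage sketch follows; the SleepCondition and UserEventCondition constructors live in the new hatchet_sdk/waits.py, which this diff does not show, so their arguments below are assumptions.

    from datetime import timedelta

    from hatchet_sdk.clients.durable_event_listener import (
        DurableEventListener,
        RegisterDurableEventRequest,
    )
    from hatchet_sdk.config import ClientConfig
    from hatchet_sdk.waits import SleepCondition, UserEventCondition

    async def wait_for_signal(config: ClientConfig, task_id: str) -> dict:
        listener = DurableEventListener(config)

        # Register the conditions the server should wait on for this task/signal pair.
        listener.register_durable_event(
            RegisterDurableEventRequest(
                task_id=task_id,
                signal_key="user:update",  # hypothetical signal key
                conditions=[
                    SleepCondition(timedelta(minutes=5)),       # assumed constructor
                    UserEventCondition("user:update", "true"),  # assumed constructor
                ],
            )
        )

        # Blocks until a matching DurableEvent arrives, then returns its decoded JSON data.
        return await listener.result(task_id, "user:update")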
@@ -230,8 +230,10 @@ from hatchet_sdk.clients.rest.models.user_tenant_memberships_list import (
 from hatchet_sdk.clients.rest.models.user_tenant_public import UserTenantPublic
 from hatchet_sdk.clients.rest.models.v1_cancel_task_request import V1CancelTaskRequest
 from hatchet_sdk.clients.rest.models.v1_dag_children import V1DagChildren
+from hatchet_sdk.clients.rest.models.v1_log_line import V1LogLine
+from hatchet_sdk.clients.rest.models.v1_log_line_level import V1LogLineLevel
+from hatchet_sdk.clients.rest.models.v1_log_line_list import V1LogLineList
 from hatchet_sdk.clients.rest.models.v1_replay_task_request import V1ReplayTaskRequest
-from hatchet_sdk.clients.rest.models.v1_task import V1Task
 from hatchet_sdk.clients.rest.models.v1_task_event import V1TaskEvent
 from hatchet_sdk.clients.rest.models.v1_task_event_list import V1TaskEventList
 from hatchet_sdk.clients.rest.models.v1_task_event_type import V1TaskEventType
@@ -243,8 +245,17 @@ from hatchet_sdk.clients.rest.models.v1_task_run_status import V1TaskRunStatus
 from hatchet_sdk.clients.rest.models.v1_task_status import V1TaskStatus
 from hatchet_sdk.clients.rest.models.v1_task_summary import V1TaskSummary
 from hatchet_sdk.clients.rest.models.v1_task_summary_list import V1TaskSummaryList
+from hatchet_sdk.clients.rest.models.v1_trigger_workflow_run_request import (
+    V1TriggerWorkflowRunRequest,
+)
 from hatchet_sdk.clients.rest.models.v1_workflow_run import V1WorkflowRun
 from hatchet_sdk.clients.rest.models.v1_workflow_run_details import V1WorkflowRunDetails
+from hatchet_sdk.clients.rest.models.v1_workflow_run_display_name import (
+    V1WorkflowRunDisplayName,
+)
+from hatchet_sdk.clients.rest.models.v1_workflow_run_display_name_list import (
+    V1WorkflowRunDisplayNameList,
+)
 from hatchet_sdk.clients.rest.models.v1_workflow_type import V1WorkflowType
 from hatchet_sdk.clients.rest.models.webhook_worker import WebhookWorker
 from hatchet_sdk.clients.rest.models.webhook_worker_create_request import (