rappel 0.4.1__py3-none-win_amd64.whl
This diff shows the contents of publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the changes between package versions as they appear in their public registries.
Potentially problematic release: this version of rappel may be problematic.
- proto/ast_pb2.py +117 -0
- proto/ast_pb2.pyi +1609 -0
- proto/ast_pb2_grpc.py +24 -0
- proto/ast_pb2_grpc.pyi +22 -0
- proto/messages_pb2.py +106 -0
- proto/messages_pb2.pyi +1170 -0
- proto/messages_pb2_grpc.py +406 -0
- proto/messages_pb2_grpc.pyi +380 -0
- rappel/__init__.py +56 -0
- rappel/actions.py +81 -0
- rappel/bin/boot-rappel-singleton.exe +0 -0
- rappel/bin/rappel-bridge.exe +0 -0
- rappel/bin/start-workers.exe +0 -0
- rappel/bridge.py +228 -0
- rappel/dependencies.py +135 -0
- rappel/exceptions.py +11 -0
- rappel/formatter.py +110 -0
- rappel/ir_builder.py +3146 -0
- rappel/logger.py +39 -0
- rappel/registry.py +75 -0
- rappel/schedule.py +294 -0
- rappel/serialization.py +205 -0
- rappel/worker.py +191 -0
- rappel/workflow.py +236 -0
- rappel/workflow_runtime.py +137 -0
- rappel-0.4.1.data/scripts/boot-rappel-singleton.exe +0 -0
- rappel-0.4.1.data/scripts/rappel-bridge.exe +0 -0
- rappel-0.4.1.data/scripts/start-workers.exe +0 -0
- rappel-0.4.1.dist-info/METADATA +292 -0
- rappel-0.4.1.dist-info/RECORD +32 -0
- rappel-0.4.1.dist-info/WHEEL +4 -0
- rappel-0.4.1.dist-info/entry_points.txt +2 -0
rappel/worker.py
ADDED
@@ -0,0 +1,191 @@
"""gRPC worker client that executes rappel actions."""

import argparse
import asyncio
import importlib
import logging
import sys
import time
from typing import Any, AsyncIterator, cast

import grpc

from proto import messages_pb2 as pb2
from proto import messages_pb2_grpc as pb2_grpc
from rappel.actions import serialize_error_payload, serialize_result_payload

from . import workflow_runtime
from .logger import configure as configure_logger

LOGGER = configure_logger("rappel.worker")
aio = cast(Any, grpc).aio


def _parse_args(argv: list[str] | None) -> argparse.Namespace:
    parser = argparse.ArgumentParser(description="Rappel workflow worker")
    parser.add_argument("--bridge", required=True, help="gRPC address of the Rust bridge")
    parser.add_argument("--worker-id", required=True, type=int, help="Logical worker identifier")
    parser.add_argument(
        "--user-module",
        action="append",
        default=[],
        help="Optional user module(s) to import eagerly",
    )
    return parser.parse_args(argv)


async def _outgoing_stream(
    queue: "asyncio.Queue[pb2.Envelope]", worker_id: int
) -> AsyncIterator[pb2.Envelope]:
    hello = pb2.WorkerHello(worker_id=worker_id)
    envelope = pb2.Envelope(
        delivery_id=0,
        partition_id=0,
        kind=pb2.MessageKind.MESSAGE_KIND_WORKER_HELLO,
        payload=hello.SerializeToString(),
    )
    yield envelope
    try:
        while True:
            message = await queue.get()
            yield message
    except asyncio.CancelledError:  # pragma: no cover - best effort shutdown
        return


async def _send_ack(outgoing: "asyncio.Queue[pb2.Envelope]", envelope: pb2.Envelope) -> None:
    ack = pb2.Ack(acked_delivery_id=envelope.delivery_id)
    ack_envelope = pb2.Envelope(
        delivery_id=envelope.delivery_id,
        partition_id=envelope.partition_id,
        kind=pb2.MessageKind.MESSAGE_KIND_ACK,
        payload=ack.SerializeToString(),
    )
    await outgoing.put(ack_envelope)


async def _handle_dispatch(
    envelope: pb2.Envelope,
    outgoing: "asyncio.Queue[pb2.Envelope]",
) -> None:
    await _send_ack(outgoing, envelope)
    dispatch = pb2.ActionDispatch()
    dispatch.ParseFromString(envelope.payload)
    timeout_seconds = dispatch.timeout_seconds if dispatch.HasField("timeout_seconds") else 0

    worker_start = time.perf_counter_ns()
    success = True
    action_name = dispatch.action_name
    execution: workflow_runtime.ActionExecutionResult | None = None
    try:
        if timeout_seconds > 0:
            execution = await asyncio.wait_for(
                workflow_runtime.execute_action(dispatch), timeout=timeout_seconds
            )
        else:
            execution = await workflow_runtime.execute_action(dispatch)

        if execution.exception:
            success = False
            response_payload = serialize_error_payload(action_name, execution.exception)
        else:
            response_payload = serialize_result_payload(execution.result)
    except asyncio.TimeoutError:
        success = False
        error = TimeoutError(f"action {action_name} timed out after {timeout_seconds} seconds")
        response_payload = serialize_error_payload(action_name, error)
        LOGGER.warning(
            "Action %s timed out after %ss for action_id=%s sequence=%s",
            action_name,
            timeout_seconds,
            dispatch.action_id,
            dispatch.sequence,
        )
    except Exception as exc:  # noqa: BLE001 - propagate structured errors
        success = False
        response_payload = serialize_error_payload(action_name, exc)
        LOGGER.exception(
            "Action %s failed for action_id=%s sequence=%s",
            action_name,
            dispatch.action_id,
            dispatch.sequence,
        )
    worker_end = time.perf_counter_ns()
    response = pb2.ActionResult(
        action_id=dispatch.action_id,
        success=success,
        worker_start_ns=worker_start,
        worker_end_ns=worker_end,
    )
    response.payload.CopyFrom(response_payload)
    if dispatch.dispatch_token:
        response.dispatch_token = dispatch.dispatch_token
    response_envelope = pb2.Envelope(
        delivery_id=envelope.delivery_id,
        partition_id=envelope.partition_id,
        kind=pb2.MessageKind.MESSAGE_KIND_ACTION_RESULT,
        payload=response.SerializeToString(),
    )
    await outgoing.put(response_envelope)
    LOGGER.debug("Handled action=%s seq=%s success=%s", action_name, dispatch.sequence, success)


async def _handle_incoming_stream(
    stub: pb2_grpc.WorkerBridgeStub,
    worker_id: int,
    outgoing: "asyncio.Queue[pb2.Envelope]",
) -> None:
    """Process incoming messages, running action dispatches concurrently."""
    pending_tasks: set[asyncio.Task[None]] = set()

    async for envelope in stub.Attach(_outgoing_stream(outgoing, worker_id)):
        kind = envelope.kind
        if kind == pb2.MessageKind.MESSAGE_KIND_ACTION_DISPATCH:
            # Spawn task to handle dispatch concurrently
            task = asyncio.create_task(_handle_dispatch(envelope, outgoing))
            pending_tasks.add(task)
            task.add_done_callback(pending_tasks.discard)
        elif kind == pb2.MessageKind.MESSAGE_KIND_HEARTBEAT:
            LOGGER.debug("Received heartbeat delivery=%s", envelope.delivery_id)
            await _send_ack(outgoing, envelope)
        else:
            LOGGER.warning("Unhandled message kind: %s", kind)
            await _send_ack(outgoing, envelope)

    # Wait for any remaining tasks on stream close
    if pending_tasks:
        await asyncio.gather(*pending_tasks, return_exceptions=True)


async def _run_worker(args: argparse.Namespace) -> None:
    outgoing: "asyncio.Queue[pb2.Envelope]" = asyncio.Queue()
    for module_name in args.user_module:
        if not module_name:
            continue
        LOGGER.info("Preloading user module %s", module_name)
        importlib.import_module(module_name)

    async with aio.insecure_channel(args.bridge) as channel:
        stub = pb2_grpc.WorkerBridgeStub(channel)
        LOGGER.info("Worker %s connected to %s", args.worker_id, args.bridge)
        try:
            await _handle_incoming_stream(stub, args.worker_id, outgoing)
        except aio.AioRpcError as exc:  # pragma: no cover
            status = exc.code()
            LOGGER.error("Worker stream closed: %s", status)
            raise


def main(argv: list[str] | None = None) -> None:
    args = _parse_args(argv)
    logging.basicConfig(level=logging.INFO, format="[worker] %(message)s", stream=sys.stderr)
    try:
        asyncio.run(_run_worker(args))
    except KeyboardInterrupt:  # pragma: no cover - exit quietly on Ctrl+C
        return
    except grpc.RpcError:
        sys.exit(1)


if __name__ == "__main__":
    main()
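For context, a minimal sketch of invoking this entry point directly; the bridge address and user module below are hypothetical placeholders, not values taken from the package:

# Hypothetical launch sketch: flags mirror _parse_args above; the bridge
# address and user module are placeholder values, not package defaults.
from rappel.worker import main

main([
    "--bridge", "localhost:50051",     # gRPC address of the Rust bridge (assumed)
    "--worker-id", "1",                # parsed to int by argparse
    "--user-module", "myapp.actions",  # hypothetical module that registers actions
])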
rappel/workflow.py
ADDED
@@ -0,0 +1,236 @@
"""
Workflow base class and registration decorator.

This module provides the foundation for defining workflows that can be
compiled to IR and executed by the Rappel runtime.
"""

import hashlib
import inspect
import os
from dataclasses import dataclass
from datetime import timedelta
from functools import wraps
from threading import RLock
from typing import Any, Awaitable, ClassVar, Optional, TypeVar

from proto import ast_pb2 as ir
from proto import messages_pb2 as pb2

from . import bridge
from .actions import deserialize_result_payload
from .ir_builder import build_workflow_ir
from .logger import configure as configure_logger
from .serialization import build_arguments_from_kwargs
from .workflow_runtime import WorkflowNodeResult

logger = configure_logger("rappel.workflow")

TWorkflow = TypeVar("TWorkflow", bound="Workflow")
TResult = TypeVar("TResult")


@dataclass(frozen=True)
class RetryPolicy:
    """Retry policy for action execution.

    Maps to IR RetryPolicy: [ExceptionType -> retry: N, backoff: Xs]

    Args:
        attempts: Maximum number of retry attempts.
        exception_types: List of exception type names to retry on. Empty = catch all.
        backoff_seconds: Constant backoff duration between retries in seconds.
    """

    attempts: Optional[int] = None
    exception_types: Optional[list[str]] = None
    backoff_seconds: Optional[float] = None


class Workflow:
    """Base class for workflow definitions."""

    name: ClassVar[Optional[str]] = None
    """Human-friendly identifier. Override to pin the registry key; defaults to the lowercase class name."""

    concurrent: ClassVar[bool] = False
    """When True, downstream engines may respect DAG-parallel execution; False preserves sequential semantics."""

    _workflow_ir: ClassVar[Optional[ir.Program]] = None
    _ir_lock: ClassVar[RLock] = RLock()
    _workflow_version_id: ClassVar[Optional[str]] = None

    async def run(self) -> Any:
        raise NotImplementedError

    async def run_action(
        self,
        awaitable: Awaitable[TResult],
        *,
        retry: Optional[RetryPolicy] = None,
        timeout: Optional[float | int | timedelta] = None,
    ) -> TResult:
        """Helper that simply awaits the provided action coroutine.

        The retry and timeout arguments are consumed by the workflow compiler
        (IR builder) rather than the runtime execution path.

        Args:
            awaitable: The action coroutine to execute.
            retry: Retry policy including max attempts, exception types, and backoff.
            timeout: Timeout duration in seconds (or a timedelta).
        """
        # Parameters are intentionally unused at runtime; the workflow compiler
        # inspects the AST to record them.
        del retry, timeout
        return await awaitable

    @classmethod
    def short_name(cls) -> str:
        if cls.name:
            return cls.name
        return cls.__name__.lower()

    @classmethod
    def workflow_ir(cls) -> ir.Program:
        """Build and cache the IR program for this workflow."""
        if cls._workflow_ir is None:
            with cls._ir_lock:
                if cls._workflow_ir is None:
                    cls._workflow_ir = build_workflow_ir(cls)
        return cls._workflow_ir

    @classmethod
    def _build_registration_payload(
        cls, initial_context: Optional[pb2.WorkflowArguments] = None
    ) -> pb2.WorkflowRegistration:
        """Build a registration payload with the serialized IR."""
        program = cls.workflow_ir()

        # Serialize IR to bytes
        ir_bytes = program.SerializeToString()
        ir_hash = hashlib.sha256(ir_bytes).hexdigest()

        message = pb2.WorkflowRegistration(
            workflow_name=cls.short_name(),
            ir=ir_bytes,
            ir_hash=ir_hash,
            concurrent=cls.concurrent,
        )

        if initial_context:
            message.initial_context.CopyFrom(initial_context)

        return message


class WorkflowRegistry:
    """Registry of workflow definitions keyed by workflow name."""

    def __init__(self) -> None:
        self._workflows: dict[str, type[Workflow]] = {}
        self._lock = RLock()

    def register(self, name: str, workflow_cls: type[Workflow]) -> None:
        with self._lock:
            if name in self._workflows:
                raise ValueError(f"workflow '{name}' already registered")
            self._workflows[name] = workflow_cls

    def get(self, name: str) -> Optional[type[Workflow]]:
        with self._lock:
            return self._workflows.get(name)

    def names(self) -> list[str]:
        with self._lock:
            return sorted(self._workflows.keys())

    def reset(self) -> None:
        with self._lock:
            self._workflows.clear()


workflow_registry = WorkflowRegistry()


def workflow(cls: type[TWorkflow]) -> type[TWorkflow]:
    """Decorator that registers workflow classes and caches their IR."""

    if not issubclass(cls, Workflow):
        raise TypeError("workflow decorator requires Workflow subclasses")
    run_impl = cls.run
    if not inspect.iscoroutinefunction(run_impl):
        raise TypeError("workflow run() must be defined with 'async def'")

    @wraps(run_impl)
    async def run_public(self: Workflow, *args: Any, **kwargs: Any) -> Any:
        if _running_under_pytest():
            cls.workflow_ir()
            return await run_impl(self, *args, **kwargs)

        # Get the signature of run() to map positional args to parameter names
        sig = inspect.signature(run_impl)
        params = list(sig.parameters.keys())[1:]  # Skip 'self'

        # Convert positional args to kwargs
        for i, arg in enumerate(args):
            if i < len(params):
                kwargs[params[i]] = arg

        # Serialize kwargs using common logic
        initial_context = build_arguments_from_kwargs(kwargs)

        payload = cls._build_registration_payload(initial_context)
        run_result = await bridge.run_instance(payload.SerializeToString())
        cls._workflow_version_id = run_result.workflow_version_id
        if _skip_wait_for_instance():
            logger.info(
                "Skipping wait_for_instance for workflow %s due to RAPPEL_SKIP_WAIT_FOR_INSTANCE",
                cls.short_name(),
            )
            return None
        result_bytes = await bridge.wait_for_instance(
            instance_id=run_result.workflow_instance_id,
            poll_interval_secs=1.0,
        )
        if result_bytes is None:
            raise TimeoutError(
                f"workflow instance {run_result.workflow_instance_id} did not complete"
            )
        arguments = pb2.WorkflowArguments()
        arguments.ParseFromString(result_bytes)
        result = deserialize_result_payload(arguments)
        if result.error:
            raise RuntimeError(f"workflow failed: {result.error}")

        # Unwrap WorkflowNodeResult if present (internal worker representation)
        if isinstance(result.result, WorkflowNodeResult):
            # Extract the actual result from the variables dict
            variables = result.result.variables
            program = cls.workflow_ir()
            # Get the return variable from the IR if available
            if program.functions:
                outputs = list(program.functions[0].io.outputs)
                if outputs:
                    return_var = outputs[0]
                    if return_var in variables:
                        return variables[return_var]
            return None

        return result.result

    cls.__workflow_run_impl__ = run_impl
    cls.run = run_public  # type: ignore[assignment]
    workflow_registry.register(cls.short_name(), cls)
    return cls


def _running_under_pytest() -> bool:
    return bool(os.environ.get("PYTEST_CURRENT_TEST"))


def _skip_wait_for_instance() -> bool:
    value = os.environ.get("RAPPEL_SKIP_WAIT_FOR_INSTANCE")
    if not value:
        return False
    return value.strip().lower() not in {"0", "false", "no"}
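To show how the decorator, RetryPolicy, and run_action fit together, here is a minimal sketch; fetch_user stands in for an action registered elsewhere in the package, and the retry and timeout values are arbitrary:

# Illustrative sketch only: fetch_user is a placeholder for a registered
# rappel action; retry/timeout are recorded by the IR builder, not enforced
# by run_action itself.
from rappel.workflow import RetryPolicy, Workflow, workflow


async def fetch_user(user_id: int) -> dict:
    # Placeholder coroutine; a real action would be registered via the
    # package's action decorator.
    return {"id": user_id}


@workflow
class GetUser(Workflow):
    name = "get-user"  # pins the registry key; defaults to "getuser"

    async def run(self, user_id: int) -> dict:
        return await self.run_action(
            fetch_user(user_id),
            retry=RetryPolicy(attempts=3, backoff_seconds=2.0),
            timeout=30,
        )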
rappel/workflow_runtime.py
ADDED
@@ -0,0 +1,137 @@
"""Runtime helpers for executing actions inside the worker.

This module provides the execution layer for Python workers that receive
action dispatch commands from the Rust scheduler.
"""

import asyncio
import dataclasses
from dataclasses import dataclass
from typing import Any, Dict, get_type_hints

from pydantic import BaseModel

from proto import messages_pb2 as pb2

from .dependencies import provide_dependencies
from .registry import registry
from .serialization import arguments_to_kwargs


class WorkflowNodeResult(BaseModel):
    """Result from a workflow node execution containing variable bindings."""

    variables: Dict[str, Any]


@dataclass
class ActionExecutionResult:
    """Result of an action execution."""

    result: Any
    exception: BaseException | None = None


def _is_pydantic_model(cls: type) -> bool:
    """Check if a class is a Pydantic BaseModel subclass."""
    try:
        return isinstance(cls, type) and issubclass(cls, BaseModel)
    except TypeError:
        return False


def _is_dataclass_type(cls: type) -> bool:
    """Check if a class is a dataclass."""
    return dataclasses.is_dataclass(cls) and isinstance(cls, type)


def _coerce_dict_to_model(value: Any, target_type: type) -> Any:
    """Convert a dict to a Pydantic model or dataclass if needed.

    If value is a dict and target_type is a Pydantic model or dataclass,
    instantiate the model with the dict values. Otherwise, return value unchanged.
    """
    if not isinstance(value, dict):
        return value

    if _is_pydantic_model(target_type):
        # Use model_validate for Pydantic v2, fall back to direct instantiation
        model_validate = getattr(target_type, "model_validate", None)
        if model_validate is not None:
            return model_validate(value)
        return target_type(**value)

    if _is_dataclass_type(target_type):
        return target_type(**value)

    return value


def _coerce_kwargs_to_type_hints(handler: Any, kwargs: Dict[str, Any]) -> Dict[str, Any]:
    """Coerce dict kwargs to Pydantic models or dataclasses based on type hints.

    When the IR converts a Pydantic model or dataclass constructor call to a dict,
    the action runner needs to convert that dict back to the expected type based
    on the handler's type annotations.
    """
    try:
        type_hints = get_type_hints(handler)
    except Exception:
        # If we can't get type hints (e.g., forward references), return as-is
        return kwargs

    coerced = {}
    for key, value in kwargs.items():
        if key in type_hints:
            target_type = type_hints[key]
            coerced[key] = _coerce_dict_to_model(value, target_type)
        else:
            coerced[key] = value

    return coerced


async def execute_action(dispatch: pb2.ActionDispatch) -> ActionExecutionResult:
    """Execute an action based on the dispatch command.

    Args:
        dispatch: The action dispatch command from the Rust scheduler.

    Returns:
        The result of executing the action.
    """
    action_name = dispatch.action_name
    module_name = dispatch.module_name

    # Import the module if specified (this registers actions via @action decorator)
    if module_name:
        import importlib

        importlib.import_module(module_name)

    # Get the action handler using both module and name
    handler = registry.get(module_name, action_name)
    if handler is None:
        return ActionExecutionResult(
            result=None,
            exception=KeyError(f"action '{module_name}:{action_name}' not registered"),
        )

    # Deserialize kwargs
    kwargs = arguments_to_kwargs(dispatch.kwargs)

    # Coerce dict arguments to Pydantic models or dataclasses based on type hints
    # This is needed because the IR converts model constructor calls to dicts
    kwargs = _coerce_kwargs_to_type_hints(handler, kwargs)

    try:
        async with provide_dependencies(handler, kwargs) as call_kwargs:
            value = handler(**call_kwargs)
            if asyncio.iscoroutine(value):
                value = await value
            return ActionExecutionResult(result=value)
    except Exception as e:
        return ActionExecutionResult(
            result=None,
            exception=e,
        )
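A rough illustration of the coercion step; the private helper is called directly here purely for demonstration, and Order/ship are made-up names:

# Demonstration sketch: _coerce_kwargs_to_type_hints is a private helper,
# invoked directly only to show the dict-to-model coercion behavior.
from pydantic import BaseModel

from rappel.workflow_runtime import _coerce_kwargs_to_type_hints


class Order(BaseModel):
    sku: str
    qty: int


async def ship(order: Order) -> str:
    # Placeholder handler; a real one would be registered as an action.
    return f"shipped {order.qty}x {order.sku}"


kwargs = _coerce_kwargs_to_type_hints(ship, {"order": {"sku": "A-1", "qty": 2}})
assert isinstance(kwargs["order"], Order)  # the dict was validated into Order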
Binary files: contents not shown.