loom_core-0.1.0-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- loom_core-0.1.0.dist-info/METADATA +342 -0
- loom_core-0.1.0.dist-info/RECORD +50 -0
- loom_core-0.1.0.dist-info/WHEEL +5 -0
- loom_core-0.1.0.dist-info/entry_points.txt +2 -0
- loom_core-0.1.0.dist-info/licenses/LICENSE +21 -0
- loom_core-0.1.0.dist-info/top_level.txt +1 -0
- src/__init__.py +45 -0
- src/cli/__init__.py +5 -0
- src/cli/cli.py +246 -0
- src/common/activity.py +30 -0
- src/common/config.py +9 -0
- src/common/errors.py +64 -0
- src/common/workflow.py +56 -0
- src/core/__init__.py +0 -0
- src/core/compiled.py +41 -0
- src/core/context.py +256 -0
- src/core/engine.py +106 -0
- src/core/handle.py +166 -0
- src/core/logger.py +60 -0
- src/core/runner.py +53 -0
- src/core/state.py +96 -0
- src/core/worker.py +147 -0
- src/core/workflow.py +168 -0
- src/database/__init__.py +0 -0
- src/database/db.py +716 -0
- src/decorators/__init__.py +0 -0
- src/decorators/activity.py +126 -0
- src/decorators/workflow.py +46 -0
- src/lib/progress.py +109 -0
- src/lib/utils.py +25 -0
- src/migrations/down/001_setup_pragma.sql +5 -0
- src/migrations/down/002_create_workflows.sql +3 -0
- src/migrations/down/003.create_events.sql +3 -0
- src/migrations/down/004.create_tasks.sql +3 -0
- src/migrations/down/005.create_indexes.sql +5 -0
- src/migrations/down/006_auto_update_triggers.sql +4 -0
- src/migrations/down/007_create_logs.sql +1 -0
- src/migrations/up/001_setup_pragma.sql +11 -0
- src/migrations/up/002_create_workflows.sql +15 -0
- src/migrations/up/003_create_events.sql +13 -0
- src/migrations/up/004_create_tasks.sql +23 -0
- src/migrations/up/005_create_indexes.sql +11 -0
- src/migrations/up/006_auto_update_triggers.sql +19 -0
- src/migrations/up/007_create_logs.sql +10 -0
- src/schemas/__init__.py +0 -0
- src/schemas/activity.py +13 -0
- src/schemas/database.py +17 -0
- src/schemas/events.py +70 -0
- src/schemas/tasks.py +58 -0
- src/schemas/workflow.py +33 -0
src/core/context.py
ADDED
@@ -0,0 +1,256 @@
import datetime
from datetime import timedelta
from typing import Any, Awaitable, Callable, Generic, List

from ..common.errors import NonDeterministicWorkflowError, StopReplay
from ..database.db import Database
from ..schemas.activity import ActivityMetadata
from ..schemas.events import Event
from ..schemas.workflow import InputT, StateT
from .logger import WorkflowLogger
from .state import StateProxy


class WorkflowContext(Generic[InputT, StateT]):
    """Execution context for workflow steps with replay capabilities.

    The WorkflowContext provides a controlled execution environment for workflow
    steps, managing event history replay, state reconstruction, and activity
    scheduling. It enforces deterministic execution by controlling access to
    external resources and side effects.

    Type Parameters:
        InputT: Immutable input type for the workflow
        StateT: Mutable state type that gets reconstructed during replay

    Attributes:
        id: Unique workflow identifier
        input: Immutable workflow input data
        history: Chronological list of workflow events
        state: Current mutable workflow state
        cursor: Current position in event history during replay
    """

    id: str
    input: InputT
    history: List[Event]
    state: StateProxy[InputT, StateT]
    cursor: int = 0
    logger: WorkflowLogger

    def __init__(
        self, id: str, input: InputT, history: List[Event], state: StateT
    ) -> None:
        """Initialize workflow context with replay state.

        Args:
            id: Unique workflow identifier
            input: Immutable workflow input data
            history: List of events for replay
            state: Current workflow state
        """
        self.id = id
        self.input = input
        self.history = history
        self.state = StateProxy(self, state)
        self.logger = WorkflowLogger(self)

    # === Private Replay Management Methods ===

    def _peek(self) -> Event | None:
        """Look at the next event in history without consuming it.

        Returns:
            Next event in history or None if at end
        """
        if self.cursor >= len(self.history):
            return None
        return self.history[self.cursor]

    def _consume(self) -> Event:
        """Consume and return the next event in history.

        Returns:
            Next event in history

        Raises:
            RuntimeError: If no event is available to consume
        """
        event = self._peek()
        if event is None:
            raise RuntimeError("No event available to consume")
        self.cursor += 1
        return event

    def _match_event(self, expected_type: str) -> Event | None:
        """
        Safe helper to check if the NEXT event matches what we expect.
        Returns the event if it matches (does NOT consume).
        Returns None if the next event is something else (or end of history).
        """
        event = self._peek()
        if event and event["type"] == expected_type:
            return event
        return None

    @property
    def is_replaying(self) -> bool:
        """Check if the workflow is currently replaying events.

        Returns:
            True if there are remaining events to replay, False otherwise
        """
        return self.cursor < len(self.history)

    def _extract_activity_metadata[FuncReturn](
        self, fn: Callable[..., Awaitable[FuncReturn]], args: tuple[Any, ...]
    ) -> ActivityMetadata:
        """Extract metadata from an activity function for scheduling.

        Retrieves activity configuration attributes that were set by the
        @activity decorator, including retry settings and timeout values.

        Args:
            fn: Activity function to extract metadata from
            args: Arguments that will be passed to the activity

        Returns:
            ActivityMetadata dictionary with function metadata
        """
        return {
            "name": getattr(fn, "_activity_name", fn.__name__),
            "description": getattr(fn, "_activity_description", ""),
            "retry_count": getattr(fn, "_activity_retry_count", 0),
            "timeout_seconds": getattr(fn, "_activity_timeout_seconds", 0),
            "func": fn.__name__,
            "module": fn.__module__,
            "args": list(args),
        }

    # === Public Activity Execution Methods ===

    async def activity[FuncReturn](
        self,
        fn: Callable[..., Awaitable[FuncReturn]],
        *args,
    ) -> FuncReturn:
        metadata = self._extract_activity_metadata(fn, args)

        scheduled_event = self._match_event("ACTIVITY_SCHEDULED")

        if scheduled_event:
            if scheduled_event["payload"]["name"] != metadata["name"]:
                raise NonDeterministicWorkflowError(
                    f"Replay mismatch: Expected activity {metadata['name']}, "
                    f"found {scheduled_event['payload']['name']} in history."
                )

            self._consume()

            completed_event = self._match_event("ACTIVITY_COMPLETED")

            if completed_event:
                self._consume()
                return completed_event["payload"]["result"]  # type: ignore

            raise StopReplay

        unexpected_event = self._peek()
        if unexpected_event:
            raise NonDeterministicWorkflowError(
                f"Replay mismatch: Code wants to schedule activity {metadata['name']}, "
                f"but history contains {unexpected_event['type']}."
            )

        async with Database[InputT, StateT]() as db:
            await db.create_activity(
                workflow_id=self.id,
                metadata=metadata,
            )

        raise StopReplay

    async def sleep(
        self, delta: timedelta | None = None, until: datetime.datetime | None = None
    ) -> None:
        if delta is None and until is None:
            raise ValueError("Either 'delta' or 'until' must be provided")

        fire_at: datetime.datetime = (
            datetime.datetime.now(datetime.timezone.utc) + delta if delta else until  # type: ignore
        )

        scheduled_event = self._match_event("TIMER_SCHEDULED")

        if scheduled_event:
            self._consume()

            fired_event = self._match_event("TIMER_FIRED")
            if fired_event:
                self._consume()
                return  # Timer is done

            raise StopReplay

        unexpected_event = self._peek()
        if unexpected_event:
            raise NonDeterministicWorkflowError(
                f"Replay mismatch: Code wants to sleep, "
                f"but history contains {unexpected_event['type']}."
            )

        async with Database[InputT, StateT]() as db:
            await db.create_timer(self.id, fire_at)

        raise StopReplay

    async def wait_until_signal(self, signal_name: str) -> Any:
        """Pauses the workflow until a specific signal is received.

        If the signal is already in history (replay), it returns the data immediately.
        If not, it raises StopReplay to suspend execution until the signal arrives.
        """
        # 1. Check if the signal is next in history
        event = self._match_event("SIGNAL_RECEIVED")

        if event:
            # STRICT CHECK: Ensure this is the signal we are waiting for.
            # If the history has "Signal B" but we are waiting for "Signal A",
            # it means the code logic has changed or the flow is non-deterministic.
            if event["payload"]["name"] != signal_name:
                raise NonDeterministicWorkflowError(
                    f"Replay mismatch: Expected signal '{signal_name}', "
                    f"but history contains signal '{event['payload']['name']}'."
                )

            self._consume()
            return event["payload"]["data"]

        unexpected_event = self._peek()
        if unexpected_event:
            raise NonDeterministicWorkflowError(
                f"Replay mismatch: Workflow expecting signal '{signal_name}', "
                f"but history contains {unexpected_event['type']}."
            )

        self.logger.info(f"Waiting for signal: {signal_name}")
        raise StopReplay

    def last_emitted_event_type(self) -> str | None:
        """Get the type of the last emitted event in the history."""
        return self.history[-1]["type"] if self.history else None

    async def _append_event(self, type: str, payload: dict[str, Any]) -> None:
        """Append a new event to the workflow's event history in the database.

        Args:
            type: Type of the event to append
            payload: Payload data for the event
        """
        async with Database[InputT, StateT]() as db:
            await db.create_event(
                workflow_id=self.id,
                type=type,
                payload=payload,
            )
        self.history.append({"type": type, "payload": payload})
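The replay contract above is easiest to see end to end: on a first execution, `activity()` finds no matching history, records an `ACTIVITY_SCHEDULED` event, and raises `StopReplay` to suspend the step; on the next execution, the scheduled/completed pair is replayed from history and the cached result is returned without re-running the function. A minimal sketch of the replay path, assuming the wheel's `src` top-level package is importable (per top_level.txt); `charge_card` and the hand-built history are illustrative only:

```python
import asyncio

from src.core.context import WorkflowContext  # assumed import path for this wheel


async def charge_card(order_id: str) -> str:
    # Hypothetical activity; during replay it is never re-executed.
    return f"receipt-for-{order_id}"


async def demo() -> None:
    # History as it would look after the activity already ran once.
    history = [
        {"type": "ACTIVITY_SCHEDULED", "payload": {"name": "charge_card"}},
        {"type": "ACTIVITY_COMPLETED",
         "payload": {"name": "charge_card", "result": "receipt-for-42"}},
    ]
    ctx = WorkflowContext("wf-1", {"order_id": "42"}, history, {})

    # Both events match, so the cursor advances and the stored result is
    # returned immediately -- no database call, no second charge.
    result = await ctx.activity(charge_card, "42")
    assert result == "receipt-for-42"
    assert not ctx.is_replaying  # cursor reached the end of history


asyncio.run(demo())
```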
src/core/engine.py
ADDED
@@ -0,0 +1,106 @@
from datetime import datetime, timedelta, timezone
from typing import Generic

from ..common.activity import load_activity
from ..common.errors import StopReplay
from ..common.workflow import workflow_registry
from ..database.db import Database
from ..schemas.activity import ActivityMetadata
from ..schemas.tasks import Task
from ..schemas.workflow import InputT, StateT
from .context import WorkflowContext


class Engine(Generic[InputT, StateT]):
    """Core workflow execution engine with replay capabilities.

    The Engine is responsible for workflow replay and step execution. It
    reconstructs workflow state from event history and executes steps in
    a deterministic manner, stopping when side effects are encountered.
    """

    @staticmethod
    async def replay_activity(task: Task):
        try:
            workflow_id = task["workflow_id"]
            activity_name = task["target"]
            async with Database[InputT, StateT]() as db:
                event = await db.get_activity_event(
                    workflow_id, activity_name, task["attempts"]
                )

            if not event:
                raise ValueError(
                    f"No event found for activity {activity_name} in workflow {workflow_id} on attempt {task['attempts']}"
                )

            metadata = ActivityMetadata(**event["payload"])  # type: ignore
            fn = load_activity(metadata["module"], metadata["func"])
            args = metadata["args"]
            response = await fn(*args)

            async with Database[InputT, StateT]() as db:
                await db.create_event(
                    workflow_id,
                    "ACTIVITY_COMPLETED",
                    payload={
                        "name": activity_name,
                        "result": response,
                    },
                )

                await db.task_completed(task["id"])
                await db.recreate_workflow_task(workflow_id)

        except Exception as e:
            async with Database[InputT, StateT]() as db:
                if task["attempts"] >= task["max_attempts"]:
                    await db.task_failed(task["id"], str(e))
                    await db.create_event(
                        workflow_id=task["workflow_id"],
                        type="ACTIVITY_FAILED",
                        payload={
                            "name": task["target"],
                            "error": str(e),
                        },
                    )
                else:

                    delay = min(60, 2 ** task["attempts"])
                    next_run = datetime.now(timezone.utc) + timedelta(seconds=delay)
                    await db.schedule_retry(task["id"], next_run, str(e))

    @staticmethod
    async def replay_until_block(workflow_id: str) -> None:
        # Load workflow event history
        async with Database[InputT, StateT]() as db:
            workflow_def = await db.get_workflow_info(workflow_id)
            history = await db.get_workflow_events(workflow_id=workflow_def["id"])

        ctx: WorkflowContext = WorkflowContext(
            workflow_def["id"], workflow_def["input"], history, {}
        )

        first_event = ctx._peek()
        if first_event and first_event["type"] == "WORKFLOW_STARTED":
            ctx._consume()

        workflow_cls = workflow_registry(workflow_def["module"], workflow_def["name"])
        workflow = workflow_cls()
        steps = workflow._discover_workflow_steps()

        try:
            for step in steps:
                step_fn = getattr(workflow, step["fn"])
                await step_fn(ctx)

        except StopReplay:
            last = ctx.last_emitted_event_type()
            if last in ("STATE_SET", "STATE_UPDATE"):
                async with Database[InputT, StateT]() as db:
                    await db.rotate_workflow_driver(workflow_id)
            return

        async with Database[InputT, StateT]() as db:
            await db.create_event(workflow_id, "WORKFLOW_COMPLETED", {})
            await db.complete_running_step(workflow_id)
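The retry branch of `replay_activity` reschedules failed activities with exponential backoff capped at one minute. A quick check of the delays that `min(60, 2 ** attempts)` yields:

```python
# Delay (seconds) before the next attempt, as computed in replay_activity.
for attempts in range(8):
    print(attempts, min(60, 2 ** attempts))
# 0 -> 1s, 1 -> 2s, 2 -> 4s, 3 -> 8s, 4 -> 16s, 5 -> 32s, 6+ -> 60s (cap)
```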
src/core/handle.py
ADDED
@@ -0,0 +1,166 @@
from datetime import datetime
from typing import Any, Dict, Generic, Iterable, List

from ..common.errors import (
    WorkerCancelledError,
    WorkflowExecutionError,
    WorkflowStillRunningError,
)
from ..database.db import Database
from ..schemas.events import (
    ActivityFailurePayload,
    Event,
    ExtractedError,
    WorkflowFailurePayload,
)
from ..schemas.workflow import InputT, StateT, WorkflowInfo


class WorkflowHandle(Generic[InputT, StateT]):
    """
    Handle for managing workflows.
    """

    id: str

    def __init__(self, id: str) -> None:
        self.id = id

    async def info(self) -> WorkflowInfo:
        async with Database[InputT, StateT]() as db:
            row = await db.get_workflow_info(self.id)

        return WorkflowInfo(
            id=row["id"],
            name=row["name"],
            status=row["status"],
            module=row["module"],
            created_at=datetime.fromisoformat(row["created_at"]),
            updated_at=datetime.fromisoformat(row["updated_at"]),
        )

    async def status(self) -> str:
        """Get workflow status efficiently without fetching all info."""
        async with Database[InputT, StateT]() as db:
            return await db.get_workflow_status(self.id)

    async def result(self) -> StateT:
        # Check status first to avoid unnecessary work
        async with Database[InputT, StateT]() as db:
            status = await db.get_workflow_status(self.id)

        if status == "RUNNING":
            raise WorkflowStillRunningError(
                "Workflow is still running; result is not available."
            )

        async with Database[InputT, StateT]() as db:
            events = await db.get_workflow_events(self.id)

        state = self._replay_state(events)

        if status == "FAILED":
            error = self._extract_error(events)
            raise WorkflowExecutionError(error)
        if status == "CANCELED":
            raise WorkerCancelledError("Workflow was canceled; no result is available.")

        return state

    async def signal(self, name: str, payload: Dict[str, Any]) -> None:
        # Validate signal name and payload.
        if not name:
            raise ValueError("Signal name must be a non-empty string.")
        if not isinstance(payload, dict):
            raise ValueError("Signal payload must be a dictionary.")

        async with Database[InputT, StateT]() as db:
            await db.create_signal_event(self.id, name, payload)

    def _replay_state(self, events: List[Event]) -> StateT:
        """Replay workflow state from events with optimized processing."""
        # Initialize as dict since we don't know the concrete StateT type at runtime
        state_dict = {}

        for event in events:
            event_type = event["type"]

            if event_type == "STATE_SET":
                try:
                    payload = event["payload"]
                    state_dict[payload["key"]] = payload["value"]
                except (KeyError, TypeError):
                    continue

            elif event_type == "STATE_UPDATE":
                try:
                    payload = event["payload"]
                    state_dict.update(payload)
                except TypeError:
                    continue

        return state_dict  # type: ignore

    def _extract_error(self, events: Iterable[Event]) -> ExtractedError:
        """
        Extract the most relevant failure from workflow events.

        Rules:
        - Prefer WORKFLOW_FAILED over ACTIVITY_FAILED
        - Use the last failure event
        - Never raise from this method
        """

        last_workflow_failure: WorkflowFailurePayload | None = None
        last_activity_failure: ActivityFailurePayload | None = None

        for event in events:
            etype = event["type"]
            payload = event.get("payload", {})

            if etype == "WORKFLOW_FAILED":
                last_workflow_failure = payload  # type: ignore

            elif etype == "ACTIVITY_FAILED":
                last_activity_failure = payload  # type: ignore

        if last_workflow_failure:
            return {
                "source": "WORKFLOW",
                "message": last_workflow_failure.get("error", "Workflow failed"),
                "step": last_workflow_failure.get("step"),
                "details": dict(last_workflow_failure),
            }

        if last_activity_failure:
            return {
                "source": "ACTIVITY",
                "message": last_activity_failure.get("error", "Activity failed"),
                "activity": last_activity_failure.get("activity"),
                "details": dict(last_activity_failure),
            }

        # Defensive fallback
        return {
            "source": "WORKFLOW",
            "message": "Workflow failed for unknown reasons",
            "details": {},
        }

    @classmethod
    def with_id(cls, id: str) -> "WorkflowHandle[InputT, StateT]":
        """
        Create a new WorkflowHandle with the specified ID.

        Args:
            id: The workflow ID to associate with the handle.

        Returns:
            A new WorkflowHandle instance with the given ID.

        Example:
            ```python
            handle = WorkflowHandle.with_id("workflow-1234")
            ```
        """
        return cls(id)
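Putting the handle together, a caller polls status, collects the replayed state, and reacts to the failure modes surfaced above. A sketch assuming the same `src` import path; the workflow id is the one from the docstring example:

```python
import asyncio

from src.common.errors import WorkflowExecutionError, WorkflowStillRunningError
from src.core.handle import WorkflowHandle


async def main() -> None:
    handle = WorkflowHandle.with_id("workflow-1234")
    print(await handle.status())  # cheap status probe, no event fetch

    try:
        # State is rebuilt from STATE_SET / STATE_UPDATE events.
        state = await handle.result()
        print(state)
    except WorkflowStillRunningError:
        # Still in flight; e.g. nudge it with a signal instead.
        await handle.signal("manager_approved", {"approved": True})
    except WorkflowExecutionError as err:
        print(err)  # wraps the ExtractedError payload


asyncio.run(main())
```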
src/core/logger.py
ADDED
@@ -0,0 +1,60 @@
import asyncio
import logging
from typing import Any

from ..database.db import Database

# Configure logging with rich formatting if available
try:
    from rich.logging import RichHandler

    logging.basicConfig(
        level=logging.INFO,
        format="%(message)s",
        handlers=[RichHandler(rich_tracebacks=True, show_time=True, show_path=False)],
    )
except ImportError:
    # Fallback to standard logging
    logging.basicConfig(
        level=logging.INFO,
        format="%(asctime)s - %(name)s - %(levelname)s - %(message)s",
    )


class WorkflowLogger:
    def __init__(self, ctx: Any):
        self._ctx = ctx
        self._std_logger = logging.getLogger("workflow")
        self._std_logger.setLevel(logging.DEBUG)

    def info(self, msg: str):
        self._log("INFO", msg)

    def error(self, msg: str):
        self._log("ERROR", msg)

    def warning(self, msg: str):
        self._log("WARNING", msg)

    def debug(self, msg: str):
        self._log("DEBUG", msg)

    def _log(self, level: str, msg: str):
        if self._ctx.is_replaying:
            return

        self._std_logger.log(getattr(logging, level, logging.INFO), msg)
        asyncio.create_task(self._write_log_to_db(level, msg))

    async def _write_log_to_db(self, level: str, msg: str):
        # This is a "best effort" write. If it fails, workflow proceeds.
        try:
            async with Database[Any, Any]() as db:
                await db.create_log(
                    workflow_id=self._ctx.id,
                    level=level,
                    message=msg,
                )
        except Exception:
            # Never crash the workflow because logging failed
            pass
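Because a step's code re-executes from the top on every resume, the `is_replaying` guard is what keeps each replay from re-emitting old log lines; only genuinely new progress is printed and queued for the database. A sketch with a hypothetical context stub exposing the two attributes the logger touches:

```python
import asyncio

from src.core.logger import WorkflowLogger  # assumed import path


class CtxStub:
    # Hypothetical stand-in for WorkflowContext: _log reads is_replaying,
    # _write_log_to_db reads id.
    id = "wf-1"

    def __init__(self, replaying: bool) -> None:
        self.is_replaying = replaying


async def main() -> None:
    WorkflowLogger(CtxStub(replaying=True)).info("charging card")   # suppressed
    WorkflowLogger(CtxStub(replaying=False)).info("charging card")  # printed + queued
    await asyncio.sleep(0)  # let the fire-and-forget DB write start


asyncio.run(main())
```

Note that `_log` keeps no reference to the task returned by `asyncio.create_task`, so a pending write can in principle be garbage-collected before it runs; given the explicit best-effort stance in `_write_log_to_db`, that appears to be an accepted trade-off.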
src/core/runner.py
ADDED
@@ -0,0 +1,53 @@
import datetime
import traceback

from ..common.errors import StopReplay
from ..database.db import Database
from .engine import Engine


async def run_once() -> bool:
    """Execute a single task from the queue.

    Returns:
        bool: True if a task was executed, False if no tasks available
    """
    db: Database = Database()
    async with db:
        task = await db.claim_task()
        if not task:
            return False

        workflow_id = task["workflow_id"]
        is_completed = await db.workflow_is_completed(workflow_id)
        if is_completed:
            await db.task_completed(task["id"])
            return True

        try:
            if task["kind"] == "STEP":
                await Engine.replay_until_block(workflow_id)
            elif task["kind"] == "ACTIVITY":
                await Engine.replay_activity(task)
            elif task["kind"] == "TIMER":
                now = datetime.datetime.now(datetime.timezone.utc)
                run_at = datetime.datetime.fromisoformat(task["run_at"])
                if now < run_at:
                    await db.release_task(task["id"])
                    return True

                await db.create_event(
                    workflow_id=workflow_id,
                    type="TIMER_FIRED",
                    payload={},
                )

                await db.rotate_workflow_driver(task["workflow_id"])
                await db.task_completed(task["id"])
            return True
        except StopReplay:
            return True
        except Exception as e:
            traceback.print_exc()
            await db.task_failed(task["id"], str(e))
            return True