aury-agent 0.0.4__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (149)
  1. aury/__init__.py +2 -0
  2. aury/agents/__init__.py +55 -0
  3. aury/agents/a2a/__init__.py +168 -0
  4. aury/agents/backends/__init__.py +196 -0
  5. aury/agents/backends/artifact/__init__.py +9 -0
  6. aury/agents/backends/artifact/memory.py +130 -0
  7. aury/agents/backends/artifact/types.py +133 -0
  8. aury/agents/backends/code/__init__.py +65 -0
  9. aury/agents/backends/file/__init__.py +11 -0
  10. aury/agents/backends/file/local.py +66 -0
  11. aury/agents/backends/file/types.py +40 -0
  12. aury/agents/backends/invocation/__init__.py +8 -0
  13. aury/agents/backends/invocation/memory.py +81 -0
  14. aury/agents/backends/invocation/types.py +110 -0
  15. aury/agents/backends/memory/__init__.py +8 -0
  16. aury/agents/backends/memory/memory.py +179 -0
  17. aury/agents/backends/memory/types.py +136 -0
  18. aury/agents/backends/message/__init__.py +9 -0
  19. aury/agents/backends/message/memory.py +122 -0
  20. aury/agents/backends/message/types.py +124 -0
  21. aury/agents/backends/sandbox.py +275 -0
  22. aury/agents/backends/session/__init__.py +8 -0
  23. aury/agents/backends/session/memory.py +93 -0
  24. aury/agents/backends/session/types.py +124 -0
  25. aury/agents/backends/shell/__init__.py +11 -0
  26. aury/agents/backends/shell/local.py +110 -0
  27. aury/agents/backends/shell/types.py +55 -0
  28. aury/agents/backends/shell.py +209 -0
  29. aury/agents/backends/snapshot/__init__.py +19 -0
  30. aury/agents/backends/snapshot/git.py +95 -0
  31. aury/agents/backends/snapshot/hybrid.py +125 -0
  32. aury/agents/backends/snapshot/memory.py +86 -0
  33. aury/agents/backends/snapshot/types.py +59 -0
  34. aury/agents/backends/state/__init__.py +29 -0
  35. aury/agents/backends/state/composite.py +49 -0
  36. aury/agents/backends/state/file.py +57 -0
  37. aury/agents/backends/state/memory.py +52 -0
  38. aury/agents/backends/state/sqlite.py +262 -0
  39. aury/agents/backends/state/types.py +178 -0
  40. aury/agents/backends/subagent/__init__.py +165 -0
  41. aury/agents/cli/__init__.py +41 -0
  42. aury/agents/cli/chat.py +239 -0
  43. aury/agents/cli/config.py +236 -0
  44. aury/agents/cli/extensions.py +460 -0
  45. aury/agents/cli/main.py +189 -0
  46. aury/agents/cli/session.py +337 -0
  47. aury/agents/cli/workflow.py +276 -0
  48. aury/agents/context_providers/__init__.py +66 -0
  49. aury/agents/context_providers/artifact.py +299 -0
  50. aury/agents/context_providers/base.py +177 -0
  51. aury/agents/context_providers/memory.py +70 -0
  52. aury/agents/context_providers/message.py +130 -0
  53. aury/agents/context_providers/skill.py +50 -0
  54. aury/agents/context_providers/subagent.py +46 -0
  55. aury/agents/context_providers/tool.py +68 -0
  56. aury/agents/core/__init__.py +83 -0
  57. aury/agents/core/base.py +573 -0
  58. aury/agents/core/context.py +797 -0
  59. aury/agents/core/context_builder.py +303 -0
  60. aury/agents/core/event_bus/__init__.py +15 -0
  61. aury/agents/core/event_bus/bus.py +203 -0
  62. aury/agents/core/factory.py +169 -0
  63. aury/agents/core/isolator.py +97 -0
  64. aury/agents/core/logging.py +95 -0
  65. aury/agents/core/parallel.py +194 -0
  66. aury/agents/core/runner.py +139 -0
  67. aury/agents/core/services/__init__.py +5 -0
  68. aury/agents/core/services/file_session.py +144 -0
  69. aury/agents/core/services/message.py +53 -0
  70. aury/agents/core/services/session.py +53 -0
  71. aury/agents/core/signals.py +109 -0
  72. aury/agents/core/state.py +363 -0
  73. aury/agents/core/types/__init__.py +107 -0
  74. aury/agents/core/types/action.py +176 -0
  75. aury/agents/core/types/artifact.py +135 -0
  76. aury/agents/core/types/block.py +736 -0
  77. aury/agents/core/types/message.py +350 -0
  78. aury/agents/core/types/recall.py +144 -0
  79. aury/agents/core/types/session.py +257 -0
  80. aury/agents/core/types/subagent.py +154 -0
  81. aury/agents/core/types/tool.py +205 -0
  82. aury/agents/eval/__init__.py +331 -0
  83. aury/agents/hitl/__init__.py +57 -0
  84. aury/agents/hitl/ask_user.py +242 -0
  85. aury/agents/hitl/compaction.py +230 -0
  86. aury/agents/hitl/exceptions.py +87 -0
  87. aury/agents/hitl/permission.py +617 -0
  88. aury/agents/hitl/revert.py +216 -0
  89. aury/agents/llm/__init__.py +31 -0
  90. aury/agents/llm/adapter.py +367 -0
  91. aury/agents/llm/openai.py +294 -0
  92. aury/agents/llm/provider.py +476 -0
  93. aury/agents/mcp/__init__.py +153 -0
  94. aury/agents/memory/__init__.py +46 -0
  95. aury/agents/memory/compaction.py +394 -0
  96. aury/agents/memory/manager.py +465 -0
  97. aury/agents/memory/processor.py +177 -0
  98. aury/agents/memory/store.py +187 -0
  99. aury/agents/memory/types.py +137 -0
  100. aury/agents/messages/__init__.py +40 -0
  101. aury/agents/messages/config.py +47 -0
  102. aury/agents/messages/raw_store.py +224 -0
  103. aury/agents/messages/store.py +118 -0
  104. aury/agents/messages/types.py +88 -0
  105. aury/agents/middleware/__init__.py +31 -0
  106. aury/agents/middleware/base.py +341 -0
  107. aury/agents/middleware/chain.py +342 -0
  108. aury/agents/middleware/message.py +129 -0
  109. aury/agents/middleware/message_container.py +126 -0
  110. aury/agents/middleware/raw_message.py +153 -0
  111. aury/agents/middleware/truncation.py +139 -0
  112. aury/agents/middleware/types.py +81 -0
  113. aury/agents/plugin.py +162 -0
  114. aury/agents/react/__init__.py +4 -0
  115. aury/agents/react/agent.py +1923 -0
  116. aury/agents/sandbox/__init__.py +23 -0
  117. aury/agents/sandbox/local.py +239 -0
  118. aury/agents/sandbox/remote.py +200 -0
  119. aury/agents/sandbox/types.py +115 -0
  120. aury/agents/skill/__init__.py +16 -0
  121. aury/agents/skill/loader.py +180 -0
  122. aury/agents/skill/types.py +83 -0
  123. aury/agents/tool/__init__.py +39 -0
  124. aury/agents/tool/builtin/__init__.py +23 -0
  125. aury/agents/tool/builtin/ask_user.py +155 -0
  126. aury/agents/tool/builtin/bash.py +107 -0
  127. aury/agents/tool/builtin/delegate.py +726 -0
  128. aury/agents/tool/builtin/edit.py +121 -0
  129. aury/agents/tool/builtin/plan.py +277 -0
  130. aury/agents/tool/builtin/read.py +91 -0
  131. aury/agents/tool/builtin/thinking.py +111 -0
  132. aury/agents/tool/builtin/yield_result.py +130 -0
  133. aury/agents/tool/decorator.py +252 -0
  134. aury/agents/tool/set.py +204 -0
  135. aury/agents/usage/__init__.py +12 -0
  136. aury/agents/usage/tracker.py +236 -0
  137. aury/agents/workflow/__init__.py +85 -0
  138. aury/agents/workflow/adapter.py +268 -0
  139. aury/agents/workflow/dag.py +116 -0
  140. aury/agents/workflow/dsl.py +575 -0
  141. aury/agents/workflow/executor.py +659 -0
  142. aury/agents/workflow/expression.py +136 -0
  143. aury/agents/workflow/parser.py +182 -0
  144. aury/agents/workflow/state.py +145 -0
  145. aury/agents/workflow/types.py +86 -0
  146. aury_agent-0.0.4.dist-info/METADATA +90 -0
  147. aury_agent-0.0.4.dist-info/RECORD +149 -0
  148. aury_agent-0.0.4.dist-info/WHEEL +4 -0
  149. aury_agent-0.0.4.dist-info/entry_points.txt +2 -0
@@ -0,0 +1,659 @@
1
+ """Workflow executor with middleware support and lifecycle hooks."""
2
+ from __future__ import annotations
3
+
4
+ import asyncio
5
+ import contextvars
6
+ import time
7
+ from typing import Any, AsyncIterator, TYPE_CHECKING
8
+
9
+ from ..core.logging import workflow_logger as logger
10
+ from ..core.context import InvocationContext, set_parent_id, reset_parent_id
11
+ from ..core.event_bus import Events
12
+ from ..core.types.block import BlockEvent, BlockKind, BlockOp
13
+ from ..core.types.session import generate_id
14
+ from ..core.signals import SuspendSignal, HITLSuspend
15
+ from ..middleware import HookAction
16
+ from .types import NodeType, NodeSpec, Workflow
17
+ from .expression import ExpressionEvaluator
18
+ from .state import WorkflowState, get_merge_strategy
19
+ from .dag import DAGExecutor
20
+ from ..core.factory import AgentFactory
21
+
22
+ if TYPE_CHECKING:
23
+ from ..middleware import MiddlewareChain
24
+
25
+
26
+ class WorkflowExecutor:
27
+ """Workflow executor with middleware hooks.
28
+
29
+ Middleware priority:
30
+ 1. Node-level middleware (from NodeSpec.middleware)
31
+ 2. Workflow-level middleware (from WorkflowSpec.middleware)
32
+ 3. Context middleware (from InvocationContext.middleware)
33
+
34
+ Calls middleware hooks:
35
+ - on_subagent_start/end: when executing agent nodes
36
+ """
37
+
38
def __init__(
    self,
    workflow: Workflow,
    agent_factory: AgentFactory,
    ctx: InvocationContext,
    middleware: "MiddlewareChain | None" = None,
):
    """Initialize the executor.

    Args:
        workflow: Parsed workflow definition to run.
        agent_factory: Factory used to instantiate agents for AGENT nodes.
        ctx: Invocation context providing bus, backends, and usage tracker.
        middleware: Optional explicit middleware chain. When given it
            overrides both the workflow spec's chain and the context's.
    """
    self.workflow = workflow
    self.agent_factory = agent_factory
    self.ctx = ctx
    # Resolution priority: explicit argument > workflow spec > context.
    self.middleware = middleware or workflow.spec.middleware or ctx.middleware
    self.evaluator = ExpressionEvaluator()

    self._state = WorkflowState()
    self._paused = False                        # set by pause()
    self._waiting_for_input = False             # HITL input pending
    self._suspended = False                     # a node raised SuspendSignal
    self._suspended_node_id: str | None = None  # node where suspension occurred
    self._pending_request: dict | None = None   # pending HITL request payload
    self._start_time: float | None = None       # wall-clock start of execute()
    self._node_usage: dict[str, dict] = {}      # per-node duration/agent stats
60
+
61
async def execute(
    self,
    inputs: dict[str, Any],
    resume_state: dict[str, Any] | None = None,
) -> dict[str, Any]:
    """Run the workflow DAG until it completes, pauses, or suspends.

    Args:
        inputs: Workflow inputs, exposed to node expressions as ``inputs``.
        resume_state: Previously persisted state to resume from
            (pause/resume support).

    Returns:
        Final workflow data: state dict, DAG status, duration, and usage.
    """
    self._start_time = time.time()

    # Emit START block so stream consumers see the workflow beginning.
    await self.ctx.emit(BlockEvent(
        block_id=generate_id("blk"),
        kind=BlockKind.START,
        op=BlockOp.APPLY,
        data={
            "workflow": self.workflow.spec.name,
            "inputs": inputs,
        },
    ))

    # Announce the invocation on the event bus.
    await self.ctx.bus.publish(Events.INVOCATION_START, {
        "invocation_id": self.ctx.invocation_id,
        "session_id": self.ctx.session_id,
        "workflow": self.workflow.spec.name,
    })

    # Restore prior progress when resuming a paused/suspended run.
    done_ids: set = set()
    if resume_state:
        self._state = WorkflowState.from_dict(resume_state.get("workflow_state", {}))
        done_ids = set(resume_state.get("completed_nodes", []))

    logger.info(
        "Starting workflow execution",
        extra={
            "workflow": self.workflow.spec.name,
            "session_id": self.ctx.session_id,
            "invocation_id": self.ctx.invocation_id,
        }
    )

    eval_context = {
        "inputs": inputs,
        "state": self._state,
    }

    dag = DAGExecutor(
        tasks=self.workflow.spec.nodes,
        get_task_id=lambda n: n.id,
        get_dependencies=lambda n: self.workflow.incoming_edges[n.id],
    )

    # Pre-mark nodes already finished in a previous run (resume path).
    for node_id in done_ids:
        dag.mark_completed(node_id)

    while (
        not dag.is_finished()
        and not self.ctx.is_aborted
        and not self._paused
        and not self._waiting_for_input
        and not self._suspended
    ):
        ready = dag.get_ready_tasks()

        if not ready:
            # Nothing runnable: either we are blocked by failures, or we
            # briefly yield and re-check.
            if dag.is_blocked():
                logger.warning(
                    "Workflow blocked due to failed dependencies",
                    extra={"workflow": self.workflow.spec.name, "failed": list(dag.failed)}
                )
                # Everything not yet processed can never run — skip it.
                processed = dag.completed | dag.failed | dag.running | dag.skipped
                for node in self.workflow.spec.nodes:
                    if node.id not in processed:
                        dag.mark_skipped(node.id)
                break
            await asyncio.sleep(0.05)
            continue

        pending = []
        # Snapshot the current context so child tasks inherit ContextVars
        # (emit_queue, parent_id).
        snapshot = contextvars.copy_context()

        for node in ready:
            # Conditional nodes: skip when their `when` guard is false.
            if node.when and not self.evaluator.evaluate_condition(node.when, eval_context):
                dag.mark_skipped(node.id)
                continue

            dag.mark_running(node.id)
            # Explicit context keeps ContextVars intact inside the task.
            pending.append(asyncio.create_task(
                self._execute_node_with_context(node, eval_context, dag),
                context=snapshot,
            ))

        if pending:
            await asyncio.gather(*pending, return_exceptions=True)

        # Persist state after each wave so a crash loses little progress.
        await self._persist_state(dag, inputs)

    # Persist final state (also covers pause/suspend exits).
    await self._persist_state(dag, inputs)

    status = dag.get_status()

    duration_ms = int((time.time() - self._start_time) * 1000) if self._start_time else 0

    # Summarize token/usage data when a tracker is attached.
    usage_summary = None
    if self.ctx.usage:
        usage_summary = self.ctx.usage.summarize(
            session_id=self.ctx.session_id,
            invocation_id=self.ctx.invocation_id,
        )

    final_data = {
        "state": self._state.to_dict(),
        "status": status,
        "duration_ms": duration_ms,
        "usage": usage_summary,
        "node_usage": self._node_usage,
    }

    if self._suspended:
        final_status = "suspended"
    elif self._waiting_for_input:
        final_status = "paused"
    else:
        final_status = "completed"

    # Emit END block with the output (sibling of START; both are roots).
    await self.ctx.emit(BlockEvent(
        block_id=generate_id("blk"),
        kind=BlockKind.END,
        op=BlockOp.APPLY,
        data={
            "status": final_status,
            "output": self._state.to_dict(),
            "duration_ms": duration_ms,
            "usage": usage_summary,
        },
    ))

    # Publish end event via Bus (includes usage summary).
    await self.ctx.bus.publish(Events.INVOCATION_END, {
        "invocation_id": self.ctx.invocation_id,
        "session_id": self.ctx.session_id,
        "status": final_status,
        "usage": usage_summary,
    })

    logger.info(
        "Workflow execution completed",
        extra={
            "workflow": self.workflow.spec.name,
            "completed": status["completed"],
            "failed": status["failed"],
            "duration_ms": duration_ms,
            "total_tokens": usage_summary.get("total_tokens") if usage_summary else 0,
        }
    )

    return final_data
236
+
237
async def _persist_state(self, dag: DAGExecutor, inputs: dict[str, Any] | None = None) -> None:
    """Persist workflow execution state for crash/suspend recovery.

    The saved payload contains:
    - workflow_state: WorkflowState output values
    - completed_nodes: ids of nodes that finished execution
    - inputs: original workflow inputs
    - waiting_for_input / suspended: pause flags
    - current_node / pending_request: suspension details (only when suspended)

    No-op when the context exposes no state backend.
    """
    payload: dict[str, Any] = {
        "workflow_state": self._state.to_dict(),
        "completed_nodes": list(dag.get_status().get("completed_ids", [])),
        "inputs": inputs or {},
        "waiting_for_input": self._waiting_for_input,
        "suspended": self._suspended,
    }

    # Attach suspension details so the run can be resumed at the right node.
    if self._suspended:
        payload["current_node"] = self._suspended_node_id
        if self._pending_request:
            payload["pending_request"] = self._pending_request

    if self.ctx.backends and self.ctx.backends.state:
        key = f"workflow_state:{self.ctx.invocation_id}"
        await self.ctx.backends.state.set("workflow", key, payload)
264
+
265
def pause(self) -> None:
    """Request a pause: the execute() loop exits after the current wave."""
    self._paused = True
268
+
269
async def _execute_node_with_context(
    self,
    node: NodeSpec,
    eval_context: dict[str, Any],
    dag: DAGExecutor,
) -> None:
    """Execute a node directly, without extra context copying.

    An earlier version copied the context and created a nested task,
    which caused ContextVar issues. Execution is now direct, since
    set_parent_id is called inside _run_single_agent.
    """
    await self._execute_node(node, eval_context, dag)
282
+
283
+ async def _execute_node(
284
+ self,
285
+ node: NodeSpec,
286
+ eval_context: dict[str, Any],
287
+ dag: DAGExecutor,
288
+ ) -> None:
289
+ """Execute single node with lifecycle hooks.
290
+
291
+ Each node execution creates a NODE block. All child agent blocks
292
+ will have this node block as their parent via set_parent_id().
293
+
294
+ Block hierarchy:
295
+ NODE block (node_id, status: running -> completed/failed)
296
+ └── [Child agent blocks, all with parent_id = node_block_id]
297
+ ├── text
298
+ ├── tool_use
299
+ └── tool_result
300
+ """
301
+ node_start_time = time.time()
302
+ node_block_id = generate_id("blk")
303
+
304
+ try:
305
+ match node.type:
306
+ case NodeType.TRIGGER:
307
+ # Start node - emit NODE block with inputs
308
+ await self.ctx.emit(BlockEvent(
309
+ block_id=node_block_id,
310
+ kind=BlockKind.NODE,
311
+ op=BlockOp.APPLY,
312
+ data={
313
+ "node_id": node.id,
314
+ "agent": "start",
315
+ "status": "running",
316
+ "inputs": eval_context.get("inputs", {}),
317
+ },
318
+ ))
319
+ # Immediately complete
320
+ await self.ctx.emit(BlockEvent(
321
+ block_id=node_block_id,
322
+ kind=BlockKind.NODE,
323
+ op=BlockOp.PATCH,
324
+ data={
325
+ "status": "completed",
326
+ "duration_ms": int((time.time() - node_start_time) * 1000),
327
+ },
328
+ ))
329
+ dag.mark_completed(node.id)
330
+
331
+ case NodeType.TERMINAL:
332
+ # End node - emit NODE block with final output
333
+ # Resolve output from node config or collect from state
334
+ output = self._state.to_dict()
335
+ if node.inputs:
336
+ output = self.evaluator.resolve_inputs(node.inputs, eval_context)
337
+
338
+ await self.ctx.emit(BlockEvent(
339
+ block_id=node_block_id,
340
+ kind=BlockKind.NODE,
341
+ op=BlockOp.APPLY,
342
+ data={
343
+ "node_id": node.id,
344
+ "agent": "end",
345
+ "status": "running",
346
+ "inputs": output,
347
+ },
348
+ ))
349
+ # Immediately complete with output
350
+ await self.ctx.emit(BlockEvent(
351
+ block_id=node_block_id,
352
+ kind=BlockKind.NODE,
353
+ op=BlockOp.PATCH,
354
+ data={
355
+ "status": "completed",
356
+ "duration_ms": int((time.time() - node_start_time) * 1000),
357
+ "output": output,
358
+ },
359
+ ))
360
+ dag.mark_completed(node.id)
361
+
362
+ case NodeType.AGENT:
363
+ # Resolve inputs
364
+ inputs = self.evaluator.resolve_inputs(node.inputs, eval_context)
365
+
366
+ # Emit NODE block with status "running"
367
+ await self.ctx.emit(BlockEvent(
368
+ block_id=node_block_id,
369
+ kind=BlockKind.NODE,
370
+ op=BlockOp.APPLY,
371
+ data={
372
+ "node_id": node.id,
373
+ "agent": node.agent,
374
+ "status": "running",
375
+ "inputs": inputs,
376
+ },
377
+ ))
378
+
379
+ # Execute agent with node_block_id as parent for child blocks
380
+ result = await self._execute_agent_node(node, eval_context, node_block_id)
381
+
382
+ # Record node duration
383
+ duration_ms = int((time.time() - node_start_time) * 1000)
384
+ self._node_usage[node.id] = {
385
+ "duration_ms": duration_ms,
386
+ "agent": node.agent,
387
+ }
388
+
389
+ # Patch NODE block with completed status
390
+ await self.ctx.emit(BlockEvent(
391
+ block_id=node_block_id,
392
+ kind=BlockKind.NODE,
393
+ op=BlockOp.PATCH,
394
+ data={
395
+ "status": "completed",
396
+ "duration_ms": duration_ms,
397
+ "output": result,
398
+ },
399
+ ))
400
+
401
+ dag.mark_completed(node.id)
402
+
403
+ case NodeType.CONDITION:
404
+ await self._execute_condition_node(node, eval_context, dag)
405
+
406
+ case _:
407
+ dag.mark_completed(node.id)
408
+
409
+ except SuspendSignal as e:
410
+ # HITL/Pause signal from child agent or tool
411
+ logger.info(
412
+ "Node suspended",
413
+ extra={
414
+ "node_id": node.id,
415
+ "signal_type": type(e).__name__,
416
+ }
417
+ )
418
+
419
+ # Patch NODE block with suspended status
420
+ if node.type == NodeType.AGENT:
421
+ await self.ctx.emit(BlockEvent(
422
+ block_id=node_block_id,
423
+ kind=BlockKind.NODE,
424
+ op=BlockOp.PATCH,
425
+ data={
426
+ "status": "suspended",
427
+ "duration_ms": int((time.time() - node_start_time) * 1000),
428
+ },
429
+ ))
430
+
431
+ # Store suspension state
432
+ self._suspended = True
433
+ self._suspended_node_id = node.id
434
+ if isinstance(e, HITLSuspend):
435
+ self._pending_request = e.to_dict()
436
+
437
+ # Don't mark as failed or completed - will resume later
438
+ # The DAG executor will stop because self._suspended is True
439
+
440
+ except Exception as e:
441
+ logger.error(
442
+ "Node execution failed",
443
+ extra={"node_id": node.id, "error": str(e)}
444
+ )
445
+
446
+ # Patch NODE block with failed status (if it was created)
447
+ if node.type == NodeType.AGENT:
448
+ await self.ctx.emit(BlockEvent(
449
+ block_id=node_block_id,
450
+ kind=BlockKind.NODE,
451
+ op=BlockOp.PATCH,
452
+ data={
453
+ "status": "failed",
454
+ "error": str(e),
455
+ "duration_ms": int((time.time() - node_start_time) * 1000),
456
+ },
457
+ ))
458
+
459
+ dag.mark_failed(node.id)
460
+
461
async def _execute_agent_node(
    self,
    node: NodeSpec,
    eval_context: dict[str, Any],
    parent_block_id: str,
) -> Any:
    """Execute an agent node and return its result.

    Supports a "foreach" fan-out in node.config: the agent runs once per
    item on a branched state and results are merged via the configured
    merge strategy.

    Args:
        node: Node specification.
        eval_context: Evaluation context with inputs and state.
        parent_block_id: Block ID used as parent for all child blocks.
    """
    if "foreach" not in node.config:
        # Plain single-agent execution.
        result = await self._run_single_agent(node, eval_context, parent_block_id)
        if node.output:
            self._state[node.output] = result
        return result

    # Fan-out: run the agent once per item of the foreach expression.
    items = self.evaluator.evaluate(node.config["foreach"], eval_context)
    item_var = node.config.get("as", "item")
    merge_strategy = node.config.get("merge", "collect_list")

    branch_results = []
    for item in items:
        # Each iteration gets its own state branch and item binding.
        branch_context = {
            **eval_context,
            item_var: item,
            "state": self._state.create_branch(),
        }
        branch_results.append(
            await self._run_single_agent(node, branch_context, parent_block_id)
        )

    # Fold the per-item results using the configured merge strategy.
    merged = get_merge_strategy(merge_strategy).merge(branch_results)
    if node.output:
        self._state[node.output] = merged
    return merged
501
+
502
def _get_effective_middleware(
    self,
    node: NodeSpec,
) -> "MiddlewareChain | None":
    """Resolve the middleware chain effective for *node*.

    Node-level middleware is merged in front of the workflow/context
    chain so it runs first (higher priority). When the node declares no
    middleware, the base chain is returned unchanged.
    """
    from ..middleware import MiddlewareChain

    # No node-specific middleware: use the workflow/context chain as-is.
    if not node.middleware:
        return self.middleware

    chain = MiddlewareChain()

    # Node middleware first — it takes precedence.
    for mw in node.middleware:
        chain.use(mw)

    # Then the workflow/context middleware (lower priority).
    if self.middleware:
        for mw in self.middleware.middlewares:
            chain.use(mw)

    return chain
532
+
533
async def _run_single_agent(
    self,
    node: NodeSpec,
    eval_context: dict[str, Any],
    parent_block_id: str,
) -> Any:
    """Execute one agent with middleware hooks and block nesting.

    The sub-agent's emit calls go to the same ContextVar queue, so they
    automatically flow to the parent's run() yield. parent_block_id is
    installed via ContextVar so every child block inherits it as its
    parent_id.

    Args:
        node: Node specification.
        eval_context: Evaluation context.
        parent_block_id: Block ID used for parent-child nesting.
    """
    resolved_inputs = self.evaluator.resolve_inputs(node.inputs, eval_context)

    effective_middleware = self._get_effective_middleware(node)

    # Context dict handed to every middleware hook.
    mw_context = {
        "session_id": self.ctx.session_id,
        "invocation_id": self.ctx.invocation_id,
        "parent_agent_id": self.workflow.spec.name,
        "child_agent_id": node.agent,
        "node_id": node.id,
        "parent_block_id": parent_block_id,
        "has_node_middleware": bool(node.middleware),
    }

    # === Middleware: on_subagent_start ===
    if effective_middleware:
        start_result = await effective_middleware.process_subagent_start(
            self.workflow.spec.name,
            node.agent,
            "embedded",  # Workflow nodes are embedded execution
            mw_context,
        )
        if start_result.action == HookAction.SKIP:
            logger.info(f"SubAgent {node.agent} skipped by middleware")
            return {"skipped": True, "message": start_result.message}

    # Install parent_block_id via ContextVar — the key mechanism that
    # makes child blocks nest under this node's block.
    token = set_parent_id(parent_block_id)

    try:
        # Child context for the sub-agent, carrying the effective
        # middleware (parent_block_id is already set above).
        child_ctx = self.ctx.create_child(
            agent_id=node.agent,
            middleware=effective_middleware,
        )

        agent = self.agent_factory.create(
            agent_type=node.agent,
            ctx=child_ctx,
        )

        # Drain the agent's generator completely (partial consumption
        # causes ContextVar issues), capturing any session_end result.
        result = None
        try:
            async for response in agent.run(resolved_inputs):
                if hasattr(response, 'type') and response.type == "session_end" and response.data:
                    result = response.data.get("result")
        except GeneratorExit:
            pass  # Generator was closed early, that's ok

        # Results stored on the agent instance (WorkflowNodeAgent pattern):
        # typed _outputs dict wins, legacy _result is the fallback.
        if hasattr(agent, '_outputs') and agent._outputs:
            result = agent._outputs
        elif result is None and hasattr(agent, '_result'):
            result = agent._result

        # === Middleware: on_subagent_end ===
        if effective_middleware:
            await effective_middleware.process_subagent_end(
                self.workflow.spec.name,
                node.agent,
                result,
                mw_context,
            )

        return result

    finally:
        # Restore the previous parent_id no matter how we exit.
        reset_parent_id(token)
629
+
630
async def _execute_condition_node(
    self,
    node: NodeSpec,
    eval_context: dict[str, Any],
    dag: DAGExecutor,
) -> None:
    """Evaluate a condition node and skip the branch not taken."""
    taken_then = self.evaluator.evaluate_condition(
        node.expression, eval_context
    )

    # Skip whichever branch the condition rules out.
    skipped_branch = node.else_node if taken_then else node.then_node
    if skipped_branch:
        dag.mark_skipped(skipped_branch)

    dag.mark_completed(node.id)
651
+
652
def stop(self) -> None:
    """Abort execution by setting the context's self-abort event."""
    self.ctx.abort_self.set()
655
+
656
@property
def state(self) -> WorkflowState:
    """Current workflow state (read-only view of the internal store)."""
    return self._state