RouteKitAI 0.1.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- routekitai/__init__.py +53 -0
- routekitai/cli/__init__.py +18 -0
- routekitai/cli/main.py +40 -0
- routekitai/cli/replay.py +80 -0
- routekitai/cli/run.py +95 -0
- routekitai/cli/serve.py +966 -0
- routekitai/cli/test_agent.py +178 -0
- routekitai/cli/trace.py +209 -0
- routekitai/cli/trace_analyze.py +120 -0
- routekitai/cli/trace_search.py +126 -0
- routekitai/core/__init__.py +58 -0
- routekitai/core/agent.py +325 -0
- routekitai/core/errors.py +49 -0
- routekitai/core/hooks.py +174 -0
- routekitai/core/memory.py +54 -0
- routekitai/core/message.py +132 -0
- routekitai/core/model.py +91 -0
- routekitai/core/policies.py +373 -0
- routekitai/core/policy.py +85 -0
- routekitai/core/policy_adapter.py +133 -0
- routekitai/core/runtime.py +1403 -0
- routekitai/core/tool.py +148 -0
- routekitai/core/tools.py +180 -0
- routekitai/evals/__init__.py +13 -0
- routekitai/evals/dataset.py +75 -0
- routekitai/evals/metrics.py +101 -0
- routekitai/evals/runner.py +184 -0
- routekitai/graphs/__init__.py +12 -0
- routekitai/graphs/executors.py +457 -0
- routekitai/graphs/graph.py +164 -0
- routekitai/memory/__init__.py +13 -0
- routekitai/memory/episodic.py +242 -0
- routekitai/memory/kv.py +34 -0
- routekitai/memory/retrieval.py +192 -0
- routekitai/memory/vector.py +700 -0
- routekitai/memory/working.py +66 -0
- routekitai/message.py +29 -0
- routekitai/model.py +48 -0
- routekitai/observability/__init__.py +21 -0
- routekitai/observability/analyzer.py +314 -0
- routekitai/observability/exporters/__init__.py +10 -0
- routekitai/observability/exporters/base.py +30 -0
- routekitai/observability/exporters/jsonl.py +81 -0
- routekitai/observability/exporters/otel.py +119 -0
- routekitai/observability/spans.py +111 -0
- routekitai/observability/streaming.py +117 -0
- routekitai/observability/trace.py +144 -0
- routekitai/providers/__init__.py +9 -0
- routekitai/providers/anthropic.py +227 -0
- routekitai/providers/azure_openai.py +243 -0
- routekitai/providers/local.py +196 -0
- routekitai/providers/openai.py +321 -0
- routekitai/py.typed +0 -0
- routekitai/sandbox/__init__.py +12 -0
- routekitai/sandbox/filesystem.py +131 -0
- routekitai/sandbox/network.py +142 -0
- routekitai/sandbox/permissions.py +70 -0
- routekitai/tool.py +33 -0
- routekitai-0.1.0.dist-info/METADATA +328 -0
- routekitai-0.1.0.dist-info/RECORD +64 -0
- routekitai-0.1.0.dist-info/WHEEL +5 -0
- routekitai-0.1.0.dist-info/entry_points.txt +2 -0
- routekitai-0.1.0.dist-info/licenses/LICENSE +21 -0
- routekitai-0.1.0.dist-info/top_level.txt +1 -0
routekitai/graphs/__init__.py
@@ -0,0 +1,12 @@
+"""Graph-based orchestration for RouteKit."""
+
+from routekitai.graphs.executors import GraphExecutor
+from routekitai.graphs.graph import Graph, GraphEdge, GraphNode, NodeType
+
+__all__ = [
+    "Graph",
+    "GraphNode",
+    "GraphEdge",
+    "NodeType",
+    "GraphExecutor",
+]
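
Editor's note: a minimal sketch (not part of the released wheel) of how these exports fit together. The "writer" agent and "echo" tool names are hypothetical and would need to be registered with a Runtime before execution; Graph, GraphNode, GraphEdge, and NodeType are defined in routekitai/graphs/graph.py below.

from routekitai.graphs import Graph, GraphEdge, GraphNode, NodeType

# Two-node linear workflow: a MODEL node feeding into a TOOL node.
graph = Graph(
    name="draft_and_echo",
    nodes=[
        GraphNode(id="draft", type=NodeType.MODEL, agent_name="writer"),  # hypothetical agent
        GraphNode(id="echo", type=NodeType.TOOL, tool_name="echo"),       # hypothetical tool
    ],
    edges=[GraphEdge(source="draft", target="echo")],
    entry_node="draft",
    exit_node="echo",
)

assert graph.validate_structure() == []  # structurally valid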
routekitai/graphs/executors.py
@@ -0,0 +1,457 @@
+"""Graph execution engine."""
+
+from typing import Any
+
+from pydantic import BaseModel, Field
+
+from routekitai.core.errors import RuntimeError as RouteKitRuntimeError
+from routekitai.core.runtime import Runtime
+from routekitai.graphs.graph import Graph, GraphNode, NodeType
+
+
+class GraphExecutionState(BaseModel):
+    """State during graph execution."""
+
+    current_node: str | None = Field(default=None, description="Current node ID")
+    visited_nodes: set[str] = Field(default_factory=set, description="Visited node IDs")
+    state: dict[str, Any] = Field(default_factory=dict, description="Graph state data")
+    execution_path: list[str] = Field(default_factory=list, description="Execution path (node IDs)")
+    completed: bool = Field(default=False, description="Whether execution is complete")
+
+
+class GraphExecutor(BaseModel):
+    """Executes graph-based agent workflows deterministically.
+
+    Graph execution is step-based and integrates with Runtime for tracing and replay.
+    """
+
+    runtime: Runtime = Field(..., description="Runtime for agent execution")
+    graph: Graph = Field(..., description="Graph to execute")
+    max_iterations: int = Field(default=100, description="Maximum execution iterations")
+
+    async def execute(
+        self, input_data: dict[str, Any] | None = None, **kwargs: Any
+    ) -> dict[str, Any]:
+        """Execute the graph with input data.
+
+        Args:
+            input_data: Initial input data for graph state
+            **kwargs: Additional execution parameters
+
+        Returns:
+            Execution result with final output and full state
+
+        Raises:
+            RouteKitRuntimeError: If graph execution fails
+        """
+        # Validate graph
+        errors = self.graph.validate_structure()
+        if errors:
+            raise RouteKitRuntimeError(f"Graph validation failed: {'; '.join(errors)}")
+
+        # Check for cycles before execution
+        cycle = self._detect_cycles()
+        if cycle:
+            raise RouteKitRuntimeError(
+                f"Graph contains a cycle: {' -> '.join(cycle)}",
+                context={"graph_name": self.graph.name},
+            )
+
+        # Validate entry node exists
+        entry_node = self.graph.get_node(self.graph.entry_node)
+        if not entry_node:
+            raise RouteKitRuntimeError(
+                f"Entry node '{self.graph.entry_node}' not found in graph '{self.graph.name}'",
+                context={"graph_name": self.graph.name, "entry_node": self.graph.entry_node},
+            )
+
+        # Initialize execution state
+        exec_state = GraphExecutionState(
+            current_node=self.graph.entry_node,
+            state=input_data or {},
+        )
+
+        iteration = 0
+        while iteration < self.max_iterations:
+            if exec_state.completed:
+                break
+
+            current_node_id = exec_state.current_node
+            if not current_node_id:
+                break
+
+            # Get current node
+            node = self.graph.get_node(current_node_id)
+            if not node:
+                raise RouteKitRuntimeError(
+                    f"Node '{current_node_id}' not found in graph '{self.graph.name}'",
+                    context={
+                        "graph_name": self.graph.name,
+                        "node_id": current_node_id,
+                        "execution_path": exec_state.execution_path,
+                        "iteration": iteration,
+                    },
+                )
+
+            # Mark node as visited (track both set and path for different purposes)
+            # Set is for cycle detection, path is for execution history
+            exec_state.visited_nodes.add(current_node_id)
+            exec_state.execution_path.append(current_node_id)
+
+            # Detect if we're revisiting a node (potential infinite loop, even if not a cycle)
+            if exec_state.execution_path.count(current_node_id) > 1:
+                # Warn but don't fail - might be intentional for retry logic
+                # Only fail if we've visited this node too many times
+                visit_count = exec_state.execution_path.count(current_node_id)
+                if visit_count > 10:  # Arbitrary threshold
+                    raise RouteKitRuntimeError(
+                        f"Node '{current_node_id}' visited {visit_count} times - possible infinite loop",
+                        context={
+                            "node_id": current_node_id,
+                            "graph_name": self.graph.name,
+                            "execution_path": exec_state.execution_path,
+                            "visit_count": visit_count,
+                        },
+                    )
+
+            # Execute node
+            try:
+                node_output = await self._execute_node(node, exec_state.state)
+            except RouteKitRuntimeError:
+                # Re-raise RouteKit errors as-is
+                raise
+            except Exception as e:
+                # Wrap unknown exceptions
+                raise RouteKitRuntimeError(
+                    f"Node '{current_node_id}' execution failed: {e}",
+                    context={
+                        "node_id": current_node_id,
+                        "node_type": node.type.value,
+                        "graph_name": self.graph.name,
+                    },
+                ) from e
+
+            # Update state with node output (safely merge, don't overwrite critical keys)
+            if node.output_mapping:
+                for output_key, state_key in node.output_mapping.items():
+                    if output_key in node_output:
+                        # Preserve existing state if key exists and is important
+                        if state_key in exec_state.state and state_key.startswith("_"):
+                            # Don't overwrite internal state keys
+                            continue
+                        exec_state.state[state_key] = node_output[output_key]
+            else:
+                # Default: merge all outputs into state, but preserve internal keys
+                for key, value in node_output.items():
+                    if not key.startswith("_"):  # Don't overwrite internal state
+                        exec_state.state[key] = value
+
+            # Determine next node(s)
+            next_node = self._get_next_node(node, exec_state.state)
+            if next_node:
+                exec_state.current_node = next_node
+            elif current_node_id == self.graph.exit_node:
+                exec_state.completed = True
+            else:
+                # No more edges, execution complete
+                exec_state.completed = True
+
+            iteration += 1
+
+        if iteration >= self.max_iterations:
+            raise RouteKitRuntimeError(
+                f"Graph execution exceeded max iterations ({self.max_iterations})"
+            )
+
+        return {
+            "output": exec_state.state.get("output", exec_state.state),
+            "state": exec_state.state,
+            "execution_path": exec_state.execution_path,
+            "visited_nodes": list(exec_state.visited_nodes),
+        }
+
+    async def _execute_node(self, node: GraphNode, state: dict[str, Any]) -> dict[str, Any]:
+        """Execute a single graph node.
+
+        Args:
+            node: Node to execute
+            state: Current graph state
+
+        Returns:
+            Node output data
+        """
+        # Prepare node inputs from state
+        node_inputs = {}
+        if node.input_mapping:
+            for state_key, input_key in node.input_mapping.items():
+                if state_key in state:
+                    node_inputs[input_key] = state[state_key]
+        else:
+            # Default: pass all state as input, but filter out internal keys
+            node_inputs = {k: v for k, v in state.items() if not k.startswith("_")}
+
+        if node.type == NodeType.MODEL:
+            return await self._execute_model_node(node, node_inputs)
+        elif node.type == NodeType.TOOL:
+            return await self._execute_tool_node(node, node_inputs)
+        elif node.type == NodeType.SUBGRAPH:
+            return await self._execute_subgraph_node(node, node_inputs)
+        elif node.type == NodeType.CONDITION:
+            return await self._execute_condition_node(node, node_inputs)
+        else:
+            raise RouteKitRuntimeError(f"Unknown node type: {node.type}")
+
+    async def _execute_model_node(self, node: GraphNode, inputs: dict[str, Any]) -> dict[str, Any]:
+        """Execute a model/agent node.
+
+        Args:
+            node: Model node
+            inputs: Node inputs
+
+        Returns:
+            Node output
+        """
+        if not node.agent_name:
+            raise RouteKitRuntimeError(f"Node '{node.id}': MODEL type requires agent_name")
+
+        if node.agent_name not in self.runtime.agents:
+            raise RouteKitRuntimeError(f"Agent '{node.agent_name}' not found in runtime")
+
+        # Extract prompt from inputs
+        # Try common input keys, fallback to string representation
+        prompt = inputs.get("prompt") or inputs.get("input") or inputs.get("text") or str(inputs)
+
+        # Execute agent
+        result = await self.runtime.run(node.agent_name, prompt)
+
+        return {
+            "output": result.output.content,
+            "messages": [m.model_dump() for m in result.messages],
+            "trace_id": result.trace_id,
+        }
+
+    async def _execute_tool_node(self, node: GraphNode, inputs: dict[str, Any]) -> dict[str, Any]:
+        """Execute a tool node.
+
+        This method handles tool execution within a graph workflow. It:
+        1. Locates the tool by name across all registered agents
+        2. Maps graph state inputs to tool arguments using the tool's input model schema
+        3. Executes the tool and converts the result to a format suitable for graph state
+
+        The input mapping logic tries multiple strategies:
+        - If inputs contain an "arguments" key, use it directly
+        - If inputs contain a "message" key, use it for tools expecting a message parameter
+        - If inputs contain an "output" key, use it as the message (common for echo tools)
+        - Otherwise, match inputs to tool input model fields by name
+        - As a fallback, use the first input value as the message
+
+        Args:
+            node: Tool node to execute
+            inputs: Node inputs from graph state (may be transformed via input_mapping)
+
+        Returns:
+            Node output dictionary with "result" and "output" keys
+
+        Raises:
+            RouteKitRuntimeError: If tool is not found or execution fails
+        """
+        if not node.tool_name:
+            raise RouteKitRuntimeError(f"Node '{node.id}': TOOL type requires tool_name")
+
+        # Find tool in any agent
+        tool = None
+        for agent in self.runtime.agents.values():
+            tool = next((t for t in agent.tools if t.name == node.tool_name), None)
+            if tool:
+                break
+
+        if not tool:
+            raise RouteKitRuntimeError(f"Tool '{node.tool_name}' not found")
+
+        # Execute tool using execute() method which handles input validation
+        # First, extract the actual tool arguments from inputs
+        tool_args = {}
+
+        if isinstance(inputs, dict):
+            # Check for explicit arguments key
+            if "arguments" in inputs and isinstance(inputs["arguments"], dict):
+                tool_args = inputs["arguments"]
+            else:
+                # Try to match input model fields
+                # For EchoTool, we need "message"
+                if "message" in inputs:
+                    tool_args = {"message": inputs["message"]}
+                elif "output" in inputs:
+                    # Previous node output - use as message for echo tool
+                    tool_args = {"message": str(inputs["output"])}
+                else:
+                    # Try to use inputs directly, filtering out non-tool keys
+                    # Get input model schema to see what fields are needed
+                    if hasattr(tool, "input_model") and tool.input_model:
+                        schema = tool.input_model.model_json_schema()
+                        required_fields = schema.get("properties", {}).keys()
+                        # Try to match inputs to required fields
+                        for field in required_fields:
+                            if field in inputs:
+                                tool_args[field] = inputs[field]
+                        # If no matches, use first value as message (common pattern)
+                        if not tool_args and inputs:
+                            first_value = list(inputs.values())[0]
+                            tool_args = {"message": str(first_value)}
+                    else:
+                        tool_args = inputs
+        else:
+            tool_args = {"message": str(inputs)}
+
+        # Execute tool
+        result = await tool.execute(**tool_args)
+
+        # Convert result to string if it's a Pydantic model
+        if hasattr(result, "model_dump"):
+            result_dict = result.model_dump()
+            # Extract the actual result value (e.g., "echoed" for EchoTool)
+            result = result_dict.get("echoed", result_dict.get("result", str(result)))
+        elif hasattr(result, "dict"):
+            result_dict = result.dict()
+            result = result_dict.get("echoed", result_dict.get("result", str(result)))
+
+        return {"result": result, "output": str(result)}
+
+    async def _execute_subgraph_node(
+        self, node: GraphNode, inputs: dict[str, Any]
+    ) -> dict[str, Any]:
+        """Execute a subgraph node.
+
+        Args:
+            node: Subgraph node
+            inputs: Node inputs
+
+        Returns:
+            Node output
+
+        Raises:
+            RouteKitRuntimeError: If subgraph not found or execution fails
+        """
+        if not node.subgraph_name:
+            raise RouteKitRuntimeError(f"Node '{node.id}': SUBGRAPH type requires subgraph_name")
+
+        # Check if subgraph is registered in runtime's graph registry
+        # For now, we'll look for a graph with the same name in the runtime's config
+        graph_registry = self.runtime.config.get("graph_registry", {})
+
+        if node.subgraph_name not in graph_registry:
+            raise RouteKitRuntimeError(
+                f"Subgraph '{node.subgraph_name}' not found in graph registry",
+                context={"node_id": node.id, "subgraph_name": node.subgraph_name},
+            )
+
+        subgraph = graph_registry[node.subgraph_name]
+
+        # Create a new executor for the subgraph
+        subgraph_executor = GraphExecutor(
+            runtime=self.runtime,
+            graph=subgraph,
+            max_iterations=self.max_iterations,
+        )
+
+        # Execute subgraph with inputs
+        try:
+            subgraph_result = await subgraph_executor.execute(input_data=inputs)
+            # Return the subgraph's output
+            return {
+                "output": subgraph_result.get("output"),
+                "state": subgraph_result.get("state", {}),
+            }
+        except Exception as e:
+            raise RouteKitRuntimeError(
+                f"Subgraph '{node.subgraph_name}' execution failed: {e}",
+                context={"node_id": node.id, "subgraph_name": node.subgraph_name},
+            ) from e
+
+    async def _execute_condition_node(
+        self, node: GraphNode, inputs: dict[str, Any]
+    ) -> dict[str, Any]:
+        """Execute a condition node.
+
+        Args:
+            node: Condition node
+            inputs: Node inputs
+
+        Returns:
+            Node output with selected edge
+        """
+        if not node.condition:
+            raise RouteKitRuntimeError(
+                f"Node '{node.id}': CONDITION type requires condition function"
+            )
+
+        # Evaluate condition
+        selected_edge = node.condition(inputs)
+
+        return {"selected_edge": selected_edge, "condition_result": selected_edge}
+
+    def _detect_cycles(self) -> list[str] | None:
+        """Detect cycles in the graph using DFS.
+
+        Returns:
+            List of node IDs forming a cycle, or None if no cycle found
+        """
+        visited: set[str] = set()
+        recursion_stack: set[str] = set()
+        path: list[str] = []
+
+        def dfs(node_id: str) -> list[str] | None:
+            visited.add(node_id)
+            recursion_stack.add(node_id)
+            path.append(node_id)
+
+            for edge in self.graph.get_outgoing_edges(node_id):
+                if edge.target not in visited:
+                    cycle = dfs(edge.target)
+                    if cycle:
+                        return cycle
+                elif edge.target in recursion_stack:
+                    # Cycle detected - find the cycle path
+                    cycle_start_index = path.index(edge.target)
+                    return path[cycle_start_index:] + [edge.target]
+
+            path.pop()
+            recursion_stack.remove(node_id)
+            return None
+
+        for node in self.graph.nodes:
+            if node.id not in visited:
+                cycle = dfs(node.id)
+                if cycle:
+                    return cycle
+        return None
+
+    def _get_next_node(self, node: GraphNode, state: dict[str, Any]) -> str | None:
+        """Get the next node to execute based on outgoing edges.
+
+        Args:
+            node: Current node
+            state: Current state
+
+        Returns:
+            Next node ID or None if no next node
+        """
+        outgoing_edges = self.graph.get_outgoing_edges(node.id)
+
+        if not outgoing_edges:
+            return None
+
+        # If condition node, use condition result
+        if node.type == NodeType.CONDITION:
+            selected_edge = state.get("selected_edge")
+            if selected_edge:
+                # Find edge with matching condition label
+                for edge in outgoing_edges:
+                    if edge.condition == selected_edge:
+                        return edge.target
+                # Fallback: use first edge
+                return outgoing_edges[0].target
+            return outgoing_edges[0].target if outgoing_edges else None
+
+        # For other nodes, use first outgoing edge (can be extended for parallel execution)
+        return outgoing_edges[0].target if outgoing_edges else None
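
Editor's note: to make the condition-routing logic above concrete, a small sketch follows (not part of the released wheel; the "approver" and "escalator" agent names are hypothetical and would need to be registered with the Runtime). The CONDITION node's callable returns an edge label, _execute_condition_node stores it in graph state as "selected_edge", and _get_next_node matches it against GraphEdge.condition.

from routekitai.graphs import Graph, GraphEdge, GraphNode, NodeType

def route_by_score(inputs: dict) -> str:
    # The returned label must match the `condition` field of an outgoing edge.
    return "high" if inputs.get("score", 0) >= 0.5 else "low"

graph = Graph(
    name="triage",
    nodes=[
        GraphNode(id="decide", type=NodeType.CONDITION, condition=route_by_score),
        GraphNode(id="approve", type=NodeType.MODEL, agent_name="approver"),
        GraphNode(id="escalate", type=NodeType.MODEL, agent_name="escalator"),
    ],
    edges=[
        GraphEdge(source="decide", target="approve", condition="high"),
        GraphEdge(source="decide", target="escalate", condition="low"),
    ],
    entry_node="decide",
)

# With a configured Runtime (not shown), execution follows the labelled edge:
#     result = await GraphExecutor(runtime=runtime, graph=graph).execute({"score": 0.8})
#     result["execution_path"]  # ["decide", "approve"]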
routekitai/graphs/graph.py
@@ -0,0 +1,164 @@
+"""Graph definition for agent orchestration."""
+
+from collections.abc import Callable
+from enum import Enum
+from typing import Any
+
+from pydantic import BaseModel, Field
+
+
+class NodeType(str, Enum):
+    """Type of graph node."""
+
+    MODEL = "model"  # Execute a model call
+    TOOL = "tool"  # Execute a tool
+    SUBGRAPH = "subgraph"  # Execute a nested graph
+    CONDITION = "condition"  # Conditional branching
+
+
+class GraphNode(BaseModel):
+    """Node in an agent orchestration graph.
+
+    Nodes represent execution units in the graph. They can be:
+    - Model nodes: Execute a model/agent
+    - Tool nodes: Execute a tool
+    - Subgraph nodes: Execute a nested graph
+    - Condition nodes: Branch based on state
+    """
+
+    id: str = Field(..., description="Unique node ID")
+    type: NodeType = Field(..., description="Node type")
+    agent_name: str | None = Field(default=None, description="Agent name (for MODEL nodes)")
+    tool_name: str | None = Field(default=None, description="Tool name (for TOOL nodes)")
+    subgraph_name: str | None = Field(
+        default=None, description="Subgraph name (for SUBGRAPH nodes)"
+    )
+    condition: Callable[[dict[str, Any]], str] | None = Field(
+        default=None, description="Condition function for CONDITION nodes (returns edge ID)"
+    )
+    config: dict[str, Any] = Field(default_factory=dict, description="Node configuration")
+    input_mapping: dict[str, str] = Field(
+        default_factory=dict, description="Map graph state keys to node inputs"
+    )
+    output_mapping: dict[str, str] = Field(
+        default_factory=dict, description="Map node outputs to graph state keys"
+    )
+
+
+class GraphEdge(BaseModel):
+    """Edge connecting nodes in a graph."""
+
+    source: str = Field(..., description="Source node ID")
+    target: str = Field(..., description="Target node ID")
+    condition: str | None = Field(
+        default=None, description="Optional condition label for conditional edges"
+    )
+
+
+class Graph(BaseModel):
+    """Graph-based orchestration workflow.
+
+    Graphs define explicit control flow between nodes, enabling:
+    - Sequential execution
+    - Parallel execution
+    - Conditional branching
+    - Nested subgraphs
+    """
+
+    name: str = Field(..., description="Graph name")
+    nodes: list[GraphNode] = Field(default_factory=list, description="Graph nodes")
+    edges: list[GraphEdge] = Field(default_factory=list, description="Graph edges")
+    entry_node: str = Field(..., description="Entry node ID")
+    exit_node: str | None = Field(default=None, description="Exit node ID (optional)")
+    state_schema: dict[str, Any] = Field(default_factory=dict, description="Schema for graph state")
+    config: dict[str, Any] = Field(default_factory=dict, description="Graph configuration")
+
+    def get_node(self, node_id: str) -> GraphNode | None:
+        """Get a node by ID.
+
+        Args:
+            node_id: Node ID
+
+        Returns:
+            GraphNode or None if not found
+        """
+        return next((n for n in self.nodes if n.id == node_id), None)
+
+    def get_outgoing_edges(self, node_id: str) -> list[GraphEdge]:
+        """Get all outgoing edges from a node.
+
+        Args:
+            node_id: Source node ID
+
+        Returns:
+            List of outgoing edges
+        """
+        return [e for e in self.edges if e.source == node_id]
+
+    def get_incoming_edges(self, node_id: str) -> list[GraphEdge]:
+        """Get all incoming edges to a node.
+
+        Args:
+            node_id: Target node ID
+
+        Returns:
+            List of incoming edges
+        """
+        return [e for e in self.edges if e.target == node_id]
+
+    def validate_structure(self) -> list[str]:
+        """Validate graph structure.
+
+        Returns:
+            List of validation errors (empty if valid)
+        """
+        errors = []
+
+        # Check entry node exists
+        if not self.get_node(self.entry_node):
+            errors.append(f"Entry node '{self.entry_node}' not found")
+
+        # Check exit node exists (if specified)
+        if self.exit_node and not self.get_node(self.exit_node):
+            errors.append(f"Exit node '{self.exit_node}' not found")
+
+        # Check all edges reference valid nodes
+        node_ids = {n.id for n in self.nodes}
+        for edge in self.edges:
+            if edge.source not in node_ids:
+                errors.append(f"Edge source '{edge.source}' not found")
+            if edge.target not in node_ids:
+                errors.append(f"Edge target '{edge.target}' not found")
+
+        # Check node types match their configuration
+        for node in self.nodes:
+            if node.type == NodeType.MODEL and not node.agent_name:
+                errors.append(f"Node '{node.id}': MODEL type requires agent_name")
+            elif node.type == NodeType.TOOL and not node.tool_name:
+                errors.append(f"Node '{node.id}': TOOL type requires tool_name")
+            elif node.type == NodeType.SUBGRAPH and not node.subgraph_name:
+                errors.append(f"Node '{node.id}': SUBGRAPH type requires subgraph_name")
+            elif node.type == NodeType.CONDITION and not node.condition:
+                errors.append(f"Node '{node.id}': CONDITION type requires condition function")
+
+        # Check for unreachable nodes (nodes with no incoming edges except entry node)
+        if self.nodes:
+            reachable_nodes = {self.entry_node}
+            # BFS from entry node to find all reachable nodes
+            queue = [self.entry_node]
+            while queue:
+                current = queue.pop(0)
+                for edge in self.get_outgoing_edges(current):
+                    if edge.target not in reachable_nodes:
+                        reachable_nodes.add(edge.target)
+                        queue.append(edge.target)
+
+            # Check for unreachable nodes (warn, but don't error - might be intentional)
+            all_node_ids = {n.id for n in self.nodes}
+            unreachable = all_node_ids - reachable_nodes
+            if unreachable and self.exit_node:
+                # Only warn if exit node is unreachable, otherwise it's just unused nodes
+                if self.exit_node in unreachable:
+                    errors.append(f"Exit node '{self.exit_node}' is unreachable from entry node")
+
+        return errors
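
Editor's note: a quick illustration of what validate_structure reports (editor's sketch, not part of the diff). A graph with a dangling edge and an unconfigured MODEL node yields one error string per problem:

from routekitai.graphs import Graph, GraphEdge, GraphNode, NodeType

broken = Graph(
    name="broken",
    nodes=[GraphNode(id="start", type=NodeType.MODEL)],   # missing agent_name
    edges=[GraphEdge(source="start", target="missing")],  # target node does not exist
    entry_node="start",
)

for err in broken.validate_structure():
    print(err)
# Edge target 'missing' not found
# Node 'start': MODEL type requires agent_name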
routekitai/memory/__init__.py
@@ -0,0 +1,13 @@
+"""Memory systems for routekitai agents."""
+
+from routekitai.core.memory import Memory
+from routekitai.memory.episodic import EpisodicMemory
+from routekitai.memory.retrieval import RetrievalMemory
+from routekitai.memory.working import WorkingMemory
+
+__all__ = [
+    "Memory",
+    "WorkingMemory",
+    "EpisodicMemory",
+    "RetrievalMemory",
+]