yamlgraph-0.1.1-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of yamlgraph might be problematic.
Files changed (111)
  1. examples/__init__.py +1 -0
  2. examples/storyboard/__init__.py +1 -0
  3. examples/storyboard/generate_videos.py +335 -0
  4. examples/storyboard/nodes/__init__.py +10 -0
  5. examples/storyboard/nodes/animated_character_node.py +248 -0
  6. examples/storyboard/nodes/animated_image_node.py +138 -0
  7. examples/storyboard/nodes/character_node.py +162 -0
  8. examples/storyboard/nodes/image_node.py +118 -0
  9. examples/storyboard/nodes/replicate_tool.py +238 -0
  10. examples/storyboard/retry_images.py +118 -0
  11. tests/__init__.py +1 -0
  12. tests/conftest.py +178 -0
  13. tests/integration/__init__.py +1 -0
  14. tests/integration/test_animated_storyboard.py +63 -0
  15. tests/integration/test_cli_commands.py +242 -0
  16. tests/integration/test_map_demo.py +50 -0
  17. tests/integration/test_memory_demo.py +281 -0
  18. tests/integration/test_pipeline_flow.py +105 -0
  19. tests/integration/test_providers.py +163 -0
  20. tests/integration/test_resume.py +75 -0
  21. tests/unit/__init__.py +1 -0
  22. tests/unit/test_agent_nodes.py +200 -0
  23. tests/unit/test_checkpointer.py +212 -0
  24. tests/unit/test_cli.py +121 -0
  25. tests/unit/test_cli_package.py +81 -0
  26. tests/unit/test_compile_graph_map.py +132 -0
  27. tests/unit/test_conditions_routing.py +253 -0
  28. tests/unit/test_config.py +93 -0
  29. tests/unit/test_conversation_memory.py +270 -0
  30. tests/unit/test_database.py +145 -0
  31. tests/unit/test_deprecation.py +104 -0
  32. tests/unit/test_executor.py +60 -0
  33. tests/unit/test_executor_async.py +179 -0
  34. tests/unit/test_export.py +150 -0
  35. tests/unit/test_expressions.py +178 -0
  36. tests/unit/test_format_prompt.py +145 -0
  37. tests/unit/test_generic_report.py +200 -0
  38. tests/unit/test_graph_commands.py +327 -0
  39. tests/unit/test_graph_loader.py +299 -0
  40. tests/unit/test_graph_schema.py +193 -0
  41. tests/unit/test_inline_schema.py +151 -0
  42. tests/unit/test_issues.py +164 -0
  43. tests/unit/test_jinja2_prompts.py +85 -0
  44. tests/unit/test_langsmith.py +319 -0
  45. tests/unit/test_llm_factory.py +109 -0
  46. tests/unit/test_llm_factory_async.py +118 -0
  47. tests/unit/test_loops.py +403 -0
  48. tests/unit/test_map_node.py +144 -0
  49. tests/unit/test_no_backward_compat.py +56 -0
  50. tests/unit/test_node_factory.py +225 -0
  51. tests/unit/test_prompts.py +166 -0
  52. tests/unit/test_python_nodes.py +198 -0
  53. tests/unit/test_reliability.py +298 -0
  54. tests/unit/test_result_export.py +234 -0
  55. tests/unit/test_router.py +296 -0
  56. tests/unit/test_sanitize.py +99 -0
  57. tests/unit/test_schema_loader.py +295 -0
  58. tests/unit/test_shell_tools.py +229 -0
  59. tests/unit/test_state_builder.py +331 -0
  60. tests/unit/test_state_builder_map.py +104 -0
  61. tests/unit/test_state_config.py +197 -0
  62. tests/unit/test_template.py +190 -0
  63. tests/unit/test_tool_nodes.py +129 -0
  64. yamlgraph/__init__.py +35 -0
  65. yamlgraph/builder.py +110 -0
  66. yamlgraph/cli/__init__.py +139 -0
  67. yamlgraph/cli/__main__.py +6 -0
  68. yamlgraph/cli/commands.py +232 -0
  69. yamlgraph/cli/deprecation.py +92 -0
  70. yamlgraph/cli/graph_commands.py +382 -0
  71. yamlgraph/cli/validators.py +37 -0
  72. yamlgraph/config.py +67 -0
  73. yamlgraph/constants.py +66 -0
  74. yamlgraph/error_handlers.py +226 -0
  75. yamlgraph/executor.py +275 -0
  76. yamlgraph/executor_async.py +122 -0
  77. yamlgraph/graph_loader.py +337 -0
  78. yamlgraph/map_compiler.py +138 -0
  79. yamlgraph/models/__init__.py +36 -0
  80. yamlgraph/models/graph_schema.py +141 -0
  81. yamlgraph/models/schemas.py +124 -0
  82. yamlgraph/models/state_builder.py +236 -0
  83. yamlgraph/node_factory.py +240 -0
  84. yamlgraph/routing.py +87 -0
  85. yamlgraph/schema_loader.py +160 -0
  86. yamlgraph/storage/__init__.py +17 -0
  87. yamlgraph/storage/checkpointer.py +72 -0
  88. yamlgraph/storage/database.py +320 -0
  89. yamlgraph/storage/export.py +269 -0
  90. yamlgraph/tools/__init__.py +1 -0
  91. yamlgraph/tools/agent.py +235 -0
  92. yamlgraph/tools/nodes.py +124 -0
  93. yamlgraph/tools/python_tool.py +178 -0
  94. yamlgraph/tools/shell.py +205 -0
  95. yamlgraph/utils/__init__.py +47 -0
  96. yamlgraph/utils/conditions.py +157 -0
  97. yamlgraph/utils/expressions.py +111 -0
  98. yamlgraph/utils/langsmith.py +308 -0
  99. yamlgraph/utils/llm_factory.py +118 -0
  100. yamlgraph/utils/llm_factory_async.py +105 -0
  101. yamlgraph/utils/logging.py +127 -0
  102. yamlgraph/utils/prompts.py +116 -0
  103. yamlgraph/utils/sanitize.py +98 -0
  104. yamlgraph/utils/template.py +102 -0
  105. yamlgraph/utils/validators.py +181 -0
  106. yamlgraph-0.1.1.dist-info/METADATA +854 -0
  107. yamlgraph-0.1.1.dist-info/RECORD +111 -0
  108. yamlgraph-0.1.1.dist-info/WHEEL +5 -0
  109. yamlgraph-0.1.1.dist-info/entry_points.txt +2 -0
  110. yamlgraph-0.1.1.dist-info/licenses/LICENSE +21 -0
  111. yamlgraph-0.1.1.dist-info/top_level.txt +3 -0
@@ -0,0 +1,269 @@
+"""JSON Export - Serialize pipeline results.
+
+Provides functions to export pipeline state and results
+to JSON format for sharing and archival.
+"""
+
+import json
+from datetime import datetime
+from pathlib import Path
+from typing import Any
+
+from pydantic import BaseModel
+
+from yamlgraph.config import OUTPUTS_DIR
+
+
+def export_state(
+    state: dict,
+    output_dir: str | Path | None = None,
+    prefix: str = "export",
+) -> Path:
+    """Export pipeline state to JSON file.
+
+    Args:
+        state: State dictionary to export
+        output_dir: Directory for output files (default: outputs/)
+        prefix: Filename prefix
+
+    Returns:
+        Path to the created file
+    """
+    if output_dir is None:
+        output_dir = OUTPUTS_DIR
+    output_path = Path(output_dir)
+    output_path.mkdir(parents=True, exist_ok=True)
+
+    timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
+    thread_id = state.get("thread_id", "unknown")
+    filename = f"{prefix}_{thread_id}_{timestamp}.json"
+
+    filepath = output_path / filename
+
+    # Convert state to JSON-serializable format
+    export_data = _serialize_state(state)
+
+    with open(filepath, "w") as f:
+        json.dump(export_data, f, indent=2, default=str)
+
+    return filepath
+
+
+def _serialize_state(state: dict) -> dict:
+    """Convert state to JSON-serializable format.
+
+    Handles Pydantic models and other complex types.
+
+    Args:
+        state: State dictionary
+
+    Returns:
+        JSON-serializable dictionary
+    """
+    result = {}
+
+    for key, value in state.items():
+        if isinstance(value, BaseModel):
+            result[key] = value.model_dump()
+        elif hasattr(value, "__dict__"):
+            result[key] = _serialize_object(value)
+        else:
+            result[key] = value
+
+    return result
+
+
+def _serialize_object(obj: Any) -> Any:
+    """Recursively serialize an object.
+
+    Args:
+        obj: Object to serialize
+
+    Returns:
+        JSON-serializable representation
+    """
+    if isinstance(obj, BaseModel):
+        return obj.model_dump()
+    elif isinstance(obj, dict):
+        return {k: _serialize_object(v) for k, v in obj.items()}
+    elif isinstance(obj, (list, tuple)):
+        return [_serialize_object(item) for item in obj]
+    elif hasattr(obj, "isoformat"):
+        return obj.isoformat()
+    else:
+        return obj
+
+
+def load_export(filepath: str | Path) -> dict:
+    """Load an exported JSON file.
+
+    Args:
+        filepath: Path to JSON file
+
+    Returns:
+        Loaded dictionary
+    """
+    with open(filepath) as f:
+        return json.load(f)
+
+
+def list_exports(
+    output_dir: str | Path = "outputs", prefix: str = "export"
+) -> list[Path]:
+    """List all export files in a directory.
+
+    Args:
+        output_dir: Directory to search
+        prefix: Filename prefix filter
+
+    Returns:
+        List of matching file paths, sorted by modification time
+    """
+    output_path = Path(output_dir)
+    if not output_path.exists():
+        return []
+
+    files = list(output_path.glob(f"{prefix}_*.json"))
+    return sorted(files, key=lambda f: f.stat().st_mtime, reverse=True)
+
+
+def export_summary(state: dict) -> dict:
+    """Create a summary export (without full content).
+
+    Useful for quick review of pipeline results.
+    Works generically with any Pydantic models in state.
+
+    Args:
+        state: Full state dictionary
+
+    Returns:
+        Summary dictionary with key information only
+    """
+    # Internal keys to skip
+    internal_keys = frozenset(
+        {"_route", "_loop_counts", "thread_id", "topic", "current_step", "error"}
+    )
+
+    summary = {
+        "thread_id": state.get("thread_id"),
+        "topic": state.get("topic"),
+        "current_step": state.get("current_step"),
+        "error": state.get("error"),
+    }
+
+    # Process all non-internal fields generically
+    for key, value in state.items():
+        if key in internal_keys or value is None:
+            continue
+
+        if isinstance(value, BaseModel):
+            # Extract scalar fields from any Pydantic model
+            summary[key] = _extract_scalar_summary(value)
+        elif isinstance(value, str):
+            # For strings, include presence only
+            summary[f"has_{key}"] = bool(value)
+
+    return summary
+
+
+def _extract_scalar_summary(model: BaseModel) -> dict[str, Any]:
+    """Extract scalar fields from a Pydantic model for summary.
+
+    Args:
+        model: Any Pydantic model
+
+    Returns:
+        Dict with scalar field names and values (strings truncated)
+    """
+    result = {}
+    for field_name, field_value in model.model_dump().items():
+        if isinstance(field_value, str):
+            # Truncate long strings
+            result[field_name] = (
+                field_value[:100] + "..." if len(field_value) > 100 else field_value
+            )
+        elif isinstance(field_value, (int, float, bool)):
+            result[field_name] = field_value
+        elif isinstance(field_value, list):
+            result[f"{field_name}_count"] = len(field_value)
+    return result
+
+
+def export_result(
+    state: dict,
+    export_config: dict,
+    base_path: str | Path = "outputs",
+) -> list[Path]:
+    """Export state fields to files.
+
+    Args:
+        state: Final graph state
+        export_config: Mapping of field -> export settings
+        base_path: Base directory for exports
+
+    Returns:
+        List of paths to exported files
+
+    Example config:
+        {
+            "final_summary": {"format": "markdown", "filename": "summary.md"},
+            "generated": {"format": "json", "filename": "content.json"},
+        }
+    """
+    base_path = Path(base_path)
+    thread_id = state.get("thread_id", "unknown")
+    output_dir = base_path / thread_id
+    output_dir.mkdir(parents=True, exist_ok=True)
+
+    exported = []
+
+    for field, settings in export_config.items():
+        if field not in state or state[field] is None:
+            continue
+
+        value = state[field]
+        filename = settings.get("filename", f"{field}.txt")
+        format_type = settings.get("format", "text")
+
+        file_path = output_dir / filename
+
+        if format_type == "json":
+            content = _serialize_to_json(value)
+            file_path.write_text(content)
+        elif format_type == "markdown":
+            content = _serialize_to_markdown(value)
+            file_path.write_text(content)
+        else:
+            file_path.write_text(str(value))
+
+        exported.append(file_path)
+
+    return exported
+
+
+def _serialize_to_json(value: Any) -> str:
+    """Serialize value to JSON string."""
+    if isinstance(value, BaseModel):
+        return value.model_dump_json(indent=2)
+    return json.dumps(value, default=str, indent=2)
+
+
+def _serialize_to_markdown(value: Any) -> str:
+    """Serialize value to Markdown string."""
+    if isinstance(value, BaseModel):
+        return _pydantic_to_markdown(value)
+    return str(value)
+
+
+def _pydantic_to_markdown(model: BaseModel) -> str:
+    """Convert Pydantic model to Markdown."""
+    lines = [f"# {model.__class__.__name__}", ""]
+    for field, value in model.model_dump().items():
+        if isinstance(value, list):
+            lines.append(f"## {field.replace('_', ' ').title()}")
+            for item in value:
+                lines.append(f"- {item}")
+            lines.append("")
+        else:
+            lines.append(f"**{field.replace('_', ' ').title()}**: {value}")
+    return "\n".join(lines)
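
For orientation, the following minimal sketch shows how these export helpers might be used together. It is illustrative only: the state keys (thread_id, summary) and the SummaryModel class are assumptions, not part of the package.

from pydantic import BaseModel

from yamlgraph.storage.export import export_state, export_summary, list_exports


class SummaryModel(BaseModel):  # hypothetical result model, for illustration only
    title: str
    points: list[str]


state = {
    "thread_id": "demo-123",  # hypothetical state contents
    "summary": SummaryModel(title="Demo", points=["first", "second"]),
}

path = export_state(state)        # writes outputs/export_demo-123_<timestamp>.json
overview = export_summary(state)  # scalar fields only; the list appears as points_count
recent = list_exports()           # export files, newest first
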
@@ -0,0 +1 @@
+"""Shell tool execution utilities."""
@@ -0,0 +1,235 @@
+"""Agent node factory for LLM-driven tool loops.
+
+This module provides the agent node type that allows the LLM to
+autonomously decide which tools to call until it has enough
+information to provide a final answer.
+"""
+
+from __future__ import annotations
+
+import logging
+from collections.abc import Callable
+from typing import Any
+
+import yaml
+from langchain_core.messages import HumanMessage, SystemMessage, ToolMessage
+
+from yamlgraph.config import PROMPTS_DIR
+from yamlgraph.tools.shell import ShellToolConfig, execute_shell_tool
+from yamlgraph.utils.llm_factory import create_llm
+
+logger = logging.getLogger(__name__)
+
+
+def build_langchain_tool(name: str, config: ShellToolConfig) -> Callable:
+    """Convert shell config to LangChain Tool.
+
+    Args:
+        name: Tool name for LLM to reference
+        config: Shell tool configuration
+
+    Returns:
+        LangChain-compatible tool function
+    """
+    import re
+
+    from langchain_core.tools import StructuredTool
+    from pydantic import Field, create_model
+
+    # Extract variable names from command template
+    var_names = re.findall(r"\{(\w+)\}", config.command)
+
+    # Create dynamic Pydantic model for tool args
+    if var_names:
+        fields = {
+            var: (str, Field(description=f"Value for {var}")) for var in var_names
+        }
+        ArgsModel = create_model(f"{name}_args", **fields)
+    else:
+        ArgsModel = None
+
+    def execute_tool_with_dict(**kwargs) -> str:
+        """Execute shell command with provided arguments."""
+        result = execute_shell_tool(config, kwargs)
+        if result.success:
+            return (
+                str(result.output).strip() if result.output is not None else "Success"
+            )
+        else:
+            return f"Error: {result.error}"
+
+    return StructuredTool.from_function(
+        func=execute_tool_with_dict,
+        name=name,
+        description=config.description,
+        args_schema=ArgsModel,
+    )
+
+
+def _load_prompt(prompt_name: str) -> tuple[str, str]:
+    """Load system and user prompts from YAML file.
+
+    Args:
+        prompt_name: Name of prompt file (without .yaml)
+
+    Returns:
+        Tuple of (system_prompt, user_template)
+    """
+    prompt_path = PROMPTS_DIR / f"{prompt_name}.yaml"
+    if not prompt_path.exists():
+        raise FileNotFoundError(f"Prompt file not found: {prompt_path}")
+
+    with open(prompt_path) as f:
+        prompt_config = yaml.safe_load(f)
+
+    return prompt_config.get("system", ""), prompt_config.get("user", "{input}")
+
+
+def create_agent_node(
+    node_name: str,
+    node_config: dict[str, Any],
+    tools: dict[str, ShellToolConfig],
+) -> Callable[[dict], dict]:
+    """Create an agent node that loops with tool calls.
+
+    The agent will:
+    1. Send the prompt to the LLM with available tools
+    2. If LLM returns tool calls, execute them and feed results back
+    3. Repeat until LLM returns without tool calls or max_iterations reached
+
+    Args:
+        node_name: Name of the node in the graph
+        node_config: Node configuration from YAML
+        tools: Registry of available tools
+
+    Returns:
+        Node function that runs the agent loop
+
+    Config options:
+        - tools: List of tool names to make available
+        - max_iterations: Max tool-call loops (default: 5)
+        - state_key: Key to store final answer (default: node_name)
+        - prompt: Prompt file name (default: "agent")
+        - tool_results_key: Optional key to store raw tool outputs
+    """
+    tool_names = node_config.get("tools", [])
+    max_iterations = node_config.get("max_iterations", 5)
+    state_key = node_config.get("state_key", node_name)
+    prompt_name = node_config.get("prompt", "agent")
+    tool_results_key = node_config.get("tool_results_key")
+
+    # Build LangChain tools from shell configs
+    lc_tools = [build_langchain_tool(name, tools[name]) for name in tool_names]
+    tool_lookup = {name: tools[name] for name in tool_names}
+
+    def node_fn(state: dict) -> dict:
+        """Execute the agent loop."""
+        # Load prompts - fail fast if missing
+        system_prompt, user_template = _load_prompt(prompt_name)
+
+        # Format user prompt with state - handle missing keys
+        import re
+
+        def replace_var(match):
+            key = match.group(1)
+            return str(state.get(key, f"{{{key}}}"))
+
+        user_prompt = re.sub(r"\{(\w+)\}", replace_var, user_template)
+
+        # Initialize messages - preserve existing if multi-turn
+        existing_messages = list(state.get("messages", []))
+        if existing_messages:
+            # Multi-turn: add new user message to existing conversation
+            messages = existing_messages + [HumanMessage(content=user_prompt)]
+        else:
+            # New conversation: start with system + user
+            messages = [
+                SystemMessage(content=system_prompt),
+                HumanMessage(content=user_prompt),
+            ]
+
+        # Track raw tool outputs for persistence
+        tool_results: list[dict] = []
+
+        # Get LLM with tools bound
+        llm = create_llm().bind_tools(lc_tools)
+
+        logger.info(
+            f"🤖 Starting agent loop: {node_name} (max {max_iterations} iterations)"
+        )
+        logger.debug(f"Tools available: {[t.name for t in lc_tools]}")
+        logger.debug(f"User prompt: {user_prompt[:100]}...")
+
+        for iteration in range(max_iterations):
+            logger.debug(f"Agent iteration {iteration + 1}/{max_iterations}")
+
+            # Get LLM response
+            response = llm.invoke(messages)
+            messages.append(response)
+
+            logger.debug(f"Response tool_calls: {response.tool_calls}")
+
+            # Check if LLM wants to call tools
+            if not response.tool_calls:
+                # Done - LLM finished reasoning
+                logger.info(f"✓ Agent completed after {iteration + 1} iterations")
+                result = {
+                    state_key: response.content,
+                    "current_step": node_name,
+                    "_agent_iterations": iteration + 1,
+                    "messages": messages,  # Return for accumulation
+                }
+                if tool_results_key and tool_results:
+                    result[tool_results_key] = tool_results
+                return result
+
+            # Execute tool calls
+            for tool_call in response.tool_calls:
+                tool_name = tool_call["name"]
+                tool_args = tool_call["args"]
+                tool_id = tool_call.get("id", f"call_{iteration}")
+
+                logger.info(f"🔧 Calling tool: {tool_name}({tool_args})")
+
+                # Execute the tool
+                tool_config = tool_lookup.get(tool_name)
+                if tool_config:
+                    result = execute_shell_tool(tool_config, tool_args)
+                    output = (
+                        str(result.output)
+                        if result.success
+                        else f"Error: {result.error}"
+                    )
+                    success = result.success
+                else:
+                    output = f"Error: Unknown tool '{tool_name}'"
+                    success = False
+
+                # Store raw tool result for persistence
+                tool_results.append(
+                    {
+                        "tool": tool_name,
+                        "args": tool_args,
+                        "output": output,
+                        "success": success,
+                    }
+                )
+
+                # Add tool result to messages
+                messages.append(ToolMessage(content=output, tool_call_id=tool_id))
+
+        # Hit max iterations
+        logger.warning(f"Agent hit max iterations ({max_iterations})")
+        last_content = messages[-1].content if hasattr(messages[-1], "content") else ""
+        result = {
+            state_key: last_content,
+            "current_step": node_name,
+            "_agent_iterations": max_iterations,
+            "_agent_limit_reached": True,
+            "messages": messages,  # Return for accumulation
+        }
+        if tool_results_key and tool_results:
+            result[tool_results_key] = tool_results
+        return result
+
+    return node_fn
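
As a rough illustration of how this factory could be wired up, here is a hedged sketch. The tool name, command, and config values are hypothetical; the ShellToolConfig keyword arguments (command, description) are inferred from the attributes referenced above; and actually invoking the node requires a configured LLM provider plus an agent.yaml prompt file under PROMPTS_DIR.

from yamlgraph.tools.agent import create_agent_node
from yamlgraph.tools.shell import ShellToolConfig

# Hypothetical shell tool the agent may call; {path} becomes a tool argument
tools = {
    "disk_usage": ShellToolConfig(
        command="du -sh {path}",
        description="Report disk usage for a path",
    ),
}

node_config = {
    "tools": ["disk_usage"],  # tools exposed to the LLM
    "max_iterations": 3,      # stop after 3 rounds of tool calls
    "state_key": "report",    # final answer lands under this key
    "prompt": "agent",        # resolved to PROMPTS_DIR / "agent.yaml"
}

agent = create_agent_node("inspect", node_config, tools)
# update = agent({"input": "How large is /tmp?"})  # needs an LLM configured via create_llm()
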
@@ -0,0 +1,124 @@
+"""Node factories for tool and agent nodes.
+
+This module provides functions to create graph nodes that execute
+shell tools, either deterministically (tool nodes) or via LLM
+decision-making (agent nodes).
+"""
+
+from __future__ import annotations
+
+import logging
+from collections.abc import Callable
+from typing import Any
+
+from yamlgraph.models.schemas import ErrorType, PipelineError
+from yamlgraph.tools.shell import ShellToolConfig, execute_shell_tool
+from yamlgraph.utils.expressions import resolve_template
+
+# Type alias for state - dynamic TypedDict at runtime
+GraphState = dict[str, Any]
+
+logger = logging.getLogger(__name__)
+
+
+def resolve_state_variable(template: str, state: dict[str, Any]) -> str:
+    """Resolve {state.path.to.value} to actual state value.
+
+    Note: Uses consolidated resolve_template from expressions module.
+
+    Args:
+        template: String with {state.key} or {state.nested.key} placeholders
+        state: Current graph state
+
+    Returns:
+        Resolved string value
+    """
+    value = resolve_template(template, state)
+    # resolve_template returns the template unchanged if not a state expression
+    if value is template:
+        return template
+    return str(value) if value is not None else ""
+
+
+def resolve_variables(
+    variables_config: dict[str, str],
+    state: dict[str, Any],
+) -> dict[str, Any]:
+    """Resolve all variable templates against state.
+
+    Args:
+        variables_config: Dict of {var_name: template_string}
+        state: Current graph state
+
+    Returns:
+        Dict of {var_name: resolved_value}
+    """
+    resolved = {}
+    for key, template in variables_config.items():
+        resolved[key] = resolve_state_variable(template, state)
+    return resolved
+
+
+def create_tool_node(
+    node_name: str,
+    node_config: dict[str, Any],
+    tools: dict[str, ShellToolConfig],
+) -> Callable[[GraphState], dict]:
+    """Create a node that executes a shell tool.
+
+    Args:
+        node_name: Name of the node in the graph
+        node_config: Node configuration from YAML
+        tools: Registry of available tools
+
+    Returns:
+        Node function that executes the tool
+
+    Raises:
+        KeyError: If tool name not in registry
+    """
+    tool_name = node_config["tool"]
+    tool_config = tools[tool_name]  # Raise KeyError if not found
+    state_key = node_config.get("state_key", node_name)
+    on_error = node_config.get("on_error", "fail")
+    variables_template = node_config.get("variables", {})
+
+    def node_fn(state: GraphState) -> dict:
+        """Execute the shell tool and return state update."""
+        # Resolve variables from state
+        variables = resolve_variables(variables_template, state)
+
+        logger.info(f"🔧 Executing tool: {tool_name}")
+        result = execute_shell_tool(tool_config, variables)
+
+        if not result.success:
+            logger.warning(f"Tool {tool_name} failed: {result.error}")
+
+            if on_error == "skip":
+                # Return with error tracked but don't raise
+                errors = list(state.get("errors") or [])
+                errors.append(
+                    PipelineError(
+                        node=node_name,
+                        type=ErrorType.UNKNOWN_ERROR,
+                        message=result.error or "Tool execution failed",
+                    )
+                )
+                return {
+                    state_key: None,
+                    "current_step": node_name,
+                    "errors": errors,
+                }
+            else:
+                # on_error == "fail" - raise exception
+                raise RuntimeError(
+                    f"Tool '{tool_name}' failed in node '{node_name}': {result.error}"
+                )
+
+        logger.info(f"✓ Tool {tool_name} completed")
+        return {
+            state_key: result.output,
+            "current_step": node_name,
+        }
+
+    return node_fn
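
A similar hedged sketch for the deterministic tool node follows; the tool, command, and state keys are hypothetical, and the ShellToolConfig arguments are assumed as in the previous example.

from yamlgraph.tools.nodes import create_tool_node
from yamlgraph.tools.shell import ShellToolConfig

# Hypothetical tool: list the directory captured in graph state
tools = {
    "list_dir": ShellToolConfig(
        command="ls {path}",
        description="List directory contents",
    ),
}

node_config = {
    "tool": "list_dir",
    "state_key": "listing",                    # tool output lands under this key
    "on_error": "skip",                        # record a PipelineError instead of raising
    "variables": {"path": "{state.workdir}"},  # resolved against state before execution
}

list_node = create_tool_node("list_workdir", node_config, tools)
update = list_node({"workdir": "."})  # -> {"listing": <command output>, "current_step": "list_workdir"}
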