yamlgraph 0.3.9__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- examples/__init__.py +1 -0
- examples/codegen/__init__.py +5 -0
- examples/codegen/models/__init__.py +13 -0
- examples/codegen/models/schemas.py +76 -0
- examples/codegen/tests/__init__.py +1 -0
- examples/codegen/tests/test_ai_helpers.py +235 -0
- examples/codegen/tests/test_ast_analysis.py +174 -0
- examples/codegen/tests/test_code_analysis.py +134 -0
- examples/codegen/tests/test_code_context.py +301 -0
- examples/codegen/tests/test_code_nav.py +89 -0
- examples/codegen/tests/test_dependency_tools.py +119 -0
- examples/codegen/tests/test_example_tools.py +185 -0
- examples/codegen/tests/test_git_tools.py +112 -0
- examples/codegen/tests/test_impl_agent_schemas.py +193 -0
- examples/codegen/tests/test_impl_agent_v4_graph.py +94 -0
- examples/codegen/tests/test_jedi_analysis.py +226 -0
- examples/codegen/tests/test_meta_tools.py +250 -0
- examples/codegen/tests/test_plan_discovery_prompt.py +98 -0
- examples/codegen/tests/test_syntax_tools.py +85 -0
- examples/codegen/tests/test_synthesize_prompt.py +94 -0
- examples/codegen/tests/test_template_tools.py +244 -0
- examples/codegen/tools/__init__.py +80 -0
- examples/codegen/tools/ai_helpers.py +420 -0
- examples/codegen/tools/ast_analysis.py +92 -0
- examples/codegen/tools/code_context.py +180 -0
- examples/codegen/tools/code_nav.py +52 -0
- examples/codegen/tools/dependency_tools.py +120 -0
- examples/codegen/tools/example_tools.py +188 -0
- examples/codegen/tools/git_tools.py +151 -0
- examples/codegen/tools/impl_executor.py +614 -0
- examples/codegen/tools/jedi_analysis.py +311 -0
- examples/codegen/tools/meta_tools.py +202 -0
- examples/codegen/tools/syntax_tools.py +26 -0
- examples/codegen/tools/template_tools.py +356 -0
- examples/fastapi_interview.py +167 -0
- examples/npc/api/__init__.py +1 -0
- examples/npc/api/app.py +100 -0
- examples/npc/api/routes/__init__.py +5 -0
- examples/npc/api/routes/encounter.py +182 -0
- examples/npc/api/session.py +330 -0
- examples/npc/demo.py +387 -0
- examples/npc/nodes/__init__.py +5 -0
- examples/npc/nodes/image_node.py +92 -0
- examples/npc/run_encounter.py +230 -0
- examples/shared/__init__.py +0 -0
- examples/shared/replicate_tool.py +238 -0
- examples/storyboard/__init__.py +1 -0
- examples/storyboard/generate_videos.py +335 -0
- examples/storyboard/nodes/__init__.py +12 -0
- examples/storyboard/nodes/animated_character_node.py +248 -0
- examples/storyboard/nodes/animated_image_node.py +138 -0
- examples/storyboard/nodes/character_node.py +162 -0
- examples/storyboard/nodes/image_node.py +118 -0
- examples/storyboard/nodes/replicate_tool.py +49 -0
- examples/storyboard/retry_images.py +118 -0
- scripts/demo_async_executor.py +212 -0
- scripts/demo_interview_e2e.py +200 -0
- scripts/demo_streaming.py +140 -0
- scripts/run_interview_demo.py +94 -0
- scripts/test_interrupt_fix.py +26 -0
- tests/__init__.py +1 -0
- tests/conftest.py +178 -0
- tests/integration/__init__.py +1 -0
- tests/integration/test_animated_storyboard.py +63 -0
- tests/integration/test_cli_commands.py +242 -0
- tests/integration/test_colocated_prompts.py +139 -0
- tests/integration/test_map_demo.py +50 -0
- tests/integration/test_memory_demo.py +283 -0
- tests/integration/test_npc_api/__init__.py +1 -0
- tests/integration/test_npc_api/test_routes.py +357 -0
- tests/integration/test_npc_api/test_session.py +216 -0
- tests/integration/test_pipeline_flow.py +105 -0
- tests/integration/test_providers.py +163 -0
- tests/integration/test_resume.py +75 -0
- tests/integration/test_subgraph_integration.py +295 -0
- tests/integration/test_subgraph_interrupt.py +106 -0
- tests/unit/__init__.py +1 -0
- tests/unit/test_agent_nodes.py +355 -0
- tests/unit/test_async_executor.py +346 -0
- tests/unit/test_checkpointer.py +212 -0
- tests/unit/test_checkpointer_factory.py +212 -0
- tests/unit/test_cli.py +121 -0
- tests/unit/test_cli_package.py +81 -0
- tests/unit/test_compile_graph_map.py +132 -0
- tests/unit/test_conditions_routing.py +253 -0
- tests/unit/test_config.py +93 -0
- tests/unit/test_conversation_memory.py +276 -0
- tests/unit/test_database.py +145 -0
- tests/unit/test_deprecation.py +104 -0
- tests/unit/test_executor.py +172 -0
- tests/unit/test_executor_async.py +179 -0
- tests/unit/test_export.py +149 -0
- tests/unit/test_expressions.py +178 -0
- tests/unit/test_feature_brainstorm.py +194 -0
- tests/unit/test_format_prompt.py +145 -0
- tests/unit/test_generic_report.py +200 -0
- tests/unit/test_graph_commands.py +327 -0
- tests/unit/test_graph_linter.py +627 -0
- tests/unit/test_graph_loader.py +357 -0
- tests/unit/test_graph_schema.py +193 -0
- tests/unit/test_inline_schema.py +151 -0
- tests/unit/test_interrupt_node.py +182 -0
- tests/unit/test_issues.py +164 -0
- tests/unit/test_jinja2_prompts.py +85 -0
- tests/unit/test_json_extract.py +134 -0
- tests/unit/test_langsmith.py +600 -0
- tests/unit/test_langsmith_tools.py +204 -0
- tests/unit/test_llm_factory.py +109 -0
- tests/unit/test_llm_factory_async.py +118 -0
- tests/unit/test_loops.py +403 -0
- tests/unit/test_map_node.py +144 -0
- tests/unit/test_no_backward_compat.py +56 -0
- tests/unit/test_node_factory.py +348 -0
- tests/unit/test_passthrough_node.py +126 -0
- tests/unit/test_prompts.py +324 -0
- tests/unit/test_python_nodes.py +198 -0
- tests/unit/test_reliability.py +298 -0
- tests/unit/test_result_export.py +234 -0
- tests/unit/test_router.py +296 -0
- tests/unit/test_sanitize.py +99 -0
- tests/unit/test_schema_loader.py +295 -0
- tests/unit/test_shell_tools.py +229 -0
- tests/unit/test_state_builder.py +331 -0
- tests/unit/test_state_builder_map.py +104 -0
- tests/unit/test_state_config.py +197 -0
- tests/unit/test_streaming.py +307 -0
- tests/unit/test_subgraph.py +596 -0
- tests/unit/test_template.py +190 -0
- tests/unit/test_tool_call_integration.py +164 -0
- tests/unit/test_tool_call_node.py +178 -0
- tests/unit/test_tool_nodes.py +129 -0
- tests/unit/test_websearch.py +234 -0
- yamlgraph/__init__.py +35 -0
- yamlgraph/builder.py +110 -0
- yamlgraph/cli/__init__.py +159 -0
- yamlgraph/cli/__main__.py +6 -0
- yamlgraph/cli/commands.py +231 -0
- yamlgraph/cli/deprecation.py +92 -0
- yamlgraph/cli/graph_commands.py +541 -0
- yamlgraph/cli/validators.py +37 -0
- yamlgraph/config.py +67 -0
- yamlgraph/constants.py +70 -0
- yamlgraph/error_handlers.py +227 -0
- yamlgraph/executor.py +290 -0
- yamlgraph/executor_async.py +288 -0
- yamlgraph/graph_loader.py +451 -0
- yamlgraph/map_compiler.py +150 -0
- yamlgraph/models/__init__.py +36 -0
- yamlgraph/models/graph_schema.py +181 -0
- yamlgraph/models/schemas.py +124 -0
- yamlgraph/models/state_builder.py +236 -0
- yamlgraph/node_factory.py +768 -0
- yamlgraph/routing.py +87 -0
- yamlgraph/schema_loader.py +240 -0
- yamlgraph/storage/__init__.py +20 -0
- yamlgraph/storage/checkpointer.py +72 -0
- yamlgraph/storage/checkpointer_factory.py +123 -0
- yamlgraph/storage/database.py +320 -0
- yamlgraph/storage/export.py +269 -0
- yamlgraph/tools/__init__.py +1 -0
- yamlgraph/tools/agent.py +320 -0
- yamlgraph/tools/graph_linter.py +388 -0
- yamlgraph/tools/langsmith_tools.py +125 -0
- yamlgraph/tools/nodes.py +126 -0
- yamlgraph/tools/python_tool.py +179 -0
- yamlgraph/tools/shell.py +205 -0
- yamlgraph/tools/websearch.py +242 -0
- yamlgraph/utils/__init__.py +48 -0
- yamlgraph/utils/conditions.py +157 -0
- yamlgraph/utils/expressions.py +245 -0
- yamlgraph/utils/json_extract.py +104 -0
- yamlgraph/utils/langsmith.py +416 -0
- yamlgraph/utils/llm_factory.py +118 -0
- yamlgraph/utils/llm_factory_async.py +105 -0
- yamlgraph/utils/logging.py +104 -0
- yamlgraph/utils/prompts.py +171 -0
- yamlgraph/utils/sanitize.py +98 -0
- yamlgraph/utils/template.py +102 -0
- yamlgraph/utils/validators.py +181 -0
- yamlgraph-0.3.9.dist-info/METADATA +1105 -0
- yamlgraph-0.3.9.dist-info/RECORD +185 -0
- yamlgraph-0.3.9.dist-info/WHEEL +5 -0
- yamlgraph-0.3.9.dist-info/entry_points.txt +2 -0
- yamlgraph-0.3.9.dist-info/licenses/LICENSE +33 -0
- yamlgraph-0.3.9.dist-info/top_level.txt +4 -0
|
@@ -0,0 +1,627 @@
|
|
|
1
|
+
"""Unit tests for graph linter.
|
|
2
|
+
|
|
3
|
+
TDD: Red-Green-Refactor approach.
|
|
4
|
+
"""
|
|
5
|
+
|
|
6
|
+
from pathlib import Path
|
|
7
|
+
|
|
8
|
+
import pytest
|
|
9
|
+
import yaml
|
|
10
|
+
|
|
11
|
+
from yamlgraph.tools.graph_linter import (
|
|
12
|
+
LintIssue,
|
|
13
|
+
LintResult,
|
|
14
|
+
check_edge_coverage,
|
|
15
|
+
check_node_types,
|
|
16
|
+
check_prompt_files,
|
|
17
|
+
check_state_declarations,
|
|
18
|
+
check_tool_references,
|
|
19
|
+
lint_graph,
|
|
20
|
+
)
|
|
21
|
+
|
|
22
|
+
# --- Fixtures ---
|
|
23
|
+
|
|
24
|
+
|
|
25
|
+
@pytest.fixture
def temp_graph_dir(tmp_path):
    """Provide a temporary workspace containing an empty ``prompts/`` folder."""
    (tmp_path / "prompts").mkdir()
    return tmp_path
|
|
31
|
+
|
|
32
|
+
|
|
33
|
+
def write_graph(tmp_path: Path, content: dict) -> Path:
    """Serialize *content* as YAML to ``tmp_path/test-graph.yaml`` and return the path."""
    destination = tmp_path / "test-graph.yaml"
    with destination.open("w") as handle:
        yaml.dump(content, handle)
    return destination
|
|
39
|
+
|
|
40
|
+
|
|
41
|
+
def write_prompt(tmp_path: Path, name: str, content: str = "system: Test\nuser: Test"):
    """Create a prompt YAML file under ``tmp_path/prompts``.

    Generalized over the original helper: *name* may contain any number of
    ``/`` separators (the original only handled a single level of nesting
    and would silently write ``"a/b/c"`` to ``a/b.yaml``).

    Args:
        tmp_path: Root directory; prompts live in ``tmp_path/prompts``.
        name: Prompt name, e.g. ``"test"`` or ``"code-analysis/analyzer"``.
        content: File body; defaults to a minimal system/user prompt.

    Returns:
        Path to the written prompt file (previous versions returned None;
        no caller relied on the return value, so this is backward compatible).
    """
    prompts_dir = tmp_path / "prompts"
    # Split off the final component; everything before it is subdirectories.
    *subdirs, stem = name.split("/")
    prompt_path = prompts_dir.joinpath(*subdirs, f"{stem}.yaml")
    # parents=True creates prompts/ and any nested subdirs in one call.
    prompt_path.parent.mkdir(parents=True, exist_ok=True)
    prompt_path.write_text(content)
    return prompt_path
|
|
57
|
+
|
|
58
|
+
|
|
59
|
+
# --- Test LintIssue and LintResult models ---
|
|
60
|
+
|
|
61
|
+
|
|
62
|
+
class TestLintModels:
    """Validate the Pydantic models used to report lint findings."""

    def test_lint_issue_creation(self):
        created = LintIssue(
            severity="error",
            code="E001",
            message="Missing state declaration",
        )
        assert created.severity == "error"
        assert created.code == "E001"
        assert created.fix is None

    def test_lint_issue_with_fix(self):
        with_fix = LintIssue(
            severity="warning",
            code="W001",
            message="Unused tool",
            fix="Remove tool 'unused_tool' from tools section",
        )
        assert with_fix.fix is not None

    def test_lint_result_valid(self):
        clean = LintResult(file="test.yaml", issues=[], valid=True)
        assert clean.valid is True
        assert not clean.issues

    def test_lint_result_with_errors(self):
        failing = LintResult(
            file="test.yaml",
            issues=[LintIssue(severity="error", code="E001", message="Test error")],
            valid=False,
        )
        assert failing.valid is False
        assert len(failing.issues) == 1
|
|
104
|
+
|
|
105
|
+
|
|
106
|
+
# --- Test check_state_declarations ---
|
|
107
|
+
|
|
108
|
+
|
|
109
|
+
class TestCheckStateDeclarations:
    """Verify that undeclared state variables are reported."""

    def test_valid_state_declaration(self, temp_graph_dir):
        """A graph declaring all referenced state should produce no errors."""
        spec = {
            "version": "1.0",
            "name": "test",
            "state": {"path": "str", "count": "int"},
            "nodes": {
                "step1": {
                    "type": "llm",
                    "prompt": "test",
                    "state_key": "output",
                }
            },
            "edges": [
                {"from": "START", "to": "step1"},
                {"from": "step1", "to": "END"},
            ],
        }
        write_prompt(temp_graph_dir, "test")
        path = write_graph(temp_graph_dir, spec)

        findings = check_state_declarations(path, temp_graph_dir)
        assert not [f for f in findings if f.severity == "error"]

    def test_missing_state_for_prompt_variable(self, temp_graph_dir):
        """A prompt referencing {path} with no state declaration must error."""
        spec = {
            "version": "1.0",
            "name": "test",
            # Deliberately no 'state' section.
            "nodes": {
                "step1": {
                    "type": "llm",
                    "prompt": "test",
                    "state_key": "output",
                }
            },
            "edges": [
                {"from": "START", "to": "step1"},
                {"from": "step1", "to": "END"},
            ],
        }
        # The prompt body uses the undeclared {path} variable.
        write_prompt(temp_graph_dir, "test", "system: Analyze\nuser: Check {path}")
        path = write_graph(temp_graph_dir, spec)

        findings = check_state_declarations(path, temp_graph_dir)
        errs = [f for f in findings if f.severity == "error"]
        assert errs
        assert any("path" in f.message for f in errs)

    def test_missing_state_for_shell_tool_variable(self, temp_graph_dir):
        """A shell tool's {path} must be declared when no agent supplies it."""
        spec = {
            "version": "1.0",
            "name": "test",
            # 'path' is never declared in state.
            "tools": {
                "run_check": {
                    "type": "shell",
                    "command": "ruff check {path}",
                    "description": "Run ruff",
                }
            },
            "nodes": {
                "step1": {
                    # An llm node (not agent): tool variables must come from state.
                    "type": "llm",
                    "prompt": "test",
                    "state_key": "output",
                }
            },
            "edges": [
                {"from": "START", "to": "step1"},
                {"from": "step1", "to": "END"},
            ],
        }
        write_prompt(temp_graph_dir, "test")
        path = write_graph(temp_graph_dir, spec)

        findings = check_state_declarations(path, temp_graph_dir)
        errs = [f for f in findings if f.severity == "error"]
        assert errs
        assert any("path" in f.message for f in errs)

    def test_agent_tool_variables_not_required_in_state(self, temp_graph_dir):
        """Tools attached to agent nodes receive their args from the LLM."""
        spec = {
            "version": "1.0",
            "name": "test",
            # 'path' is undeclared, which is acceptable for agent-owned tools.
            "tools": {
                "run_check": {
                    "type": "shell",
                    "command": "ruff check {path}",
                    "description": "Run ruff",
                }
            },
            "nodes": {
                "step1": {
                    # Agent node: the LLM fills in tool arguments at runtime.
                    "type": "agent",
                    "prompt": "test",
                    "tools": ["run_check"],
                    "state_key": "output",
                }
            },
            "edges": [
                {"from": "START", "to": "step1"},
                {"from": "step1", "to": "END"},
            ],
        }
        write_prompt(temp_graph_dir, "test")
        path = write_graph(temp_graph_dir, spec)

        findings = check_state_declarations(path, temp_graph_dir)
        # No errors expected: agent tools are exempt from state declarations.
        assert not [f for f in findings if f.severity == "error"]
|
|
230
|
+
|
|
231
|
+
|
|
232
|
+
# --- Test check_tool_references ---
|
|
233
|
+
|
|
234
|
+
|
|
235
|
+
class TestCheckToolReferences:
    """Verify detection of undefined and unused tool references."""

    def test_valid_tool_reference(self, temp_graph_dir):
        """Referencing a tool that exists should produce no errors."""
        spec = {
            "version": "1.0",
            "name": "test",
            "tools": {
                "my_tool": {
                    "type": "shell",
                    "command": "echo hello",
                    "description": "Test tool",
                }
            },
            "nodes": {
                "step1": {
                    "type": "agent",
                    "prompt": "test",
                    "tools": ["my_tool"],
                    "state_key": "output",
                }
            },
            "edges": [
                {"from": "START", "to": "step1"},
                {"from": "step1", "to": "END"},
            ],
        }
        write_prompt(temp_graph_dir, "test")
        path = write_graph(temp_graph_dir, spec)

        findings = check_tool_references(path)
        assert not [f for f in findings if f.severity == "error"]

    def test_undefined_tool_reference(self, temp_graph_dir):
        """Referencing a tool that was never defined must error."""
        spec = {
            "version": "1.0",
            "name": "test",
            "tools": {
                "defined_tool": {
                    "type": "shell",
                    "command": "echo hello",
                    "description": "Test tool",
                }
            },
            "nodes": {
                "step1": {
                    "type": "agent",
                    "prompt": "test",
                    # 'undefined_tool' has no definition in the tools section.
                    "tools": ["undefined_tool"],
                    "state_key": "output",
                }
            },
            "edges": [
                {"from": "START", "to": "step1"},
                {"from": "step1", "to": "END"},
            ],
        }
        write_prompt(temp_graph_dir, "test")
        path = write_graph(temp_graph_dir, spec)

        findings = check_tool_references(path)
        errs = [f for f in findings if f.severity == "error"]
        assert errs
        assert any("undefined_tool" in f.message for f in errs)

    def test_unused_tool_warning(self, temp_graph_dir):
        """A tool defined but referenced by no node should warn."""
        spec = {
            "version": "1.0",
            "name": "test",
            "tools": {
                "used_tool": {
                    "type": "shell",
                    "command": "echo used",
                    "description": "Used tool",
                },
                "unused_tool": {
                    "type": "shell",
                    "command": "echo unused",
                    "description": "Unused tool",
                },
            },
            "nodes": {
                "step1": {
                    "type": "agent",
                    "prompt": "test",
                    # Only 'used_tool' is wired up; 'unused_tool' is orphaned.
                    "tools": ["used_tool"],
                    "state_key": "output",
                }
            },
            "edges": [
                {"from": "START", "to": "step1"},
                {"from": "step1", "to": "END"},
            ],
        }
        write_prompt(temp_graph_dir, "test")
        path = write_graph(temp_graph_dir, spec)

        findings = check_tool_references(path)
        warns = [f for f in findings if f.severity == "warning"]
        assert warns
        assert any("unused_tool" in f.message for f in warns)
|
|
340
|
+
|
|
341
|
+
|
|
342
|
+
# --- Test check_prompt_files ---
|
|
343
|
+
|
|
344
|
+
|
|
345
|
+
class TestCheckPromptFiles:
    """Verify detection of prompt files that are referenced but missing."""

    def test_valid_prompt_exists(self, temp_graph_dir):
        """A node whose prompt file is present should lint cleanly."""
        spec = {
            "version": "1.0",
            "name": "test",
            "nodes": {
                "step1": {
                    "type": "llm",
                    "prompt": "my_prompt",
                    "state_key": "output",
                }
            },
            "edges": [
                {"from": "START", "to": "step1"},
                {"from": "step1", "to": "END"},
            ],
        }
        write_prompt(temp_graph_dir, "my_prompt")
        path = write_graph(temp_graph_dir, spec)

        findings = check_prompt_files(path, temp_graph_dir)
        assert not [f for f in findings if f.severity == "error"]

    def test_missing_prompt_file(self, temp_graph_dir):
        """A node whose prompt file does not exist must error."""
        spec = {
            "version": "1.0",
            "name": "test",
            "nodes": {
                "step1": {
                    "type": "llm",
                    "prompt": "nonexistent_prompt",
                    "state_key": "output",
                }
            },
            "edges": [
                {"from": "START", "to": "step1"},
                {"from": "step1", "to": "END"},
            ],
        }
        # Intentionally skip creating the prompt file.
        path = write_graph(temp_graph_dir, spec)

        findings = check_prompt_files(path, temp_graph_dir)
        errs = [f for f in findings if f.severity == "error"]
        assert errs
        assert any("nonexistent_prompt" in f.message for f in errs)

    def test_nested_prompt_path(self, temp_graph_dir):
        """Prompt names with subdirectories (e.g. 'code-analysis/analyzer') resolve."""
        spec = {
            "version": "1.0",
            "name": "test",
            "nodes": {
                "step1": {
                    "type": "llm",
                    "prompt": "code-analysis/analyzer",
                    "state_key": "output",
                }
            },
            "edges": [
                {"from": "START", "to": "step1"},
                {"from": "step1", "to": "END"},
            ],
        }
        write_prompt(temp_graph_dir, "code-analysis/analyzer")
        path = write_graph(temp_graph_dir, spec)

        findings = check_prompt_files(path, temp_graph_dir)
        assert not [f for f in findings if f.severity == "error"]
|
|
420
|
+
|
|
421
|
+
|
|
422
|
+
# --- Test check_edge_coverage ---
|
|
423
|
+
|
|
424
|
+
|
|
425
|
+
class TestCheckEdgeCoverage:
    """Verify detection of nodes the edge list leaves disconnected."""

    def test_all_nodes_reachable(self, temp_graph_dir):
        """A fully connected START→…→END chain should produce no warnings."""
        spec = {
            "version": "1.0",
            "name": "test",
            "nodes": {
                "step1": {"type": "llm", "prompt": "test", "state_key": "a"},
                "step2": {"type": "llm", "prompt": "test", "state_key": "b"},
            },
            "edges": [
                {"from": "START", "to": "step1"},
                {"from": "step1", "to": "step2"},
                {"from": "step2", "to": "END"},
            ],
        }
        path = write_graph(temp_graph_dir, spec)

        findings = check_edge_coverage(path)
        assert not [f for f in findings if f.severity == "warning"]

    def test_unreachable_node(self, temp_graph_dir):
        """A node appearing in no edge should be flagged as a warning."""
        spec = {
            "version": "1.0",
            "name": "test",
            "nodes": {
                "step1": {"type": "llm", "prompt": "test", "state_key": "a"},
                # 'orphan' is defined but no edge ever reaches it.
                "orphan": {
                    "type": "llm",
                    "prompt": "test",
                    "state_key": "b",
                },
            },
            "edges": [
                {"from": "START", "to": "step1"},
                {"from": "step1", "to": "END"},
            ],
        }
        path = write_graph(temp_graph_dir, spec)

        findings = check_edge_coverage(path)
        warns = [f for f in findings if f.severity == "warning"]
        assert warns
        assert any("orphan" in f.message for f in warns)

    def test_no_path_to_end(self, temp_graph_dir):
        """A node with no outgoing route to END should be flagged."""
        spec = {
            "version": "1.0",
            "name": "test",
            "nodes": {
                "step1": {"type": "llm", "prompt": "test", "state_key": "a"},
                "dead_end": {"type": "llm", "prompt": "test", "state_key": "b"},
            },
            "edges": [
                {"from": "START", "to": "step1"},
                {"from": "step1", "to": "dead_end"},
                # 'dead_end' never connects onward to END.
                {"from": "step1", "to": "END"},
            ],
        }
        path = write_graph(temp_graph_dir, spec)

        findings = check_edge_coverage(path)
        warns = [f for f in findings if f.severity == "warning"]
        assert warns
        assert any("dead_end" in f.message for f in warns)
|
|
496
|
+
|
|
497
|
+
|
|
498
|
+
# --- Test check_node_types ---
|
|
499
|
+
|
|
500
|
+
|
|
501
|
+
class TestCheckNodeTypes:
    """Verify that unknown node types are rejected."""

    def test_valid_node_types(self, temp_graph_dir):
        """Every supported node type should pass the check."""
        spec = {
            "version": "1.0",
            "name": "test",
            "nodes": {
                "a": {"type": "llm", "prompt": "test", "state_key": "a"},
                "b": {
                    "type": "router",
                    "prompt": "test",
                    "routes": {},
                    "state_key": "b",
                },
                "c": {"type": "agent", "prompt": "test", "tools": [], "state_key": "c"},
                "d": {"type": "map", "prompt": "test", "state_key": "d"},
                "e": {
                    "type": "python",
                    "module": "test",
                    "function": "fn",
                    "state_key": "e",
                },
            },
            "edges": [
                {"from": "START", "to": "a"},
                {"from": "a", "to": "END"},
            ],
        }
        path = write_graph(temp_graph_dir, spec)

        findings = check_node_types(path)
        assert not [f for f in findings if f.severity == "error"]

    def test_invalid_node_type(self, temp_graph_dir):
        """An unrecognized node type must be reported as an error."""
        spec = {
            "version": "1.0",
            "name": "test",
            "nodes": {
                "step1": {
                    # 'invalid_type' is not in the linter's allowed set.
                    "type": "invalid_type",
                    "prompt": "test",
                    "state_key": "output",
                }
            },
            "edges": [
                {"from": "START", "to": "step1"},
                {"from": "step1", "to": "END"},
            ],
        }
        path = write_graph(temp_graph_dir, spec)

        findings = check_node_types(path)
        errs = [f for f in findings if f.severity == "error"]
        assert errs
        assert any("invalid_type" in f.message for f in errs)
|
|
560
|
+
|
|
561
|
+
|
|
562
|
+
# --- Test full lint_graph function ---
|
|
563
|
+
|
|
564
|
+
|
|
565
|
+
class TestLintGraph:
    """Exercise lint_graph, the top-level entry point combining all checks."""

    def test_valid_graph_passes(self, temp_graph_dir):
        """A complete, well-formed graph should come back valid with no errors."""
        spec = {
            "version": "1.0",
            "name": "test",
            "description": "A test graph",
            "state": {"input": "str"},
            "nodes": {
                "step1": {
                    "type": "llm",
                    "prompt": "test",
                    "state_key": "output",
                }
            },
            "edges": [
                {"from": "START", "to": "step1"},
                {"from": "step1", "to": "END"},
            ],
        }
        write_prompt(temp_graph_dir, "test")
        path = write_graph(temp_graph_dir, spec)

        report = lint_graph(path, temp_graph_dir)
        assert report.valid is True
        assert not [f for f in report.issues if f.severity == "error"]

    def test_multiple_issues_detected(self, temp_graph_dir):
        """A graph with several distinct problems should report all of them."""
        spec = {
            "version": "1.0",
            "name": "test",
            # Issues baked in: no description (warning) and no state for {path}.
            "tools": {
                "unused": {"type": "shell", "command": "echo", "description": "x"},
            },
            "nodes": {
                "step1": {
                    "type": "invalid",  # unknown node type -> error
                    "prompt": "missing_prompt",  # prompt file absent -> error
                    "tools": ["undefined"],  # tool not defined -> error
                    "state_key": "output",
                },
                "orphan": {  # no edge reaches this node -> warning
                    "type": "llm",
                    "prompt": "test",
                    "state_key": "orphan",
                },
            },
            "edges": [
                {"from": "START", "to": "step1"},
                {"from": "step1", "to": "END"},
            ],
        }
        path = write_graph(temp_graph_dir, spec)

        report = lint_graph(path, temp_graph_dir)
        assert report.valid is False
        assert len(report.issues) >= 3  # several independent findings expected
|