yamlgraph-0.3.9-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (185)
  1. examples/__init__.py +1 -0
  2. examples/codegen/__init__.py +5 -0
  3. examples/codegen/models/__init__.py +13 -0
  4. examples/codegen/models/schemas.py +76 -0
  5. examples/codegen/tests/__init__.py +1 -0
  6. examples/codegen/tests/test_ai_helpers.py +235 -0
  7. examples/codegen/tests/test_ast_analysis.py +174 -0
  8. examples/codegen/tests/test_code_analysis.py +134 -0
  9. examples/codegen/tests/test_code_context.py +301 -0
  10. examples/codegen/tests/test_code_nav.py +89 -0
  11. examples/codegen/tests/test_dependency_tools.py +119 -0
  12. examples/codegen/tests/test_example_tools.py +185 -0
  13. examples/codegen/tests/test_git_tools.py +112 -0
  14. examples/codegen/tests/test_impl_agent_schemas.py +193 -0
  15. examples/codegen/tests/test_impl_agent_v4_graph.py +94 -0
  16. examples/codegen/tests/test_jedi_analysis.py +226 -0
  17. examples/codegen/tests/test_meta_tools.py +250 -0
  18. examples/codegen/tests/test_plan_discovery_prompt.py +98 -0
  19. examples/codegen/tests/test_syntax_tools.py +85 -0
  20. examples/codegen/tests/test_synthesize_prompt.py +94 -0
  21. examples/codegen/tests/test_template_tools.py +244 -0
  22. examples/codegen/tools/__init__.py +80 -0
  23. examples/codegen/tools/ai_helpers.py +420 -0
  24. examples/codegen/tools/ast_analysis.py +92 -0
  25. examples/codegen/tools/code_context.py +180 -0
  26. examples/codegen/tools/code_nav.py +52 -0
  27. examples/codegen/tools/dependency_tools.py +120 -0
  28. examples/codegen/tools/example_tools.py +188 -0
  29. examples/codegen/tools/git_tools.py +151 -0
  30. examples/codegen/tools/impl_executor.py +614 -0
  31. examples/codegen/tools/jedi_analysis.py +311 -0
  32. examples/codegen/tools/meta_tools.py +202 -0
  33. examples/codegen/tools/syntax_tools.py +26 -0
  34. examples/codegen/tools/template_tools.py +356 -0
  35. examples/fastapi_interview.py +167 -0
  36. examples/npc/api/__init__.py +1 -0
  37. examples/npc/api/app.py +100 -0
  38. examples/npc/api/routes/__init__.py +5 -0
  39. examples/npc/api/routes/encounter.py +182 -0
  40. examples/npc/api/session.py +330 -0
  41. examples/npc/demo.py +387 -0
  42. examples/npc/nodes/__init__.py +5 -0
  43. examples/npc/nodes/image_node.py +92 -0
  44. examples/npc/run_encounter.py +230 -0
  45. examples/shared/__init__.py +0 -0
  46. examples/shared/replicate_tool.py +238 -0
  47. examples/storyboard/__init__.py +1 -0
  48. examples/storyboard/generate_videos.py +335 -0
  49. examples/storyboard/nodes/__init__.py +12 -0
  50. examples/storyboard/nodes/animated_character_node.py +248 -0
  51. examples/storyboard/nodes/animated_image_node.py +138 -0
  52. examples/storyboard/nodes/character_node.py +162 -0
  53. examples/storyboard/nodes/image_node.py +118 -0
  54. examples/storyboard/nodes/replicate_tool.py +49 -0
  55. examples/storyboard/retry_images.py +118 -0
  56. scripts/demo_async_executor.py +212 -0
  57. scripts/demo_interview_e2e.py +200 -0
  58. scripts/demo_streaming.py +140 -0
  59. scripts/run_interview_demo.py +94 -0
  60. scripts/test_interrupt_fix.py +26 -0
  61. tests/__init__.py +1 -0
  62. tests/conftest.py +178 -0
  63. tests/integration/__init__.py +1 -0
  64. tests/integration/test_animated_storyboard.py +63 -0
  65. tests/integration/test_cli_commands.py +242 -0
  66. tests/integration/test_colocated_prompts.py +139 -0
  67. tests/integration/test_map_demo.py +50 -0
  68. tests/integration/test_memory_demo.py +283 -0
  69. tests/integration/test_npc_api/__init__.py +1 -0
  70. tests/integration/test_npc_api/test_routes.py +357 -0
  71. tests/integration/test_npc_api/test_session.py +216 -0
  72. tests/integration/test_pipeline_flow.py +105 -0
  73. tests/integration/test_providers.py +163 -0
  74. tests/integration/test_resume.py +75 -0
  75. tests/integration/test_subgraph_integration.py +295 -0
  76. tests/integration/test_subgraph_interrupt.py +106 -0
  77. tests/unit/__init__.py +1 -0
  78. tests/unit/test_agent_nodes.py +355 -0
  79. tests/unit/test_async_executor.py +346 -0
  80. tests/unit/test_checkpointer.py +212 -0
  81. tests/unit/test_checkpointer_factory.py +212 -0
  82. tests/unit/test_cli.py +121 -0
  83. tests/unit/test_cli_package.py +81 -0
  84. tests/unit/test_compile_graph_map.py +132 -0
  85. tests/unit/test_conditions_routing.py +253 -0
  86. tests/unit/test_config.py +93 -0
  87. tests/unit/test_conversation_memory.py +276 -0
  88. tests/unit/test_database.py +145 -0
  89. tests/unit/test_deprecation.py +104 -0
  90. tests/unit/test_executor.py +172 -0
  91. tests/unit/test_executor_async.py +179 -0
  92. tests/unit/test_export.py +149 -0
  93. tests/unit/test_expressions.py +178 -0
  94. tests/unit/test_feature_brainstorm.py +194 -0
  95. tests/unit/test_format_prompt.py +145 -0
  96. tests/unit/test_generic_report.py +200 -0
  97. tests/unit/test_graph_commands.py +327 -0
  98. tests/unit/test_graph_linter.py +627 -0
  99. tests/unit/test_graph_loader.py +357 -0
  100. tests/unit/test_graph_schema.py +193 -0
  101. tests/unit/test_inline_schema.py +151 -0
  102. tests/unit/test_interrupt_node.py +182 -0
  103. tests/unit/test_issues.py +164 -0
  104. tests/unit/test_jinja2_prompts.py +85 -0
  105. tests/unit/test_json_extract.py +134 -0
  106. tests/unit/test_langsmith.py +600 -0
  107. tests/unit/test_langsmith_tools.py +204 -0
  108. tests/unit/test_llm_factory.py +109 -0
  109. tests/unit/test_llm_factory_async.py +118 -0
  110. tests/unit/test_loops.py +403 -0
  111. tests/unit/test_map_node.py +144 -0
  112. tests/unit/test_no_backward_compat.py +56 -0
  113. tests/unit/test_node_factory.py +348 -0
  114. tests/unit/test_passthrough_node.py +126 -0
  115. tests/unit/test_prompts.py +324 -0
  116. tests/unit/test_python_nodes.py +198 -0
  117. tests/unit/test_reliability.py +298 -0
  118. tests/unit/test_result_export.py +234 -0
  119. tests/unit/test_router.py +296 -0
  120. tests/unit/test_sanitize.py +99 -0
  121. tests/unit/test_schema_loader.py +295 -0
  122. tests/unit/test_shell_tools.py +229 -0
  123. tests/unit/test_state_builder.py +331 -0
  124. tests/unit/test_state_builder_map.py +104 -0
  125. tests/unit/test_state_config.py +197 -0
  126. tests/unit/test_streaming.py +307 -0
  127. tests/unit/test_subgraph.py +596 -0
  128. tests/unit/test_template.py +190 -0
  129. tests/unit/test_tool_call_integration.py +164 -0
  130. tests/unit/test_tool_call_node.py +178 -0
  131. tests/unit/test_tool_nodes.py +129 -0
  132. tests/unit/test_websearch.py +234 -0
  133. yamlgraph/__init__.py +35 -0
  134. yamlgraph/builder.py +110 -0
  135. yamlgraph/cli/__init__.py +159 -0
  136. yamlgraph/cli/__main__.py +6 -0
  137. yamlgraph/cli/commands.py +231 -0
  138. yamlgraph/cli/deprecation.py +92 -0
  139. yamlgraph/cli/graph_commands.py +541 -0
  140. yamlgraph/cli/validators.py +37 -0
  141. yamlgraph/config.py +67 -0
  142. yamlgraph/constants.py +70 -0
  143. yamlgraph/error_handlers.py +227 -0
  144. yamlgraph/executor.py +290 -0
  145. yamlgraph/executor_async.py +288 -0
  146. yamlgraph/graph_loader.py +451 -0
  147. yamlgraph/map_compiler.py +150 -0
  148. yamlgraph/models/__init__.py +36 -0
  149. yamlgraph/models/graph_schema.py +181 -0
  150. yamlgraph/models/schemas.py +124 -0
  151. yamlgraph/models/state_builder.py +236 -0
  152. yamlgraph/node_factory.py +768 -0
  153. yamlgraph/routing.py +87 -0
  154. yamlgraph/schema_loader.py +240 -0
  155. yamlgraph/storage/__init__.py +20 -0
  156. yamlgraph/storage/checkpointer.py +72 -0
  157. yamlgraph/storage/checkpointer_factory.py +123 -0
  158. yamlgraph/storage/database.py +320 -0
  159. yamlgraph/storage/export.py +269 -0
  160. yamlgraph/tools/__init__.py +1 -0
  161. yamlgraph/tools/agent.py +320 -0
  162. yamlgraph/tools/graph_linter.py +388 -0
  163. yamlgraph/tools/langsmith_tools.py +125 -0
  164. yamlgraph/tools/nodes.py +126 -0
  165. yamlgraph/tools/python_tool.py +179 -0
  166. yamlgraph/tools/shell.py +205 -0
  167. yamlgraph/tools/websearch.py +242 -0
  168. yamlgraph/utils/__init__.py +48 -0
  169. yamlgraph/utils/conditions.py +157 -0
  170. yamlgraph/utils/expressions.py +245 -0
  171. yamlgraph/utils/json_extract.py +104 -0
  172. yamlgraph/utils/langsmith.py +416 -0
  173. yamlgraph/utils/llm_factory.py +118 -0
  174. yamlgraph/utils/llm_factory_async.py +105 -0
  175. yamlgraph/utils/logging.py +104 -0
  176. yamlgraph/utils/prompts.py +171 -0
  177. yamlgraph/utils/sanitize.py +98 -0
  178. yamlgraph/utils/template.py +102 -0
  179. yamlgraph/utils/validators.py +181 -0
  180. yamlgraph-0.3.9.dist-info/METADATA +1105 -0
  181. yamlgraph-0.3.9.dist-info/RECORD +185 -0
  182. yamlgraph-0.3.9.dist-info/WHEEL +5 -0
  183. yamlgraph-0.3.9.dist-info/entry_points.txt +2 -0
  184. yamlgraph-0.3.9.dist-info/licenses/LICENSE +33 -0
  185. yamlgraph-0.3.9.dist-info/top_level.txt +4 -0
tests/unit/test_graph_loader.py
@@ -0,0 +1,357 @@
+"""Tests for YAML graph loader.
+
+TDD: Write tests first, then implement graph_loader.py.
+
+Note: Node factory tests (resolve_class, resolve_template, create_node_function)
+have been moved to test_node_factory.py for better organization.
+"""
+
+from unittest.mock import patch
+
+import pytest
+
+from tests.conftest import FixtureGeneratedContent
+from yamlgraph.graph_loader import (
+    GraphConfig,
+    compile_graph,
+    load_and_compile,
+    load_graph_config,
+)
+
+# =============================================================================
+# Fixtures
+# =============================================================================
+
+
+@pytest.fixture
+def sample_yaml_content():
+    """Minimal valid YAML config."""
+    return """
+version: "1.0"
+name: test_graph
+description: Test pipeline
+
+defaults:
+  provider: mistral
+  temperature: 0.7
+
+nodes:
+  generate:
+    type: llm
+    prompt: generate
+    output_model: yamlgraph.models.GenericReport
+    temperature: 0.8
+    variables:
+      topic: "{state.topic}"
+    state_key: generated
+
+edges:
+  - from: START
+    to: generate
+  - from: generate
+    to: END
+"""
+
+
+@pytest.fixture
+def sample_yaml_file(tmp_path, sample_yaml_content):
+    """Create a temporary YAML file."""
+    yaml_file = tmp_path / "test_graph.yaml"
+    yaml_file.write_text(sample_yaml_content)
+    return yaml_file
+
+
+@pytest.fixture
+def sample_config(sample_yaml_file):
+    """Load sample config."""
+    return load_graph_config(sample_yaml_file)
+
+
+# =============================================================================
+# TestLoadGraphConfig
+# =============================================================================
+
+
+class TestLoadGraphConfig:
+    """Tests for loading YAML graph configs."""
+
+    def test_load_valid_yaml(self, sample_yaml_file):
+        """Load a valid graph YAML file."""
+        config = load_graph_config(sample_yaml_file)
+
+        assert isinstance(config, GraphConfig)
+        assert config.name == "test_graph"
+        assert config.version == "1.0"
+
+    def test_load_missing_file_raises(self, tmp_path):
+        """FileNotFoundError for missing file."""
+        missing = tmp_path / "nonexistent.yaml"
+
+        with pytest.raises(FileNotFoundError):
+            load_graph_config(missing)
+
+    def test_parse_nodes(self, sample_config):
+        """Nodes parsed with correct attributes."""
+        assert "generate" in sample_config.nodes
+
+        node = sample_config.nodes["generate"]
+        assert node["type"] == "llm"
+        assert node["prompt"] == "generate"
+        assert node["temperature"] == 0.8
+
+    def test_parse_edges(self, sample_config):
+        """Edges parsed correctly."""
+        assert len(sample_config.edges) == 2
+        assert sample_config.edges[0]["from"] == "START"
+        assert sample_config.edges[0]["to"] == "generate"
+
+    def test_parse_defaults(self, sample_config):
+        """Defaults parsed correctly."""
+        assert sample_config.defaults["provider"] == "mistral"
+        assert sample_config.defaults["temperature"] == 0.7
+
+    def test_parse_prompts_relative(self, tmp_path):
+        """Should parse prompts_relative from defaults."""
+        yaml_content = """
+version: "1.0"
+name: test_graph
+
+defaults:
+  prompts_relative: true
+
+nodes:
+  greet:
+    type: llm
+    prompt: prompts/greet
+    state_key: greeting
+
+edges:
+  - from: START
+    to: greet
+  - from: greet
+    to: END
+"""
+        yaml_file = tmp_path / "test.yaml"
+        yaml_file.write_text(yaml_content)
+
+        config = load_graph_config(yaml_file)
+
+        assert config.prompts_relative is True
+        assert config.prompts_dir is None
+
+    def test_parse_prompts_dir(self, tmp_path):
+        """Should parse prompts_dir from defaults."""
+        yaml_content = """
+version: "1.0"
+name: test_graph
+
+defaults:
+  prompts_dir: shared/prompts
+
+nodes:
+  greet:
+    type: llm
+    prompt: greet
+    state_key: greeting
+
+edges:
+  - from: START
+    to: greet
+  - from: greet
+    to: END
+"""
+        yaml_file = tmp_path / "test.yaml"
+        yaml_file.write_text(yaml_content)
+
+        config = load_graph_config(yaml_file)
+
+        assert config.prompts_dir == "shared/prompts"
+        assert config.prompts_relative is False
+
+    def test_parse_state_class(self, sample_config):
+        """State class defaults to empty (dynamic generation)."""
+        assert sample_config.state_class == ""
+
+
+# =============================================================================
+# TestCompileGraph
+# =============================================================================
+
+
+class TestCompileGraph:
+    """Tests for compiling config to LangGraph."""
+
+    def test_graph_has_all_nodes(self, sample_config):
+        """Compiled graph contains all defined nodes."""
+        graph = compile_graph(sample_config)
+
+        # Check node was added (nodes are stored in graph.nodes)
+        assert "generate" in graph.nodes
+
+    def test_entry_point_set(self, sample_config):
+        """START edge sets entry point correctly."""
+        graph = compile_graph(sample_config)
+
+        # Verify entry point by checking the graph compiles and
+        # the first node is reachable from START
+        compiled = graph.compile()
+        assert compiled is not None
+
+        # The 'generate' node should be in the graph
+        assert "generate" in graph.nodes
+
+    def test_edges_connected(self, sample_config):
+        """Edges create correct topology."""
+        graph = compile_graph(sample_config)
+
+        # Compile to check it works
+        compiled = graph.compile()
+        assert compiled is not None
+
+
+# =============================================================================
+# TestLoadAndCompile
+# =============================================================================
+
+
+class TestLoadAndCompile:
+    """Integration tests for full load-compile flow."""
+
+    def test_load_and_compile_yamlgraph(self):
+        """Load the actual yamlgraph.yaml and compile it."""
+        from yamlgraph.config import GRAPHS_DIR
+
+        yamlgraph_path = GRAPHS_DIR / "yamlgraph.yaml"
+        if not yamlgraph_path.exists():
+            pytest.skip("yamlgraph.yaml not created yet")
+
+        graph = load_and_compile(yamlgraph_path)
+        compiled = graph.compile()
+
+        assert compiled is not None
+
+    def test_compiled_graph_invocable(self, sample_yaml_file):
+        """Compiled graph can be invoked with initial state."""
+        mock_result = FixtureGeneratedContent(
+            title="Test",
+            content="Content",
+            word_count=100,
+            tags=[],
+        )
+
+        with patch("yamlgraph.node_factory.execute_prompt", return_value=mock_result):
+            graph = load_and_compile(sample_yaml_file)
+            compiled = graph.compile()
+
+            initial_state = {
+                "thread_id": "test",
+                "topic": "AI",
+                "style": "casual",
+                "word_count": 100,
+            }
+
+            result = compiled.invoke(initial_state)
+
+            assert result.get("generated") is not None
+            assert result["generated"].title == "Test"
+
+
+# =============================================================================
+# TestYAMLSchemaValidation
+# =============================================================================
+
+
+class TestYAMLSchemaValidation:
+    """Tests for YAML schema validation on load."""
+
+    def test_missing_nodes_raises_error(self, tmp_path):
+        """YAML without nodes should raise ValidationError."""
+        yaml_content = """
+version: "1.0"
+name: empty_graph
+edges:
+  - from: START
+    to: END
+"""
+        yaml_file = tmp_path / "no_nodes.yaml"
+        yaml_file.write_text(yaml_content)
+
+        with pytest.raises(ValueError, match="nodes"):
+            load_graph_config(yaml_file)
+
+    def test_missing_edges_raises_error(self, tmp_path):
+        """YAML without edges should raise ValidationError."""
+        yaml_content = """
+version: "1.0"
+name: no_edges
+nodes:
+  generate:
+    type: llm
+    prompt: generate
+"""
+        yaml_file = tmp_path / "no_edges.yaml"
+        yaml_file.write_text(yaml_content)
+
+        with pytest.raises(ValueError, match="edges"):
+            load_graph_config(yaml_file)
+
+    def test_node_missing_prompt_raises_error(self, tmp_path):
+        """Node without prompt should raise ValidationError."""
+        yaml_content = """
+version: "1.0"
+name: bad_node
+nodes:
+  generate:
+    type: llm
+    output_model: yamlgraph.models.GenericReport
+edges:
+  - from: START
+    to: generate
+"""
+        yaml_file = tmp_path / "no_prompt.yaml"
+        yaml_file.write_text(yaml_content)
+
+        with pytest.raises(ValueError, match="prompt"):
+            load_graph_config(yaml_file)
+
+    def test_edge_missing_from_raises_error(self, tmp_path):
+        """Edge without 'from' should raise ValidationError."""
+        yaml_content = """
+version: "1.0"
+name: bad_edge
+nodes:
+  generate:
+    type: llm
+    prompt: generate
+edges:
+  - to: generate
+"""
+        yaml_file = tmp_path / "no_from.yaml"
+        yaml_file.write_text(yaml_content)
+
+        with pytest.raises(ValueError, match="from"):
+            load_graph_config(yaml_file)
+
+    def test_edge_missing_to_raises_error(self, tmp_path):
+        """Edge without 'to' should raise ValidationError."""
+        yaml_content = """
+version: "1.0"
+name: bad_edge
+nodes:
+  generate:
+    type: llm
+    prompt: generate
+edges:
+  - from: START
+"""
+        yaml_file = tmp_path / "no_to.yaml"
+        yaml_file.write_text(yaml_content)
+
+        with pytest.raises(ValueError, match="to"):
+            load_graph_config(yaml_file)
+
+    def test_valid_yaml_passes_validation(self, sample_yaml_file):
+        """Valid YAML should load without errors."""
+        config = load_graph_config(sample_yaml_file)
+        assert config.name == "test_graph"
+        assert "generate" in config.nodes
tests/unit/test_graph_schema.py
@@ -0,0 +1,193 @@
+"""Tests for graph configuration Pydantic schema validation."""
+
+import pytest
+from pydantic import ValidationError
+
+from yamlgraph.models.graph_schema import (
+    EdgeConfig,
+    NodeConfig,
+    validate_graph_schema,
+)
+
+
+class TestNodeConfig:
+    """Tests for NodeConfig validation."""
+
+    def test_default_node_type_is_llm(self):
+        """Default node type is llm."""
+        node = NodeConfig(prompt="test")
+        assert node.type == "llm"
+
+    def test_llm_node_requires_prompt(self):
+        """LLM node must have prompt."""
+        with pytest.raises(ValidationError, match="requires 'prompt'"):
+            NodeConfig(type="llm")
+
+    def test_router_requires_routes(self):
+        """Router node must have routes."""
+        with pytest.raises(ValidationError, match="requires 'routes'"):
+            NodeConfig(type="router", prompt="classify")
+
+    def test_router_with_routes_valid(self):
+        """Router with routes is valid."""
+        node = NodeConfig(
+            type="router",
+            prompt="classify",
+            routes={"positive": "happy", "negative": "sad"},
+        )
+        assert node.routes == {"positive": "happy", "negative": "sad"}
+
+    def test_map_requires_all_fields(self):
+        """Map node requires over, as, node, collect."""
+        # Missing 'as'
+        with pytest.raises(ValidationError, match="requires 'as'"):
+            NodeConfig(
+                type="map",
+                over="{state.items}",
+                node={"prompt": "process"},
+                collect="results",
+            )
+
+    def test_map_with_all_fields_valid(self):
+        """Map node with all fields is valid."""
+        node = NodeConfig.model_validate(
+            {
+                "type": "map",
+                "over": "{state.items}",
+                "as": "item",
+                "node": {"prompt": "process"},
+                "collect": "results",
+            }
+        )
+        assert node.item_var == "item"
+        assert node.collect == "results"
+
+    def test_invalid_on_error_rejected(self):
+        """Invalid on_error value is rejected."""
+        with pytest.raises(ValidationError, match="Invalid on_error"):
+            NodeConfig(prompt="test", on_error="invalid_handler")
+
+    def test_valid_on_error_accepted(self):
+        """Valid on_error values accepted."""
+        for handler in ["skip", "retry", "fail", "fallback"]:
+            node = NodeConfig(prompt="test", on_error=handler)
+            assert node.on_error == handler
+
+    def test_temperature_range(self):
+        """Temperature must be 0-2."""
+        NodeConfig(prompt="test", temperature=0.5)  # Valid
+
+        with pytest.raises(ValidationError):
+            NodeConfig(prompt="test", temperature=-0.1)
+
+        with pytest.raises(ValidationError):
+            NodeConfig(prompt="test", temperature=2.5)
+
+
+class TestEdgeConfig:
+    """Tests for EdgeConfig validation."""
+
+    def test_simple_edge(self):
+        """Simple from/to edge."""
+        edge = EdgeConfig.model_validate({"from": "a", "to": "b"})
+        assert edge.from_node == "a"
+        assert edge.to == "b"
+
+    def test_edge_with_condition(self):
+        """Edge with condition expression."""
+        edge = EdgeConfig.model_validate(
+            {
+                "from": "critique",
+                "to": "refine",
+                "condition": "score < 0.8",
+            }
+        )
+        assert edge.condition == "score < 0.8"
+
+    def test_edge_to_multiple_targets(self):
+        """Edge can have list of targets."""
+        edge = EdgeConfig.model_validate(
+            {
+                "from": "a",
+                "to": ["b", "c"],
+            }
+        )
+        assert edge.to == ["b", "c"]
+
+
+class TestGraphConfigSchema:
+    """Tests for full graph schema validation."""
+
+    def test_minimal_valid_graph(self):
+        """Minimal valid graph configuration."""
+        config = {
+            "nodes": {
+                "greet": {"prompt": "greet"},
+            },
+            "edges": [
+                {"from": "START", "to": "greet"},
+                {"from": "greet", "to": "END"},
+            ],
+        }
+        schema = validate_graph_schema(config)
+        assert schema.name == "unnamed"
+        assert "greet" in schema.nodes
+
+    def test_full_graph_config(self):
+        """Full graph with all optional fields."""
+        config = {
+            "version": "1.0",
+            "name": "test-graph",
+            "description": "A test graph",
+            "defaults": {"provider": "anthropic"},
+            "nodes": {
+                "generate": {"prompt": "generate", "temperature": 0.8},
+            },
+            "edges": [
+                {"from": "START", "to": "generate"},
+                {"from": "generate", "to": "END"},
+            ],
+            "loop_limits": {"refine": 3},
+        }
+        schema = validate_graph_schema(config)
+        assert schema.name == "test-graph"
+        assert schema.defaults == {"provider": "anthropic"}
+
+    def test_router_targets_validated(self):
+        """Router targets must exist as nodes."""
+        config = {
+            "nodes": {
+                "classify": {
+                    "type": "router",
+                    "prompt": "classify",
+                    "routes": {"a": "nonexistent"},
+                },
+            },
+            "edges": [{"from": "START", "to": "classify"}],
+        }
+        with pytest.raises(ValidationError, match="nonexistent"):
+            validate_graph_schema(config)
+
+    def test_edge_nodes_validated(self):
+        """Edge nodes must exist."""
+        config = {
+            "nodes": {"a": {"prompt": "test"}},
+            "edges": [
+                {"from": "START", "to": "a"},
+                {"from": "a", "to": "missing"},
+            ],
+        }
+        with pytest.raises(ValidationError, match="missing"):
+            validate_graph_schema(config)
+
+    def test_start_end_always_valid(self):
+        """START and END are always valid node references."""
+        config = {
+            "nodes": {"middle": {"prompt": "test"}},
+            "edges": [
+                {"from": "START", "to": "middle"},
+                {"from": "middle", "to": "END"},
+            ],
+        }
+        schema = validate_graph_schema(config)
+        assert len(schema.edges) == 2
tests/unit/test_inline_schema.py
@@ -0,0 +1,151 @@
+"""Tests for YAML inline schema integration with node factory.
+
+TDD: RED phase - tests for loading output_model from prompt YAML schema block.
+"""
+
+import pytest
+
+
+class TestInlineSchemaIntegration:
+    """Test node factory uses inline schema from prompt YAML."""
+
+    def test_node_uses_inline_schema_from_prompt(self, tmp_path, monkeypatch):
+        """Node uses schema defined in prompt YAML instead of output_model."""
+        # Create a prompt file with inline schema
+        prompt_dir = tmp_path / "prompts" / "test"
+        prompt_dir.mkdir(parents=True)
+
+        prompt_file = prompt_dir / "classify.yaml"
+        prompt_file.write_text("""
+name: classify_tone
+version: "1.0"
+
+schema:
+  name: InlineClassification
+  fields:
+    result:
+      type: str
+      description: "Classification result"
+    score:
+      type: float
+      description: "Confidence score"
+      constraints:
+        ge: 0.0
+        le: 1.0
+
+system: You are a classifier.
+user: "Classify: {message}"
+""")
+
+        # Patch prompts directory
+        monkeypatch.setenv("PROMPTS_DIR", str(tmp_path / "prompts"))
+
+        # Create node without explicit output_model - should use inline schema
+        node_config = {
+            "type": "llm",
+            "prompt": "test/classify",
+            # No output_model specified - should load from YAML
+        }
+
+        # This should work and detect inline schema
+        from yamlgraph.node_factory import get_output_model_for_node
+
+        model = get_output_model_for_node(node_config, str(tmp_path / "prompts"))
+
+        assert model is not None
+        assert model.__name__ == "InlineClassification"
+
+        # Verify model works
+        instance = model(result="positive", score=0.95)
+        assert instance.result == "positive"
+        assert instance.score == 0.95
+
+    def test_explicit_output_model_overrides_inline_schema(self, tmp_path, monkeypatch):
+        """Explicit output_model in node config takes precedence."""
+        prompt_dir = tmp_path / "prompts" / "test"
+        prompt_dir.mkdir(parents=True)
+
+        prompt_file = prompt_dir / "with_schema.yaml"
+        prompt_file.write_text("""
+name: test_prompt
+schema:
+  name: InlineModel
+  fields:
+    value: {type: str}
+system: Test
+user: "{input}"
+""")
+
+        monkeypatch.setenv("PROMPTS_DIR", str(tmp_path / "prompts"))
+
+        # Node config has explicit output_model
+        node_config = {
+            "type": "llm",
+            "prompt": "test/with_schema",
+            "output_model": "yamlgraph.models.GenericReport",  # Explicit - takes precedence
+        }
+
+        from yamlgraph.node_factory import get_output_model_for_node
+
+        model = get_output_model_for_node(node_config, str(tmp_path / "prompts"))
+
+        # Should use explicit model, not inline
+        assert model.__name__ == "GenericReport"
+
+    def test_no_schema_returns_none(self, tmp_path, monkeypatch):
+        """Prompt without schema returns None for output_model."""
+        prompt_dir = tmp_path / "prompts" / "test"
+        prompt_dir.mkdir(parents=True)
+
+        prompt_file = prompt_dir / "plain.yaml"
+        prompt_file.write_text("""
+name: plain_prompt
+system: Test
+user: "{input}"
+""")
+
+        monkeypatch.setenv("PROMPTS_DIR", str(tmp_path / "prompts"))
+
+        node_config = {
+            "type": "llm",
+            "prompt": "test/plain",
+        }
+
+        from yamlgraph.node_factory import get_output_model_for_node
+
+        model = get_output_model_for_node(node_config, str(tmp_path / "prompts"))
+
+        assert model is None
+
+
+class TestResolvePromptPath:
+    """Test resolving prompt name to full file path."""
+
+    def test_resolve_prompt_path(self, tmp_path):
+        """Resolve prompt name to full YAML path."""
+        prompt_dir = tmp_path / "prompts"
+        prompt_dir.mkdir()
+
+        (prompt_dir / "simple.yaml").write_text("name: simple")
+        (prompt_dir / "nested").mkdir()
+        (prompt_dir / "nested" / "deep.yaml").write_text("name: deep")
+
+        from yamlgraph.node_factory import resolve_prompt_path
+
+        # Simple prompt - now returns Path object
+        path = resolve_prompt_path("simple", prompt_dir)
+        assert path.name == "simple.yaml"
+
+        # Nested prompt
+        path = resolve_prompt_path("nested/deep", prompt_dir)
+        assert path.name == "deep.yaml"
+
+    def test_resolve_missing_prompt_raises(self, tmp_path):
+        """Missing prompt file raises FileNotFoundError."""
+        prompt_dir = tmp_path / "prompts"
+        prompt_dir.mkdir()
+
+        from yamlgraph.node_factory import resolve_prompt_path
+
+        with pytest.raises(FileNotFoundError):
+            resolve_prompt_path("nonexistent", prompt_dir)