yamlgraph 0.3.9__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (185) hide show
  1. examples/__init__.py +1 -0
  2. examples/codegen/__init__.py +5 -0
  3. examples/codegen/models/__init__.py +13 -0
  4. examples/codegen/models/schemas.py +76 -0
  5. examples/codegen/tests/__init__.py +1 -0
  6. examples/codegen/tests/test_ai_helpers.py +235 -0
  7. examples/codegen/tests/test_ast_analysis.py +174 -0
  8. examples/codegen/tests/test_code_analysis.py +134 -0
  9. examples/codegen/tests/test_code_context.py +301 -0
  10. examples/codegen/tests/test_code_nav.py +89 -0
  11. examples/codegen/tests/test_dependency_tools.py +119 -0
  12. examples/codegen/tests/test_example_tools.py +185 -0
  13. examples/codegen/tests/test_git_tools.py +112 -0
  14. examples/codegen/tests/test_impl_agent_schemas.py +193 -0
  15. examples/codegen/tests/test_impl_agent_v4_graph.py +94 -0
  16. examples/codegen/tests/test_jedi_analysis.py +226 -0
  17. examples/codegen/tests/test_meta_tools.py +250 -0
  18. examples/codegen/tests/test_plan_discovery_prompt.py +98 -0
  19. examples/codegen/tests/test_syntax_tools.py +85 -0
  20. examples/codegen/tests/test_synthesize_prompt.py +94 -0
  21. examples/codegen/tests/test_template_tools.py +244 -0
  22. examples/codegen/tools/__init__.py +80 -0
  23. examples/codegen/tools/ai_helpers.py +420 -0
  24. examples/codegen/tools/ast_analysis.py +92 -0
  25. examples/codegen/tools/code_context.py +180 -0
  26. examples/codegen/tools/code_nav.py +52 -0
  27. examples/codegen/tools/dependency_tools.py +120 -0
  28. examples/codegen/tools/example_tools.py +188 -0
  29. examples/codegen/tools/git_tools.py +151 -0
  30. examples/codegen/tools/impl_executor.py +614 -0
  31. examples/codegen/tools/jedi_analysis.py +311 -0
  32. examples/codegen/tools/meta_tools.py +202 -0
  33. examples/codegen/tools/syntax_tools.py +26 -0
  34. examples/codegen/tools/template_tools.py +356 -0
  35. examples/fastapi_interview.py +167 -0
  36. examples/npc/api/__init__.py +1 -0
  37. examples/npc/api/app.py +100 -0
  38. examples/npc/api/routes/__init__.py +5 -0
  39. examples/npc/api/routes/encounter.py +182 -0
  40. examples/npc/api/session.py +330 -0
  41. examples/npc/demo.py +387 -0
  42. examples/npc/nodes/__init__.py +5 -0
  43. examples/npc/nodes/image_node.py +92 -0
  44. examples/npc/run_encounter.py +230 -0
  45. examples/shared/__init__.py +0 -0
  46. examples/shared/replicate_tool.py +238 -0
  47. examples/storyboard/__init__.py +1 -0
  48. examples/storyboard/generate_videos.py +335 -0
  49. examples/storyboard/nodes/__init__.py +12 -0
  50. examples/storyboard/nodes/animated_character_node.py +248 -0
  51. examples/storyboard/nodes/animated_image_node.py +138 -0
  52. examples/storyboard/nodes/character_node.py +162 -0
  53. examples/storyboard/nodes/image_node.py +118 -0
  54. examples/storyboard/nodes/replicate_tool.py +49 -0
  55. examples/storyboard/retry_images.py +118 -0
  56. scripts/demo_async_executor.py +212 -0
  57. scripts/demo_interview_e2e.py +200 -0
  58. scripts/demo_streaming.py +140 -0
  59. scripts/run_interview_demo.py +94 -0
  60. scripts/test_interrupt_fix.py +26 -0
  61. tests/__init__.py +1 -0
  62. tests/conftest.py +178 -0
  63. tests/integration/__init__.py +1 -0
  64. tests/integration/test_animated_storyboard.py +63 -0
  65. tests/integration/test_cli_commands.py +242 -0
  66. tests/integration/test_colocated_prompts.py +139 -0
  67. tests/integration/test_map_demo.py +50 -0
  68. tests/integration/test_memory_demo.py +283 -0
  69. tests/integration/test_npc_api/__init__.py +1 -0
  70. tests/integration/test_npc_api/test_routes.py +357 -0
  71. tests/integration/test_npc_api/test_session.py +216 -0
  72. tests/integration/test_pipeline_flow.py +105 -0
  73. tests/integration/test_providers.py +163 -0
  74. tests/integration/test_resume.py +75 -0
  75. tests/integration/test_subgraph_integration.py +295 -0
  76. tests/integration/test_subgraph_interrupt.py +106 -0
  77. tests/unit/__init__.py +1 -0
  78. tests/unit/test_agent_nodes.py +355 -0
  79. tests/unit/test_async_executor.py +346 -0
  80. tests/unit/test_checkpointer.py +212 -0
  81. tests/unit/test_checkpointer_factory.py +212 -0
  82. tests/unit/test_cli.py +121 -0
  83. tests/unit/test_cli_package.py +81 -0
  84. tests/unit/test_compile_graph_map.py +132 -0
  85. tests/unit/test_conditions_routing.py +253 -0
  86. tests/unit/test_config.py +93 -0
  87. tests/unit/test_conversation_memory.py +276 -0
  88. tests/unit/test_database.py +145 -0
  89. tests/unit/test_deprecation.py +104 -0
  90. tests/unit/test_executor.py +172 -0
  91. tests/unit/test_executor_async.py +179 -0
  92. tests/unit/test_export.py +149 -0
  93. tests/unit/test_expressions.py +178 -0
  94. tests/unit/test_feature_brainstorm.py +194 -0
  95. tests/unit/test_format_prompt.py +145 -0
  96. tests/unit/test_generic_report.py +200 -0
  97. tests/unit/test_graph_commands.py +327 -0
  98. tests/unit/test_graph_linter.py +627 -0
  99. tests/unit/test_graph_loader.py +357 -0
  100. tests/unit/test_graph_schema.py +193 -0
  101. tests/unit/test_inline_schema.py +151 -0
  102. tests/unit/test_interrupt_node.py +182 -0
  103. tests/unit/test_issues.py +164 -0
  104. tests/unit/test_jinja2_prompts.py +85 -0
  105. tests/unit/test_json_extract.py +134 -0
  106. tests/unit/test_langsmith.py +600 -0
  107. tests/unit/test_langsmith_tools.py +204 -0
  108. tests/unit/test_llm_factory.py +109 -0
  109. tests/unit/test_llm_factory_async.py +118 -0
  110. tests/unit/test_loops.py +403 -0
  111. tests/unit/test_map_node.py +144 -0
  112. tests/unit/test_no_backward_compat.py +56 -0
  113. tests/unit/test_node_factory.py +348 -0
  114. tests/unit/test_passthrough_node.py +126 -0
  115. tests/unit/test_prompts.py +324 -0
  116. tests/unit/test_python_nodes.py +198 -0
  117. tests/unit/test_reliability.py +298 -0
  118. tests/unit/test_result_export.py +234 -0
  119. tests/unit/test_router.py +296 -0
  120. tests/unit/test_sanitize.py +99 -0
  121. tests/unit/test_schema_loader.py +295 -0
  122. tests/unit/test_shell_tools.py +229 -0
  123. tests/unit/test_state_builder.py +331 -0
  124. tests/unit/test_state_builder_map.py +104 -0
  125. tests/unit/test_state_config.py +197 -0
  126. tests/unit/test_streaming.py +307 -0
  127. tests/unit/test_subgraph.py +596 -0
  128. tests/unit/test_template.py +190 -0
  129. tests/unit/test_tool_call_integration.py +164 -0
  130. tests/unit/test_tool_call_node.py +178 -0
  131. tests/unit/test_tool_nodes.py +129 -0
  132. tests/unit/test_websearch.py +234 -0
  133. yamlgraph/__init__.py +35 -0
  134. yamlgraph/builder.py +110 -0
  135. yamlgraph/cli/__init__.py +159 -0
  136. yamlgraph/cli/__main__.py +6 -0
  137. yamlgraph/cli/commands.py +231 -0
  138. yamlgraph/cli/deprecation.py +92 -0
  139. yamlgraph/cli/graph_commands.py +541 -0
  140. yamlgraph/cli/validators.py +37 -0
  141. yamlgraph/config.py +67 -0
  142. yamlgraph/constants.py +70 -0
  143. yamlgraph/error_handlers.py +227 -0
  144. yamlgraph/executor.py +290 -0
  145. yamlgraph/executor_async.py +288 -0
  146. yamlgraph/graph_loader.py +451 -0
  147. yamlgraph/map_compiler.py +150 -0
  148. yamlgraph/models/__init__.py +36 -0
  149. yamlgraph/models/graph_schema.py +181 -0
  150. yamlgraph/models/schemas.py +124 -0
  151. yamlgraph/models/state_builder.py +236 -0
  152. yamlgraph/node_factory.py +768 -0
  153. yamlgraph/routing.py +87 -0
  154. yamlgraph/schema_loader.py +240 -0
  155. yamlgraph/storage/__init__.py +20 -0
  156. yamlgraph/storage/checkpointer.py +72 -0
  157. yamlgraph/storage/checkpointer_factory.py +123 -0
  158. yamlgraph/storage/database.py +320 -0
  159. yamlgraph/storage/export.py +269 -0
  160. yamlgraph/tools/__init__.py +1 -0
  161. yamlgraph/tools/agent.py +320 -0
  162. yamlgraph/tools/graph_linter.py +388 -0
  163. yamlgraph/tools/langsmith_tools.py +125 -0
  164. yamlgraph/tools/nodes.py +126 -0
  165. yamlgraph/tools/python_tool.py +179 -0
  166. yamlgraph/tools/shell.py +205 -0
  167. yamlgraph/tools/websearch.py +242 -0
  168. yamlgraph/utils/__init__.py +48 -0
  169. yamlgraph/utils/conditions.py +157 -0
  170. yamlgraph/utils/expressions.py +245 -0
  171. yamlgraph/utils/json_extract.py +104 -0
  172. yamlgraph/utils/langsmith.py +416 -0
  173. yamlgraph/utils/llm_factory.py +118 -0
  174. yamlgraph/utils/llm_factory_async.py +105 -0
  175. yamlgraph/utils/logging.py +104 -0
  176. yamlgraph/utils/prompts.py +171 -0
  177. yamlgraph/utils/sanitize.py +98 -0
  178. yamlgraph/utils/template.py +102 -0
  179. yamlgraph/utils/validators.py +181 -0
  180. yamlgraph-0.3.9.dist-info/METADATA +1105 -0
  181. yamlgraph-0.3.9.dist-info/RECORD +185 -0
  182. yamlgraph-0.3.9.dist-info/WHEEL +5 -0
  183. yamlgraph-0.3.9.dist-info/entry_points.txt +2 -0
  184. yamlgraph-0.3.9.dist-info/licenses/LICENSE +33 -0
  185. yamlgraph-0.3.9.dist-info/top_level.txt +4 -0
@@ -0,0 +1,355 @@
1
+ """Tests for agent nodes (type: agent).
2
+
3
+ Agent nodes allow the LLM to autonomously decide which tools to call
4
+ in a loop until it has enough information to respond.
5
+ """
6
+
7
+ from unittest.mock import MagicMock, patch
8
+
9
+ from yamlgraph.tools.agent import (
10
+ build_langchain_tool,
11
+ build_python_tool,
12
+ create_agent_node,
13
+ )
14
+ from yamlgraph.tools.python_tool import PythonToolConfig
15
+ from yamlgraph.tools.shell import ShellToolConfig
16
+
17
+
18
class TestBuildLangchainTool:
    """Tests for build_langchain_tool function."""

    def test_creates_tool_with_name(self):
        """Tool has correct name."""
        cfg = ShellToolConfig(command="echo test", description="Test tool")
        assert build_langchain_tool("my_tool", cfg).name == "my_tool"

    def test_creates_tool_with_description(self):
        """Tool has correct description."""
        cfg = ShellToolConfig(command="echo test", description="A helpful test tool")
        assert build_langchain_tool("test", cfg).description == "A helpful test tool"

    def test_tool_executes_command(self):
        """Tool invocation runs shell command."""
        cfg = ShellToolConfig(command="echo {message}", description="Echo a message")
        output = build_langchain_tool("echo", cfg).invoke({"message": "hello"})
        assert "hello" in output
48
+
49
+
50
class TestCreateAgentNode:
    """Tests for create_agent_node function."""

    @staticmethod
    def _ai_message(content, tool_calls):
        """Build a fake LLM message carrying *content* and *tool_calls*."""
        message = MagicMock()
        message.tool_calls = tool_calls
        message.content = content
        return message

    @staticmethod
    def _llm_returning(*responses):
        """Build a mock LLM whose invoke() yields *responses*.

        A single response becomes a repeatable return_value (so the agent
        may call it any number of times); several become a side_effect
        sequence consumed one turn at a time.
        """
        llm = MagicMock()
        llm.bind_tools.return_value = llm
        if len(responses) == 1:
            llm.invoke.return_value = responses[0]
        else:
            llm.invoke.side_effect = list(responses)
        return llm

    @patch("yamlgraph.tools.agent.create_llm")
    def test_agent_completes_without_tools(self, mock_create_llm):
        """Agent can finish with no tool calls."""
        # LLM answers directly on the first turn - no tool calls at all.
        mock_create_llm.return_value = self._llm_returning(
            self._ai_message("The answer is 42", [])
        )

        shell_tools = {
            "search": ShellToolConfig(command="echo search", description="Search"),
        }
        config = {
            "prompt": "agent",
            "tools": ["search"],
            "max_iterations": 5,
            "state_key": "result",
        }

        node_fn = create_agent_node("agent", config, shell_tools)
        state = node_fn({"input": "What is the meaning of life?"})

        assert state["result"] == "The answer is 42"
        assert state["_agent_iterations"] == 1

    @patch("yamlgraph.tools.agent.create_llm")
    def test_agent_calls_tool(self, mock_create_llm):
        """LLM tool call executes shell command."""
        # Turn 1 requests a tool; turn 2 delivers the final answer.
        tool_turn = self._ai_message(
            "", [{"id": "call1", "name": "echo", "args": {"message": "test"}}]
        )
        final_turn = self._ai_message("I echoed: test", [])
        mock_create_llm.return_value = self._llm_returning(tool_turn, final_turn)

        shell_tools = {
            "echo": ShellToolConfig(command="echo {message}", description="Echo"),
        }
        config = {
            "prompt": "agent",
            "tools": ["echo"],
            "max_iterations": 5,
            "state_key": "result",
        }

        node_fn = create_agent_node("agent", config, shell_tools)
        state = node_fn({"input": "Echo something"})

        assert state["result"] == "I echoed: test"
        assert state["_agent_iterations"] == 2

    @patch("yamlgraph.tools.agent.create_llm")
    def test_max_iterations_enforced(self, mock_create_llm):
        """Stops after max_iterations reached."""
        # The LLM asks for a tool on every turn and never finishes.
        endless_turn = self._ai_message(
            "Still searching...",
            [{"id": "call1", "name": "search", "args": {"query": "more"}}],
        )
        llm = self._llm_returning(endless_turn)
        mock_create_llm.return_value = llm

        shell_tools = {
            "search": ShellToolConfig(command="echo searching", description="Search"),
        }
        config = {
            "prompt": "agent",
            "tools": ["search"],
            "max_iterations": 3,
            "state_key": "result",
        }

        node_fn = create_agent_node("agent", config, shell_tools)
        state = node_fn({"input": "Search forever"})

        # The loop must bail out at the cap instead of spinning forever.
        assert state["_agent_limit_reached"] is True
        assert llm.invoke.call_count == 3

    @patch("yamlgraph.tools.agent.create_llm")
    def test_tool_result_returned_to_llm(self, mock_create_llm):
        """LLM sees tool output in next turn."""
        tool_turn = self._ai_message(
            "", [{"id": "call1", "name": "calc", "args": {"expr": "2+2"}}]
        )
        final_turn = self._ai_message("The result is 4", [])
        llm = self._llm_returning(tool_turn, final_turn)
        mock_create_llm.return_value = llm

        shell_tools = {
            "calc": ShellToolConfig(
                command="echo 4",  # Simulates python calc
                description="Calculate",
            ),
        }
        config = {
            "prompt": "agent",
            "tools": ["calc"],
            "max_iterations": 5,
            "state_key": "answer",
        }

        node_fn = create_agent_node("agent", config, shell_tools)
        node_fn({"input": "What is 2+2?"})

        # The second invoke should receive the growing transcript:
        # system, user, ai (with tool call), tool result.
        second_call_messages = llm.invoke.call_args_list[1][0][0]
        assert len(second_call_messages) >= 4

    def test_default_max_iterations(self):
        """Default max_iterations is 5."""
        shell_tools = {
            "test": ShellToolConfig(command="echo test", description="Test"),
        }
        config = {
            "prompt": "agent",
            "tools": ["test"],
            # No max_iterations specified
        }

        # Just verify construction succeeds - looping behavior tested above.
        node_fn = create_agent_node("agent", config, shell_tools)
        assert callable(node_fn)
206
+
207
+
208
class TestBuildPythonTool:
    """Tests for build_python_tool function."""

    def test_creates_tool_with_name(self):
        """Tool has correct name."""
        cfg = PythonToolConfig(
            module="yamlgraph.utils.langsmith",
            function="get_run_details",
            description="Get run details",
        )
        assert build_python_tool("get_run_details", cfg).name == "get_run_details"

    def test_creates_tool_with_description(self):
        """Tool has correct description."""
        cfg = PythonToolConfig(
            module="yamlgraph.utils.langsmith",
            function="get_run_details",
            description="Get details about a LangSmith run",
        )
        tool = build_python_tool("run_details", cfg)
        assert tool.description == "Get details about a LangSmith run"

    def test_tool_is_structured_tool(self):
        """Tool is a LangChain StructuredTool."""
        from langchain_core.tools import StructuredTool

        cfg = PythonToolConfig(
            module="yamlgraph.utils.langsmith",
            function="get_run_details",
            description="Get run details",
        )
        assert isinstance(build_python_tool("test_tool", cfg), StructuredTool)

    def test_tool_executes_function(self):
        """Tool invocation calls the Python function."""
        # os.path.join is a convenient side-effect-free target function.
        cfg = PythonToolConfig(
            module="os.path",
            function="join",
            description="Join paths",
        )
        tool = build_python_tool("path_join", cfg)
        joined = tool.invoke({"a": "/home", "p": "user"})
        assert "/home" in joined or "user" in joined
254
+
255
+
256
class TestAgentWithPythonTools:
    """Tests for agent nodes using Python tools."""

    @staticmethod
    def _turn(content, tool_calls):
        """Build a fake LLM message with *content* and *tool_calls*."""
        msg = MagicMock()
        msg.content = content
        msg.tool_calls = tool_calls
        return msg

    @patch("yamlgraph.tools.agent.create_llm")
    def test_agent_calls_python_tool(self, mock_create_llm):
        """Agent can use Python tools."""
        llm = MagicMock()
        llm.bind_tools.return_value = llm
        llm.invoke.side_effect = [
            # Turn 1: call the python tool.
            self._turn(
                "",
                [
                    {
                        "id": "call1",
                        "name": "my_python_tool",
                        "args": {"a": "/home", "p": "user"},
                    }
                ],
            ),
            # Turn 2: final answer.
            self._turn("Path is /home/user", []),
        ]
        mock_create_llm.return_value = llm

        python_tools = {
            "my_python_tool": PythonToolConfig(
                module="os.path",
                function="join",
                description="Join paths",
            ),
        }
        config = {
            "prompt": "agent",
            "tools": ["my_python_tool"],
            "max_iterations": 5,
            "state_key": "result",
        }

        node_fn = create_agent_node("agent", config, {}, python_tools=python_tools)
        state = node_fn({"input": "Join home and user"})

        assert state["result"] == "Path is /home/user"
        assert state["_agent_iterations"] == 2

    @patch("yamlgraph.tools.agent.create_llm")
    def test_agent_mixes_shell_and_python_tools(self, mock_create_llm):
        """Agent can use both shell and python tools."""
        llm = MagicMock()
        llm.bind_tools.return_value = llm
        llm.invoke.side_effect = [
            # Turn 1: shell tool call.
            self._turn(
                "",
                [{"id": "call1", "name": "echo_tool", "args": {"message": "hello"}}],
            ),
            # Turn 2: python tool call.
            self._turn(
                "",
                [{"id": "call2", "name": "path_tool", "args": {"a": "/", "p": "tmp"}}],
            ),
            # Turn 3: final answer.
            self._turn("Done with both tools", []),
        ]
        mock_create_llm.return_value = llm

        shell_tools = {
            "echo_tool": ShellToolConfig(command="echo {message}", description="Echo"),
        }
        python_tools = {
            "path_tool": PythonToolConfig(
                module="os.path",
                function="join",
                description="Join paths",
            ),
        }
        config = {
            "prompt": "agent",
            "tools": ["echo_tool", "path_tool"],
            "max_iterations": 5,
            "state_key": "result",
        }

        node_fn = create_agent_node(
            "agent", config, shell_tools, python_tools=python_tools
        )
        state = node_fn({"input": "Use both tools"})

        assert state["result"] == "Done with both tools"
        assert state["_agent_iterations"] == 3
@@ -0,0 +1,346 @@
1
+ """Tests for async executor - Phase 2 (003).
2
+
3
+ TDD: RED phase - write tests first.
4
+ """
5
+
6
+ import asyncio
7
+ from unittest.mock import AsyncMock, MagicMock, patch
8
+
9
+ import pytest
10
+ from pydantic import BaseModel
11
+
12
+ from yamlgraph.executor_async import execute_prompt_async
13
+
14
+
15
+ class MockResponse(BaseModel):
16
+ """Mock response model for testing."""
17
+
18
+ summary: str
19
+ score: int
20
+
21
+
22
+ # ==============================================================================
23
+ # execute_prompt_async tests (existing function - verify it works)
24
+ # ==============================================================================
25
+
26
+
27
@pytest.mark.asyncio
async def test_execute_prompt_async_returns_string():
    """execute_prompt_async returns string when no output_model.

    create_llm, invoke_async and load_prompt are all patched, so no network
    or file I/O happens; only the happy-path plumbing is exercised.
    """
    mock_llm = MagicMock()
    # NOTE: the original test also built a `mock_response` MagicMock that was
    # never wired anywhere (invoke_async is patched directly); removed.

    with (
        patch("yamlgraph.executor_async.create_llm", return_value=mock_llm),
        patch(
            "yamlgraph.executor_async.invoke_async", new_callable=AsyncMock
        ) as mock_invoke,
        patch("yamlgraph.executor_async.load_prompt") as mock_load,
    ):
        mock_load.return_value = {
            "system": "You are helpful.",
            "user": "Say hello to {name}",
        }
        mock_invoke.return_value = "Hello, World!"

        result = await execute_prompt_async(
            "greet",
            variables={"name": "World"},
        )

        assert result == "Hello, World!"
        mock_invoke.assert_called_once()
54
+
55
+
56
@pytest.mark.asyncio
async def test_execute_prompt_async_with_output_model():
    """execute_prompt_async returns parsed model when output_model provided."""
    fake_llm = MagicMock()
    parsed = MockResponse(summary="Test", score=42)

    with (
        patch("yamlgraph.executor_async.create_llm", return_value=fake_llm),
        patch(
            "yamlgraph.executor_async.invoke_async", new_callable=AsyncMock
        ) as mock_invoke,
        patch("yamlgraph.executor_async.load_prompt") as mock_load,
    ):
        mock_load.return_value = {
            "system": "Analyze this.",
            "user": "Input: {text}",
        }
        # The patched invoke_async hands back the already-parsed model.
        mock_invoke.return_value = parsed

        result = await execute_prompt_async(
            "analyze",
            variables={"text": "test input"},
            output_model=MockResponse,
        )

        assert isinstance(result, MockResponse)
        assert result.summary == "Test"
        assert result.score == 42
84
+
85
+
86
@pytest.mark.asyncio
async def test_execute_prompt_async_uses_provider_from_yaml():
    """execute_prompt_async extracts provider from YAML metadata."""
    fake_llm = MagicMock()

    with (
        patch(
            "yamlgraph.executor_async.create_llm", return_value=fake_llm
        ) as mock_create,
        patch(
            "yamlgraph.executor_async.invoke_async", new_callable=AsyncMock
        ) as mock_invoke,
        patch("yamlgraph.executor_async.load_prompt") as mock_load,
    ):
        mock_load.return_value = {
            "system": "Hello",
            "user": "{input}",
            "provider": "openai",  # Provider in YAML
        }
        mock_invoke.return_value = "response"

        await execute_prompt_async("test", variables={"input": "x"})

        # create_llm must be handed the provider declared in the prompt YAML.
        mock_create.assert_called_once()
        assert mock_create.call_args.kwargs.get("provider") == "openai"
113
+
114
+
115
+ # ==============================================================================
116
+ # run_graph_async tests (new function)
117
+ # ==============================================================================
118
+
119
+
120
@pytest.mark.asyncio
async def test_run_graph_async_executes_graph():
    """run_graph_async invokes graph asynchronously."""
    from yamlgraph.executor_async import run_graph_async

    # Stand-in for a compiled LangGraph app.
    compiled = AsyncMock()
    compiled.ainvoke.return_value = {"output": "result", "current_step": "done"}

    state = {"input": "test"}
    run_config = {"configurable": {"thread_id": "t1"}}
    result = await run_graph_async(compiled, initial_state=state, config=run_config)

    assert result["output"] == "result"
    # State and config must be forwarded untouched to ainvoke.
    compiled.ainvoke.assert_called_once_with(state, run_config)
140
+
141
+
142
@pytest.mark.asyncio
async def test_run_graph_async_with_checkpointer():
    """run_graph_async works with checkpointer in config."""
    from yamlgraph.executor_async import run_graph_async

    compiled = AsyncMock()
    compiled.ainvoke.return_value = {"result": "ok"}

    outcome = await run_graph_async(
        compiled,
        initial_state={"query": "hello"},
        config={"configurable": {"thread_id": "test-thread"}},
    )

    assert outcome["result"] == "ok"
157
+
158
+
159
@pytest.mark.asyncio
async def test_run_graph_async_handles_interrupt():
    """run_graph_async returns interrupt payload when graph pauses."""
    from yamlgraph.executor_async import run_graph_async

    # Simulate LangGraph's interrupt envelope: a tuple of objects
    # exposing the interrupt payload on .value.
    pending = MagicMock()
    pending.value = {"question": "What is your name?"}

    compiled = AsyncMock()
    compiled.ainvoke.return_value = {"__interrupt__": (pending,)}

    result = await run_graph_async(
        compiled,
        initial_state={},
        config={"configurable": {"thread_id": "t1"}},
    )

    assert "__interrupt__" in result
    assert result["__interrupt__"][0].value == {"question": "What is your name?"}
178
+
179
+
180
@pytest.mark.asyncio
async def test_run_graph_async_resume_with_command():
    """run_graph_async can resume with Command."""
    from langgraph.types import Command

    from yamlgraph.executor_async import run_graph_async

    compiled = AsyncMock()
    compiled.ainvoke.return_value = {"user_name": "Alice", "greeting": "Hello Alice!"}

    # A Command(resume=...) in place of a state dict resumes a paused graph.
    result = await run_graph_async(
        compiled,
        initial_state=Command(resume="Alice"),
        config={"configurable": {"thread_id": "t1"}},
    )

    assert result["user_name"] == "Alice"
    compiled.ainvoke.assert_called_once()
198
+
199
+
200
+ # ==============================================================================
201
+ # compile_graph_async tests (new function)
202
+ # ==============================================================================
203
+
204
+
205
@pytest.mark.asyncio
async def test_compile_graph_async_with_memory_checkpointer():
    """compile_graph_async compiles graph with memory checkpointer."""
    from yamlgraph.executor_async import compile_graph_async

    with patch("yamlgraph.storage.checkpointer_factory.get_checkpointer") as mock_cp:
        from langgraph.checkpoint.memory import MemorySaver

        mock_cp.return_value = MemorySaver()

        graph_config = MagicMock()
        graph_config.checkpointer = {"type": "memory"}

        state_graph = MagicMock()
        compiled = MagicMock()
        state_graph.compile.return_value = compiled

        result = compile_graph_async(state_graph, graph_config)

        state_graph.compile.assert_called_once()
        assert result == compiled
227
+
228
+
229
@pytest.mark.asyncio
async def test_compile_graph_async_sets_async_mode():
    """compile_graph_async passes async_mode=True to get_checkpointer."""
    from yamlgraph.executor_async import compile_graph_async

    with patch("yamlgraph.storage.checkpointer_factory.get_checkpointer") as mock_cp:
        mock_cp.return_value = MagicMock()

        state_graph = MagicMock()
        state_graph.compile.return_value = MagicMock()

        graph_config = MagicMock()
        graph_config.checkpointer = {"type": "redis", "url": "redis://localhost"}

        compile_graph_async(state_graph, graph_config)

        # The async variant must request an async-capable checkpointer.
        mock_cp.assert_called_once_with(graph_config.checkpointer, async_mode=True)
246
+
247
+
248
+ # ==============================================================================
249
+ # load_and_compile_async tests (convenience function)
250
+ # ==============================================================================
251
+
252
+
253
@pytest.mark.asyncio
async def test_load_and_compile_async_returns_compiled_graph():
    """load_and_compile_async loads YAML and returns compiled graph."""
    from yamlgraph.executor_async import load_and_compile_async

    with (
        patch("yamlgraph.graph_loader.load_graph_config") as mock_load,
        patch("yamlgraph.graph_loader.compile_graph") as mock_compile,
        patch("yamlgraph.storage.checkpointer_factory.get_checkpointer") as mock_cp,
    ):
        # Fake parsed graph config.
        graph_config = MagicMock()
        graph_config.name = "test-graph"
        graph_config.version = "1.0"
        graph_config.checkpointer = {"type": "memory"}
        mock_load.return_value = graph_config

        # Fake StateGraph whose compile() yields the compiled app.
        state_graph = MagicMock()
        compiled = MagicMock()
        state_graph.compile.return_value = compiled
        mock_compile.return_value = state_graph

        mock_cp.return_value = None

        result = await load_and_compile_async("graphs/test.yaml")

        assert result == compiled
        mock_load.assert_called_once_with("graphs/test.yaml")
280
+
281
+
282
+ # ==============================================================================
283
+ # Concurrent execution tests
284
+ # ==============================================================================
285
+
286
+
287
@pytest.mark.asyncio
async def test_run_graphs_concurrent():
    """Multiple graphs can run concurrently."""
    from yamlgraph.executor_async import run_graph_async

    # Two independent mock apps, each returning a distinct result.
    apps = []
    for value in ("first", "second"):
        app = AsyncMock()
        app.ainvoke.return_value = {"result": value}
        apps.append(app)

    first, second = await asyncio.gather(
        run_graph_async(apps[0], {}, {"configurable": {"thread_id": "t1"}}),
        run_graph_async(apps[1], {}, {"configurable": {"thread_id": "t2"}}),
    )

    assert first["result"] == "first"
    assert second["result"] == "second"
305
+
306
+
307
+ # ==============================================================================
308
+ # Error handling tests
309
+ # ==============================================================================
310
+
311
+
312
@pytest.mark.asyncio
async def test_run_graph_async_propagates_errors():
    """run_graph_async propagates exceptions from graph execution."""
    from yamlgraph.executor_async import run_graph_async

    compiled = AsyncMock()
    compiled.ainvoke.side_effect = ValueError("Graph execution failed")

    # The wrapper must not swallow the underlying exception.
    with pytest.raises(ValueError, match="Graph execution failed"):
        await run_graph_async(
            compiled,
            initial_state={},
            config={"configurable": {"thread_id": "t1"}},
        )
326
+
327
+
328
@pytest.mark.asyncio
async def test_compile_graph_async_without_checkpointer():
    """compile_graph_async works without checkpointer config."""
    from yamlgraph.executor_async import compile_graph_async

    state_graph = MagicMock()
    compiled = MagicMock()
    state_graph.compile.return_value = compiled

    graph_config = MagicMock()
    graph_config.checkpointer = None

    with patch(
        "yamlgraph.storage.checkpointer_factory.get_checkpointer", return_value=None
    ):
        result = compile_graph_async(state_graph, graph_config)

    # compile() is still called, just with no checkpointer attached.
    state_graph.compile.assert_called_once_with(checkpointer=None)
    assert result == compiled