yamlgraph 0.3.9__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- examples/__init__.py +1 -0
- examples/codegen/__init__.py +5 -0
- examples/codegen/models/__init__.py +13 -0
- examples/codegen/models/schemas.py +76 -0
- examples/codegen/tests/__init__.py +1 -0
- examples/codegen/tests/test_ai_helpers.py +235 -0
- examples/codegen/tests/test_ast_analysis.py +174 -0
- examples/codegen/tests/test_code_analysis.py +134 -0
- examples/codegen/tests/test_code_context.py +301 -0
- examples/codegen/tests/test_code_nav.py +89 -0
- examples/codegen/tests/test_dependency_tools.py +119 -0
- examples/codegen/tests/test_example_tools.py +185 -0
- examples/codegen/tests/test_git_tools.py +112 -0
- examples/codegen/tests/test_impl_agent_schemas.py +193 -0
- examples/codegen/tests/test_impl_agent_v4_graph.py +94 -0
- examples/codegen/tests/test_jedi_analysis.py +226 -0
- examples/codegen/tests/test_meta_tools.py +250 -0
- examples/codegen/tests/test_plan_discovery_prompt.py +98 -0
- examples/codegen/tests/test_syntax_tools.py +85 -0
- examples/codegen/tests/test_synthesize_prompt.py +94 -0
- examples/codegen/tests/test_template_tools.py +244 -0
- examples/codegen/tools/__init__.py +80 -0
- examples/codegen/tools/ai_helpers.py +420 -0
- examples/codegen/tools/ast_analysis.py +92 -0
- examples/codegen/tools/code_context.py +180 -0
- examples/codegen/tools/code_nav.py +52 -0
- examples/codegen/tools/dependency_tools.py +120 -0
- examples/codegen/tools/example_tools.py +188 -0
- examples/codegen/tools/git_tools.py +151 -0
- examples/codegen/tools/impl_executor.py +614 -0
- examples/codegen/tools/jedi_analysis.py +311 -0
- examples/codegen/tools/meta_tools.py +202 -0
- examples/codegen/tools/syntax_tools.py +26 -0
- examples/codegen/tools/template_tools.py +356 -0
- examples/fastapi_interview.py +167 -0
- examples/npc/api/__init__.py +1 -0
- examples/npc/api/app.py +100 -0
- examples/npc/api/routes/__init__.py +5 -0
- examples/npc/api/routes/encounter.py +182 -0
- examples/npc/api/session.py +330 -0
- examples/npc/demo.py +387 -0
- examples/npc/nodes/__init__.py +5 -0
- examples/npc/nodes/image_node.py +92 -0
- examples/npc/run_encounter.py +230 -0
- examples/shared/__init__.py +0 -0
- examples/shared/replicate_tool.py +238 -0
- examples/storyboard/__init__.py +1 -0
- examples/storyboard/generate_videos.py +335 -0
- examples/storyboard/nodes/__init__.py +12 -0
- examples/storyboard/nodes/animated_character_node.py +248 -0
- examples/storyboard/nodes/animated_image_node.py +138 -0
- examples/storyboard/nodes/character_node.py +162 -0
- examples/storyboard/nodes/image_node.py +118 -0
- examples/storyboard/nodes/replicate_tool.py +49 -0
- examples/storyboard/retry_images.py +118 -0
- scripts/demo_async_executor.py +212 -0
- scripts/demo_interview_e2e.py +200 -0
- scripts/demo_streaming.py +140 -0
- scripts/run_interview_demo.py +94 -0
- scripts/test_interrupt_fix.py +26 -0
- tests/__init__.py +1 -0
- tests/conftest.py +178 -0
- tests/integration/__init__.py +1 -0
- tests/integration/test_animated_storyboard.py +63 -0
- tests/integration/test_cli_commands.py +242 -0
- tests/integration/test_colocated_prompts.py +139 -0
- tests/integration/test_map_demo.py +50 -0
- tests/integration/test_memory_demo.py +283 -0
- tests/integration/test_npc_api/__init__.py +1 -0
- tests/integration/test_npc_api/test_routes.py +357 -0
- tests/integration/test_npc_api/test_session.py +216 -0
- tests/integration/test_pipeline_flow.py +105 -0
- tests/integration/test_providers.py +163 -0
- tests/integration/test_resume.py +75 -0
- tests/integration/test_subgraph_integration.py +295 -0
- tests/integration/test_subgraph_interrupt.py +106 -0
- tests/unit/__init__.py +1 -0
- tests/unit/test_agent_nodes.py +355 -0
- tests/unit/test_async_executor.py +346 -0
- tests/unit/test_checkpointer.py +212 -0
- tests/unit/test_checkpointer_factory.py +212 -0
- tests/unit/test_cli.py +121 -0
- tests/unit/test_cli_package.py +81 -0
- tests/unit/test_compile_graph_map.py +132 -0
- tests/unit/test_conditions_routing.py +253 -0
- tests/unit/test_config.py +93 -0
- tests/unit/test_conversation_memory.py +276 -0
- tests/unit/test_database.py +145 -0
- tests/unit/test_deprecation.py +104 -0
- tests/unit/test_executor.py +172 -0
- tests/unit/test_executor_async.py +179 -0
- tests/unit/test_export.py +149 -0
- tests/unit/test_expressions.py +178 -0
- tests/unit/test_feature_brainstorm.py +194 -0
- tests/unit/test_format_prompt.py +145 -0
- tests/unit/test_generic_report.py +200 -0
- tests/unit/test_graph_commands.py +327 -0
- tests/unit/test_graph_linter.py +627 -0
- tests/unit/test_graph_loader.py +357 -0
- tests/unit/test_graph_schema.py +193 -0
- tests/unit/test_inline_schema.py +151 -0
- tests/unit/test_interrupt_node.py +182 -0
- tests/unit/test_issues.py +164 -0
- tests/unit/test_jinja2_prompts.py +85 -0
- tests/unit/test_json_extract.py +134 -0
- tests/unit/test_langsmith.py +600 -0
- tests/unit/test_langsmith_tools.py +204 -0
- tests/unit/test_llm_factory.py +109 -0
- tests/unit/test_llm_factory_async.py +118 -0
- tests/unit/test_loops.py +403 -0
- tests/unit/test_map_node.py +144 -0
- tests/unit/test_no_backward_compat.py +56 -0
- tests/unit/test_node_factory.py +348 -0
- tests/unit/test_passthrough_node.py +126 -0
- tests/unit/test_prompts.py +324 -0
- tests/unit/test_python_nodes.py +198 -0
- tests/unit/test_reliability.py +298 -0
- tests/unit/test_result_export.py +234 -0
- tests/unit/test_router.py +296 -0
- tests/unit/test_sanitize.py +99 -0
- tests/unit/test_schema_loader.py +295 -0
- tests/unit/test_shell_tools.py +229 -0
- tests/unit/test_state_builder.py +331 -0
- tests/unit/test_state_builder_map.py +104 -0
- tests/unit/test_state_config.py +197 -0
- tests/unit/test_streaming.py +307 -0
- tests/unit/test_subgraph.py +596 -0
- tests/unit/test_template.py +190 -0
- tests/unit/test_tool_call_integration.py +164 -0
- tests/unit/test_tool_call_node.py +178 -0
- tests/unit/test_tool_nodes.py +129 -0
- tests/unit/test_websearch.py +234 -0
- yamlgraph/__init__.py +35 -0
- yamlgraph/builder.py +110 -0
- yamlgraph/cli/__init__.py +159 -0
- yamlgraph/cli/__main__.py +6 -0
- yamlgraph/cli/commands.py +231 -0
- yamlgraph/cli/deprecation.py +92 -0
- yamlgraph/cli/graph_commands.py +541 -0
- yamlgraph/cli/validators.py +37 -0
- yamlgraph/config.py +67 -0
- yamlgraph/constants.py +70 -0
- yamlgraph/error_handlers.py +227 -0
- yamlgraph/executor.py +290 -0
- yamlgraph/executor_async.py +288 -0
- yamlgraph/graph_loader.py +451 -0
- yamlgraph/map_compiler.py +150 -0
- yamlgraph/models/__init__.py +36 -0
- yamlgraph/models/graph_schema.py +181 -0
- yamlgraph/models/schemas.py +124 -0
- yamlgraph/models/state_builder.py +236 -0
- yamlgraph/node_factory.py +768 -0
- yamlgraph/routing.py +87 -0
- yamlgraph/schema_loader.py +240 -0
- yamlgraph/storage/__init__.py +20 -0
- yamlgraph/storage/checkpointer.py +72 -0
- yamlgraph/storage/checkpointer_factory.py +123 -0
- yamlgraph/storage/database.py +320 -0
- yamlgraph/storage/export.py +269 -0
- yamlgraph/tools/__init__.py +1 -0
- yamlgraph/tools/agent.py +320 -0
- yamlgraph/tools/graph_linter.py +388 -0
- yamlgraph/tools/langsmith_tools.py +125 -0
- yamlgraph/tools/nodes.py +126 -0
- yamlgraph/tools/python_tool.py +179 -0
- yamlgraph/tools/shell.py +205 -0
- yamlgraph/tools/websearch.py +242 -0
- yamlgraph/utils/__init__.py +48 -0
- yamlgraph/utils/conditions.py +157 -0
- yamlgraph/utils/expressions.py +245 -0
- yamlgraph/utils/json_extract.py +104 -0
- yamlgraph/utils/langsmith.py +416 -0
- yamlgraph/utils/llm_factory.py +118 -0
- yamlgraph/utils/llm_factory_async.py +105 -0
- yamlgraph/utils/logging.py +104 -0
- yamlgraph/utils/prompts.py +171 -0
- yamlgraph/utils/sanitize.py +98 -0
- yamlgraph/utils/template.py +102 -0
- yamlgraph/utils/validators.py +181 -0
- yamlgraph-0.3.9.dist-info/METADATA +1105 -0
- yamlgraph-0.3.9.dist-info/RECORD +185 -0
- yamlgraph-0.3.9.dist-info/WHEEL +5 -0
- yamlgraph-0.3.9.dist-info/entry_points.txt +2 -0
- yamlgraph-0.3.9.dist-info/licenses/LICENSE +33 -0
- yamlgraph-0.3.9.dist-info/top_level.txt +4 -0
scripts/demo_async_executor.py
ADDED
@@ -0,0 +1,212 @@
#!/usr/bin/env python3
"""Async Executor Demo - Showcases async graph execution with interrupts.

Demonstrates:
- Async graph loading with load_and_compile_async()
- Async execution with run_graph_async()
- Interrupt handling and Command(resume=...) flow
- Real-time user interaction

Usage:
    # Interactive mode (real LLM + user input)
    python scripts/demo_async_executor.py --interactive

    # Verification mode (mock inputs for CI)
    python scripts/demo_async_executor.py --verify

    # Custom graph
    python scripts/demo_async_executor.py --graph graphs/my-graph.yaml
"""

import argparse
import asyncio
import sys
from pathlib import Path

# Add project root to path
sys.path.insert(0, str(Path(__file__).parent.parent))

from langgraph.types import Command

from yamlgraph.executor_async import load_and_compile_async, run_graph_async


def print_banner(title: str) -> None:
    """Print a styled banner."""
    width = 50
    print("┌" + "─" * width + "┐")
    print(f"│ {title:<{width-1}}│")
    print("├" + "─" * width + "┤")


def print_footer() -> None:
    """Print footer."""
    print("└" + "─" * 50 + "┘")


def get_interrupt_message(result: dict) -> str:
    """Extract message from interrupt payload."""
    if "__interrupt__" not in result:
        return ""
    interrupt = result["__interrupt__"][0]
    value = interrupt.value
    if isinstance(value, dict):
        return value.get("question") or value.get("prompt") or str(value)
    return str(value)


async def run_demo(
    graph_path: str,
    interactive: bool = False,
    mock_inputs: list[str] | None = None,
) -> dict:
    """Run the async executor demo.

    Args:
        graph_path: Path to YAML graph definition
        interactive: If True, prompt for real user input
        mock_inputs: List of mock inputs for verification mode

    Returns:
        Final state dict
    """
    mock_inputs = mock_inputs or ["TestUser", "Python"]
    mock_index = 0

    print_banner("🚀 Async Executor Demo")
    print(f"│ Graph: {graph_path:<41}│")
    print(f"│ Mode: {'interactive' if interactive else 'verify':<42}│")
    print("│" + " " * 50 + "│")

    # Load and compile
    print("│ Loading graph...                                 │")
    try:
        app = await load_and_compile_async(graph_path)
        print("│ ✅ Compiled with memory checkpointer             │")
    except FileNotFoundError:
        print(f"│ ❌ Graph not found: {graph_path:<28}│")
        print_footer()
        return {"error": "Graph not found"}

    print("│" + " " * 50 + "│")

    # Config with thread_id for checkpointer
    config = {"configurable": {"thread_id": "demo-async-001"}}

    # Initial run
    print("│ Running graph async...                           │")
    result = await run_graph_async(app, {"input": "start"}, config)

    # Show welcome if present
    if welcome := result.get("welcome_message"):
        preview = welcome[:40] + "..." if len(welcome) > 40 else welcome
        print(f"│ 💬 Welcome: \"{preview}\"│")

    # Interrupt loop
    interrupt_count = 0
    while "__interrupt__" in result:
        interrupt_count += 1
        message = get_interrupt_message(result)
        print("│" + " " * 50 + "│")
        print(f"│ ⏸️ INTERRUPT #{interrupt_count}: {message:<30}│")

        # Get input
        if interactive:
            print("│" + " " * 50 + "│")
            user_input = input("│ > ")
        else:
            user_input = mock_inputs[mock_index] if mock_index < len(mock_inputs) else "default"
            mock_index += 1
            print(f"│ > {user_input:<47}│")

        # Resume
        print("│" + " " * 50 + "│")
        print("│ Resuming...                                      │")
        result = await run_graph_async(app, Command(resume=user_input), config)

    # Complete
    print("│" + " " * 50 + "│")
    print("│ ✅ Complete!                                      │")

    # Show final response
    response = result.get("greeting") or result.get("response") or result.get("output")
    if response:
        # Truncate for display
        preview = response[:38] + "..." if len(response) > 38 else response
        print(f"│ 📝 Response: \"{preview}\"│")

    print_footer()

    # Verification output
    if not interactive:
        print("\n📊 Final State:")
        for key, value in result.items():
            if not key.startswith("_") and value is not None:
                val_str = str(value)[:50]
                print(f"  {key}: {val_str}")

    return result


async def main():
    """Main entry point."""
    parser = argparse.ArgumentParser(description="Async Executor Demo")
    parser.add_argument(
        "--graph",
        default="graphs/interview-demo.yaml",
        help="Path to YAML graph definition",
    )
    parser.add_argument(
        "--interactive",
        action="store_true",
        help="Enable interactive mode with real user input",
    )
    parser.add_argument(
        "--verify",
        action="store_true",
        help="Run in verification mode with mock inputs",
    )
    parser.add_argument(
        "--inputs",
        nargs="*",
        default=["Alice", "async programming"],
        help="Mock inputs for verification mode",
    )
    args = parser.parse_args()

    # Verify mode is default if neither specified
    interactive = args.interactive and not args.verify

    result = await run_demo(
        graph_path=args.graph,
        interactive=interactive,
        mock_inputs=args.inputs,
    )

    # Exit with error if graph failed
    if "error" in result:
        sys.exit(1)

    # Verify expected state in verify mode
    if args.verify:
        print("\n🔍 Verification:")
        checks = [
            ("user_name", result.get("user_name")),
            ("user_topic", result.get("user_topic")),
            ("greeting", result.get("greeting")),
        ]
        all_pass = True
        for field, value in checks:
            status = "✅" if value else "❌"
            print(f"  {status} {field}: {'present' if value else 'MISSING'}")
            if not value:
                all_pass = False

        if not all_pass:
            print("\n❌ Verification FAILED")
            sys.exit(1)
        print("\n✅ Verification PASSED")


if __name__ == "__main__":
    asyncio.run(main())

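Stripped of the banner printing, the script above reduces to a small interrupt/resume loop. A condensed sketch of that flow, using only calls that appear in the demo itself (the graph path, state keys, and mock answers are the demo's own; this sketch is not part of the package):

# Condensed sketch of the async interrupt/resume flow demonstrated above.
# Assumes graphs/interview-demo.yaml and LLM credentials, as in the demo script.
import asyncio

from langgraph.types import Command

from yamlgraph.executor_async import load_and_compile_async, run_graph_async


async def run_sketch() -> dict:
    app = await load_and_compile_async("graphs/interview-demo.yaml")
    config = {"configurable": {"thread_id": "sketch-001"}}
    answers = iter(["Alice", "Python"])  # mock answers, as in --verify mode

    # Run until the first interrupt, then resume once per pending interrupt.
    result = await run_graph_async(app, {"input": "start"}, config)
    while "__interrupt__" in result:
        result = await run_graph_async(app, Command(resume=next(answers, "default")), config)
    return result


if __name__ == "__main__":
    print(asyncio.run(run_sketch()).get("greeting"))
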
scripts/demo_interview_e2e.py
ADDED
@@ -0,0 +1,200 @@
#!/usr/bin/env python3
"""End-to-end test for interrupt node demo.

This script tests the full interrupt/resume flow:
1. Graph pauses at first interrupt (ask_name)
2. Resume with "Alice"
3. Graph pauses at second interrupt (ask_topic)
4. Resume with "Python"
5. LLM generates personalized greeting

Can run as:
- Automated test: python scripts/demo_interview_e2e.py
- Interactive mode: python scripts/demo_interview_e2e.py --interactive
"""

import argparse
import sys
import uuid

from langgraph.types import Command

from yamlgraph.graph_loader import (
    compile_graph,
    get_checkpointer_for_graph,
    load_graph_config,
)


def run_demo(interactive: bool = False) -> dict:
    """Run the interview demo.

    Args:
        interactive: If True, prompt for user input. Otherwise use test values.

    Returns:
        Final state dict with greeting
    """
    print("\n" + "=" * 50)
    print("🤖 YAMLGraph Interview Demo - Human-in-the-Loop")
    print("=" * 50 + "\n")

    # Load and compile graph
    config = load_graph_config("graphs/interview-demo.yaml")
    graph = compile_graph(config)
    checkpointer = get_checkpointer_for_graph(config)
    app = graph.compile(checkpointer=checkpointer)

    # Generate unique thread ID
    thread_id = str(uuid.uuid4())
    run_config = {"configurable": {"thread_id": thread_id}}

    print(f"Thread ID: {thread_id[:8]}...")
    print()

    # First invoke - LLM generates welcome, then hits first interrupt
    result = app.invoke({}, run_config)
    interrupt = result.get("__interrupt__")

    if not interrupt:
        raise RuntimeError("Expected interrupt at ask_name node")

    # Show the LLM-generated welcome message
    welcome = result.get("welcome_message", "")
    if welcome:
        print(f"🤖 {welcome}")
        print()

    question1 = interrupt[0].value
    print(f"💬 {question1}")

    if interactive:
        answer1 = input("  Your answer: ").strip()
    else:
        answer1 = "Alice"
        print(f"  Your answer: {answer1}")

    print()

    # Resume with first answer - hits second interrupt
    result = app.invoke(Command(resume=answer1), run_config)
    interrupt = result.get("__interrupt__")

    if not interrupt:
        raise RuntimeError("Expected interrupt at ask_topic node")

    question2 = interrupt[0].value
    print(f"💬 {question2}")

    if interactive:
        answer2 = input("  Your answer: ").strip()
    else:
        answer2 = "Python"
        print(f"  Your answer: {answer2}")

    print()

    # Resume with second answer - completes graph
    result = app.invoke(Command(resume=answer2), run_config)

    # Verify no more interrupts
    if result.get("__interrupt__"):
        raise RuntimeError("Unexpected interrupt after ask_topic")

    # Display result
    print("-" * 50)
    print("✨ Final Response:")
    print("-" * 50)

    greeting = result.get("greeting", "")
    if greeting:
        print(greeting)
    else:
        print("(No greeting generated)")

    print()
    print("=" * 50)
    print("Demo complete!")
    print("=" * 50 + "\n")

    return result


def verify_result(result: dict) -> bool:
    """Verify the demo produced expected output.

    Args:
        result: Final state dict

    Returns:
        True if verification passed
    """
    errors = []

    # Check state contains expected keys
    if "user_name" not in result:
        errors.append("Missing 'user_name' in state")
    elif result["user_name"] != "Alice":
        errors.append(f"Expected user_name='Alice', got '{result['user_name']}'")

    if "user_topic" not in result:
        errors.append("Missing 'user_topic' in state")
    elif result["user_topic"] != "Python":
        errors.append(f"Expected user_topic='Python', got '{result['user_topic']}'")

    if "greeting" not in result:
        errors.append("Missing 'greeting' in state")
    elif not result["greeting"]:
        errors.append("Greeting is empty")

    # Check greeting mentions the user and topic
    greeting = str(result.get("greeting", "")).lower()
    if "alice" not in greeting:
        errors.append("Greeting doesn't mention 'Alice'")
    if "python" not in greeting:
        errors.append("Greeting doesn't mention 'Python'")

    if errors:
        print("\n❌ Verification FAILED:")
        for error in errors:
            print(f"  - {error}")
        return False

    print("\n✅ Verification PASSED:")
    print("  - State contains user_name='Alice'")
    print("  - State contains user_topic='Python'")
    print("  - Greeting mentions both user and topic")
    return True


def main():
    """Main entry point."""
    parser = argparse.ArgumentParser(description="Interview demo E2E test")
    parser.add_argument(
        "--interactive",
        "-i",
        action="store_true",
        help="Run in interactive mode (prompt for input)",
    )
    parser.add_argument(
        "--verify",
        "-v",
        action="store_true",
        help="Verify output after demo",
    )
    args = parser.parse_args()

    try:
        result = run_demo(interactive=args.interactive)

        if args.verify or not args.interactive:
            success = verify_result(result)
            sys.exit(0 if success else 1)

    except Exception as e:
        print(f"\n❌ Demo failed: {e}")
        sys.exit(1)


if __name__ == "__main__":
    main()

scripts/demo_streaming.py
ADDED
@@ -0,0 +1,140 @@
#!/usr/bin/env python3
"""Streaming Demo - Showcases token-by-token LLM output.

Demonstrates:
- execute_prompt_streaming() async generator
- Real-time token output to terminal
- Collecting streamed tokens

Usage:
    # Interactive streaming
    python scripts/demo_streaming.py

    # With custom prompt
    python scripts/demo_streaming.py --prompt "Tell me a short story about a robot"

    # Verification mode (no LLM, mock output)
    python scripts/demo_streaming.py --verify
"""

import argparse
import asyncio
import sys
from pathlib import Path

# Add project root to path
sys.path.insert(0, str(Path(__file__).parent.parent))

from yamlgraph.executor_async import execute_prompt_streaming


def print_banner(title: str) -> None:
    """Print a styled banner."""
    width = 50
    print("┌" + "─" * width + "┐")
    print(f"│ {title:<{width-1}}│")
    print("├" + "─" * width + "┤")


def print_footer() -> None:
    """Print footer."""
    print("└" + "─" * 50 + "┘")


async def run_streaming_demo(
    user_prompt: str,
    verify: bool = False,
) -> str:
    """Run the streaming demo.

    Args:
        user_prompt: What to ask the LLM
        verify: If True, skip actual LLM call

    Returns:
        Full collected response
    """
    print_banner("🚀 Streaming Demo")
    print(f"│ Prompt: {user_prompt[:40]:<41}│")
    print("│" + " " * 50 + "│")

    if verify:
        # Mock streaming for verification
        print("│ [Verify mode - mock streaming]                   │")
        print("│" + " " * 50 + "│")
        print("│ Response:                                        │")
        print("│ ", end="")

        mock_response = "Hello! This is a mock streaming response for testing purposes."
        for char in mock_response:
            print(char, end="", flush=True)
            await asyncio.sleep(0.02)

        print()
        print("│" + " " * 50 + "│")
        print_footer()
        return mock_response

    # Real streaming from LLM
    print("│ Streaming response:                              │")
    print("│" + " " * 50 + "│")

    tokens_collected = []

    # Create a simple prompt YAML on the fly by using greet prompt
    # In real usage, you'd have a prompt file
    try:
        async for token in execute_prompt_streaming(
            "greet",
            variables={"name": "streaming demo user", "style": user_prompt},
            provider="mistral",
        ):
            print(token, end="", flush=True)
            tokens_collected.append(token)
    except Exception as e:
        print(f"\n│ ❌ Error: {e!s:.40}│")
        print_footer()
        return ""

    full_response = "".join(tokens_collected)

    print()
    print("│" + " " * 50 + "│")
    print(f"│ ✅ Received {len(tokens_collected)} chunks, {len(full_response)} chars│")
    print_footer()

    return full_response


async def main():
    """Main entry point."""
    parser = argparse.ArgumentParser(description="Streaming Demo")
    parser.add_argument(
        "--prompt",
        default="casual and friendly",
        help="Style for the greeting prompt",
    )
    parser.add_argument(
        "--verify",
        action="store_true",
        help="Run in verification mode (mock output)",
    )
    args = parser.parse_args()

    result = await run_streaming_demo(
        user_prompt=args.prompt,
        verify=args.verify,
    )

    if args.verify:
        # Verification check
        print("\n🔍 Verification:")
        if len(result) > 0:
            print("  ✅ Streaming produced output")
        else:
            print("  ❌ No output received")
            sys.exit(1)


if __name__ == "__main__":
    asyncio.run(main())

scripts/run_interview_demo.py
ADDED
@@ -0,0 +1,94 @@
#!/usr/bin/env python3
"""Demo script for human-in-the-loop interrupt nodes.

This script demonstrates the interrupt feature:
1. Graph pauses at interrupt nodes
2. User provides input via terminal
3. Graph resumes with user's response

Usage:
    python scripts/run_interview_demo.py
"""

import uuid

from langgraph.types import Command

from yamlgraph.graph_loader import (
    compile_graph,
    get_checkpointer_for_graph,
    load_graph_config,
)


def run_interview():
    """Run the interactive interview demo."""
    print("\n" + "=" * 50)
    print("🤖 YAMLGraph Interview Demo - Human-in-the-Loop")
    print("=" * 50 + "\n")

    # Load and compile graph
    config = load_graph_config("graphs/interview-demo.yaml")
    graph = compile_graph(config)

    # Get checkpointer (required for interrupts)
    checkpointer = get_checkpointer_for_graph(config)

    # Compile with checkpointer
    app = graph.compile(checkpointer=checkpointer)

    # Generate unique thread ID for this session
    thread_id = str(uuid.uuid4())
    run_config = {"configurable": {"thread_id": thread_id}}

    print("Starting interview...\n")

    # Initial invocation - will hit first interrupt
    state = {}
    result = app.invoke(state, run_config)

    # Loop through interrupts
    while True:
        # Check for interrupt
        interrupt_info = result.get("__interrupt__")

        if interrupt_info:
            # Extract the interrupt payload (question)
            payload = interrupt_info[0].value if interrupt_info else "Input needed:"
            print(f"\n💬 {payload}")

            # Get user input
            user_response = input("  Your answer: ").strip()

            if user_response.lower() in ("quit", "exit", "q"):
                print("\n👋 Goodbye!")
                return

            # Resume with user's response
            result = app.invoke(Command(resume=user_response), run_config)
        else:
            # No more interrupts - we're done
            break

    # Display final result
    print("\n" + "-" * 50)
    print("✨ Final Response:")
    print("-" * 50)

    greeting = result.get("greeting")
    if greeting:
        # Handle both string and Pydantic model responses
        if hasattr(greeting, "content"):
            print(greeting.content)
        elif isinstance(greeting, str):
            print(greeting)
        else:
            print(greeting)

    print("\n" + "=" * 50)
    print("Demo complete!")
    print("=" * 50 + "\n")


if __name__ == "__main__":
    run_interview()

scripts/test_interrupt_fix.py
ADDED
@@ -0,0 +1,26 @@
"""Test FR-006: interrupt_output_mapping with subgraph."""
from pathlib import Path
from langgraph.checkpoint.memory import MemorySaver

from yamlgraph.graph_loader import load_graph_config, compile_graph

print("=== Testing FR-006: interrupt_output_mapping ===")
parent_path = Path("graphs/interrupt-parent.yaml")
config = load_graph_config(parent_path)
state_graph = compile_graph(config)
checkpointer = MemorySaver()
parent_app = state_graph.compile(checkpointer=checkpointer)

thread_config = {"configurable": {"thread_id": "test-fr006"}}

result = parent_app.invoke({"user_input": "hello"}, thread_config)
print("Parent result keys:", result.keys())
print()
print("child_phase:", result.get("child_phase"))
print("child_data:", result.get("child_data"))
print("__interrupt__:", "__interrupt__" in result)
print()
if "child_phase" in result and "child_data" in result:
    print("✅ FR-006 SUCCESS: Child state mapped to parent!")
else:
    print("❌ FR-006 FAILED: Child state not in result")

tests/__init__.py
ADDED
@@ -0,0 +1 @@
"""Test suite for yamlgraph."""