yamlgraph 0.3.9__py3-none-any.whl
This diff shows the contents of publicly released package versions as they appear in their respective public registries, and is provided for informational purposes only.
- examples/__init__.py +1 -0
- examples/codegen/__init__.py +5 -0
- examples/codegen/models/__init__.py +13 -0
- examples/codegen/models/schemas.py +76 -0
- examples/codegen/tests/__init__.py +1 -0
- examples/codegen/tests/test_ai_helpers.py +235 -0
- examples/codegen/tests/test_ast_analysis.py +174 -0
- examples/codegen/tests/test_code_analysis.py +134 -0
- examples/codegen/tests/test_code_context.py +301 -0
- examples/codegen/tests/test_code_nav.py +89 -0
- examples/codegen/tests/test_dependency_tools.py +119 -0
- examples/codegen/tests/test_example_tools.py +185 -0
- examples/codegen/tests/test_git_tools.py +112 -0
- examples/codegen/tests/test_impl_agent_schemas.py +193 -0
- examples/codegen/tests/test_impl_agent_v4_graph.py +94 -0
- examples/codegen/tests/test_jedi_analysis.py +226 -0
- examples/codegen/tests/test_meta_tools.py +250 -0
- examples/codegen/tests/test_plan_discovery_prompt.py +98 -0
- examples/codegen/tests/test_syntax_tools.py +85 -0
- examples/codegen/tests/test_synthesize_prompt.py +94 -0
- examples/codegen/tests/test_template_tools.py +244 -0
- examples/codegen/tools/__init__.py +80 -0
- examples/codegen/tools/ai_helpers.py +420 -0
- examples/codegen/tools/ast_analysis.py +92 -0
- examples/codegen/tools/code_context.py +180 -0
- examples/codegen/tools/code_nav.py +52 -0
- examples/codegen/tools/dependency_tools.py +120 -0
- examples/codegen/tools/example_tools.py +188 -0
- examples/codegen/tools/git_tools.py +151 -0
- examples/codegen/tools/impl_executor.py +614 -0
- examples/codegen/tools/jedi_analysis.py +311 -0
- examples/codegen/tools/meta_tools.py +202 -0
- examples/codegen/tools/syntax_tools.py +26 -0
- examples/codegen/tools/template_tools.py +356 -0
- examples/fastapi_interview.py +167 -0
- examples/npc/api/__init__.py +1 -0
- examples/npc/api/app.py +100 -0
- examples/npc/api/routes/__init__.py +5 -0
- examples/npc/api/routes/encounter.py +182 -0
- examples/npc/api/session.py +330 -0
- examples/npc/demo.py +387 -0
- examples/npc/nodes/__init__.py +5 -0
- examples/npc/nodes/image_node.py +92 -0
- examples/npc/run_encounter.py +230 -0
- examples/shared/__init__.py +0 -0
- examples/shared/replicate_tool.py +238 -0
- examples/storyboard/__init__.py +1 -0
- examples/storyboard/generate_videos.py +335 -0
- examples/storyboard/nodes/__init__.py +12 -0
- examples/storyboard/nodes/animated_character_node.py +248 -0
- examples/storyboard/nodes/animated_image_node.py +138 -0
- examples/storyboard/nodes/character_node.py +162 -0
- examples/storyboard/nodes/image_node.py +118 -0
- examples/storyboard/nodes/replicate_tool.py +49 -0
- examples/storyboard/retry_images.py +118 -0
- scripts/demo_async_executor.py +212 -0
- scripts/demo_interview_e2e.py +200 -0
- scripts/demo_streaming.py +140 -0
- scripts/run_interview_demo.py +94 -0
- scripts/test_interrupt_fix.py +26 -0
- tests/__init__.py +1 -0
- tests/conftest.py +178 -0
- tests/integration/__init__.py +1 -0
- tests/integration/test_animated_storyboard.py +63 -0
- tests/integration/test_cli_commands.py +242 -0
- tests/integration/test_colocated_prompts.py +139 -0
- tests/integration/test_map_demo.py +50 -0
- tests/integration/test_memory_demo.py +283 -0
- tests/integration/test_npc_api/__init__.py +1 -0
- tests/integration/test_npc_api/test_routes.py +357 -0
- tests/integration/test_npc_api/test_session.py +216 -0
- tests/integration/test_pipeline_flow.py +105 -0
- tests/integration/test_providers.py +163 -0
- tests/integration/test_resume.py +75 -0
- tests/integration/test_subgraph_integration.py +295 -0
- tests/integration/test_subgraph_interrupt.py +106 -0
- tests/unit/__init__.py +1 -0
- tests/unit/test_agent_nodes.py +355 -0
- tests/unit/test_async_executor.py +346 -0
- tests/unit/test_checkpointer.py +212 -0
- tests/unit/test_checkpointer_factory.py +212 -0
- tests/unit/test_cli.py +121 -0
- tests/unit/test_cli_package.py +81 -0
- tests/unit/test_compile_graph_map.py +132 -0
- tests/unit/test_conditions_routing.py +253 -0
- tests/unit/test_config.py +93 -0
- tests/unit/test_conversation_memory.py +276 -0
- tests/unit/test_database.py +145 -0
- tests/unit/test_deprecation.py +104 -0
- tests/unit/test_executor.py +172 -0
- tests/unit/test_executor_async.py +179 -0
- tests/unit/test_export.py +149 -0
- tests/unit/test_expressions.py +178 -0
- tests/unit/test_feature_brainstorm.py +194 -0
- tests/unit/test_format_prompt.py +145 -0
- tests/unit/test_generic_report.py +200 -0
- tests/unit/test_graph_commands.py +327 -0
- tests/unit/test_graph_linter.py +627 -0
- tests/unit/test_graph_loader.py +357 -0
- tests/unit/test_graph_schema.py +193 -0
- tests/unit/test_inline_schema.py +151 -0
- tests/unit/test_interrupt_node.py +182 -0
- tests/unit/test_issues.py +164 -0
- tests/unit/test_jinja2_prompts.py +85 -0
- tests/unit/test_json_extract.py +134 -0
- tests/unit/test_langsmith.py +600 -0
- tests/unit/test_langsmith_tools.py +204 -0
- tests/unit/test_llm_factory.py +109 -0
- tests/unit/test_llm_factory_async.py +118 -0
- tests/unit/test_loops.py +403 -0
- tests/unit/test_map_node.py +144 -0
- tests/unit/test_no_backward_compat.py +56 -0
- tests/unit/test_node_factory.py +348 -0
- tests/unit/test_passthrough_node.py +126 -0
- tests/unit/test_prompts.py +324 -0
- tests/unit/test_python_nodes.py +198 -0
- tests/unit/test_reliability.py +298 -0
- tests/unit/test_result_export.py +234 -0
- tests/unit/test_router.py +296 -0
- tests/unit/test_sanitize.py +99 -0
- tests/unit/test_schema_loader.py +295 -0
- tests/unit/test_shell_tools.py +229 -0
- tests/unit/test_state_builder.py +331 -0
- tests/unit/test_state_builder_map.py +104 -0
- tests/unit/test_state_config.py +197 -0
- tests/unit/test_streaming.py +307 -0
- tests/unit/test_subgraph.py +596 -0
- tests/unit/test_template.py +190 -0
- tests/unit/test_tool_call_integration.py +164 -0
- tests/unit/test_tool_call_node.py +178 -0
- tests/unit/test_tool_nodes.py +129 -0
- tests/unit/test_websearch.py +234 -0
- yamlgraph/__init__.py +35 -0
- yamlgraph/builder.py +110 -0
- yamlgraph/cli/__init__.py +159 -0
- yamlgraph/cli/__main__.py +6 -0
- yamlgraph/cli/commands.py +231 -0
- yamlgraph/cli/deprecation.py +92 -0
- yamlgraph/cli/graph_commands.py +541 -0
- yamlgraph/cli/validators.py +37 -0
- yamlgraph/config.py +67 -0
- yamlgraph/constants.py +70 -0
- yamlgraph/error_handlers.py +227 -0
- yamlgraph/executor.py +290 -0
- yamlgraph/executor_async.py +288 -0
- yamlgraph/graph_loader.py +451 -0
- yamlgraph/map_compiler.py +150 -0
- yamlgraph/models/__init__.py +36 -0
- yamlgraph/models/graph_schema.py +181 -0
- yamlgraph/models/schemas.py +124 -0
- yamlgraph/models/state_builder.py +236 -0
- yamlgraph/node_factory.py +768 -0
- yamlgraph/routing.py +87 -0
- yamlgraph/schema_loader.py +240 -0
- yamlgraph/storage/__init__.py +20 -0
- yamlgraph/storage/checkpointer.py +72 -0
- yamlgraph/storage/checkpointer_factory.py +123 -0
- yamlgraph/storage/database.py +320 -0
- yamlgraph/storage/export.py +269 -0
- yamlgraph/tools/__init__.py +1 -0
- yamlgraph/tools/agent.py +320 -0
- yamlgraph/tools/graph_linter.py +388 -0
- yamlgraph/tools/langsmith_tools.py +125 -0
- yamlgraph/tools/nodes.py +126 -0
- yamlgraph/tools/python_tool.py +179 -0
- yamlgraph/tools/shell.py +205 -0
- yamlgraph/tools/websearch.py +242 -0
- yamlgraph/utils/__init__.py +48 -0
- yamlgraph/utils/conditions.py +157 -0
- yamlgraph/utils/expressions.py +245 -0
- yamlgraph/utils/json_extract.py +104 -0
- yamlgraph/utils/langsmith.py +416 -0
- yamlgraph/utils/llm_factory.py +118 -0
- yamlgraph/utils/llm_factory_async.py +105 -0
- yamlgraph/utils/logging.py +104 -0
- yamlgraph/utils/prompts.py +171 -0
- yamlgraph/utils/sanitize.py +98 -0
- yamlgraph/utils/template.py +102 -0
- yamlgraph/utils/validators.py +181 -0
- yamlgraph-0.3.9.dist-info/METADATA +1105 -0
- yamlgraph-0.3.9.dist-info/RECORD +185 -0
- yamlgraph-0.3.9.dist-info/WHEEL +5 -0
- yamlgraph-0.3.9.dist-info/entry_points.txt +2 -0
- yamlgraph-0.3.9.dist-info/licenses/LICENSE +33 -0
- yamlgraph-0.3.9.dist-info/top_level.txt +4 -0
@@ -0,0 +1,194 @@
+"""Tests for feature-brainstorm graph.
+
+TDD: Write tests first, then implement graph and prompts.
+"""
+
+from pathlib import Path
+
+import yaml
+
+from yamlgraph.tools.graph_linter import lint_graph
+
+# Use absolute paths relative to project root
+PROJECT_ROOT = Path(__file__).parent.parent.parent
+GRAPH_PATH = PROJECT_ROOT / "graphs/feature-brainstorm.yaml"
+PROMPTS_DIR = PROJECT_ROOT / "prompts/feature-brainstorm"
+
+
+class TestFeatureBrainstormStructure:
+    """Test graph file structure and validity."""
+
+    def test_graph_file_exists(self):
+        """Graph file should exist."""
+        assert GRAPH_PATH.exists(), f"Missing {GRAPH_PATH}"
+
+    def test_graph_passes_linter(self):
+        """Graph should pass all lint checks."""
+        result = lint_graph(GRAPH_PATH, project_root=PROJECT_ROOT)
+        assert result.valid, f"Lint errors: {[i.message for i in result.issues]}"
+
+    def test_graph_has_required_fields(self):
+        """Graph should have name, description, state, tools, nodes, edges."""
+        with open(GRAPH_PATH) as f:
+            graph = yaml.safe_load(f)
+
+        assert "name" in graph, "Missing 'name'"
+        assert "description" in graph, "Missing 'description'"
+        assert "state" in graph, "Missing 'state'"
+        assert "tools" in graph, "Missing 'tools'"
+        assert "nodes" in graph, "Missing 'nodes'"
+        assert "edges" in graph, "Missing 'edges'"
+
+    def test_graph_has_focus_state_variable(self):
+        """Graph should have optional 'focus' state variable."""
+        with open(GRAPH_PATH) as f:
+            graph = yaml.safe_load(f)
+
+        state = graph.get("state", {})
+        assert "focus" in state, "Missing 'focus' in state"
+
+
+class TestFeatureBrainstormTools:
+    """Test tool definitions."""
+
+    def test_has_codebase_reading_tools(self):
+        """Graph should have tools to read codebase."""
+        with open(GRAPH_PATH) as f:
+            graph = yaml.safe_load(f)
+
+        tools = graph.get("tools", {})
+        tool_names = set(tools.keys())
+
+        # Should have at least these for context gathering
+        expected = {"read_patterns", "read_readme", "search_todos"}
+        missing = expected - tool_names
+        assert not missing, f"Missing tools: {missing}"
+
+    def test_has_websearch_tool(self):
+        """Graph should have websearch tool for research."""
+        with open(GRAPH_PATH) as f:
+            graph = yaml.safe_load(f)
+
+        tools = graph.get("tools", {})
+
+        # Find websearch tool
+        websearch_tools = [
+            name for name, config in tools.items() if config.get("type") == "websearch"
+        ]
+        assert websearch_tools, "Missing websearch tool"
+
+
+class TestFeatureBrainstormNodes:
+    """Test node definitions."""
+
+    def test_has_gather_context_node(self):
+        """Graph should have gather_context agent node."""
+        with open(GRAPH_PATH) as f:
+            graph = yaml.safe_load(f)
+
+        nodes = graph.get("nodes", {})
+        assert "gather_context" in nodes, "Missing 'gather_context' node"
+        assert nodes["gather_context"]["type"] == "agent"
+
+    def test_has_research_node(self):
+        """Graph should have research agent node."""
+        with open(GRAPH_PATH) as f:
+            graph = yaml.safe_load(f)
+
+        nodes = graph.get("nodes", {})
+        assert "research_alternatives" in nodes, "Missing 'research_alternatives' node"
+
+    def test_has_brainstorm_node(self):
+        """Graph should have brainstorm LLM node."""
+        with open(GRAPH_PATH) as f:
+            graph = yaml.safe_load(f)
+
+        nodes = graph.get("nodes", {})
+        assert "brainstorm" in nodes, "Missing 'brainstorm' node"
+        assert nodes["brainstorm"]["type"] == "llm"
+
+    def test_has_prioritize_node(self):
+        """Graph should have prioritize LLM node."""
+        with open(GRAPH_PATH) as f:
+            graph = yaml.safe_load(f)
+
+        nodes = graph.get("nodes", {})
+        assert "prioritize" in nodes, "Missing 'prioritize' node"
+
+
+class TestFeatureBrainstormPrompts:
+    """Test prompt files exist and are valid."""
+
+    def test_prompts_directory_exists(self):
+        """Prompts directory should exist."""
+        assert PROMPTS_DIR.exists(), f"Missing {PROMPTS_DIR}"
+
+    def test_gather_prompt_exists(self):
+        """gather.yaml prompt should exist."""
+        prompt_path = PROMPTS_DIR / "gather.yaml"
+        assert prompt_path.exists(), f"Missing {prompt_path}"
+
+    def test_research_prompt_exists(self):
+        """research.yaml prompt should exist."""
+        prompt_path = PROMPTS_DIR / "research.yaml"
+        assert prompt_path.exists(), f"Missing {prompt_path}"
+
+    def test_ideate_prompt_exists(self):
+        """ideate.yaml prompt should exist."""
+        prompt_path = PROMPTS_DIR / "ideate.yaml"
+        assert prompt_path.exists(), f"Missing {prompt_path}"
+
+    def test_prioritize_prompt_exists(self):
+        """prioritize.yaml prompt should exist."""
+        prompt_path = PROMPTS_DIR / "prioritize.yaml"
+        assert prompt_path.exists(), f"Missing {prompt_path}"
+
+    def test_prompts_have_required_fields(self):
+        """All prompts should have system and user fields."""
+        for prompt_file in PROMPTS_DIR.glob("*.yaml"):
+            with open(prompt_file) as f:
+                prompt = yaml.safe_load(f)
+
+            assert (
+                "system" in prompt or "user" in prompt
+            ), f"{prompt_file.name} missing 'system' or 'user'"
+
+
+class TestFeatureBrainstormEdges:
+    """Test edge definitions create valid flow."""
+
+    def test_starts_with_gather_context(self):
+        """Graph should start with gather_context."""
+        with open(GRAPH_PATH) as f:
+            graph = yaml.safe_load(f)
+
+        edges = graph.get("edges", [])
+        start_edges = [e for e in edges if e.get("from") == "START"]
+
+        assert start_edges, "Missing START edge"
+        assert start_edges[0]["to"] == "gather_context"
+
+    def test_ends_with_prioritize(self):
+        """Graph should end after prioritize."""
+        with open(GRAPH_PATH) as f:
+            graph = yaml.safe_load(f)
+
+        edges = graph.get("edges", [])
+        end_edges = [e for e in edges if e.get("to") == "END"]
+
+        assert end_edges, "Missing END edge"
+        # Last node before END should be prioritize
+        assert any(e["from"] == "prioritize" for e in end_edges)
+
+    def test_has_complete_flow(self):
+        """Graph should have edges connecting all nodes."""
+        with open(GRAPH_PATH) as f:
+            graph = yaml.safe_load(f)
+
+        edges = graph.get("edges", [])
+        nodes = set(graph.get("nodes", {}).keys())
+
+        # All nodes should be reachable (appear as 'to')
+        targets = {e["to"] for e in edges if e["to"] != "END"}
+        unreachable = nodes - targets
+        assert not unreachable, f"Unreachable nodes: {unreachable}"
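The 194-line hunk above matches tests/unit/test_feature_brainstorm.py in the file listing. For orientation, here is a minimal sketch of a graphs/feature-brainstorm.yaml definition that would satisfy these structural tests, expressed in Python by building the dict and dumping it with PyYAML. Only the top-level keys, the tool and node names and types, the 'focus' state entry, and the START/END endpoints are taken from the assertions above; the prompt paths, tool configurations, state entry format, and the exact routing between the middle nodes are illustrative assumptions, not the packaged graph.

import yaml

# Sketch only: keys and names asserted by the tests; other values are placeholders.
graph = {
    "name": "feature-brainstorm",
    "description": "Brainstorm and prioritize feature ideas for a codebase.",
    "state": {"focus": {"type": "str", "required": False}},  # assumed entry format
    "tools": {
        "read_patterns": {"type": "shell"},   # placeholder tool configs
        "read_readme": {"type": "shell"},
        "search_todos": {"type": "shell"},
        "search_web": {"type": "websearch"},  # the test only checks type == "websearch"
    },
    "nodes": {
        "gather_context": {"type": "agent", "prompt": "feature-brainstorm/gather"},
        "research_alternatives": {"type": "agent", "prompt": "feature-brainstorm/research"},
        "brainstorm": {"type": "llm", "prompt": "feature-brainstorm/ideate"},
        "prioritize": {"type": "llm", "prompt": "feature-brainstorm/prioritize"},
    },
    "edges": [
        {"from": "START", "to": "gather_context"},
        {"from": "gather_context", "to": "research_alternatives"},
        {"from": "research_alternatives", "to": "brainstorm"},
        {"from": "brainstorm", "to": "prioritize"},
        {"from": "prioritize", "to": "END"},
    ],
}

print(yaml.safe_dump(graph, sort_keys=False))

Every node appears as a 'to' target in this sketch, so the reachability check in test_has_complete_flow would pass alongside the START and END assertions.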
@@ -0,0 +1,145 @@
+"""Tests for prompt formatting with Jinja2 support."""
+
+import pytest
+
+from yamlgraph.executor import format_prompt
+
+
+class TestFormatPrompt:
+    """Test the format_prompt function with both simple and Jinja2 templates."""
+
+    def test_simple_format_basic(self):
+        """Test basic string formatting with {variable} syntax."""
+        template = "Hello {name}!"
+        variables = {"name": "World"}
+        result = format_prompt(template, variables)
+        assert result == "Hello World!"
+
+    def test_simple_format_multiple_variables(self):
+        """Test formatting with multiple variables."""
+        template = "Topic: {topic}, Style: {style}, Words: {word_count}"
+        variables = {"topic": "AI", "style": "casual", "word_count": 500}
+        result = format_prompt(template, variables)
+        assert result == "Topic: AI, Style: casual, Words: 500"
+
+    def test_simple_format_missing_variable(self):
+        """Test that missing variables raise KeyError."""
+        template = "Hello {name}!"
+        variables = {}
+        with pytest.raises(KeyError):
+            format_prompt(template, variables)
+
+    def test_jinja2_basic_variable(self):
+        """Test Jinja2 template with basic {{ variable }} syntax."""
+        template = "Hello {{ name }}!"
+        variables = {"name": "World"}
+        result = format_prompt(template, variables)
+        assert result == "Hello World!"
+
+    def test_jinja2_for_loop(self):
+        """Test Jinja2 template with for loop."""
+        template = """{% for item in items %}
+- {{ item }}
+{% endfor %}"""
+        variables = {"items": ["apple", "banana", "cherry"]}
+        result = format_prompt(template, variables)
+        # Jinja2 preserves whitespace from template
+        assert "- apple" in result
+        assert "- banana" in result
+        assert "- cherry" in result
+
+    def test_jinja2_conditional(self):
+        """Test Jinja2 template with if/else."""
+        template = """{% if premium %}Premium User{% else %}Regular User{% endif %}"""
+
+        result_premium = format_prompt(template, {"premium": True})
+        assert result_premium == "Premium User"
+
+        result_regular = format_prompt(template, {"premium": False})
+        assert result_regular == "Regular User"
+
+    def test_jinja2_filter_slice(self):
+        """Test Jinja2 template with slice filter."""
+        template = "Summary: {{ text[:50] }}..."
+        variables = {
+            "text": "This is a very long text that should be truncated to show only first fifty characters"
+        }
+        result = format_prompt(template, variables)
+        # Check that the text is sliced to 50 characters
+        assert result.startswith(
+            "Summary: This is a very long text that should be truncated"
+        )
+        assert result.endswith("...")
+        assert len(result) < len(variables["text"]) + len("Summary: ...")
+
+    def test_jinja2_filter_upper(self):
+        """Test Jinja2 template with upper filter."""
+        template = "{{ name | upper }}"
+        variables = {"name": "world"}
+        result = format_prompt(template, variables)
+        assert result == "WORLD"
+
+    def test_jinja2_complex_template(self):
+        """Test complex Jinja2 template with loops and conditionals."""
+        template = """Items in {{ category }}:
+{% for item in items %}
+{% if item.available %}
+- {{ item.name }}: ${{ item.price }}
+{% endif %}
+{% endfor %}"""
+        variables = {
+            "category": "Fruits",
+            "items": [
+                {"name": "Apple", "price": 1.50, "available": True},
+                {"name": "Banana", "price": 0.75, "available": False},
+                {"name": "Cherry", "price": 2.00, "available": True},
+            ],
+        }
+        result = format_prompt(template, variables)
+        assert "Apple: $1.5" in result
+        assert "Cherry: $2.0" in result
+        assert "Banana" not in result
+
+    def test_jinja2_missing_variable_graceful(self):
+        """Test that Jinja2 missing variables are handled (rendered as empty by default)."""
+        template = "Hello {{ name }}!"
+        variables = {}
+        result = format_prompt(template, variables)
+        # Jinja2 by default renders undefined variables as empty strings
+        assert result == "Hello !"
+
+    def test_detection_uses_jinja2_for_double_braces(self):
+        """Test that {{ triggers Jinja2 mode."""
+        template = "Value: {{ x }}"
+        variables = {"x": 42}
+        result = format_prompt(template, variables)
+        assert result == "Value: 42"
+
+    def test_detection_uses_jinja2_for_statements(self):
+        """Test that {% triggers Jinja2 mode."""
+        template = "{% if true %}Yes{% endif %}"
+        variables = {}
+        result = format_prompt(template, variables)
+        assert result == "Yes"
+
+    def test_backward_compatibility_no_jinja2_syntax(self):
+        """Test that templates without Jinja2 syntax still use simple format."""
+        # This ensures backward compatibility
+        template = "Simple {var} template"
+        variables = {"var": "test"}
+        result = format_prompt(template, variables)
+        assert result == "Simple test template"
+
+    def test_empty_template(self):
+        """Test formatting empty template."""
+        template = ""
+        variables = {}
+        result = format_prompt(template, variables)
+        assert result == ""
+
+    def test_template_with_no_placeholders(self):
+        """Test template with no variables."""
+        template = "Just plain text"
+        variables = {"unused": "value"}
+        result = format_prompt(template, variables)
+        assert result == "Just plain text"
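Taken together, the 145 lines above (tests/unit/test_format_prompt.py in the listing) pin down a dual-mode contract for format_prompt: templates containing "{{" or "{%" are rendered with Jinja2, where undefined variables default to empty strings, and everything else falls back to str.format, where a missing key raises KeyError. The sketch below reproduces that contract; it illustrates the tested behavior and is not the actual yamlgraph.executor implementation.

from jinja2 import Template


def format_prompt(template: str, variables: dict) -> str:
    """Render a prompt template (sketch of the behavior the tests assert)."""
    if "{{" in template or "{%" in template:
        # Jinja2 mode: loops, conditionals, filters; undefined names render as ""
        return Template(template).render(**variables)
    # Simple mode: plain str.format, so a missing variable raises KeyError
    return template.format(**variables)

Any of the tests above can be read against this sketch; for example, format_prompt("{% if true %}Yes{% endif %}", {}) returns "Yes" because the "{%" marker routes the template through Jinja2.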
@@ -0,0 +1,200 @@
+"""Tests for Phase 6.5: Generic Report Schema.
+
+Tests for flexible GenericReport model that works for most analysis/summary tasks.
+"""
+
+import pytest
+from pydantic import ValidationError
+
+
+class TestGenericReportSchema:
+    """Tests for GenericReport model."""
+
+    def test_generic_report_exists(self):
+        """GenericReport model is importable."""
+        from yamlgraph.models.schemas import GenericReport
+
+        assert GenericReport is not None
+
+    def test_minimal_report(self):
+        """Report works with just title and summary."""
+        from yamlgraph.models.schemas import GenericReport
+
+        report = GenericReport(
+            title="Test Report",
+            summary="A brief summary of findings.",
+        )
+
+        assert report.title == "Test Report"
+        assert report.summary == "A brief summary of findings."
+
+    def test_report_with_sections(self):
+        """Sections field accepts arbitrary dict content."""
+        from yamlgraph.models.schemas import GenericReport
+
+        report = GenericReport(
+            title="Report",
+            summary="Summary",
+            sections={
+                "overview": "First section content",
+                "details": {"nested": "data", "count": 42},
+                "items": ["a", "b", "c"],
+            },
+        )
+
+        assert report.sections["overview"] == "First section content"
+        assert report.sections["details"]["count"] == 42
+        assert len(report.sections["items"]) == 3
+
+    def test_report_with_findings(self):
+        """Findings field is list of strings."""
+        from yamlgraph.models.schemas import GenericReport
+
+        report = GenericReport(
+            title="Report",
+            summary="Summary",
+            findings=["Finding 1", "Finding 2", "Finding 3"],
+        )
+
+        assert len(report.findings) == 3
+        assert "Finding 1" in report.findings
+
+    def test_report_with_recommendations(self):
+        """Recommendations field is list of strings."""
+        from yamlgraph.models.schemas import GenericReport
+
+        report = GenericReport(
+            title="Report",
+            summary="Summary",
+            recommendations=["Action 1", "Action 2"],
+        )
+
+        assert len(report.recommendations) == 2
+
+    def test_report_with_metadata(self):
+        """Metadata field accepts arbitrary key-value data."""
+        from yamlgraph.models.schemas import GenericReport
+
+        report = GenericReport(
+            title="Report",
+            summary="Summary",
+            metadata={
+                "author": "Test Author",
+                "version": 1.0,
+                "tags": ["a", "b"],
+            },
+        )
+
+        assert report.metadata["author"] == "Test Author"
+        assert report.metadata["version"] == 1.0
+
+    def test_defaults_are_empty(self):
+        """Optional fields default to empty collections."""
+        from yamlgraph.models.schemas import GenericReport
+
+        report = GenericReport(title="Report", summary="Summary")
+
+        assert report.sections == {}
+        assert report.findings == []
+        assert report.recommendations == []
+        assert report.metadata == {}
+
+    def test_title_is_required(self):
+        """Title field is required."""
+        from yamlgraph.models.schemas import GenericReport
+
+        with pytest.raises(ValidationError) as exc_info:
+            GenericReport(summary="Summary")
+
+        errors = exc_info.value.errors()
+        assert any(e["loc"] == ("title",) for e in errors)
+
+    def test_summary_is_required(self):
+        """Summary field is required."""
+        from yamlgraph.models.schemas import GenericReport
+
+        with pytest.raises(ValidationError) as exc_info:
+            GenericReport(title="Title")
+
+        errors = exc_info.value.errors()
+        assert any(e["loc"] == ("summary",) for e in errors)
+
+    def test_model_serializes_to_dict(self):
+        """Report serializes to dictionary."""
+        from yamlgraph.models.schemas import GenericReport
+
+        report = GenericReport(
+            title="Test",
+            summary="Summary",
+            findings=["A", "B"],
+        )
+
+        data = report.model_dump()
+
+        assert data["title"] == "Test"
+        assert data["summary"] == "Summary"
+        assert data["findings"] == ["A", "B"]
+
+    def test_model_serializes_to_json(self):
+        """Report serializes to JSON string."""
+        import json
+
+        from yamlgraph.models.schemas import GenericReport
+
+        report = GenericReport(title="Test", summary="Summary")
+
+        json_str = report.model_dump_json()
+        data = json.loads(json_str)
+
+        assert data["title"] == "Test"
+
+
+class TestGenericReportUseCases:
+    """Verify GenericReport works for common analysis patterns."""
+
+    def test_git_analysis_report(self):
+        """GenericReport works for git analysis output."""
+        from yamlgraph.models.schemas import GenericReport
+
+        report = GenericReport(
+            title="Git Repository Analysis",
+            summary="Analysis of recent activity in the repository.",
+            sections={
+                "commit_summary": "15 commits in last 7 days",
+                "authors": ["alice", "bob", "carol"],
+            },
+            findings=[
+                "High activity in src/core module",
+                "No tests for new features",
+                "Breaking changes in v2.0",
+            ],
+            recommendations=[
+                "Add tests for recent changes",
+                "Review breaking changes before release",
+            ],
+            metadata={
+                "repo": "langgraph-showcase",
+                "analyzed_at": "2024-01-01T00:00:00",
+            },
+        )
+
+        assert "Git Repository" in report.title
+        assert len(report.findings) == 3
+        assert report.metadata["repo"] == "langgraph-showcase"
+
+    def test_api_analysis_report(self):
+        """GenericReport works for API analysis output."""
+        from yamlgraph.models.schemas import GenericReport
+
+        report = GenericReport(
+            title="API Performance Report",
+            summary="Performance analysis of API endpoints.",
+            sections={
+                "latency": {"p50": 45, "p95": 120, "p99": 250},
+                "errors": {"rate": 0.02, "top_errors": ["500", "429"]},
+            },
+            findings=["High latency on /search endpoint"],
+            recommendations=["Add caching for /search"],
+        )
+
+        assert report.sections["latency"]["p95"] == 120
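The assertions in this last hunk (tests/unit/test_generic_report.py in the listing) imply the shape of GenericReport: required title and summary strings plus sections, findings, recommendations, and metadata fields that default to empty collections and accept arbitrary content. Below is a minimal Pydantic v2 sketch that would satisfy these tests; any field descriptions or validators in the shipped yamlgraph.models.schemas are not reproduced here.

from typing import Any

from pydantic import BaseModel, Field


class GenericReport(BaseModel):
    """Flexible report for analysis/summary tasks (field set inferred from the tests)."""

    title: str
    summary: str
    sections: dict[str, Any] = Field(default_factory=dict)
    findings: list[str] = Field(default_factory=list)
    recommendations: list[str] = Field(default_factory=list)
    metadata: dict[str, Any] = Field(default_factory=dict)

The two serialization tests are covered by Pydantic v2's built-in model_dump() and model_dump_json(), so the sketch needs no extra methods.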