yamlgraph 0.1.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of yamlgraph has been flagged for review; see the registry's advisory page for details.
- examples/__init__.py +1 -0
- examples/storyboard/__init__.py +1 -0
- examples/storyboard/generate_videos.py +335 -0
- examples/storyboard/nodes/__init__.py +10 -0
- examples/storyboard/nodes/animated_character_node.py +248 -0
- examples/storyboard/nodes/animated_image_node.py +138 -0
- examples/storyboard/nodes/character_node.py +162 -0
- examples/storyboard/nodes/image_node.py +118 -0
- examples/storyboard/nodes/replicate_tool.py +238 -0
- examples/storyboard/retry_images.py +118 -0
- tests/__init__.py +1 -0
- tests/conftest.py +178 -0
- tests/integration/__init__.py +1 -0
- tests/integration/test_animated_storyboard.py +63 -0
- tests/integration/test_cli_commands.py +242 -0
- tests/integration/test_map_demo.py +50 -0
- tests/integration/test_memory_demo.py +281 -0
- tests/integration/test_pipeline_flow.py +105 -0
- tests/integration/test_providers.py +163 -0
- tests/integration/test_resume.py +75 -0
- tests/unit/__init__.py +1 -0
- tests/unit/test_agent_nodes.py +200 -0
- tests/unit/test_checkpointer.py +212 -0
- tests/unit/test_cli.py +121 -0
- tests/unit/test_cli_package.py +81 -0
- tests/unit/test_compile_graph_map.py +132 -0
- tests/unit/test_conditions_routing.py +253 -0
- tests/unit/test_config.py +93 -0
- tests/unit/test_conversation_memory.py +270 -0
- tests/unit/test_database.py +145 -0
- tests/unit/test_deprecation.py +104 -0
- tests/unit/test_executor.py +60 -0
- tests/unit/test_executor_async.py +179 -0
- tests/unit/test_export.py +150 -0
- tests/unit/test_expressions.py +178 -0
- tests/unit/test_format_prompt.py +145 -0
- tests/unit/test_generic_report.py +200 -0
- tests/unit/test_graph_commands.py +327 -0
- tests/unit/test_graph_loader.py +299 -0
- tests/unit/test_graph_schema.py +193 -0
- tests/unit/test_inline_schema.py +151 -0
- tests/unit/test_issues.py +164 -0
- tests/unit/test_jinja2_prompts.py +85 -0
- tests/unit/test_langsmith.py +319 -0
- tests/unit/test_llm_factory.py +109 -0
- tests/unit/test_llm_factory_async.py +118 -0
- tests/unit/test_loops.py +403 -0
- tests/unit/test_map_node.py +144 -0
- tests/unit/test_no_backward_compat.py +56 -0
- tests/unit/test_node_factory.py +225 -0
- tests/unit/test_prompts.py +166 -0
- tests/unit/test_python_nodes.py +198 -0
- tests/unit/test_reliability.py +298 -0
- tests/unit/test_result_export.py +234 -0
- tests/unit/test_router.py +296 -0
- tests/unit/test_sanitize.py +99 -0
- tests/unit/test_schema_loader.py +295 -0
- tests/unit/test_shell_tools.py +229 -0
- tests/unit/test_state_builder.py +331 -0
- tests/unit/test_state_builder_map.py +104 -0
- tests/unit/test_state_config.py +197 -0
- tests/unit/test_template.py +190 -0
- tests/unit/test_tool_nodes.py +129 -0
- yamlgraph/__init__.py +35 -0
- yamlgraph/builder.py +110 -0
- yamlgraph/cli/__init__.py +139 -0
- yamlgraph/cli/__main__.py +6 -0
- yamlgraph/cli/commands.py +232 -0
- yamlgraph/cli/deprecation.py +92 -0
- yamlgraph/cli/graph_commands.py +382 -0
- yamlgraph/cli/validators.py +37 -0
- yamlgraph/config.py +67 -0
- yamlgraph/constants.py +66 -0
- yamlgraph/error_handlers.py +226 -0
- yamlgraph/executor.py +275 -0
- yamlgraph/executor_async.py +122 -0
- yamlgraph/graph_loader.py +337 -0
- yamlgraph/map_compiler.py +138 -0
- yamlgraph/models/__init__.py +36 -0
- yamlgraph/models/graph_schema.py +141 -0
- yamlgraph/models/schemas.py +124 -0
- yamlgraph/models/state_builder.py +236 -0
- yamlgraph/node_factory.py +240 -0
- yamlgraph/routing.py +87 -0
- yamlgraph/schema_loader.py +160 -0
- yamlgraph/storage/__init__.py +17 -0
- yamlgraph/storage/checkpointer.py +72 -0
- yamlgraph/storage/database.py +320 -0
- yamlgraph/storage/export.py +269 -0
- yamlgraph/tools/__init__.py +1 -0
- yamlgraph/tools/agent.py +235 -0
- yamlgraph/tools/nodes.py +124 -0
- yamlgraph/tools/python_tool.py +178 -0
- yamlgraph/tools/shell.py +205 -0
- yamlgraph/utils/__init__.py +47 -0
- yamlgraph/utils/conditions.py +157 -0
- yamlgraph/utils/expressions.py +111 -0
- yamlgraph/utils/langsmith.py +308 -0
- yamlgraph/utils/llm_factory.py +118 -0
- yamlgraph/utils/llm_factory_async.py +105 -0
- yamlgraph/utils/logging.py +127 -0
- yamlgraph/utils/prompts.py +116 -0
- yamlgraph/utils/sanitize.py +98 -0
- yamlgraph/utils/template.py +102 -0
- yamlgraph/utils/validators.py +181 -0
- yamlgraph-0.1.1.dist-info/METADATA +854 -0
- yamlgraph-0.1.1.dist-info/RECORD +111 -0
- yamlgraph-0.1.1.dist-info/WHEEL +5 -0
- yamlgraph-0.1.1.dist-info/entry_points.txt +2 -0
- yamlgraph-0.1.1.dist-info/licenses/LICENSE +21 -0
- yamlgraph-0.1.1.dist-info/top_level.txt +3 -0
|
@@ -0,0 +1,132 @@
|
|
|
1
|
+
"""Integration tests for compile_graph with map nodes."""
|
|
2
|
+
|
|
3
|
+
from unittest.mock import MagicMock, patch
|
|
4
|
+
|
|
5
|
+
from yamlgraph.graph_loader import GraphConfig, compile_graph
|
|
6
|
+
|
|
7
|
+
|
|
8
|
+
def make_graph_config(nodes: dict, edges: list) -> GraphConfig:
    """Wrap *nodes* and *edges* in a minimal GraphConfig for testing.

    Args:
        nodes: Mapping of node name to node configuration dict.
        edges: List of edge dicts with "from"/"to" keys.

    Returns:
        A GraphConfig built from a fixed test graph skeleton.
    """
    return GraphConfig(
        {
            "name": "test-map",
            "version": "0.1",
            "nodes": nodes,
            "edges": edges,
        }
    )
|
|
17
|
+
|
|
18
|
+
|
|
19
|
+
def make_map_node_config(collect: str = "results") -> dict:
    """Build a minimal, valid ``type: map`` node configuration.

    Args:
        collect: State key under which the map node aggregates results.

    Returns:
        A dict describing a map node that iterates ``state.items``.
    """
    config: dict = {"type": "map", "over": "{state.items}", "as": "item"}
    config["node"] = {"prompt": "process", "state_key": "result"}
    config["collect"] = collect
    return config
|
|
28
|
+
|
|
29
|
+
|
|
30
|
+
class TestCompileGraphMap:
    """Tests for compile_graph handling ``type: map`` nodes."""

    @staticmethod
    def _three_node_config():
        """Shared start -> map -> final graph used by the edge-wiring tests."""
        return make_graph_config(
            nodes={
                "start_node": {"prompt": "generate", "state_key": "items"},
                "map_node": make_map_node_config(),
                "final_node": {"prompt": "summarize", "state_key": "summary"},
            },
            edges=[
                {"from": "START", "to": "start_node"},
                {"from": "start_node", "to": "map_node"},
                {"from": "map_node", "to": "final_node"},
                {"from": "final_node", "to": "END"},
            ],
        )

    def test_map_node_compiled_to_graph(self) -> None:
        """Map node is correctly added to graph."""
        config = make_graph_config(
            nodes={
                "start_node": {"prompt": "generate", "state_key": "items"},
                "process_items": make_map_node_config(),
            },
            edges=[
                {"from": "START", "to": "start_node"},
                {"from": "start_node", "to": "process_items"},
                {"from": "process_items", "to": "END"},
            ],
        )

        with patch("yamlgraph.graph_loader.compile_map_node") as mock_compile_map:
            mock_compile_map.return_value = (MagicMock(), "_map_process_items_sub")

            compile_graph(config)

            # compile_map_node must receive the map node's name and raw config.
            mock_compile_map.assert_called_once()
            call_args = mock_compile_map.call_args
            assert call_args[0][0] == "process_items"  # name
            assert call_args[0][1]["type"] == "map"  # config

    def test_map_node_sub_node_added(self) -> None:
        """Map node's wrapped sub_node is added to graph."""
        config = make_graph_config(
            nodes={
                "start_node": {"prompt": "generate", "state_key": "items"},
                "map_node": make_map_node_config(),
            },
            edges=[
                {"from": "START", "to": "start_node"},
                {"from": "start_node", "to": "map_node"},
                {"from": "map_node", "to": "END"},
            ],
        )

        with patch("yamlgraph.graph_loader.compile_map_node") as mock_compile_map:
            mock_compile_map.return_value = (MagicMock(), "_map_map_node_sub")

            compile_graph(config)

            # The real compile_map_node registers the sub node on the builder;
            # with it mocked we can only assert it was invoked for the map node.
            mock_compile_map.assert_called_once()

    def test_map_node_conditional_edge_wired(self) -> None:
        """Map node predecessor gets conditional edge with Send function."""
        config = self._three_node_config()

        with patch("yamlgraph.graph_loader.compile_map_node") as mock_compile_map:
            mock_compile_map.return_value = (MagicMock(), "_map_map_node_sub")

            compile_graph(config)

            # Compilation succeeded, and the map node was routed through
            # compile_map_node, whose returned edge fn wires the
            # conditional edge from the predecessor.
            mock_compile_map.assert_called_once()

    def test_map_node_fanin_edge_wired(self) -> None:
        """Map sub_node has edge to next node for fan-in."""
        config = self._three_node_config()

        with patch("yamlgraph.graph_loader.compile_map_node") as mock_compile_map:
            mock_compile_map.return_value = (MagicMock(), "_map_map_node_sub")

            compile_graph(config)

            # Fan-in wiring (sub_node -> final_node) happens inside
            # compile_map_node; assert at least that it ran exactly once.
            mock_compile_map.assert_called_once()
|
|
@@ -0,0 +1,253 @@
|
|
|
1
|
+
"""Unit tests for conditions and routing modules.
|
|
2
|
+
|
|
3
|
+
Tests the expression evaluation and routing functions used for
|
|
4
|
+
graph edge conditions.
|
|
5
|
+
|
|
6
|
+
Note: resolve_value tests are in test_expressions.py (TestResolveStatePath)
|
|
7
|
+
since resolve_value delegates to resolve_state_path.
|
|
8
|
+
"""
|
|
9
|
+
|
|
10
|
+
import pytest
|
|
11
|
+
from pydantic import BaseModel
|
|
12
|
+
|
|
13
|
+
from yamlgraph.routing import make_expr_router_fn, make_router_fn
|
|
14
|
+
from yamlgraph.utils.conditions import (
|
|
15
|
+
evaluate_comparison,
|
|
16
|
+
evaluate_condition,
|
|
17
|
+
parse_literal,
|
|
18
|
+
)
|
|
19
|
+
|
|
20
|
+
|
|
21
|
+
class TestParseLiteral:
    """Tests for parse_literal function."""

    def test_integer(self):
        """Should parse integer."""
        assert parse_literal("-5") == -5
        assert parse_literal("42") == 42

    def test_float(self):
        """Should parse float."""
        assert parse_literal("-3.14") == -3.14
        assert parse_literal("0.8") == 0.8

    def test_boolean_true(self):
        """Should parse boolean true (case insensitive)."""
        for spelling in ("true", "True", "TRUE"):
            assert parse_literal(spelling) is True

    def test_boolean_false(self):
        """Should parse boolean false."""
        for spelling in ("false", "False"):
            assert parse_literal(spelling) is False

    def test_null_none(self):
        """Should parse null/none."""
        for spelling in ("null", "None"):
            assert parse_literal(spelling) is None

    def test_quoted_string(self):
        """Should strip the surrounding quotes from quoted strings."""
        assert parse_literal("'world'") == "world"
        assert parse_literal('"hello"') == "hello"

    def test_unquoted_string(self):
        """Should return unquoted string as-is."""
        assert parse_literal("hello") == "hello"
|
|
58
|
+
|
|
59
|
+
|
|
60
|
+
class TestEvaluateComparison:
    """Tests for evaluate_comparison function."""

    def test_less_than(self):
        """Should evaluate < operator."""
        data = {"score": 0.5}
        assert evaluate_comparison("score", "<", "0.8", data) is True
        assert evaluate_comparison("score", "<", "0.3", data) is False

    def test_greater_than(self):
        """Should evaluate > operator."""
        data = {"score": 0.9}
        assert evaluate_comparison("score", ">", "0.5", data) is True
        assert evaluate_comparison("score", ">", "1.0", data) is False

    def test_less_than_or_equal(self):
        """Should evaluate <= operator."""
        data = {"value": 5}
        for literal, expected in (("5", True), ("10", True), ("3", False)):
            assert evaluate_comparison("value", "<=", literal, data) is expected

    def test_greater_than_or_equal(self):
        """Should evaluate >= operator."""
        data = {"value": 5}
        for literal, expected in (("5", True), ("3", True), ("10", False)):
            assert evaluate_comparison("value", ">=", literal, data) is expected

    def test_equal(self):
        """Should evaluate == operator."""
        data = {"status": "done", "count": 3}
        assert evaluate_comparison("count", "==", "3", data) is True
        assert evaluate_comparison("status", "==", '"done"', data) is True
        assert evaluate_comparison("status", "==", '"pending"', data) is False

    def test_not_equal(self):
        """Should evaluate != operator."""
        data = {"status": "done"}
        assert evaluate_comparison("status", "!=", '"pending"', data) is True
        assert evaluate_comparison("status", "!=", '"done"', data) is False

    def test_missing_value_returns_false(self):
        """Ordering comparisons against a missing key evaluate to False."""
        data = {"a": 1}
        for op in ("<", ">"):
            assert evaluate_comparison("missing", op, "5", data) is False

    def test_missing_value_equals_none(self):
        """A missing key compares equal to None."""
        assert evaluate_comparison("missing", "==", "None", {"a": 1}) is True

    def test_type_mismatch_returns_false(self):
        """Comparing incompatible types yields False rather than raising."""
        data = {"value": "not_a_number"}
        assert evaluate_comparison("value", "<", "5", data) is False
|
|
117
|
+
|
|
118
|
+
|
|
119
|
+
class TestEvaluateCondition:
    """Tests for evaluate_condition function."""

    def test_simple_comparison(self):
        """Should evaluate simple comparison."""
        assert evaluate_condition("score >= 0.8", {"score": 0.9}) is True
        assert evaluate_condition("score < 0.8", {"score": 0.5}) is True

    def test_nested_path(self):
        """Dotted paths reach into nested state values."""
        data = {"critique": {"score": 0.7}}
        assert evaluate_condition("critique.score < 0.8", data) is True
        assert evaluate_condition("critique.score >= 0.8", data) is False

    def test_compound_and(self):
        """Should evaluate AND expression."""
        data = {"a": 5, "b": 10}
        cases = (
            ("a > 1 and b < 20", True),
            ("a > 1 and b > 20", False),
            ("a > 10 and b < 20", False),
        )
        for expr, expected in cases:
            assert evaluate_condition(expr, data) is expected

    def test_compound_or(self):
        """Should evaluate OR expression."""
        data = {"a": 5, "b": 10}
        cases = (
            ("a > 10 or b < 20", True),
            ("a > 1 or b > 100", True),
            ("a > 10 or b > 100", False),
        )
        for expr, expected in cases:
            assert evaluate_condition(expr, data) is expected

    def test_mixed_and_or(self):
        """AND binds tighter than OR in mixed expressions."""
        data = {"a": 5, "b": 10, "c": 15}
        # Parsed as: a > 10 OR (b < 20 AND c > 10) -> False OR True -> True
        assert evaluate_condition("a > 10 or b < 20 and c > 10", data) is True

    def test_whitespace_handling(self):
        """Extra or missing spaces do not change the result."""
        data = {"score": 0.5}
        assert evaluate_condition("score<0.8", data) is True
        assert evaluate_condition(" score < 0.8 ", data) is True

    def test_invalid_expression_raises(self):
        """Should raise ValueError for invalid expression."""
        with pytest.raises(ValueError, match="Invalid condition"):
            evaluate_condition("not a valid expression !!!", {})

    def test_pydantic_model_in_state(self):
        """Should work with Pydantic models in state."""

        class Review(BaseModel):
            score: float

        data = {"critique": Review(score=0.75)}
        assert evaluate_condition("critique.score < 0.8", data) is True
        assert evaluate_condition("critique.score >= 0.8", data) is False
|
|
173
|
+
|
|
174
|
+
|
|
175
|
+
class TestMakeRouterFn:
    """Tests for make_router_fn factory."""

    def test_routes_to_matching_target(self):
        """A _route value present in targets is returned unchanged."""
        route = make_router_fn(["positive", "negative", "neutral"])
        for target in ("positive", "negative"):
            assert route({"_route": target}) == target

    def test_defaults_to_first_target(self):
        """An unknown or absent _route falls back to the first target."""
        route = make_router_fn(["a", "b", "c"])
        assert route({}) == "a"
        assert route({"_route": "unknown"}) == "a"

    def test_ignores_invalid_route(self):
        """A _route outside the target list is ignored."""
        route = make_router_fn(["x", "y"])
        assert route({"_route": "z"}) == "x"
|
|
194
|
+
|
|
195
|
+
|
|
196
|
+
class TestMakeExprRouterFn:
    """Tests for make_expr_router_fn factory."""

    def test_routes_on_first_matching_condition(self):
        """Should route to first matching condition."""
        route = make_expr_router_fn(
            [("score < 0.5", "refine"), ("score >= 0.5", "done")],
            "test_node",
        )
        assert route({"score": 0.3}) == "refine"
        assert route({"score": 0.8}) == "done"

    def test_loop_limit_takes_precedence(self):
        """Should return END when loop limit reached."""
        from langgraph.graph import END

        route = make_expr_router_fn([("score < 0.8", "continue")], "test_node")
        assert route({"_loop_limit_reached": True, "score": 0.5}) == END

    def test_defaults_to_end_when_no_match(self):
        """Should return END when no condition matches."""
        from langgraph.graph import END

        route = make_expr_router_fn(
            [("score < 0.5", "a"), ("score > 0.9", "b")],
            "test_node",
        )
        # 0.7 satisfies neither condition, so the router falls back to END.
        assert route({"score": 0.7}) == END

    def test_handles_condition_error_gracefully(self):
        """A broken condition is skipped and later edges still match."""
        route = make_expr_router_fn(
            [("invalid!!! expression", "a"), ("score >= 0.8", "done")],
            "test_node",
        )
        assert route({"score": 0.9}) == "done"

    def test_condition_order_matters(self):
        """First matching condition wins."""
        route = make_expr_router_fn(
            [("score >= 0.5", "half"), ("score >= 0.8", "high")],
            "test_node",
        )
        # 0.9 satisfies both edges, but the earlier one takes the route.
        assert route({"score": 0.9}) == "half"
|
|
@@ -0,0 +1,93 @@
|
|
|
1
|
+
"""Tests for yamlgraph.config module."""
|
|
2
|
+
|
|
3
|
+
from yamlgraph.config import (
|
|
4
|
+
DATABASE_PATH,
|
|
5
|
+
DEFAULT_MAX_TOKENS,
|
|
6
|
+
DEFAULT_MODELS,
|
|
7
|
+
DEFAULT_TEMPERATURE,
|
|
8
|
+
MAX_TOPIC_LENGTH,
|
|
9
|
+
MAX_WORD_COUNT,
|
|
10
|
+
MIN_WORD_COUNT,
|
|
11
|
+
OUTPUTS_DIR,
|
|
12
|
+
PACKAGE_ROOT,
|
|
13
|
+
PROMPTS_DIR,
|
|
14
|
+
VALID_STYLES,
|
|
15
|
+
WORKING_DIR,
|
|
16
|
+
)
|
|
17
|
+
|
|
18
|
+
|
|
19
|
+
class TestPaths:
    """Tests for path configuration."""

    def test_package_root_exists(self):
        """Package root directory should exist."""
        assert PACKAGE_ROOT.is_dir()
        assert PACKAGE_ROOT.exists()

    def test_working_dir_exists(self):
        """Working directory should exist."""
        assert WORKING_DIR.is_dir()
        assert WORKING_DIR.exists()

    def test_prompts_dir_exists(self):
        """Prompts directory should exist."""
        assert PROMPTS_DIR.is_dir()
        assert PROMPTS_DIR.exists()

    def test_prompts_dir_has_yaml_files(self):
        """Prompts directory should contain YAML files."""
        # Paths are always truthy, so any() is equivalent to len(...) > 0 here.
        assert any(PROMPTS_DIR.glob("*.yaml"))

    def test_outputs_dir_path(self):
        """Outputs directory path should be under working dir."""
        assert OUTPUTS_DIR.parent == WORKING_DIR

    def test_database_path(self):
        """Database path should be in outputs directory."""
        assert DATABASE_PATH.suffix == ".db"
        assert DATABASE_PATH.parent == OUTPUTS_DIR
|
|
50
|
+
|
|
51
|
+
|
|
52
|
+
class TestLLMConfig:
    """Tests for LLM configuration."""

    def test_default_models_has_all_providers(self):
        """Default models dict should have all supported providers."""
        for provider in ("anthropic", "mistral", "openai"):
            assert provider in DEFAULT_MODELS

    def test_default_models_are_strings(self):
        """All default models should be non-empty strings."""
        for provider in DEFAULT_MODELS:
            model = DEFAULT_MODELS[provider]
            assert isinstance(model, str), f"{provider} model should be string"
            assert len(model) > 0, f"{provider} model should not be empty"

    def test_default_temperature_range(self):
        """Default temperature should be in valid range."""
        assert DEFAULT_TEMPERATURE >= 0.0
        assert DEFAULT_TEMPERATURE <= 1.0

    def test_default_max_tokens_positive(self):
        """Max tokens should be positive."""
        assert DEFAULT_MAX_TOKENS > 0
|
|
74
|
+
|
|
75
|
+
|
|
76
|
+
class TestCLIConstraints:
    """Tests for CLI validation constraints."""

    def test_topic_length_constraint(self):
        """Max topic length should be reasonable."""
        assert 0 < MAX_TOPIC_LENGTH <= 10000

    def test_word_count_constraints(self):
        """Word count constraints should be valid."""
        assert 0 < MIN_WORD_COUNT < MAX_WORD_COUNT

    def test_valid_styles(self):
        """Valid styles should include expected options."""
        for style in ("informative", "casual", "technical"):
            assert style in VALID_STYLES
|