yamlgraph 0.3.9__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- examples/__init__.py +1 -0
- examples/codegen/__init__.py +5 -0
- examples/codegen/models/__init__.py +13 -0
- examples/codegen/models/schemas.py +76 -0
- examples/codegen/tests/__init__.py +1 -0
- examples/codegen/tests/test_ai_helpers.py +235 -0
- examples/codegen/tests/test_ast_analysis.py +174 -0
- examples/codegen/tests/test_code_analysis.py +134 -0
- examples/codegen/tests/test_code_context.py +301 -0
- examples/codegen/tests/test_code_nav.py +89 -0
- examples/codegen/tests/test_dependency_tools.py +119 -0
- examples/codegen/tests/test_example_tools.py +185 -0
- examples/codegen/tests/test_git_tools.py +112 -0
- examples/codegen/tests/test_impl_agent_schemas.py +193 -0
- examples/codegen/tests/test_impl_agent_v4_graph.py +94 -0
- examples/codegen/tests/test_jedi_analysis.py +226 -0
- examples/codegen/tests/test_meta_tools.py +250 -0
- examples/codegen/tests/test_plan_discovery_prompt.py +98 -0
- examples/codegen/tests/test_syntax_tools.py +85 -0
- examples/codegen/tests/test_synthesize_prompt.py +94 -0
- examples/codegen/tests/test_template_tools.py +244 -0
- examples/codegen/tools/__init__.py +80 -0
- examples/codegen/tools/ai_helpers.py +420 -0
- examples/codegen/tools/ast_analysis.py +92 -0
- examples/codegen/tools/code_context.py +180 -0
- examples/codegen/tools/code_nav.py +52 -0
- examples/codegen/tools/dependency_tools.py +120 -0
- examples/codegen/tools/example_tools.py +188 -0
- examples/codegen/tools/git_tools.py +151 -0
- examples/codegen/tools/impl_executor.py +614 -0
- examples/codegen/tools/jedi_analysis.py +311 -0
- examples/codegen/tools/meta_tools.py +202 -0
- examples/codegen/tools/syntax_tools.py +26 -0
- examples/codegen/tools/template_tools.py +356 -0
- examples/fastapi_interview.py +167 -0
- examples/npc/api/__init__.py +1 -0
- examples/npc/api/app.py +100 -0
- examples/npc/api/routes/__init__.py +5 -0
- examples/npc/api/routes/encounter.py +182 -0
- examples/npc/api/session.py +330 -0
- examples/npc/demo.py +387 -0
- examples/npc/nodes/__init__.py +5 -0
- examples/npc/nodes/image_node.py +92 -0
- examples/npc/run_encounter.py +230 -0
- examples/shared/__init__.py +0 -0
- examples/shared/replicate_tool.py +238 -0
- examples/storyboard/__init__.py +1 -0
- examples/storyboard/generate_videos.py +335 -0
- examples/storyboard/nodes/__init__.py +12 -0
- examples/storyboard/nodes/animated_character_node.py +248 -0
- examples/storyboard/nodes/animated_image_node.py +138 -0
- examples/storyboard/nodes/character_node.py +162 -0
- examples/storyboard/nodes/image_node.py +118 -0
- examples/storyboard/nodes/replicate_tool.py +49 -0
- examples/storyboard/retry_images.py +118 -0
- scripts/demo_async_executor.py +212 -0
- scripts/demo_interview_e2e.py +200 -0
- scripts/demo_streaming.py +140 -0
- scripts/run_interview_demo.py +94 -0
- scripts/test_interrupt_fix.py +26 -0
- tests/__init__.py +1 -0
- tests/conftest.py +178 -0
- tests/integration/__init__.py +1 -0
- tests/integration/test_animated_storyboard.py +63 -0
- tests/integration/test_cli_commands.py +242 -0
- tests/integration/test_colocated_prompts.py +139 -0
- tests/integration/test_map_demo.py +50 -0
- tests/integration/test_memory_demo.py +283 -0
- tests/integration/test_npc_api/__init__.py +1 -0
- tests/integration/test_npc_api/test_routes.py +357 -0
- tests/integration/test_npc_api/test_session.py +216 -0
- tests/integration/test_pipeline_flow.py +105 -0
- tests/integration/test_providers.py +163 -0
- tests/integration/test_resume.py +75 -0
- tests/integration/test_subgraph_integration.py +295 -0
- tests/integration/test_subgraph_interrupt.py +106 -0
- tests/unit/__init__.py +1 -0
- tests/unit/test_agent_nodes.py +355 -0
- tests/unit/test_async_executor.py +346 -0
- tests/unit/test_checkpointer.py +212 -0
- tests/unit/test_checkpointer_factory.py +212 -0
- tests/unit/test_cli.py +121 -0
- tests/unit/test_cli_package.py +81 -0
- tests/unit/test_compile_graph_map.py +132 -0
- tests/unit/test_conditions_routing.py +253 -0
- tests/unit/test_config.py +93 -0
- tests/unit/test_conversation_memory.py +276 -0
- tests/unit/test_database.py +145 -0
- tests/unit/test_deprecation.py +104 -0
- tests/unit/test_executor.py +172 -0
- tests/unit/test_executor_async.py +179 -0
- tests/unit/test_export.py +149 -0
- tests/unit/test_expressions.py +178 -0
- tests/unit/test_feature_brainstorm.py +194 -0
- tests/unit/test_format_prompt.py +145 -0
- tests/unit/test_generic_report.py +200 -0
- tests/unit/test_graph_commands.py +327 -0
- tests/unit/test_graph_linter.py +627 -0
- tests/unit/test_graph_loader.py +357 -0
- tests/unit/test_graph_schema.py +193 -0
- tests/unit/test_inline_schema.py +151 -0
- tests/unit/test_interrupt_node.py +182 -0
- tests/unit/test_issues.py +164 -0
- tests/unit/test_jinja2_prompts.py +85 -0
- tests/unit/test_json_extract.py +134 -0
- tests/unit/test_langsmith.py +600 -0
- tests/unit/test_langsmith_tools.py +204 -0
- tests/unit/test_llm_factory.py +109 -0
- tests/unit/test_llm_factory_async.py +118 -0
- tests/unit/test_loops.py +403 -0
- tests/unit/test_map_node.py +144 -0
- tests/unit/test_no_backward_compat.py +56 -0
- tests/unit/test_node_factory.py +348 -0
- tests/unit/test_passthrough_node.py +126 -0
- tests/unit/test_prompts.py +324 -0
- tests/unit/test_python_nodes.py +198 -0
- tests/unit/test_reliability.py +298 -0
- tests/unit/test_result_export.py +234 -0
- tests/unit/test_router.py +296 -0
- tests/unit/test_sanitize.py +99 -0
- tests/unit/test_schema_loader.py +295 -0
- tests/unit/test_shell_tools.py +229 -0
- tests/unit/test_state_builder.py +331 -0
- tests/unit/test_state_builder_map.py +104 -0
- tests/unit/test_state_config.py +197 -0
- tests/unit/test_streaming.py +307 -0
- tests/unit/test_subgraph.py +596 -0
- tests/unit/test_template.py +190 -0
- tests/unit/test_tool_call_integration.py +164 -0
- tests/unit/test_tool_call_node.py +178 -0
- tests/unit/test_tool_nodes.py +129 -0
- tests/unit/test_websearch.py +234 -0
- yamlgraph/__init__.py +35 -0
- yamlgraph/builder.py +110 -0
- yamlgraph/cli/__init__.py +159 -0
- yamlgraph/cli/__main__.py +6 -0
- yamlgraph/cli/commands.py +231 -0
- yamlgraph/cli/deprecation.py +92 -0
- yamlgraph/cli/graph_commands.py +541 -0
- yamlgraph/cli/validators.py +37 -0
- yamlgraph/config.py +67 -0
- yamlgraph/constants.py +70 -0
- yamlgraph/error_handlers.py +227 -0
- yamlgraph/executor.py +290 -0
- yamlgraph/executor_async.py +288 -0
- yamlgraph/graph_loader.py +451 -0
- yamlgraph/map_compiler.py +150 -0
- yamlgraph/models/__init__.py +36 -0
- yamlgraph/models/graph_schema.py +181 -0
- yamlgraph/models/schemas.py +124 -0
- yamlgraph/models/state_builder.py +236 -0
- yamlgraph/node_factory.py +768 -0
- yamlgraph/routing.py +87 -0
- yamlgraph/schema_loader.py +240 -0
- yamlgraph/storage/__init__.py +20 -0
- yamlgraph/storage/checkpointer.py +72 -0
- yamlgraph/storage/checkpointer_factory.py +123 -0
- yamlgraph/storage/database.py +320 -0
- yamlgraph/storage/export.py +269 -0
- yamlgraph/tools/__init__.py +1 -0
- yamlgraph/tools/agent.py +320 -0
- yamlgraph/tools/graph_linter.py +388 -0
- yamlgraph/tools/langsmith_tools.py +125 -0
- yamlgraph/tools/nodes.py +126 -0
- yamlgraph/tools/python_tool.py +179 -0
- yamlgraph/tools/shell.py +205 -0
- yamlgraph/tools/websearch.py +242 -0
- yamlgraph/utils/__init__.py +48 -0
- yamlgraph/utils/conditions.py +157 -0
- yamlgraph/utils/expressions.py +245 -0
- yamlgraph/utils/json_extract.py +104 -0
- yamlgraph/utils/langsmith.py +416 -0
- yamlgraph/utils/llm_factory.py +118 -0
- yamlgraph/utils/llm_factory_async.py +105 -0
- yamlgraph/utils/logging.py +104 -0
- yamlgraph/utils/prompts.py +171 -0
- yamlgraph/utils/sanitize.py +98 -0
- yamlgraph/utils/template.py +102 -0
- yamlgraph/utils/validators.py +181 -0
- yamlgraph-0.3.9.dist-info/METADATA +1105 -0
- yamlgraph-0.3.9.dist-info/RECORD +185 -0
- yamlgraph-0.3.9.dist-info/WHEEL +5 -0
- yamlgraph-0.3.9.dist-info/entry_points.txt +2 -0
- yamlgraph-0.3.9.dist-info/licenses/LICENSE +33 -0
- yamlgraph-0.3.9.dist-info/top_level.txt +4 -0
yamlgraph/routing.py
ADDED
|
@@ -0,0 +1,87 @@
|
|
|
1
|
+
"""Routing utilities for LangGraph edge conditions.
|
|
2
|
+
|
|
3
|
+
Provides factory functions for creating router functions that determine
|
|
4
|
+
which node to route to based on state values and expressions.
|
|
5
|
+
"""
|
|
6
|
+
|
|
7
|
+
import logging
|
|
8
|
+
from collections.abc import Callable
|
|
9
|
+
from typing import Any
|
|
10
|
+
|
|
11
|
+
from langgraph.graph import END
|
|
12
|
+
|
|
13
|
+
from yamlgraph.utils.conditions import evaluate_condition
|
|
14
|
+
|
|
15
|
+
# Type alias for dynamic state
GraphState = dict[str, Any]

logger = logging.getLogger(__name__)


def make_router_fn(targets: list[str]) -> Callable[[dict], str]:
    """Create a router function that reads _route from state.

    Used for type: router nodes with conditional edges to multiple targets.

    NOTE: Use `state: dict` not `state: GraphState` - type hints cause
    LangGraph to filter state fields. See docs/debug-router-type-hints.md

    Args:
        targets: List of valid target node names

    Returns:
        Router function that returns the matched target name, or the
        first target when ``_route`` is unset or not in ``targets``
    """

    def router_fn(state: dict) -> str:
        route = state.get("_route")
        # Lazy %-style args: skip string formatting unless DEBUG is enabled
        logger.debug("Router: _route=%s, targets=%s", route, targets)
        if route and route in targets:
            logger.debug("Router: matched route %s", route)
            return route
        # Default to first target
        logger.debug("Router: defaulting to %s", targets[0])
        return targets[0]

    return router_fn
|
|
47
|
+
|
|
48
|
+
|
|
49
|
+
def make_expr_router_fn(
    edges: list[tuple[str, str]],
    source_node: str,
) -> Callable[[GraphState], str]:
    """Create router that evaluates expression conditions.

    Used for reflexion-style loops with expression-based conditions
    like "critique.score < 0.8".

    Args:
        edges: List of (condition, target) tuples, evaluated in order;
            the first condition that evaluates truthy wins
        source_node: Name of the source node (for logging)

    Returns:
        Router function that evaluates conditions and returns the target,
        or END when the loop limit flag is set or no condition matches
    """

    def expr_router_fn(state: GraphState) -> str:
        # Check loop limit first - set upstream when max iterations are hit
        if state.get("_loop_limit_reached"):
            return END

        for condition, target in edges:
            try:
                if evaluate_condition(condition, state):
                    # Lazy %-args: no formatting cost when DEBUG is disabled
                    logger.debug(
                        "Condition '%s' matched, routing to %s", condition, target
                    )
                    return target
            except ValueError as e:
                # A broken expression skips this edge rather than crashing
                logger.warning("Failed to evaluate condition '%s': %s", condition, e)
        # No condition matched - this shouldn't happen with well-formed graphs
        logger.warning("No condition matched for %s, defaulting to END", source_node)
        return END

    return expr_router_fn


__all__ = ["make_router_fn", "make_expr_router_fn"]
|
|
@@ -0,0 +1,240 @@
|
|
|
1
|
+
"""Dynamic Pydantic model generation from YAML schema definitions.
|
|
2
|
+
|
|
3
|
+
This module enables defining output schemas in YAML prompt files,
|
|
4
|
+
making prompts fully self-contained with their expected output structure.
|
|
5
|
+
|
|
6
|
+
Example YAML schema:
|
|
7
|
+
schema:
|
|
8
|
+
name: MyOutputModel
|
|
9
|
+
fields:
|
|
10
|
+
title:
|
|
11
|
+
type: str
|
|
12
|
+
description: "The output title"
|
|
13
|
+
confidence:
|
|
14
|
+
type: float
|
|
15
|
+
constraints: {ge: 0.0, le: 1.0}
|
|
16
|
+
"""
|
|
17
|
+
|
|
18
|
+
import re
|
|
19
|
+
from pathlib import Path
|
|
20
|
+
from typing import Any
|
|
21
|
+
|
|
22
|
+
import yaml
|
|
23
|
+
from pydantic import Field, create_model
|
|
24
|
+
|
|
25
|
+
# =============================================================================
|
|
26
|
+
# Type Resolution
|
|
27
|
+
# =============================================================================
|
|
28
|
+
|
|
29
|
+
# Mapping from type strings to Python types
|
|
30
|
+
TYPE_MAP: dict[str, type] = {
|
|
31
|
+
"str": str,
|
|
32
|
+
"int": int,
|
|
33
|
+
"float": float,
|
|
34
|
+
"bool": bool,
|
|
35
|
+
"Any": Any,
|
|
36
|
+
}
|
|
37
|
+
|
|
38
|
+
|
|
39
|
+
def resolve_type(type_str: str, field_name: str | None = None) -> type:
|
|
40
|
+
"""Resolve a type string to a Python type.
|
|
41
|
+
|
|
42
|
+
Supports:
|
|
43
|
+
- Basic types: str, int, float, bool, Any
|
|
44
|
+
- Generic types: list[str], list[int], dict[str, str]
|
|
45
|
+
|
|
46
|
+
Args:
|
|
47
|
+
type_str: Type string like "str", "list[str]", "dict[str, Any]"
|
|
48
|
+
field_name: Optional field name for better error messages
|
|
49
|
+
|
|
50
|
+
Returns:
|
|
51
|
+
Python type annotation
|
|
52
|
+
|
|
53
|
+
Raises:
|
|
54
|
+
ValueError: If type string is not recognized
|
|
55
|
+
"""
|
|
56
|
+
# Check basic types first
|
|
57
|
+
if type_str in TYPE_MAP:
|
|
58
|
+
return TYPE_MAP[type_str]
|
|
59
|
+
|
|
60
|
+
# Handle list[T] pattern
|
|
61
|
+
list_match = re.match(r"list\[(\w+)\]", type_str)
|
|
62
|
+
if list_match:
|
|
63
|
+
inner_type = resolve_type(list_match.group(1), field_name)
|
|
64
|
+
return list[inner_type]
|
|
65
|
+
|
|
66
|
+
# Handle dict[K, V] pattern
|
|
67
|
+
dict_match = re.match(r"dict\[(\w+),\s*(\w+)\]", type_str)
|
|
68
|
+
if dict_match:
|
|
69
|
+
key_type = resolve_type(dict_match.group(1), field_name)
|
|
70
|
+
value_type = resolve_type(dict_match.group(2), field_name)
|
|
71
|
+
return dict[key_type, value_type]
|
|
72
|
+
|
|
73
|
+
# Provide helpful error with supported types
|
|
74
|
+
supported = ", ".join(TYPE_MAP.keys())
|
|
75
|
+
context = f" for field '{field_name}'" if field_name else ""
|
|
76
|
+
raise ValueError(
|
|
77
|
+
f"Unknown type: '{type_str}'{context}. "
|
|
78
|
+
f"Supported types: {supported}, list[T], dict[K, V]"
|
|
79
|
+
)
|
|
80
|
+
|
|
81
|
+
|
|
82
|
+
# =============================================================================
|
|
83
|
+
# Model Building
|
|
84
|
+
# =============================================================================
|
|
85
|
+
|
|
86
|
+
|
|
87
|
+
def build_pydantic_model(schema: dict) -> type:
    """Build a Pydantic model dynamically from a schema dict.

    Args:
        schema: Schema definition with 'name' and 'fields' keys
            Example:
            {
                "name": "MyOutputModel",
                "fields": {
                    "title": {"type": "str", "description": "..."},
                    "score": {"type": "float", "constraints": {"ge": 0}},
                }
            }

    Returns:
        Dynamically created Pydantic model class
    """
    definitions: dict[str, tuple] = {}

    for name, spec in schema["fields"].items():
        # Resolve the annotation; pass the name for clearer errors
        annotation = resolve_type(spec["type"], name)

        optional = spec.get("optional", False)
        if optional:
            annotation = annotation | None

        # Collect Field(...) keyword arguments
        kwargs: dict[str, Any] = {}
        if "description" in spec:
            kwargs["description"] = spec["description"]
        if "default" in spec:
            kwargs["default"] = spec["default"]
        elif optional:
            kwargs["default"] = None
        # Constraints (ge, le, min_length, ...) map straight onto Field kwargs
        kwargs.update(spec.get("constraints") or {})

        # Ellipsis marks a required field without any Field metadata
        definitions[name] = (annotation, Field(**kwargs) if kwargs else ...)

    return create_model(schema["name"], **definitions)
|
|
138
|
+
|
|
139
|
+
|
|
140
|
+
# JSON Schema type mapping
JSON_SCHEMA_TYPE_MAP: dict[str, type] = {
    "string": str,
    "integer": int,
    "number": float,
    "boolean": bool,
    "array": list,
    "object": dict,
}


def build_pydantic_model_from_json_schema(
    schema: dict, model_name: str = "DynamicOutput"
) -> type:
    """Build a Pydantic model from a JSON Schema-style definition.

    Args:
        schema: JSON Schema with 'type: object' and 'properties'
        model_name: Name for the generated model

    Returns:
        Dynamically created Pydantic model class

    Raises:
        ValueError: If the schema's top-level type is not "object"
    """
    if schema.get("type") != "object":
        raise ValueError("output_schema must have type: object")

    required_names = set(schema.get("required", []))
    definitions: dict[str, tuple] = {}

    for name, prop in schema.get("properties", {}).items():
        json_type = prop.get("type", "string")
        description = prop.get("description", "")

        if json_type == "array":
            # Typed list from the "items" sub-schema (string by default)
            item_schema = prop.get("items", {})
            element = JSON_SCHEMA_TYPE_MAP.get(item_schema.get("type", "string"), str)
            annotation = list[element]
        elif "enum" in prop:
            annotation = str  # Enums become str in Pydantic
        else:
            annotation = JSON_SCHEMA_TYPE_MAP.get(json_type, str)

        # Anything not listed in "required" is optional and defaults to None
        optional = name not in required_names
        if optional:
            annotation = annotation | None

        kwargs: dict[str, Any] = {}
        if description:
            kwargs["description"] = description
        if optional:
            kwargs["default"] = None

        definitions[name] = (annotation, Field(**kwargs) if kwargs else ...)

    return create_model(model_name, **definitions)
|
|
204
|
+
|
|
205
|
+
|
|
206
|
+
# =============================================================================
|
|
207
|
+
# YAML Loading
|
|
208
|
+
# =============================================================================
|
|
209
|
+
|
|
210
|
+
|
|
211
|
+
def load_schema_from_yaml(yaml_path: str | Path) -> type | None:
    """Load a Pydantic model from a prompt YAML file's schema block.

    Supports two formats:
    1. Native format (schema: with name/fields)
    2. JSON Schema format (output_schema: with type/properties)

    Args:
        yaml_path: Path to the YAML prompt file

    Returns:
        Dynamically created Pydantic model, or None if no schema defined
    """
    config = yaml.safe_load(Path(yaml_path).read_text())

    # Native format wins when both blocks are present
    if "schema" in config:
        return build_pydantic_model(config["schema"])

    if "output_schema" in config:
        # Derive a CamelCase model name from the file stem,
        # e.g. "my_prompt.yaml" -> "MyPromptOutput"
        stem_words = Path(yaml_path).stem.split("_")
        model_name = "".join(word.title() for word in stem_words) + "Output"
        return build_pydantic_model_from_json_schema(
            config["output_schema"], model_name
        )

    return None
|
|
@@ -0,0 +1,20 @@
|
|
|
1
|
+
"""Storage utilities for persistence and export."""
|
|
2
|
+
|
|
3
|
+
from yamlgraph.storage.checkpointer_factory import expand_env_vars, get_checkpointer
|
|
4
|
+
from yamlgraph.storage.database import YamlGraphDB
|
|
5
|
+
from yamlgraph.storage.export import (
|
|
6
|
+
export_state,
|
|
7
|
+
export_summary,
|
|
8
|
+
list_exports,
|
|
9
|
+
load_export,
|
|
10
|
+
)
|
|
11
|
+
|
|
12
|
+
__all__ = [
|
|
13
|
+
"YamlGraphDB",
|
|
14
|
+
"export_state",
|
|
15
|
+
"export_summary",
|
|
16
|
+
"expand_env_vars",
|
|
17
|
+
"get_checkpointer",
|
|
18
|
+
"list_exports",
|
|
19
|
+
"load_export",
|
|
20
|
+
]
|
|
@@ -0,0 +1,72 @@
|
|
|
1
|
+
"""LangGraph native checkpointer integration.
|
|
2
|
+
|
|
3
|
+
Provides SQLite-based checkpointing for graph state persistence,
|
|
4
|
+
enabling time travel, replay, and resume from any checkpoint.
|
|
5
|
+
"""
|
|
6
|
+
|
|
7
|
+
import sqlite3
|
|
8
|
+
from pathlib import Path
|
|
9
|
+
from typing import Any
|
|
10
|
+
|
|
11
|
+
from langgraph.checkpoint.sqlite import SqliteSaver
|
|
12
|
+
from langgraph.graph.state import CompiledStateGraph
|
|
13
|
+
|
|
14
|
+
from yamlgraph.config import DATABASE_PATH
|
|
15
|
+
|
|
16
|
+
|
|
17
|
+
def get_checkpointer(db_path: str | Path | None = None) -> SqliteSaver:
    """Get a SQLite checkpointer for graph compilation.

    The checkpointer enables:
    - Automatic state persistence after each node
    - Time travel via get_state_history()
    - Resume from any checkpoint
    - Fault tolerance with pending writes

    Args:
        db_path: Path to SQLite database file.
            Defaults to outputs/yamlgraph.db

    Returns:
        SqliteSaver instance for use with graph.compile()

    Example:
        >>> checkpointer = get_checkpointer()
        >>> graph = workflow.compile(checkpointer=checkpointer)
        >>> result = graph.invoke(input, {"configurable": {"thread_id": "abc"}})
    """
    target = Path(DATABASE_PATH if db_path is None else db_path)
    # Make sure the containing directory exists before connecting
    target.parent.mkdir(parents=True, exist_ok=True)

    # check_same_thread=False lets LangGraph drive the connection from
    # worker threads; the saver owns the connection for its lifetime
    connection = sqlite3.connect(str(target), check_same_thread=False)
    return SqliteSaver(connection)
|
|
46
|
+
|
|
47
|
+
|
|
48
|
+
def get_state_history(
    graph: CompiledStateGraph,
    thread_id: str,
) -> list[Any]:
    """Get checkpoint history for a thread.

    Returns checkpoints in reverse chronological order (most recent first).

    Args:
        graph: Compiled graph with checkpointer
        thread_id: Thread identifier to query

    Returns:
        List of StateSnapshot objects, or empty list if thread doesn't exist

    Example:
        >>> history = get_state_history(graph, "my-thread")
        >>> for snapshot in history:
        ...     print(f"Step {snapshot.metadata.get('step')}: {snapshot.values}")
    """
    thread_config = {"configurable": {"thread_id": thread_id}}
    try:
        # Materialize inside the try: the underlying call may be a
        # generator that only raises once it is iterated
        return list(graph.get_state_history(thread_config))
    except Exception:
        # Best-effort query: unknown thread or missing checkpointer -> []
        return []
|
|
@@ -0,0 +1,123 @@
|
|
|
1
|
+
"""Checkpointer factory for YAML-configured persistence.
|
|
2
|
+
|
|
3
|
+
Creates checkpointer instances from YAML configuration with support for:
|
|
4
|
+
- Multiple backends (memory, sqlite, redis)
|
|
5
|
+
- Environment variable expansion for secrets
|
|
6
|
+
- Sync and async modes for Redis
|
|
7
|
+
"""
|
|
8
|
+
|
|
9
|
+
import os
|
|
10
|
+
import re
|
|
11
|
+
from typing import Any
|
|
12
|
+
|
|
13
|
+
from langgraph.checkpoint.base import BaseCheckpointSaver
|
|
14
|
+
|
|
15
|
+
|
|
16
|
+
def expand_env_vars(value: Any) -> Any:
    """Expand ``${VAR}`` patterns in a string from the environment.

    Args:
        value: Value to expand. Non-strings pass through unchanged.

    Returns:
        String with ``${VAR}`` patterns replaced by environment values.
        Missing vars keep their original ``${VAR}`` pattern.
    """
    if not isinstance(value, str):
        return value

    # match.group(0) is the whole "${VAR}" text, kept when VAR is unset
    return re.sub(
        r"\$\{([^}]+)\}",
        lambda match: os.environ.get(match.group(1), match.group(0)),
        value,
    )
|
|
34
|
+
|
|
35
|
+
|
|
36
|
+
def _redis_checkpointer(config: dict, async_mode: bool) -> BaseCheckpointSaver:
    """Build a Redis saver; wraps missing-dependency errors with install hint."""
    url = expand_env_vars(config.get("url", ""))
    ttl = config.get("ttl", 60)

    try:
        if async_mode:
            from langgraph.checkpoint.redis.aio import AsyncRedisSaver

            # For async, caller must await saver.asetup()
            return AsyncRedisSaver.from_conn_string(url, ttl={"default_ttl": ttl})

        from langgraph.checkpoint.redis import RedisSaver

        saver = RedisSaver.from_conn_string(url, ttl={"default_ttl": ttl})
        saver.setup()
        return saver
    except ImportError as e:
        raise ImportError(
            "Install redis support: pip install yamlgraph[redis]"
        ) from e


def _sqlite_checkpointer(config: dict, async_mode: bool) -> BaseCheckpointSaver:
    """Build a SQLite saver, or a MemorySaver fallback in async mode."""
    path = expand_env_vars(config.get("path", ":memory:"))

    if async_mode:
        # MemorySaver supports both sync and async operations.
        # For production async SQLite, use AsyncSqliteSaver with aiosqlite,
        # but that requires async context management which complicates the API.
        import logging

        if path != ":memory:":
            # Lazy %-args: message is only formatted if INFO is enabled
            logging.getLogger(__name__).info(
                "Using MemorySaver for async mode (sqlite path '%s' ignored). "
                "For persistent async storage, use Redis checkpointer.",
                path,
            )
        from langgraph.checkpoint.memory import MemorySaver

        return MemorySaver()

    import sqlite3

    from langgraph.checkpoint.sqlite import SqliteSaver

    conn = sqlite3.connect(path, check_same_thread=False)
    return SqliteSaver(conn)


def get_checkpointer(
    config: dict | None,
    *,
    async_mode: bool = False,
) -> BaseCheckpointSaver | None:
    """Create checkpointer from config.

    Args:
        config: Checkpointer configuration dict with keys:
            - type: "memory" | "sqlite" | "redis" (default: "memory")
            - url: Redis connection URL (for redis type)
            - path: SQLite file path (for sqlite type)
            - ttl: TTL in minutes (for redis type, default: 60)
        async_mode: If True, return async-compatible saver for FastAPI/async usage

    Returns:
        Configured checkpointer or None if config is None

    Raises:
        ValueError: If unknown checkpointer type
        ImportError: If redis type used without yamlgraph[redis] installed
    """
    if not config:
        return None

    cp_type = config.get("type", "memory")

    if cp_type == "redis":
        return _redis_checkpointer(config, async_mode)

    if cp_type == "sqlite":
        return _sqlite_checkpointer(config, async_mode)

    if cp_type == "memory":
        from langgraph.checkpoint.memory import MemorySaver

        return MemorySaver()

    raise ValueError(f"Unknown checkpointer type: {cp_type}")
|