yamlgraph-0.3.9-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (185)
  1. examples/__init__.py +1 -0
  2. examples/codegen/__init__.py +5 -0
  3. examples/codegen/models/__init__.py +13 -0
  4. examples/codegen/models/schemas.py +76 -0
  5. examples/codegen/tests/__init__.py +1 -0
  6. examples/codegen/tests/test_ai_helpers.py +235 -0
  7. examples/codegen/tests/test_ast_analysis.py +174 -0
  8. examples/codegen/tests/test_code_analysis.py +134 -0
  9. examples/codegen/tests/test_code_context.py +301 -0
  10. examples/codegen/tests/test_code_nav.py +89 -0
  11. examples/codegen/tests/test_dependency_tools.py +119 -0
  12. examples/codegen/tests/test_example_tools.py +185 -0
  13. examples/codegen/tests/test_git_tools.py +112 -0
  14. examples/codegen/tests/test_impl_agent_schemas.py +193 -0
  15. examples/codegen/tests/test_impl_agent_v4_graph.py +94 -0
  16. examples/codegen/tests/test_jedi_analysis.py +226 -0
  17. examples/codegen/tests/test_meta_tools.py +250 -0
  18. examples/codegen/tests/test_plan_discovery_prompt.py +98 -0
  19. examples/codegen/tests/test_syntax_tools.py +85 -0
  20. examples/codegen/tests/test_synthesize_prompt.py +94 -0
  21. examples/codegen/tests/test_template_tools.py +244 -0
  22. examples/codegen/tools/__init__.py +80 -0
  23. examples/codegen/tools/ai_helpers.py +420 -0
  24. examples/codegen/tools/ast_analysis.py +92 -0
  25. examples/codegen/tools/code_context.py +180 -0
  26. examples/codegen/tools/code_nav.py +52 -0
  27. examples/codegen/tools/dependency_tools.py +120 -0
  28. examples/codegen/tools/example_tools.py +188 -0
  29. examples/codegen/tools/git_tools.py +151 -0
  30. examples/codegen/tools/impl_executor.py +614 -0
  31. examples/codegen/tools/jedi_analysis.py +311 -0
  32. examples/codegen/tools/meta_tools.py +202 -0
  33. examples/codegen/tools/syntax_tools.py +26 -0
  34. examples/codegen/tools/template_tools.py +356 -0
  35. examples/fastapi_interview.py +167 -0
  36. examples/npc/api/__init__.py +1 -0
  37. examples/npc/api/app.py +100 -0
  38. examples/npc/api/routes/__init__.py +5 -0
  39. examples/npc/api/routes/encounter.py +182 -0
  40. examples/npc/api/session.py +330 -0
  41. examples/npc/demo.py +387 -0
  42. examples/npc/nodes/__init__.py +5 -0
  43. examples/npc/nodes/image_node.py +92 -0
  44. examples/npc/run_encounter.py +230 -0
  45. examples/shared/__init__.py +0 -0
  46. examples/shared/replicate_tool.py +238 -0
  47. examples/storyboard/__init__.py +1 -0
  48. examples/storyboard/generate_videos.py +335 -0
  49. examples/storyboard/nodes/__init__.py +12 -0
  50. examples/storyboard/nodes/animated_character_node.py +248 -0
  51. examples/storyboard/nodes/animated_image_node.py +138 -0
  52. examples/storyboard/nodes/character_node.py +162 -0
  53. examples/storyboard/nodes/image_node.py +118 -0
  54. examples/storyboard/nodes/replicate_tool.py +49 -0
  55. examples/storyboard/retry_images.py +118 -0
  56. scripts/demo_async_executor.py +212 -0
  57. scripts/demo_interview_e2e.py +200 -0
  58. scripts/demo_streaming.py +140 -0
  59. scripts/run_interview_demo.py +94 -0
  60. scripts/test_interrupt_fix.py +26 -0
  61. tests/__init__.py +1 -0
  62. tests/conftest.py +178 -0
  63. tests/integration/__init__.py +1 -0
  64. tests/integration/test_animated_storyboard.py +63 -0
  65. tests/integration/test_cli_commands.py +242 -0
  66. tests/integration/test_colocated_prompts.py +139 -0
  67. tests/integration/test_map_demo.py +50 -0
  68. tests/integration/test_memory_demo.py +283 -0
  69. tests/integration/test_npc_api/__init__.py +1 -0
  70. tests/integration/test_npc_api/test_routes.py +357 -0
  71. tests/integration/test_npc_api/test_session.py +216 -0
  72. tests/integration/test_pipeline_flow.py +105 -0
  73. tests/integration/test_providers.py +163 -0
  74. tests/integration/test_resume.py +75 -0
  75. tests/integration/test_subgraph_integration.py +295 -0
  76. tests/integration/test_subgraph_interrupt.py +106 -0
  77. tests/unit/__init__.py +1 -0
  78. tests/unit/test_agent_nodes.py +355 -0
  79. tests/unit/test_async_executor.py +346 -0
  80. tests/unit/test_checkpointer.py +212 -0
  81. tests/unit/test_checkpointer_factory.py +212 -0
  82. tests/unit/test_cli.py +121 -0
  83. tests/unit/test_cli_package.py +81 -0
  84. tests/unit/test_compile_graph_map.py +132 -0
  85. tests/unit/test_conditions_routing.py +253 -0
  86. tests/unit/test_config.py +93 -0
  87. tests/unit/test_conversation_memory.py +276 -0
  88. tests/unit/test_database.py +145 -0
  89. tests/unit/test_deprecation.py +104 -0
  90. tests/unit/test_executor.py +172 -0
  91. tests/unit/test_executor_async.py +179 -0
  92. tests/unit/test_export.py +149 -0
  93. tests/unit/test_expressions.py +178 -0
  94. tests/unit/test_feature_brainstorm.py +194 -0
  95. tests/unit/test_format_prompt.py +145 -0
  96. tests/unit/test_generic_report.py +200 -0
  97. tests/unit/test_graph_commands.py +327 -0
  98. tests/unit/test_graph_linter.py +627 -0
  99. tests/unit/test_graph_loader.py +357 -0
  100. tests/unit/test_graph_schema.py +193 -0
  101. tests/unit/test_inline_schema.py +151 -0
  102. tests/unit/test_interrupt_node.py +182 -0
  103. tests/unit/test_issues.py +164 -0
  104. tests/unit/test_jinja2_prompts.py +85 -0
  105. tests/unit/test_json_extract.py +134 -0
  106. tests/unit/test_langsmith.py +600 -0
  107. tests/unit/test_langsmith_tools.py +204 -0
  108. tests/unit/test_llm_factory.py +109 -0
  109. tests/unit/test_llm_factory_async.py +118 -0
  110. tests/unit/test_loops.py +403 -0
  111. tests/unit/test_map_node.py +144 -0
  112. tests/unit/test_no_backward_compat.py +56 -0
  113. tests/unit/test_node_factory.py +348 -0
  114. tests/unit/test_passthrough_node.py +126 -0
  115. tests/unit/test_prompts.py +324 -0
  116. tests/unit/test_python_nodes.py +198 -0
  117. tests/unit/test_reliability.py +298 -0
  118. tests/unit/test_result_export.py +234 -0
  119. tests/unit/test_router.py +296 -0
  120. tests/unit/test_sanitize.py +99 -0
  121. tests/unit/test_schema_loader.py +295 -0
  122. tests/unit/test_shell_tools.py +229 -0
  123. tests/unit/test_state_builder.py +331 -0
  124. tests/unit/test_state_builder_map.py +104 -0
  125. tests/unit/test_state_config.py +197 -0
  126. tests/unit/test_streaming.py +307 -0
  127. tests/unit/test_subgraph.py +596 -0
  128. tests/unit/test_template.py +190 -0
  129. tests/unit/test_tool_call_integration.py +164 -0
  130. tests/unit/test_tool_call_node.py +178 -0
  131. tests/unit/test_tool_nodes.py +129 -0
  132. tests/unit/test_websearch.py +234 -0
  133. yamlgraph/__init__.py +35 -0
  134. yamlgraph/builder.py +110 -0
  135. yamlgraph/cli/__init__.py +159 -0
  136. yamlgraph/cli/__main__.py +6 -0
  137. yamlgraph/cli/commands.py +231 -0
  138. yamlgraph/cli/deprecation.py +92 -0
  139. yamlgraph/cli/graph_commands.py +541 -0
  140. yamlgraph/cli/validators.py +37 -0
  141. yamlgraph/config.py +67 -0
  142. yamlgraph/constants.py +70 -0
  143. yamlgraph/error_handlers.py +227 -0
  144. yamlgraph/executor.py +290 -0
  145. yamlgraph/executor_async.py +288 -0
  146. yamlgraph/graph_loader.py +451 -0
  147. yamlgraph/map_compiler.py +150 -0
  148. yamlgraph/models/__init__.py +36 -0
  149. yamlgraph/models/graph_schema.py +181 -0
  150. yamlgraph/models/schemas.py +124 -0
  151. yamlgraph/models/state_builder.py +236 -0
  152. yamlgraph/node_factory.py +768 -0
  153. yamlgraph/routing.py +87 -0
  154. yamlgraph/schema_loader.py +240 -0
  155. yamlgraph/storage/__init__.py +20 -0
  156. yamlgraph/storage/checkpointer.py +72 -0
  157. yamlgraph/storage/checkpointer_factory.py +123 -0
  158. yamlgraph/storage/database.py +320 -0
  159. yamlgraph/storage/export.py +269 -0
  160. yamlgraph/tools/__init__.py +1 -0
  161. yamlgraph/tools/agent.py +320 -0
  162. yamlgraph/tools/graph_linter.py +388 -0
  163. yamlgraph/tools/langsmith_tools.py +125 -0
  164. yamlgraph/tools/nodes.py +126 -0
  165. yamlgraph/tools/python_tool.py +179 -0
  166. yamlgraph/tools/shell.py +205 -0
  167. yamlgraph/tools/websearch.py +242 -0
  168. yamlgraph/utils/__init__.py +48 -0
  169. yamlgraph/utils/conditions.py +157 -0
  170. yamlgraph/utils/expressions.py +245 -0
  171. yamlgraph/utils/json_extract.py +104 -0
  172. yamlgraph/utils/langsmith.py +416 -0
  173. yamlgraph/utils/llm_factory.py +118 -0
  174. yamlgraph/utils/llm_factory_async.py +105 -0
  175. yamlgraph/utils/logging.py +104 -0
  176. yamlgraph/utils/prompts.py +171 -0
  177. yamlgraph/utils/sanitize.py +98 -0
  178. yamlgraph/utils/template.py +102 -0
  179. yamlgraph/utils/validators.py +181 -0
  180. yamlgraph-0.3.9.dist-info/METADATA +1105 -0
  181. yamlgraph-0.3.9.dist-info/RECORD +185 -0
  182. yamlgraph-0.3.9.dist-info/WHEEL +5 -0
  183. yamlgraph-0.3.9.dist-info/entry_points.txt +2 -0
  184. yamlgraph-0.3.9.dist-info/licenses/LICENSE +33 -0
  185. yamlgraph-0.3.9.dist-info/top_level.txt +4 -0
yamlgraph/executor_async.py
@@ -0,0 +1,288 @@
+ """Async Prompt Executor - Async interface for LLM calls.
+
+ This module provides async versions of execute_prompt for use in
+ async contexts like web servers or concurrent pipelines.
+
+ Note: This is a foundation module. The underlying LLM calls still
+ use sync HTTP clients wrapped with run_in_executor.
+ """
+
+ from __future__ import annotations
+
+ import asyncio
+ import logging
+ from collections.abc import AsyncIterator
+ from typing import TYPE_CHECKING, TypeVar
+
+ from langchain_core.messages import HumanMessage, SystemMessage
+ from pydantic import BaseModel
+
+ from yamlgraph.config import DEFAULT_TEMPERATURE
+ from yamlgraph.executor import format_prompt, load_prompt
+ from yamlgraph.utils.llm_factory import create_llm
+ from yamlgraph.utils.llm_factory_async import invoke_async
+
+ if TYPE_CHECKING:
+     from langgraph.graph.state import CompiledStateGraph
+ from yamlgraph.utils.template import validate_variables
+
+ logger = logging.getLogger(__name__)
+
+ T = TypeVar("T", bound=BaseModel)
+
+
+ async def execute_prompt_async(
+     prompt_name: str,
+     variables: dict | None = None,
+     output_model: type[T] | None = None,
+     temperature: float = DEFAULT_TEMPERATURE,
+     provider: str | None = None,
+ ) -> T | str:
+     """Execute a YAML prompt asynchronously.
+
+     Async version of execute_prompt for use in async contexts.
+
+     Args:
+         prompt_name: Name of the prompt file (without .yaml)
+         variables: Variables to substitute in the template
+         output_model: Optional Pydantic model for structured output
+         temperature: LLM temperature setting
+         provider: LLM provider ("anthropic", "mistral", "openai")
+
+     Returns:
+         Parsed Pydantic model if output_model provided, else raw string
+
+     Example:
+         >>> result = await execute_prompt_async(
+         ...     "greet",
+         ...     variables={"name": "World"},
+         ...     output_model=GenericReport,
+         ... )
+     """
+     variables = variables or {}
+
+     # Load and validate prompt (sync - file I/O is fast)
+     prompt_config = load_prompt(prompt_name)
+
+     full_template = prompt_config.get("system", "") + prompt_config.get("user", "")
+     validate_variables(full_template, variables, prompt_name)
+
+     # Extract provider from YAML metadata if not provided
+     if provider is None and "provider" in prompt_config:
+         provider = prompt_config["provider"]
+         logger.debug(f"Using provider from YAML metadata: {provider}")
+
+     system_text = format_prompt(prompt_config.get("system", ""), variables)
+     user_text = format_prompt(prompt_config["user"], variables)
+
+     messages = []
+     if system_text:
+         messages.append(SystemMessage(content=system_text))
+     messages.append(HumanMessage(content=user_text))
+
+     # Create LLM (cached via factory)
+     llm = create_llm(temperature=temperature, provider=provider)
+
+     # Invoke asynchronously
+     return await invoke_async(llm, messages, output_model)
+
+
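For reference, a runnable sketch of the structured-output path. The Summary model and the "summarize" prompt name are illustrative stand-ins (the docstring's GenericReport is likewise just an example); it assumes a summarize.yaml prompt is discoverable by load_prompt and provider credentials are configured:

    import asyncio
    from pydantic import BaseModel

    from yamlgraph.executor_async import execute_prompt_async

    class Summary(BaseModel):
        # Hypothetical output model, for illustration only.
        title: str
        bullet_points: list[str]

    async def main() -> None:
        result = await execute_prompt_async(
            "summarize",
            variables={"text": "long article text..."},
            output_model=Summary,
        )
        print(result.title)

    asyncio.run(main())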
+ async def execute_prompts_concurrent(
+     prompts: list[dict],
+ ) -> list[BaseModel | str]:
+     """Execute multiple prompts concurrently.
+
+     Useful for parallel LLM calls in pipelines.
+
+     Args:
+         prompts: List of dicts with keys:
+             - prompt_name: str (required)
+             - variables: dict (optional)
+             - output_model: Type[BaseModel] (optional)
+             - temperature: float (optional)
+             - provider: str (optional)
+
+     Returns:
+         List of results in same order as input prompts
+
+     Example:
+         >>> results = await execute_prompts_concurrent([
+         ...     {"prompt_name": "summarize", "variables": {"text": "..."}},
+         ...     {"prompt_name": "analyze", "variables": {"text": "..."}},
+         ... ])
+     """
+     tasks = []
+     for prompt_config in prompts:
+         task = execute_prompt_async(
+             prompt_name=prompt_config["prompt_name"],
+             variables=prompt_config.get("variables"),
+             output_model=prompt_config.get("output_model"),
+             temperature=prompt_config.get("temperature", DEFAULT_TEMPERATURE),
+             provider=prompt_config.get("provider"),
+         )
+         tasks.append(task)
+
+     return await asyncio.gather(*tasks)
+
+
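One caveat worth noting at the call site: asyncio.gather without return_exceptions=True propagates the first failure and discards the other results. A caller that wants per-prompt errors can fan out itself; a minimal sketch, assuming the same spec dicts as above:

    import asyncio

    from yamlgraph.executor_async import execute_prompt_async

    async def run_all_tolerant(specs: list[dict]) -> list:
        # Same fan-out as execute_prompts_concurrent, but failed prompts
        # come back as exception objects in-place instead of aborting the
        # whole batch.
        tasks = [
            execute_prompt_async(
                prompt_name=spec["prompt_name"],
                variables=spec.get("variables"),
                output_model=spec.get("output_model"),
            )
            for spec in specs
        ]
        return await asyncio.gather(*tasks, return_exceptions=True)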
+ # ==============================================================================
+ # Streaming Support (Phase 3 - Feature 004)
+ # ==============================================================================
+
+
+ async def execute_prompt_streaming(
+     prompt_name: str,
+     variables: dict | None = None,
+     temperature: float = DEFAULT_TEMPERATURE,
+     provider: str | None = None,
+ ) -> AsyncIterator[str]:
+     """Execute a YAML prompt with streaming token output.
+
+     Yields tokens as they are generated by the LLM. Does not support
+     structured output (output_model) - use execute_prompt_async for that.
+
+     Args:
+         prompt_name: Name of the prompt file (without .yaml)
+         variables: Variables to substitute in the template
+         temperature: LLM temperature setting
+         provider: LLM provider ("anthropic", "mistral", "openai")
+
+     Yields:
+         Token strings as they are generated
+
+     Example:
+         >>> async for token in execute_prompt_streaming("greet", {"name": "World"}):
+         ...     print(token, end="", flush=True)
+         Hello, World!
+     """
+     variables = variables or {}
+
+     # Load and validate prompt
+     prompt_config = load_prompt(prompt_name)
+
+     full_template = prompt_config.get("system", "") + prompt_config.get("user", "")
+     validate_variables(full_template, variables, prompt_name)
+
+     # Extract provider from YAML metadata if not provided
+     if provider is None and "provider" in prompt_config:
+         provider = prompt_config["provider"]
+         logger.debug(f"Using provider from YAML metadata: {provider}")
+
+     system_text = format_prompt(prompt_config.get("system", ""), variables)
+     user_text = format_prompt(prompt_config["user"], variables)
+
+     messages = []
+     if system_text:
+         messages.append(SystemMessage(content=system_text))
+     messages.append(HumanMessage(content=user_text))
+
+     # Create LLM (cached via factory)
+     llm = create_llm(temperature=temperature, provider=provider)
+
+     # Stream tokens
+     async for chunk in llm.astream(messages):
+         content = chunk.content
+         if content:  # Skip empty chunks
+             yield content
+
+
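A small consumer sketch for the streaming path, assuming a greet prompt resolvable by load_prompt and configured credentials; it echoes tokens live while keeping the assembled text:

    import asyncio

    from yamlgraph.executor_async import execute_prompt_streaming

    async def main() -> None:
        pieces: list[str] = []
        async for token in execute_prompt_streaming("greet", {"name": "World"}):
            print(token, end="", flush=True)  # live token output
            pieces.append(token)  # retain for post-processing
        print(f"\n[{len(''.join(pieces))} characters streamed]")

    asyncio.run(main())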
+ # ==============================================================================
+ # Async Graph Execution (Phase 2 - Feature 003)
+ # ==============================================================================
+
+
+ async def run_graph_async(
+     app,
+     initial_state: dict,
+     config: dict | None = None,
+ ) -> dict:
+     """Execute a compiled graph asynchronously.
+
+     Thin wrapper around LangGraph's ainvoke for consistent API.
+     Supports interrupt handling and Command resume.
+
+     Args:
+         app: Compiled LangGraph app (from graph.compile())
+         initial_state: Initial state dict or Command(resume=...) for resuming
+         config: LangGraph config with thread_id, e.g.
+             {"configurable": {"thread_id": "my-thread"}}
+
+     Returns:
+         Final state dict. If interrupted, contains "__interrupt__" key.
+
+     Example:
+         >>> app = await load_and_compile_async("graphs/interview.yaml")
+         >>> result = await run_graph_async(
+         ...     app,
+         ...     {"query": "hello"},
+         ...     {"configurable": {"thread_id": "t1"}},
+         ... )
+         >>> if "__interrupt__" in result:
+         ...     # Handle interrupt - get user input
+         ...     result = await run_graph_async(
+         ...         app,
+         ...         Command(resume="user answer"),
+         ...         {"configurable": {"thread_id": "t1"}},
+         ...     )
+     """
+     config = config or {}
+     return await app.ainvoke(initial_state, config)
+
+
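The docstring's interrupt handling generalizes to a resume loop. A sketch, assuming Command is importable from langgraph.types (its location in recent LangGraph releases) and that the graph file exists at the illustrative path:

    import asyncio

    from langgraph.types import Command

    from yamlgraph.executor_async import load_and_compile_async, run_graph_async

    async def main() -> None:
        app = await load_and_compile_async("graphs/interview.yaml")
        cfg = {"configurable": {"thread_id": "t1"}}
        result = await run_graph_async(app, {"query": "hello"}, cfg)
        while "__interrupt__" in result:
            # Blocking input is fine for a demo script; a server would
            # surface the interrupt to the client instead.
            answer = input("> ")
            result = await run_graph_async(app, Command(resume=answer), cfg)
        print(result)

    asyncio.run(main())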
+ def compile_graph_async(
+     graph,
+     config,
+ ) -> CompiledStateGraph:
+     """Compile a StateGraph with async-compatible checkpointer.
+
+     Uses async_mode=True when fetching checkpointer to get
+     AsyncRedisSaver instead of RedisSaver.
+
+     Args:
+         graph: StateGraph instance
+         config: GraphConfig with optional checkpointer field
+
+     Returns:
+         Compiled graph ready for ainvoke()
+     """
+     from yamlgraph.storage.checkpointer_factory import get_checkpointer
+
+     checkpointer_config = getattr(config, "checkpointer", None)
+     checkpointer = get_checkpointer(checkpointer_config, async_mode=True)
+
+     return graph.compile(checkpointer=checkpointer)
+
+
+ async def load_and_compile_async(path: str) -> CompiledStateGraph:
+     """Load YAML and compile to async-ready graph.
+
+     Convenience function combining load_graph_config, compile_graph,
+     and compile_graph_async.
+
+     Args:
+         path: Path to YAML graph definition
+
+     Returns:
+         Compiled graph ready for ainvoke()
+
+     Example:
+         >>> app = await load_and_compile_async("graphs/interview.yaml")
+         >>> result = await run_graph_async(app, {"input": "hi"}, config)
+     """
+     from yamlgraph.graph_loader import compile_graph, load_graph_config
+
+     config = load_graph_config(path)
+     logger.info(f"Loaded graph config: {config.name} v{config.version}")
+
+     state_graph = compile_graph(config)
+     return compile_graph_async(state_graph, config)
+
+
+ __all__ = [
+     "execute_prompt_async",
+     "execute_prompt_streaming",
+     "execute_prompts_concurrent",
+     "run_graph_async",
+     "compile_graph_async",
+     "load_and_compile_async",
+ ]
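Because compile_graph_async only reads config.checkpointer via getattr, a hand-built StateGraph can also be compiled without going through YAML. A sketch under that assumption (SimpleNamespace stands in for GraphConfig here, and get_checkpointer(None, async_mode=True) is assumed to yield a usable default):

    import asyncio
    from types import SimpleNamespace
    from typing import TypedDict

    from langgraph.graph import END, START, StateGraph

    from yamlgraph.executor_async import compile_graph_async, run_graph_async

    class State(TypedDict):
        query: str
        answer: str

    def answer_node(state: State) -> dict:
        return {"answer": f"echo: {state['query']}"}

    graph = StateGraph(State)
    graph.add_node("answer", answer_node)
    graph.add_edge(START, "answer")
    graph.add_edge("answer", END)

    cfg = SimpleNamespace(checkpointer=None)  # stands in for GraphConfig
    app = compile_graph_async(graph, cfg)
    print(asyncio.run(run_graph_async(app, {"query": "hi"})))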