yamlgraph 0.3.9__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (185)
  1. examples/__init__.py +1 -0
  2. examples/codegen/__init__.py +5 -0
  3. examples/codegen/models/__init__.py +13 -0
  4. examples/codegen/models/schemas.py +76 -0
  5. examples/codegen/tests/__init__.py +1 -0
  6. examples/codegen/tests/test_ai_helpers.py +235 -0
  7. examples/codegen/tests/test_ast_analysis.py +174 -0
  8. examples/codegen/tests/test_code_analysis.py +134 -0
  9. examples/codegen/tests/test_code_context.py +301 -0
  10. examples/codegen/tests/test_code_nav.py +89 -0
  11. examples/codegen/tests/test_dependency_tools.py +119 -0
  12. examples/codegen/tests/test_example_tools.py +185 -0
  13. examples/codegen/tests/test_git_tools.py +112 -0
  14. examples/codegen/tests/test_impl_agent_schemas.py +193 -0
  15. examples/codegen/tests/test_impl_agent_v4_graph.py +94 -0
  16. examples/codegen/tests/test_jedi_analysis.py +226 -0
  17. examples/codegen/tests/test_meta_tools.py +250 -0
  18. examples/codegen/tests/test_plan_discovery_prompt.py +98 -0
  19. examples/codegen/tests/test_syntax_tools.py +85 -0
  20. examples/codegen/tests/test_synthesize_prompt.py +94 -0
  21. examples/codegen/tests/test_template_tools.py +244 -0
  22. examples/codegen/tools/__init__.py +80 -0
  23. examples/codegen/tools/ai_helpers.py +420 -0
  24. examples/codegen/tools/ast_analysis.py +92 -0
  25. examples/codegen/tools/code_context.py +180 -0
  26. examples/codegen/tools/code_nav.py +52 -0
  27. examples/codegen/tools/dependency_tools.py +120 -0
  28. examples/codegen/tools/example_tools.py +188 -0
  29. examples/codegen/tools/git_tools.py +151 -0
  30. examples/codegen/tools/impl_executor.py +614 -0
  31. examples/codegen/tools/jedi_analysis.py +311 -0
  32. examples/codegen/tools/meta_tools.py +202 -0
  33. examples/codegen/tools/syntax_tools.py +26 -0
  34. examples/codegen/tools/template_tools.py +356 -0
  35. examples/fastapi_interview.py +167 -0
  36. examples/npc/api/__init__.py +1 -0
  37. examples/npc/api/app.py +100 -0
  38. examples/npc/api/routes/__init__.py +5 -0
  39. examples/npc/api/routes/encounter.py +182 -0
  40. examples/npc/api/session.py +330 -0
  41. examples/npc/demo.py +387 -0
  42. examples/npc/nodes/__init__.py +5 -0
  43. examples/npc/nodes/image_node.py +92 -0
  44. examples/npc/run_encounter.py +230 -0
  45. examples/shared/__init__.py +0 -0
  46. examples/shared/replicate_tool.py +238 -0
  47. examples/storyboard/__init__.py +1 -0
  48. examples/storyboard/generate_videos.py +335 -0
  49. examples/storyboard/nodes/__init__.py +12 -0
  50. examples/storyboard/nodes/animated_character_node.py +248 -0
  51. examples/storyboard/nodes/animated_image_node.py +138 -0
  52. examples/storyboard/nodes/character_node.py +162 -0
  53. examples/storyboard/nodes/image_node.py +118 -0
  54. examples/storyboard/nodes/replicate_tool.py +49 -0
  55. examples/storyboard/retry_images.py +118 -0
  56. scripts/demo_async_executor.py +212 -0
  57. scripts/demo_interview_e2e.py +200 -0
  58. scripts/demo_streaming.py +140 -0
  59. scripts/run_interview_demo.py +94 -0
  60. scripts/test_interrupt_fix.py +26 -0
  61. tests/__init__.py +1 -0
  62. tests/conftest.py +178 -0
  63. tests/integration/__init__.py +1 -0
  64. tests/integration/test_animated_storyboard.py +63 -0
  65. tests/integration/test_cli_commands.py +242 -0
  66. tests/integration/test_colocated_prompts.py +139 -0
  67. tests/integration/test_map_demo.py +50 -0
  68. tests/integration/test_memory_demo.py +283 -0
  69. tests/integration/test_npc_api/__init__.py +1 -0
  70. tests/integration/test_npc_api/test_routes.py +357 -0
  71. tests/integration/test_npc_api/test_session.py +216 -0
  72. tests/integration/test_pipeline_flow.py +105 -0
  73. tests/integration/test_providers.py +163 -0
  74. tests/integration/test_resume.py +75 -0
  75. tests/integration/test_subgraph_integration.py +295 -0
  76. tests/integration/test_subgraph_interrupt.py +106 -0
  77. tests/unit/__init__.py +1 -0
  78. tests/unit/test_agent_nodes.py +355 -0
  79. tests/unit/test_async_executor.py +346 -0
  80. tests/unit/test_checkpointer.py +212 -0
  81. tests/unit/test_checkpointer_factory.py +212 -0
  82. tests/unit/test_cli.py +121 -0
  83. tests/unit/test_cli_package.py +81 -0
  84. tests/unit/test_compile_graph_map.py +132 -0
  85. tests/unit/test_conditions_routing.py +253 -0
  86. tests/unit/test_config.py +93 -0
  87. tests/unit/test_conversation_memory.py +276 -0
  88. tests/unit/test_database.py +145 -0
  89. tests/unit/test_deprecation.py +104 -0
  90. tests/unit/test_executor.py +172 -0
  91. tests/unit/test_executor_async.py +179 -0
  92. tests/unit/test_export.py +149 -0
  93. tests/unit/test_expressions.py +178 -0
  94. tests/unit/test_feature_brainstorm.py +194 -0
  95. tests/unit/test_format_prompt.py +145 -0
  96. tests/unit/test_generic_report.py +200 -0
  97. tests/unit/test_graph_commands.py +327 -0
  98. tests/unit/test_graph_linter.py +627 -0
  99. tests/unit/test_graph_loader.py +357 -0
  100. tests/unit/test_graph_schema.py +193 -0
  101. tests/unit/test_inline_schema.py +151 -0
  102. tests/unit/test_interrupt_node.py +182 -0
  103. tests/unit/test_issues.py +164 -0
  104. tests/unit/test_jinja2_prompts.py +85 -0
  105. tests/unit/test_json_extract.py +134 -0
  106. tests/unit/test_langsmith.py +600 -0
  107. tests/unit/test_langsmith_tools.py +204 -0
  108. tests/unit/test_llm_factory.py +109 -0
  109. tests/unit/test_llm_factory_async.py +118 -0
  110. tests/unit/test_loops.py +403 -0
  111. tests/unit/test_map_node.py +144 -0
  112. tests/unit/test_no_backward_compat.py +56 -0
  113. tests/unit/test_node_factory.py +348 -0
  114. tests/unit/test_passthrough_node.py +126 -0
  115. tests/unit/test_prompts.py +324 -0
  116. tests/unit/test_python_nodes.py +198 -0
  117. tests/unit/test_reliability.py +298 -0
  118. tests/unit/test_result_export.py +234 -0
  119. tests/unit/test_router.py +296 -0
  120. tests/unit/test_sanitize.py +99 -0
  121. tests/unit/test_schema_loader.py +295 -0
  122. tests/unit/test_shell_tools.py +229 -0
  123. tests/unit/test_state_builder.py +331 -0
  124. tests/unit/test_state_builder_map.py +104 -0
  125. tests/unit/test_state_config.py +197 -0
  126. tests/unit/test_streaming.py +307 -0
  127. tests/unit/test_subgraph.py +596 -0
  128. tests/unit/test_template.py +190 -0
  129. tests/unit/test_tool_call_integration.py +164 -0
  130. tests/unit/test_tool_call_node.py +178 -0
  131. tests/unit/test_tool_nodes.py +129 -0
  132. tests/unit/test_websearch.py +234 -0
  133. yamlgraph/__init__.py +35 -0
  134. yamlgraph/builder.py +110 -0
  135. yamlgraph/cli/__init__.py +159 -0
  136. yamlgraph/cli/__main__.py +6 -0
  137. yamlgraph/cli/commands.py +231 -0
  138. yamlgraph/cli/deprecation.py +92 -0
  139. yamlgraph/cli/graph_commands.py +541 -0
  140. yamlgraph/cli/validators.py +37 -0
  141. yamlgraph/config.py +67 -0
  142. yamlgraph/constants.py +70 -0
  143. yamlgraph/error_handlers.py +227 -0
  144. yamlgraph/executor.py +290 -0
  145. yamlgraph/executor_async.py +288 -0
  146. yamlgraph/graph_loader.py +451 -0
  147. yamlgraph/map_compiler.py +150 -0
  148. yamlgraph/models/__init__.py +36 -0
  149. yamlgraph/models/graph_schema.py +181 -0
  150. yamlgraph/models/schemas.py +124 -0
  151. yamlgraph/models/state_builder.py +236 -0
  152. yamlgraph/node_factory.py +768 -0
  153. yamlgraph/routing.py +87 -0
  154. yamlgraph/schema_loader.py +240 -0
  155. yamlgraph/storage/__init__.py +20 -0
  156. yamlgraph/storage/checkpointer.py +72 -0
  157. yamlgraph/storage/checkpointer_factory.py +123 -0
  158. yamlgraph/storage/database.py +320 -0
  159. yamlgraph/storage/export.py +269 -0
  160. yamlgraph/tools/__init__.py +1 -0
  161. yamlgraph/tools/agent.py +320 -0
  162. yamlgraph/tools/graph_linter.py +388 -0
  163. yamlgraph/tools/langsmith_tools.py +125 -0
  164. yamlgraph/tools/nodes.py +126 -0
  165. yamlgraph/tools/python_tool.py +179 -0
  166. yamlgraph/tools/shell.py +205 -0
  167. yamlgraph/tools/websearch.py +242 -0
  168. yamlgraph/utils/__init__.py +48 -0
  169. yamlgraph/utils/conditions.py +157 -0
  170. yamlgraph/utils/expressions.py +245 -0
  171. yamlgraph/utils/json_extract.py +104 -0
  172. yamlgraph/utils/langsmith.py +416 -0
  173. yamlgraph/utils/llm_factory.py +118 -0
  174. yamlgraph/utils/llm_factory_async.py +105 -0
  175. yamlgraph/utils/logging.py +104 -0
  176. yamlgraph/utils/prompts.py +171 -0
  177. yamlgraph/utils/sanitize.py +98 -0
  178. yamlgraph/utils/template.py +102 -0
  179. yamlgraph/utils/validators.py +181 -0
  180. yamlgraph-0.3.9.dist-info/METADATA +1105 -0
  181. yamlgraph-0.3.9.dist-info/RECORD +185 -0
  182. yamlgraph-0.3.9.dist-info/WHEEL +5 -0
  183. yamlgraph-0.3.9.dist-info/entry_points.txt +2 -0
  184. yamlgraph-0.3.9.dist-info/licenses/LICENSE +33 -0
  185. yamlgraph-0.3.9.dist-info/top_level.txt +4 -0
@@ -0,0 +1,320 @@
1
+ """SQLite Storage - Simple persistence for pipeline state.
2
+
3
+ Provides a lightweight wrapper around SQLite for storing
4
+ and retrieving pipeline execution state.
5
+
6
+ Supports optional connection pooling for high-throughput scenarios.
7
+ """
8
+
9
+ import json
10
+ import sqlite3
11
+ import threading
12
+ from collections.abc import Iterator
13
+ from contextlib import contextmanager
14
+ from datetime import datetime
15
+ from pathlib import Path
16
+ from queue import Empty, Queue
17
+
18
+ from pydantic import BaseModel
19
+
20
+ from yamlgraph.config import DATABASE_PATH
21
+
22
+
23
class ConnectionPool:
    """Thread-safe SQLite connection pool.

    Maintains a pool of reusable connections for high-throughput scenarios.
    Connections are returned to the pool after use instead of being closed.
    """

    def __init__(self, db_path: Path, pool_size: int = 5):
        """Initialize connection pool.

        Args:
            db_path: Path to SQLite database
            pool_size: Maximum number of connections to maintain
        """
        self._db_path = db_path
        self._pool_size = pool_size
        # Idle connections available for reuse; bounded so put_nowait
        # raises when the pool is already full.
        self._pool: Queue[sqlite3.Connection] = Queue(maxsize=pool_size)
        self._lock = threading.Lock()
        # Count of connections created and not yet closed (guarded by _lock).
        self._total_connections = 0

    def _create_connection(self) -> sqlite3.Connection:
        """Create a new database connection.

        check_same_thread=False is required so a connection created by one
        thread can be borrowed by another via the pool.
        """
        conn = sqlite3.connect(self._db_path, check_same_thread=False)
        conn.row_factory = sqlite3.Row
        return conn

    @contextmanager
    def get_connection(self) -> Iterator[sqlite3.Connection]:
        """Get a connection from the pool.

        Creates a new connection if the pool is empty and the total is under
        the limit; otherwise blocks until another borrower returns one.

        Yields:
            Database connection (returned to pool on exit)
        """
        conn = None
        try:
            # Fast path: reuse an idle pooled connection.
            try:
                conn = self._pool.get_nowait()
            except Empty:
                # Pool empty - create new connection if under limit.
                with self._lock:
                    if self._total_connections < self._pool_size:
                        conn = self._create_connection()
                        self._total_connections += 1

            if conn is None:
                # At limit - block waiting for a returned connection.
                conn = self._pool.get()

            yield conn

        finally:
            # Return connection to pool.
            if conn is not None:
                try:
                    # Fix: discard any uncommitted transaction before
                    # re-pooling, so a borrower that raised mid-transaction
                    # does not leak dirty state (and its write lock) to the
                    # next borrower.
                    conn.rollback()
                    self._pool.put_nowait(conn)
                except Exception:
                    # Rollback failed or pool full: close the connection and
                    # release its slot in the count.
                    conn.close()
                    with self._lock:
                        self._total_connections -= 1

    def close_all(self) -> None:
        """Close all idle connections in the pool.

        Connections currently checked out are not closed here, but the
        total count is reset so future borrows can create fresh ones.
        """
        while True:
            try:
                conn = self._pool.get_nowait()
                conn.close()
            except Empty:
                break
        with self._lock:
            self._total_connections = 0
99
+
100
+
101
class YamlGraphDB:
    """SQLite wrapper for yamlgraph state persistence.

    Stores one row per pipeline run in the ``pipeline_runs`` table, with the
    full state serialized as JSON in ``state_json``.

    Supports two connection modes:
    - Default: Creates new connection per operation (simple, safe)
    - Pooled: Reuses connections from pool (high-throughput)

    Example:
        # Default mode (simple)
        db = YamlGraphDB()

        # Pooled mode (high-throughput)
        db = YamlGraphDB(use_pool=True, pool_size=10)
    """

    def __init__(
        self,
        db_path: str | Path | None = None,
        use_pool: bool = False,
        pool_size: int = 5,
    ):
        """Initialize database connection.

        Args:
            db_path: Path to SQLite database file (default: outputs/yamlgraph.db)
            use_pool: Enable connection pooling for high-throughput scenarios
            pool_size: Maximum connections in pool (only used if use_pool=True)
        """
        if db_path is None:
            db_path = DATABASE_PATH
        self.db_path = Path(db_path)
        # Ensure the parent directory exists so sqlite3.connect can create the file.
        self.db_path.parent.mkdir(parents=True, exist_ok=True)

        self._use_pool = use_pool
        self._pool: ConnectionPool | None = None
        if use_pool:
            self._pool = ConnectionPool(self.db_path, pool_size)

        # Create tables/indexes up front so every public method can assume
        # the schema exists.
        self._init_db()

    @contextmanager
    def _get_connection(self) -> Iterator[sqlite3.Connection]:
        """Get a database connection.

        Uses pool if enabled, otherwise opens a fresh connection and closes
        it when the context exits. Both modes set sqlite3.Row so rows can be
        accessed by column name.

        Yields:
            Database connection
        """
        if self._pool is not None:
            with self._pool.get_connection() as conn:
                yield conn
        else:
            conn = sqlite3.connect(self.db_path)
            conn.row_factory = sqlite3.Row
            try:
                yield conn
            finally:
                conn.close()

    def close(self) -> None:
        """Close database connections.

        For pooled mode, closes all connections in pool. In default mode
        this is a no-op because connections are closed per operation.
        """
        if self._pool is not None:
            self._pool.close_all()

    def _init_db(self) -> None:
        """Initialize database tables and indexes (idempotent).

        NOTE(review): the idx_thread_id index is not UNIQUE, so the schema
        itself does not prevent duplicate thread_id rows — see save_state.
        """
        with self._get_connection() as conn:
            conn.execute("""
                CREATE TABLE IF NOT EXISTS pipeline_runs (
                    id INTEGER PRIMARY KEY AUTOINCREMENT,
                    thread_id TEXT NOT NULL,
                    created_at TEXT NOT NULL,
                    updated_at TEXT NOT NULL,
                    status TEXT NOT NULL DEFAULT 'running',
                    state_json TEXT NOT NULL
                )
            """)
            conn.execute("""
                CREATE INDEX IF NOT EXISTS idx_thread_id
                ON pipeline_runs(thread_id)
            """)
            conn.commit()

    def save_state(self, thread_id: str, state: dict, status: str = "running") -> int:
        """Save pipeline state.

        Upsert keyed on thread_id: updates the existing row if present,
        otherwise inserts a new one.

        Args:
            thread_id: Unique identifier for this run
            state: State dictionary to persist
            status: Current status (running, completed, failed)

        Returns:
            Row ID of the saved state
        """
        # Timestamps are naive local time — NOTE(review): confirm whether
        # stored times should be timezone-aware/UTC.
        now = datetime.now().isoformat()
        state_json = json.dumps(self._serialize_state(state), default=str)

        with self._get_connection() as conn:
            # Check if thread exists.
            # NOTE(review): this check-then-write is not atomic; two writers
            # racing on the same new thread_id could both insert (the index
            # is not UNIQUE) — confirm single-writer usage per thread_id.
            existing = conn.execute(
                "SELECT id FROM pipeline_runs WHERE thread_id = ?", (thread_id,)
            ).fetchone()

            if existing:
                conn.execute(
                    """UPDATE pipeline_runs
                    SET updated_at = ?, status = ?, state_json = ?
                    WHERE thread_id = ?""",
                    (now, status, state_json, thread_id),
                )
                conn.commit()
                return existing["id"]
            else:
                cursor = conn.execute(
                    """INSERT INTO pipeline_runs
                    (thread_id, created_at, updated_at, status, state_json)
                    VALUES (?, ?, ?, ?, ?)""",
                    (thread_id, now, now, status, state_json),
                )
                conn.commit()
                return cursor.lastrowid

    def load_state(self, thread_id: str) -> dict | None:
        """Load pipeline state by thread ID.

        Args:
            thread_id: Unique identifier for the run

        Returns:
            State dictionary (deserialized from JSON) or None if not found
        """
        with self._get_connection() as conn:
            row = conn.execute(
                "SELECT state_json FROM pipeline_runs WHERE thread_id = ?", (thread_id,)
            ).fetchone()

            if row:
                return json.loads(row["state_json"])
            return None

    def get_run_info(self, thread_id: str) -> dict | None:
        """Get run metadata without full state.

        Args:
            thread_id: Unique identifier for the run

        Returns:
            Dictionary with id, thread_id, created_at, updated_at, status,
            or None if not found
        """
        with self._get_connection() as conn:
            row = conn.execute(
                """SELECT id, thread_id, created_at, updated_at, status
                FROM pipeline_runs WHERE thread_id = ?""",
                (thread_id,),
            ).fetchone()

            if row:
                return dict(row)
            return None

    def list_runs(self, limit: int = 10) -> list[dict]:
        """List recent pipeline runs, most recently updated first.

        Args:
            limit: Maximum number of runs to return

        Returns:
            List of run metadata dictionaries (no state_json payload)
        """
        with self._get_connection() as conn:
            rows = conn.execute(
                """SELECT id, thread_id, created_at, updated_at, status
                FROM pipeline_runs
                ORDER BY updated_at DESC
                LIMIT ?""",
                (limit,),
            ).fetchall()

            return [dict(row) for row in rows]

    def delete_run(self, thread_id: str) -> bool:
        """Delete a pipeline run.

        Args:
            thread_id: Unique identifier for the run

        Returns:
            True if deleted, False if not found
        """
        with self._get_connection() as conn:
            cursor = conn.execute(
                "DELETE FROM pipeline_runs WHERE thread_id = ?", (thread_id,)
            )
            conn.commit()
            return cursor.rowcount > 0

    def _serialize_state(self, state: dict) -> dict:
        """Convert state to JSON-serializable format.

        Handles Pydantic models and other complex types. Anything this does
        not convert is stringified later by json.dumps(default=str) in
        save_state.

        Args:
            state: State dictionary

        Returns:
            JSON-serializable dictionary
        """
        result = {}
        for key, value in state.items():
            if isinstance(value, BaseModel):
                result[key] = value.model_dump()
            elif hasattr(value, "__dict__"):
                # Shallow conversion: nested attributes are not recursed into.
                result[key] = vars(value)
            else:
                result[key] = value
        return result
@@ -0,0 +1,269 @@
1
+ """JSON Export - Serialize pipeline results.
2
+
3
+ Provides functions to export pipeline state and results
4
+ to JSON format for sharing and archival.
5
+ """
6
+
7
+ import json
8
+ from datetime import datetime
9
+ from pathlib import Path
10
+ from typing import Any
11
+
12
+ from pydantic import BaseModel
13
+
14
+ from yamlgraph.config import OUTPUTS_DIR
15
+
16
+
17
+ def export_state(
18
+ state: dict,
19
+ output_dir: str | Path | None = None,
20
+ prefix: str = "export",
21
+ ) -> Path:
22
+ """Export pipeline state to JSON file.
23
+
24
+ Args:
25
+ state: State dictionary to export
26
+ output_dir: Directory for output files (default: outputs/)
27
+ prefix: Filename prefix
28
+
29
+ Returns:
30
+ Path to the created file
31
+ """
32
+ if output_dir is None:
33
+ output_dir = OUTPUTS_DIR
34
+ output_path = Path(output_dir)
35
+ output_path.mkdir(parents=True, exist_ok=True)
36
+
37
+ timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
38
+ thread_id = state.get("thread_id", "unknown")
39
+ filename = f"{prefix}_{thread_id}_{timestamp}.json"
40
+
41
+ filepath = output_path / filename
42
+
43
+ # Convert state to JSON-serializable format
44
+ export_data = _serialize_state(state)
45
+
46
+ with open(filepath, "w") as f:
47
+ json.dump(export_data, f, indent=2, default=str)
48
+
49
+ return filepath
50
+
51
+
52
def _serialize_state(state: dict) -> dict:
    """Convert state to JSON-serializable format.

    Pydantic models are dumped to plain dicts; arbitrary objects with a
    ``__dict__`` are serialized recursively; everything else passes through.

    Args:
        state: State dictionary

    Returns:
        JSON-serializable dictionary
    """

    def _convert(value):
        # One value at a time; mirrors the per-field handling below.
        if isinstance(value, BaseModel):
            return value.model_dump()
        if hasattr(value, "__dict__"):
            return _serialize_object(value)
        return value

    return {key: _convert(value) for key, value in state.items()}
74
+
75
+
76
def _serialize_object(obj: Any) -> Any:
    """Recursively serialize an object.

    Args:
        obj: Object to serialize

    Returns:
        JSON-serializable representation
    """
    # Guard-clause style: check each convertible shape and return early.
    if isinstance(obj, BaseModel):
        return obj.model_dump()
    if isinstance(obj, dict):
        return {key: _serialize_object(val) for key, val in obj.items()}
    if isinstance(obj, (list, tuple)):
        return [_serialize_object(element) for element in obj]
    if hasattr(obj, "isoformat"):
        # datetime/date-like objects become ISO-8601 strings.
        return obj.isoformat()
    return obj
95
+
96
+
97
+ def load_export(filepath: str | Path) -> dict:
98
+ """Load an exported JSON file.
99
+
100
+ Args:
101
+ filepath: Path to JSON file
102
+
103
+ Returns:
104
+ Loaded dictionary
105
+ """
106
+ with open(filepath) as f:
107
+ return json.load(f)
108
+
109
+
110
+ def list_exports(
111
+ output_dir: str | Path = "outputs", prefix: str = "export"
112
+ ) -> list[Path]:
113
+ """List all export files in a directory.
114
+
115
+ Args:
116
+ output_dir: Directory to search
117
+ prefix: Filename prefix filter
118
+
119
+ Returns:
120
+ List of matching file paths, sorted by modification time
121
+ """
122
+ output_path = Path(output_dir)
123
+ if not output_path.exists():
124
+ return []
125
+
126
+ files = list(output_path.glob(f"{prefix}_*.json"))
127
+ return sorted(files, key=lambda f: f.stat().st_mtime, reverse=True)
128
+
129
+
130
def export_summary(state: dict) -> dict:
    """Create a summary export (without full content).

    Useful for quick review of pipeline results.
    Works generically with any Pydantic models in state.

    Args:
        state: Full state dictionary

    Returns:
        Summary dictionary with key information only
    """
    # Bookkeeping fields that are either copied verbatim below or never
    # summarized.
    skip_keys = frozenset(
        {"_route", "_loop_counts", "thread_id", "topic", "current_step", "error"}
    )

    # Always-present header fields, copied directly from state.
    summary = {
        name: state.get(name)
        for name in ("thread_id", "topic", "current_step", "error")
    }

    # Summarize the remaining fields generically.
    for key, value in state.items():
        if key in skip_keys or value is None:
            continue

        if isinstance(value, BaseModel):
            # Pydantic models are reduced to their scalar fields.
            summary[key] = _extract_scalar_summary(value)
        elif isinstance(value, str):
            # Strings are reported as a presence flag only.
            summary[f"has_{key}"] = bool(value)

    return summary
167
+
168
+
169
def _extract_scalar_summary(model: BaseModel) -> dict[str, Any]:
    """Extract scalar fields from a Pydantic model for summary.

    Args:
        model: Any Pydantic model

    Returns:
        Dict with scalar field names and values (strings truncated)
    """
    summary: dict[str, Any] = {}
    for name, value in model.model_dump().items():
        if isinstance(value, str):
            # Keep long strings readable by truncating past 100 characters.
            summary[name] = value if len(value) <= 100 else value[:100] + "..."
        elif isinstance(value, (int, float, bool)):
            summary[name] = value
        elif isinstance(value, list):
            # Lists are summarized by their length only.
            summary[f"{name}_count"] = len(value)
    return summary
190
+
191
+
192
+ def export_result(
193
+ state: dict,
194
+ export_config: dict,
195
+ base_path: str | Path = "outputs",
196
+ ) -> list[Path]:
197
+ """Export state fields to files.
198
+
199
+ Args:
200
+ state: Final graph state
201
+ export_config: Mapping of field -> export settings
202
+ base_path: Base directory for exports
203
+
204
+ Returns:
205
+ List of paths to exported files
206
+
207
+ Example config:
208
+ {
209
+ "final_summary": {"format": "markdown", "filename": "summary.md"},
210
+ "generated": {"format": "json", "filename": "content.json"},
211
+ }
212
+ """
213
+ base_path = Path(base_path)
214
+ thread_id = state.get("thread_id", "unknown")
215
+ output_dir = base_path / thread_id
216
+ output_dir.mkdir(parents=True, exist_ok=True)
217
+
218
+ exported = []
219
+
220
+ for field, settings in export_config.items():
221
+ if field not in state or state[field] is None:
222
+ continue
223
+
224
+ value = state[field]
225
+ filename = settings.get("filename", f"{field}.txt")
226
+ format_type = settings.get("format", "text")
227
+
228
+ file_path = output_dir / filename
229
+
230
+ if format_type == "json":
231
+ content = _serialize_to_json(value)
232
+ file_path.write_text(content)
233
+ elif format_type == "markdown":
234
+ content = _serialize_to_markdown(value)
235
+ file_path.write_text(content)
236
+ else:
237
+ file_path.write_text(str(value))
238
+
239
+ exported.append(file_path)
240
+
241
+ return exported
242
+
243
+
244
def _serialize_to_json(value: Any) -> str:
    """Render *value* as a pretty-printed JSON string."""
    # Pydantic models serialize themselves; everything else goes through
    # json.dumps with str() as the last-resort encoder.
    return (
        value.model_dump_json(indent=2)
        if isinstance(value, BaseModel)
        else json.dumps(value, default=str, indent=2)
    )
249
+
250
+
251
def _serialize_to_markdown(value: Any) -> str:
    """Render *value* as a Markdown string."""
    # Pydantic models get structured Markdown; everything else is str()'d.
    if not isinstance(value, BaseModel):
        return str(value)
    return _pydantic_to_markdown(value)
256
+
257
+
258
def _pydantic_to_markdown(model: BaseModel) -> str:
    """Convert a Pydantic model to a Markdown document.

    The class name becomes the H1 title; list fields become H2 sections
    with bullet items, and scalar fields become bolded key/value lines.
    """
    parts = [f"# {model.__class__.__name__}", ""]
    for name, value in model.model_dump().items():
        if isinstance(value, list):
            parts.append(f"## {name.replace('_', ' ').title()}")
            parts.extend(f"- {item}" for item in value)
            parts.append("")
        else:
            parts.append(f"**{name.replace('_', ' ').title()}**: {value}")
    return "\n".join(parts)
@@ -0,0 +1 @@
1
+ """Shell tool execution utilities."""