yamlgraph 0.3.9__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- examples/__init__.py +1 -0
- examples/codegen/__init__.py +5 -0
- examples/codegen/models/__init__.py +13 -0
- examples/codegen/models/schemas.py +76 -0
- examples/codegen/tests/__init__.py +1 -0
- examples/codegen/tests/test_ai_helpers.py +235 -0
- examples/codegen/tests/test_ast_analysis.py +174 -0
- examples/codegen/tests/test_code_analysis.py +134 -0
- examples/codegen/tests/test_code_context.py +301 -0
- examples/codegen/tests/test_code_nav.py +89 -0
- examples/codegen/tests/test_dependency_tools.py +119 -0
- examples/codegen/tests/test_example_tools.py +185 -0
- examples/codegen/tests/test_git_tools.py +112 -0
- examples/codegen/tests/test_impl_agent_schemas.py +193 -0
- examples/codegen/tests/test_impl_agent_v4_graph.py +94 -0
- examples/codegen/tests/test_jedi_analysis.py +226 -0
- examples/codegen/tests/test_meta_tools.py +250 -0
- examples/codegen/tests/test_plan_discovery_prompt.py +98 -0
- examples/codegen/tests/test_syntax_tools.py +85 -0
- examples/codegen/tests/test_synthesize_prompt.py +94 -0
- examples/codegen/tests/test_template_tools.py +244 -0
- examples/codegen/tools/__init__.py +80 -0
- examples/codegen/tools/ai_helpers.py +420 -0
- examples/codegen/tools/ast_analysis.py +92 -0
- examples/codegen/tools/code_context.py +180 -0
- examples/codegen/tools/code_nav.py +52 -0
- examples/codegen/tools/dependency_tools.py +120 -0
- examples/codegen/tools/example_tools.py +188 -0
- examples/codegen/tools/git_tools.py +151 -0
- examples/codegen/tools/impl_executor.py +614 -0
- examples/codegen/tools/jedi_analysis.py +311 -0
- examples/codegen/tools/meta_tools.py +202 -0
- examples/codegen/tools/syntax_tools.py +26 -0
- examples/codegen/tools/template_tools.py +356 -0
- examples/fastapi_interview.py +167 -0
- examples/npc/api/__init__.py +1 -0
- examples/npc/api/app.py +100 -0
- examples/npc/api/routes/__init__.py +5 -0
- examples/npc/api/routes/encounter.py +182 -0
- examples/npc/api/session.py +330 -0
- examples/npc/demo.py +387 -0
- examples/npc/nodes/__init__.py +5 -0
- examples/npc/nodes/image_node.py +92 -0
- examples/npc/run_encounter.py +230 -0
- examples/shared/__init__.py +0 -0
- examples/shared/replicate_tool.py +238 -0
- examples/storyboard/__init__.py +1 -0
- examples/storyboard/generate_videos.py +335 -0
- examples/storyboard/nodes/__init__.py +12 -0
- examples/storyboard/nodes/animated_character_node.py +248 -0
- examples/storyboard/nodes/animated_image_node.py +138 -0
- examples/storyboard/nodes/character_node.py +162 -0
- examples/storyboard/nodes/image_node.py +118 -0
- examples/storyboard/nodes/replicate_tool.py +49 -0
- examples/storyboard/retry_images.py +118 -0
- scripts/demo_async_executor.py +212 -0
- scripts/demo_interview_e2e.py +200 -0
- scripts/demo_streaming.py +140 -0
- scripts/run_interview_demo.py +94 -0
- scripts/test_interrupt_fix.py +26 -0
- tests/__init__.py +1 -0
- tests/conftest.py +178 -0
- tests/integration/__init__.py +1 -0
- tests/integration/test_animated_storyboard.py +63 -0
- tests/integration/test_cli_commands.py +242 -0
- tests/integration/test_colocated_prompts.py +139 -0
- tests/integration/test_map_demo.py +50 -0
- tests/integration/test_memory_demo.py +283 -0
- tests/integration/test_npc_api/__init__.py +1 -0
- tests/integration/test_npc_api/test_routes.py +357 -0
- tests/integration/test_npc_api/test_session.py +216 -0
- tests/integration/test_pipeline_flow.py +105 -0
- tests/integration/test_providers.py +163 -0
- tests/integration/test_resume.py +75 -0
- tests/integration/test_subgraph_integration.py +295 -0
- tests/integration/test_subgraph_interrupt.py +106 -0
- tests/unit/__init__.py +1 -0
- tests/unit/test_agent_nodes.py +355 -0
- tests/unit/test_async_executor.py +346 -0
- tests/unit/test_checkpointer.py +212 -0
- tests/unit/test_checkpointer_factory.py +212 -0
- tests/unit/test_cli.py +121 -0
- tests/unit/test_cli_package.py +81 -0
- tests/unit/test_compile_graph_map.py +132 -0
- tests/unit/test_conditions_routing.py +253 -0
- tests/unit/test_config.py +93 -0
- tests/unit/test_conversation_memory.py +276 -0
- tests/unit/test_database.py +145 -0
- tests/unit/test_deprecation.py +104 -0
- tests/unit/test_executor.py +172 -0
- tests/unit/test_executor_async.py +179 -0
- tests/unit/test_export.py +149 -0
- tests/unit/test_expressions.py +178 -0
- tests/unit/test_feature_brainstorm.py +194 -0
- tests/unit/test_format_prompt.py +145 -0
- tests/unit/test_generic_report.py +200 -0
- tests/unit/test_graph_commands.py +327 -0
- tests/unit/test_graph_linter.py +627 -0
- tests/unit/test_graph_loader.py +357 -0
- tests/unit/test_graph_schema.py +193 -0
- tests/unit/test_inline_schema.py +151 -0
- tests/unit/test_interrupt_node.py +182 -0
- tests/unit/test_issues.py +164 -0
- tests/unit/test_jinja2_prompts.py +85 -0
- tests/unit/test_json_extract.py +134 -0
- tests/unit/test_langsmith.py +600 -0
- tests/unit/test_langsmith_tools.py +204 -0
- tests/unit/test_llm_factory.py +109 -0
- tests/unit/test_llm_factory_async.py +118 -0
- tests/unit/test_loops.py +403 -0
- tests/unit/test_map_node.py +144 -0
- tests/unit/test_no_backward_compat.py +56 -0
- tests/unit/test_node_factory.py +348 -0
- tests/unit/test_passthrough_node.py +126 -0
- tests/unit/test_prompts.py +324 -0
- tests/unit/test_python_nodes.py +198 -0
- tests/unit/test_reliability.py +298 -0
- tests/unit/test_result_export.py +234 -0
- tests/unit/test_router.py +296 -0
- tests/unit/test_sanitize.py +99 -0
- tests/unit/test_schema_loader.py +295 -0
- tests/unit/test_shell_tools.py +229 -0
- tests/unit/test_state_builder.py +331 -0
- tests/unit/test_state_builder_map.py +104 -0
- tests/unit/test_state_config.py +197 -0
- tests/unit/test_streaming.py +307 -0
- tests/unit/test_subgraph.py +596 -0
- tests/unit/test_template.py +190 -0
- tests/unit/test_tool_call_integration.py +164 -0
- tests/unit/test_tool_call_node.py +178 -0
- tests/unit/test_tool_nodes.py +129 -0
- tests/unit/test_websearch.py +234 -0
- yamlgraph/__init__.py +35 -0
- yamlgraph/builder.py +110 -0
- yamlgraph/cli/__init__.py +159 -0
- yamlgraph/cli/__main__.py +6 -0
- yamlgraph/cli/commands.py +231 -0
- yamlgraph/cli/deprecation.py +92 -0
- yamlgraph/cli/graph_commands.py +541 -0
- yamlgraph/cli/validators.py +37 -0
- yamlgraph/config.py +67 -0
- yamlgraph/constants.py +70 -0
- yamlgraph/error_handlers.py +227 -0
- yamlgraph/executor.py +290 -0
- yamlgraph/executor_async.py +288 -0
- yamlgraph/graph_loader.py +451 -0
- yamlgraph/map_compiler.py +150 -0
- yamlgraph/models/__init__.py +36 -0
- yamlgraph/models/graph_schema.py +181 -0
- yamlgraph/models/schemas.py +124 -0
- yamlgraph/models/state_builder.py +236 -0
- yamlgraph/node_factory.py +768 -0
- yamlgraph/routing.py +87 -0
- yamlgraph/schema_loader.py +240 -0
- yamlgraph/storage/__init__.py +20 -0
- yamlgraph/storage/checkpointer.py +72 -0
- yamlgraph/storage/checkpointer_factory.py +123 -0
- yamlgraph/storage/database.py +320 -0
- yamlgraph/storage/export.py +269 -0
- yamlgraph/tools/__init__.py +1 -0
- yamlgraph/tools/agent.py +320 -0
- yamlgraph/tools/graph_linter.py +388 -0
- yamlgraph/tools/langsmith_tools.py +125 -0
- yamlgraph/tools/nodes.py +126 -0
- yamlgraph/tools/python_tool.py +179 -0
- yamlgraph/tools/shell.py +205 -0
- yamlgraph/tools/websearch.py +242 -0
- yamlgraph/utils/__init__.py +48 -0
- yamlgraph/utils/conditions.py +157 -0
- yamlgraph/utils/expressions.py +245 -0
- yamlgraph/utils/json_extract.py +104 -0
- yamlgraph/utils/langsmith.py +416 -0
- yamlgraph/utils/llm_factory.py +118 -0
- yamlgraph/utils/llm_factory_async.py +105 -0
- yamlgraph/utils/logging.py +104 -0
- yamlgraph/utils/prompts.py +171 -0
- yamlgraph/utils/sanitize.py +98 -0
- yamlgraph/utils/template.py +102 -0
- yamlgraph/utils/validators.py +181 -0
- yamlgraph-0.3.9.dist-info/METADATA +1105 -0
- yamlgraph-0.3.9.dist-info/RECORD +185 -0
- yamlgraph-0.3.9.dist-info/WHEEL +5 -0
- yamlgraph-0.3.9.dist-info/entry_points.txt +2 -0
- yamlgraph-0.3.9.dist-info/licenses/LICENSE +33 -0
- yamlgraph-0.3.9.dist-info/top_level.txt +4 -0
|
@@ -0,0 +1,104 @@
|
|
|
1
|
+
"""Extract JSON from LLM output with various formats.
|
|
2
|
+
|
|
3
|
+
LLMs often wrap JSON responses in markdown code blocks or add
|
|
4
|
+
explanatory text. This module provides robust extraction.
|
|
5
|
+
|
|
6
|
+
FR-B: JSON Extraction feature.
|
|
7
|
+
"""
|
|
8
|
+
|
|
9
|
+
import json
|
|
10
|
+
import re
|
|
11
|
+
|
|
12
|
+
|
|
13
|
+
def extract_json(text: str) -> dict | list | str:
|
|
14
|
+
"""Extract JSON from LLM response.
|
|
15
|
+
|
|
16
|
+
Extraction order:
|
|
17
|
+
1. Parse as raw JSON (handles both objects and arrays)
|
|
18
|
+
2. Extract from ```json ... ``` code block
|
|
19
|
+
3. Extract from ``` ... ``` code block (any language)
|
|
20
|
+
4. Extract first {...} or [...] pattern
|
|
21
|
+
5. Return original text if no JSON found
|
|
22
|
+
|
|
23
|
+
Args:
|
|
24
|
+
text: Raw LLM response
|
|
25
|
+
|
|
26
|
+
Returns:
|
|
27
|
+
Parsed JSON (dict/list) or original string if extraction fails
|
|
28
|
+
|
|
29
|
+
Examples:
|
|
30
|
+
>>> extract_json('{"key": "value"}')
|
|
31
|
+
{'key': 'value'}
|
|
32
|
+
|
|
33
|
+
>>> extract_json('```json\\n{"key": "value"}\\n```')
|
|
34
|
+
{'key': 'value'}
|
|
35
|
+
|
|
36
|
+
>>> extract_json('Result: {"x": 1} found')
|
|
37
|
+
{'x': 1}
|
|
38
|
+
"""
|
|
39
|
+
if not text:
|
|
40
|
+
return text
|
|
41
|
+
|
|
42
|
+
text = text.strip()
|
|
43
|
+
|
|
44
|
+
# 1. Try raw JSON first
|
|
45
|
+
try:
|
|
46
|
+
return json.loads(text)
|
|
47
|
+
except json.JSONDecodeError:
|
|
48
|
+
pass
|
|
49
|
+
|
|
50
|
+
# 2. Try ```json ... ``` block
|
|
51
|
+
match = re.search(r"```json\s*\n?(.*?)\n?```", text, re.DOTALL | re.IGNORECASE)
|
|
52
|
+
if match:
|
|
53
|
+
try:
|
|
54
|
+
return json.loads(match.group(1).strip())
|
|
55
|
+
except json.JSONDecodeError:
|
|
56
|
+
pass
|
|
57
|
+
|
|
58
|
+
# 3. Try ``` ... ``` block (any language)
|
|
59
|
+
match = re.search(r"```\s*\n?(.*?)\n?```", text, re.DOTALL)
|
|
60
|
+
if match:
|
|
61
|
+
try:
|
|
62
|
+
return json.loads(match.group(1).strip())
|
|
63
|
+
except json.JSONDecodeError:
|
|
64
|
+
pass
|
|
65
|
+
|
|
66
|
+
# 4. Try {...} or [...] pattern
|
|
67
|
+
# Find all potential JSON objects/arrays and try parsing each
|
|
68
|
+
# Use non-greedy matching to find smallest valid JSON structures
|
|
69
|
+
for pattern in [
|
|
70
|
+
r"\{[^{}]*\}", # Simple object: {key: value}
|
|
71
|
+
r"\[[^\[\]]*\]", # Simple array: [1, 2, 3]
|
|
72
|
+
]:
|
|
73
|
+
for match in re.finditer(pattern, text):
|
|
74
|
+
try:
|
|
75
|
+
return json.loads(match.group(0))
|
|
76
|
+
except json.JSONDecodeError:
|
|
77
|
+
continue
|
|
78
|
+
|
|
79
|
+
# 5. Try nested structures (greedy, last resort)
|
|
80
|
+
# Find balanced braces manually
|
|
81
|
+
for start_char, end_char in [("{", "}"), ("[", "]")]:
|
|
82
|
+
start_idx = text.find(start_char)
|
|
83
|
+
if start_idx == -1:
|
|
84
|
+
continue
|
|
85
|
+
|
|
86
|
+
# Find matching closing bracket
|
|
87
|
+
depth = 0
|
|
88
|
+
for i, c in enumerate(text[start_idx:], start=start_idx):
|
|
89
|
+
if c == start_char:
|
|
90
|
+
depth += 1
|
|
91
|
+
elif c == end_char:
|
|
92
|
+
depth -= 1
|
|
93
|
+
if depth == 0:
|
|
94
|
+
candidate = text[start_idx : i + 1]
|
|
95
|
+
try:
|
|
96
|
+
return json.loads(candidate)
|
|
97
|
+
except json.JSONDecodeError:
|
|
98
|
+
break # Try next start position
|
|
99
|
+
|
|
100
|
+
# 6. Return original text
|
|
101
|
+
return text
|
|
102
|
+
|
|
103
|
+
|
|
104
|
+
__all__ = ["extract_json"]
|
|
@@ -0,0 +1,416 @@
|
|
|
1
|
+
"""LangSmith Utilities - Tracing and observability helpers.
|
|
2
|
+
|
|
3
|
+
Provides functions for interacting with LangSmith traces,
|
|
4
|
+
printing execution trees, and logging run information.
|
|
5
|
+
"""
|
|
6
|
+
|
|
7
|
+
import logging
|
|
8
|
+
import os
|
|
9
|
+
from datetime import datetime
|
|
10
|
+
from typing import Any
|
|
11
|
+
|
|
12
|
+
logger = logging.getLogger(__name__)
|
|
13
|
+
|
|
14
|
+
|
|
15
|
+
def get_client() -> Any | None:
    """Return a LangSmith ``Client`` when the SDK and an API key are present.

    Returns:
        LangSmith Client instance, or None when the package is not
        installed or no API key is configured.
    """
    try:
        from langsmith import Client
    except ImportError:
        logger.debug("LangSmith package not installed, client unavailable")
        return None

    # Both the LANGCHAIN_* and LANGSMITH_* env var spellings are accepted.
    api_key = os.environ.get("LANGCHAIN_API_KEY") or os.environ.get(
        "LANGSMITH_API_KEY"
    )
    if not api_key:
        return None

    endpoint = (
        os.environ.get("LANGCHAIN_ENDPOINT")
        or os.environ.get("LANGSMITH_ENDPOINT")
        or "https://api.smith.langchain.com"
    )
    return Client(api_url=endpoint, api_key=api_key)
|
|
40
|
+
|
|
41
|
+
|
|
42
|
+
def get_project_name() -> str:
    """Return the active LangSmith project name.

    Checks LANGCHAIN_PROJECT first, then LANGSMITH_PROJECT, and falls
    back to the package default when neither is set (or both are empty).
    """
    for var in ("LANGCHAIN_PROJECT", "LANGSMITH_PROJECT"):
        value = os.environ.get(var)
        if value:
            return value
    return "yamlgraph"
|
|
53
|
+
|
|
54
|
+
|
|
55
|
+
def is_tracing_enabled() -> bool:
    """Report whether LangSmith tracing is turned on.

    Tracing is enabled when either LANGCHAIN_TRACING_V2 or
    LANGSMITH_TRACING is set to "true" (case-insensitive).
    """
    return any(
        os.environ.get(var, "").lower() == "true"
        for var in ("LANGCHAIN_TRACING_V2", "LANGSMITH_TRACING")
    )
|
|
65
|
+
|
|
66
|
+
|
|
67
|
+
def get_latest_run_id(project_name: str | None = None) -> str | None:
    """Return the ID of the most recent run in a project.

    Args:
        project_name: Optional project name (uses default if not provided)

    Returns:
        Run ID string or None
    """
    client = get_client()
    if client is None:
        return None

    target = project_name if project_name else get_project_name()

    try:
        # limit=1 asks the API for only the newest run.
        latest = next(iter(client.list_runs(project_name=target, limit=1)), None)
    except Exception as e:
        logger.warning("Could not get latest run: %s", e)
        return None

    return str(latest.id) if latest is not None else None
|
|
90
|
+
|
|
91
|
+
|
|
92
|
+
def share_run(run_id: str | None = None) -> str | None:
    """Create a public share link for a run.

    Args:
        run_id: Run ID (uses latest if not provided)

    Returns:
        Public URL string or None if failed

    Example:
        >>> url = share_run()
        >>> print(url)
        https://eu.smith.langchain.com/public/abc123.../r
    """
    client = get_client()
    if client is None:
        return None

    # Fall back to the newest run when no explicit ID was given.
    target = run_id or get_latest_run_id()
    if not target:
        return None

    try:
        # Delegate to the LangSmith SDK's share_run method.
        return client.share_run(target)
    except Exception as e:
        logger.warning("Could not share run: %s", e)
        return None
|
|
122
|
+
|
|
123
|
+
|
|
124
|
+
def read_run_shared_link(run_id: str) -> str | None:
    """Look up an existing public share link for a run.

    Args:
        run_id: The run ID to check

    Returns:
        Public URL string or None if not shared
    """
    client = get_client()
    if client is None:
        return None

    try:
        return client.read_run_shared_link(run_id)
    except Exception as e:
        # Debug-level: an unshared run is an expected, non-actionable case.
        logger.debug("Could not read run shared link for %s: %s", run_id, e)
        return None
|
|
142
|
+
|
|
143
|
+
|
|
144
|
+
def print_run_tree(run_id: str | None = None, verbose: bool = False) -> None:
    """Print an execution tree for a run.

    Args:
        run_id: Specific run ID (uses latest if not provided)
        verbose: Include timing and status details
    """
    client = get_client()
    if client is None:
        logger.warning("LangSmith client not available")
        return

    # Default to the newest run in the active project.
    target = run_id or get_latest_run_id()
    if not target:
        logger.warning("No run found")
        return

    try:
        root = client.read_run(target)
        _print_run_node(root, client, verbose=verbose, indent=0)
    except Exception as e:
        logger.warning("Error reading run: %s", e)
|
|
168
|
+
|
|
169
|
+
|
|
170
|
+
def _print_run_node(
    run: Any,
    client: Any,
    verbose: bool = False,
    indent: int = 0,
    is_last: bool = True,
    prefix: str = "",
) -> None:
    """Recursively print a run node and its children in tree format.

    Emits one log line per run (status emoji, name, duration) and then
    recurses into up to 50 child runs in start-time order.

    Args:
        run: The LangSmith run object
        client: LangSmith client
        verbose: Include timing details
        indent: Current indentation level
        is_last: Whether this is the last sibling
        prefix: Prefix string for tree drawing

    NOTE(review): ``verbose`` is only forwarded to recursive calls and never
    read here — presumably reserved for future detail output; confirm.
    """
    # Status emoji
    if run.status == "success":
        status = "✅"
    elif run.status == "error":
        status = "❌"
    else:
        # Any status other than success/error is treated as still running.
        status = "⏳"

    # Timing: only shown when both timestamps are present.
    timing = ""
    if run.end_time and run.start_time:
        duration = (run.end_time - run.start_time).total_seconds()
        timing = f" ({duration:.1f}s)"

    # Tree connectors: the root gets a chart icon; children get box-drawing
    # branches, with the prefix accumulating one segment per ancestor level.
    if indent == 0:
        connector = "📊 "
        new_prefix = ""
    else:
        connector = "└─ " if is_last else "├─ "
        new_prefix = prefix + (" " if is_last else "│ ")

    # Clean up run name for display: prepend an icon based on name keywords.
    display_name = run.name
    if display_name.startswith("Chat"):
        display_name = f"🤖 {display_name}"
    elif "generate" in display_name.lower():
        display_name = f"📝 {display_name}"
    elif "analyze" in display_name.lower():
        display_name = f"🔍 {display_name}"
    elif "summarize" in display_name.lower():
        display_name = f"📊 {display_name}"

    logger.info("%s%s%s%s %s", prefix, connector, display_name, timing, status)

    # Get child runs (capped at 50 per level).
    try:
        children = list(
            client.list_runs(
                parent_run_id=run.id,
                limit=50,
            )
        )
        # Sort by start time to show in execution order
        # (runs with no start_time sort first via datetime.min).
        children.sort(key=lambda r: r.start_time or datetime.min)

        for i, child in enumerate(children):
            child_is_last = i == len(children) - 1
            _print_run_node(
                child,
                client,
                verbose=verbose,
                indent=indent + 1,
                is_last=child_is_last,
                prefix=new_prefix,
            )
    except Exception as e:
        # Best-effort: a failed child fetch truncates the subtree quietly.
        logger.debug("Could not fetch child runs for %s: %s", run.id, e)
|
|
246
|
+
|
|
247
|
+
|
|
248
|
+
def get_run_url(run_id: str | None = None) -> str | None:
    """Get the LangSmith URL for a run.

    Args:
        run_id: Run ID (uses latest if not provided)

    Returns:
        URL string or None
    """
    if not run_id:
        run_id = get_latest_run_id()

    if not run_id:
        return None

    # Accept both LANGCHAIN_* and LANGSMITH_* env var spellings, consistent
    # with get_client() (previously only LANGCHAIN_ENDPOINT was read here).
    endpoint = (
        os.environ.get("LANGCHAIN_ENDPOINT")
        or os.environ.get("LANGSMITH_ENDPOINT")
        or "https://api.smith.langchain.com"
    )
    project = get_project_name()

    # Convert API endpoint to web URL
    web_url = endpoint.replace("api.", "").replace("/api", "")
    if "smith.langchain" in web_url:
        return f"{web_url}/o/default/projects/p/{project}/runs/{run_id}"

    return f"{web_url}/projects/{project}/runs/{run_id}"
|
|
272
|
+
|
|
273
|
+
|
|
274
|
+
def get_run_details(run_id: str | None = None) -> dict | None:
    """Fetch detailed information about a single run.

    Args:
        run_id: Run ID (uses latest if not provided)

    Returns:
        Dict with run details or None if failed:
        - id: Run ID
        - name: Run name
        - status: success/error/pending
        - error: Error message if any
        - start_time: ISO timestamp
        - end_time: ISO timestamp
        - inputs: Input data
        - outputs: Output data
        - run_type: chain/llm/tool etc.
    """
    client = get_client()
    if client is None:
        return None

    target = run_id or get_latest_run_id()
    if not target:
        return None

    try:
        run = client.read_run(target)
        details = {
            "id": str(run.id),
            "name": run.name,
            "status": run.status,
            "error": run.error,
            "start_time": run.start_time.isoformat() if run.start_time else None,
            "end_time": run.end_time.isoformat() if run.end_time else None,
            "inputs": run.inputs,
            "outputs": run.outputs,
            "run_type": run.run_type,
        }
    except Exception as e:
        logger.warning("Could not get run details: %s", e)
        return None

    return details
|
|
318
|
+
|
|
319
|
+
|
|
320
|
+
def get_run_errors(run_id: str | None = None) -> list[dict]:
    """Collect all errors from a run and its child runs.

    Args:
        run_id: Run ID (uses latest if not provided)

    Returns:
        List of error dicts with:
        - node: Name of the failed node
        - error: Error message
        - run_type: Type of run (llm/chain/tool)
    """
    client = get_client()
    if client is None:
        return []

    target = run_id or get_latest_run_id()
    if not target:
        return []

    found: list[dict] = []
    try:
        # Parent run's own error, if any.
        parent = client.read_run(target)
        if parent.error:
            found.append(
                {
                    "node": parent.name,
                    "error": parent.error,
                    "run_type": parent.run_type,
                }
            )

        # Child runs, server-side filtered to failures (capped at 50).
        for child in client.list_runs(parent_run_id=target, error=True, limit=50):
            if child.error:
                found.append(
                    {
                        "node": child.name,
                        "error": child.error,
                        "run_type": child.run_type,
                    }
                )
    except Exception as e:
        # Best-effort: return whatever was collected before the failure.
        logger.warning("Could not get run errors: %s", e)

    return found
|
|
374
|
+
|
|
375
|
+
|
|
376
|
+
def get_failed_runs(
    project_name: str | None = None,
    limit: int = 10,
) -> list[dict]:
    """List recent failed runs from a project.

    Args:
        project_name: Project name (uses default if not provided)
        limit: Maximum number of runs to return

    Returns:
        List of failed run summaries with:
        - id: Run ID
        - name: Run name
        - error: Error message
        - start_time: ISO timestamp
    """
    client = get_client()
    if client is None:
        return []

    target = project_name or get_project_name()

    summaries: list[dict] = []
    try:
        # error=True filters server-side to runs that raised.
        for r in client.list_runs(project_name=target, error=True, limit=limit):
            summaries.append(
                {
                    "id": str(r.id),
                    "name": r.name,
                    "error": r.error,
                    "start_time": r.start_time.isoformat() if r.start_time else None,
                }
            )
    except Exception as e:
        logger.warning("Could not list failed runs: %s", e)
        return []

    return summaries
|
|
@@ -0,0 +1,118 @@
|
|
|
1
|
+
"""LLM Factory - Multi-provider abstraction for language models.
|
|
2
|
+
|
|
3
|
+
This module provides a simple factory pattern for creating LLM instances
|
|
4
|
+
across different providers (Anthropic, Mistral, OpenAI).
|
|
5
|
+
"""
|
|
6
|
+
|
|
7
|
+
import logging
|
|
8
|
+
import os
|
|
9
|
+
import threading
|
|
10
|
+
from typing import Literal
|
|
11
|
+
|
|
12
|
+
from langchain_core.language_models.chat_models import BaseChatModel
|
|
13
|
+
|
|
14
|
+
from yamlgraph.config import DEFAULT_MODELS
|
|
15
|
+
|
|
16
|
+
logger = logging.getLogger(__name__)
|
|
17
|
+
|
|
18
|
+
# Type alias for supported providers
|
|
19
|
+
ProviderType = Literal["anthropic", "mistral", "openai"]
|
|
20
|
+
|
|
21
|
+
# Thread-safe cache for LLM instances
|
|
22
|
+
_llm_cache: dict[tuple, BaseChatModel] = {}
|
|
23
|
+
_cache_lock = threading.Lock()
|
|
24
|
+
|
|
25
|
+
|
|
26
|
+
def create_llm(
    provider: ProviderType | None = None,
    model: str | None = None,
    temperature: float = 0.7,
) -> BaseChatModel:
    """Create an LLM instance with multi-provider support.

    Supports Anthropic (default), Mistral, and OpenAI providers.
    Provider can be specified via parameter or PROVIDER environment variable.
    Model can be specified via parameter or {PROVIDER}_MODEL environment variable.

    LLM instances are cached by (provider, model, temperature) to improve performance.

    Args:
        provider: LLM provider ("anthropic", "mistral", "openai").
            Defaults to PROVIDER env var or "anthropic".
        model: Model name. Defaults to {PROVIDER}_MODEL env var or provider default.
        temperature: Temperature for generation (0.0-1.0).

    Returns:
        Configured LLM instance.

    Raises:
        ValueError: If provider is invalid.

    Examples:
        >>> # Use default Anthropic
        >>> llm = create_llm(temperature=0.7)

        >>> # Override provider
        >>> llm = create_llm(provider="mistral", temperature=0.8)

        >>> # Custom model
        >>> llm = create_llm(provider="openai", model="gpt-4o-mini")
    """
    # Determine provider (parameter > env var > default)
    selected_provider = provider or os.getenv("PROVIDER") or "anthropic"

    # Validate provider
    if selected_provider not in DEFAULT_MODELS:
        raise ValueError(
            f"Invalid provider: {selected_provider}. "
            f"Must be one of: {', '.join(DEFAULT_MODELS.keys())}"
        )

    # Determine model (parameter > env var > default)
    # Note: DEFAULT_MODELS already handles env var via config.py
    selected_model = model or DEFAULT_MODELS[selected_provider]

    # Create cache key
    cache_key = (selected_provider, selected_model, temperature)

    # Thread-safe cache access
    with _cache_lock:
        # Return cached instance if available.
        # Lazy %-style log args: the message is only formatted when the
        # level is actually enabled (this path is hit on every cached call).
        if cache_key in _llm_cache:
            logger.debug(
                "Using cached LLM: %s/%s (temp=%s)",
                selected_provider,
                selected_model,
                temperature,
            )
            return _llm_cache[cache_key]

        # Create new LLM instance
        logger.info(
            "Creating LLM: %s/%s (temp=%s)",
            selected_provider,
            selected_model,
            temperature,
        )

        # Provider SDKs are imported lazily so only the selected one
        # needs to be installed.
        if selected_provider == "mistral":
            from langchain_mistralai import ChatMistralAI

            llm = ChatMistralAI(model=selected_model, temperature=temperature)
        elif selected_provider == "openai":
            from langchain_openai import ChatOpenAI

            llm = ChatOpenAI(model=selected_model, temperature=temperature)
        else:  # anthropic (default)
            from langchain_anthropic import ChatAnthropic

            llm = ChatAnthropic(model=selected_model, temperature=temperature)

        # Cache the instance
        _llm_cache[cache_key] = llm

    return llm
|
|
109
|
+
|
|
110
|
+
|
|
111
|
+
def clear_cache() -> None:
    """Drop every cached LLM instance.

    Useful for testing or when you want to force recreation of LLM instances.
    """
    # Hold the lock so a concurrent create_llm never observes a
    # half-cleared cache.
    with _cache_lock:
        _llm_cache.clear()
        logger.debug("LLM cache cleared")
|