langgraph-init-cli 0.1.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- langgraph_cli/__init__.py +5 -0
- langgraph_cli/cli.py +42 -0
- langgraph_cli/generator.py +47 -0
- langgraph_cli/templates/advanced/README.md +3 -0
- langgraph_cli/templates/advanced/langgraph.json +6 -0
- langgraph_cli/templates/advanced/pyproject.toml +20 -0
- langgraph_cli/templates/advanced/src/__init__.py +1 -0
- langgraph_cli/templates/advanced/src/__pycache__/__init__.cpython-313.pyc +0 -0
- langgraph_cli/templates/advanced/src/app/__init__.py +1 -0
- langgraph_cli/templates/advanced/src/app/__pycache__/__init__.cpython-313.pyc +0 -0
- langgraph_cli/templates/advanced/src/app/__pycache__/main.cpython-313.pyc +0 -0
- langgraph_cli/templates/advanced/src/app/graph/__init__.py +1 -0
- langgraph_cli/templates/advanced/src/app/graph/__pycache__/__init__.cpython-313.pyc +0 -0
- langgraph_cli/templates/advanced/src/app/graph/__pycache__/builder.cpython-313.pyc +0 -0
- langgraph_cli/templates/advanced/src/app/graph/__pycache__/constants.cpython-313.pyc +0 -0
- langgraph_cli/templates/advanced/src/app/graph/__pycache__/edges.cpython-313.pyc +0 -0
- langgraph_cli/templates/advanced/src/app/graph/__pycache__/registry.cpython-313.pyc +0 -0
- langgraph_cli/templates/advanced/src/app/graph/__pycache__/state.cpython-313.pyc +0 -0
- langgraph_cli/templates/advanced/src/app/graph/builder.py +18 -0
- langgraph_cli/templates/advanced/src/app/graph/constants.py +20 -0
- langgraph_cli/templates/advanced/src/app/graph/edges.py +14 -0
- langgraph_cli/templates/advanced/src/app/graph/registry.py +8 -0
- langgraph_cli/templates/advanced/src/app/graph/state.py +9 -0
- langgraph_cli/templates/advanced/src/app/main.py +12 -0
- langgraph_cli/templates/advanced/src/app/nodes/__init__.py +1 -0
- langgraph_cli/templates/advanced/src/app/nodes/__pycache__/__init__.cpython-313.pyc +0 -0
- langgraph_cli/templates/advanced/src/app/nodes/__pycache__/intent.cpython-313.pyc +0 -0
- langgraph_cli/templates/advanced/src/app/nodes/__pycache__/output.cpython-313.pyc +0 -0
- langgraph_cli/templates/advanced/src/app/nodes/__pycache__/processing.cpython-313.pyc +0 -0
- langgraph_cli/templates/advanced/src/app/nodes/intent.py +9 -0
- langgraph_cli/templates/advanced/src/app/nodes/output.py +10 -0
- langgraph_cli/templates/advanced/src/app/nodes/processing.py +14 -0
- langgraph_cli/templates/advanced/src/app/prompts/__init__.py +1 -0
- langgraph_cli/templates/advanced/src/app/prompts/__pycache__/__init__.cpython-313.pyc +0 -0
- langgraph_cli/templates/advanced/src/app/prompts/__pycache__/registry.cpython-313.pyc +0 -0
- langgraph_cli/templates/advanced/src/app/prompts/registry.py +12 -0
- langgraph_cli/templates/advanced/src/app/services/__init__.py +1 -0
- langgraph_cli/templates/advanced/src/app/services/__pycache__/__init__.cpython-313.pyc +0 -0
- langgraph_cli/templates/advanced/src/app/services/__pycache__/prompt_service.cpython-313.pyc +0 -0
- langgraph_cli/templates/advanced/src/app/services/prompt_service.py +6 -0
- langgraph_cli/templates/advanced/src/app/utils/__init__.py +1 -0
- langgraph_cli/templates/advanced/src/app/utils/__pycache__/__init__.cpython-313.pyc +0 -0
- langgraph_cli/templates/advanced/src/app/utils/__pycache__/logger.cpython-313.pyc +0 -0
- langgraph_cli/templates/advanced/src/app/utils/logger.py +6 -0
- langgraph_cli/templates/base/README.md +3 -0
- langgraph_cli/templates/base/langgraph.json +6 -0
- langgraph_cli/templates/base/pyproject.toml +20 -0
- langgraph_cli/templates/base/src/__init__.py +1 -0
- langgraph_cli/templates/base/src/__pycache__/__init__.cpython-313.pyc +0 -0
- langgraph_cli/templates/base/src/app/__init__.py +1 -0
- langgraph_cli/templates/base/src/app/__pycache__/__init__.cpython-313.pyc +0 -0
- langgraph_cli/templates/base/src/app/__pycache__/main.cpython-313.pyc +0 -0
- langgraph_cli/templates/base/src/app/__pycache__/nodes.cpython-313.pyc +0 -0
- langgraph_cli/templates/base/src/app/__pycache__/state.cpython-313.pyc +0 -0
- langgraph_cli/templates/base/src/app/main.py +27 -0
- langgraph_cli/templates/base/src/app/nodes.py +22 -0
- langgraph_cli/templates/base/src/app/state.py +7 -0
- langgraph_cli/templates/production/README.md +121 -0
- langgraph_cli/templates/production/langgraph.json +6 -0
- langgraph_cli/templates/production/pyproject.toml +20 -0
- langgraph_cli/templates/production/src/__init__.py +1 -0
- langgraph_cli/templates/production/src/__pycache__/__init__.cpython-313.pyc +0 -0
- langgraph_cli/templates/production/src/app/__init__.py +1 -0
- langgraph_cli/templates/production/src/app/__pycache__/__init__.cpython-313.pyc +0 -0
- langgraph_cli/templates/production/src/app/__pycache__/config.cpython-313.pyc +0 -0
- langgraph_cli/templates/production/src/app/__pycache__/main.cpython-313.pyc +0 -0
- langgraph_cli/templates/production/src/app/api/__init__.py +1 -0
- langgraph_cli/templates/production/src/app/api/__pycache__/__init__.cpython-313.pyc +0 -0
- langgraph_cli/templates/production/src/app/api/__pycache__/app.cpython-313.pyc +0 -0
- langgraph_cli/templates/production/src/app/api/__pycache__/routes.cpython-313.pyc +0 -0
- langgraph_cli/templates/production/src/app/api/app.py +11 -0
- langgraph_cli/templates/production/src/app/api/routes.py +16 -0
- langgraph_cli/templates/production/src/app/config.py +37 -0
- langgraph_cli/templates/production/src/app/evaluation/__init__.py +1 -0
- langgraph_cli/templates/production/src/app/evaluation/__pycache__/__init__.cpython-313.pyc +0 -0
- langgraph_cli/templates/production/src/app/evaluation/__pycache__/evaluator.cpython-313.pyc +0 -0
- langgraph_cli/templates/production/src/app/evaluation/__pycache__/scoring.cpython-313.pyc +0 -0
- langgraph_cli/templates/production/src/app/evaluation/evaluator.py +12 -0
- langgraph_cli/templates/production/src/app/evaluation/scoring.py +2 -0
- langgraph_cli/templates/production/src/app/graph/__init__.py +1 -0
- langgraph_cli/templates/production/src/app/graph/__pycache__/__init__.cpython-313.pyc +0 -0
- langgraph_cli/templates/production/src/app/graph/__pycache__/builder.cpython-313.pyc +0 -0
- langgraph_cli/templates/production/src/app/graph/__pycache__/constants.cpython-313.pyc +0 -0
- langgraph_cli/templates/production/src/app/graph/__pycache__/edges.cpython-313.pyc +0 -0
- langgraph_cli/templates/production/src/app/graph/__pycache__/registry.cpython-313.pyc +0 -0
- langgraph_cli/templates/production/src/app/graph/__pycache__/state.cpython-313.pyc +0 -0
- langgraph_cli/templates/production/src/app/graph/builder.py +20 -0
- langgraph_cli/templates/production/src/app/graph/constants.py +28 -0
- langgraph_cli/templates/production/src/app/graph/edges.py +21 -0
- langgraph_cli/templates/production/src/app/graph/registry.py +10 -0
- langgraph_cli/templates/production/src/app/graph/state.py +15 -0
- langgraph_cli/templates/production/src/app/main.py +27 -0
- langgraph_cli/templates/production/src/app/models/__init__.py +1 -0
- langgraph_cli/templates/production/src/app/models/__pycache__/__init__.cpython-313.pyc +0 -0
- langgraph_cli/templates/production/src/app/models/__pycache__/schema.cpython-313.pyc +0 -0
- langgraph_cli/templates/production/src/app/models/__pycache__/workflow.cpython-313.pyc +0 -0
- langgraph_cli/templates/production/src/app/models/schema.py +11 -0
- langgraph_cli/templates/production/src/app/models/workflow.py +30 -0
- langgraph_cli/templates/production/src/app/nodes/__init__.py +1 -0
- langgraph_cli/templates/production/src/app/nodes/__pycache__/__init__.cpython-313.pyc +0 -0
- langgraph_cli/templates/production/src/app/nodes/__pycache__/error.cpython-313.pyc +0 -0
- langgraph_cli/templates/production/src/app/nodes/__pycache__/intent.cpython-313.pyc +0 -0
- langgraph_cli/templates/production/src/app/nodes/__pycache__/output.cpython-313.pyc +0 -0
- langgraph_cli/templates/production/src/app/nodes/__pycache__/processing.cpython-313.pyc +0 -0
- langgraph_cli/templates/production/src/app/nodes/__pycache__/validation.cpython-313.pyc +0 -0
- langgraph_cli/templates/production/src/app/nodes/error.py +7 -0
- langgraph_cli/templates/production/src/app/nodes/intent.py +21 -0
- langgraph_cli/templates/production/src/app/nodes/output.py +13 -0
- langgraph_cli/templates/production/src/app/nodes/processing.py +32 -0
- langgraph_cli/templates/production/src/app/nodes/validation.py +22 -0
- langgraph_cli/templates/production/src/app/observability/__init__.py +1 -0
- langgraph_cli/templates/production/src/app/observability/__pycache__/__init__.cpython-313.pyc +0 -0
- langgraph_cli/templates/production/src/app/observability/__pycache__/langsmith.cpython-313.pyc +0 -0
- langgraph_cli/templates/production/src/app/observability/__pycache__/metrics.cpython-313.pyc +0 -0
- langgraph_cli/templates/production/src/app/observability/langsmith.py +31 -0
- langgraph_cli/templates/production/src/app/observability/metrics.py +15 -0
- langgraph_cli/templates/production/src/app/prompts/__init__.py +1 -0
- langgraph_cli/templates/production/src/app/prompts/__pycache__/__init__.cpython-313.pyc +0 -0
- langgraph_cli/templates/production/src/app/prompts/__pycache__/registry.cpython-313.pyc +0 -0
- langgraph_cli/templates/production/src/app/prompts/registry.py +20 -0
- langgraph_cli/templates/production/src/app/prompts/versions/extraction/v1.txt +1 -0
- langgraph_cli/templates/production/src/app/prompts/versions/intent/v1.txt +1 -0
- langgraph_cli/templates/production/src/app/prompts/versions/intent/v2.txt +1 -0
- langgraph_cli/templates/production/src/app/prompts/versions/validation/v1.txt +1 -0
- langgraph_cli/templates/production/src/app/services/__init__.py +1 -0
- langgraph_cli/templates/production/src/app/services/__pycache__/__init__.cpython-313.pyc +0 -0
- langgraph_cli/templates/production/src/app/services/__pycache__/evaluation_service.cpython-313.pyc +0 -0
- langgraph_cli/templates/production/src/app/services/__pycache__/llm_service.cpython-313.pyc +0 -0
- langgraph_cli/templates/production/src/app/services/__pycache__/prompt_service.cpython-313.pyc +0 -0
- langgraph_cli/templates/production/src/app/services/__pycache__/tool_service.cpython-313.pyc +0 -0
- langgraph_cli/templates/production/src/app/services/__pycache__/versioning_service.cpython-313.pyc +0 -0
- langgraph_cli/templates/production/src/app/services/evaluation_service.py +19 -0
- langgraph_cli/templates/production/src/app/services/llm_service.py +56 -0
- langgraph_cli/templates/production/src/app/services/prompt_service.py +21 -0
- langgraph_cli/templates/production/src/app/services/tool_service.py +22 -0
- langgraph_cli/templates/production/src/app/services/versioning_service.py +12 -0
- langgraph_cli/templates/production/src/app/storage/__init__.py +1 -0
- langgraph_cli/templates/production/src/app/storage/__pycache__/__init__.cpython-313.pyc +0 -0
- langgraph_cli/templates/production/src/app/storage/__pycache__/cache.cpython-313.pyc +0 -0
- langgraph_cli/templates/production/src/app/storage/__pycache__/prompt_store.cpython-313.pyc +0 -0
- langgraph_cli/templates/production/src/app/storage/__pycache__/workflow_store.cpython-313.pyc +0 -0
- langgraph_cli/templates/production/src/app/storage/cache.py +12 -0
- langgraph_cli/templates/production/src/app/storage/prompt_store.py +11 -0
- langgraph_cli/templates/production/src/app/storage/workflow_store.py +13 -0
- langgraph_cli/templates/production/src/app/tools/__init__.py +1 -0
- langgraph_cli/templates/production/src/app/tools/__pycache__/__init__.cpython-313.pyc +0 -0
- langgraph_cli/templates/production/src/app/tools/__pycache__/base.cpython-313.pyc +0 -0
- langgraph_cli/templates/production/src/app/tools/__pycache__/registry.cpython-313.pyc +0 -0
- langgraph_cli/templates/production/src/app/tools/api/__pycache__/http_tool.cpython-313.pyc +0 -0
- langgraph_cli/templates/production/src/app/tools/api/http_tool.py +12 -0
- langgraph_cli/templates/production/src/app/tools/base.py +15 -0
- langgraph_cli/templates/production/src/app/tools/db/__pycache__/query_tool.cpython-313.pyc +0 -0
- langgraph_cli/templates/production/src/app/tools/db/query_tool.py +12 -0
- langgraph_cli/templates/production/src/app/tools/rag/__pycache__/retriever_tool.cpython-313.pyc +0 -0
- langgraph_cli/templates/production/src/app/tools/rag/retriever_tool.py +18 -0
- langgraph_cli/templates/production/src/app/tools/registry.py +26 -0
- langgraph_cli/templates/production/src/app/tools/utils/__pycache__/calculator_tool.cpython-313.pyc +0 -0
- langgraph_cli/templates/production/src/app/tools/utils/calculator_tool.py +21 -0
- langgraph_cli/templates/production/src/app/utils/__init__.py +1 -0
- langgraph_cli/templates/production/src/app/utils/__pycache__/__init__.cpython-313.pyc +0 -0
- langgraph_cli/templates/production/src/app/utils/__pycache__/logger.cpython-313.pyc +0 -0
- langgraph_cli/templates/production/src/app/utils/__pycache__/parallel.cpython-313.pyc +0 -0
- langgraph_cli/templates/production/src/app/utils/__pycache__/tracing.cpython-313.pyc +0 -0
- langgraph_cli/templates/production/src/app/utils/logger.py +33 -0
- langgraph_cli/templates/production/src/app/utils/parallel.py +9 -0
- langgraph_cli/templates/production/src/app/utils/tracing.py +16 -0
- langgraph_init_cli-0.1.0.dist-info/METADATA +383 -0
- langgraph_init_cli-0.1.0.dist-info/RECORD +172 -0
- langgraph_init_cli-0.1.0.dist-info/WHEEL +5 -0
- langgraph_init_cli-0.1.0.dist-info/entry_points.txt +2 -0
- langgraph_init_cli-0.1.0.dist-info/licenses/LICENSE +21 -0
- langgraph_init_cli-0.1.0.dist-info/top_level.txt +1 -0
|
@@ -0,0 +1,16 @@
|
|
|
1
|
+
from __future__ import annotations

from src.app.config import settings
from src.app.graph.builder import build_graph


def invoke_workflow(payload: dict[str, object]) -> dict[str, object]:
    """Run the compiled workflow graph once for *payload* and return the final state.

    Missing keys in *payload* fall back to sensible defaults; prompt versions
    default to the configured `settings.default_prompt_versions`.
    """
    workflow = build_graph()
    seed: dict[str, object] = {
        "input_text": payload.get("input_text", ""),
        "retry_count": 0,
        "messages": [],
        "prompt_versions": payload.get("prompt_versions", settings.default_prompt_versions.copy()),
        "tool_results": {},
    }
    return workflow.invoke(seed)
|
|
@@ -0,0 +1,37 @@
|
|
|
1
|
+
from __future__ import annotations
|
|
2
|
+
|
|
3
|
+
import os
|
|
4
|
+
from dataclasses import dataclass
|
|
5
|
+
from pathlib import Path
|
|
6
|
+
|
|
7
|
+
from dotenv import load_dotenv
|
|
8
|
+
|
|
9
|
+
load_dotenv()
|
|
10
|
+
|
|
11
|
+
|
|
12
|
+
@dataclass(slots=True)
|
|
13
|
+
class Settings:
|
|
14
|
+
project_name: str = "{{PROJECT_NAME}}"
|
|
15
|
+
environment: str = os.getenv("APP_ENV", "development")
|
|
16
|
+
log_level: str = os.getenv("LOG_LEVEL", "INFO")
|
|
17
|
+
langsmith_tracing: bool = os.getenv("LANGSMITH_TRACING", "false").lower() == "true"
|
|
18
|
+
langsmith_endpoint: str = os.getenv("LANGSMITH_ENDPOINT", "https://api.smith.langchain.com")
|
|
19
|
+
langsmith_api_key: str = os.getenv("LANGSMITH_API_KEY", "")
|
|
20
|
+
langsmith_project: str = os.getenv("LANGSMITH_PROJECT", "{{PROJECT_NAME}}")
|
|
21
|
+
default_prompt_versions: dict[str, str] | None = None
|
|
22
|
+
max_validation_retries: int = 2
|
|
23
|
+
|
|
24
|
+
def __post_init__(self) -> None:
|
|
25
|
+
if self.default_prompt_versions is None:
|
|
26
|
+
self.default_prompt_versions = {
|
|
27
|
+
"intent": "v2",
|
|
28
|
+
"extraction": "v1",
|
|
29
|
+
"validation": "v1",
|
|
30
|
+
}
|
|
31
|
+
|
|
32
|
+
@property
|
|
33
|
+
def prompt_root(self) -> Path:
|
|
34
|
+
return Path(__file__).resolve().parent / "prompts" / "versions"
|
|
35
|
+
|
|
36
|
+
|
|
37
|
+
settings = Settings()
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
"""Evaluation package."""
|
|
Binary file
|
|
Binary file
|
|
Binary file
|
|
@@ -0,0 +1,12 @@
|
|
|
1
|
+
from __future__ import annotations

# Values treated as "not populated" when measuring extraction coverage.
_EMPTY_VALUES = ("", None, [], {})


class Evaluator:
    """Heuristic quality check over an extraction result."""

    def evaluate(self, extracted: dict[str, object], prompt: str) -> dict[str, object]:
        """Return field coverage (0..1, rounded to 2 places) and a prompt-presence flag."""
        populated_count = sum(
            1 for candidate in extracted.values() if candidate not in _EMPTY_VALUES
        )
        # Guard against division by zero when the extraction produced no fields.
        denominator = max(len(extracted), 1)
        return {
            "field_coverage": round(populated_count / denominator, 2),
            "checked_with_prompt": bool(prompt),
        }
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
"""Graph package."""
|
|
Binary file
|
|
Binary file
|
|
Binary file
|
|
Binary file
|
|
Binary file
|
|
Binary file
|
|
@@ -0,0 +1,20 @@
|
|
|
1
|
+
from langgraph.graph import END, START, StateGraph

from src.app.graph.constants import Names
from src.app.graph.edges import EDGE_MAP, route_after_validation
from src.app.graph.registry import NODE_REGISTRY
from src.app.graph.state import WorkflowState


def build_graph():
    """Assemble and compile the workflow StateGraph from the registered nodes."""
    workflow = StateGraph(WorkflowState)
    for node_name, node_fn in NODE_REGISTRY.items():
        workflow.add_node(node_name, node_fn)

    # Linear path up to validation, then conditional routing to output/retry/error.
    for source, target in (
        (START, Names.INTENT),
        (Names.INTENT, Names.PROCESSING),
        (Names.PROCESSING, Names.VALIDATION),
    ):
        workflow.add_edge(source, target)
    workflow.add_conditional_edges(Names.VALIDATION, route_after_validation, EDGE_MAP)
    workflow.add_edge(Names.OUTPUT, END)
    workflow.add_edge(Names.ERROR, END)
    return workflow.compile()
|
|
@@ -0,0 +1,28 @@
|
|
|
1
|
+
from src.app.nodes.error import error_node
from src.app.nodes.intent import intent_node
from src.app.nodes.output import output_node
from src.app.nodes.processing import processing_node
from src.app.nodes.validation import validation_node


class Names:
    """Canonical node names used when wiring the graph."""

    INTENT = "intent"
    PROCESSING = "processing"
    VALIDATION = "validation"
    OUTPUT = "output"
    ERROR = "error"


class Tags:
    """Routing tags returned by conditional edges."""

    CONTINUE = "continue"
    RETRY = "retry"
    COMPLETE = "complete"
    ERROR = "error"


class Nodes:
    """Node callables keyed by role, mirroring the entries in Names."""

    INTENT = intent_node
    PROCESSING = processing_node
    VALIDATION = validation_node
    OUTPUT = output_node
    ERROR = error_node
|
|
@@ -0,0 +1,21 @@
|
|
|
1
|
+
from src.app.config import settings
from src.app.graph.constants import Names, Tags
from src.app.graph.state import WorkflowState


def route_after_validation(state: WorkflowState) -> str:
    """Choose the next tag after validation.

    Priority: explicit error -> ERROR, valid result -> COMPLETE, retries
    remaining -> RETRY, otherwise give up with ERROR.
    """
    if state.get("error"):
        return Tags.ERROR
    if state.get("validation", {}).get("is_valid"):
        return Tags.COMPLETE
    retries_used = state.get("retry_count", 0)
    return Tags.RETRY if retries_used < settings.max_validation_retries else Tags.ERROR


# Tag -> destination node for the conditional edge out of validation.
EDGE_MAP = {
    Tags.COMPLETE: Names.OUTPUT,
    Tags.RETRY: Names.PROCESSING,
    Tags.ERROR: Names.ERROR,
}
|
|
@@ -0,0 +1,15 @@
|
|
|
1
|
+
from typing import Any, TypedDict


class WorkflowState(TypedDict, total=False):
    """Mutable state threaded through the graph; every key is optional (total=False)."""

    input_text: str                  # raw user request
    intent: str                      # classified intent label
    extracted_data: dict[str, Any]   # structured extraction output
    validation: dict[str, Any]       # evaluator verdict (is_valid, reason, scores)
    output: str                      # rendered final answer
    error: str                       # terminal error message, if any
    confidence: float                # intent-classification confidence
    retry_count: int                 # processing attempts so far
    messages: list[str]              # audit trail of node transitions
    prompt_versions: dict[str, str]  # task -> prompt version overrides
    tool_results: dict[str, Any]     # outputs from executed tools
|
|
@@ -0,0 +1,27 @@
|
|
|
1
|
+
from __future__ import annotations

from src.app.config import settings
from src.app.graph.builder import build_graph
from src.app.observability.metrics import metrics
from src.app.utils.logger import get_logger

logger = get_logger(__name__)


def run():
    """Build the graph, execute it on a demo request, print and return the final state."""
    workflow = build_graph()
    demo_state = {
        "input_text": "Please calculate 12 + 7 and validate the answer.",
        "retry_count": 0,
        "messages": [],
        "prompt_versions": settings.default_prompt_versions.copy(),
        "tool_results": {},
    }
    final_state = workflow.invoke(demo_state)
    logger.info(
        "workflow_complete",
        extra={"output": final_state.get("output"), "metrics": metrics.snapshot()},
    )
    print(final_state.get("output", "No output produced."))
    return final_state


if __name__ == "__main__":
    run()
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
"""Models package."""
|
|
Binary file
|
|
Binary file
|
|
Binary file
|
|
@@ -0,0 +1,11 @@
|
|
|
1
|
+
from __future__ import annotations
|
|
2
|
+
|
|
3
|
+
from dataclasses import dataclass, field
|
|
4
|
+
|
|
5
|
+
|
|
6
|
+
@dataclass(slots=True)
|
|
7
|
+
class ExtractionSchema:
|
|
8
|
+
numbers: list[int] = field(default_factory=list)
|
|
9
|
+
prompt_used: str = ""
|
|
10
|
+
requires_tool: bool = False
|
|
11
|
+
text_length: int = 0
|
|
@@ -0,0 +1,30 @@
|
|
|
1
|
+
from __future__ import annotations
|
|
2
|
+
|
|
3
|
+
from dataclasses import dataclass, field
|
|
4
|
+
|
|
5
|
+
|
|
6
|
+
@dataclass(slots=True)
|
|
7
|
+
class WorkflowResult:
|
|
8
|
+
intent: str
|
|
9
|
+
confidence: float
|
|
10
|
+
status: str
|
|
11
|
+
validation_reason: str
|
|
12
|
+
tool_results: dict[str, object] = field(default_factory=dict)
|
|
13
|
+
|
|
14
|
+
@classmethod
|
|
15
|
+
def from_state(cls, state):
|
|
16
|
+
validation = state.get("validation", {})
|
|
17
|
+
return cls(
|
|
18
|
+
intent=state.get("intent", "unknown"),
|
|
19
|
+
confidence=state.get("confidence", 0.0),
|
|
20
|
+
status="success" if validation.get("is_valid") else "failed",
|
|
21
|
+
validation_reason=validation.get("reason", "No validation result."),
|
|
22
|
+
tool_results=state.get("tool_results", {}),
|
|
23
|
+
)
|
|
24
|
+
|
|
25
|
+
def render(self) -> str:
|
|
26
|
+
return (
|
|
27
|
+
f"intent={self.intent}; confidence={self.confidence:.2f}; "
|
|
28
|
+
f"status={self.status}; validation={self.validation_reason}; "
|
|
29
|
+
f"tools={self.tool_results}"
|
|
30
|
+
)
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
"""Node package."""
|
|
Binary file
|
|
Binary file
|
|
Binary file
|
|
Binary file
|
|
Binary file
|
|
Binary file
|
|
@@ -0,0 +1,7 @@
|
|
|
1
|
+
from src.app.graph.state import WorkflowState


def error_node(state: WorkflowState) -> WorkflowState:
    """Terminal node: surface the most specific failure reason as the output."""
    # Prefer an explicit error, then the validator's reason, then a generic message.
    reason = (
        state.get("error")
        or state.get("validation", {}).get("reason")
        or "Workflow failed validation."
    )
    return {"output": f"ERROR: {reason}", "error": reason}
|
|
@@ -0,0 +1,21 @@
|
|
|
1
|
+
from src.app.graph.state import WorkflowState
from src.app.services.llm_service import LLMService
from src.app.services.prompt_service import PromptService
from src.app.utils.logger import get_logger

logger = get_logger(__name__)


def intent_node(state: WorkflowState) -> WorkflowState:
    """Classify the user's request and record intent plus confidence in state."""
    prompts = PromptService()
    llm = LLMService(prompt_service=prompts)
    text = state.get("input_text", "")
    selected_version = state.get("prompt_versions", {}).get("intent")
    intent_prompt = prompts.load_prompt("intent", version=selected_version)
    classified = llm.classify_intent(prompt=intent_prompt, text=text)
    logger.info(
        "intent_node",
        extra={"intent": classified["intent"], "confidence": classified["confidence"]},
    )
    return {
        "intent": classified["intent"],
        "confidence": classified["confidence"],
        "messages": state.get("messages", []) + [f"intent:{classified['intent']}"],
    }
|
|
@@ -0,0 +1,13 @@
|
|
|
1
|
+
from src.app.graph.state import WorkflowState
from src.app.models.workflow import WorkflowResult
from src.app.storage.workflow_store import WorkflowStore
from src.app.utils.logger import get_logger

logger = get_logger(__name__)


def output_node(state: WorkflowState) -> WorkflowState:
    """Materialize the final result, persist it, and expose the rendered text."""
    summary = WorkflowResult.from_state(state)
    WorkflowStore().save(summary)
    logger.info("output_node", extra={"status": summary.status})
    return {"output": summary.render()}
|
|
@@ -0,0 +1,32 @@
|
|
|
1
|
+
from src.app.graph.state import WorkflowState
from src.app.services.llm_service import LLMService
from src.app.services.prompt_service import PromptService
from src.app.services.tool_service import ToolService
from src.app.utils.logger import get_logger

logger = get_logger(__name__)


def processing_node(state: WorkflowState) -> WorkflowState:
    """Extract structured data, run intent-appropriate tools, and bump the retry counter."""
    prompts = PromptService()
    llm = LLMService(prompt_service=prompts)
    tools = ToolService()

    extraction_prompt = prompts.load_prompt(
        "extraction", version=state.get("prompt_versions", {}).get("extraction")
    )
    text = state.get("input_text", "")
    enriched = llm.parallel_enrich(prompt=extraction_prompt, text=text)
    tool_output = tools.execute_for_intent(
        intent=state.get("intent", "general"),
        text=text,
        extracted=enriched["extraction"],
    )
    # Fold the analysis channel into the extraction payload under one key.
    combined = {**enriched["extraction"], "analysis": enriched["analysis"]}
    logger.info(
        "processing_node",
        extra={"extracted_keys": list(combined.keys()), "tool_count": len(tool_output)},
    )
    return {
        "extracted_data": combined,
        "tool_results": tool_output,
        "retry_count": state.get("retry_count", 0) + 1,
        "messages": state.get("messages", []) + ["processed"],
    }
|
|
@@ -0,0 +1,22 @@
|
|
|
1
|
+
from src.app.graph.state import WorkflowState
from src.app.services.evaluation_service import EvaluationService
from src.app.services.prompt_service import PromptService
from src.app.utils.logger import get_logger

logger = get_logger(__name__)


def validation_node(state: WorkflowState) -> WorkflowState:
    """Score the extracted data against the validation prompt and record the verdict."""
    prompts = PromptService()
    checker = EvaluationService()
    validation_prompt = prompts.load_prompt(
        "validation",
        version=state.get("prompt_versions", {}).get("validation"),
    )
    verdict = checker.validate(
        extracted=state.get("extracted_data", {}),
        confidence=state.get("confidence", 0.0),
        prompt=validation_prompt,
    )
    logger.info("validation_node", extra={"validation": verdict})
    return {"validation": verdict, "messages": state.get("messages", []) + ["validated"]}
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
"""Observability package."""
|
|
Binary file
|
langgraph_cli/templates/production/src/app/observability/__pycache__/langsmith.cpython-313.pyc
ADDED
|
Binary file
|
|
Binary file
|
|
@@ -0,0 +1,31 @@
|
|
|
1
|
+
from __future__ import annotations
|
|
2
|
+
|
|
3
|
+
import functools
|
|
4
|
+
import os
|
|
5
|
+
from collections.abc import Callable
|
|
6
|
+
|
|
7
|
+
from src.app.config import settings
|
|
8
|
+
from src.app.utils.logger import get_logger
|
|
9
|
+
|
|
10
|
+
logger = get_logger(__name__)
|
|
11
|
+
|
|
12
|
+
|
|
13
|
+
def configure_langsmith() -> None:
|
|
14
|
+
os.environ.setdefault("LANGSMITH_TRACING", str(settings.langsmith_tracing).lower())
|
|
15
|
+
os.environ.setdefault("LANGSMITH_ENDPOINT", settings.langsmith_endpoint)
|
|
16
|
+
if settings.langsmith_api_key:
|
|
17
|
+
os.environ.setdefault("LANGSMITH_API_KEY", settings.langsmith_api_key)
|
|
18
|
+
os.environ.setdefault("LANGSMITH_PROJECT", settings.langsmith_project)
|
|
19
|
+
|
|
20
|
+
|
|
21
|
+
def traceable(name: str) -> Callable:
|
|
22
|
+
def decorator(func: Callable) -> Callable:
|
|
23
|
+
@functools.wraps(func)
|
|
24
|
+
def wrapper(*args, **kwargs):
|
|
25
|
+
configure_langsmith()
|
|
26
|
+
logger.info("langsmith_trace", extra={"name": name, "enabled": settings.langsmith_tracing})
|
|
27
|
+
return func(*args, **kwargs)
|
|
28
|
+
|
|
29
|
+
return wrapper
|
|
30
|
+
|
|
31
|
+
return decorator
|
|
@@ -0,0 +1,15 @@
|
|
|
1
|
+
from __future__ import annotations
|
|
2
|
+
|
|
3
|
+
|
|
4
|
+
class Metrics:
|
|
5
|
+
def __init__(self) -> None:
|
|
6
|
+
self._counters: dict[str, int] = {}
|
|
7
|
+
|
|
8
|
+
def increment(self, name: str, amount: int = 1) -> None:
|
|
9
|
+
self._counters[name] = self._counters.get(name, 0) + amount
|
|
10
|
+
|
|
11
|
+
def snapshot(self) -> dict[str, int]:
|
|
12
|
+
return dict(self._counters)
|
|
13
|
+
|
|
14
|
+
|
|
15
|
+
metrics = Metrics()
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
"""Prompts package."""
|
|
Binary file
|
|
Binary file
|
|
@@ -0,0 +1,20 @@
|
|
|
1
|
+
from __future__ import annotations
|
|
2
|
+
|
|
3
|
+
from pathlib import Path
|
|
4
|
+
|
|
5
|
+
from src.app.config import settings
|
|
6
|
+
|
|
7
|
+
|
|
8
|
+
class PromptRegistry:
|
|
9
|
+
def __init__(self) -> None:
|
|
10
|
+
self.root = Path(settings.prompt_root)
|
|
11
|
+
|
|
12
|
+
def resolve_version(self, task: str, version: str | None = None) -> str:
|
|
13
|
+
return version or settings.default_prompt_versions.get(task, "v1")
|
|
14
|
+
|
|
15
|
+
def load(self, task: str, version: str | None = None) -> str:
|
|
16
|
+
selected = self.resolve_version(task, version)
|
|
17
|
+
prompt_path = self.root / task / f"{selected}.txt"
|
|
18
|
+
if not prompt_path.exists():
|
|
19
|
+
prompt_path = self.root / task / "v1.txt"
|
|
20
|
+
return prompt_path.read_text(encoding="utf-8").strip()
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
Extract structured fields that can support downstream validation and tool execution.
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
Classify the user request into one of: general, calculation, validation.
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
Classify the request with strong preference for deterministic intents. Use calculation when math symbols or arithmetic verbs appear.
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
Validate that extraction produced enough fields and confidence to produce a final answer.
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
"""Services package."""
|
|
Binary file
|
langgraph_cli/templates/production/src/app/services/__pycache__/evaluation_service.cpython-313.pyc
ADDED
|
Binary file
|
|
Binary file
|
langgraph_cli/templates/production/src/app/services/__pycache__/prompt_service.cpython-313.pyc
ADDED
|
Binary file
|
|
Binary file
|
langgraph_cli/templates/production/src/app/services/__pycache__/versioning_service.cpython-313.pyc
ADDED
|
Binary file
|
|
@@ -0,0 +1,19 @@
|
|
|
1
|
+
from __future__ import annotations

from src.app.evaluation.evaluator import Evaluator
from src.app.evaluation.scoring import score_validation


class EvaluationService:
    """Wraps the evaluator and scoring helpers behind a single validate() call."""

    def __init__(self) -> None:
        self.evaluator = Evaluator()

    def validate(self, extracted: dict[str, object], confidence: float, prompt: str) -> dict[str, object]:
        """Evaluate *extracted*, attach a combined score, and set is_valid/reason."""
        report = self.evaluator.evaluate(extracted=extracted, prompt=prompt)
        report["confidence_score"] = score_validation(
            field_coverage=report["field_coverage"], confidence=confidence
        )
        # Both field coverage and the combined confidence score must clear 0.5.
        passed = report["field_coverage"] >= 0.5 and report["confidence_score"] >= 0.5
        report["is_valid"] = passed
        report["reason"] = (
            "Validation passed."
            if passed
            else "Validation failed because coverage or confidence was too low."
        )
        return report
|
@@ -0,0 +1,56 @@
|
|
|
1
|
+
from __future__ import annotations
|
|
2
|
+
|
|
3
|
+
import re
|
|
4
|
+
from dataclasses import dataclass
|
|
5
|
+
|
|
6
|
+
from langchain_core.runnables import RunnableLambda, RunnableParallel
|
|
7
|
+
|
|
8
|
+
from src.app.observability.langsmith import traceable
|
|
9
|
+
from src.app.observability.metrics import metrics
|
|
10
|
+
from src.app.utils.logger import get_logger
|
|
11
|
+
|
|
12
|
+
logger = get_logger(__name__)
|
|
13
|
+
|
|
14
|
+
|
|
15
|
+
@dataclass(slots=True)
|
|
16
|
+
class LLMService:
|
|
17
|
+
prompt_service: object
|
|
18
|
+
|
|
19
|
+
@traceable(name="classify_intent")
|
|
20
|
+
def classify_intent(self, prompt: str, text: str) -> dict[str, object]:
|
|
21
|
+
lowered = text.lower()
|
|
22
|
+
intent = "general"
|
|
23
|
+
confidence = 0.55
|
|
24
|
+
if any(token in lowered for token in ("calculate", "+", "-", "*", "/")):
|
|
25
|
+
intent = "calculation"
|
|
26
|
+
confidence = 0.93
|
|
27
|
+
elif "validate" in lowered:
|
|
28
|
+
intent = "validation"
|
|
29
|
+
confidence = 0.8
|
|
30
|
+
metrics.increment("llm.classify_intent.calls")
|
|
31
|
+
logger.info("classify_intent", extra={"prompt_preview": prompt[:40], "intent": intent})
|
|
32
|
+
return {"intent": intent, "confidence": confidence}
|
|
33
|
+
|
|
34
|
+
@traceable(name="parallel_enrich")
|
|
35
|
+
def parallel_enrich(self, prompt: str, text: str) -> dict[str, dict[str, object]]:
|
|
36
|
+
extractor = RunnableLambda(lambda payload: self._extract_fields(prompt=payload["prompt"], text=payload["text"]))
|
|
37
|
+
analyzer = RunnableLambda(lambda payload: self._derive_analysis(text=payload["text"]))
|
|
38
|
+
pipeline = RunnableParallel(extraction=extractor, analysis=analyzer)
|
|
39
|
+
metrics.increment("llm.parallel_enrich.calls")
|
|
40
|
+
return pipeline.invoke({"prompt": prompt, "text": text})
|
|
41
|
+
|
|
42
|
+
def _extract_fields(self, prompt: str, text: str) -> dict[str, object]:
|
|
43
|
+
numbers = [int(match) for match in re.findall(r"-?\d+", text)]
|
|
44
|
+
return {
|
|
45
|
+
"prompt_used": prompt.splitlines()[0] if prompt else "missing-prompt",
|
|
46
|
+
"numbers": numbers,
|
|
47
|
+
"requires_tool": bool(numbers),
|
|
48
|
+
"text_length": len(text),
|
|
49
|
+
}
|
|
50
|
+
|
|
51
|
+
def _derive_analysis(self, text: str) -> dict[str, object]:
|
|
52
|
+
lowered = text.lower()
|
|
53
|
+
return {
|
|
54
|
+
"contains_validation": "validate" in lowered,
|
|
55
|
+
"contains_math": any(token in lowered for token in ("calculate", "+", "-", "*", "/")),
|
|
56
|
+
}
|