langgraph-init-cli 0.1.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (172) hide show
  1. langgraph_cli/__init__.py +5 -0
  2. langgraph_cli/cli.py +42 -0
  3. langgraph_cli/generator.py +47 -0
  4. langgraph_cli/templates/advanced/README.md +3 -0
  5. langgraph_cli/templates/advanced/langgraph.json +6 -0
  6. langgraph_cli/templates/advanced/pyproject.toml +20 -0
  7. langgraph_cli/templates/advanced/src/__init__.py +1 -0
  8. langgraph_cli/templates/advanced/src/__pycache__/__init__.cpython-313.pyc +0 -0
  9. langgraph_cli/templates/advanced/src/app/__init__.py +1 -0
  10. langgraph_cli/templates/advanced/src/app/__pycache__/__init__.cpython-313.pyc +0 -0
  11. langgraph_cli/templates/advanced/src/app/__pycache__/main.cpython-313.pyc +0 -0
  12. langgraph_cli/templates/advanced/src/app/graph/__init__.py +1 -0
  13. langgraph_cli/templates/advanced/src/app/graph/__pycache__/__init__.cpython-313.pyc +0 -0
  14. langgraph_cli/templates/advanced/src/app/graph/__pycache__/builder.cpython-313.pyc +0 -0
  15. langgraph_cli/templates/advanced/src/app/graph/__pycache__/constants.cpython-313.pyc +0 -0
  16. langgraph_cli/templates/advanced/src/app/graph/__pycache__/edges.cpython-313.pyc +0 -0
  17. langgraph_cli/templates/advanced/src/app/graph/__pycache__/registry.cpython-313.pyc +0 -0
  18. langgraph_cli/templates/advanced/src/app/graph/__pycache__/state.cpython-313.pyc +0 -0
  19. langgraph_cli/templates/advanced/src/app/graph/builder.py +18 -0
  20. langgraph_cli/templates/advanced/src/app/graph/constants.py +20 -0
  21. langgraph_cli/templates/advanced/src/app/graph/edges.py +14 -0
  22. langgraph_cli/templates/advanced/src/app/graph/registry.py +8 -0
  23. langgraph_cli/templates/advanced/src/app/graph/state.py +9 -0
  24. langgraph_cli/templates/advanced/src/app/main.py +12 -0
  25. langgraph_cli/templates/advanced/src/app/nodes/__init__.py +1 -0
  26. langgraph_cli/templates/advanced/src/app/nodes/__pycache__/__init__.cpython-313.pyc +0 -0
  27. langgraph_cli/templates/advanced/src/app/nodes/__pycache__/intent.cpython-313.pyc +0 -0
  28. langgraph_cli/templates/advanced/src/app/nodes/__pycache__/output.cpython-313.pyc +0 -0
  29. langgraph_cli/templates/advanced/src/app/nodes/__pycache__/processing.cpython-313.pyc +0 -0
  30. langgraph_cli/templates/advanced/src/app/nodes/intent.py +9 -0
  31. langgraph_cli/templates/advanced/src/app/nodes/output.py +10 -0
  32. langgraph_cli/templates/advanced/src/app/nodes/processing.py +14 -0
  33. langgraph_cli/templates/advanced/src/app/prompts/__init__.py +1 -0
  34. langgraph_cli/templates/advanced/src/app/prompts/__pycache__/__init__.cpython-313.pyc +0 -0
  35. langgraph_cli/templates/advanced/src/app/prompts/__pycache__/registry.cpython-313.pyc +0 -0
  36. langgraph_cli/templates/advanced/src/app/prompts/registry.py +12 -0
  37. langgraph_cli/templates/advanced/src/app/services/__init__.py +1 -0
  38. langgraph_cli/templates/advanced/src/app/services/__pycache__/__init__.cpython-313.pyc +0 -0
  39. langgraph_cli/templates/advanced/src/app/services/__pycache__/prompt_service.cpython-313.pyc +0 -0
  40. langgraph_cli/templates/advanced/src/app/services/prompt_service.py +6 -0
  41. langgraph_cli/templates/advanced/src/app/utils/__init__.py +1 -0
  42. langgraph_cli/templates/advanced/src/app/utils/__pycache__/__init__.cpython-313.pyc +0 -0
  43. langgraph_cli/templates/advanced/src/app/utils/__pycache__/logger.cpython-313.pyc +0 -0
  44. langgraph_cli/templates/advanced/src/app/utils/logger.py +6 -0
  45. langgraph_cli/templates/base/README.md +3 -0
  46. langgraph_cli/templates/base/langgraph.json +6 -0
  47. langgraph_cli/templates/base/pyproject.toml +20 -0
  48. langgraph_cli/templates/base/src/__init__.py +1 -0
  49. langgraph_cli/templates/base/src/__pycache__/__init__.cpython-313.pyc +0 -0
  50. langgraph_cli/templates/base/src/app/__init__.py +1 -0
  51. langgraph_cli/templates/base/src/app/__pycache__/__init__.cpython-313.pyc +0 -0
  52. langgraph_cli/templates/base/src/app/__pycache__/main.cpython-313.pyc +0 -0
  53. langgraph_cli/templates/base/src/app/__pycache__/nodes.cpython-313.pyc +0 -0
  54. langgraph_cli/templates/base/src/app/__pycache__/state.cpython-313.pyc +0 -0
  55. langgraph_cli/templates/base/src/app/main.py +27 -0
  56. langgraph_cli/templates/base/src/app/nodes.py +22 -0
  57. langgraph_cli/templates/base/src/app/state.py +7 -0
  58. langgraph_cli/templates/production/README.md +121 -0
  59. langgraph_cli/templates/production/langgraph.json +6 -0
  60. langgraph_cli/templates/production/pyproject.toml +20 -0
  61. langgraph_cli/templates/production/src/__init__.py +1 -0
  62. langgraph_cli/templates/production/src/__pycache__/__init__.cpython-313.pyc +0 -0
  63. langgraph_cli/templates/production/src/app/__init__.py +1 -0
  64. langgraph_cli/templates/production/src/app/__pycache__/__init__.cpython-313.pyc +0 -0
  65. langgraph_cli/templates/production/src/app/__pycache__/config.cpython-313.pyc +0 -0
  66. langgraph_cli/templates/production/src/app/__pycache__/main.cpython-313.pyc +0 -0
  67. langgraph_cli/templates/production/src/app/api/__init__.py +1 -0
  68. langgraph_cli/templates/production/src/app/api/__pycache__/__init__.cpython-313.pyc +0 -0
  69. langgraph_cli/templates/production/src/app/api/__pycache__/app.cpython-313.pyc +0 -0
  70. langgraph_cli/templates/production/src/app/api/__pycache__/routes.cpython-313.pyc +0 -0
  71. langgraph_cli/templates/production/src/app/api/app.py +11 -0
  72. langgraph_cli/templates/production/src/app/api/routes.py +16 -0
  73. langgraph_cli/templates/production/src/app/config.py +37 -0
  74. langgraph_cli/templates/production/src/app/evaluation/__init__.py +1 -0
  75. langgraph_cli/templates/production/src/app/evaluation/__pycache__/__init__.cpython-313.pyc +0 -0
  76. langgraph_cli/templates/production/src/app/evaluation/__pycache__/evaluator.cpython-313.pyc +0 -0
  77. langgraph_cli/templates/production/src/app/evaluation/__pycache__/scoring.cpython-313.pyc +0 -0
  78. langgraph_cli/templates/production/src/app/evaluation/evaluator.py +12 -0
  79. langgraph_cli/templates/production/src/app/evaluation/scoring.py +2 -0
  80. langgraph_cli/templates/production/src/app/graph/__init__.py +1 -0
  81. langgraph_cli/templates/production/src/app/graph/__pycache__/__init__.cpython-313.pyc +0 -0
  82. langgraph_cli/templates/production/src/app/graph/__pycache__/builder.cpython-313.pyc +0 -0
  83. langgraph_cli/templates/production/src/app/graph/__pycache__/constants.cpython-313.pyc +0 -0
  84. langgraph_cli/templates/production/src/app/graph/__pycache__/edges.cpython-313.pyc +0 -0
  85. langgraph_cli/templates/production/src/app/graph/__pycache__/registry.cpython-313.pyc +0 -0
  86. langgraph_cli/templates/production/src/app/graph/__pycache__/state.cpython-313.pyc +0 -0
  87. langgraph_cli/templates/production/src/app/graph/builder.py +20 -0
  88. langgraph_cli/templates/production/src/app/graph/constants.py +28 -0
  89. langgraph_cli/templates/production/src/app/graph/edges.py +21 -0
  90. langgraph_cli/templates/production/src/app/graph/registry.py +10 -0
  91. langgraph_cli/templates/production/src/app/graph/state.py +15 -0
  92. langgraph_cli/templates/production/src/app/main.py +27 -0
  93. langgraph_cli/templates/production/src/app/models/__init__.py +1 -0
  94. langgraph_cli/templates/production/src/app/models/__pycache__/__init__.cpython-313.pyc +0 -0
  95. langgraph_cli/templates/production/src/app/models/__pycache__/schema.cpython-313.pyc +0 -0
  96. langgraph_cli/templates/production/src/app/models/__pycache__/workflow.cpython-313.pyc +0 -0
  97. langgraph_cli/templates/production/src/app/models/schema.py +11 -0
  98. langgraph_cli/templates/production/src/app/models/workflow.py +30 -0
  99. langgraph_cli/templates/production/src/app/nodes/__init__.py +1 -0
  100. langgraph_cli/templates/production/src/app/nodes/__pycache__/__init__.cpython-313.pyc +0 -0
  101. langgraph_cli/templates/production/src/app/nodes/__pycache__/error.cpython-313.pyc +0 -0
  102. langgraph_cli/templates/production/src/app/nodes/__pycache__/intent.cpython-313.pyc +0 -0
  103. langgraph_cli/templates/production/src/app/nodes/__pycache__/output.cpython-313.pyc +0 -0
  104. langgraph_cli/templates/production/src/app/nodes/__pycache__/processing.cpython-313.pyc +0 -0
  105. langgraph_cli/templates/production/src/app/nodes/__pycache__/validation.cpython-313.pyc +0 -0
  106. langgraph_cli/templates/production/src/app/nodes/error.py +7 -0
  107. langgraph_cli/templates/production/src/app/nodes/intent.py +21 -0
  108. langgraph_cli/templates/production/src/app/nodes/output.py +13 -0
  109. langgraph_cli/templates/production/src/app/nodes/processing.py +32 -0
  110. langgraph_cli/templates/production/src/app/nodes/validation.py +22 -0
  111. langgraph_cli/templates/production/src/app/observability/__init__.py +1 -0
  112. langgraph_cli/templates/production/src/app/observability/__pycache__/__init__.cpython-313.pyc +0 -0
  113. langgraph_cli/templates/production/src/app/observability/__pycache__/langsmith.cpython-313.pyc +0 -0
  114. langgraph_cli/templates/production/src/app/observability/__pycache__/metrics.cpython-313.pyc +0 -0
  115. langgraph_cli/templates/production/src/app/observability/langsmith.py +31 -0
  116. langgraph_cli/templates/production/src/app/observability/metrics.py +15 -0
  117. langgraph_cli/templates/production/src/app/prompts/__init__.py +1 -0
  118. langgraph_cli/templates/production/src/app/prompts/__pycache__/__init__.cpython-313.pyc +0 -0
  119. langgraph_cli/templates/production/src/app/prompts/__pycache__/registry.cpython-313.pyc +0 -0
  120. langgraph_cli/templates/production/src/app/prompts/registry.py +20 -0
  121. langgraph_cli/templates/production/src/app/prompts/versions/extraction/v1.txt +1 -0
  122. langgraph_cli/templates/production/src/app/prompts/versions/intent/v1.txt +1 -0
  123. langgraph_cli/templates/production/src/app/prompts/versions/intent/v2.txt +1 -0
  124. langgraph_cli/templates/production/src/app/prompts/versions/validation/v1.txt +1 -0
  125. langgraph_cli/templates/production/src/app/services/__init__.py +1 -0
  126. langgraph_cli/templates/production/src/app/services/__pycache__/__init__.cpython-313.pyc +0 -0
  127. langgraph_cli/templates/production/src/app/services/__pycache__/evaluation_service.cpython-313.pyc +0 -0
  128. langgraph_cli/templates/production/src/app/services/__pycache__/llm_service.cpython-313.pyc +0 -0
  129. langgraph_cli/templates/production/src/app/services/__pycache__/prompt_service.cpython-313.pyc +0 -0
  130. langgraph_cli/templates/production/src/app/services/__pycache__/tool_service.cpython-313.pyc +0 -0
  131. langgraph_cli/templates/production/src/app/services/__pycache__/versioning_service.cpython-313.pyc +0 -0
  132. langgraph_cli/templates/production/src/app/services/evaluation_service.py +19 -0
  133. langgraph_cli/templates/production/src/app/services/llm_service.py +56 -0
  134. langgraph_cli/templates/production/src/app/services/prompt_service.py +21 -0
  135. langgraph_cli/templates/production/src/app/services/tool_service.py +22 -0
  136. langgraph_cli/templates/production/src/app/services/versioning_service.py +12 -0
  137. langgraph_cli/templates/production/src/app/storage/__init__.py +1 -0
  138. langgraph_cli/templates/production/src/app/storage/__pycache__/__init__.cpython-313.pyc +0 -0
  139. langgraph_cli/templates/production/src/app/storage/__pycache__/cache.cpython-313.pyc +0 -0
  140. langgraph_cli/templates/production/src/app/storage/__pycache__/prompt_store.cpython-313.pyc +0 -0
  141. langgraph_cli/templates/production/src/app/storage/__pycache__/workflow_store.cpython-313.pyc +0 -0
  142. langgraph_cli/templates/production/src/app/storage/cache.py +12 -0
  143. langgraph_cli/templates/production/src/app/storage/prompt_store.py +11 -0
  144. langgraph_cli/templates/production/src/app/storage/workflow_store.py +13 -0
  145. langgraph_cli/templates/production/src/app/tools/__init__.py +1 -0
  146. langgraph_cli/templates/production/src/app/tools/__pycache__/__init__.cpython-313.pyc +0 -0
  147. langgraph_cli/templates/production/src/app/tools/__pycache__/base.cpython-313.pyc +0 -0
  148. langgraph_cli/templates/production/src/app/tools/__pycache__/registry.cpython-313.pyc +0 -0
  149. langgraph_cli/templates/production/src/app/tools/api/__pycache__/http_tool.cpython-313.pyc +0 -0
  150. langgraph_cli/templates/production/src/app/tools/api/http_tool.py +12 -0
  151. langgraph_cli/templates/production/src/app/tools/base.py +15 -0
  152. langgraph_cli/templates/production/src/app/tools/db/__pycache__/query_tool.cpython-313.pyc +0 -0
  153. langgraph_cli/templates/production/src/app/tools/db/query_tool.py +12 -0
  154. langgraph_cli/templates/production/src/app/tools/rag/__pycache__/retriever_tool.cpython-313.pyc +0 -0
  155. langgraph_cli/templates/production/src/app/tools/rag/retriever_tool.py +18 -0
  156. langgraph_cli/templates/production/src/app/tools/registry.py +26 -0
  157. langgraph_cli/templates/production/src/app/tools/utils/__pycache__/calculator_tool.cpython-313.pyc +0 -0
  158. langgraph_cli/templates/production/src/app/tools/utils/calculator_tool.py +21 -0
  159. langgraph_cli/templates/production/src/app/utils/__init__.py +1 -0
  160. langgraph_cli/templates/production/src/app/utils/__pycache__/__init__.cpython-313.pyc +0 -0
  161. langgraph_cli/templates/production/src/app/utils/__pycache__/logger.cpython-313.pyc +0 -0
  162. langgraph_cli/templates/production/src/app/utils/__pycache__/parallel.cpython-313.pyc +0 -0
  163. langgraph_cli/templates/production/src/app/utils/__pycache__/tracing.cpython-313.pyc +0 -0
  164. langgraph_cli/templates/production/src/app/utils/logger.py +33 -0
  165. langgraph_cli/templates/production/src/app/utils/parallel.py +9 -0
  166. langgraph_cli/templates/production/src/app/utils/tracing.py +16 -0
  167. langgraph_init_cli-0.1.0.dist-info/METADATA +383 -0
  168. langgraph_init_cli-0.1.0.dist-info/RECORD +172 -0
  169. langgraph_init_cli-0.1.0.dist-info/WHEEL +5 -0
  170. langgraph_init_cli-0.1.0.dist-info/entry_points.txt +2 -0
  171. langgraph_init_cli-0.1.0.dist-info/licenses/LICENSE +21 -0
  172. langgraph_init_cli-0.1.0.dist-info/top_level.txt +1 -0
@@ -0,0 +1,11 @@
1
+ from __future__ import annotations
2
+
3
+ from src.app.api.routes import invoke_workflow
4
+
5
+
6
class Application:
    """Thin facade that forwards raw text input to the workflow graph."""

    def handle(self, input_text: str) -> dict[str, object]:
        """Run the workflow for *input_text* and return the final graph state."""
        payload = {"input_text": input_text}
        return invoke_workflow(payload)


# Module-level singleton for callers that only need one entry point.
app = Application()
@@ -0,0 +1,16 @@
1
+ from __future__ import annotations
2
+
3
+ from src.app.config import settings
4
+ from src.app.graph.builder import build_graph
5
+
6
+
7
def invoke_workflow(payload: dict[str, object]) -> dict[str, object]:
    """Compile the workflow graph and invoke it with a state seeded from *payload*.

    Missing keys fall back to empty values; prompt versions default to the
    configured project-wide defaults when the caller does not supply any.
    """
    graph = build_graph()
    seed_state = {
        "input_text": payload.get("input_text", ""),
        "retry_count": 0,
        "messages": [],
        # A fresh copy so graph nodes can never mutate the shared defaults.
        "prompt_versions": payload.get("prompt_versions", settings.default_prompt_versions.copy()),
        "tool_results": {},
    }
    return graph.invoke(seed_state)
@@ -0,0 +1,37 @@
1
+ from __future__ import annotations
2
+
3
+ import os
4
+ from dataclasses import dataclass
5
+ from pathlib import Path
6
+
7
+ from dotenv import load_dotenv
8
+
9
+ load_dotenv()
10
+
11
+
12
+ @dataclass(slots=True)
13
+ class Settings:
14
+ project_name: str = "{{PROJECT_NAME}}"
15
+ environment: str = os.getenv("APP_ENV", "development")
16
+ log_level: str = os.getenv("LOG_LEVEL", "INFO")
17
+ langsmith_tracing: bool = os.getenv("LANGSMITH_TRACING", "false").lower() == "true"
18
+ langsmith_endpoint: str = os.getenv("LANGSMITH_ENDPOINT", "https://api.smith.langchain.com")
19
+ langsmith_api_key: str = os.getenv("LANGSMITH_API_KEY", "")
20
+ langsmith_project: str = os.getenv("LANGSMITH_PROJECT", "{{PROJECT_NAME}}")
21
+ default_prompt_versions: dict[str, str] | None = None
22
+ max_validation_retries: int = 2
23
+
24
+ def __post_init__(self) -> None:
25
+ if self.default_prompt_versions is None:
26
+ self.default_prompt_versions = {
27
+ "intent": "v2",
28
+ "extraction": "v1",
29
+ "validation": "v1",
30
+ }
31
+
32
+ @property
33
+ def prompt_root(self) -> Path:
34
+ return Path(__file__).resolve().parent / "prompts" / "versions"
35
+
36
+
37
+ settings = Settings()
@@ -0,0 +1 @@
1
+ """Evaluation package."""
@@ -0,0 +1,12 @@
1
+ from __future__ import annotations
2
+
3
+
4
class Evaluator:
    """Heuristic evaluator that measures how complete an extraction is."""

    # Values treated as "not populated" for coverage purposes.
    _EMPTY_VALUES = ("", None, [], {})

    def evaluate(self, extracted: dict[str, object], prompt: str) -> dict[str, object]:
        """Return field-coverage statistics for *extracted*.

        ``field_coverage`` is the fraction of populated fields (rounded to 2
        places); ``checked_with_prompt`` records whether a prompt was supplied.
        """
        filled = sum(1 for value in extracted.values() if value not in self._EMPTY_VALUES)
        denominator = max(len(extracted), 1)  # guard against an empty dict
        return {
            "field_coverage": round(filled / denominator, 2),
            "checked_with_prompt": bool(prompt),
        }
@@ -0,0 +1,2 @@
1
def score_validation(field_coverage: float, confidence: float) -> float:
    """Blend coverage (60%) and confidence (40%) into one rounded score."""
    weighted = 0.6 * field_coverage + 0.4 * confidence
    return round(weighted, 2)
@@ -0,0 +1 @@
1
+ """Graph package."""
@@ -0,0 +1,20 @@
1
+ from langgraph.graph import END, START, StateGraph
2
+
3
+ from src.app.graph.constants import Names
4
+ from src.app.graph.edges import EDGE_MAP, route_after_validation
5
+ from src.app.graph.registry import NODE_REGISTRY
6
+ from src.app.graph.state import WorkflowState
7
+
8
+
9
def build_graph():
    """Assemble and compile the workflow state graph.

    Flow: intent -> processing -> validation, then a conditional route to
    output, back to processing (retry), or the error node.
    """
    workflow = StateGraph(WorkflowState)

    # Register every node known to the registry under its canonical name.
    for node_name, node_fn in NODE_REGISTRY.items():
        workflow.add_node(node_name, node_fn)

    # Linear path up to validation.
    workflow.add_edge(START, Names.INTENT)
    workflow.add_edge(Names.INTENT, Names.PROCESSING)
    workflow.add_edge(Names.PROCESSING, Names.VALIDATION)
    # Validation fans out according to the routing function + edge map.
    workflow.add_conditional_edges(Names.VALIDATION, route_after_validation, EDGE_MAP)
    # Both terminal nodes go straight to END.
    workflow.add_edge(Names.OUTPUT, END)
    workflow.add_edge(Names.ERROR, END)
    return workflow.compile()
@@ -0,0 +1,28 @@
1
+ from src.app.nodes.error import error_node
2
+ from src.app.nodes.intent import intent_node
3
+ from src.app.nodes.output import output_node
4
+ from src.app.nodes.processing import processing_node
5
+ from src.app.nodes.validation import validation_node
6
+
7
+
8
class Names:
    """Canonical node names used when wiring the graph."""

    INTENT = "intent"
    PROCESSING = "processing"
    VALIDATION = "validation"
    OUTPUT = "output"
    ERROR = "error"
14
+
15
+
16
class Tags:
    """Routing tags returned by conditional-edge functions."""

    CONTINUE = "continue"
    RETRY = "retry"
    COMPLETE = "complete"
    ERROR = "error"
21
+
22
+
23
class Nodes:
    """Node callables, grouped so the registry can pair them with Names."""

    INTENT = intent_node
    PROCESSING = processing_node
    VALIDATION = validation_node
    OUTPUT = output_node
    ERROR = error_node
@@ -0,0 +1,21 @@
1
+ from src.app.config import settings
2
+ from src.app.graph.constants import Names, Tags
3
+ from src.app.graph.state import WorkflowState
4
+
5
+
6
def route_after_validation(state: WorkflowState) -> str:
    """Pick the next routing tag after the validation node.

    Priority: hard errors first, then success, then bounded retries;
    exhausted retries fall through to the error path.
    """
    if state.get("error"):
        return Tags.ERROR
    verdict = state.get("validation", {})
    if verdict.get("is_valid"):
        return Tags.COMPLETE
    # Retry processing until the configured cap is exhausted.
    if state.get("retry_count", 0) < settings.max_validation_retries:
        return Tags.RETRY
    return Tags.ERROR


# Maps routing tags to destination node names for the conditional edge.
EDGE_MAP = {
    Tags.COMPLETE: Names.OUTPUT,
    Tags.RETRY: Names.PROCESSING,
    Tags.ERROR: Names.ERROR,
}
@@ -0,0 +1,10 @@
1
+ from src.app.graph.constants import Names, Nodes
2
+
3
+
4
# Single source of truth pairing each canonical node name with its callable;
# build_graph iterates this mapping to register every node.
NODE_REGISTRY = {
    Names.INTENT: Nodes.INTENT,
    Names.PROCESSING: Nodes.PROCESSING,
    Names.VALIDATION: Nodes.VALIDATION,
    Names.OUTPUT: Nodes.OUTPUT,
    Names.ERROR: Nodes.ERROR,
}
@@ -0,0 +1,15 @@
1
+ from typing import Any, TypedDict
2
+
3
+
4
class WorkflowState(TypedDict, total=False):
    """State dict threaded through the graph; every key is optional."""

    input_text: str                   # raw user request
    intent: str                       # intent label set by the intent node
    extracted_data: dict[str, Any]    # structured extraction output
    validation: dict[str, Any]        # validation verdict and scores
    output: str                       # rendered final answer
    error: str                        # terminal error message, if any
    confidence: float                 # intent-classification confidence
    retry_count: int                  # processing attempts so far
    messages: list[str]               # breadcrumb trail appended by nodes
    prompt_versions: dict[str, str]   # task name -> prompt version selector
    tool_results: dict[str, Any]      # outputs produced by tool execution
@@ -0,0 +1,27 @@
1
+ from __future__ import annotations
2
+
3
+ from src.app.config import settings
4
+ from src.app.graph.builder import build_graph
5
+ from src.app.observability.metrics import metrics
6
+ from src.app.utils.logger import get_logger
7
+
8
+ logger = get_logger(__name__)
9
+
10
+
11
def run():
    """Execute the workflow once with a bundled sample request.

    Prints the rendered output and returns the final graph state.
    """
    graph = build_graph()
    demo_state = {
        "input_text": "Please calculate 12 + 7 and validate the answer.",
        "retry_count": 0,
        "messages": [],
        # Copy so nodes can never mutate the shared defaults.
        "prompt_versions": settings.default_prompt_versions.copy(),
        "tool_results": {},
    }
    result = graph.invoke(demo_state)
    logger.info(
        "workflow_complete",
        extra={"output": result.get("output"), "metrics": metrics.snapshot()},
    )
    print(result.get("output", "No output produced."))
    return result


if __name__ == "__main__":
    run()
@@ -0,0 +1 @@
1
+ """Models package."""
@@ -0,0 +1,11 @@
1
+ from __future__ import annotations
2
+
3
+ from dataclasses import dataclass, field
4
+
5
+
6
+ @dataclass(slots=True)
7
+ class ExtractionSchema:
8
+ numbers: list[int] = field(default_factory=list)
9
+ prompt_used: str = ""
10
+ requires_tool: bool = False
11
+ text_length: int = 0
@@ -0,0 +1,30 @@
1
+ from __future__ import annotations
2
+
3
+ from dataclasses import dataclass, field
4
+
5
+
6
+ @dataclass(slots=True)
7
+ class WorkflowResult:
8
+ intent: str
9
+ confidence: float
10
+ status: str
11
+ validation_reason: str
12
+ tool_results: dict[str, object] = field(default_factory=dict)
13
+
14
+ @classmethod
15
+ def from_state(cls, state):
16
+ validation = state.get("validation", {})
17
+ return cls(
18
+ intent=state.get("intent", "unknown"),
19
+ confidence=state.get("confidence", 0.0),
20
+ status="success" if validation.get("is_valid") else "failed",
21
+ validation_reason=validation.get("reason", "No validation result."),
22
+ tool_results=state.get("tool_results", {}),
23
+ )
24
+
25
+ def render(self) -> str:
26
+ return (
27
+ f"intent={self.intent}; confidence={self.confidence:.2f}; "
28
+ f"status={self.status}; validation={self.validation_reason}; "
29
+ f"tools={self.tool_results}"
30
+ )
@@ -0,0 +1 @@
1
+ """Node package."""
@@ -0,0 +1,7 @@
1
+ from src.app.graph.state import WorkflowState
2
+
3
+
4
def error_node(state: WorkflowState) -> WorkflowState:
    """Terminal node: surface the most specific failure reason available."""
    verdict = state.get("validation", {})
    # Prefer an explicit error, then the validation reason, then a generic message.
    reason = state.get("error") or verdict.get("reason") or "Workflow failed validation."
    return {"output": f"ERROR: {reason}", "error": reason}
@@ -0,0 +1,21 @@
1
+ from src.app.graph.state import WorkflowState
2
+ from src.app.services.llm_service import LLMService
3
+ from src.app.services.prompt_service import PromptService
4
+ from src.app.utils.logger import get_logger
5
+
6
+ logger = get_logger(__name__)
7
+
8
+
9
def intent_node(state: WorkflowState) -> WorkflowState:
    """Classify the user's intent and record the classifier's confidence."""
    prompts = PromptService()
    llm = LLMService(prompt_service=prompts)

    text = state.get("input_text", "")
    requested_version = state.get("prompt_versions", {}).get("intent")
    prompt = prompts.load_prompt("intent", version=requested_version)

    result = llm.classify_intent(prompt=prompt, text=text)
    logger.info(
        "intent_node",
        extra={"intent": result["intent"], "confidence": result["confidence"]},
    )
    return {
        "intent": result["intent"],
        "confidence": result["confidence"],
        # Append a breadcrumb without mutating the incoming list.
        "messages": state.get("messages", []) + [f"intent:{result['intent']}"],
    }
@@ -0,0 +1,13 @@
1
+ from src.app.graph.state import WorkflowState
2
+ from src.app.models.workflow import WorkflowResult
3
+ from src.app.storage.workflow_store import WorkflowStore
4
+ from src.app.utils.logger import get_logger
5
+
6
+ logger = get_logger(__name__)
7
+
8
+
9
def output_node(state: WorkflowState) -> WorkflowState:
    """Persist the final result and render it for the caller."""
    summary = WorkflowResult.from_state(state)
    WorkflowStore().save(summary)
    logger.info("output_node", extra={"status": summary.status})
    return {"output": summary.render()}
@@ -0,0 +1,32 @@
1
+ from src.app.graph.state import WorkflowState
2
+ from src.app.services.llm_service import LLMService
3
+ from src.app.services.prompt_service import PromptService
4
+ from src.app.services.tool_service import ToolService
5
+ from src.app.utils.logger import get_logger
6
+
7
+ logger = get_logger(__name__)
8
+
9
+
10
def processing_node(state: WorkflowState) -> WorkflowState:
    """Extract structured data, run intent-specific tools, and count the attempt."""
    prompts = PromptService()
    llm = LLMService(prompt_service=prompts)
    tools = ToolService()

    requested_version = state.get("prompt_versions", {}).get("extraction")
    prompt = prompts.load_prompt("extraction", version=requested_version)

    # Extraction and analysis run as one parallel enrichment pass.
    enrichment = llm.parallel_enrich(prompt=prompt, text=state.get("input_text", ""))
    tool_payload = tools.execute_for_intent(
        intent=state.get("intent", "general"),
        text=state.get("input_text", ""),
        extracted=enrichment["extraction"],
    )

    extracted_data = {**enrichment["extraction"], "analysis": enrichment["analysis"]}
    logger.info(
        "processing_node",
        extra={"extracted_keys": list(extracted_data.keys()), "tool_count": len(tool_payload)},
    )
    return {
        "extracted_data": extracted_data,
        "tool_results": tool_payload,
        # Each pass counts as one attempt; the validation router caps retries
        # against this value.
        "retry_count": state.get("retry_count", 0) + 1,
        "messages": state.get("messages", []) + ["processed"],
    }
@@ -0,0 +1,22 @@
1
+ from src.app.graph.state import WorkflowState
2
+ from src.app.services.evaluation_service import EvaluationService
3
+ from src.app.services.prompt_service import PromptService
4
+ from src.app.utils.logger import get_logger
5
+
6
+ logger = get_logger(__name__)
7
+
8
+
9
def validation_node(state: WorkflowState) -> WorkflowState:
    """Score the extraction against the validation prompt."""
    prompts = PromptService()
    evaluator = EvaluationService()

    validation_prompt = prompts.load_prompt(
        "validation",
        version=state.get("prompt_versions", {}).get("validation"),
    )
    verdict = evaluator.validate(
        extracted=state.get("extracted_data", {}),
        confidence=state.get("confidence", 0.0),
        prompt=validation_prompt,
    )
    logger.info("validation_node", extra={"validation": verdict})
    return {"validation": verdict, "messages": state.get("messages", []) + ["validated"]}
@@ -0,0 +1 @@
1
+ """Observability package."""
@@ -0,0 +1,31 @@
1
+ from __future__ import annotations
2
+
3
+ import functools
4
+ import os
5
+ from collections.abc import Callable
6
+
7
+ from src.app.config import settings
8
+ from src.app.utils.logger import get_logger
9
+
10
+ logger = get_logger(__name__)
11
+
12
+
13
def configure_langsmith() -> None:
    """Export LangSmith settings to the environment.

    Uses ``setdefault`` throughout so operator-supplied environment values
    always win over the application defaults.
    """
    os.environ.setdefault("LANGSMITH_TRACING", str(settings.langsmith_tracing).lower())
    os.environ.setdefault("LANGSMITH_ENDPOINT", settings.langsmith_endpoint)
    # Never export an empty API key.
    if settings.langsmith_api_key:
        os.environ.setdefault("LANGSMITH_API_KEY", settings.langsmith_api_key)
    os.environ.setdefault("LANGSMITH_PROJECT", settings.langsmith_project)
19
+
20
+
21
def traceable(name: str) -> Callable:
    """Decorator factory: ensure LangSmith env vars are set and log each call.

    The wrapped function's behavior and return value are untouched.
    """

    def decorator(func: Callable) -> Callable:
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            # Configuration relies on setdefault, so repeating it per call is safe.
            configure_langsmith()
            logger.info(
                "langsmith_trace",
                extra={"name": name, "enabled": settings.langsmith_tracing},
            )
            return func(*args, **kwargs)

        return wrapper

    return decorator
@@ -0,0 +1,15 @@
1
+ from __future__ import annotations
2
+
3
+
4
class Metrics:
    """Minimal in-process counter store (no locking is performed)."""

    def __init__(self) -> None:
        # counter name -> running total
        self._counters: dict[str, int] = {}

    def increment(self, name: str, amount: int = 1) -> None:
        """Add *amount* to counter *name*, creating it at zero if absent."""
        current = self._counters.get(name, 0)
        self._counters[name] = current + amount

    def snapshot(self) -> dict[str, int]:
        """Return a defensive copy of all counters."""
        return dict(self._counters)


# Shared process-wide metrics instance.
metrics = Metrics()
@@ -0,0 +1 @@
1
+ """Prompts package."""
@@ -0,0 +1,20 @@
1
+ from __future__ import annotations
2
+
3
+ from pathlib import Path
4
+
5
+ from src.app.config import settings
6
+
7
+
8
class PromptRegistry:
    """Loads versioned prompt text files from the configured prompt root."""

    def __init__(self) -> None:
        self.root = Path(settings.prompt_root)

    def resolve_version(self, task: str, version: str | None = None) -> str:
        """Return *version* if given, else the configured default (or "v1")."""
        return version or settings.default_prompt_versions.get(task, "v1")

    def load(self, task: str, version: str | None = None) -> str:
        """Read the prompt text for *task*.

        Falls back to ``v1.txt`` when the requested version file is missing.

        Raises:
            FileNotFoundError: if neither the requested version nor the v1
                fallback exists (with a message naming the task and version,
                instead of the bare path error ``read_text`` would raise).
        """
        selected = self.resolve_version(task, version)
        prompt_path = self.root / task / f"{selected}.txt"
        if not prompt_path.exists():
            # Silent downgrade keeps callers working when a newer version
            # was requested but never shipped.
            prompt_path = self.root / task / "v1.txt"
        if not prompt_path.exists():
            raise FileNotFoundError(
                f"No prompt file for task '{task}' (requested version '{selected}') under {self.root}"
            )
        return prompt_path.read_text(encoding="utf-8").strip()
@@ -0,0 +1 @@
1
+ Extract structured fields that can support downstream validation and tool execution.
@@ -0,0 +1 @@
1
+ Classify the user request into one of: general, calculation, validation.
@@ -0,0 +1 @@
1
+ Classify the request with strong preference for deterministic intents. Use calculation when math symbols or arithmetic verbs appear.
@@ -0,0 +1 @@
1
+ Validate that extraction produced enough fields and confidence to produce a final answer.
@@ -0,0 +1 @@
1
+ """Services package."""
@@ -0,0 +1,19 @@
1
+ from __future__ import annotations
2
+
3
+ from src.app.evaluation.evaluator import Evaluator
4
+ from src.app.evaluation.scoring import score_validation
5
+
6
+
7
class EvaluationService:
    """Wraps the evaluator and turns its metrics into a pass/fail verdict."""

    def __init__(self) -> None:
        self.evaluator = Evaluator()

    def validate(self, extracted: dict[str, object], confidence: float, prompt: str) -> dict[str, object]:
        """Evaluate *extracted*, attach a blended score, and decide validity."""
        report = self.evaluator.evaluate(extracted=extracted, prompt=prompt)
        report["confidence_score"] = score_validation(
            field_coverage=report["field_coverage"],
            confidence=confidence,
        )
        # Both coverage and the blended score must clear the 0.5 bar.
        passed = report["field_coverage"] >= 0.5 and report["confidence_score"] >= 0.5
        report["is_valid"] = passed
        report["reason"] = (
            "Validation passed."
            if passed
            else "Validation failed because coverage or confidence was too low."
        )
        return report
@@ -0,0 +1,56 @@
1
+ from __future__ import annotations
2
+
3
+ import re
4
+ from dataclasses import dataclass
5
+
6
+ from langchain_core.runnables import RunnableLambda, RunnableParallel
7
+
8
+ from src.app.observability.langsmith import traceable
9
+ from src.app.observability.metrics import metrics
10
+ from src.app.utils.logger import get_logger
11
+
12
+ logger = get_logger(__name__)
13
+
14
+
15
@dataclass(slots=True)
class LLMService:
    """Deterministic stand-in for an LLM: rule-based intent and extraction."""

    # Kept for interface parity with a real LLM-backed service.
    prompt_service: object

    @traceable(name="classify_intent")
    def classify_intent(self, prompt: str, text: str) -> dict[str, object]:
        """Classify *text* as general/calculation/validation with a confidence."""
        lowered = text.lower()
        math_markers = ("calculate", "+", "-", "*", "/")
        if any(marker in lowered for marker in math_markers):
            intent, confidence = "calculation", 0.93
        elif "validate" in lowered:
            intent, confidence = "validation", 0.8
        else:
            intent, confidence = "general", 0.55
        metrics.increment("llm.classify_intent.calls")
        logger.info("classify_intent", extra={"prompt_preview": prompt[:40], "intent": intent})
        return {"intent": intent, "confidence": confidence}

    @traceable(name="parallel_enrich")
    def parallel_enrich(self, prompt: str, text: str) -> dict[str, dict[str, object]]:
        """Run field extraction and text analysis as parallel runnable branches."""
        extraction_branch = RunnableLambda(
            lambda payload: self._extract_fields(prompt=payload["prompt"], text=payload["text"])
        )
        analysis_branch = RunnableLambda(lambda payload: self._derive_analysis(text=payload["text"]))
        fan_out = RunnableParallel(extraction=extraction_branch, analysis=analysis_branch)
        metrics.increment("llm.parallel_enrich.calls")
        return fan_out.invoke({"prompt": prompt, "text": text})

    def _extract_fields(self, prompt: str, text: str) -> dict[str, object]:
        # Pull every signed integer out of the raw text.
        numbers = [int(token) for token in re.findall(r"-?\d+", text)]
        first_prompt_line = prompt.splitlines()[0] if prompt else "missing-prompt"
        return {
            "prompt_used": first_prompt_line,
            "numbers": numbers,
            "requires_tool": bool(numbers),
            "text_length": len(text),
        }

    def _derive_analysis(self, text: str) -> dict[str, object]:
        lowered = text.lower()
        math_markers = ("calculate", "+", "-", "*", "/")
        return {
            "contains_validation": "validate" in lowered,
            "contains_math": any(marker in lowered for marker in math_markers),
        }