lattice-langgraph 0.2.1__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,12 @@
1
+ __pycache__/
2
+ *.pyc
3
+ *.pyo
4
+ *.pyd
5
+ .Python
6
+ *.egg-info/
7
+ dist/
8
+ build/
9
+ .venv/
10
+ venv/
11
+ *.egg
12
+ .pytest_cache/
@@ -0,0 +1,21 @@
1
+ MIT License
2
+
3
+ Copyright (c) 2026 heybeaux
4
+
5
+ Permission is hereby granted, free of charge, to any person obtaining a copy
6
+ of this software and associated documentation files (the "Software"), to deal
7
+ in the Software without restriction, including without limitation the rights
8
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9
+ copies of the Software, and to permit persons to whom the Software is
10
+ furnished to do so, subject to the following conditions:
11
+
12
+ The above copyright notice and this permission notice shall be included in all
13
+ copies or substantial portions of the Software.
14
+
15
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21
+ SOFTWARE.
@@ -0,0 +1,199 @@
1
+ Metadata-Version: 2.4
2
+ Name: lattice-langgraph
3
+ Version: 0.2.1
4
+ Summary: Lattice State Contract coordination layer for LangGraph workflows
5
+ Project-URL: Homepage, https://github.com/heybeaux/lattice
6
+ Project-URL: Documentation, https://heybeaux.github.io/lattice/
7
+ Project-URL: Repository, https://github.com/heybeaux/lattice/tree/main/packages/adapter-langgraph
8
+ Project-URL: Issues, https://github.com/heybeaux/lattice/issues
9
+ License: MIT
10
+ License-File: LICENSE
11
+ Keywords: ai-agents,circuit-breaker,coordination,langgraph,lattice,state-contract
12
+ Classifier: Development Status :: 4 - Beta
13
+ Classifier: Intended Audience :: Developers
14
+ Classifier: License :: OSI Approved :: MIT License
15
+ Classifier: Programming Language :: Python :: 3
16
+ Classifier: Programming Language :: Python :: 3.10
17
+ Classifier: Programming Language :: Python :: 3.11
18
+ Classifier: Programming Language :: Python :: 3.12
19
+ Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
20
+ Classifier: Topic :: Software Development :: Libraries :: Python Modules
21
+ Requires-Python: >=3.10
22
+ Requires-Dist: jsonschema>=4.0.0
23
+ Requires-Dist: numpy>=1.24.0
24
+ Requires-Dist: openai>=1.0.0
25
+ Requires-Dist: python-ulid>=2.0.0
26
+ Provides-Extra: dev
27
+ Requires-Dist: langchain-core>=0.2.0; extra == 'dev'
28
+ Requires-Dist: langgraph>=0.2.0; extra == 'dev'
29
+ Requires-Dist: pytest-asyncio>=0.21.0; extra == 'dev'
30
+ Requires-Dist: pytest>=7.0.0; extra == 'dev'
31
+ Description-Content-Type: text/markdown
32
+
33
+ # lattice-langgraph
34
+
35
+ Lattice coordination layer for [LangGraph](https://github.com/langchain-ai/langgraph) workflows.
36
+
37
+ Wraps LangGraph nodes with **State Contracts** and **Circuit Breakers** — every handoff becomes a typed, validated, auditable envelope.
38
+
39
+ ## Install
40
+
41
+ ```bash
42
+ pip install lattice-langgraph
43
+ ```
44
+
45
+ ## Quick start
46
+
47
+ ### Option A — wrap individual nodes
48
+
49
+ ```python
50
+ from lattice_langgraph import wrap_node, BreakerConfig
51
+ from langgraph.graph import StateGraph, END
52
+ from typing import TypedDict
53
+
54
+ class State(TypedDict):
55
+ topic: str
56
+ result: str
57
+
58
+ def researcher(state):
59
+ return {"result": f"findings about {state['topic']}"}
60
+
61
+ def writer(state):
62
+ return {"result": f"article: {state['result']}"}
63
+
64
+ graph = StateGraph(State)
65
+ graph.add_node("researcher", wrap_node(researcher, agent_id="researcher", to_agent="writer"))
66
+ graph.add_node("writer", wrap_node(writer, agent_id="writer"))
67
+ graph.set_entry_point("researcher")
68
+ graph.add_edge("researcher", "writer")
69
+ graph.add_edge("writer", END)
70
+
71
+ app = graph.compile()
72
+ result = app.invoke({"topic": "quantum computing"})
73
+ ```
74
+
75
+ ### Option B — wrap the whole graph with `LatticeMiddleware`
76
+
77
+ ```python
78
+ from lattice_langgraph import LatticeMiddleware
79
+ import os
80
+
81
+ # Build your graph normally
82
+ graph = StateGraph(State)
83
+ graph.add_node("researcher", researcher)
84
+ graph.add_node("writer", writer)
85
+ graph.set_entry_point("researcher")
86
+ graph.add_edge("researcher", "writer")
87
+ graph.add_edge("writer", END)
88
+ compiled = graph.compile()
89
+
90
+ # Wrap it
91
+ app = LatticeMiddleware(
92
+ compiled,
93
+ audit_log_path="./lattice-audit.jsonl",
94
+ openai_api_key=os.environ["OPENAI_API_KEY"],
95
+ )
96
+ result = app.invoke({"topic": "quantum computing"})
97
+ ```
98
+
99
+ ## Validation tiers
100
+
101
+ | Tier | What | When | Latency |
102
+ |------|------|------|---------|
103
+ | L1 | JSON Schema validation | Always | <200ms |
104
+ | L2 | Embedding similarity | On every handoff | ~500ms |
105
+ | L3 | LLM-as-judge (gpt-4o-mini) | L2 similarity < 0.85 or high-risk edge | 1–3s |
106
+ | auto | L1 always, L2 always, L3 on escalation | Default | varies |
107
+
108
+ ```python
109
+ from lattice_langgraph import BreakerConfig
110
+
111
+ # L1 only (fast, no LLM calls)
112
+ config = BreakerConfig(tier="L1")
113
+
114
+ # Full auto (L1+L2, escalate to L3 on low confidence)
115
+ config = BreakerConfig(tier="auto", openai_api_key="sk-...")
116
+
117
+ # Shadow mode — log without blocking
118
+ wrapped = wrap_node(my_node, agent_id="agent", shadow=True, breaker_config=config)
119
+ ```
120
+
121
+ ## Shadow mode
122
+
123
+ Shadow mode logs every handoff without blocking graph execution. Use this to collect production data before enabling enforcement:
124
+
125
+ ```python
126
+ app = LatticeMiddleware(graph, shadow=True, audit_log_path="./audit.jsonl")
127
+ ```
128
+
129
+ The audit log is JSONL. Each line:
130
+
131
+ ```json
132
+ {
133
+ "contract_id": "01HXZ...",
134
+ "trace_id": "01HXZ...",
135
+ "from_agent": "researcher",
136
+ "to_agent": "writer",
137
+ "timestamp": "2026-05-08T...",
138
+ "shadow": false,
139
+ "validation_tier": "L1",
140
+ "validation_passed": true,
141
+ "confidence": 1.0,
142
+ "latency_ms": 12
143
+ }
144
+ ```
145
+
146
+ ## State Contract schema
147
+
148
+ Every handoff produces a `StateContract`:
149
+
150
+ ```python
151
+ from lattice_langgraph import create_contract
152
+
153
+ contract = create_contract(
154
+ from_agent="researcher",
155
+ inputs={"topic": "climate"},
156
+ outputs={"findings": "..."},
157
+ to_agent="writer",
158
+ )
159
+ print(contract.to_dict())
160
+ ```
161
+
162
+ The schema is JSON Schema 2020-12 (same as `@heybeaux/lattice-core`). See [`contract.schema.json`](src/lattice_langgraph/contract.schema.json).
163
+
164
+ ## API reference
165
+
166
+ ### `wrap_node(node_fn, *, agent_id, to_agent=None, breaker_config=None, audit_logger=None, shadow=False, trace_id=None)`
167
+
168
+ Wraps a LangGraph node function. Returns a drop-in replacement with the same `(state: dict) -> dict` signature.
169
+
170
+ ### `LatticeMiddleware(graph, *, audit_log_path, breaker_config=None, shadow=False, openai_api_key=None)`
171
+
172
+ Wraps a compiled LangGraph graph. Delegates `.invoke()`, `.ainvoke()`, `.stream()` to the underlying graph after patching all nodes.
173
+
174
+ ### `BreakerConfig`
175
+
176
+ | Field | Default | Description |
177
+ |-------|---------|-------------|
178
+ | `tier` | `"auto"` | `"L1"`, `"L2"`, `"L3"`, or `"auto"` |
179
+ | `l2_threshold` | `0.85` | Confidence below which L3 is triggered |
180
+ | `openai_api_key` | `None` | Falls back to `OPENAI_API_KEY` env var |
181
+ | `block_on_failure` | `True` | Raise `LatticeValidationError` on failure |
182
+
183
+ ## Benchmark results
184
+
185
+ From the Lattice benchmark suite (real OpenAI API calls):
186
+
187
+ - **100%** hallucination detection (6/6 caught by L3)
188
+ - **0%** false positive rate (4/4 correct outputs passed)
189
+ - **0** false negatives
190
+ - L1 latency: <200ms
191
+ - L3 latency: 1–3s (only on escalation)
192
+
193
+ ## Example
194
+
195
+ See [`examples/research_pipeline.py`](examples/research_pipeline.py) for a complete 4-node research graph using both `wrap_node` and `LatticeMiddleware`.
196
+
197
+ ```bash
198
+ OPENAI_API_KEY=sk-... python examples/research_pipeline.py
199
+ ```
@@ -0,0 +1,167 @@
1
+ # lattice-langgraph
2
+
3
+ Lattice coordination layer for [LangGraph](https://github.com/langchain-ai/langgraph) workflows.
4
+
5
+ Wraps LangGraph nodes with **State Contracts** and **Circuit Breakers** — every handoff becomes a typed, validated, auditable envelope.
6
+
7
+ ## Install
8
+
9
+ ```bash
10
+ pip install lattice-langgraph
11
+ ```
12
+
13
+ ## Quick start
14
+
15
+ ### Option A — wrap individual nodes
16
+
17
+ ```python
18
+ from lattice_langgraph import wrap_node, BreakerConfig
19
+ from langgraph.graph import StateGraph, END
20
+ from typing import TypedDict
21
+
22
+ class State(TypedDict):
23
+ topic: str
24
+ result: str
25
+
26
+ def researcher(state):
27
+ return {"result": f"findings about {state['topic']}"}
28
+
29
+ def writer(state):
30
+ return {"result": f"article: {state['result']}"}
31
+
32
+ graph = StateGraph(State)
33
+ graph.add_node("researcher", wrap_node(researcher, agent_id="researcher", to_agent="writer"))
34
+ graph.add_node("writer", wrap_node(writer, agent_id="writer"))
35
+ graph.set_entry_point("researcher")
36
+ graph.add_edge("researcher", "writer")
37
+ graph.add_edge("writer", END)
38
+
39
+ app = graph.compile()
40
+ result = app.invoke({"topic": "quantum computing"})
41
+ ```
42
+
43
+ ### Option B — wrap the whole graph with `LatticeMiddleware`
44
+
45
+ ```python
46
+ from lattice_langgraph import LatticeMiddleware
47
+ import os
48
+
49
+ # Build your graph normally
50
+ graph = StateGraph(State)
51
+ graph.add_node("researcher", researcher)
52
+ graph.add_node("writer", writer)
53
+ graph.set_entry_point("researcher")
54
+ graph.add_edge("researcher", "writer")
55
+ graph.add_edge("writer", END)
56
+ compiled = graph.compile()
57
+
58
+ # Wrap it
59
+ app = LatticeMiddleware(
60
+ compiled,
61
+ audit_log_path="./lattice-audit.jsonl",
62
+ openai_api_key=os.environ["OPENAI_API_KEY"],
63
+ )
64
+ result = app.invoke({"topic": "quantum computing"})
65
+ ```
66
+
67
+ ## Validation tiers
68
+
69
+ | Tier | What | When | Latency |
70
+ |------|------|------|---------|
71
+ | L1 | JSON Schema validation | Always | <200ms |
72
+ | L2 | Embedding similarity | On every handoff | ~500ms |
73
+ | L3 | LLM-as-judge (gpt-4o-mini) | L2 similarity < 0.85 or high-risk edge | 1–3s |
74
+ | auto | L1 always, L2 always, L3 on escalation | Default | varies |
75
+
76
+ ```python
77
+ from lattice_langgraph import BreakerConfig
78
+
79
+ # L1 only (fast, no LLM calls)
80
+ config = BreakerConfig(tier="L1")
81
+
82
+ # Full auto (L1+L2, escalate to L3 on low confidence)
83
+ config = BreakerConfig(tier="auto", openai_api_key="sk-...")
84
+
85
+ # Shadow mode — log without blocking
86
+ wrapped = wrap_node(my_node, agent_id="agent", shadow=True, breaker_config=config)
87
+ ```
88
+
89
+ ## Shadow mode
90
+
91
+ Shadow mode logs every handoff without blocking graph execution. Use this to collect production data before enabling enforcement:
92
+
93
+ ```python
94
+ app = LatticeMiddleware(graph, shadow=True, audit_log_path="./audit.jsonl")
95
+ ```
96
+
97
+ The audit log is JSONL. Each line:
98
+
99
+ ```json
100
+ {
101
+ "contract_id": "01HXZ...",
102
+ "trace_id": "01HXZ...",
103
+ "from_agent": "researcher",
104
+ "to_agent": "writer",
105
+ "timestamp": "2026-05-08T...",
106
+ "shadow": false,
107
+ "validation_tier": "L1",
108
+ "validation_passed": true,
109
+ "confidence": 1.0,
110
+ "latency_ms": 12
111
+ }
112
+ ```
113
+
114
+ ## State Contract schema
115
+
116
+ Every handoff produces a `StateContract`:
117
+
118
+ ```python
119
+ from lattice_langgraph import create_contract
120
+
121
+ contract = create_contract(
122
+ from_agent="researcher",
123
+ inputs={"topic": "climate"},
124
+ outputs={"findings": "..."},
125
+ to_agent="writer",
126
+ )
127
+ print(contract.to_dict())
128
+ ```
129
+
130
+ The schema is JSON Schema 2020-12 (same as `@heybeaux/lattice-core`). See [`contract.schema.json`](src/lattice_langgraph/contract.schema.json).
131
+
132
+ ## API reference
133
+
134
+ ### `wrap_node(node_fn, *, agent_id, to_agent=None, breaker_config=None, audit_logger=None, shadow=False, trace_id=None)`
135
+
136
+ Wraps a LangGraph node function. Returns a drop-in replacement with the same `(state: dict) -> dict` signature.
137
+
138
+ ### `LatticeMiddleware(graph, *, audit_log_path, breaker_config=None, shadow=False, openai_api_key=None)`
139
+
140
+ Wraps a compiled LangGraph graph. Delegates `.invoke()`, `.ainvoke()`, `.stream()` to the underlying graph after patching all nodes.
141
+
142
+ ### `BreakerConfig`
143
+
144
+ | Field | Default | Description |
145
+ |-------|---------|-------------|
146
+ | `tier` | `"auto"` | `"L1"`, `"L2"`, `"L3"`, or `"auto"` |
147
+ | `l2_threshold` | `0.85` | Confidence below which L3 is triggered |
148
+ | `openai_api_key` | `None` | Falls back to `OPENAI_API_KEY` env var |
149
+ | `block_on_failure` | `True` | Raise `LatticeValidationError` on failure |
150
+
151
+ ## Benchmark results
152
+
153
+ From the Lattice benchmark suite (real OpenAI API calls):
154
+
155
+ - **100%** hallucination detection (6/6 caught by L3)
156
+ - **0%** false positive rate (4/4 correct outputs passed)
157
+ - **0** false negatives
158
+ - L1 latency: <200ms
159
+ - L3 latency: 1–3s (only on escalation)
160
+
161
+ ## Example
162
+
163
+ See [`examples/research_pipeline.py`](examples/research_pipeline.py) for a complete 4-node research graph using both `wrap_node` and `LatticeMiddleware`.
164
+
165
+ ```bash
166
+ OPENAI_API_KEY=sk-... python examples/research_pipeline.py
167
+ ```
@@ -0,0 +1,122 @@
1
+ """
2
+ Example: 4-node research pipeline wrapped with Lattice.
3
+
4
+ Graph: planner -> researcher -> writer -> reviewer
5
+
6
+ Run:
7
+ pip install lattice-langgraph langgraph langchain-core
8
+ OPENAI_API_KEY=sk-... python examples/research_pipeline.py
9
+ """
10
+
11
+ import os
12
+ from typing import TypedDict
13
+
14
+ from langgraph.graph import END, StateGraph
15
+
16
+ from lattice_langgraph import BreakerConfig, LatticeMiddleware, wrap_node
17
+
18
+
19
# Shared pipeline state: each node fills in exactly one field as the graph runs.
ResearchState = TypedDict(
    "ResearchState",
    {
        "topic": str,     # user-supplied research topic
        "plan": str,      # produced by the planner node
        "research": str,  # produced by the researcher node
        "draft": str,     # produced by the writer node
        "review": str,    # produced by the reviewer node
    },
)
25
+
26
+
27
+ # --- Node definitions ---
28
+
29
def planner_node(state: ResearchState) -> dict:
    """Produce a fixed four-step research plan for the requested topic."""
    return {
        "plan": (
            f"Research plan for '{state['topic']}': "
            "1) gather sources 2) synthesize 3) draft 4) review"
        )
    }
32
+
33
+
34
def researcher_node(state: ResearchState) -> dict:
    """Simulate research on the topic and return the findings.

    Fix: the original also read ``state["plan"]`` into a local that was
    never used — a dead lookup that implied a dependency the node does
    not have. It has been removed.
    """
    topic = state["topic"]
    return {
        "research": (
            f"Key findings on '{topic}': "
            "Source A suggests X. Source B corroborates with Y. "
            "Consensus: the evidence supports Z."
        )
    }
44
+
45
+
46
def writer_node(state: ResearchState) -> dict:
    """Turn the research findings into a short markdown draft article."""
    heading = state["topic"].title()
    findings = state["research"]
    draft = (
        f"# {heading}\n\n"
        f"Based on recent research:\n\n{findings}\n\n"
        "In conclusion, the evidence is clear and warrants further investigation."
    )
    return {"draft": draft}
56
+
57
+
58
def reviewer_node(state: ResearchState) -> dict:
    """Approve the draft and report its whitespace-delimited word count."""
    word_count = len(state["draft"].split())
    return {
        "review": (
            "Review passed. The draft covers the topic with adequate sourcing. "
            f"Word count: {word_count}."
        )
    }
63
+
64
+
65
+ # --- Option A: wrap individual nodes ---
66
+
67
def build_wrapped_graph_individual() -> StateGraph:
    """Build the research pipeline with each node wrapped via wrap_node() (Option A)."""
    # L1-only validation keeps the demo fast and offline (no LLM calls).
    breaker = BreakerConfig(tier="L1")

    pipeline = [
        ("planner", planner_node),
        ("researcher", researcher_node),
        ("writer", writer_node),
        ("reviewer", reviewer_node),
    ]
    # Each node hands off to its successor; the final node has no target.
    successor = {name: nxt for (name, _), (nxt, _) in zip(pipeline, pipeline[1:])}

    graph = StateGraph(ResearchState)
    for name, fn in pipeline:
        graph.add_node(
            name,
            wrap_node(
                fn,
                agent_id=name,
                to_agent=successor.get(name),
                breaker_config=breaker,
            ),
        )

    graph.set_entry_point("planner")
    for (src, _), (dst, _) in zip(pipeline, pipeline[1:]):
        graph.add_edge(src, dst)
    graph.add_edge("reviewer", END)
    return graph.compile()
82
+
83
+
84
+ # --- Option B: LatticeMiddleware wraps the whole graph ---
85
+
86
def build_wrapped_graph_middleware() -> LatticeMiddleware:
    """Build a plain graph, then wrap the whole compiled graph (Option B)."""
    graph = StateGraph(ResearchState)
    for name, fn in (
        ("planner", planner_node),
        ("researcher", researcher_node),
        ("writer", writer_node),
        ("reviewer", reviewer_node),
    ):
        graph.add_node(name, fn)

    graph.set_entry_point("planner")
    for src, dst in (
        ("planner", "researcher"),
        ("researcher", "writer"),
        ("writer", "reviewer"),
        ("reviewer", END),
    ):
        graph.add_edge(src, dst)

    # Full "auto" tier: L1+L2 on every handoff, escalating to L3 on low confidence.
    config = BreakerConfig(
        tier="auto",
        openai_api_key=os.environ.get("OPENAI_API_KEY"),
    )
    return LatticeMiddleware(
        graph.compile(),
        audit_log_path="./lattice-audit.jsonl",
        breaker_config=config,
        shadow=False,
    )
109
+
110
+
111
if __name__ == "__main__":
    # Demo both wiring styles back to back against the same four-node pipeline.
    print("=== Option A: wrap_node() per node ===")
    app_a = build_wrapped_graph_individual()
    out_a = app_a.invoke({"topic": "climate change adaptation strategies"})
    print("Review:", out_a["review"])
    print()

    print("=== Option B: LatticeMiddleware ===")
    app_b = build_wrapped_graph_middleware()
    out_b = app_b.invoke({"topic": "quantum computing in drug discovery"})
    print("Review:", out_b["review"])
    print("Audit log written to ./lattice-audit.jsonl")
@@ -0,0 +1,15 @@
1
#!/bin/bash
# Publish lattice-langgraph to PyPI.
# Requires: build, twine, and a PyPI API token in $PYPI_API_TOKEN.

# -u catches typo'd/unset variables; pipefail surfaces failures in pipelines.
set -euo pipefail

# Fail fast with a clear message rather than mid-upload with a 403.
if [ -z "${PYPI_API_TOKEN:-}" ]; then
    echo "Error: PYPI_API_TOKEN is not set" >&2
    exit 1
fi

# Remove stale artifacts so 'dist/*' uploads only what this run builds —
# otherwise an old wheel/sdist left in dist/ would be (re)uploaded too.
rm -rf dist/

echo "Building lattice-langgraph..."
python -m build

echo ""
echo "Uploading to PyPI..."
python -m twine upload --username __token__ --password "$PYPI_API_TOKEN" dist/*

echo ""
echo "✓ lattice-langgraph published to PyPI!"
@@ -0,0 +1,51 @@
1
+ [build-system]
2
+ requires = ["hatchling"]
3
+ build-backend = "hatchling.build"
4
+
5
+ [project]
6
+ name = "lattice-langgraph"
7
+ version = "0.2.1"
8
+ description = "Lattice State Contract coordination layer for LangGraph workflows"
9
+ readme = "README.md"
10
+ keywords = ["lattice", "langgraph", "ai-agents", "coordination", "state-contract", "circuit-breaker"]
11
+ classifiers = [
12
+ "Development Status :: 4 - Beta",
13
+ "Intended Audience :: Developers",
14
+ "License :: OSI Approved :: MIT License",
15
+ "Programming Language :: Python :: 3",
16
+ "Programming Language :: Python :: 3.10",
17
+ "Programming Language :: Python :: 3.11",
18
+ "Programming Language :: Python :: 3.12",
19
+ "Topic :: Scientific/Engineering :: Artificial Intelligence",
20
+ "Topic :: Software Development :: Libraries :: Python Modules",
21
+ ]
22
+ license = { text = "MIT" }
23
+ requires-python = ">=3.10"
24
+ dependencies = [
25
+ "python-ulid>=2.0.0",
26
+ "jsonschema>=4.0.0",
27
+ "openai>=1.0.0",
28
+ "numpy>=1.24.0",
29
+ ]
30
+
31
+ [project.urls]
32
+ Homepage = "https://github.com/heybeaux/lattice"
33
+ Documentation = "https://heybeaux.github.io/lattice/"
34
+ Repository = "https://github.com/heybeaux/lattice/tree/main/packages/adapter-langgraph"
35
+ Issues = "https://github.com/heybeaux/lattice/issues"
36
+
37
+ [project.optional-dependencies]
38
+ dev = [
39
+ "pytest>=7.0.0",
40
+ "pytest-asyncio>=0.21.0",
41
+ "langgraph>=0.2.0",
42
+ "langchain-core>=0.2.0",
43
+ ]
44
+
45
+ [tool.hatch.build.targets.wheel]
46
+ packages = ["src/lattice_langgraph"]
47
+
48
+ [tool.pytest.ini_options]
49
+ testpaths = ["tests"]
50
+ asyncio_mode = "auto"
51
+
@@ -0,0 +1,31 @@
1
+ from .audit import AuditLogger
2
+ from .breaker import BreakerConfig, ValidationResult
3
+ from .contract import (
4
+ Assumption,
5
+ BudgetRecord,
6
+ Constraint,
7
+ ContractPayload,
8
+ Decision,
9
+ StateContract,
10
+ create_contract,
11
+ )
12
+ from .middleware import LatticeMiddleware
13
+ from .wrapper import LatticeValidationError, wrap_node
14
+
15
# Public API surface of lattice_langgraph.
__all__ = [
    # Node / graph wrapping
    "wrap_node",
    "LatticeMiddleware",
    "LatticeValidationError",
    # Contract model
    "StateContract",
    "ContractPayload",
    "Decision",
    "Constraint",
    "Assumption",
    "BudgetRecord",
    "create_contract",
    # Validation / audit
    "BreakerConfig",
    "ValidationResult",
    "AuditLogger",
]

# Fix: this lagged at "0.1.0" while the released distribution is 0.2.1
# (see pyproject.toml [project].version and the PKG-INFO metadata).
# Keep this in sync with pyproject.toml on every release.
__version__ = "0.2.1"
@@ -0,0 +1,47 @@
1
+ from __future__ import annotations
2
+
3
+ import json
4
+ import os
5
+ from pathlib import Path
6
+ from threading import Lock
7
+ from typing import Any
8
+
9
+ from .contract import StateContract
10
+
11
+
12
class AuditLogger:
    """Thread-safe JSONL audit logger for Lattice state contracts.

    Each call to :meth:`log` appends exactly one JSON object per line to
    *path*. A lock serializes writers so concurrent node handoffs never
    interleave partial lines.
    """

    def __init__(self, path: str | Path) -> None:
        """Remember *path* and ensure its parent directory exists."""
        self._path = Path(path)
        self._lock = Lock()
        self._path.parent.mkdir(parents=True, exist_ok=True)

    def log(
        self,
        contract: StateContract,
        *,
        validation_tier: str | None = None,
        validation_passed: bool | None = None,
        confidence: float | None = None,
        latency_ms: int | None = None,
        shadow: bool = False,
    ) -> None:
        """Append one audit entry describing *contract*.

        Validation keyword fields are written only when supplied (not None),
        so shadow-mode entries stay compact.
        """
        entry: dict[str, Any] = {
            "contract_id": contract.id,
            "trace_id": contract.trace_id,
            "from_agent": contract.from_agent,
            "to_agent": contract.to_agent,
            "timestamp": contract.timestamp,
            "shadow": shadow,
        }
        # Only include validation metadata that was actually provided.
        optional = {
            "validation_tier": validation_tier,
            "validation_passed": validation_passed,
            "confidence": confidence,
            "latency_ms": latency_ms,
        }
        entry.update({k: v for k, v in optional.items() if v is not None})

        # Fix: open with an explicit encoding — the original relied on the
        # process locale, which can corrupt non-ASCII payloads on some systems.
        with self._lock:
            with self._path.open("a", encoding="utf-8") as f:
                f.write(json.dumps(entry) + "\n")