primust-langgraph 1.0.0__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,22 @@
1
+ node_modules/
2
+ dist/
3
+ .next/
4
+ __pycache__/
5
+ *.egg-info/
6
+ .venv/
7
+ .env
8
+ .env.local
9
+ .env.*.local
10
+ *.pyc
11
+ .turbo/
12
+ *.pem
13
+ *.key
14
+ .DS_Store
15
+ .claude/
16
+ .claude.json
17
+ .claude.json.backup
18
+ .pytest_cache/
19
+ *.db
20
+ *.db-shm
21
+ *.db-wal
22
+ .vercel
@@ -0,0 +1,19 @@
1
+ Metadata-Version: 2.4
2
+ Name: primust-langgraph
3
+ Version: 1.0.0
4
+ Summary: Primust governance adapter for LangGraph
5
+ Project-URL: Homepage, https://primust.com
6
+ Project-URL: Documentation, https://docs.primust.com/adapters/langgraph
7
+ Author-email: "Primust, Inc." <eng@primust.com>
8
+ License-Expression: LicenseRef-Proprietary
9
+ Classifier: Development Status :: 5 - Production/Stable
10
+ Classifier: Intended Audience :: Developers
11
+ Classifier: Programming Language :: Python :: 3
12
+ Classifier: Programming Language :: Python :: 3.11
13
+ Classifier: Programming Language :: Python :: 3.12
14
+ Classifier: Programming Language :: Python :: 3.13
15
+ Requires-Python: >=3.11
16
+ Requires-Dist: langgraph>=0.2.0
17
+ Requires-Dist: primust>=1.0.0
18
+ Provides-Extra: dev
19
+ Requires-Dist: pytest>=8.0; extra == 'dev'
@@ -0,0 +1,33 @@
1
+ # primust-langgraph
2
+
3
+ Primust governance adapter for [LangGraph](https://github.com/langchain-ai/langgraph).
4
+
5
+ ```bash
6
+ pip install primust-langgraph
7
+ ```
8
+
9
+ ## Quickstart
10
+
11
+ ```python
12
+ import primust
13
+ from primust_langgraph import PrimustLangGraph
14
+
15
+ p = primust.Pipeline(api_key="pk_live_...", workflow_id="my-agent")
16
+ adapter = PrimustLangGraph(pipeline=p)
17
+
18
+ # Wrap your LangGraph compiled graph
19
+ instrumented = adapter.wrap(compiled_graph)
20
+ result = instrumented.invoke({"input": "..."})
21
+ ```
22
+
23
+ ## What it does
24
+
25
+ Automatically records governance checks at LangGraph node boundaries. Each node execution produces a commitment hash — raw content never leaves your environment.
26
+
27
+ ## Docs
28
+
29
+ [docs.primust.com/adapters/langgraph](https://docs.primust.com/adapters/langgraph)
30
+
31
+ ## License
32
+
33
+ Proprietary — see LICENSE file.
@@ -0,0 +1,38 @@
1
+ [build-system]
2
+ requires = ["hatchling"]
3
+ build-backend = "hatchling.build"
4
+
5
+ [project]
6
+ name = "primust-langgraph"
7
+ version = "1.0.0"
8
+ description = "Primust governance adapter for LangGraph"
9
+ requires-python = ">=3.11"
10
+ license = "LicenseRef-Proprietary"
11
+ authors = [{ name = "Primust, Inc.", email = "eng@primust.com" }]
12
+ classifiers = [
13
+ "Development Status :: 5 - Production/Stable",
14
+ "Intended Audience :: Developers",
15
+ "Programming Language :: Python :: 3",
16
+ "Programming Language :: Python :: 3.11",
17
+ "Programming Language :: Python :: 3.12",
18
+ "Programming Language :: Python :: 3.13",
19
+ ]
20
+ dependencies = [
21
+ "primust>=1.0.0",
22
+ "langgraph>=0.2.0",
23
+ ]
24
+
25
+ [project.optional-dependencies]
26
+ dev = [
27
+ "pytest>=8.0",
28
+ ]
29
+
30
+ [project.urls]
31
+ Homepage = "https://primust.com"
32
+ Documentation = "https://docs.primust.com/adapters/langgraph"
33
+
34
+ [tool.hatch.build.targets.wheel]
35
+ packages = ["src/primust_langgraph"]
36
+
37
+ [tool.pytest.ini_options]
38
+ testpaths = ["tests"]
@@ -0,0 +1,5 @@
1
+ """Primust governance adapter for LangGraph."""
2
+
3
+ from primust_langgraph.adapter import PrimustLangGraph
4
+
5
+ __all__ = ["PrimustLangGraph"]
@@ -0,0 +1,162 @@
1
+ """
2
+ Primust LangGraph adapter — instruments tool calls and node transitions.
3
+
4
+ Privacy invariant: raw tool input/output NEVER leaves the customer environment.
5
+ Only commitment hashes (poseidon2) transit to the Primust API.
6
+
7
+ Surface declaration:
8
+ surface_type: in_process_adapter
9
+ observation_mode: post_action_realtime
10
+ scope_type: full_workflow
11
+ proof_ceiling: execution (mathematical for deterministic tools)
12
+ """
13
+
14
+ from __future__ import annotations
15
+
16
+ import functools
17
+ import logging
18
+ from typing import Any, Callable
19
+
20
+ from primust import Pipeline, CheckSession
21
+
22
+ logger = logging.getLogger("primust.langgraph")
23
+
24
# Maps a manifest stage type to the strongest proof level that stage can
# produce. Covers all five Primust proof levels (the adapter test suite
# asserts exactly this set of values).
PROOF_LEVEL_MAP = {
    "deterministic_rule": "mathematical",
    "zkml_model": "verifiable_inference",
    "ml_model": "execution",
    "witnessed": "witnessed",
    "default": "attestation",
}

# Static ObservationSurface declaration for this adapter. Mirrors the
# surface declaration in the module docstring; exposed to callers via
# PrimustLangGraph.get_surface_declaration() as a defensive copy.
SURFACE_DECLARATION = {
    "surface_type": "in_process_adapter",
    "surface_name": "langgraph_tool_hooks",
    "observation_mode": "post_action_realtime",
    "scope_type": "full_workflow",
    "proof_ceiling": "execution",
    "surface_coverage_statement": (
        "All LangGraph tool calls observed via tool execution lifecycle hooks. "
        "Actions outside the LangGraph graph scope are not observed."
    ),
}
43
+
44
+
45
class PrimustLangGraph:
    """Wraps a LangGraph graph. Instruments all tool calls and node transitions.

    Each wrapped node opens a Primust check before executing, runs the
    underlying callable, and records the outcome ("pass" with the output on
    success, "error" on exception). Adapter-side failures are logged and
    swallowed so governance never blocks the tool itself (fail open).
    """

    def __init__(
        self,
        pipeline: Pipeline,
        manifest_map: dict[str, str] | None = None,
    ) -> None:
        """
        Args:
            pipeline: Configured Primust pipeline used to open checks and
                record execution results.
            manifest_map: Optional mapping of node/tool name to manifest id.
                Names absent from the map fall back to ``auto:<name>``.
        """
        self.pipeline = pipeline
        self.manifest_map = manifest_map or {}

    def wrap(self, graph: Any) -> Any:
        """
        Instrument a LangGraph StateGraph by wrapping its tool nodes.

        Returns the same graph object with instrumented tool calls.
        """
        if hasattr(graph, "nodes"):
            # Snapshot items() so we can replace entries while iterating.
            for node_name, node_fn in list(graph.nodes.items()):
                if callable(node_fn):
                    graph.nodes[node_name] = self._wrap_node(node_name, node_fn)
        return graph

    def wrap_tool(self, check_label: str, tool_fn: Callable[..., Any]) -> Callable[..., Any]:
        """Wrap a single tool function with Primust instrumentation."""
        return self._wrap_node(check_label, tool_fn)

    def _open_session(self, node_name: str) -> CheckSession | None:
        """Open a check session for *node_name*; return None on failure (fail open)."""
        manifest_id = self.manifest_map.get(node_name, f"auto:{node_name}")
        try:
            return self.pipeline.open_check(node_name, manifest_id)
        except Exception:
            logger.exception("Failed to open check for %s", node_name)
            return None

    def _record_outcome(
        self,
        session: CheckSession | None,
        node_name: str,
        args: tuple[Any, ...],
        kwargs: dict[str, Any],
        *,
        error: bool = False,
        result: Any = None,
    ) -> None:
        """Record the node outcome on *session*; swallows and logs any failure.

        On success (``error=False``) the record includes the output so the
        pipeline can derive an output commitment; on error only the input
        commitment and an "error" check_result are recorded.
        """
        if session is None:
            return
        try:
            if error:
                self.pipeline.record(
                    session,
                    input=_extract_input(args, kwargs),
                    check_result="error",
                )
            else:
                self.pipeline.record(
                    session,
                    input=_extract_input(args, kwargs),
                    check_result="pass",
                    output=result,
                )
        except Exception:
            logger.exception(
                "Failed to record %s for %s",
                "error" if error else "result",
                node_name,
            )

    def _wrap_node(self, node_name: str, node_fn: Callable[..., Any]) -> Callable[..., Any]:
        """Return an instrumented wrapper around *node_fn*.

        Picks an async wrapper when *node_fn* is a coroutine function so that
        ``await`` semantics are preserved; both wrappers share the same
        open/record flow via _open_session and _record_outcome.
        """
        adapter = self

        @functools.wraps(node_fn)
        def instrumented(*args: Any, **kwargs: Any) -> Any:
            session = adapter._open_session(node_name)
            try:
                result = node_fn(*args, **kwargs)
            except Exception:
                adapter._record_outcome(session, node_name, args, kwargs, error=True)
                raise  # re-raise the tool's exception untouched
            adapter._record_outcome(session, node_name, args, kwargs, result=result)
            return result

        @functools.wraps(node_fn)
        async def instrumented_async(*args: Any, **kwargs: Any) -> Any:
            session = adapter._open_session(node_name)
            try:
                result = await node_fn(*args, **kwargs)
            except Exception:
                adapter._record_outcome(session, node_name, args, kwargs, error=True)
                raise
            adapter._record_outcome(session, node_name, args, kwargs, result=result)
            return result

        import asyncio  # local import, as in the original, to keep module import light
        if asyncio.iscoroutinefunction(node_fn):
            return instrumented_async
        return instrumented

    def get_surface_declaration(self) -> dict[str, str]:
        """Return a copy of the static ObservationSurface declaration."""
        return dict(SURFACE_DECLARATION)
154
+
155
+
156
+ def _extract_input(args: tuple[Any, ...], kwargs: dict[str, Any]) -> Any:
157
+ """Extract tool input from args/kwargs for commitment hashing."""
158
+ if kwargs:
159
+ return kwargs
160
+ if len(args) == 1:
161
+ return args[0]
162
+ return list(args)
@@ -0,0 +1,277 @@
1
+ """
2
+ P11-A: LangGraph adapter tests — 9 MUST PASS.
3
+
4
+ Uses mock Pipeline and mock LangGraph graph objects.
5
+ """
6
+
7
+ from __future__ import annotations
8
+
9
+ import json
10
+ from typing import Any
11
+ from unittest.mock import MagicMock, patch
12
+
13
+ import httpx
14
+ import pytest
15
+
16
+ from primust_artifact_core import commit, commit_output
17
+ from primust.pipeline import Pipeline, CheckSession
18
+
19
+ from primust_langgraph import PrimustLangGraph
20
+
21
+
22
+ # ── Mock HTTP transport ──
23
+
24
+
25
class MockTransport(httpx.BaseTransport):
    """Request-capturing transport that fakes the Primust API endpoints."""

    def __init__(self) -> None:
        self.requests: list[dict[str, Any]] = []
        self._run_counter = 0

    def handle_request(self, request: httpx.Request) -> httpx.Response:
        # Capture every request (parsed + raw) for later inspection in tests.
        raw = request.content.decode("utf-8") if request.content else ""
        payload = json.loads(raw) if raw else {}
        self.requests.append(
            {
                "method": request.method,
                "url": str(request.url),
                "body": payload,
                "raw_body": raw,
            }
        )

        path = request.url.path
        if request.method == "POST":
            if path == "/api/v1/runs":
                self._run_counter += 1
                return httpx.Response(
                    200,
                    json={
                        "run_id": f"run_{self._run_counter:04d}",
                        "policy_snapshot_hash": "sha256:" + "aa" * 32,
                        "process_context_hash": payload.get("process_context_hash"),
                    },
                )
            if "/records" in path:
                return httpx.Response(
                    200,
                    json={
                        "record_id": "rec_test001",
                        "chain_hash": "sha256:" + "bb" * 32,
                    },
                )
            if "/close" in path:
                return httpx.Response(
                    200,
                    json={
                        "vpec_id": "vpec_test001",
                        "schema_version": "4.0.0",
                        "state": "signed",
                    },
                )
        return httpx.Response(404, json={"detail": "not found"})
59
+
60
+
61
+ # ── Mock LangGraph graph ──
62
+
63
+
64
class MockStateGraph:
    """Minimal stand-in for a LangGraph StateGraph: just a name->fn node map."""

    def __init__(self) -> None:
        # Insertion-ordered mapping of node name to node callable.
        self.nodes: dict[str, Any] = dict()

    def add_node(self, name: str, fn: Any) -> None:
        """Register *fn* under *name*, overwriting any existing node."""
        self.nodes[name] = fn
72
+
73
+
74
@pytest.fixture
def transport() -> MockTransport:
    # Fresh capturing transport per test so each test sees only its own requests.
    return MockTransport()
77
+
78
+
79
@pytest.fixture
def pipeline(transport: MockTransport) -> Pipeline:
    """Pipeline wired to the mock transport — no real network traffic."""
    http_client = httpx.Client(
        base_url="https://api.primust.com",
        transport=transport,
        headers={"X-API-Key": "pk_live_org001_us_secret"},
    )
    return Pipeline(
        api_key="pk_live_org001_us_secret",
        workflow_id="wf_langgraph",
        process_context_hash="sha256:" + "cc" * 32,
        http_client=http_client,
    )
92
+
93
+
94
+ # ── Tests ──
95
+
96
+
97
class TestLangGraphAdapter:
    """P11-A: LangGraph adapter."""

    def test_tool_call_creates_record_with_commitment_hash(
        self, pipeline: Pipeline, transport: MockTransport
    ) -> None:
        """MUST PASS: tool call → CheckExecutionRecord created with commitment_hash."""
        adapter = PrimustLangGraph(
            pipeline=pipeline,
            manifest_map={"search": "manifest_search_v1"},
        )

        def search_tool(query: str) -> str:
            return f"Results for {query}"

        wrapped = adapter.wrap_tool("search", search_tool)
        result = wrapped(query="test query")

        # The wrapper must pass the tool's result through unchanged.
        assert result == "Results for test query"

        record_req = [r for r in transport.requests if "/records" in r["url"]]
        assert len(record_req) == 1
        body = record_req[0]["body"]
        # Only a commitment hash transits, never the raw input.
        assert body["commitment_hash"].startswith("poseidon2:")
        assert body["manifest_id"] == "manifest_search_v1"

    def test_raw_tool_input_not_in_http_body(
        self, pipeline: Pipeline, transport: MockTransport
    ) -> None:
        """MUST PASS: raw tool input not in HTTP body (interceptor test)."""
        adapter = PrimustLangGraph(pipeline=pipeline)

        sensitive_input = "this is sensitive PII data that must never transit"

        def pii_tool(data: str) -> str:
            return "processed"

        wrapped = adapter.wrap_tool("pii_tool", pii_tool)
        wrapped(data=sensitive_input)

        record_req = [r for r in transport.requests if "/records" in r["url"]]
        raw_body = record_req[0]["raw_body"]
        # Privacy invariant: the plaintext never appears on the wire.
        assert sensitive_input not in raw_body

    def test_check_open_tst_fetched_before_tool_executes(
        self, pipeline: Pipeline, transport: MockTransport
    ) -> None:
        """MUST PASS: check_open_tst fetched before tool executes."""
        adapter = PrimustLangGraph(pipeline=pipeline)

        def slow_tool(x: int) -> int:
            return x * 2

        wrapped = adapter.wrap_tool("slow_tool", slow_tool)
        wrapped(x=42)

        record_req = [r for r in transport.requests if "/records" in r["url"]]
        body = record_req[0]["body"]
        assert body["check_open_tst"] is not None
        from datetime import datetime
        datetime.fromisoformat(body["check_open_tst"])  # valid ISO string

    def test_check_close_tst_at_record_time(
        self, pipeline: Pipeline, transport: MockTransport
    ) -> None:
        """MUST PASS: check_close_tst fetched at p.record() time."""
        adapter = PrimustLangGraph(pipeline=pipeline)

        def tool(x: int) -> int:
            return x + 1

        wrapped = adapter.wrap_tool("tool", tool)
        wrapped(x=1)

        record_req = [r for r in transport.requests if "/records" in r["url"]]
        body = record_req[0]["body"]
        assert body["check_close_tst"] is not None
        from datetime import datetime
        # Raises ValueError if the server-recorded close timestamp is not ISO 8601.
        datetime.fromisoformat(body["check_close_tst"])

    def test_output_commitment_present_when_tool_returns(
        self, pipeline: Pipeline, transport: MockTransport
    ) -> None:
        """MUST PASS: output_commitment present when tool returns output."""
        adapter = PrimustLangGraph(pipeline=pipeline)

        def calc_tool(a: int, b: int) -> dict[str, int]:
            return {"sum": a + b}

        wrapped = adapter.wrap_tool("calc_tool", calc_tool)
        result = wrapped(a=3, b=4)
        assert result == {"sum": 7}

        record_req = [r for r in transport.requests if "/records" in r["url"]]
        body = record_req[0]["body"]
        # Output is committed (hashed), not transmitted raw.
        assert body["output_commitment"].startswith("poseidon2:")

    def test_manifest_hash_captured_per_record(
        self, pipeline: Pipeline, transport: MockTransport
    ) -> None:
        """MUST PASS: manifest_hash captured per record."""
        adapter = PrimustLangGraph(
            pipeline=pipeline,
            manifest_map={"tool_a": "manifest_a_v1"},
        )

        wrapped = adapter.wrap_tool("tool_a", lambda: "ok")
        wrapped()

        record_req = [r for r in transport.requests if "/records" in r["url"]]
        body = record_req[0]["body"]
        assert body["manifest_id"] == "manifest_a_v1"

    def test_process_context_hash_propagated(
        self, pipeline: Pipeline, transport: MockTransport
    ) -> None:
        """MUST PASS: process_context_hash propagated from Pipeline."""
        adapter = PrimustLangGraph(pipeline=pipeline)
        wrapped = adapter.wrap_tool("tool", lambda: "ok")
        wrapped()

        # The run-creation request must carry the hash configured on the fixture.
        run_req = [r for r in transport.requests if r["url"].endswith("/api/v1/runs")]
        assert run_req[0]["body"]["process_context_hash"] == "sha256:" + "cc" * 32

    def test_surface_type_in_process_adapter(
        self, pipeline: Pipeline, transport: MockTransport
    ) -> None:
        """MUST PASS: surface_type = in_process_adapter in ObservationSurface."""
        adapter = PrimustLangGraph(pipeline=pipeline)
        surface = adapter.get_surface_declaration()
        assert surface["surface_type"] == "in_process_adapter"
        assert surface["observation_mode"] == "post_action_realtime"
        assert surface["scope_type"] == "full_workflow"

    def test_all_five_proof_levels_reachable(self) -> None:
        """MUST PASS: all 5 proof levels reachable depending on manifest stage type."""
        from primust_langgraph.adapter import PROOF_LEVEL_MAP
        expected = {"mathematical", "verifiable_inference", "execution", "witnessed", "attestation"}
        assert set(PROOF_LEVEL_MAP.values()) == expected

    def test_wrap_graph_instruments_nodes(
        self, pipeline: Pipeline, transport: MockTransport
    ) -> None:
        """wrap() instruments all nodes in a StateGraph."""
        adapter = PrimustLangGraph(
            pipeline=pipeline,
            manifest_map={"node_a": "manifest_a"},
        )

        graph = MockStateGraph()
        graph.add_node("node_a", lambda state: {"output": "done"})

        adapter.wrap(graph)

        # Call the wrapped node
        result = graph.nodes["node_a"](state={"input": "test"})
        assert result == {"output": "done"}

        record_req = [r for r in transport.requests if "/records" in r["url"]]
        assert len(record_req) == 1

    def test_adapter_failure_does_not_block_tool(
        self, pipeline: Pipeline, transport: MockTransport
    ) -> None:
        """Adapter failure does not block tool execution (fail open)."""
        adapter = PrimustLangGraph(pipeline=pipeline)

        # Force pipeline.record to raise
        original_record = pipeline.record
        def broken_record(*a: Any, **kw: Any) -> Any:
            raise RuntimeError("record exploded")
        pipeline.record = broken_record  # type: ignore

        def tool(x: int) -> int:
            return x * 10

        wrapped = adapter.wrap_tool("tool", tool)
        result = wrapped(x=5)
        assert result == 50  # tool still returns correctly

        # NOTE(review): restore would be safer in try/finally, but fixtures are
        # per-test so a leaked patch cannot cross test boundaries here.
        pipeline.record = original_record  # type: ignore