agentcapsule 0.1.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,11 @@
1
+ """Agent Capsule Protocol V0."""
2
+
3
+ from agentcapsule.envelope import CapsuleEnvelope, build_envelope, parse_envelope, render_envelope, verify_envelope
4
+
5
+ __all__ = [
6
+ "CapsuleEnvelope",
7
+ "build_envelope",
8
+ "parse_envelope",
9
+ "render_envelope",
10
+ "verify_envelope",
11
+ ]
agentcapsule/audit.py ADDED
@@ -0,0 +1,73 @@
1
+ """Structured audit events for Agent Capsule governance."""
2
+
3
+ from __future__ import annotations
4
+
5
+ from typing import Any
6
+
7
+ from agentcapsule.policy import CapsulePolicy, policy_to_dict
8
+
9
+ AUDIT_SCHEMA_VERSION = 1
10
+
11
+
12
def disposition_from_risk(risk_level: str) -> str:
    """Map a scanner risk level to an audit disposition.

    "high" blocks, "medium" requires review, and every other value
    (including unknown levels) is allowed.
    """
    dispositions = {"high": "block", "medium": "review"}
    return dispositions.get(risk_level, "allow")
18
+
19
+
20
def disposition_from_status(*, ok: bool, signature_trust: dict[str, object] | None = None) -> str:
    """Map a verification outcome to an audit disposition.

    Failure always blocks. A successful verification is allowed unless a
    non-empty trust record reports anything other than "trusted", in which
    case the operation is flagged for review.
    """
    if not ok:
        return "block"
    trusted = not signature_trust or signature_trust.get("status") == "trusted"
    return "allow" if trusted else "review"
26
+
27
+
28
def audit_event(
    *,
    operation: str,
    disposition: str,
    policy: CapsulePolicy,
    result: dict[str, Any],
    subject: str | None = None,
    reasons: list[str] | None = None,
) -> dict[str, object]:
    """Build a structured governance audit event.

    When *reasons* is empty or omitted, default reasons are derived from
    *result* and *disposition* via _reasons_from_result.
    """
    explained = reasons or _reasons_from_result(result, disposition)
    event: dict[str, object] = {
        "event_type": "agent_capsule_audit",
        "schema_version": AUDIT_SCHEMA_VERSION,
        "operation": operation,
        "disposition": disposition,
        "subject": subject,
        "policy": policy_to_dict(policy),
        "reasons": explained,
        "result": result,
    }
    return event
47
+
48
+
49
def scan_audit_event(*, report: dict[str, Any], policy: CapsulePolicy, subject: str | None = None) -> dict[str, object]:
    """Wrap a scan *report* in an audit event.

    The report's own "reasons" entries (stringified) take precedence over the
    defaults audit_event would otherwise derive.
    """
    scan_reasons = [str(item) for item in report.get("reasons", [])]
    return audit_event(
        operation="scan",
        disposition=str(report["disposition"]),
        policy=policy,
        subject=subject,
        reasons=scan_reasons,
        result=report,
    )
58
+
59
+
60
+ def _reasons_from_result(result: dict[str, Any], disposition: str) -> list[str]:
61
+ if result.get("verification") == "ok" or result.get("verification_status") == "ok":
62
+ trust = result.get("signature_trust")
63
+ if isinstance(trust, dict) and trust.get("status") != "trusted":
64
+ return [str(trust.get("reason", "signature is valid but not trusted"))]
65
+ if disposition == "allow":
66
+ return ["operation verified successfully"]
67
+ if result.get("verification_error"):
68
+ return [str(result["verification_error"])]
69
+ if disposition == "block":
70
+ return ["operation failed policy or verification"]
71
+ if disposition == "review":
72
+ return ["operation requires review"]
73
+ return []
@@ -0,0 +1,157 @@
1
+ """Payload encoding backends for Agent Capsules."""
2
+
3
+ from __future__ import annotations
4
+
5
+ import base64
6
+ import binascii
7
+ import hashlib
8
+ import json
9
+ from collections.abc import Mapping
10
+ from dataclasses import dataclass
11
+ from pathlib import Path
12
+ from typing import Protocol
13
+
14
+ from lmcodec.codec import decode as lmcodec_decode
15
+ from lmcodec.codec import encode as lmcodec_encode
16
+ from lmcodec.errors import LMCodecError
17
+ from lmcodec.lm import FixedLM, NGramLM
18
+
19
+ from agentcapsule.errors import CapsuleError
20
+ from agentcapsule.registry import known_codecs as registry_known_codecs
21
+
22
+
23
class Backend(Protocol):
    """Structural interface for capsule payload codec backends.

    Implementations provide a stable ``name`` plus an inverse pair of
    bytes-to-text / text-to-bytes transforms; *headers* carries optional
    codec metadata (used by the n-gram backend, ignored by others).
    """

    # Public identifier; used as the registry key in _BACKENDS.
    name: str

    def encode(self, payload: bytes, *, headers: Mapping[str, str] | None = None) -> str:
        """Encode bytes into text."""

    def decode(self, text: str, *, headers: Mapping[str, str] | None = None) -> bytes:
        """Decode text into bytes."""
31
+
32
+
33
@dataclass(frozen=True)
class Base64Backend:
    """Codec backend that stores the payload as standard base64 text."""

    name: str = "base64"

    def encode(self, payload: bytes, *, headers: Mapping[str, str] | None = None) -> str:
        """Render *payload* as ASCII base64 text (*headers* is ignored)."""
        encoded = base64.b64encode(payload)
        return encoded.decode("ascii")

    def decode(self, text: str, *, headers: Mapping[str, str] | None = None) -> bytes:
        """Decode base64 *text*, tolerating embedded whitespace.

        Raises:
            CapsuleError: if the text contains non-ASCII or non-base64 data.
        """
        stripped = "".join(text.split())
        try:
            raw = stripped.encode("ascii")
            # validate=True rejects characters outside the base64 alphabet.
            return base64.b64decode(raw, validate=True)
        except (binascii.Error, UnicodeEncodeError) as exc:
            raise CapsuleError("invalid base64 payload") from exc
46
+
47
+
48
@dataclass(frozen=True)
class LMCodecFixedBackend:
    """Codec backend using lmcodec with the fixed language model.

    Both directions translate LMCodecError into CapsuleError so callers only
    ever see capsule-domain exceptions (matching LMCodecNGramV2Backend).
    """

    name: str = "lmcodec-fixed"

    def encode(self, payload: bytes, *, headers: Mapping[str, str] | None = None) -> str:
        """Encode *payload* as lmcodec text wrapped at 80 columns.

        Raises:
            CapsuleError: if lmcodec rejects the payload.
        """
        try:
            return lmcodec_encode(payload, model=FixedLM(), wrap=80)
        except LMCodecError as exc:
            # Fix: previously LMCodecError leaked from encode, unlike every
            # other backend method in this module, which wraps it.
            raise CapsuleError(str(exc)) from exc

    def decode(self, text: str, *, headers: Mapping[str, str] | None = None) -> bytes:
        """Decode lmcodec *text* back into bytes.

        Raises:
            CapsuleError: if the text is not valid lmcodec output.
        """
        try:
            return lmcodec_decode(text, model=FixedLM())
        except LMCodecError as exc:
            raise CapsuleError(str(exc)) from exc
60
+
61
+
62
@dataclass(frozen=True)
class LMCodecNGramV2Backend:
    """Codec backend using lmcodec with an n-gram model carried in headers.

    No model is stored on the backend itself: both directions rebuild it from
    the capsule headers via _ngram_model_from_headers, so callers must supply
    headers containing the inline base64 model metadata.
    """

    name: str = "lmcodec-ngram-v2"

    def encode(self, payload: bytes, *, headers: Mapping[str, str] | None = None) -> str:
        """Encode *payload* as lmcodec text wrapped at 80 columns.

        Raises:
            CapsuleError: if *headers* lack a valid inline model or lmcodec fails.
        """
        model = _ngram_model_from_headers(headers)
        try:
            return lmcodec_encode(payload, model=model, wrap=80)
        except LMCodecError as exc:
            raise CapsuleError(str(exc)) from exc

    def decode(self, text: str, *, headers: Mapping[str, str] | None = None) -> bytes:
        """Decode lmcodec *text* using the model described by *headers*.

        Raises:
            CapsuleError: if *headers* lack a valid inline model or lmcodec fails.
        """
        model = _ngram_model_from_headers(headers)
        try:
            return lmcodec_decode(text, model=model)
        except LMCodecError as exc:
            raise CapsuleError(str(exc)) from exc
79
+
80
+
81
# Registry of built-in codec backends, keyed by each backend's public name
# (the dataclass `name` defaults supply the same keys as before).
_BACKENDS: dict[str, Backend] = {
    backend.name: backend
    for backend in (Base64Backend(), LMCodecFixedBackend(), LMCodecNGramV2Backend())
}
86
+
87
+
88
def get_backend(name: str) -> Backend:
    """Return the registered codec backend for *name*.

    Raises:
        CapsuleError: if no backend is registered under *name* (the original
            KeyError is kept as the exception's cause).
    """
    try:
        backend = _BACKENDS[name]
    except KeyError as missing:
        raise CapsuleError(f"unknown capsule codec: {name}") from missing
    return backend
93
+
94
+
95
def known_codecs() -> tuple[str, ...]:
    """Return the codec names published by the capsule registry (pure delegation)."""
    return registry_known_codecs()
97
+
98
+
99
def ngram_v2_headers_from_model_path(path: str | Path) -> dict[str, str]:
    """Load an n-gram model file and describe it as inline capsule headers.

    The returned headers embed the model's canonical JSON (base64), its
    SHA-256 digest, its fingerprint, and its scalar parameters, which is the
    exact metadata _ngram_model_from_headers later validates and consumes.

    Raises:
        CapsuleError: if the file cannot be read or parsed as an n-gram model.
    """
    try:
        model = NGramLM.load(path)
    except (OSError, ValueError, KeyError, json.JSONDecodeError) as exc:
        raise CapsuleError(f"invalid n-gram model: {path}") from exc
    model_bytes = model.to_canonical_json().encode("utf-8")
    digest = hashlib.sha256(model_bytes).hexdigest()
    inline_b64 = base64.b64encode(model_bytes).decode("ascii")
    headers = {
        "lmcodec_backend_version": "ngram-v2",
        "lmcodec_model_type": model.model_type,
        "lmcodec_model_fingerprint": model.fingerprint,
        "lmcodec_model_sha256": digest,
        "lmcodec_model_encoding": "inline-base64-json",
        "lmcodec_model_json_b64": inline_b64,
        "lmcodec_ngram_order": str(model.order),
        # 17 significant digits round-trips any IEEE-754 double exactly.
        "lmcodec_ngram_uniform_mix": f"{model.uniform_mix:.17g}",
    }
    return headers
116
+
117
+
118
def _ngram_model_from_headers(headers: Mapping[str, str] | None) -> NGramLM:
    """Rebuild the inline n-gram model carried in capsule headers.

    Validation order: headers present, encoding marker, required fields,
    base64 payload, SHA-256 digest, then model fingerprint.

    Raises:
        CapsuleError: if any piece of the inline model metadata is missing,
            malformed, or fails an integrity check.
    """
    if headers is None:
        raise CapsuleError("lmcodec-ngram-v2 requires inline model metadata")
    if headers.get("lmcodec_model_encoding") != "inline-base64-json":
        raise CapsuleError("lmcodec-ngram-v2 requires inline base64 model metadata")
    encoded = headers.get("lmcodec_model_json_b64")
    expected_sha = headers.get("lmcodec_model_sha256")
    expected_fingerprint = headers.get("lmcodec_model_fingerprint")
    if not encoded or not expected_sha or not expected_fingerprint:
        raise CapsuleError("lmcodec-ngram-v2 model metadata is incomplete")
    try:
        # validate=True rejects non-alphabet characters instead of ignoring them.
        model_bytes = base64.b64decode(encoded.encode("ascii"), validate=True)
    except (binascii.Error, UnicodeEncodeError) as exc:
        raise CapsuleError("invalid inline n-gram model encoding") from exc
    # First integrity check: raw bytes must match the advertised digest.
    if hashlib.sha256(model_bytes).hexdigest() != expected_sha:
        raise CapsuleError("inline n-gram model SHA256 mismatch")
    model = _ngram_model_from_json(model_bytes)
    # Second integrity check: the parsed model's fingerprint must match too.
    if model.fingerprint != expected_fingerprint:
        raise CapsuleError("inline n-gram model fingerprint mismatch")
    return model
138
+
139
+
140
def _ngram_model_from_json(model_bytes: bytes) -> NGramLM:
    """Parse canonical n-gram model JSON bytes into an NGramLM instance.

    Raises:
        CapsuleError: if the bytes are not UTF-8 JSON, declare a different
            model type, or lack the expected n-gram fields.
    """
    try:
        data = json.loads(model_bytes.decode("utf-8"))
    except (UnicodeDecodeError, json.JSONDecodeError) as exc:
        raise CapsuleError("invalid inline n-gram model JSON") from exc
    if data.get("model_type") != NGramLM.model_type:
        raise CapsuleError("inline model is not an n-gram model")
    try:
        # Copy each count row into a fresh list (also coerces any iterable).
        counts = {key: list(value) for key, value in data["counts"].items()}
        return NGramLM(
            vocab=data["vocab"],
            order=int(data["order"]),
            alpha=float(data["alpha"]),
            uniform_mix=float(data["uniform_mix"]),
            counts=counts,
        )
    except (KeyError, TypeError, ValueError) as exc:
        raise CapsuleError("invalid inline n-gram model fields") from exc