ase_python-0.1.0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (69)
  1. ase/__init__.py +21 -0
  2. ase/adapters/__init__.py +14 -0
  3. ase/adapters/contract.py +28 -0
  4. ase/adapters/frameworks/__init__.py +17 -0
  5. ase/adapters/frameworks/base.py +259 -0
  6. ase/adapters/frameworks/langgraph.py +19 -0
  7. ase/adapters/frameworks/mcp.py +68 -0
  8. ase/adapters/frameworks/openai_agents.py +19 -0
  9. ase/adapters/frameworks/pydantic_ai.py +19 -0
  10. ase/adapters/io.py +50 -0
  11. ase/adapters/model.py +89 -0
  12. ase/adapters/protocol.py +72 -0
  13. ase/adapters/replay.py +261 -0
  14. ase/cli/__init__.py +7 -0
  15. ase/cli/_trace_outputs.py +40 -0
  16. ase/cli/adapter_cmd.py +38 -0
  17. ase/cli/certify_cmd.py +74 -0
  18. ase/cli/compare.py +145 -0
  19. ase/cli/doctor_cmd.py +45 -0
  20. ase/cli/examples_cmd.py +27 -0
  21. ase/cli/history_cmd.py +126 -0
  22. ase/cli/import_cmd.py +34 -0
  23. ase/cli/main.py +134 -0
  24. ase/cli/replay_cmd.py +48 -0
  25. ase/cli/report.py +115 -0
  26. ase/cli/spec_cmd.py +53 -0
  27. ase/cli/test_cmd.py +121 -0
  28. ase/config/env_loader.py +71 -0
  29. ase/config/loader.py +82 -0
  30. ase/config/model.py +51 -0
  31. ase/conformance/__init__.py +7 -0
  32. ase/conformance/matrix.py +111 -0
  33. ase/conformance/model.py +91 -0
  34. ase/conformance/schema.py +37 -0
  35. ase/conformance/service.py +194 -0
  36. ase/core/engine.py +348 -0
  37. ase/errors.py +59 -0
  38. ase/evaluation/__init__.py +7 -0
  39. ase/evaluation/base.py +63 -0
  40. ase/evaluation/consistency.py +79 -0
  41. ase/evaluation/correctness.py +117 -0
  42. ase/evaluation/efficiency.py +145 -0
  43. ase/evaluation/engine.py +182 -0
  44. ase/evaluation/policy.py +134 -0
  45. ase/evaluation/scoring.py +64 -0
  46. ase/evaluation/trace_summary.py +36 -0
  47. ase/examples_matrix.py +118 -0
  48. ase/reporting/__init__.py +7 -0
  49. ase/reporting/json_report.py +45 -0
  50. ase/reporting/junit.py +38 -0
  51. ase/reporting/markdown.py +32 -0
  52. ase/reporting/terminal.py +66 -0
  53. ase/scenario/__init__.py +7 -0
  54. ase/scenario/model.py +294 -0
  55. ase/scenario/parser.py +40 -0
  56. ase/storage/__init__.py +7 -0
  57. ase/storage/trace_store.py +136 -0
  58. ase/trace/__init__.py +7 -0
  59. ase/trace/builder.py +175 -0
  60. ase/trace/model.py +264 -0
  61. ase/trace/otel_export.py +75 -0
  62. ase/trace/otel_import.py +96 -0
  63. ase/trace/redaction.py +10 -0
  64. ase/trace/serializer.py +50 -0
  65. ase_python-0.1.0.dist-info/METADATA +184 -0
  66. ase_python-0.1.0.dist-info/RECORD +69 -0
  67. ase_python-0.1.0.dist-info/WHEEL +4 -0
  68. ase_python-0.1.0.dist-info/entry_points.txt +2 -0
  69. ase_python-0.1.0.dist-info/licenses/LICENSE +105 -0
@@ -0,0 +1,71 @@
1
+ """Minimal `.env` loader used to keep CLI startup dependency-free.
2
+
3
+ ASE only needs project-local environment bootstrapping, not the full feature
4
+ surface of a third-party dotenv package. Keeping this logic in-repo avoids
5
+ startup failures caused by unrelated environment plugins or broken installs.
6
+ """
7
+
8
+ from __future__ import annotations
9
+
10
+ import os
11
+ import shlex
12
+ from pathlib import Path
13
+
14
+ from ase.errors import ConfigError
15
+
16
+ ENV_FILE_NAME = ".env"
17
+
18
+
19
+ def load_local_dotenv(start: Path | None = None) -> Path | None:
20
+ """Load the nearest project `.env` without overriding existing variables."""
21
+ env_path = _find_dotenv(start or Path.cwd())
22
+ if env_path is None:
23
+ return None
24
+ try:
25
+ lines = env_path.read_text(encoding="utf-8").splitlines()
26
+ except OSError as exc:
27
+ raise ConfigError(f"failed to read env file {env_path}: {exc}") from exc
28
+ for line in lines:
29
+ parsed = _parse_env_line(line)
30
+ if parsed is None:
31
+ continue
32
+ key, value = parsed
33
+ os.environ.setdefault(key, value)
34
+ return env_path
35
+
36
+
37
def _find_dotenv(start: Path) -> Path | None:
    """Walk from *start* toward the filesystem root; return the first `.env` found."""
    base = start.resolve()
    for folder in (base, *base.parents):
        dotenv = folder / ENV_FILE_NAME
        if dotenv.exists():
            return dotenv
    return None
45
+
46
+
47
def _parse_env_line(line: str) -> tuple[str, str] | None:
    """Parse one documented `KEY=VALUE` line; return None for blanks/comments/junk."""
    text = line.strip()
    if not text or text.startswith("#"):
        return None
    # Tolerate shell-style "export KEY=VALUE" lines.
    if text.startswith("export "):
        text = text.removeprefix("export ").strip()
    if "=" not in text:
        return None
    name, _, remainder = text.partition("=")
    name = name.strip()
    if not name:
        return None
    return name, _normalize_value(remainder.strip())
60
+
61
+
62
+ def _normalize_value(raw_value: str) -> str:
63
+ """Preserve quoted values while allowing inline comments on bare values."""
64
+ if not raw_value:
65
+ return ""
66
+ if raw_value[0] in {"'", '"'}:
67
+ try:
68
+ return shlex.split(raw_value)[0]
69
+ except ValueError:
70
+ return raw_value.strip("'\"")
71
+ return raw_value.split(" #", 1)[0].strip()
ase/config/loader.py ADDED
@@ -0,0 +1,82 @@
1
+ """Configuration discovery and loading for ASE commands.
2
+
3
+ The CLI needs one predictable config flow: walk upward for `ase.yaml`, load it
4
+ if present, and otherwise return defaults. Keeping this logic in one module
5
+ avoids every command reimplementing file discovery and error handling.
6
+ """
7
+
8
+ from __future__ import annotations
9
+
10
+ from pathlib import Path
11
+
12
+ import yaml
13
+
14
+ from ase.config.model import ASEConfig
15
+ from ase.errors import ConfigError
16
+
17
+ CONFIG_FILE_NAME = "ase.yaml"
18
+
19
+
20
def find_config_file(start: Path | None = None) -> Path | None:
    """Walk upward from *start* (default: cwd) to find the nearest `ase.yaml`."""
    origin = (start if start is not None else Path.cwd()).resolve()
    for folder in (origin, *origin.parents):
        config_file = folder / CONFIG_FILE_NAME
        if config_file.exists():
            return config_file
    return None
28
+
29
+
30
def load_config(path: Path | None = None) -> ASEConfig:
    """Return the validated project config, or library defaults when absent."""
    target = path if path is not None else find_config_file()
    if target is None:
        return ASEConfig()
    config = _validate_config(_read_config_dict(target), target)
    # Declared env files are applied after validation so they can use the
    # validated `env_files` list.
    _load_declared_env_files(target, config)
    return config
39
+
40
+
41
def _read_config_dict(path: Path) -> dict[str, object]:
    """Return the YAML mapping at *path*, raising ConfigError with file context."""
    try:
        text = path.read_text(encoding="utf-8")
    except OSError as exc:
        raise ConfigError(f"failed to read config {path}: {exc}") from exc
    try:
        loaded = yaml.safe_load(text) or {}
    except yaml.YAMLError as exc:
        raise ConfigError(f"failed to parse config {path}: {exc}") from exc
    if not isinstance(loaded, dict):
        raise ConfigError(f"invalid config {path}: root must be a mapping")
    return loaded
54
+
55
+
56
def _validate_config(data: dict[str, object], path: Path) -> ASEConfig:
    """Wrap model validation so config failures surface as readable ConfigError."""
    try:
        return ASEConfig.model_validate(data)
    except Exception as exc:  # pydantic raises its own exception hierarchy
        raise ConfigError(f"invalid config {path}: {exc}") from exc
62
+
63
+
64
def _load_declared_env_files(config_path: Path, config: ASEConfig) -> None:
    """Load configured env files relative to the config directory, in order.

    Unlike the implicit project `.env` (loaded with ``setdefault``), values
    from explicitly declared env files overwrite existing environment
    variables, with later files winning over earlier ones.
    Raises ConfigError when a declared file exists but cannot be read.
    """
    # Hoisted out of the parsing loop: the original re-executed `import os`
    # for every parsed line. Kept function-local (with the private parser
    # import) to avoid a module-level dependency on the env loader.
    import os

    from ase.config.env_loader import _parse_env_line

    base_dir = config_path.parent
    for relative in config.env_files:
        env_path = base_dir / relative
        if not env_path.exists():
            # Missing declared files are tolerated (best-effort, like `.env`).
            continue
        try:
            lines = env_path.read_text(encoding="utf-8").splitlines()
        except OSError as exc:
            raise ConfigError(f"failed to read env file {env_path}: {exc}") from exc
        for line in lines:
            parsed = _parse_env_line(line)
            if parsed is None:
                continue
            key, value = parsed
            os.environ[key] = value
ase/config/model.py ADDED
@@ -0,0 +1,51 @@
1
+ """Pydantic models for ASE project configuration and CLI output formats."""
2
+
3
+ from __future__ import annotations
4
+
5
+ from enum import StrEnum
6
+
7
+ from pydantic import BaseModel, Field
8
+
9
+
10
+ class OutputFormat(StrEnum):
11
+ """Enumerate stable report/output modes shared by ASE CLI commands."""
12
+
13
+ TERMINAL = "terminal"
14
+ JSON = "json"
15
+ JUNIT = "junit"
16
+ MARKDOWN = "markdown"
17
+ OTEL_JSON = "otel-json"
18
+
19
+
20
class ProxyConfig(BaseModel):
    """Capture proxy defaults so watch/test can run reproducibly from config."""

    # Listening port; 0 presumably means "let the OS pick a free port" — confirm.
    port: int = 0
    # Loopback-only by default, keeping the proxy off external interfaces.
    bind_address: str = "127.0.0.1"
25
+
26
+
27
class CacheConfig(BaseModel):
    """Describe the on-disk response cache used for deterministic test runs."""

    # Caching is on unless explicitly disabled in ase.yaml.
    enabled: bool = True
    # Directory for cache entries; how it is resolved is up to the consumer.
    directory: str = ".ase-cache"
    # Upper bound on stored entries; eviction behavior is not defined here.
    max_entries: int = 1000
33
+
34
+
35
class TraceStoreConfig(BaseModel):
    """Describe where ASE stores local run history and how much to retain."""

    # Directory for stored traces; resolution is up to the trace store.
    directory: str = ".ase-traces"
    # Retention count for past runs; pruning behavior is not defined here.
    keep_last: int = 100
40
+
41
+
42
class ASEConfig(BaseModel):
    """Define the project-level ASE defaults loaded from `ase.yaml`."""

    # Config schema version marker.
    version: int = 1
    # Default report format for commands that honor the project config.
    output: OutputFormat = OutputFormat.TERMINAL
    # Nested sections use default_factory so instances never share mutable state.
    proxy: ProxyConfig = Field(default_factory=ProxyConfig)
    cache: CacheConfig = Field(default_factory=CacheConfig)
    traces: TraceStoreConfig = Field(default_factory=TraceStoreConfig)
    # Directories searched for scenario files — presumably project-relative; confirm.
    scenario_dirs: list[str] = Field(default_factory=lambda: ["scenarios"])
    # Extra env files, loaded in order by the config loader relative to the
    # directory containing ase.yaml; their values override the environment.
    env_files: list[str] = Field(default_factory=list)
@@ -0,0 +1,7 @@
1
+ """Source-backed conformance package that composes with recovery overlays."""
2
+
3
+ from __future__ import annotations
4
+
5
import pkgutil

# Let same-named packages elsewhere on sys.path (e.g. recovery overlays)
# contribute additional modules to this package.
__path__ = pkgutil.extend_path(__path__, __name__)
@@ -0,0 +1,111 @@
1
+ """Compatibility matrix helpers built from certification result artifacts."""
2
+
3
+ from __future__ import annotations
4
+
5
+ import datetime
6
+ import json
7
+ from pathlib import Path
8
+
9
+ from pydantic import BaseModel
10
+
11
+ from ase.conformance.model import ConformanceResult
12
+ from ase.errors import ConformanceError
13
+
14
+
15
class CompatibilityRow(BaseModel):
    """One row in the generated compatibility matrix."""

    framework: str            # "unknown" when the artifact omits it
    language: str             # "unknown" when the artifact omits it
    adapter_name: str
    adapter_version: str      # "unspecified" when the artifact omits it
    bundle_family: str
    certification_level: str  # stringified CertificationLevel value
    bundle_version: str
    passed: bool              # rendered as "certified" / "failing" in Markdown
    generated_at: str         # "YYYY-MM-DD HH:MM:SSZ" UTC timestamp
    source_artifact: str      # path of the artifact this row was built from
28
+
29
+
30
class CertificationArtifact(BaseModel):
    """One validated certification artifact paired with its source path."""

    # Stringified filesystem path the result was loaded from.
    path: str
    # The model-validated certification result payload.
    result: ConformanceResult
35
+
36
+
37
def load_results(paths: list[Path]) -> list[CertificationArtifact]:
    """Load certification results from JSON files and directories."""
    artifacts: list[CertificationArtifact] = []
    for result_path in _expand_result_paths(paths):
        artifacts.append(load_result(result_path))
    return artifacts
40
+
41
+
42
def load_result(path: Path) -> CertificationArtifact:
    """Read and validate a single certification result artifact from *path*."""
    if not path.exists():
        raise ConformanceError(f"certification result not found: {path}")
    try:
        document = json.loads(path.read_text(encoding="utf-8"))
    except (OSError, json.JSONDecodeError) as exc:
        raise ConformanceError(f"failed to read certification result {path}: {exc}") from exc
    try:
        validated = ConformanceResult.model_validate(document)
    except Exception as exc:  # pydantic validation errors
        raise ConformanceError(f"failed to validate certification result {path}: {exc}") from exc
    return CertificationArtifact(path=str(path), result=validated)
55
+
56
+
57
def build_rows(artifacts: list[CertificationArtifact]) -> list[CompatibilityRow]:
    """Flatten certification artifacts into deterministically ordered rows."""
    rows: list[CompatibilityRow] = []
    for artifact in artifacts:
        result = artifact.result
        rows.append(
            CompatibilityRow(
                framework=result.framework or "unknown",
                language=result.language or "unknown",
                adapter_name=result.adapter_name,
                adapter_version=result.adapter_version or "unspecified",
                bundle_family=result.bundle_family,
                certification_level=result.certification_level.value,
                bundle_version=result.bundle_version,
                passed=result.passed,
                generated_at=_format_generated_at(result.generated_at_ms),
                source_artifact=artifact.path,
            )
        )
    # Stable ordering keeps regenerated matrices diff-friendly.
    rows.sort(key=lambda row: (row.framework, row.language, row.adapter_name))
    return rows
75
+
76
+
77
def to_markdown(rows: list[CompatibilityRow]) -> str:
    """Render a compatibility matrix as a Markdown table."""
    lines = [
        "| Framework | Language | Adapter | Adapter Version | Bundle Family | Level | "
        "Bundle | Status | Generated | Artifact |",
        "|---|---|---|---|---|---|---|---|---|---|",
    ]
    for row in rows:
        status = "certified" if row.passed else "failing"
        lines.append(
            "| "
            f"{row.framework} | {row.language} | {row.adapter_name} | {row.adapter_version} | "
            f"{row.bundle_family} | {row.certification_level} | {row.bundle_version} | "
            f"{status} | {row.generated_at} | {row.source_artifact} |"
        )
    return "\n".join(lines)
92
+
93
+
94
+ def _expand_result_paths(paths: list[Path]) -> list[Path]:
95
+ """Accept both artifact files and downloaded-artifact directories."""
96
+ expanded: list[Path] = []
97
+ for path in paths:
98
+ if path.is_dir():
99
+ expanded.extend(sorted(path.rglob("*.cert.json")))
100
+ else:
101
+ expanded.append(path)
102
+ if not expanded:
103
+ raise ConformanceError("no certification result artifacts found")
104
+ return expanded
105
+
106
+
107
+ def _format_generated_at(ms: float) -> str:
108
+ """Render artifact generation time as an ISO-like UTC timestamp."""
109
+ return datetime.datetime.fromtimestamp(ms / 1000, datetime.UTC).strftime(
110
+ "%Y-%m-%d %H:%M:%SZ"
111
+ )
@@ -0,0 +1,91 @@
1
+ """Models for ASE adapter conformance manifests and certification output."""
2
+
3
+ from __future__ import annotations
4
+
5
+ import time
6
+ from enum import StrEnum
7
+ from pathlib import Path
8
+ from typing import Any
9
+
10
+ from pydantic import BaseModel, Field
11
+
12
+ CONFORMANCE_SPEC_VERSION = 1
13
+
14
+
15
+ class CertificationLevel(StrEnum):
16
+ """Define public certification tiers without scenario-model imports."""
17
+
18
+ CORE = "core"
19
+ STATEFUL = "stateful"
20
+ MULTI_AGENT = "multi_agent"
21
+ MCP = "mcp"
22
+ REALTIME = "realtime"
23
+
24
+
25
class ConformanceCase(BaseModel):
    """Describe one reusable certification case inside a conformance bundle."""

    # Stable identifier copied into every check result for attribution.
    case_id: str
    name: str
    # Recorded adapter event stream; resolved relative to the manifest file
    # unless absolute (see resolve_case_path).
    adapter_events: str
    # Optional scenario file whose assertions run against the replayed trace.
    scenario: str | None = None
    # Event types that must each appear at least once in the stream.
    required_event_types: list[str] = Field(default_factory=list)
    # Protocols that must be observed on at least one event.
    required_protocols: list[str] = Field(default_factory=list)
    # Minimum counts per fidelity key (e.g. "tool_calls") the trace must reach.
    minimum_fidelity: dict[str, int] = Field(default_factory=dict)
    methodology_profiles: list[str] = Field(default_factory=list)
36
+
37
+
38
class ConformanceManifest(BaseModel):
    """Describe one language-neutral certification manifest."""

    spec_version: int = CONFORMANCE_SPEC_VERSION
    manifest_id: str
    name: str
    adapter_name: str
    adapter_version: str | None = None
    bundle_family: str = "launch"
    bundle_version: str = "1.0.0"
    framework: str | None = None
    language: str | None = None
    # Tier the adapter is applying for; copied into the result's
    # certification_level by the certification service.
    certification_target: CertificationLevel = CertificationLevel.CORE
    methodology_profiles: list[str] = Field(default_factory=lambda: ["core"])
    # Cases executed in order during certification.
    cases: list[ConformanceCase] = Field(default_factory=list)
    # Free-form extra data; not interpreted by the code visible here.
    metadata: dict[str, Any] = Field(default_factory=dict)
54
+
55
+
56
class ConformanceCheckResult(BaseModel):
    """Capture one pass/fail check inside a certification run."""

    # e.g. "adapter_contract", "requires_event_type:<type>", "minimum_fidelity:<key>".
    check_id: str
    # The ConformanceCase this check belongs to.
    case_id: str
    passed: bool
    # Short human-readable description of what was checked.
    message: str
    # Structured evidence (observed values, minimums, verification dump, ...).
    details: dict[str, Any] = Field(default_factory=dict)
64
+
65
+
66
class ConformanceResult(BaseModel):
    """Represent one certification result emitted by `ase certify`."""

    spec_version: int = CONFORMANCE_SPEC_VERSION
    manifest_id: str
    manifest_name: str
    adapter_name: str
    adapter_version: str | None = None
    bundle_family: str = "launch"
    bundle_version: str = "1.0.0"
    framework: str | None = None
    language: str | None = None
    certification_level: CertificationLevel
    # True only when every individual check passed.
    passed: bool
    methodology_profiles: list[str] = Field(default_factory=list)
    checks: list[ConformanceCheckResult] = Field(default_factory=list)
    # Creation time in milliseconds since the Unix epoch.
    generated_at_ms: float = Field(default_factory=lambda: time.time() * 1000)
    # Populated by the signing step: SHA-256 hex digest of the canonical
    # payload (excluding the digest/signature fields themselves).
    report_digest_sha256: str | None = None
    # "hmac-sha256" when signed; None when unsigned.
    signature_algorithm: str | None = None
    signature: str | None = None
86
+
87
+
88
def resolve_case_path(manifest_path: Path, relative_path: str) -> Path:
    """Resolve case-local paths against the directory holding the manifest."""
    candidate = Path(relative_path)
    if candidate.is_absolute():
        return candidate
    return manifest_path.resolve().parent / candidate
@@ -0,0 +1,37 @@
1
+ """Schema validation helpers for conformance manifests and results."""
2
+
3
+ from __future__ import annotations
4
+
5
+ import json
6
+ from pathlib import Path
7
+ from typing import Any
8
+
9
+ import jsonschema
10
+
11
+ from ase.errors import ConformanceError
12
+
13
+ _SCHEMAS = Path(__file__).resolve().parents[3] / "schemas"
14
+
15
+
16
def validate_manifest_dict(data: dict[str, Any], source: str) -> None:
    """Check a manifest payload against the published manifest JSON schema."""
    schema_file = _SCHEMAS / "ase_conformance_manifest.schema.json"
    _validate(data, schema_file, source)
19
+
20
+
21
def validate_result_dict(data: dict[str, Any], source: str) -> None:
    """Check a result payload against the published result JSON schema."""
    schema_file = _SCHEMAS / "ase_conformance_result.schema.json"
    _validate(data, schema_file, source)
24
+
25
+
26
def _validate(data: dict[str, Any], schema_path: Path, source: str) -> None:
    """Load *schema_path* and raise contextual ConformanceError on any failure."""
    try:
        schema_text = schema_path.read_text(encoding="utf-8")
    except OSError as exc:
        raise ConformanceError(f"failed to read schema {schema_path}: {exc}") from exc
    try:
        schema = json.loads(schema_text)
    except json.JSONDecodeError as exc:
        raise ConformanceError(f"invalid schema {schema_path}: {exc}") from exc
    try:
        jsonschema.validate(data, schema)
    except jsonschema.ValidationError as exc:
        raise ConformanceError(f"schema validation failed for {source}: {exc.message}") from exc
@@ -0,0 +1,194 @@
1
+ """Load conformance manifests and execute certification checks."""
2
+
3
from __future__ import annotations

import hashlib
import hmac
import json
import os
from pathlib import Path

import yaml

from ase.adapters.protocol import read_and_verify
from ase.adapters.replay import trace_from_adapter_events
from ase.conformance.model import (
    ConformanceCase,
    ConformanceCheckResult,
    ConformanceManifest,
    ConformanceResult,
    resolve_case_path,
)
from ase.conformance.schema import validate_manifest_dict, validate_result_dict
from ase.errors import ConformanceError
from ase.evaluation.engine import EvaluationEngine
from ase.evaluation.trace_summary import attach_summary
from ase.scenario.parser import parse_file
26
+
27
+
28
def load_manifest(path: Path) -> ConformanceManifest:
    """Read, schema-check, and model-validate one conformance manifest."""
    if not path.exists():
        raise ConformanceError(f"conformance manifest not found: {path}")
    try:
        text = path.read_text(encoding="utf-8")
    except OSError as exc:
        raise ConformanceError(f"failed to read conformance manifest {path}: {exc}") from exc
    try:
        payload = yaml.safe_load(text) or {}
    except yaml.YAMLError as exc:
        raise ConformanceError(f"invalid conformance manifest YAML in {path}: {exc}") from exc
    validate_manifest_dict(payload, str(path))
    try:
        return ConformanceManifest.model_validate(payload)
    except Exception as exc:  # pydantic validation errors
        raise ConformanceError(f"failed to validate conformance manifest {path}: {exc}") from exc
45
+
46
+
47
def certify_manifest(
    manifest: ConformanceManifest,
    manifest_path: Path,
) -> ConformanceResult:
    """Run all conformance cases and build a certification result.

    Every case always yields one adapter-contract check; event-type,
    protocol, fidelity, and scenario checks are added only when the case
    declares them. The overall result passes only when every individual
    check passed.
    """
    checks: list[ConformanceCheckResult] = []
    eval_engine = EvaluationEngine()

    for case in manifest.cases:
        event_path = resolve_case_path(manifest_path, case.adapter_events)
        events, verification = read_and_verify(event_path)
        checks.append(_contract_check(case, verification))
        # Replay the raw adapter events into a trace once per case; both the
        # fidelity checks and the scenario check consume it.
        trace = trace_from_adapter_events(events, case.case_id, case.name)
        checks.extend(_event_type_checks(case, verification))
        checks.extend(_protocol_checks(case, events))
        checks.extend(_fidelity_checks(case, trace))
        if case.scenario:
            checks.append(_scenario_check(case, manifest_path, trace, eval_engine))

    return ConformanceResult(
        manifest_id=manifest.manifest_id,
        manifest_name=manifest.name,
        adapter_name=manifest.adapter_name,
        adapter_version=manifest.adapter_version,
        bundle_family=manifest.bundle_family,
        bundle_version=manifest.bundle_version,
        framework=manifest.framework,
        language=manifest.language,
        certification_level=manifest.certification_target,
        methodology_profiles=list(manifest.methodology_profiles),
        passed=all(check.passed for check in checks),
        checks=checks,
    )


def _contract_check(case: ConformanceCase, verification) -> ConformanceCheckResult:
    """Build the pass/fail check for the adapter event-stream contract."""
    return ConformanceCheckResult(
        check_id="adapter_contract",
        case_id=case.case_id,
        passed=verification.passed,
        message="adapter event stream validates"
        if verification.passed
        else "adapter event stream violates the contract",
        details=verification.model_dump(),
    )


def _event_type_checks(case: ConformanceCase, verification) -> list[ConformanceCheckResult]:
    """Build one check per event type the case requires to be observed."""
    observed = set(verification.event_type_counts)
    return [
        ConformanceCheckResult(
            check_id=f"requires_event_type:{event_type}",
            case_id=case.case_id,
            passed=event_type in observed,
            message=f"required event type {event_type}",
            details={"observed": sorted(observed)},
        )
        for event_type in case.required_event_types
    ]


def _protocol_checks(case: ConformanceCase, events) -> list[ConformanceCheckResult]:
    """Build one check per protocol the case requires in the event stream."""
    observed = {event.protocol for event in events if event.protocol}
    return [
        ConformanceCheckResult(
            check_id=f"requires_protocol:{protocol}",
            case_id=case.case_id,
            passed=protocol in observed,
            message=f"required protocol {protocol}",
            details={"observed": sorted(observed)},
        )
        for protocol in case.required_protocols
    ]


def _fidelity_checks(case: ConformanceCase, trace: object) -> list[ConformanceCheckResult]:
    """Build one check per minimum-fidelity floor declared by the case."""
    results: list[ConformanceCheckResult] = []
    for key, minimum in case.minimum_fidelity.items():
        observed = _observed_fidelity(trace, key)
        results.append(
            ConformanceCheckResult(
                check_id=f"minimum_fidelity:{key}",
                case_id=case.case_id,
                passed=observed >= minimum,
                message=f"minimum fidelity for {key}",
                details={"minimum": minimum, "observed": observed},
            )
        )
    return results


def _scenario_check(
    case: ConformanceCase,
    manifest_path: Path,
    trace: object,
    eval_engine: EvaluationEngine,
) -> ConformanceCheckResult:
    """Evaluate the case's scenario assertions against the replayed trace."""
    scenario_path = resolve_case_path(manifest_path, case.scenario)
    scenario = parse_file(scenario_path)
    summary = eval_engine.evaluate(
        trace=trace,
        assertions=scenario.assertions,
        context={},
    )
    attach_summary(trace, summary)
    return ConformanceCheckResult(
        check_id="scenario_assertions",
        case_id=case.case_id,
        passed=summary.passed,
        message="scenario assertions passed"
        if summary.passed
        else "scenario assertions failed",
        details={
            "ase_score": summary.ase_score,
            "failed_count": summary.failed_count,
        },
    )
147
+
148
+
149
def sign_result(
    result: ConformanceResult,
    signing_key_env: str | None,
) -> ConformanceResult:
    """Attach a digest and optional HMAC signature to a certification result.

    Raises ConformanceError when *signing_key_env* is set but the named
    environment variable is missing or empty.
    """
    # Canonical payload excludes the digest/signature fields so signing is
    # stable regardless of prior signing attempts.
    canonical = json.dumps(
        result.model_dump(exclude={"report_digest_sha256", "signature_algorithm", "signature"}),
        sort_keys=True,
        separators=(",", ":"),
    )
    digest = hashlib.sha256(canonical.encode("utf-8")).hexdigest()
    with_digest = result.model_copy(update={"report_digest_sha256": digest})
    if not signing_key_env:
        validate_result_dict(with_digest.model_dump(), "certification result")
        return with_digest

    key = os.environ.get(signing_key_env)
    if not key:
        raise ConformanceError(f"missing signing key env var: {signing_key_env}")

    mac = hmac.new(key.encode("utf-8"), canonical.encode("utf-8"), hashlib.sha256)
    signed = with_digest.model_copy(
        update={"signature_algorithm": "hmac-sha256", "signature": mac.hexdigest()}
    )
    validate_result_dict(signed.model_dump(), "signed certification result")
    return signed
179
+
180
+
181
def _observed_fidelity(trace: object, key: str) -> int:
    """Map bundle fidelity keys onto concrete counts from a replayed trace.

    Unknown keys yield 0, so an unmet minimum fails the check rather than
    crashing certification. Raises TypeError when *trace* is not a Trace.
    """
    # Local import — presumably avoids a module-level import cycle; confirm.
    from ase.trace.model import Trace

    if not isinstance(trace, Trace):
        # An `assert` here would be stripped under `python -O`; validate explicitly.
        raise TypeError(f"expected Trace, got {type(trace).__name__}")
    counts = {
        "tool_calls": trace.metrics.total_tool_calls,
        "session_events": len(trace.session_events),
        "handoff_edges": len(trace.handoff_edges),
        "protocol_events": len(trace.protocol_events),
        "agent_graph_nodes": len(trace.agent_graph.nodes),
        "external_trace_refs": len(trace.external_trace_refs),
    }
    return counts.get(key, 0)