afterai-runner 0.1.0__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (32) hide show
  1. afterai_runner-0.1.0/PKG-INFO +117 -0
  2. afterai_runner-0.1.0/README.md +92 -0
  3. afterai_runner-0.1.0/pyproject.toml +47 -0
  4. afterai_runner-0.1.0/setup.cfg +4 -0
  5. afterai_runner-0.1.0/src/afterai_runner/__init__.py +7 -0
  6. afterai_runner-0.1.0/src/afterai_runner/__version__.py +1 -0
  7. afterai_runner-0.1.0/src/afterai_runner/ace_rules.py +180 -0
  8. afterai_runner-0.1.0/src/afterai_runner/artifact.py +115 -0
  9. afterai_runner-0.1.0/src/afterai_runner/cli.py +272 -0
  10. afterai_runner-0.1.0/src/afterai_runner/config.py +21 -0
  11. afterai_runner-0.1.0/src/afterai_runner/dataset.py +89 -0
  12. afterai_runner-0.1.0/src/afterai_runner/emit.py +28 -0
  13. afterai_runner-0.1.0/src/afterai_runner/eval/__init__.py +0 -0
  14. afterai_runner-0.1.0/src/afterai_runner/eval/agent_config.py +113 -0
  15. afterai_runner-0.1.0/src/afterai_runner/eval/auth.py +47 -0
  16. afterai_runner-0.1.0/src/afterai_runner/eval/connectors/__init__.py +0 -0
  17. afterai_runner-0.1.0/src/afterai_runner/eval/connectors/base.py +8 -0
  18. afterai_runner-0.1.0/src/afterai_runner/eval/connectors/foundry.py +175 -0
  19. afterai_runner-0.1.0/src/afterai_runner/eval/connectors/http.py +42 -0
  20. afterai_runner-0.1.0/src/afterai_runner/eval/loop.py +121 -0
  21. afterai_runner-0.1.0/src/afterai_runner/manifest.py +83 -0
  22. afterai_runner-0.1.0/src/afterai_runner/payload.py +78 -0
  23. afterai_runner-0.1.0/src/afterai_runner.egg-info/PKG-INFO +117 -0
  24. afterai_runner-0.1.0/src/afterai_runner.egg-info/SOURCES.txt +30 -0
  25. afterai_runner-0.1.0/src/afterai_runner.egg-info/dependency_links.txt +1 -0
  26. afterai_runner-0.1.0/src/afterai_runner.egg-info/entry_points.txt +2 -0
  27. afterai_runner-0.1.0/src/afterai_runner.egg-info/requires.txt +8 -0
  28. afterai_runner-0.1.0/src/afterai_runner.egg-info/top_level.txt +1 -0
  29. afterai_runner-0.1.0/tests/test_artifact.py +124 -0
  30. afterai_runner-0.1.0/tests/test_emit.py +53 -0
  31. afterai_runner-0.1.0/tests/test_manifest.py +135 -0
  32. afterai_runner-0.1.0/tests/test_payload.py +99 -0
@@ -0,0 +1,117 @@
1
+ Metadata-Version: 2.4
2
+ Name: afterai-runner
3
+ Version: 0.1.0
4
+ Summary: AfterAI Runner — AIS signal emission and agent evaluation on customer infrastructure
5
+ Author-email: AfterAI <support@useafter.ai>
6
+ License: MIT
7
+ Keywords: afterai,ais,runner,eval,signals,evaluation
8
+ Classifier: Development Status :: 3 - Alpha
9
+ Classifier: Intended Audience :: Developers
10
+ Classifier: License :: OSI Approved :: MIT License
11
+ Classifier: Programming Language :: Python :: 3
12
+ Classifier: Programming Language :: Python :: 3.9
13
+ Classifier: Programming Language :: Python :: 3.10
14
+ Classifier: Programming Language :: Python :: 3.11
15
+ Classifier: Programming Language :: Python :: 3.12
16
+ Requires-Python: >=3.9
17
+ Description-Content-Type: text/markdown
18
+ Requires-Dist: pydantic>=2.0.0
19
+ Requires-Dist: requests>=2.28.0
20
+ Requires-Dist: afterai>=0.1.0
21
+ Requires-Dist: pyyaml>=6.0
22
+ Provides-Extra: dev
23
+ Requires-Dist: pytest>=7.0.0; extra == "dev"
24
+ Requires-Dist: pytest-cov>=4.0.0; extra == "dev"
25
+
26
+ # AfterAI Runner v0
27
+
28
+ Runner v0 collects **pre-aggregated** D1–W5 metrics from customer-controlled artifact files and emits AIS payloads to the AfterAI API. It does not manage baselines, evaluate thresholds, or trigger ACE.
29
+
30
+ - **Manifest-driven**: you provide a JSON or YAML manifest listing `system` + local artifact paths.
31
+ - **Artifact contract**: each artifact file is one signal (required: `signal_key`, `cadence`, `window`, `metrics`).
32
+ - **Stateless**: no local state; duplicate sends are allowed. Always sends `external_id`.
33
+ - **Local filesystem only (v0)**: artifact paths must be local; no S3, GCS, or HTTPS.
34
+
35
+ ## Install
36
+
37
+ ```bash
38
+ pip install afterai-runner
39
+ ```
40
+
41
+ Or from source:
42
+
43
+ ```bash
44
+ cd afterai-runner && pip install -e .
45
+ ```
46
+
47
+ ## Config
48
+
49
+ | Env | Required | Default |
50
+ |-----|----------|---------|
51
+ | `AFTERAI_API_KEY` | Yes | — |
52
+ | `AFTERAI_BASE_URL` | No | `https://api.useafter.ai` |
53
+
54
+ ## Usage
55
+
56
+ ```bash
57
+ # Run ingestion from a manifest
58
+ afterai-runner run --manifest manifest.yaml
59
+
60
+ # Print payloads without sending
61
+ afterai-runner run --manifest manifest.yaml --dry-run
62
+
63
+ # Optional: ACE-from-AIS rule (out-of-band). Reads GET /signals/debug, applies threshold, POSTs inferred ACE.
64
+ afterai-runner ace-from-ais --min-signals 2 --hours 24
65
+ afterai-runner ace-from-ais --min-signals 2 --severity high,critical --dry-run
66
+ ```
67
+
68
+ ## Manifest example
69
+
70
+ **manifest.yaml** (or `.json`):
71
+
72
+ ```yaml
73
+ version: "2026-02"
74
+ artifacts:
75
+ - system: my-system-01
76
+ path: /path/to/D1-2026-02-03.json
77
+ - system: my-system-01
78
+ path: /path/to/W1-2026-W05.json
79
+ ```
80
+
81
+ `path` must be a **local filesystem path** (no remote URIs in v0).
82
+
83
+ ## Artifact example
84
+
85
+ One file = one signal. **D1-2026-02-03.json**:
86
+
87
+ ```json
88
+ {
89
+ "signal_key": "D1",
90
+ "cadence": "daily",
91
+ "window": {
92
+ "start": "2026-02-03T00:00:00Z",
93
+ "end": "2026-02-03T23:59:59Z"
94
+ },
95
+ "metrics": {
96
+ "score_mean": 0.87,
97
+ "fail_rate": 0.03
98
+ }
99
+ }
100
+ ```
101
+
102
+ Optional passthrough: `severity`, `confidence`, `tags`, `source`, `evidence_refs`, `baseline_ref`, `created_at`.
103
+ Runner adds: `system`, `external_id`, `type: "ais"`, and `created_at` if missing; defaults `severity="low"`, `confidence=0.8` if missing.
104
+
105
+ ## external_id format
106
+
107
+ - **Daily**: `{system}:{signal_key}:{YYYY-MM-DD}`
108
+ - **Weekly**: `{system}:{signal_key}:{YYYY-Www}` (ISO week from `window.end`)
109
+
110
+ ## Docs
111
+
112
+ - [Quickstart](docs/quickstart.md)
113
+ - [Deployment recipes](docs/deployment-recipes.md) (cron, GitHub Actions, K8s CronJob)
114
+
115
+ ## License
116
+
117
+ MIT
@@ -0,0 +1,92 @@
1
+ # AfterAI Runner v0
2
+
3
+ Runner v0 collects **pre-aggregated** D1–W5 metrics from customer-controlled artifact files and emits AIS payloads to the AfterAI API. It does not manage baselines, evaluate thresholds, or trigger ACE.
4
+
5
+ - **Manifest-driven**: you provide a JSON or YAML manifest listing `system` + local artifact paths.
6
+ - **Artifact contract**: each artifact file is one signal (required: `signal_key`, `cadence`, `window`, `metrics`).
7
+ - **Stateless**: no local state; duplicate sends are allowed. Always sends `external_id`.
8
+ - **Local filesystem only (v0)**: artifact paths must be local; no S3, GCS, or HTTPS.
9
+
10
+ ## Install
11
+
12
+ ```bash
13
+ pip install afterai-runner
14
+ ```
15
+
16
+ Or from source:
17
+
18
+ ```bash
19
+ cd afterai-runner && pip install -e .
20
+ ```
21
+
22
+ ## Config
23
+
24
+ | Env | Required | Default |
25
+ |-----|----------|---------|
26
+ | `AFTERAI_API_KEY` | Yes | — |
27
+ | `AFTERAI_BASE_URL` | No | `https://api.useafter.ai` |
28
+
29
+ ## Usage
30
+
31
+ ```bash
32
+ # Run ingestion from a manifest
33
+ afterai-runner run --manifest manifest.yaml
34
+
35
+ # Print payloads without sending
36
+ afterai-runner run --manifest manifest.yaml --dry-run
37
+
38
+ # Optional: ACE-from-AIS rule (out-of-band). Reads GET /signals/debug, applies threshold, POSTs inferred ACE.
39
+ afterai-runner ace-from-ais --min-signals 2 --hours 24
40
+ afterai-runner ace-from-ais --min-signals 2 --severity high,critical --dry-run
41
+ ```
42
+
43
+ ## Manifest example
44
+
45
+ **manifest.yaml** (or `.json`):
46
+
47
+ ```yaml
48
+ version: "2026-02"
49
+ artifacts:
50
+ - system: my-system-01
51
+ path: /path/to/D1-2026-02-03.json
52
+ - system: my-system-01
53
+ path: /path/to/W1-2026-W05.json
54
+ ```
55
+
56
+ `path` must be a **local filesystem path** (no remote URIs in v0).
57
+
58
+ ## Artifact example
59
+
60
+ One file = one signal. **D1-2026-02-03.json**:
61
+
62
+ ```json
63
+ {
64
+ "signal_key": "D1",
65
+ "cadence": "daily",
66
+ "window": {
67
+ "start": "2026-02-03T00:00:00Z",
68
+ "end": "2026-02-03T23:59:59Z"
69
+ },
70
+ "metrics": {
71
+ "score_mean": 0.87,
72
+ "fail_rate": 0.03
73
+ }
74
+ }
75
+ ```
76
+
77
+ Optional passthrough: `severity`, `confidence`, `tags`, `source`, `evidence_refs`, `baseline_ref`, `created_at`.
78
+ Runner adds: `system`, `external_id`, `type: "ais"`, and `created_at` if missing; defaults `severity="low"`, `confidence=0.8` if missing.
79
+
80
+ ## external_id format
81
+
82
+ - **Daily**: `{system}:{signal_key}:{YYYY-MM-DD}`
83
+ - **Weekly**: `{system}:{signal_key}:{YYYY-Www}` (ISO week from `window.end`)
84
+
85
+ ## Docs
86
+
87
+ - [Quickstart](docs/quickstart.md)
88
+ - [Deployment recipes](docs/deployment-recipes.md) (cron, GitHub Actions, K8s CronJob)
89
+
90
+ ## License
91
+
92
+ MIT
@@ -0,0 +1,47 @@
1
+ [build-system]
2
+ requires = ["setuptools>=61.0", "wheel"]
3
+ build-backend = "setuptools.build_meta"
4
+
5
+ [project]
6
+ name = "afterai-runner"
7
+ version = "0.1.0"
8
+ description = "AfterAI Runner — AIS signal emission and agent evaluation on customer infrastructure"
9
+ readme = "README.md"
10
+ requires-python = ">=3.9"
11
+ license = {text = "MIT"}
12
+ authors = [
13
+ {name = "AfterAI", email = "support@useafter.ai"}
14
+ ]
15
+ keywords = ["afterai", "ais", "runner", "eval", "signals", "evaluation"]
16
+ classifiers = [
17
+ "Development Status :: 3 - Alpha",
18
+ "Intended Audience :: Developers",
19
+ "License :: OSI Approved :: MIT License",
20
+ "Programming Language :: Python :: 3",
21
+ "Programming Language :: Python :: 3.9",
22
+ "Programming Language :: Python :: 3.10",
23
+ "Programming Language :: Python :: 3.11",
24
+ "Programming Language :: Python :: 3.12",
25
+ ]
26
+ dependencies = [
27
+ "pydantic>=2.0.0",
28
+ "requests>=2.28.0",
29
+ "afterai>=0.1.0",
30
+ "pyyaml>=6.0",
31
+ ]
32
+
33
+ [project.optional-dependencies]
34
+ dev = [
35
+ "pytest>=7.0.0",
36
+ "pytest-cov>=4.0.0",
37
+ ]
38
+
39
+ [project.scripts]
40
+ afterai-runner = "afterai_runner.cli:main"
41
+
42
+ [tool.setuptools.packages.find]
43
+ where = ["src"]
44
+
45
+ [tool.pytest.ini_options]
46
+ testpaths = ["tests"]
47
+ pythonpath = ["src"]
@@ -0,0 +1,4 @@
1
+ [egg_info]
2
+ tag_build =
3
+ tag_date = 0
4
+
@@ -0,0 +1,7 @@
1
+ """
2
+ AfterAI Runner v0 — manifest-driven ingestion of pre-aggregated D1–W5 metrics
3
+ and emission of AIS payloads to the AfterAI API.
4
+ """
5
+ from .__version__ import __version__
6
+
7
+ __all__ = ["__version__"]
@@ -0,0 +1 @@
1
+ __version__ = "0.1.0"
@@ -0,0 +1,180 @@
1
+ """
2
+ Minimal out-of-band ACE-from-AIS rule: read AIS via GET /signals/debug,
3
+ apply a simple threshold (e.g. N high-severity signals per system in last W hours),
4
+ emit inferred ACE via POST /ingest using SDK-style payload.
5
+ """
6
+ from datetime import datetime, timezone, timedelta
7
+ from typing import Any, Dict, List, Optional
8
+
9
+ import requests
10
+
11
+ from .config import get_api_key, get_base_url
12
+
13
+
14
+ def _parse_rfc3339_to_utc(s: Optional[str]) -> Optional[datetime]:
15
+ if not s:
16
+ return None
17
+ try:
18
+ normalized = s.strip().replace("Z", "+00:00")
19
+ dt = datetime.fromisoformat(normalized)
20
+ if dt.tzinfo is None:
21
+ dt = dt.replace(tzinfo=timezone.utc)
22
+ return dt
23
+ except Exception:
24
+ return None
25
+
26
+
27
def fetch_signals_debug(
    base_url: str,
    api_key: str,
    limit: int = 100,
    system: Optional[str] = None,
    timeout: int = 30,
) -> List[Dict[str, Any]]:
    """Fetch recent AIS signal items via GET /signals/debug.

    Returns the response's "items" list (empty list when absent or null).
    Raises requests.HTTPError on a non-2xx response.
    """
    query: Dict[str, Any] = {"limit": limit}
    if system:
        query["system"] = system
    response = requests.get(
        f"{base_url.rstrip('/')}/signals/debug",
        headers={"X-API-Key": api_key},
        params=query,
        timeout=timeout,
    )
    response.raise_for_status()
    return response.json().get("items") or []
48
+
49
+
50
def filter_by_severity_and_hours(
    items: List[Dict[str, Any]],
    severities: List[str],
    hours: int,
    now: Optional[datetime] = None,
) -> List[Dict[str, Any]]:
    """Keep items whose severity is in *severities* and whose created_at
    falls within the last *hours* (relative to *now*).

    Items with a missing or unparseable created_at are dropped. *now*
    defaults to the current UTC time and is injectable for testing.
    """
    reference = now if now is not None else datetime.now(timezone.utc)
    cutoff = reference - timedelta(hours=hours)
    kept: List[Dict[str, Any]] = []
    for candidate in items:
        if candidate.get("severity") not in severities:
            continue
        created_at = _parse_rfc3339_to_utc(candidate.get("created_at"))
        if created_at is not None and created_at >= cutoff:
            kept.append(candidate)
    return kept
69
+
70
+
71
def group_by_system(items: List[Dict[str, Any]]) -> Dict[str, List[Dict[str, Any]]]:
    """Bucket debug items by their "system" field, preserving input order.

    Items whose system is missing, None, or blank after stripping are skipped.
    """
    grouped: Dict[str, List[Dict[str, Any]]] = {}
    for entry in items:
        system_id = (entry.get("system") or "").strip()
        if system_id:
            grouped.setdefault(system_id, []).append(entry)
    return grouped
80
+
81
+
82
def build_inferred_ace_payload(
    system_id: str,
    ais_refs: List[str],
    occurred_at: str,
    environment: str = "production",
) -> Dict[str, Any]:
    """Assemble the minimal inferred-ACE payload.

    Fixed fields: drift_detector origin, "pending" status, high severity,
    "ais-escalation" fingerprint; the triggering AIS signal ids travel in
    extensions.ais_refs.
    """
    system_block = {
        "system_id": system_id,
        "name": system_id,
        "type": "other",
    }
    source_block = {
        "origin": "drift_detector",
        "actor": {"actor_type": "system", "actor_id": "afterai-drift"},
    }
    change_block = {
        "change_type": "eval",
        "intent": "observed",
        "baseline": {},
        "candidate": {},
    }
    risk_block = {
        "severity": "high",
        "blast_radius": "single_app",
        "customer_impact": "minor",
    }
    return {
        "schema_version": "1.0.0",
        "occurred_at": occurred_at,
        "system": system_block,
        "source": source_block,
        "change": change_block,
        "risk": risk_block,
        "fingerprints": {"change_fingerprint": "ais-escalation"},
        "environment": environment,
        "status": "pending",
        "extensions": {"ais_refs": ais_refs},
    }
119
+
120
+
121
def post_ace(
    base_url: str,
    api_key: str,
    payload: Dict[str, Any],
    idempotency_key: Optional[str] = None,
    timeout: int = 30,
) -> Dict[str, Any]:
    """POST an ACE payload to /ingest and return the parsed JSON response.

    Sends an Idempotency-Key header when one is provided. Raises
    requests.HTTPError on a non-2xx response.
    """
    request_headers = {"X-API-Key": api_key, "Content-Type": "application/json"}
    if idempotency_key:
        request_headers["Idempotency-Key"] = idempotency_key
    response = requests.post(
        f"{base_url.rstrip('/')}/ingest",
        json=payload,
        headers=request_headers,
        timeout=timeout,
    )
    response.raise_for_status()
    return response.json()
136
+
137
+
138
def run_ace_from_ais_rule(
    base_url: str,
    api_key: str,
    min_signals: int = 2,
    severities: Optional[List[str]] = None,
    hours: int = 24,
    limit: int = 100,
    system_filter: Optional[str] = None,
    dry_run: bool = False,
) -> List[Dict[str, Any]]:
    """
    Fetch AIS, filter by severity and time, group by system; for each system with
    >= min_signals matching items, build and POST an inferred ACE (unless dry_run).

    Args:
        base_url: AfterAI API base URL.
        api_key: API key for X-API-Key auth.
        min_signals: Minimum matching signals per system to trigger an ACE.
        severities: Severities that count toward the threshold
            (defaults to ["high", "critical"]).
        hours: Look-back window for created_at.
        limit: Max signals to fetch from /signals/debug.
        system_filter: Optional server-side system filter.
        dry_run: If True, return would-be payloads without POSTing.

    Returns:
        List of per-system result dicts: {"system", "ace_id", "ack"} on success,
        {"system", "dry_run", "payload"} when dry_run, or
        {"system", "error", "response"} when the POST failed.
    """
    if severities is None:
        severities = ["high", "critical"]
    items = fetch_signals_debug(base_url, api_key, limit=limit, system=system_filter)
    filtered = filter_by_severity_and_hours(items, severities, hours)
    by_system = group_by_system(filtered)
    now_iso = datetime.now(timezone.utc).isoformat().replace("+00:00", "Z")
    results: List[Dict[str, Any]] = []
    for sys_id, sys_items in by_system.items():
        if len(sys_items) < min_signals:
            continue
        signal_ids = [it["id"] for it in sys_items if it.get("id")]
        if not signal_ids:
            # No referenceable signal ids: skip rather than emit an ACE
            # with an empty extensions.ais_refs.
            continue
        payload = build_inferred_ace_payload(
            system_id=sys_id,
            ais_refs=signal_ids,
            occurred_at=now_iso,
        )
        if dry_run:
            results.append({"system": sys_id, "dry_run": True, "payload": payload})
            continue
        # Hour-granular key (now_iso[:13] == "YYYY-MM-DDTHH"): re-runs within
        # the same hour dedupe server-side instead of creating duplicate ACEs.
        idempotency_key = f"ace-from-ais:{sys_id}:{now_iso[:13]}"
        try:
            ack = post_ace(base_url, api_key, payload, idempotency_key=idempotency_key)
            results.append({"system": sys_id, "ace_id": ack.get("ace_id"), "ack": ack})
        except requests.exceptions.RequestException as e:
            # Broadened from HTTPError: a Timeout/ConnectionError on one system
            # must not abort emission for the remaining systems. e.response is
            # None for transport errors, so getattr defaults "response" to "".
            results.append({"system": sys_id, "error": str(e), "response": getattr(e.response, "text", "")})
    return results
@@ -0,0 +1,115 @@
1
+ """
2
+ Artifact contract: one file = one signal. Validates required keys and RFC3339 window.
3
+ """
4
+ from pathlib import Path
5
+ from typing import Any, Dict, List, Optional
6
+
7
+ from pydantic import BaseModel, Field, field_validator
8
+
9
# Allowed signal keys: D1–D5 (daily) and W1–W5 (weekly).
VALID_SIGNAL_KEYS = frozenset(
    f"{p}{i}" for p in ("D", "W") for i in range(1, 6)
)
# Accepted (lowercased) values for the artifact "cadence" field.
CADENCE_VALUES = ("daily", "weekly")
14
+
15
+
16
+ def _parse_rfc3339(s: str) -> None:
17
+ """Validate RFC3339-like string (raise if invalid)."""
18
+ if not s or not isinstance(s, str):
19
+ raise ValueError("RFC3339 value must be a non-empty string")
20
+ s = s.strip()
21
+ # Accept Z or +00:00
22
+ from datetime import datetime
23
+ normalized = s.replace("Z", "+00:00")
24
+ try:
25
+ datetime.fromisoformat(normalized)
26
+ except Exception as e:
27
+ raise ValueError(f"Invalid RFC3339 datetime: {s}") from e
28
+
29
+
30
class WindowModel(BaseModel):
    """Signal window: start/end timestamps as RFC3339 strings."""

    start: str = Field(..., description="Window start (RFC3339)")
    end: str = Field(..., description="Window end (RFC3339)")

    @field_validator("start", "end")
    @classmethod
    def check_rfc3339(cls, v: str) -> str:
        # Delegate to the shared validator; it raises ValueError on bad input,
        # which pydantic surfaces as a validation error.
        _parse_rfc3339(v)
        return v
40
+
41
+
42
class ArtifactIn(BaseModel):
    """
    One artifact file = one signal.

    Required: signal_key, cadence, window, metrics.
    Optional passthrough forwarded into the AIS payload: severity, confidence,
    tags, source, evidence_refs, baseline_ref, created_at.
    """

    # Required contract fields.
    signal_key: str = Field(..., description="D1–D5 or W1–W5")
    cadence: str = Field(..., description="daily | weekly")
    window: WindowModel = Field(..., description="start/end RFC3339")
    metrics: Dict[str, Any] = Field(..., description="Pre-aggregated metrics object")

    # Optional passthrough fields (None means "not provided").
    severity: Optional[str] = None
    confidence: Optional[float] = None
    tags: Optional[List[str]] = None
    source: Optional[Any] = None
    evidence_refs: Optional[List[str]] = None
    baseline_ref: Optional[str] = None
    created_at: Optional[str] = None

    @field_validator("signal_key")
    @classmethod
    def valid_signal_key(cls, v: str) -> str:
        # Tolerate surrounding whitespace; membership check is case-sensitive.
        normalized = (v or "").strip()
        if normalized not in VALID_SIGNAL_KEYS:
            raise ValueError(
                f"signal_key must be one of D1–D5, W1–W5; got {v!r}"
            )
        return normalized

    @field_validator("cadence")
    @classmethod
    def valid_cadence(cls, v: str) -> str:
        # Cadence is case-insensitive on input but stored lowercased.
        normalized = (v or "").strip().lower()
        if normalized not in CADENCE_VALUES:
            raise ValueError(
                f"cadence must be 'daily' or 'weekly'; got {v!r}"
            )
        return normalized
80
+
81
+
82
def load_artifact(path: "str | Path") -> "ArtifactIn":
    """
    Load and validate an artifact JSON file from a local filesystem path.

    Args:
        path: Local path to the artifact JSON file (str or Path).

    Returns:
        Validated ArtifactIn model.

    Raises:
        FileNotFoundError: if the path does not exist.
        ValueError: if the path is not a regular file, is unreadable, is not
            valid JSON, or fails the artifact contract (missing/invalid keys).
    """
    # NOTE: annotations are quoted so the module imports on Python 3.9 —
    # pyproject declares requires-python >= 3.9, where an evaluated
    # `str | Path` annotation raises TypeError at def time.
    import json  # replaces the original's obfuscated __import__("json")

    p = Path(path)
    if not p.exists():
        raise FileNotFoundError(f"Artifact file not found: {p}")
    if not p.is_file():
        raise ValueError(f"Artifact path is not a file: {p}")

    try:
        raw = p.read_text(encoding="utf-8")
    except OSError as e:
        raise ValueError(f"Cannot read artifact file {p}: {e}") from e

    try:
        # json.JSONDecodeError is a ValueError subclass, so this catch is
        # narrower than the original's bare Exception but covers all parse errors.
        data = json.loads(raw)
    except ValueError as e:
        raise ValueError(f"Artifact JSON parse error in {p}: {e}") from e

    if not isinstance(data, dict):
        raise ValueError(f"Artifact root must be a JSON object; got {type(data).__name__}")

    for key in ("signal_key", "cadence", "window", "metrics"):
        if key not in data:
            raise ValueError(f"Artifact missing required key: {key}")

    window = data.get("window")
    if not isinstance(window, dict):
        raise ValueError("Artifact 'window' must be an object with start and end")
    if "start" not in window or "end" not in window:
        raise ValueError("Artifact window must have 'start' and 'end' (RFC3339)")

    # Field-level validation (signal_key/cadence/window formats) is delegated
    # to the pydantic model.
    return ArtifactIn(**data)