invarlock 0.3.0__py3-none-any.whl → 0.3.2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (33)
  1. invarlock/__init__.py +1 -1
  2. invarlock/_data/runtime/profiles/ci_cpu.yaml +5 -0
  3. invarlock/_data/runtime/tiers.yaml +61 -0
  4. invarlock/adapters/hf_loading.py +97 -0
  5. invarlock/calibration/__init__.py +6 -0
  6. invarlock/calibration/spectral_null.py +301 -0
  7. invarlock/calibration/variance_ve.py +154 -0
  8. invarlock/cli/app.py +15 -0
  9. invarlock/cli/commands/calibrate.py +576 -0
  10. invarlock/cli/commands/doctor.py +16 -4
  11. invarlock/cli/commands/explain_gates.py +53 -9
  12. invarlock/cli/commands/plugins.py +12 -2
  13. invarlock/cli/commands/run.py +323 -81
  14. invarlock/cli/commands/verify.py +40 -0
  15. invarlock/cli/determinism.py +237 -0
  16. invarlock/core/auto_tuning.py +215 -17
  17. invarlock/core/registry.py +9 -4
  18. invarlock/eval/bench.py +467 -141
  19. invarlock/eval/bench_regression.py +12 -0
  20. invarlock/eval/data.py +29 -7
  21. invarlock/guards/spectral.py +216 -9
  22. invarlock/guards/variance.py +6 -3
  23. invarlock/reporting/certificate.py +403 -51
  24. invarlock/reporting/certificate_schema.py +4 -1
  25. invarlock/reporting/guards_analysis.py +108 -10
  26. invarlock/reporting/normalizer.py +21 -1
  27. invarlock/reporting/policy_utils.py +100 -16
  28. {invarlock-0.3.0.dist-info → invarlock-0.3.2.dist-info}/METADATA +12 -10
  29. {invarlock-0.3.0.dist-info → invarlock-0.3.2.dist-info}/RECORD +33 -26
  30. {invarlock-0.3.0.dist-info → invarlock-0.3.2.dist-info}/WHEEL +0 -0
  31. {invarlock-0.3.0.dist-info → invarlock-0.3.2.dist-info}/entry_points.txt +0 -0
  32. {invarlock-0.3.0.dist-info → invarlock-0.3.2.dist-info}/licenses/LICENSE +0 -0
  33. {invarlock-0.3.0.dist-info → invarlock-0.3.2.dist-info}/top_level.txt +0 -0
@@ -35,6 +35,22 @@ from .run import _enforce_provider_parity, _resolve_exit_code
35
35
  console = Console()
36
36
 
37
37
 
38
+ def _coerce_float(value: Any) -> float | None:
39
+ try:
40
+ out = float(value)
41
+ except (TypeError, ValueError):
42
+ return None
43
+ return out if math.isfinite(out) else None
44
+
45
+
46
+ def _coerce_int(value: Any) -> int | None:
47
+ try:
48
+ out = int(value)
49
+ except (TypeError, ValueError):
50
+ return None
51
+ return out if out >= 0 else None
52
+
53
+
38
54
  def _load_certificate(path: Path) -> dict[str, Any]:
39
55
  """Load certificate JSON from disk."""
40
56
  with path.open("r", encoding="utf-8") as handle:
@@ -315,6 +331,30 @@ def _validate_certificate_payload(
315
331
  errors.extend(_validate_drift_band(certificate))
316
332
  errors.extend(_apply_profile_lints(certificate))
317
333
  errors.extend(_validate_tokenizer_hash(certificate))
334
+ # Release-only enforcement: guard overhead must be measured or explicitly skipped.
335
+ if prof == "release":
336
+ go = certificate.get("guard_overhead")
337
+ if not isinstance(go, dict) or not go:
338
+ errors.append(
339
+ "Release verification requires guard_overhead (missing). "
340
+ "Set INVARLOCK_SKIP_OVERHEAD_CHECK=1 to explicitly skip during certification."
341
+ )
342
+ else:
343
+ skipped = bool(go.get("skipped", False)) or (
344
+ str(go.get("mode", "")).strip().lower() == "skipped"
345
+ )
346
+ if not skipped:
347
+ evaluated = go.get("evaluated")
348
+ if evaluated is not True:
349
+ errors.append(
350
+ "Release verification requires evaluated guard_overhead (not evaluated). "
351
+ "Set INVARLOCK_SKIP_OVERHEAD_CHECK=1 to explicitly skip during certification."
352
+ )
353
+ ratio = go.get("overhead_ratio")
354
+ if ratio is None:
355
+ errors.append(
356
+ "Release verification requires guard_overhead.overhead_ratio (missing)."
357
+ )
318
358
  # Legacy cross-checks removed; primary_metric is canonical
319
359
 
320
360
  return errors
@@ -0,0 +1,237 @@
1
+ """Determinism presets for CI/release runs.
2
+
3
+ Centralizes:
4
+ - Seeds (python/numpy/torch)
5
+ - Thread caps (OMP/MKL/etc + torch threads)
6
+ - TF32 policy
7
+ - torch deterministic algorithms
8
+ - A structured "determinism level" for certificate provenance
9
+ """
10
+
11
+ from __future__ import annotations
12
+
13
+ import os
14
+ import random
15
+ from typing import Any
16
+
17
+ import numpy as np
18
+
19
+ from invarlock.model_utils import set_seed
20
+
21
+ try: # optional torch
22
+ import torch
23
+ except Exception: # pragma: no cover
24
+ torch = None # type: ignore[assignment]
25
+
26
+
27
+ _THREAD_ENV_VARS: tuple[str, ...] = (
28
+ "OMP_NUM_THREADS",
29
+ "MKL_NUM_THREADS",
30
+ "OPENBLAS_NUM_THREADS",
31
+ "NUMEXPR_NUM_THREADS",
32
+ "VECLIB_MAXIMUM_THREADS",
33
+ )
34
+
35
+
36
def _coerce_int(value: Any, default: int) -> int:
    """Return ``value`` as an int, falling back to ``int(default)`` on any failure."""
    try:
        coerced = int(value)
    except Exception:
        return int(default)
    return coerced
41
+
42
+
43
+ def _coerce_profile(profile: str | None) -> str:
44
+ try:
45
+ return (profile or "").strip().lower()
46
+ except Exception:
47
+ return ""
48
+
49
+
50
+ def _coerce_device(device: str | None) -> str:
51
+ try:
52
+ return (device or "").strip().lower()
53
+ except Exception:
54
+ return "cpu"
55
+
56
+
57
def apply_determinism_preset(
    *,
    profile: str | None,
    device: str | None,
    seed: int,
    threads: int = 1,
) -> dict[str, Any]:
    """Apply a determinism preset and return a provenance payload.

    For "ci"/"release" profiles a strict preset is requested: env thread
    caps, TF32 disabled, deterministic torch algorithms, pinned cuDNN, and
    seeded RNGs.  Any knob that cannot be applied downgrades the reported
    level to "tolerance" and is recorded in ``notes`` instead of raising.
    """
    profile_key = _coerce_profile(profile)
    device_key = _coerce_device(device)
    thread_count = max(1, _coerce_int(threads, 1))

    requested = "strict" if profile_key in {"ci", "release"} else "off"

    env_snapshot: dict[str, Any] = {}
    torch_info: dict[str, Any] = {}
    downgrade_notes: list[str] = []

    # Cap BLAS/OpenMP thread pools so CPU runs drift less between hosts.
    if requested == "strict":
        for env_var in _THREAD_ENV_VARS:
            os.environ[env_var] = str(thread_count)
            env_snapshot[env_var] = os.environ.get(env_var)

    # cuBLAS needs an explicit workspace config for deterministic kernels.
    if requested == "strict" and device_key.startswith("cuda"):
        os.environ.setdefault("CUBLAS_WORKSPACE_CONFIG", ":16:8")
        env_snapshot["CUBLAS_WORKSPACE_CONFIG"] = os.environ.get(
            "CUBLAS_WORKSPACE_CONFIG"
        )

    # Seed python/numpy/torch via the shared helper so all runners agree.
    set_seed(int(seed))

    # Provenance: record the effective seed per RNG source.
    seeds: dict[str, Any] = {"python": int(seed), "numpy": int(seed), "torch": None}
    try:
        seeds["numpy"] = int(np.random.get_state()[1][0])
    except Exception:
        pass
    if torch is not None:
        try:
            seeds["torch"] = int(torch.initial_seed())
        except Exception:
            seeds["torch"] = int(seed)

    effective = "off" if requested == "off" else "strict"
    if requested == "strict":
        if torch is None:
            effective = "tolerance"
            downgrade_notes.append("torch_unavailable")
        else:
            # Torch-side thread caps.
            try:
                if hasattr(torch, "set_num_threads"):
                    torch.set_num_threads(thread_count)
                if hasattr(torch, "set_num_interop_threads"):
                    torch.set_num_interop_threads(thread_count)
                torch_info["torch_threads"] = thread_count
            except Exception:
                effective = "tolerance"
                downgrade_notes.append("torch_thread_caps_failed")

            # TF32 trades precision for speed; disable it for reproducibility.
            try:
                matmul = getattr(
                    getattr(torch.backends, "cuda", object()), "matmul", None
                )
                if matmul is not None and hasattr(matmul, "allow_tf32"):
                    matmul.allow_tf32 = False
                cudnn = getattr(torch.backends, "cudnn", None)
                if cudnn is not None and hasattr(cudnn, "allow_tf32"):
                    cudnn.allow_tf32 = False
            except Exception:
                effective = "tolerance"
                downgrade_notes.append("tf32_policy_failed")

            # Prefer hard-deterministic algorithms; on failure, downgrade to
            # warn-only mode rather than crashing the run.
            try:
                if hasattr(torch, "use_deterministic_algorithms"):
                    torch.use_deterministic_algorithms(True, warn_only=False)
            except Exception:
                effective = "tolerance"
                downgrade_notes.append("deterministic_algorithms_unavailable")
                try:
                    if hasattr(torch, "use_deterministic_algorithms"):
                        torch.use_deterministic_algorithms(True, warn_only=True)
                except Exception:
                    pass

            # cuDNN autotuning selects kernels nondeterministically; pin it.
            try:
                cudnn = getattr(torch.backends, "cudnn", None)
                if cudnn is not None:
                    cudnn.benchmark = False
                    if hasattr(cudnn, "deterministic"):
                        cudnn.deterministic = True
            except Exception:
                effective = "tolerance"
                downgrade_notes.append("cudnn_determinism_failed")

            # Snapshot the flags actually in effect for provenance.
            try:
                probe = getattr(
                    torch, "are_deterministic_algorithms_enabled", None
                )
                if callable(probe):
                    torch_info["deterministic_algorithms"] = bool(probe())
            except Exception:
                pass
            try:
                cudnn = getattr(torch.backends, "cudnn", None)
                if cudnn is not None:
                    torch_info["cudnn_deterministic"] = bool(
                        getattr(cudnn, "deterministic", False)
                    )
                    torch_info["cudnn_benchmark"] = bool(
                        getattr(cudnn, "benchmark", False)
                    )
                    if hasattr(cudnn, "allow_tf32"):
                        torch_info["cudnn_allow_tf32"] = bool(
                            getattr(cudnn, "allow_tf32", False)
                        )
            except Exception:
                pass
            try:
                matmul = getattr(
                    getattr(torch.backends, "cuda", object()), "matmul", None
                )
                if matmul is not None and hasattr(matmul, "allow_tf32"):
                    torch_info["cuda_matmul_allow_tf32"] = bool(matmul.allow_tf32)
            except Exception:
                pass

    # Normalize the level to the closed set {off, strict, tolerance}.
    if effective not in {"off", "strict", "tolerance"}:
        effective = "tolerance" if requested == "strict" else "off"

    # Coarse marker that the python RNG works (NOTE(review): this consumes
    # one draw from the just-seeded stream — confirm downstream consumers
    # do not depend on the very first value).
    try:
        torch_info["python_random"] = isinstance(random.random(), float)
    except Exception:
        pass

    payload: dict[str, Any] = {
        "requested": requested,
        "level": effective,
        "profile": profile_key or None,
        "device": device_key,
        "threads": thread_count if requested == "strict" else None,
        "seed": int(seed),
        "seeds": seeds,
        "env": env_snapshot,
        "torch": torch_info,
        "notes": downgrade_notes,
    }

    # Drop empty/None sections so artifacts stay stable and compact.
    for key in ("env", "torch", "notes"):
        if not payload[key]:
            payload.pop(key, None)
    for key in ("threads", "profile"):
        if payload.get(key) is None:
            payload.pop(key, None)

    return payload
235
+
236
+
237
+ __all__ = ["apply_determinism_preset"]
@@ -7,9 +7,21 @@ Maps tier settings (conservative/balanced/aggressive) to specific guard paramete
7
7
  """
8
8
 
9
9
  import copy
10
+ import os
11
+ from functools import lru_cache
12
+ from importlib import resources as _ires
13
+ from pathlib import Path
10
14
  from typing import Any
11
15
 
12
- __all__ = ["resolve_tier_policies", "TIER_POLICIES", "EDIT_ADJUSTMENTS"]
16
+ import yaml
17
+
18
+ __all__ = [
19
+ "clear_tier_policies_cache",
20
+ "get_tier_policies",
21
+ "resolve_tier_policies",
22
+ "TIER_POLICIES",
23
+ "EDIT_ADJUSTMENTS",
24
+ ]
13
25
 
14
26
 
15
27
  # Base tier policy mappings
@@ -198,10 +210,183 @@ EDIT_ADJUSTMENTS: dict[str, dict[str, dict[str, Any]]] = {
198
210
  }
199
211
 
200
212
 
213
def _deep_merge(base: dict[str, Any], override: dict[str, Any]) -> dict[str, Any]:
    """Recursively merge ``override`` into a deep copy of ``base``.

    Nested dicts are merged key-by-key; any other value replaces the base
    entry.  Neither input is mutated, and merged values are deep-copied so
    the result shares no structure with the arguments.
    """
    merged = copy.deepcopy(base)
    for key, new_value in override.items():
        current = merged.get(key)
        if isinstance(new_value, dict) and isinstance(current, dict):
            merged[key] = _deep_merge(current, new_value)
        else:
            merged[key] = copy.deepcopy(new_value)
    return merged
221
+
222
+
223
def _load_runtime_yaml(
    config_root: str | None, *rel_parts: str
) -> dict[str, Any] | None:
    """Load a runtime YAML mapping, or None when it cannot be found.

    Search order:
      1) ``<config_root>/runtime/<rel_parts...>`` on disk
      2) the ``invarlock._data.runtime`` package resources

    A file found under ``config_root`` that is not a mapping raises
    ValueError (loud, explicit config error); package-resource failures of
    any kind are swallowed and reported as None.
    """
    if config_root:
        candidate = Path(config_root, "runtime", *rel_parts)
        if candidate.exists():
            loaded = yaml.safe_load(candidate.read_text(encoding="utf-8")) or {}
            if not isinstance(loaded, dict):
                raise ValueError("Runtime YAML must be a mapping")
            return loaded

    try:
        resource = _ires.files("invarlock._data.runtime")
        for part in rel_parts:
            resource = resource.joinpath(part)
        if getattr(resource, "is_file", None) and resource.is_file():  # type: ignore[attr-defined]
            loaded = yaml.safe_load(resource.read_text(encoding="utf-8")) or {}
            if not isinstance(loaded, dict):
                raise ValueError("Runtime YAML must be a mapping")
            return loaded
    except Exception:
        return None

    return None
257
+
258
+
259
+ def _normalize_family_caps(caps: Any) -> dict[str, dict[str, float]]:
260
+ normalized: dict[str, dict[str, float]] = {}
261
+ if not isinstance(caps, dict):
262
+ return normalized
263
+ for family, value in caps.items():
264
+ family_key = str(family)
265
+ if isinstance(value, dict):
266
+ kappa = value.get("kappa")
267
+ if isinstance(kappa, int | float):
268
+ normalized[family_key] = {"kappa": float(kappa)}
269
+ elif isinstance(value, int | float):
270
+ normalized[family_key] = {"kappa": float(value)}
271
+ return normalized
272
+
273
+
274
def _normalize_multiple_testing(mt: Any) -> dict[str, Any]:
    """Normalize a multiple-testing config fragment.

    Produces a lowercase "method", float "alpha", and int "m" where the
    source values are present and coercible; uncoercible values are silently
    dropped.  Non-dict input yields ``{}``.
    """
    if not isinstance(mt, dict):
        return {}
    normalized: dict[str, Any] = {}
    method = mt.get("method")
    if method is not None:
        normalized["method"] = str(method).lower()
    for key, cast in (("alpha", float), ("m", int)):
        raw = mt.get(key)
        if raw is None:
            continue
        try:
            normalized[key] = cast(raw)
        except Exception:
            pass
    return normalized
294
+
295
+
296
+ def _tier_entry_to_policy(tier_entry: dict[str, Any]) -> dict[str, dict[str, Any]]:
297
+ """Map a tiers.yaml entry to the canonical policy shape."""
298
+ out: dict[str, dict[str, Any]] = {}
299
+
300
+ metrics = tier_entry.get("metrics")
301
+ if isinstance(metrics, dict):
302
+ out["metrics"] = copy.deepcopy(metrics)
303
+
304
+ spectral_src = tier_entry.get("spectral") or tier_entry.get("spectral_guard")
305
+ if isinstance(spectral_src, dict):
306
+ spectral = copy.deepcopy(spectral_src)
307
+ if "family_caps" in spectral:
308
+ spectral["family_caps"] = _normalize_family_caps(
309
+ spectral.get("family_caps")
310
+ )
311
+ if "multiple_testing" in spectral:
312
+ spectral["multiple_testing"] = _normalize_multiple_testing(
313
+ spectral.get("multiple_testing")
314
+ )
315
+ out["spectral"] = spectral
316
+
317
+ rmt_src = tier_entry.get("rmt") or tier_entry.get("rmt_guard")
318
+ if isinstance(rmt_src, dict):
319
+ rmt = copy.deepcopy(rmt_src)
320
+ eps = rmt.get("epsilon_by_family")
321
+ if isinstance(eps, dict):
322
+ rmt["epsilon_by_family"] = {
323
+ str(k): float(v) for k, v in eps.items() if isinstance(v, int | float)
324
+ }
325
+ # Backward-compat: keep epsilon alias
326
+ rmt["epsilon"] = dict(rmt["epsilon_by_family"])
327
+ out["rmt"] = rmt
328
+
329
+ variance_src = tier_entry.get("variance") or tier_entry.get("variance_guard")
330
+ if isinstance(variance_src, dict):
331
+ out["variance"] = copy.deepcopy(variance_src)
332
+
333
+ return out
334
+
335
+
336
@lru_cache(maxsize=8)
def _load_tier_policies_cached(config_root: str | None) -> dict[str, dict[str, Any]]:
    """Merge built-in TIER_POLICIES with tiers.yaml overlays, cached per root.

    Defaults come from TIER_POLICIES (keys lowercased); any tiers.yaml entry
    found for ``config_root`` is normalized and deep-merged on top.
    """
    overlay = _load_runtime_yaml(config_root, "tiers.yaml") or {}

    combined: dict[str, dict[str, Any]] = {
        str(name).lower(): copy.deepcopy(policy)
        for name, policy in TIER_POLICIES.items()
    }

    for name, entry in overlay.items():
        if not isinstance(entry, dict):
            continue
        key = str(name).lower()
        combined[key] = _deep_merge(combined.get(key, {}), _tier_entry_to_policy(entry))

    return combined
355
+
356
+
357
def get_tier_policies(*, config_root: str | None = None) -> dict[str, dict[str, Any]]:
    """Return tier policies loaded from runtime tiers.yaml (with safe defaults).

    When ``config_root`` is not given, the INVARLOCK_CONFIG_ROOT environment
    variable is consulted (empty values count as unset).
    """
    if config_root is not None:
        effective_root = config_root
    else:
        effective_root = os.getenv("INVARLOCK_CONFIG_ROOT") or None
    return _load_tier_policies_cached(effective_root)
363
+
364
+
365
def clear_tier_policies_cache() -> None:
    """Drop the cached tier policies so the next lookup re-reads tiers.yaml."""
    _load_tier_policies_cached.cache_clear()
367
+
368
+
369
def _load_profile_overrides(
    profile: str | None, *, config_root: str | None
) -> dict[str, Any]:
    """Load per-profile runtime overrides from ``profiles/<name>.yaml``.

    Returns ``{}`` for a falsy profile or when no usable mapping is found.
    The "ci" profile falls back to the bundled "ci_cpu.yaml" when no
    ci.yaml exists.
    """
    if not profile:
        return {}
    name = str(profile).strip().lower()
    data = _load_runtime_yaml(config_root, "profiles", f"{name}.yaml")
    if data is None and name == "ci":
        data = _load_runtime_yaml(config_root, "profiles", "ci_cpu.yaml") or {}
    return data if isinstance(data, dict) else {}
381
+
382
+
201
383
  def resolve_tier_policies(
202
384
  tier: str,
203
385
  edit_name: str | None = None,
204
386
  explicit_overrides: dict[str, dict[str, Any]] | None = None,
387
+ *,
388
+ profile: str | None = None,
389
+ config_root: str | None = None,
205
390
  ) -> dict[str, dict[str, Any]]:
206
391
  """
207
392
  Resolve tier-based guard policies with edit-specific adjustments and explicit overrides.
@@ -217,33 +402,45 @@ def resolve_tier_policies(
217
402
  Raises:
218
403
  ValueError: If tier is not recognized
219
404
  """
220
- if tier not in TIER_POLICIES:
405
+ tier_key = str(tier).lower()
406
+ tier_policies = get_tier_policies(config_root=config_root)
407
+ if tier_key not in tier_policies:
221
408
  raise ValueError(
222
- f"Unknown tier '{tier}'. Valid tiers: {list(TIER_POLICIES.keys())}"
409
+ f"Unknown tier '{tier}'. Valid tiers: {list(tier_policies.keys())}"
223
410
  )
224
411
 
225
412
  # Start with base tier policies
226
- policies: dict[str, dict[str, Any]] = copy.deepcopy(TIER_POLICIES[tier])
413
+ policies: dict[str, dict[str, Any]] = copy.deepcopy(tier_policies[tier_key])
414
+
415
+ # Apply profile overrides (when available)
416
+ overrides = _load_profile_overrides(profile, config_root=config_root)
417
+ guards = overrides.get("guards") if isinstance(overrides, dict) else None
418
+ if isinstance(guards, dict):
419
+ for guard_name, guard_overrides in guards.items():
420
+ key = str(guard_name).lower()
421
+ if not isinstance(guard_overrides, dict):
422
+ continue
423
+ if key in policies and isinstance(policies[key], dict):
424
+ policies[key] = _deep_merge(policies[key], guard_overrides)
425
+ else:
426
+ policies[key] = copy.deepcopy(guard_overrides)
227
427
 
228
428
  # Apply edit-specific adjustments
229
429
  if edit_name and edit_name in EDIT_ADJUSTMENTS:
230
430
  edit_adjustments = EDIT_ADJUSTMENTS[edit_name]
231
431
  for guard_name, adjustments in edit_adjustments.items():
232
- if guard_name in policies:
233
- guard_policy = policies[guard_name]
234
- assert isinstance(guard_policy, dict)
235
- guard_policy.update(adjustments)
432
+ if guard_name in policies and isinstance(policies.get(guard_name), dict):
433
+ policies[guard_name] = _deep_merge(policies[guard_name], adjustments)
236
434
 
237
435
  # Apply explicit overrides (highest precedence)
238
436
  if explicit_overrides:
239
437
  for guard_name, overrides in explicit_overrides.items():
240
- if guard_name in policies:
241
- guard_policy = policies[guard_name]
242
- assert isinstance(guard_policy, dict)
243
- guard_policy.update(overrides)
244
- else:
438
+ if guard_name in policies and isinstance(policies.get(guard_name), dict):
439
+ if isinstance(overrides, dict):
440
+ policies[guard_name] = _deep_merge(policies[guard_name], overrides)
441
+ elif isinstance(overrides, dict):
245
442
  # Create new guard policy if not in base tier
246
- policies[guard_name] = overrides.copy()
443
+ policies[guard_name] = copy.deepcopy(overrides)
247
444
 
248
445
  return policies
249
446
 
@@ -273,7 +470,7 @@ def get_tier_summary(tier: str, edit_name: str | None = None) -> dict[str, Any]:
273
470
  "tier": tier,
274
471
  "edit_name": edit_name,
275
472
  "error": str(e),
276
- "valid_tiers": list(TIER_POLICIES.keys()),
473
+ "valid_tiers": list(get_tier_policies().keys()),
277
474
  }
278
475
 
279
476
 
@@ -304,8 +501,9 @@ def validate_tier_config(config: Any) -> tuple[bool, str | None]:
304
501
  return False, "Missing 'tier' in auto configuration"
305
502
 
306
503
  tier = config["tier"]
307
- if tier not in TIER_POLICIES:
308
- valid_options = list(TIER_POLICIES.keys())
504
+ tier_policies = get_tier_policies()
505
+ if tier not in tier_policies:
506
+ valid_options = list(tier_policies.keys())
309
507
  return False, f"Invalid tier '{tier}'. Valid options: {valid_options}"
310
508
 
311
509
  if "enabled" in config and not isinstance(config["enabled"], bool):
@@ -196,16 +196,21 @@ class CoreRegistry:
196
196
 
197
197
def _check_runtime_dependencies(self, deps: list[str]) -> list[str]:
    """
    Check if runtime dependencies are actually present on the system.

    Uses importlib.util.find_spec to avoid importing packages and triggering
    heavy side effects (e.g., GPU-only extensions).

    Returns:
        List of missing dependency names.
    """
    # Bind the submodule explicitly: after a bare `import importlib`,
    # `importlib.util` is not guaranteed to be an attribute, and the broad
    # except below would then misreport EVERY dependency as missing.
    import importlib.util

    missing: list[str] = []
    for dep in deps:
        try:
            spec = importlib.util.find_spec(dep)
        except Exception:
            # Invalid or relative module names make find_spec raise;
            # treat those as missing rather than crashing the check.
            spec = None
        if spec is None:
            missing.append(dep)
    return missing
211
216