invarlock 0.2.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- invarlock/__init__.py +33 -0
- invarlock/__main__.py +10 -0
- invarlock/_data/runtime/profiles/ci_cpu.yaml +15 -0
- invarlock/_data/runtime/profiles/release.yaml +23 -0
- invarlock/_data/runtime/tiers.yaml +76 -0
- invarlock/adapters/__init__.py +102 -0
- invarlock/adapters/_capabilities.py +45 -0
- invarlock/adapters/auto.py +99 -0
- invarlock/adapters/base.py +530 -0
- invarlock/adapters/base_types.py +85 -0
- invarlock/adapters/hf_bert.py +852 -0
- invarlock/adapters/hf_gpt2.py +403 -0
- invarlock/adapters/hf_llama.py +485 -0
- invarlock/adapters/hf_mixin.py +383 -0
- invarlock/adapters/hf_onnx.py +112 -0
- invarlock/adapters/hf_t5.py +137 -0
- invarlock/adapters/py.typed +1 -0
- invarlock/assurance/__init__.py +43 -0
- invarlock/cli/__init__.py +8 -0
- invarlock/cli/__main__.py +8 -0
- invarlock/cli/_evidence.py +25 -0
- invarlock/cli/_json.py +75 -0
- invarlock/cli/adapter_auto.py +162 -0
- invarlock/cli/app.py +287 -0
- invarlock/cli/commands/__init__.py +26 -0
- invarlock/cli/commands/certify.py +403 -0
- invarlock/cli/commands/doctor.py +1358 -0
- invarlock/cli/commands/explain_gates.py +151 -0
- invarlock/cli/commands/export_html.py +100 -0
- invarlock/cli/commands/plugins.py +1331 -0
- invarlock/cli/commands/report.py +354 -0
- invarlock/cli/commands/run.py +4146 -0
- invarlock/cli/commands/verify.py +1040 -0
- invarlock/cli/config.py +396 -0
- invarlock/cli/constants.py +68 -0
- invarlock/cli/device.py +92 -0
- invarlock/cli/doctor_helpers.py +74 -0
- invarlock/cli/errors.py +6 -0
- invarlock/cli/overhead_utils.py +60 -0
- invarlock/cli/provenance.py +66 -0
- invarlock/cli/utils.py +41 -0
- invarlock/config.py +56 -0
- invarlock/core/__init__.py +62 -0
- invarlock/core/abi.py +15 -0
- invarlock/core/api.py +274 -0
- invarlock/core/auto_tuning.py +317 -0
- invarlock/core/bootstrap.py +226 -0
- invarlock/core/checkpoint.py +221 -0
- invarlock/core/contracts.py +73 -0
- invarlock/core/error_utils.py +64 -0
- invarlock/core/events.py +298 -0
- invarlock/core/exceptions.py +95 -0
- invarlock/core/registry.py +481 -0
- invarlock/core/retry.py +146 -0
- invarlock/core/runner.py +2041 -0
- invarlock/core/types.py +154 -0
- invarlock/edits/__init__.py +12 -0
- invarlock/edits/_edit_utils.py +249 -0
- invarlock/edits/_external_utils.py +268 -0
- invarlock/edits/noop.py +47 -0
- invarlock/edits/py.typed +1 -0
- invarlock/edits/quant_rtn.py +801 -0
- invarlock/edits/registry.py +166 -0
- invarlock/eval/__init__.py +23 -0
- invarlock/eval/bench.py +1207 -0
- invarlock/eval/bootstrap.py +50 -0
- invarlock/eval/data.py +2052 -0
- invarlock/eval/metrics.py +2167 -0
- invarlock/eval/primary_metric.py +767 -0
- invarlock/eval/probes/__init__.py +24 -0
- invarlock/eval/probes/fft.py +139 -0
- invarlock/eval/probes/mi.py +213 -0
- invarlock/eval/probes/post_attention.py +323 -0
- invarlock/eval/providers/base.py +67 -0
- invarlock/eval/providers/seq2seq.py +111 -0
- invarlock/eval/providers/text_lm.py +113 -0
- invarlock/eval/providers/vision_text.py +93 -0
- invarlock/eval/py.typed +1 -0
- invarlock/guards/__init__.py +18 -0
- invarlock/guards/_contracts.py +9 -0
- invarlock/guards/invariants.py +640 -0
- invarlock/guards/policies.py +805 -0
- invarlock/guards/py.typed +1 -0
- invarlock/guards/rmt.py +2097 -0
- invarlock/guards/spectral.py +1419 -0
- invarlock/guards/tier_config.py +354 -0
- invarlock/guards/variance.py +3298 -0
- invarlock/guards_ref/__init__.py +15 -0
- invarlock/guards_ref/rmt_ref.py +40 -0
- invarlock/guards_ref/spectral_ref.py +135 -0
- invarlock/guards_ref/variance_ref.py +60 -0
- invarlock/model_profile.py +353 -0
- invarlock/model_utils.py +221 -0
- invarlock/observability/__init__.py +10 -0
- invarlock/observability/alerting.py +535 -0
- invarlock/observability/core.py +546 -0
- invarlock/observability/exporters.py +565 -0
- invarlock/observability/health.py +588 -0
- invarlock/observability/metrics.py +457 -0
- invarlock/observability/py.typed +1 -0
- invarlock/observability/utils.py +553 -0
- invarlock/plugins/__init__.py +12 -0
- invarlock/plugins/hello_guard.py +33 -0
- invarlock/plugins/hf_awq_adapter.py +82 -0
- invarlock/plugins/hf_bnb_adapter.py +79 -0
- invarlock/plugins/hf_gptq_adapter.py +78 -0
- invarlock/plugins/py.typed +1 -0
- invarlock/py.typed +1 -0
- invarlock/reporting/__init__.py +7 -0
- invarlock/reporting/certificate.py +3221 -0
- invarlock/reporting/certificate_schema.py +244 -0
- invarlock/reporting/dataset_hashing.py +215 -0
- invarlock/reporting/guards_analysis.py +948 -0
- invarlock/reporting/html.py +32 -0
- invarlock/reporting/normalizer.py +235 -0
- invarlock/reporting/policy_utils.py +517 -0
- invarlock/reporting/primary_metric_utils.py +265 -0
- invarlock/reporting/render.py +1442 -0
- invarlock/reporting/report.py +903 -0
- invarlock/reporting/report_types.py +278 -0
- invarlock/reporting/utils.py +175 -0
- invarlock/reporting/validate.py +631 -0
- invarlock/security.py +176 -0
- invarlock/sparsity_utils.py +323 -0
- invarlock/utils/__init__.py +150 -0
- invarlock/utils/digest.py +45 -0
- invarlock-0.2.0.dist-info/METADATA +586 -0
- invarlock-0.2.0.dist-info/RECORD +132 -0
- invarlock-0.2.0.dist-info/WHEEL +5 -0
- invarlock-0.2.0.dist-info/entry_points.txt +20 -0
- invarlock-0.2.0.dist-info/licenses/LICENSE +201 -0
- invarlock-0.2.0.dist-info/top_level.txt +1 -0
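As a quick sanity check, the module list above can be reproduced locally from the downloaded wheel with the standard-library zipfile module. This is a minimal sketch, not part of the published package; the wheel filename is inferred from this page and the local path is an assumption.

import zipfile

# List every file shipped in the wheel (should match the listing above,
# including invarlock-0.2.0.dist-info/RECORD).
with zipfile.ZipFile("invarlock-0.2.0-py3-none-any.whl") as wheel:
    for name in sorted(wheel.namelist()):
        print(name)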
invarlock/cli/commands/doctor.py

@@ -0,0 +1,1358 @@

"""
InvarLock CLI Doctor Command
========================

Handles the 'invarlock doctor' command for health checks.
"""

import importlib.util
import os as _os
import platform as _platform
import shutil as _shutil
import sys
import warnings
from collections.abc import Callable

import typer
from rich.console import Console
from rich.table import Table

from ..constants import DOCTOR_FORMAT_VERSION

# Exact wording constant for determinism warning (kept in one place)
DETERMINISM_SHARDS_WARNING = "Provider workers > 0 without deterministic_shards=True; enable deterministic_shards or set workers=0 for determinism."

console = Console()


def _cross_check_reports(
    baseline_report: str | None,
    subject_report: str | None,
    *,
    cfg_metric_kind: str | None,
    strict: bool,
    profile: str | None,
    json_out: bool,
    console: Console,
    add_fn: Callable[..., None],
) -> bool:
    """Perform baseline vs subject cross-checks and report findings."""
    had_error = False
    try:
        import json as _json_cc
        from pathlib import Path as _Path_cc

        if baseline_report and subject_report:
            bpath = _Path_cc(baseline_report)
            spath = _Path_cc(subject_report)
            if bpath.exists() and spath.exists():
                bdata = _json_cc.loads(bpath.read_text())
                sdata = _json_cc.loads(spath.read_text())
                bprov = bdata.get("provenance", {}) if isinstance(bdata, dict) else {}
                sprov = sdata.get("provenance", {}) if isinstance(sdata, dict) else {}
                # D009: tokenizer digest mismatch
                try:
                    bdig = bprov.get("provider_digest", {}) or {}
                    sdig = sprov.get("provider_digest", {}) or {}
                    btok = bdig.get("tokenizer_sha256")
                    stok = sdig.get("tokenizer_sha256")
                    if (
                        isinstance(btok, str)
                        and isinstance(stok, str)
                        and btok
                        and stok
                        and btok != stok
                    ):
                        add_fn(
                            "D009",
                            "warning",
                            "tokenizer digests differ between baseline and subject; run will abort in ci/release (E002).",
                            field="provenance.provider_digest.tokenizer_sha256",
                        )
                except Exception:
                    pass
                # D010: MLM mask digest missing (only for ppl_mlm)
                try:
                    bdig = bprov.get("provider_digest", {}) or {}
                    sdig = sprov.get("provider_digest", {}) or {}
                    btok = bdig.get("tokenizer_sha256")
                    stok = sdig.get("tokenizer_sha256")
                    bmask = bdig.get("masking_sha256")
                    smask = sdig.get("masking_sha256")
                    # Determine if PM is MLM from either report or config context
                    try:
                        pm_b = (
                            (bdata.get("metrics", {}) or {}).get("primary_metric", {})
                            or {}
                        ).get("kind")
                        pm_s = (
                            (sdata.get("metrics", {}) or {}).get("primary_metric", {})
                            or {}
                        ).get("kind")
                    except Exception:
                        pm_b = pm_s = None
                    is_mlm = False
                    for _k in (pm_b, pm_s, cfg_metric_kind):
                        try:
                            if isinstance(_k, str) and _k.lower() == "ppl_mlm":
                                is_mlm = True
                                break
                        except Exception:
                            pass
                    if (
                        is_mlm
                        and isinstance(btok, str)
                        and isinstance(stok, str)
                        and btok
                        and stok
                        and btok == stok
                        and (not bmask or not smask)
                    ):
                        add_fn(
                            "D010",
                            "warning",
                            "ppl_mlm with matching tokenizer but missing masking digests; ci/release may abort on mask parity.",
                            baseline_has_mask=bool(bmask),
                            subject_has_mask=bool(smask),
                        )
                except Exception:
                    pass
                # D011: split mismatch
                try:
                    bsplit = bprov.get("dataset_split")
                    ssplit = sprov.get("dataset_split")
                    if (
                        isinstance(bsplit, str)
                        and isinstance(ssplit, str)
                        and bsplit
                        and ssplit
                        and bsplit != ssplit
                    ):
                        sev = "error" if bool(strict) else "warning"
                        add_fn(
                            "D011",
                            sev,
                            f"dataset split mismatch (baseline={bsplit}, subject={ssplit})",
                            field="provenance.dataset_split",
                            baseline=bsplit,
                            subject=ssplit,
                        )
                        if sev == "error":
                            had_error = True
                except Exception:
                    pass
                # D012: Accuracy PM flagged as estimated/pseudo (warn in dev; error in ci/release)
                try:
                    spm = (sdata.get("metrics", {}) or {}).get(
                        "primary_metric", {}
                    ) or {}
                    kind = str(spm.get("kind", "")).lower()
                    if kind in {"accuracy", "vqa_accuracy"}:
                        estimated = bool(spm.get("estimated"))
                        counts_source = str(spm.get("counts_source", "")).lower()
                        if estimated or counts_source == "pseudo_config":
                            prof = None
                            try:
                                prof = str(
                                    (sdata.get("meta", {}) or {}).get("profile", "")
                                ).lower()
                            except Exception:
                                prof = None
                            prof_flag = None
                            try:
                                prof_flag = str(profile).lower() if profile else None
                            except Exception:
                                prof_flag = None
                            eff_prof = prof_flag or prof or "dev"
                            sev = "warning" if eff_prof == "dev" else "error"
                            add_fn(
                                "D012",
                                sev,
                                "accuracy primary metric uses pseudo/estimated counts; use labeled preset for measured accuracy.",
                                field="metrics.primary_metric",
                            )
                            if sev == "error":
                                had_error = True
                except Exception:
                    pass
    except Exception:
        pass
    return had_error


DATASET_SPLIT_FALLBACK_WARNING = "Dataset split was inferred via fallback; set dataset.split explicitly to avoid drift."


def doctor_command(
    config: str | None = typer.Option(
        None, "--config", "-c", help="Path to YAML config for preflight lints"
    ),
    profile: str | None = typer.Option(
        None, "--profile", help="Profile to apply for preflight (ci|release)"
    ),
    baseline: str | None = typer.Option(
        None, "--baseline", help="Optional baseline report to check pairing readiness"
    ),
    json_out: bool = typer.Option(
        False,
        "--json",
        help="Emit machine-readable JSON (suppresses human-readable output)",
    ),
    tier: str | None = typer.Option(
        None,
        "--tier",
        help="Policy tier for floors preview (conservative|balanced|aggressive)",
    ),
    baseline_report: str | None = typer.Option(
        None, "--baseline-report", help="Optional baseline report for cross-checks"
    ),
    subject_report: str | None = typer.Option(
        None, "--subject-report", help="Optional subject report for cross-checks"
    ),
    strict: bool = typer.Option(
        False,
        "--strict",
        help="Escalate certain warnings (e.g., split mismatch) to errors",
    ),
):
    """
    Perform health checks on InvarLock installation.

    Checks PyTorch, device availability, memory, and optional extras.
    """

    # Normalize Typer OptionInfo placeholders when invoked directly in tests
    def _is_optioninfo_like(obj: object) -> bool:
        try:
            # True for Typer's OptionInfo; robust to import shims/mocks
            cname = getattr(obj, "__class__", type(None)).__name__
            if cname == "OptionInfo":
                return True
            # Heuristic: has typical Typer OptionInfo attributes
            return hasattr(obj, "param_decls") and hasattr(obj, "default")
        except Exception:
            return False

    def _coerce_opt(val: object, *, bool_default: bool | None = None):
        if _is_optioninfo_like(val):
            if isinstance(bool_default, bool):
                return bool_default
            return None
        return val

    config = _coerce_opt(config)
    profile = _coerce_opt(profile)
    baseline = _coerce_opt(baseline)
    tier = _coerce_opt(tier)
    baseline_report = _coerce_opt(baseline_report)
    subject_report = _coerce_opt(subject_report)
    strict = bool(_coerce_opt(strict, bool_default=False))
    json_out = bool(_coerce_opt(json_out, bool_default=False))

    # Findings accumulator for --json mode
    findings: list[dict] = []

    def _add(code: str, severity: str, message: str, **extra: object) -> None:
        item = {"code": code, "severity": severity, "message": message}
        if extra:
            item.update(extra)
        findings.append(item)
        if not json_out:
            prefix = (
                "ERROR:"
                if severity == "error"
                else ("WARNING:" if severity == "warning" else "NOTE:")
            )
            typer.echo(f"{prefix} {message} [INVARLOCK:{code}]")

    # Early: surface tiny relax as a note when active (env-based)
    try:
        import os as __os

        if str(__os.environ.get("INVARLOCK_TINY_RELAX", "")).strip().lower() in {
            "1",
            "true",
            "yes",
            "on",
        }:
            _add(
                "D013",
                "note",
                "tiny relax (dev) active; gates widened and drift/overhead may be informational.",
                field="auto.tiny_relax",
            )
    except Exception:
        pass

    # Redirect rich Console output in JSON mode so no extra text is emitted
    if json_out:
        from io import StringIO

        global console
        console = Console(file=StringIO())

    if not json_out:
        console.print("🏥 InvarLock Health Check")
        console.print("=" * 50)

    # Environment facts (OS · Python · invarlock)
    try:
        from invarlock import __version__ as _invarlock_version  # type: ignore
    except Exception:
        _invarlock_version = "unknown"
    if not json_out:
        os_line = (
            f"OS: {_platform.system()} {_platform.release()} ({_platform.machine()})"
        )
        py_line = f"Python: {sys.version_info.major}.{sys.version_info.minor}.{sys.version_info.micro}"
        console.print(f"{os_line} · {py_line} · invarlock: {_invarlock_version}")

    health_status = True
    had_error = False
    cfg_metric_kind: str | None = None

    # Check core components
    try:
        from invarlock.core.registry import get_registry

        if not json_out:
            console.print("[green]✅ Core components available[/green]")
    except ImportError as e:
        if not json_out:
            console.print(f"[red]❌ Core components missing: {e}[/red]")
        health_status = False

    # Check PyTorch
    try:
        import torch

        if not json_out:
            console.print(f"[green]✅ PyTorch {torch.__version__}[/green]")

        # Device information
        from ..device import get_device_info

        device_info = get_device_info()
        if not json_out:
            console.print("\n🖥️ Device Information")

        for device_name, info in device_info.items():
            if device_name == "auto_selected":
                if not json_out:
                    console.print(f" ▶ Auto‑selected device: {info}")
                continue

            if info["available"]:
                if (
                    device_name == "cuda"
                    and isinstance(info, dict)
                    and "device_count" in info
                ):
                    if not json_out:
                        console.print(
                            f" [green]✅ {device_name.upper()}: {info['device_count']} device(s) - {info['device_name']} ({info['memory_total']})[/green]"
                        )
                else:
                    if not json_out:
                        console.print(
                            f" [green]✅ {device_name.upper()}: Available[/green]"
                        )
            else:
                if not json_out:
                    console.print(
                        f" [dim]❌ {device_name.upper()}: {info['info']}[/dim]"
                    )

        # CUDA triage details
        try:
            cuda_toolkit_found = bool(
                _shutil.which("nvcc") or _shutil.which("nvidia-smi")
            )
            torch_cuda_build = bool(getattr(torch.version, "cuda", None))
            cuda_available = bool(
                getattr(torch, "cuda", None) and torch.cuda.is_available()
            )
            if not json_out:
                console.print(
                    f" [dim]• CUDA toolkit: {'found' if cuda_toolkit_found else 'not found'} · "
                    f"torch CUDA build: {'yes' if torch_cuda_build else 'no'} · "
                    f"cuda.is_available(): {'true' if cuda_available else 'false'}[/dim]"
                )
        except Exception:
            pass

        # Memory check
        try:
            if torch.cuda.is_available():
                free_memory = torch.cuda.get_device_properties(0).total_memory / 1e9
                if not json_out:
                    console.print(f"\n💾 GPU Memory: {free_memory:.1f} GB total")
                if free_memory < 4.0:
                    if not json_out:
                        console.print(
                            "[yellow]⚠️ Warning: Less than 4GB GPU memory available[/yellow]"
                        )
        except Exception:
            pass

    except ImportError:
        if not json_out:
            console.print("[red]❌ PyTorch not available[/red]")
            console.print("Install with: pip install torch")
        health_status = False

    # Check optional dependencies
    if not json_out:
        console.print("\n📦 Optional Dependencies")

    optional_deps = [
        ("datasets", "Dataset loading (WikiText-2, etc.)"),
        ("transformers", "Hugging Face model support"),
        ("auto_gptq", "GPTQ quantization (Linux/CUDA only)"),
        ("autoawq", "AWQ quantization (Linux/CUDA only)"),
        ("bitsandbytes", "8/4-bit loading (GPU)"),
    ]

    # Query CUDA availability once
    try:
        import torch as _torch

        has_cuda = bool(getattr(_torch, "cuda", None) and _torch.cuda.is_available())
    except Exception:
        has_cuda = False

    for dep, description in optional_deps:
        spec = importlib.util.find_spec(dep)
        present = spec is not None
        extra_hint = {
            "datasets": "eval",
            "transformers": "adapters",
            "auto_gptq": "gptq",
            "autoawq": "awq",
            "bitsandbytes": "gpu",
        }.get(dep, dep)

        if dep == "bitsandbytes":
            # Avoid importing bnb to suppress noisy CPU-only warnings. Report based on CUDA.
            if not has_cuda:
                # GPU-only library; note and skip import
                if present:
                    if not json_out:
                        console.print(
                            " [yellow]⚠️ bitsandbytes — CUDA-only; GPU not detected on this host[/yellow]"
                        )
                else:
                    if not json_out:
                        console.print(
                            " [dim]⚠️ bitsandbytes — CUDA-only; not installed[/dim]"
                        )
                        console.print(
                            " → Install: pip install 'invarlock[gpu]'",
                            markup=False,
                        )
            else:
                # CUDA available; try a quiet import and detect CPU-only builds
                try:
                    with warnings.catch_warnings():
                        warnings.simplefilter("ignore")
                        if not json_out:
                            console.print(
                                " [green]✅ bitsandbytes — 8/4-bit loading (GPU)[/green]"
                            )
                except Exception:
                    if not json_out:
                        console.print(
                            " [yellow]⚠️ bitsandbytes — Present but CPU-only build detected[/yellow]"
                        )
                        console.print(
                            " → Reinstall with: pip install 'invarlock[gpu]' on a CUDA host",
                            markup=False,
                        )
            continue

        if not json_out:
            if present:
                console.print(f" [green]✅ {dep} — {description}[/green]")
            else:
                console.print(f" [yellow]⚠️ {dep} — {description}[/yellow]")
                # Remediation for platform-gated stacks
                if dep in {"auto_gptq", "autoawq"}:
                    console.print(
                        f" → Install: pip install 'invarlock[{extra_hint}]' # Linux + CUDA only",
                        markup=False,
                    )
                else:
                    console.print(
                        f" → Install: pip install 'invarlock[{extra_hint}]'",
                        markup=False,
                    )

    # Optional: Config preflight (determinism & provider)
    if config:
        console.print("\n🧪 Preflight Lints (config)")
        try:
            import json as _json
            from pathlib import Path

            from invarlock.eval.data import get_provider
            from invarlock.model_profile import detect_model_profile, resolve_tokenizer

            from ..commands.run import _resolve_metric_and_provider
            from ..config import apply_profile, load_config

            cfg = load_config(config)
            if profile:
                try:
                    cfg = apply_profile(cfg, profile)
                    console.print(f" ▶ Profile applied: {profile}")
                except Exception as _e:
                    console.print(f" [yellow]⚠️ Profile apply failed: {_e}[/yellow]")

            # Provider kind sanity (D001)
            try:
                SUPPORTED_PROVIDERS = {
                    "wikitext2",
                    "hf_text",
                    "synthetic",
                    "local_jsonl",
                }
                provider_cfg = getattr(
                    getattr(cfg, "dataset", object()), "provider", None
                )
                bad_kind: str | None = None

                # Helper to read mapping-like config values
                def _pget(obj, key: str) -> str | None:
                    try:
                        if isinstance(obj, dict):
                            return obj.get(key)  # type: ignore[return-value]
                        # support mapping-like config objects (_Obj)
                        if hasattr(obj, key):
                            return getattr(obj, key)  # type: ignore[return-value]
                        get = getattr(obj, "get", None)
                        if callable(get):  # type: ignore[call-arg]
                            return get(key)  # type: ignore[return-value]
                    except Exception:
                        return None
                    return None

                if isinstance(provider_cfg, dict):
                    k = str(provider_cfg.get("kind", "")).strip()
                    if not k or k not in SUPPORTED_PROVIDERS:
                        bad_kind = k or ""
                elif isinstance(provider_cfg, str):
                    if provider_cfg not in SUPPORTED_PROVIDERS:
                        bad_kind = provider_cfg
                else:
                    k2 = str(_pget(provider_cfg, "kind") or "").strip()
                    if not k2 or k2 not in SUPPORTED_PROVIDERS:
                        bad_kind = k2 or ""
                if bad_kind:
                    _add(
                        "D001",
                        "error",
                        f'dataset.provider.kind "{bad_kind}" is not supported. Use one of: wikitext2 | hf_text | synthetic | local_jsonl.',
                        field="dataset.provider.kind",
                        hint="Use one of: wikitext2 | hf_text | synthetic | local_jsonl",
                    )
                    had_error = True
                # Schema-level validations per provider kind (support mapping-like)
                kind_val = None
                if isinstance(provider_cfg, dict):
                    kind_val = provider_cfg.get("kind")
                else:
                    kind_val = _pget(provider_cfg, "kind")
                kind = str(kind_val or "").strip()
                if kind == "local_jsonl":
                    p = None
                    if isinstance(provider_cfg, dict):
                        p = (
                            provider_cfg.get("file")
                            or provider_cfg.get("path")
                            or provider_cfg.get("data_files")
                        )
                    else:
                        p = (
                            _pget(provider_cfg, "file")
                            or _pget(provider_cfg, "path")
                            or _pget(provider_cfg, "data_files")
                        )
                    try:
                        from pathlib import Path as _P

                        exists = bool(p) and _P(str(p)).exists()
                    except Exception:
                        exists = False
                    if not exists:
                        _add(
                            "D011",
                            "error",
                            "local_jsonl: path does not exist",
                            field="dataset.provider.file",
                        )
                        had_error = True
                    tf = None
                    if isinstance(provider_cfg, dict):
                        tf = str(provider_cfg.get("text_field", "")).strip() or "text"
                    else:
                        tf = (
                            str(_pget(provider_cfg, "text_field") or "").strip()
                            or "text"
                        )
                    if not tf:
                        _add(
                            "D012",
                            "warning",
                            "local_jsonl: set dataset.field.text or map 'text' to your column",
                            field="dataset.provider.text_field",
                        )
                if kind == "hf_text":
                    tf2 = None
                    if isinstance(provider_cfg, dict):
                        tf2 = str(provider_cfg.get("text_field", "")).strip() or "text"
                    else:
                        tf2 = (
                            str(_pget(provider_cfg, "text_field") or "").strip()
                            or "text"
                        )
                    if not tf2:
                        _add(
                            "D012",
                            "warning",
                            "hf_text: set dataset.field.text or map 'text' to your column",
                            field="dataset.provider.text_field",
                        )
            except Exception:
                pass

            # Resolve adapter & provider
            adapter_name = (
                str(getattr(cfg.model, "adapter", "")).lower()
                if hasattr(cfg, "model")
                else ""
            )
            model_id_raw = (
                str(getattr(cfg.model, "id", "")) if hasattr(cfg, "model") else ""
            )
            model_profile = detect_model_profile(
                model_id=model_id_raw, adapter=adapter_name
            )
            metric_kind_resolved, provider_kind, _metric_opts = (
                _resolve_metric_and_provider(
                    cfg, model_profile, resolved_loss_type=None
                )
            )
            try:
                cfg_metric_kind = str(metric_kind_resolved)
            except Exception:
                cfg_metric_kind = cfg_metric_kind
            if not json_out:
                console.print(
                    f" Metric: {metric_kind_resolved} · Provider: {provider_kind}"
                )

            # Resolve provider and tokenizer
            provider = get_provider(provider_kind)
            tokenizer, tok_hash = resolve_tokenizer(model_profile)
            if not json_out:
                console.print(
                    f" Tokenizer: {tokenizer.__class__.__name__} · hash={tok_hash}"
                )

            # CUDA preflight (D002)
            try:
                import torch as _torch

                requested_device = None
                try:
                    requested_device = getattr(
                        getattr(cfg, "runner", object()), "device", None
                    )
                except Exception:
                    requested_device = None
                if requested_device is None:
                    try:
                        requested_device = getattr(
                            getattr(cfg, "model", object()), "device", None
                        )
                    except Exception:
                        requested_device = None
                req = str(requested_device or "").lower()
                if req.startswith("cuda") and not (
                    getattr(_torch, "cuda", None) and _torch.cuda.is_available()
                ):
                    _add(
                        "D002",
                        "error",
                        "CUDA requested but not available (runner.device=cuda). Resolve drivers / install CUDA PyTorch.",
                        field="runner.device",
                    )
                    had_error = True
            except Exception:
                pass

            # Determinism guard rails: warn when provider.workers>0 without deterministic_shards
            try:
                provider_cfg = None
                if hasattr(cfg.dataset, "provider"):
                    provider_cfg = cfg.dataset.provider
                # Accept mapping-shaped provider configs
                workers = None
                det = None
                if isinstance(provider_cfg, dict):
                    workers = provider_cfg.get("workers")
                    det = provider_cfg.get("deterministic_shards")
                else:
                    # Support InvarLockConfig's _Obj wrapper with dict-like get()
                    try:
                        workers = provider_cfg.get("workers", None)  # type: ignore[attr-defined]
                        det = provider_cfg.get("deterministic_shards", None)  # type: ignore[attr-defined]
                    except Exception:
                        workers = workers
                        det = det
                # Legacy style might place workers directly under dataset
                if workers is None and hasattr(cfg.dataset, "workers"):
                    workers = cfg.dataset.workers
                if det is None and hasattr(cfg.dataset, "deterministic_shards"):
                    det = cfg.dataset.deterministic_shards
                workers_val = int(workers) if workers is not None else 0
                det_flag = bool(det) if det is not None else False
                if workers_val > 0 and not det_flag:
                    # Print the canonical message and include a human-readable hint token
                    if not json_out:
                        console.print(
                            f" [yellow]⚠️ {DETERMINISM_SHARDS_WARNING} (deterministic shards)[/yellow]"
                        )
            except Exception:
                # Best-effort linting only
                pass

            # Determinism hints (D004: low bootstrap reps)
            try:
                reps_val = None
                if hasattr(cfg, "eval") and hasattr(cfg.eval, "bootstrap"):
                    try:
                        reps_val = getattr(cfg.eval.bootstrap, "replicates", None)
                    except Exception:
                        reps_val = None
                if reps_val is not None:
                    try:
                        reps_val = int(reps_val)
                    except Exception:
                        reps_val = None
                if isinstance(reps_val, int) and reps_val < 200:
                    _add(
                        "D004",
                        "warning",
                        "bootstrap replicates (<200) may produce unstable CIs; increase reps or expect wider intervals.",
                        field="eval.bootstrap.replicates",
                    )
            except Exception:
                pass

            # Capacity estimation if available
            est = getattr(provider, "estimate_capacity", None)
            if callable(est):
                try:
                    seq_len = (
                        int(getattr(cfg.dataset, "seq_len", 512))
                        if hasattr(cfg, "dataset")
                        else 512
                    )
                    stride = (
                        int(getattr(cfg.dataset, "stride", seq_len // 2))
                        if hasattr(cfg, "dataset")
                        else seq_len // 2
                    )
                    preview_n = int(getattr(cfg.dataset, "preview_n", 0) or 0)
                    final_n = int(getattr(cfg.dataset, "final_n", 0) or 0)
                    cap = est(
                        tokenizer=tokenizer,
                        seq_len=seq_len,
                        stride=stride,
                        split=getattr(cfg.dataset, "split", "validation"),
                        target_total=preview_n + final_n,
                        fast_mode=True,
                    )
                    avail = cap.get("available_nonoverlap") or cap.get(
                        "candidate_limit"
                    )
                    console.print(
                        f" Capacity: available={avail} · seq_len={seq_len} · stride={stride}"
                    )
                    if isinstance(avail, int) and (preview_n + final_n) > avail:
                        console.print(
                            " [yellow]⚠️ Requested windows exceed provider capacity[/yellow]"
                        )
                    # Floors preview and capacity insufficiency (D007, D008)
                    try:
                        import math as _math

                        from invarlock.core.auto_tuning import TIER_POLICIES

                        use_tier = (tier or "balanced").lower()
                        metrics_policy = TIER_POLICIES.get(use_tier, {}).get(
                            "metrics", {}
                        )
                        pm_policy = (
                            metrics_policy.get("pm_ratio", {})
                            if isinstance(metrics_policy, dict)
                            else {}
                        )
                        acc_policy = (
                            metrics_policy.get("accuracy", {})
                            if isinstance(metrics_policy, dict)
                            else {}
                        )
                        min_tokens = int(pm_policy.get("min_tokens", 0) or 0)
                        token_frac = float(
                            pm_policy.get("min_token_fraction", 0.0) or 0.0
                        )
                        min_examples = int(acc_policy.get("min_examples", 0) or 0)
                        ex_frac = float(
                            acc_policy.get("min_examples_fraction", 0.0) or 0.0
                        )
                        # Publish policy meta for JSON output
                        try:
                            global POLICY_META
                            POLICY_META = {
                                "tier": use_tier,
                                "floors": {
                                    "pm_ratio": {
                                        "min_tokens": min_tokens,
                                        "min_token_fraction": token_frac,
                                    },
                                    "accuracy": {
                                        "min_examples": min_examples,
                                        "min_examples_fraction": ex_frac,
                                    },
                                },
                            }
                        except Exception:
                            pass
                        tokens_avail = cap.get("tokens_available")
                        examples_avail = cap.get("examples_available")
                        eff_tokens = int(min_tokens)
                        eff_examples = int(min_examples)
                        if isinstance(tokens_avail, int | float) and token_frac > 0:
                            eff_tokens = max(
                                eff_tokens,
                                int(_math.ceil(float(tokens_avail) * token_frac)),
                            )
                        if isinstance(examples_avail, int | float) and ex_frac > 0:
                            eff_examples = max(
                                eff_examples,
                                int(_math.ceil(float(examples_avail) * ex_frac)),
                            )
                        if eff_tokens > 0 or eff_examples > 0:
                            _add(
                                "D007",
                                "note",
                                f"Floors: tokens >= {eff_tokens} (effective), examples >= {eff_examples} (effective)",
                                tokens_min=eff_tokens,
                                examples_min=eff_examples,
                            )
                        insufficient = False
                        if (
                            isinstance(tokens_avail, int | float)
                            and eff_tokens > 0
                            and tokens_avail < eff_tokens
                        ):
                            insufficient = True
                        if (
                            isinstance(examples_avail, int | float)
                            and eff_examples > 0
                            and examples_avail < eff_examples
                        ):
                            insufficient = True
                        if insufficient:
                            _add(
                                "D008",
                                "error",
                                f"Insufficient capacity: tokens_available={tokens_avail}, examples_available={examples_avail} below effective floors",
                            )
                            had_error = True
                    except Exception:
                        pass
                except Exception as _e:
                    console.print(
                        f" [yellow]⚠️ Capacity estimation failed: {_e}[/yellow]"
                    )
            else:
                console.print(
                    " [dim]Provider does not expose estimate_capacity()[/dim]"
                )

            # Baseline pairing sanity
            if baseline:
                try:
                    bpath = Path(baseline)
                    if bpath.exists():
                        bdata = _json.loads(bpath.read_text())
                        has_windows = isinstance(bdata.get("evaluation_windows"), dict)
                        console.print(
                            f" Baseline windows: {'present' if has_windows else 'missing'}"
                        )
                        try:
                            prov = (
                                bdata.get("provenance", {})
                                if isinstance(bdata, dict)
                                else {}
                            )
                            if isinstance(prov, dict) and prov.get("split_fallback"):
                                _add(
                                    "D003",
                                    "warning",
                                    "dataset split fallback was used. Set dataset.provider.hf_dataset.split explicitly.",
                                )
                                if not json_out:
                                    console.print(
                                        f" [yellow]⚠️ {DATASET_SPLIT_FALLBACK_WARNING}[/yellow]"
                                    )
                        except Exception:
                            pass
                    else:
                        console.print(" [yellow]⚠️ Baseline not found[/yellow]")
                except Exception as _e:
                    console.print(f" [yellow]⚠️ Baseline check failed: {_e}[/yellow]")
        except Exception as e:
            console.print(f" [yellow]⚠️ Preflight failed: {e}[/yellow]")

    # Baseline quick check for split fallback visibility (even without --config)
    try:
        if (baseline or baseline_report) and not config:
            from json import loads as _json_loads
            from pathlib import Path as _Path

            bpath = _Path(baseline or baseline_report)
            if bpath.exists():
                bdata = _json_loads(bpath.read_text())
                prov = bdata.get("provenance", {}) if isinstance(bdata, dict) else {}
                if isinstance(prov, dict) and prov.get("split_fallback"):
                    _add(
                        "D003",
                        "warning",
                        "dataset split fallback was used. Set dataset.provider.hf_dataset.split explicitly.",
                    )
                    if not json_out:
                        console.print(
                            f" [yellow]⚠️ {DATASET_SPLIT_FALLBACK_WARNING}[/yellow]"
                        )
    except Exception:
        pass

    had_error = had_error or _cross_check_reports(
        baseline_report,
        subject_report,
        cfg_metric_kind=cfg_metric_kind,
        strict=bool(strict),
        profile=profile,
        json_out=json_out,
        console=console,
        add_fn=_add,
    )

    # D013: Tiny relax (dev) active — note only
    try:
        tiny_env = str(_os.environ.get("INVARLOCK_TINY_RELAX", "")).strip().lower() in {
            "1",
            "true",
            "yes",
            "on",
        }
    except Exception:
        tiny_env = False
    tiny_cert = False
    try:
        # Best-effort: detect from reports when provided
        import json as _json_d13
        from pathlib import Path as _Path_d13

        def _readsafe(p):
            try:
                return _json_d13.loads(_Path_d13(p).read_text()) if p else None
            except Exception:
                return None

        sb = _readsafe(subject_report) if subject_report else None
        bb = _readsafe(baseline_report) if baseline_report else None
        tiny_cert = bool(
            ((sb or {}).get("auto", {}) or {}).get("tiny_relax")
            or ((bb or {}).get("auto", {}) or {}).get("tiny_relax")
        )
    except Exception:
        tiny_cert = False
    if tiny_env or tiny_cert:
        _add(
            "D013",
            "note",
            "tiny relax (dev) active; gates widened and drift/overhead may be informational.",
            field="auto.tiny_relax",
        )

    # Check registry status
    try:
        from invarlock.core.registry import get_registry

        from .plugins import _check_plugin_extras

        if not json_out:
            console.print("\n🔌 Plugin Registry")
        registry = get_registry()
        if not json_out:
            console.print(f" Adapters: {len(registry.list_adapters())}")
            console.print(f" Edits: {len(registry.list_edits())}")
            console.print(f" Guards: {len(registry.list_guards())}")
        # Use module-level _os (avoid shadowing earlier uses)
        if _os.getenv("INVARLOCK_DISABLE_PLUGIN_DISCOVERY", "").strip() == "1":
            _add(
                "D006",
                "note",
                "Plugin discovery disabled; doctor will not check optional adapters.",
            )

        # Detail adapters with Origin/Mode/Backend/Version table
        def _gather_adapter_rows() -> list[dict]:
            names = registry.list_adapters()
            try:
                import torch as _t

                has_cuda = bool(getattr(_t, "cuda", None) and _t.cuda.is_available())
            except Exception:
                has_cuda = False
            is_linux = _platform.system().lower() == "linux"

            rows: list[dict] = []
            for n in names:
                info = registry.get_plugin_info(n, "adapters")
                module = str(info.get("module") or "")
                support = (
                    "auto"
                    if module.startswith("invarlock.adapters")
                    and n in {"hf_causal_auto", "hf_mlm_auto"}
                    else (
                        "core"
                        if module.startswith("invarlock.adapters")
                        else "optional"
                    )
                )
                origin = "core" if support in {"core", "auto"} else "plugin"
                mode = "auto-matcher" if support == "auto" else "adapter"

                backend, version = None, None
                status, enable = "ready", ""

                # Heuristic backend mapping without heavy imports
                if n in {
                    "hf_gpt2",
                    "hf_bert",
                    "hf_llama",
                    "hf_causal_auto",
                    "hf_mlm_auto",
                }:
                    # Transformers-based
                    backend = "transformers"
                    try:
                        import transformers as _tf  # type: ignore

                        version = getattr(_tf, "__version__", None)
                    except Exception:
                        version = None
                elif n == "hf_gptq":
                    backend = "auto-gptq"
                elif n == "hf_awq":
                    backend = "autoawq"
                elif n == "hf_bnb":
                    backend = "bitsandbytes"

                # Presence and platform gating
                if support == "optional":
                    # Check install presence
                    present = (
                        importlib.util.find_spec((backend or "").replace("-", "_"))
                        is not None
                        if backend
                        else False
                    )
                    if not present:
                        status = "needs_extra"
                        hint = {
                            "hf_gptq": "invarlock[gptq]",
                            "hf_awq": "invarlock[awq]",
                            "hf_bnb": "invarlock[gpu]",
                        }.get(n)
                        if hint:
                            enable = f"pip install '{hint}'"
                # Special-case: hf_onnx is a core adapter but requires Optimum/ONNXRuntime
                if n == "hf_onnx":
                    backend = backend or "onnxruntime"
                    present = (
                        importlib.util.find_spec("optimum.onnxruntime") is not None
                        or importlib.util.find_spec("onnxruntime") is not None
                    )
                    if not present:
                        status = "needs_extra"
                        enable = "pip install 'invarlock[onnx]'"
                # Platform checks
                if backend in {"auto-gptq", "autoawq"} and not is_linux:
                    status = "unsupported"
                    enable = "Linux-only"
                if backend == "bitsandbytes" and not has_cuda:
                    status = "unsupported"
                    enable = "Requires CUDA"

                rows.append(
                    {
                        "name": n,
                        "origin": origin,
                        "mode": mode,
                        "backend": backend,
                        "version": version,
                        "status": status,
                        "enable": enable,
                    }
                )
            return rows

        def _fmt_backend_ver(
            backend: str | None, version: str | None
        ) -> tuple[str, str]:
            b = backend or "—"
            v = f"=={version}" if backend and version else "—"
            return b, v

        # Build adapter rows; gracefully handle optional Optimum import errors by
        # falling back to a lightweight rows helper that only probes availability.
        try:
            all_rows = _gather_adapter_rows()
        except Exception as _adapter_exc:
            # Known benign case: optional Optimum/ONNXRuntime missing on host
            if "optimum" in str(_adapter_exc).lower():
                try:
                    from invarlock.cli.doctor_helpers import (
                        get_adapter_rows as _rows_fallback,
                    )

                    all_rows = _rows_fallback()
                except Exception:
                    raise  # re-raise if fallback also fails
            else:
                raise
        if all_rows:
            # Counts over full set
            total = len(all_rows)
            ready = sum(1 for r in all_rows if r["status"] == "ready")
            need = sum(1 for r in all_rows if r["status"] == "needs_extra")
            unsupported = sum(1 for r in all_rows if r["status"] == "unsupported")
            auto = sum(1 for r in all_rows if r["mode"] == "auto-matcher")
            # Hide unsupported rows in the display
            rows = [r for r in all_rows if r["status"] != "unsupported"]
            table = Table(
                title=f"Adapters — total: {total} · ready: {ready} · auto: {auto} · missing-extras: {need} · unsupported: {unsupported}"
            )
            table.add_column("Adapter", style="cyan")
            table.add_column("Origin", style="dim")
            table.add_column("Mode", style="dim")
            table.add_column("Backend", style="magenta")
            table.add_column("Version", style="magenta")
            table.add_column("Status / Action", style="green")
            for r in rows:
                backend_disp, ver_disp = _fmt_backend_ver(r["backend"], r["version"])
                if r["mode"] == "auto-matcher":
                    status_disp = "🧩 Auto (selects best hf_* adapter)"
                elif r["status"] == "ready":
                    status_disp = "✅ Ready"
                elif r["status"] == "needs_extra":
                    status_disp = (
                        f"⛔ Needs extra → {r['enable']}"
                        if r["enable"]
                        else "⛔ Needs extra"
                    )
                else:
                    status_disp = r["status"]
                table.add_row(
                    r["name"],
                    r["origin"].capitalize(),
                    "Auto‑matcher" if r["mode"] == "auto-matcher" else "Adapter",
                    backend_disp,
                    ver_disp,
                    status_disp,
                )
            console.print(table)

        # Guards table
        def _gather_generic_rows(kind: str) -> list[dict]:
            names = (
                registry.list_guards() if kind == "guards" else registry.list_edits()
            )
            rows: list[dict] = []
            for n in names:
                info = registry.get_plugin_info(n, kind)
                module = str(info.get("module") or "")
                origin = "core" if module.startswith(f"invarlock.{kind}") else "plugin"
                mode = "guard" if kind == "guards" else "edit"
                # Extras
                status = "ready"
                enable = ""
                try:
                    extras = _check_plugin_extras(n, kind)
                except Exception:
                    extras = ""
                if (
                    isinstance(extras, str)
                    and extras.startswith("⚠️")
                    and "missing" in extras
                ):
                    status = "needs_extra"
                    hint = extras.split("missing", 1)[-1].strip()
                    if hint:
                        enable = f"pip install '{hint}'"
                rows.append(
                    {
                        "name": n,
                        "origin": origin,
                        "mode": mode,
                        "backend": None,
                        "version": None,
                        "status": status,
                        "enable": enable,
                    }
                )
            return rows

        for kind, title in (("guards", "Guards"), ("edits", "Edits")):
            grows = _gather_generic_rows(kind)
            if grows:
                total = len(grows)
                ready = sum(1 for r in grows if r["status"] == "ready")
                need = sum(1 for r in grows if r["status"] == "needs_extra")
                table = Table(
                    title=f"{title} — total: {total} · ready: {ready} · missing-extras: {need}"
                )
                table.add_column("Name", style="cyan")
                table.add_column("Origin", style="dim")
                table.add_column("Mode", style="dim")
                table.add_column("Backend", style="magenta")
                table.add_column("Version", style="magenta")
                table.add_column("Status / Action", style="green")
                for r in grows:
                    b, v = _fmt_backend_ver(r["backend"], r["version"])

                    status_disp = (
                        "✅ Ready"
                        if r["status"] == "ready"
                        else (
                            f"⛔ Needs extra → {r['enable']}"
                            if r["enable"]
                            else "⛔ Needs extra"
                        )
                    )
                    table.add_row(
                        r["name"],
                        r["origin"].capitalize(),
                        ("Guard" if r["mode"] == "guard" else "Edit"),
                        b,
                        v,
                        status_disp,
                    )
                console.print(table)

        # Datasets summary (best effort; non-fatal)
        try:
            from invarlock.eval.data import list_providers  # type: ignore

            providers = sorted(list_providers())
            if providers:
                dtable = Table(title="Datasets")
                dtable.add_column("Provider", style="cyan")
                dtable.add_column("Network", style="dim")
                dtable.add_column("Status", style="green")
                dtable.add_column("Params", style="dim")
                from invarlock.cli.constants import (
                    PROVIDER_NETWORK as provider_network,
                )
                from invarlock.cli.constants import (
                    PROVIDER_PARAMS as provider_params,
                )

                def _net_label(name: str) -> str:
                    val = (provider_network.get(name, "") or "").lower()
                    if val == "cache":
                        return "Cache/Net"
                    if val == "yes":
                        return "Yes"
                    if val == "no":
                        return "No"
                    return "Unknown"

                for pname in providers:
                    dtable.add_row(
                        pname,
                        _net_label(pname),
                        "✓ Available",
                        provider_params.get(pname, "-"),
                    )
                console.print(dtable)
        except Exception:
            pass

        if not json_out:
            console.print(
                "[dim]Legend: ✅ Ready = usable now · 🧩 Auto‑matcher = picks an adapter for you[/dim]"
            )
            console.print(
                "[dim]Hints: use --json · filter with --only ready|core|plugin|auto|unsupported[/dim]"
            )
    except Exception as e:
        # Gracefully handle missing optional Optimum stack
        if "optimum" in str(e).lower():
            if not json_out:
                console.print(
                    " [yellow]⚠️ Optional Optimum/ONNXRuntime missing; hf_onnx will be shown as needs_extra[/yellow]"
                )
            # Do not mark overall health as failed for optional extras
        else:
            if not json_out:
                console.print(f" [red]❌ Registry error: {e}[/red]")
            health_status = False

    # Final status / JSON output
    exit_code = 0 if (health_status and not had_error) else 1
    if json_out:
        import json as _json_out

        # Sort findings deterministically by severity then code
        _order = {"error": 0, "warning": 1, "note": 2}
        try:
            findings.sort(
                key=lambda f: (_order.get(f.get("severity"), 9), f.get("code", "Z999"))
            )
        except Exception:
            pass
        result_obj = {
            "format_version": DOCTOR_FORMAT_VERSION,
            "summary": {
                "errors": sum(1 for f in findings if f.get("severity") == "error"),
                "warnings": sum(1 for f in findings if f.get("severity") == "warning"),
                "notes": sum(1 for f in findings if f.get("severity") == "note"),
            },
            "policy": POLICY_META
            if "POLICY_META" in globals()
            else {"tier": (tier or "balanced").lower()},
            "findings": findings,
            "resolution": {"exit_code": exit_code},
        }
        typer.echo(_json_out.dumps(result_obj))
        raise typer.Exit(exit_code)
    else:
        console.print("\n" + "=" * 50)
        if exit_code == 0:
            console.print(
                "[green]✅ InvarLock installation is healthy (exit code 0)[/green]"
            )
        else:
            console.print("[red]❌ InvarLock installation has issues[/red]")
            console.print(
                "Run: pip install invarlock[all] to install missing dependencies"
            )
        sys.exit(exit_code)
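For orientation, the command defined above can be exercised end to end in JSON mode: findings (codes D001-D013) land in the findings array and the process exit status mirrors resolution.exit_code. A minimal sketch, assuming the package is installed so that `python -m invarlock` dispatches to this CLI; the config path is hypothetical and not part of the diff.

import json
import subprocess
import sys

# Run `doctor` in machine-readable mode; --config/--tier are optional flags
# from doctor_command above, and cfg.yaml here is a placeholder.
proc = subprocess.run(
    [sys.executable, "-m", "invarlock", "doctor",
     "--config", "cfg.yaml", "--tier", "balanced", "--json"],
    capture_output=True,
    text=True,
)
report = json.loads(proc.stdout)
# doctor_command emits: format_version, summary{errors,warnings,notes},
# policy, findings[{code,severity,message,...}], resolution{exit_code}.
print(report["summary"], report["resolution"]["exit_code"], proc.returncode)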