iints-sdk-python35 0.0.18__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- iints/__init__.py +183 -0
- iints/analysis/__init__.py +12 -0
- iints/analysis/algorithm_xray.py +387 -0
- iints/analysis/baseline.py +92 -0
- iints/analysis/clinical_benchmark.py +198 -0
- iints/analysis/clinical_metrics.py +551 -0
- iints/analysis/clinical_tir_analyzer.py +136 -0
- iints/analysis/diabetes_metrics.py +43 -0
- iints/analysis/edge_efficiency.py +33 -0
- iints/analysis/edge_performance_monitor.py +315 -0
- iints/analysis/explainability.py +94 -0
- iints/analysis/explainable_ai.py +232 -0
- iints/analysis/hardware_benchmark.py +221 -0
- iints/analysis/metrics.py +117 -0
- iints/analysis/population_report.py +188 -0
- iints/analysis/reporting.py +345 -0
- iints/analysis/safety_index.py +311 -0
- iints/analysis/sensor_filtering.py +54 -0
- iints/analysis/validator.py +273 -0
- iints/api/__init__.py +0 -0
- iints/api/base_algorithm.py +307 -0
- iints/api/registry.py +103 -0
- iints/api/template_algorithm.py +195 -0
- iints/assets/iints_logo.png +0 -0
- iints/cli/__init__.py +0 -0
- iints/cli/cli.py +2598 -0
- iints/core/__init__.py +1 -0
- iints/core/algorithms/__init__.py +0 -0
- iints/core/algorithms/battle_runner.py +138 -0
- iints/core/algorithms/correction_bolus.py +95 -0
- iints/core/algorithms/discovery.py +92 -0
- iints/core/algorithms/fixed_basal_bolus.py +58 -0
- iints/core/algorithms/hybrid_algorithm.py +92 -0
- iints/core/algorithms/lstm_algorithm.py +138 -0
- iints/core/algorithms/mock_algorithms.py +162 -0
- iints/core/algorithms/pid_controller.py +88 -0
- iints/core/algorithms/standard_pump_algo.py +64 -0
- iints/core/device.py +0 -0
- iints/core/device_manager.py +64 -0
- iints/core/devices/__init__.py +3 -0
- iints/core/devices/models.py +160 -0
- iints/core/patient/__init__.py +9 -0
- iints/core/patient/bergman_model.py +341 -0
- iints/core/patient/models.py +285 -0
- iints/core/patient/patient_factory.py +117 -0
- iints/core/patient/profile.py +41 -0
- iints/core/safety/__init__.py +12 -0
- iints/core/safety/config.py +37 -0
- iints/core/safety/input_validator.py +95 -0
- iints/core/safety/supervisor.py +39 -0
- iints/core/simulation/__init__.py +0 -0
- iints/core/simulation/scenario_parser.py +61 -0
- iints/core/simulator.py +874 -0
- iints/core/supervisor.py +367 -0
- iints/data/__init__.py +53 -0
- iints/data/adapter.py +142 -0
- iints/data/column_mapper.py +398 -0
- iints/data/datasets.json +132 -0
- iints/data/demo/__init__.py +1 -0
- iints/data/demo/demo_cgm.csv +289 -0
- iints/data/importer.py +275 -0
- iints/data/ingestor.py +162 -0
- iints/data/nightscout.py +128 -0
- iints/data/quality_checker.py +550 -0
- iints/data/registry.py +166 -0
- iints/data/tidepool.py +38 -0
- iints/data/universal_parser.py +813 -0
- iints/data/virtual_patients/clinic_safe_baseline.yaml +9 -0
- iints/data/virtual_patients/clinic_safe_hyper_challenge.yaml +9 -0
- iints/data/virtual_patients/clinic_safe_hypo_prone.yaml +9 -0
- iints/data/virtual_patients/clinic_safe_midnight.yaml +9 -0
- iints/data/virtual_patients/clinic_safe_pizza.yaml +9 -0
- iints/data/virtual_patients/clinic_safe_stress_meal.yaml +9 -0
- iints/data/virtual_patients/default_patient.yaml +11 -0
- iints/data/virtual_patients/patient_559_config.yaml +11 -0
- iints/emulation/__init__.py +80 -0
- iints/emulation/legacy_base.py +414 -0
- iints/emulation/medtronic_780g.py +337 -0
- iints/emulation/omnipod_5.py +367 -0
- iints/emulation/tandem_controliq.py +393 -0
- iints/highlevel.py +451 -0
- iints/learning/__init__.py +3 -0
- iints/learning/autonomous_optimizer.py +194 -0
- iints/learning/learning_system.py +122 -0
- iints/metrics.py +34 -0
- iints/population/__init__.py +11 -0
- iints/population/generator.py +131 -0
- iints/population/runner.py +327 -0
- iints/presets/__init__.py +28 -0
- iints/presets/presets.json +114 -0
- iints/research/__init__.py +30 -0
- iints/research/config.py +68 -0
- iints/research/dataset.py +319 -0
- iints/research/losses.py +73 -0
- iints/research/predictor.py +329 -0
- iints/scenarios/__init__.py +3 -0
- iints/scenarios/generator.py +92 -0
- iints/templates/__init__.py +0 -0
- iints/templates/default_algorithm.py +91 -0
- iints/templates/scenarios/__init__.py +0 -0
- iints/templates/scenarios/chaos_insulin_stacking.json +29 -0
- iints/templates/scenarios/chaos_runaway_ai.json +25 -0
- iints/templates/scenarios/example_scenario.json +35 -0
- iints/templates/scenarios/exercise_stress.json +30 -0
- iints/utils/__init__.py +3 -0
- iints/utils/plotting.py +50 -0
- iints/utils/run_io.py +152 -0
- iints/validation/__init__.py +133 -0
- iints/validation/schemas.py +94 -0
- iints/visualization/__init__.py +34 -0
- iints/visualization/cockpit.py +691 -0
- iints/visualization/uncertainty_cloud.py +612 -0
- iints_sdk_python35-0.0.18.dist-info/METADATA +225 -0
- iints_sdk_python35-0.0.18.dist-info/RECORD +118 -0
- iints_sdk_python35-0.0.18.dist-info/WHEEL +5 -0
- iints_sdk_python35-0.0.18.dist-info/entry_points.txt +10 -0
- iints_sdk_python35-0.0.18.dist-info/licenses/LICENSE +28 -0
- iints_sdk_python35-0.0.18.dist-info/top_level.txt +1 -0
iints/cli/cli.py
ADDED
|
@@ -0,0 +1,2598 @@
|
|
|
1
|
+
import typer # type: ignore
|
|
2
|
+
import concurrent.futures
|
|
3
|
+
from pathlib import Path
|
|
4
|
+
from typing import Dict, Any, Union, List, Tuple, Optional
|
|
5
|
+
from dataclasses import asdict
|
|
6
|
+
from typing_extensions import Annotated
|
|
7
|
+
from pydantic import ValidationError
|
|
8
|
+
import os
|
|
9
|
+
import importlib.util
|
|
10
|
+
import sys
|
|
11
|
+
import json
|
|
12
|
+
import yaml # Added for Virtual Patient Registry
|
|
13
|
+
import pandas as pd # Added for DataFrame in benchmark results
|
|
14
|
+
|
|
15
|
+
from rich.console import Console # type: ignore # For pretty printing
|
|
16
|
+
from rich.table import Table # type: ignore # For comparison table
|
|
17
|
+
from rich.panel import Panel # type: ignore # For nicer auto-doc output
|
|
18
|
+
|
|
19
|
+
import iints # Import the top-level SDK package
|
|
20
|
+
from iints.analysis.baseline import run_baseline_comparison, write_baseline_comparison
|
|
21
|
+
from iints.api.registry import list_algorithm_plugins
|
|
22
|
+
from iints.core.patient.profile import PatientProfile
|
|
23
|
+
from iints.core.safety import SafetyConfig
|
|
24
|
+
from iints.scenarios import ScenarioGeneratorConfig, generate_random_scenario
|
|
25
|
+
from iints.data.nightscout import NightscoutConfig, import_nightscout
|
|
26
|
+
from iints.data.tidepool import TidepoolClient
|
|
27
|
+
from iints.data.importer import (
|
|
28
|
+
export_demo_csv,
|
|
29
|
+
export_standard_csv,
|
|
30
|
+
guess_column_mapping,
|
|
31
|
+
import_cgm_dataframe,
|
|
32
|
+
load_demo_dataframe,
|
|
33
|
+
scenario_from_csv,
|
|
34
|
+
scenario_from_dataframe,
|
|
35
|
+
)
|
|
36
|
+
from iints.data.registry import (
|
|
37
|
+
load_dataset_registry,
|
|
38
|
+
get_dataset,
|
|
39
|
+
fetch_dataset,
|
|
40
|
+
DatasetFetchError,
|
|
41
|
+
DatasetRegistryError,
|
|
42
|
+
)
|
|
43
|
+
from iints.utils.run_io import (
|
|
44
|
+
build_run_metadata,
|
|
45
|
+
build_run_manifest,
|
|
46
|
+
generate_run_id,
|
|
47
|
+
maybe_sign_manifest,
|
|
48
|
+
resolve_output_dir,
|
|
49
|
+
resolve_seed,
|
|
50
|
+
write_json,
|
|
51
|
+
)
|
|
52
|
+
from iints.validation import (
|
|
53
|
+
build_stress_events,
|
|
54
|
+
format_validation_error,
|
|
55
|
+
load_scenario,
|
|
56
|
+
load_patient_config,
|
|
57
|
+
load_patient_config_by_name,
|
|
58
|
+
migrate_scenario_dict,
|
|
59
|
+
scenario_to_payloads,
|
|
60
|
+
scenario_warnings,
|
|
61
|
+
validate_patient_config_dict,
|
|
62
|
+
validate_scenario_dict,
|
|
63
|
+
)
|
|
64
|
+
|
|
65
|
+
|
|
66
|
+
# Root Typer application plus one sub-application per CLI topic area.
# These names are referenced by the @app.command()/@*_app.command()
# decorators throughout the rest of this module.
app = typer.Typer(help="IINTS-AF SDK CLI - Intelligent Insulin Titration System for Artificial Pancreas research.")
docs_app = typer.Typer(help="Generate documentation and technical summaries for IINTS-AF components.")
presets_app = typer.Typer(help="Clinic-safe presets and quickstart runs.")
profiles_app = typer.Typer(help="Patient profiles and physiological presets.")
data_app = typer.Typer(help="Official datasets and data packs.")
scenarios_app = typer.Typer(help="Scenario generation and utilities.")
algorithms_app = typer.Typer(help="Algorithm registry and plugins.")
# Mount each sub-application under its own command namespace,
# e.g. `iints docs ...`, `iints presets ...`, `iints data ...`.
app.add_typer(docs_app, name="docs")
app.add_typer(presets_app, name="presets")
app.add_typer(profiles_app, name="profiles")
app.add_typer(data_app, name="data")
app.add_typer(scenarios_app, name="scenarios")
app.add_typer(algorithms_app, name="algorithms")
|
|
79
|
+
|
|
80
|
+
def _load_algorithm_instance(algo: Path, console: Console) -> iints.InsulinAlgorithm:
    """Import a user algorithm file and return an instance of its algorithm class.

    Loads *algo* as a standalone module, then instantiates the first class
    found in it that subclasses ``iints.InsulinAlgorithm``.  Every failure
    mode is reported on *console* and converted into ``typer.Exit(code=1)``
    so the CLI aborts cleanly instead of dumping a traceback.
    """
    if not algo.is_file():
        console.print(f"[bold red]Error: Algorithm file '{algo}' not found.[/bold red]")
        raise typer.Exit(code=1)

    module_name = algo.stem
    spec = importlib.util.spec_from_file_location(module_name, algo)
    if spec is None:
        console.print(f"[bold red]Error: Could not load module spec for {algo}[/bold red]")
        raise typer.Exit(code=1)

    module = importlib.util.module_from_spec(spec)
    # Inject the SDK so user files can reference `iints.InsulinAlgorithm`
    # even if they forgot to import iints themselves.
    module.iints = iints  # type: ignore
    sys.modules[module_name] = module
    try:
        if spec.loader:
            spec.loader.exec_module(module)
        else:
            raise ImportError(f"Could not load module loader for {algo}")
    except Exception as e:
        # Remove the half-initialized module so a later retry does not
        # find a stale, broken entry in sys.modules.
        sys.modules.pop(module_name, None)
        console.print(f"[bold red]Error loading algorithm module {algo}: {e}[/bold red]")
        raise typer.Exit(code=1)

    # Return the first user-defined subclass of InsulinAlgorithm
    # (skipping the base class itself, which the user file may import).
    for obj in module.__dict__.values():
        if isinstance(obj, type) and issubclass(obj, iints.InsulinAlgorithm) and obj is not iints.InsulinAlgorithm:
            return obj()

    console.print(f"[bold red]Error: No subclass of InsulinAlgorithm found in {algo}[/bold red]")
    raise typer.Exit(code=1)
|
|
109
|
+
|
|
110
|
+
|
|
111
|
+
def _load_algorithm_instance_silent(algo: Path) -> iints.InsulinAlgorithm:
    """Import a user algorithm file and instantiate its algorithm class.

    Exception-raising counterpart of ``_load_algorithm_instance`` for use in
    worker processes where there is no console: failures surface as
    ``FileNotFoundError`` / ``ImportError`` (or whatever the user module
    raised) instead of being printed.
    """
    if not algo.is_file():
        raise FileNotFoundError(f"Algorithm file '{algo}' not found.")
    module_name = algo.stem
    spec = importlib.util.spec_from_file_location(module_name, algo)
    if spec is None:
        raise ImportError(f"Could not load module spec for {algo}")
    module = importlib.util.module_from_spec(spec)
    # Make the SDK visible inside the user module (see _load_algorithm_instance).
    module.iints = iints  # type: ignore
    sys.modules[module_name] = module
    try:
        if spec.loader:
            spec.loader.exec_module(module)
        else:
            raise ImportError(f"Could not load module loader for {algo}")
    except BaseException:
        # Don't leave a half-initialized module behind in sys.modules.
        sys.modules.pop(module_name, None)
        raise
    # First user-defined subclass of InsulinAlgorithm wins.
    for obj in module.__dict__.values():
        if isinstance(obj, type) and issubclass(obj, iints.InsulinAlgorithm) and obj is not iints.InsulinAlgorithm:
            return obj()
    raise ImportError(f"No subclass of InsulinAlgorithm found in {algo}")
|
|
129
|
+
|
|
130
|
+
def _load_presets() -> List[Dict[str, Any]]:
    """Load the bundled clinic-safe preset definitions from package data."""
    if sys.version_info < (3, 9):
        # Python 3.8: the files() API is unavailable; use the legacy reader.
        from importlib import resources

        raw = resources.read_text("iints.presets", "presets.json")
    else:
        from importlib.resources import files

        raw = files("iints.presets").joinpath("presets.json").read_text()
    return json.loads(raw)
|
|
138
|
+
|
|
139
|
+
def _get_preset(name: str) -> Dict[str, Any]:
    """Return the preset definition called *name*; raise KeyError if unknown."""
    found = next(
        (entry for entry in _load_presets() if entry.get("name") == name),
        None,
    )
    if found is None:
        raise KeyError(name)
    return found
|
|
145
|
+
|
|
146
|
+
|
|
147
|
+
def _parse_column_mapping(items: List[str], console: Console) -> Dict[str, str]:
    """Parse ``key=value`` strings into a dict, aborting the CLI on bad input.

    Both a missing ``=`` and an empty key or value (after stripping) are
    rejected with the same error message and exit code 1.
    """
    parsed: Dict[str, str] = {}
    for raw in items:
        key, sep, value = raw.partition("=")
        key, value = key.strip(), value.strip()
        if not sep or not key or not value:
            console.print(f"[bold red]Invalid mapping '{raw}'. Use key=value.[/bold red]")
            raise typer.Exit(code=1)
        parsed[key] = value
    return parsed
|
|
161
|
+
|
|
162
|
+
|
|
163
|
+
def _build_safety_config_from_options(**kwargs: Any) -> Optional[SafetyConfig]:
    """Build a SafetyConfig from CLI overrides, or None if nothing was set.

    Only keyword arguments whose value is not None are applied on top of the
    SafetyConfig defaults; if every value is None the caller gets None so the
    simulator can use its own default config.
    """
    overrides = {key: value for key, value in kwargs.items() if value is not None}
    if not overrides:
        return None
    config = SafetyConfig()
    for attr, value in overrides.items():
        setattr(config, attr, value)
    return config
|
|
171
|
+
|
|
172
|
+
|
|
173
|
+
def _build_safety_config_from_dict(values: Optional[Dict[str, Any]]) -> Optional[SafetyConfig]:
    """Hydrate a SafetyConfig from a plain dict; None/empty input yields None.

    Unlike ``_build_safety_config_from_options``, every key in *values* is
    applied verbatim — including explicit None values.
    """
    if values:
        config = SafetyConfig()
        for attr, setting in values.items():
            setattr(config, attr, setting)
        return config
    return None
|
|
180
|
+
|
|
181
|
+
|
|
182
|
+
def _run_parallel_job(job: Dict[str, Any]) -> Dict[str, Any]:
    """Run a single simulation job and return a flat summary dict.

    Worker entry point for parallel batch runs: *job* is a plain dict so it
    can cross process boundaries, and the return value is likewise a small
    dict of paths and safety stats.
    """
    scenario_file = Path(job["scenario_path"])
    run_dir = Path(job["output_dir"])
    algorithm = _load_algorithm_instance_silent(Path(job["algo"]))
    safety = _build_safety_config_from_dict(job.get("safety_overrides"))

    outputs = iints.run_simulation(
        algorithm=algorithm,
        scenario=str(scenario_file),
        patient_config=job["patient_config"],
        duration_minutes=int(job["duration_minutes"]),
        time_step=int(job["time_step"]),
        seed=job.get("seed"),
        output_dir=run_dir,
        compare_baselines=bool(job.get("compare_baselines")),
        export_audit=bool(job.get("export_audit")),
        generate_report=bool(job.get("generate_report")),
        safety_config=safety,
    )
    # Drop the raw results payload before summarizing — only the file paths
    # and safety stats below are returned to the parent (presumably the raw
    # results object is large; confirm against iints.run_simulation).
    outputs.pop("results", None)
    safety_report = outputs.get("safety_report", {})

    summary = {
        "scenario": scenario_file.stem,
        "patient": job["patient_label"],
        "output_dir": str(run_dir),
        "results_csv": outputs.get("results_csv"),
        "report_pdf": outputs.get("report_pdf"),
        "run_manifest": outputs.get("run_manifest_path"),
        "terminated_early": safety_report.get("terminated_early", False),
        "total_violations": safety_report.get("total_violations", 0),
        "error": "",
    }
    return summary
|
|
216
|
+
|
|
217
|
+
@app.command()
def evaluate(
    algo: Annotated[Path, typer.Option(help="Path to the algorithm Python file")],
    population: Annotated[int, typer.Option(help="Number of virtual patients to simulate")] = 100,
    patient_config_name: Annotated[str, typer.Option("--patient-config", help="Base patient configuration name")] = "default_patient",
    patient_config_path: Annotated[Optional[Path], typer.Option("--patient-config-path", help="Path to base patient config YAML")] = None,
    scenario_path: Annotated[Optional[Path], typer.Option("--scenario", help="Path to scenario JSON")] = None,
    duration: Annotated[int, typer.Option(help="Simulation duration in minutes")] = 720,
    time_step: Annotated[int, typer.Option(help="Time step in minutes")] = 5,
    output_dir: Annotated[Optional[Path], typer.Option(help="Output directory")] = None,
    max_workers: Annotated[Optional[int], typer.Option(help="Max parallel workers (default: all cores)")] = None,
    seed: Annotated[Optional[int], typer.Option(help="Random seed for reproducibility")] = None,
    patient_model: Annotated[str, typer.Option("--patient-model", help="Patient model type: 'custom' or 'bergman'")] = "custom",
):
    """
    Run a Monte Carlo population evaluation of an algorithm.

    Generates N virtual patients with physiological variation, runs each
    through the simulator in parallel, and reports aggregate TIR, hypo-risk,
    and Safety Index with 95% confidence intervals.

    Example:
        iints evaluate --algo my_algo.py --population 500 --seed 42
    """
    console = Console()
    # Echo the run configuration before the (potentially long) evaluation starts.
    console.print(f"[bold blue]IINTS-AF Population Evaluation[/bold blue]")
    console.print(f" Algorithm: [green]{algo.name}[/green]")
    console.print(f" Population size: [cyan]{population}[/cyan]")
    console.print(f" Patient model: [cyan]{patient_model}[/cyan]")
    console.print(f" Duration: {duration} min")
    console.print()

    # Validate algo file exists
    # (loaded once up front so a broken file fails fast; the instance is discarded,
    # run_population re-loads it per worker from algo_path).
    _load_algorithm_instance(algo, console)

    # An explicit YAML path takes precedence over the named built-in config.
    patient_config: Union[str, Path] = str(patient_config_path) if patient_config_path else patient_config_name
    scenario = str(scenario_path) if scenario_path else None

    # Deferred import (NOTE(review): presumably to keep CLI startup light — confirm).
    from iints.highlevel import run_population

    with console.status("[bold green]Running population evaluation...", spinner="dots"):
        results = run_population(
            algo_path=str(algo),
            n_patients=population,
            scenario=scenario,
            patient_config=patient_config,
            duration_minutes=duration,
            time_step=time_step,
            seed=seed,
            output_dir=str(output_dir) if output_dir else None,
            max_workers=max_workers,
            patient_model_type=patient_model,
        )

    # run_population returns a nested report dict; pull out the aggregates.
    report = results["population_report"]
    agg = report["aggregate_metrics"]
    safety_agg = report["aggregate_safety"]

    # --- Results table ---
    table = Table(title=f"Population Evaluation Results (N={population})")
    table.add_column("Metric", style="cyan", min_width=25)
    table.add_column("Mean", justify="right", style="green")
    table.add_column("95% CI", justify="right", style="yellow")
    table.add_column("Std", justify="right", style="dim")

    # Human-readable labels for known metric keys; unknown keys fall back
    # to the raw key name below.
    _METRIC_DISPLAY = {
        "tir_70_180": "TIR 70-180 mg/dL (%)",
        "tir_below_70": "Time <70 mg/dL (%)",
        "tir_below_54": "Time <54 mg/dL (%)",
        "tir_above_180": "Time >180 mg/dL (%)",
        "mean_glucose": "Mean Glucose (mg/dL)",
        "cv": "Coefficient of Variation (%)",
        "gmi": "Glucose Management Indicator (%)",
    }

    for metric_key, stats in agg.items():
        label = _METRIC_DISPLAY.get(metric_key, metric_key)
        table.add_row(
            label,
            f"{stats['mean']:.1f}",
            f"[{stats['ci_lower']:.1f}, {stats['ci_upper']:.1f}]",
            f"{stats['std']:.1f}",
        )

    # Safety Index gets its own emphasized row when present.
    if "safety_index" in safety_agg:
        si = safety_agg["safety_index"]
        table.add_row(
            "[bold]Safety Index[/bold]",
            f"[bold]{si['mean']:.1f}[/bold]",
            f"[{si['ci_lower']:.1f}, {si['ci_upper']:.1f}]",
            f"{si['std']:.1f}",
        )

    console.print(table)

    # --- Grade distribution ---
    if "grade_distribution" in safety_agg:
        console.print()
        grade_table = Table(title="Safety Grade Distribution")
        grade_table.add_column("Grade", style="bold")
        grade_table.add_column("Count", justify="right")
        grade_table.add_column("Percentage", justify="right")
        # Fixed grade order; missing grades render as count 0.
        for grade in ["A", "B", "C", "D", "F"]:
            count = safety_agg["grade_distribution"].get(grade, 0)
            pct = count / population * 100 if population else 0
            grade_table.add_row(grade, str(count), f"{pct:.1f}%")
        console.print(grade_table)

    # Only surface the early-termination rate when any run terminated early.
    etr = safety_agg.get("early_termination_rate")
    if etr is not None and etr > 0:
        console.print(f"\n[yellow]Early termination rate: {etr * 100:.1f}%[/yellow]")

    console.print(f"\n[green]Results saved to:[/green] {results['output_dir']}")
    console.print(f" - population_summary.csv")
    console.print(f" - population_report.json")
    console.print(f" - population_report.pdf")
|
|
333
|
+
|
|
334
|
+
|
|
335
|
+
@app.command()
def init(
    project_name: Annotated[str, typer.Option(help="Name of the project directory")] = "my_iints_project",
):
    """
    Initialize a new IINTS-AF research project with a standard folder structure.

    Creates algorithms/, scenarios/, data/ and results/ directories, copies the
    bundled example algorithm and scenario templates into them, and writes a
    starter README.  Refuses to overwrite an existing directory.
    """
    console = Console()
    project_path = Path(project_name)

    if project_path.exists():
        console.print(f"[bold red]Error: Directory '{project_name}' already exists.[/bold red]")
        raise typer.Exit(code=1)

    console.print(f"[bold blue]Initializing IINTS-AF Project: {project_name}[/bold blue]")

    # Create Directories
    for subdir in ("algorithms", "scenarios", "data", "results"):
        (project_path / subdir).mkdir(parents=True)

    # Scenario templates bundled with the SDK, copied verbatim under the same names.
    scenario_templates = (
        "example_scenario.json",
        "exercise_stress.json",
        "chaos_insulin_stacking.json",
        "chaos_runaway_ai.json",
    )

    # Read all package templates up front so a packaging problem aborts
    # before any files are written.
    try:
        if sys.version_info >= (3, 9):
            from importlib.resources import files

            def _read(package: str, resource: str) -> str:
                return files(package).joinpath(resource).read_text()
        else:
            # Fallback for Python 3.8 (no files() API).
            from importlib import resources

            def _read(package: str, resource: str) -> str:
                return resources.read_text(package, resource)

        algo_content = _read("iints.templates", "default_algorithm.py")
        scenario_contents = {
            name: _read("iints.templates.scenarios", name) for name in scenario_templates
        }
    except Exception as e:
        console.print(f"[bold red]Error reading template files: {e}[/bold red]")
        raise typer.Exit(code=1)

    # Instantiate the default algorithm template by filling its
    # {{ALGO_NAME}} / {{AUTHOR_NAME}} placeholder tokens.
    algo_content = algo_content.replace("{{ALGO_NAME}}", "ExampleAlgorithm")
    algo_content = algo_content.replace("{{AUTHOR_NAME}}", "IINTS User")

    (project_path / "algorithms" / "example_algorithm.py").write_text(algo_content)
    for name in scenario_templates:
        (project_path / "scenarios" / name).write_text(scenario_contents[name])

    # Create README
    readme_content = f"""# {project_name}

Powered by IINTS-AF SDK.

## Structure
- `algorithms/`: Place your custom python algorithms here.
- `scenarios/`: JSON files defining stress test scenarios.
- `data/`: Custom patient data or configuration.
- `results/`: Simulation outputs.

## Getting Started

1. Run the example algorithm:
```bash
iints run --algo algorithms/example_algorithm.py --scenario-path scenarios/example_scenario.json
```

2. Create a new algorithm:
```bash
iints new-algo MyNewAlgo --output-dir algorithms/
```
"""
    (project_path / "README.md").write_text(readme_content)

    console.print(f"[green]Project initialized successfully in '{project_name}'[/green]")
    console.print(f"To get started:\n cd {project_name}\n iints run --algo algorithms/example_algorithm.py")
|
|
421
|
+
|
|
422
|
+
@app.command()
def quickstart(
    project_name: Annotated[str, typer.Option(help="Name of the project directory")] = "iints_quickstart",
):
    """
    Create a ready-to-run project using clinic-safe presets and a demo algorithm.
    """
    console = Console()
    project_path = Path(project_name)

    # Never overwrite an existing directory.
    if project_path.exists():
        console.print(f"[bold red]Error: Directory '{project_name}' already exists.[/bold red]")
        raise typer.Exit(code=1)

    console.print(f"[bold blue]Creating IINTS-AF Quickstart Project: {project_name}[/bold blue]")

    # Minimal layout (no data/ directory, unlike `iints init`).
    (project_path / "algorithms").mkdir(parents=True)
    (project_path / "scenarios").mkdir(parents=True)
    (project_path / "results").mkdir(parents=True)

    # The demo algorithm template is mandatory: failure to read it aborts the command.
    try:
        if sys.version_info >= (3, 9):
            from importlib.resources import files
            algo_content = files("iints.templates").joinpath("default_algorithm.py").read_text()
        else:
            # Python 3.8 fallback (no files() API).
            from importlib import resources
            algo_content = resources.read_text("iints.templates", "default_algorithm.py")
    except Exception as e:
        console.print(f"[bold red]Error reading template files: {e}[/bold red]")
        raise typer.Exit(code=1)

    # Fill the template's placeholder tokens and write the demo algorithm.
    algo_content = algo_content.replace("{{ALGO_NAME}}", "QuickstartAlgorithm")
    algo_content = algo_content.replace("{{AUTHOR_NAME}}", "IINTS User")
    algo_path = project_path / "algorithms" / "example_algorithm.py"
    algo_path.write_text(algo_content)

    # The preset scenario is best-effort: a missing preset only prints a warning.
    try:
        preset = _get_preset("baseline_t1d")
        scenario_path = project_path / "scenarios" / "clinic_safe_baseline.json"
        scenario_path.write_text(json.dumps(preset.get("scenario", {}), indent=2))
    except Exception as e:
        console.print(f"[yellow]Preset scenario not available: {e}[/yellow]")

    # Likewise best-effort for the bundled exercise-stress scenario.
    try:
        if sys.version_info >= (3, 9):
            from importlib.resources import files
            exercise_content = files("iints.templates.scenarios").joinpath("exercise_stress.json").read_text()
        else:
            from importlib import resources
            exercise_content = resources.read_text("iints.templates.scenarios", "exercise_stress.json")
        (project_path / "scenarios" / "exercise_stress.json").write_text(exercise_content)
    except Exception as e:
        console.print(f"[yellow]Exercise stress scenario not available: {e}[/yellow]")

    readme_content = f"""# {project_name}

Clinic-safe quickstart project powered by IINTS-AF.

## Quickstart

Run a clinic-safe preset:

```bash
iints presets run --name baseline_t1d --algo algorithms/example_algorithm.py
```

Run with the included scenario file:

```bash
iints run --algo algorithms/example_algorithm.py --scenario-path scenarios/clinic_safe_baseline.json --duration 1440
```
"""
    (project_path / "README.md").write_text(readme_content)

    console.print(f"[green]Quickstart project ready in '{project_name}'.[/green]")
    console.print(f"Next:\n cd {project_name}\n iints presets run --name baseline_t1d --algo algorithms/example_algorithm.py")
|
|
498
|
+
@app.command()
def new_algo(
    name: Annotated[str, typer.Option(help="Name of the new algorithm")],
    author: Annotated[str, typer.Option(help="Author of the algorithm")],
    output_dir: Annotated[Path, typer.Option(help="Directory to save the new algorithm file")] = Path("."),
):
    """
    Creates a new algorithm template file based on the BaseAlgorithm.

    Writes ``<name_lowercased>_algorithm.py`` into *output_dir* with the
    class named ``<name>Algorithm`` and the author stamped in.
    """
    if not output_dir.is_dir():
        typer.echo(f"Error: Output directory '{output_dir}' does not exist.")
        raise typer.Exit(code=1)

    try:
        # importlib.resources gained the files() API in Python 3.9;
        # fall back to the legacy reader on 3.8.
        if sys.version_info >= (3, 9):
            from importlib.resources import files
            template_content = files("iints.templates").joinpath("default_algorithm.py").read_text()
        else:
            from importlib import resources
            template_content = resources.read_text("iints.templates", "default_algorithm.py")
    except Exception as e:
        typer.echo(f"Error reading template file: {e}")
        raise typer.Exit(code=1)

    # The shipped template contains literal {{ALGO_NAME}} / {{AUTHOR_NAME}}
    # placeholder tokens (it is a template, not importable Python, until they
    # are substituted here).
    final_content = template_content.replace("{{ALGO_NAME}}", f"{name}Algorithm")
    final_content = final_content.replace("{{AUTHOR_NAME}}", author)

    output_file = output_dir / f"{name.lower().replace(' ', '_')}_algorithm.py"
    output_file.write_text(final_content)

    typer.echo(f"Successfully created new algorithm template: {output_file}")
|
|
541
|
+
|
|
542
|
+
|
|
543
|
+
@presets_app.command("list")
def presets_list():
    """Print a table of the bundled clinic-safe presets."""
    console = Console()
    table = Table(title="Clinic-Safe Presets", show_lines=False)
    table.add_column("Name", style="cyan")
    table.add_column("Description")
    table.add_column("Patient Config")
    table.add_column("Duration (min)", justify="right")
    # Missing fields render as empty cells rather than crashing.
    for entry in _load_presets():
        row = (
            entry.get("name", ""),
            entry.get("description", ""),
            entry.get("patient_config", ""),
            str(entry.get("duration_minutes", "")),
        )
        table.add_row(*row)
    console.print(table)
|
|
561
|
+
|
|
562
|
+
|
|
563
|
+
@presets_app.command("show")
def presets_show(
    name: Annotated[str, typer.Option(help="Preset name (e.g., baseline_t1d)")],
):
    """Show a preset definition."""
    console = Console()
    try:
        definition = _get_preset(name)
    except KeyError:
        # _get_preset signals an unknown name with KeyError; exit non-zero.
        console.print(f"[bold red]Error: Unknown preset '{name}'.[/bold red]")
        raise typer.Exit(code=1)
    console.print_json(json.dumps(definition, indent=2))
|
|
575
|
+
|
|
576
|
+
|
|
577
|
+
@presets_app.command("run")
def presets_run(
    name: Annotated[str, typer.Option(help="Preset name (e.g., baseline_t1d)")],
    algo: Annotated[Path, typer.Option(help="Path to the algorithm Python file")],
    output_dir: Annotated[Optional[Path], typer.Option(help="Directory to save outputs")] = None,
    compare_baselines: Annotated[bool, typer.Option(help="Run PID and standard pump baselines in the background")] = True,
    seed: Annotated[Optional[int], typer.Option(help="Random seed for deterministic runs")] = None,
    safety_min_glucose: Annotated[Optional[float], typer.Option("--safety-min-glucose", help="Min plausible glucose (mg/dL)")] = None,
    safety_max_glucose: Annotated[Optional[float], typer.Option("--safety-max-glucose", help="Max plausible glucose (mg/dL)")] = None,
    safety_max_glucose_delta_per_5_min: Annotated[Optional[float], typer.Option("--safety-max-glucose-delta-per-5-min", help="Max glucose delta per 5 min (mg/dL)")] = None,
    safety_hypoglycemia_threshold: Annotated[Optional[float], typer.Option("--safety-hypo-threshold", help="Hypoglycemia threshold (mg/dL)")] = None,
    safety_severe_hypoglycemia_threshold: Annotated[Optional[float], typer.Option("--safety-severe-hypo-threshold", help="Severe hypoglycemia threshold (mg/dL)")] = None,
    safety_hyperglycemia_threshold: Annotated[Optional[float], typer.Option("--safety-hyper-threshold", help="Hyperglycemia threshold (mg/dL)")] = None,
    safety_max_insulin_per_bolus: Annotated[Optional[float], typer.Option("--safety-max-bolus", help="Max insulin per bolus (U)")] = None,
    safety_glucose_rate_alarm: Annotated[Optional[float], typer.Option("--safety-glucose-rate-alarm", help="Glucose rate alarm (mg/dL/min)")] = None,
    safety_max_insulin_per_hour: Annotated[Optional[float], typer.Option("--safety-max-insulin-per-hour", help="Max insulin per 60 min (U)")] = None,
    safety_max_iob: Annotated[Optional[float], typer.Option("--safety-max-iob", help="Max insulin on board (U)")] = None,
    safety_trend_stop: Annotated[Optional[float], typer.Option("--safety-trend-stop", help="Negative trend cutoff (mg/dL/min)")] = None,
    safety_hypo_cutoff: Annotated[Optional[float], typer.Option("--safety-hypo-cutoff", help="Hard hypo cutoff (mg/dL)")] = None,
    safety_critical_glucose_threshold: Annotated[Optional[float], typer.Option("--safety-critical-glucose", help="Critical glucose threshold (mg/dL)")] = None,
    safety_critical_glucose_duration_minutes: Annotated[Optional[int], typer.Option("--safety-critical-duration", help="Critical glucose duration (minutes)")] = None,
):
    """Run a clinic-safe preset with an algorithm and generate outputs.

    Pipeline:
      1. Resolve the preset and load the user's algorithm from ``algo``.
      2. Build the patient model from the preset's patient config and assemble
         the safety configuration (``--safety-*`` flags first; the preset's
         critical-glucose settings are then applied on top).
      3. Validate and attach the preset's stress-event scenario.
      4. Run the simulation, then write: results CSV, config/run metadata JSON,
         audit trail, optional baseline comparison, PDF report, and a run
         manifest (optionally signed).

    Raises:
        typer.Exit: exit code 1 on unknown preset, invalid patient config,
            or invalid preset scenario.
    """
    console = Console()
    try:
        preset = _get_preset(name)
    except KeyError:
        console.print(f"[bold red]Error: Unknown preset '{name}'.[/bold red]")
        raise typer.Exit(code=1)

    algorithm_instance = _load_algorithm_instance(algo, console)
    console.print(f"Loaded algorithm: [green]{algorithm_instance.get_algorithm_metadata().name}[/green]")

    # Resolve the seed up front so the run id and every sub-run are deterministic.
    resolved_seed = resolve_seed(seed)
    run_id = generate_run_id(resolved_seed)

    try:
        patient_config_name = preset.get("patient_config", "default_patient")
        validated_patient_params = load_patient_config_by_name(patient_config_name).model_dump()
        patient_model = iints.PatientModel(**validated_patient_params)
    except ValidationError as e:
        console.print("[bold red]Patient config validation failed:[/bold red]")
        for line in format_validation_error(e):
            console.print(f"- {line}")
        raise typer.Exit(code=1)
    except Exception as e:
        console.print(f"[bold red]Error loading patient config {patient_config_name}: {e}[/bold red]")
        raise typer.Exit(code=1)

    duration = int(preset.get("duration_minutes", 720))
    time_step = int(preset.get("time_step_minutes", 5))
    # CLI overrides produce a SafetyConfig (or None if no flag was given).
    safety_config = _build_safety_config_from_options(
        min_glucose=safety_min_glucose,
        max_glucose=safety_max_glucose,
        max_glucose_delta_per_5_min=safety_max_glucose_delta_per_5_min,
        hypoglycemia_threshold=safety_hypoglycemia_threshold,
        severe_hypoglycemia_threshold=safety_severe_hypoglycemia_threshold,
        hyperglycemia_threshold=safety_hyperglycemia_threshold,
        max_insulin_per_bolus=safety_max_insulin_per_bolus,
        glucose_rate_alarm=safety_glucose_rate_alarm,
        max_insulin_per_hour=safety_max_insulin_per_hour,
        max_iob=safety_max_iob,
        trend_stop=safety_trend_stop,
        hypo_cutoff=safety_hypo_cutoff,
        critical_glucose_threshold=safety_critical_glucose_threshold,
        critical_glucose_duration_minutes=safety_critical_glucose_duration_minutes,
    )

    # Preset-level critical-glucose settings are applied on top of the
    # CLI-derived config (defaults are used when no CLI flag was passed).
    if safety_config is None:
        safety_config = SafetyConfig()
    if "critical_glucose_threshold" in preset:
        safety_config.critical_glucose_threshold = float(preset["critical_glucose_threshold"])
    if "critical_glucose_duration_minutes" in preset:
        safety_config.critical_glucose_duration_minutes = int(preset["critical_glucose_duration_minutes"])

    simulator_kwargs: Dict[str, Any] = {
        "patient_model": patient_model,
        "algorithm": algorithm_instance,
        "time_step": time_step,
        "safety_config": safety_config,
        "seed": resolved_seed,
    }
    simulator = iints.Simulator(**simulator_kwargs)

    scenario_payload = preset.get("scenario", {})
    try:
        scenario_model = validate_scenario_dict(scenario_payload)
    except ValidationError as e:
        console.print("[bold red]Preset scenario validation failed:[/bold red]")
        for line in format_validation_error(e):
            console.print(f"- {line}")
        raise typer.Exit(code=1)

    stress_event_payloads = scenario_to_payloads(scenario_model)
    for warning in scenario_warnings(scenario_model):
        console.print(f"[yellow]Warning:[/yellow] {warning}")
    for event in build_stress_events(stress_event_payloads):
        simulator.add_stress_event(event)

    output_dir = resolve_output_dir(output_dir, run_id)
    results_df, safety_report = simulator.run_batch(duration)

    # Persist the full provenance of the run so it can be reproduced.
    scenario_payload = scenario_model.model_dump() if scenario_model else None
    config_payload: Dict[str, Any] = {
        "run_type": "preset",
        "preset_name": name,
        "algorithm": {
            "class": f"{algorithm_instance.__class__.__module__}.{algorithm_instance.__class__.__name__}",
            "metadata": algorithm_instance.get_algorithm_metadata().to_dict(),
        },
        "patient_config": validated_patient_params,
        "scenario": scenario_payload,
        "duration_minutes": duration,
        "time_step_minutes": time_step,
        "seed": resolved_seed,
        "compare_baselines": compare_baselines,
        "export_audit": True,
        "generate_report": True,
        "safety_config": asdict(safety_config),
    }
    config_path = output_dir / "config.json"
    write_json(config_path, config_payload)
    run_metadata = build_run_metadata(run_id, resolved_seed, config_payload, output_dir)
    run_metadata_path = output_dir / "run_metadata.json"
    write_json(run_metadata_path, run_metadata)

    results_file = output_dir / "results.csv"
    results_df.to_csv(results_file, index=False)
    console.print(f"Results saved to: {results_file}")
    console.print(f"Run metadata: {run_metadata_path}")

    # Audit export is best-effort: a failure is reported but never fatal.
    audit_dir = output_dir / "audit"
    try:
        audit_paths = simulator.export_audit_trail(results_df, output_dir=str(audit_dir))
        console.print(f"Audit trail: {audit_paths}")
    except Exception as e:
        console.print(f"[yellow]Audit export skipped: {e}[/yellow]")

    if compare_baselines:
        comparison = run_baseline_comparison(
            patient_params=validated_patient_params,
            stress_event_payloads=stress_event_payloads,
            duration=duration,
            time_step=time_step,
            primary_label=algorithm_instance.get_algorithm_metadata().name,
            primary_results=results_df,
            primary_safety=safety_report,
            seed=resolved_seed,
        )
        safety_report["baseline_comparison"] = comparison
        baseline_paths = write_baseline_comparison(comparison, output_dir / "baseline")
        console.print(f"Baseline comparison saved to: {baseline_paths}")

    report_path = output_dir / "report.pdf"
    iints.generate_report(results_df, str(report_path), safety_report)
    console.print(f"PDF report saved to: {report_path}")

    manifest_files = {
        "config": config_path,
        "run_metadata": run_metadata_path,
        "results_csv": results_file,
        "report_pdf": report_path,
    }
    if compare_baselines:
        manifest_files["baseline_json"] = output_dir / "baseline" / "baseline_comparison.json"
        manifest_files["baseline_csv"] = output_dir / "baseline" / "baseline_comparison.csv"
    # BUG FIX: the original tested the truthiness of a Path object, which is
    # always True; only list the audit summary if it was actually written.
    audit_summary_path = output_dir / "audit" / "audit_summary.json"
    if audit_summary_path.exists():
        manifest_files["audit_summary"] = audit_summary_path
    run_manifest = build_run_manifest(output_dir, manifest_files)
    run_manifest_path = output_dir / "run_manifest.json"
    write_json(run_manifest_path, run_manifest)
    console.print(f"Run manifest: {run_manifest_path}")
    # BUG FIX: sign the manifest once (the original duplicated this block,
    # signing and printing the signature path twice).
    signature_path = maybe_sign_manifest(run_manifest_path)
    if signature_path:
        console.print(f"Run manifest signature: {signature_path}")
|
|
755
|
+
|
|
756
|
+
|
|
757
|
+
@presets_app.command("create")
def presets_create(
    name: Annotated[str, typer.Option(help="Preset name (snake_case)")],
    output_dir: Annotated[Path, typer.Option(help="Output directory for preset files")] = Path("./presets"),
    initial_glucose: Annotated[float, typer.Option(help="Initial glucose (mg/dL)")] = 140.0,
    basal_insulin_rate: Annotated[float, typer.Option(help="Basal insulin rate (U/hr)")] = 0.5,
    insulin_sensitivity: Annotated[float, typer.Option(help="Insulin sensitivity (mg/dL per U)")] = 50.0,
    carb_factor: Annotated[float, typer.Option(help="Carb factor (g per U)")] = 10.0,
):
    """
    Generate a clinic-safe preset scaffold (patient YAML + scenario JSON).
    """
    console = Console()
    output_dir.mkdir(parents=True, exist_ok=True)

    patient_config_name = f"clinic_safe_{name}"
    patient_yaml_path = output_dir / f"{patient_config_name}.yaml"
    scenario_path = output_dir / f"{name}.json"

    # Patient parameters: CLI-supplied values plus fixed physiological defaults.
    yaml_lines = [
        f"basal_insulin_rate: {basal_insulin_rate}",
        f"insulin_sensitivity: {insulin_sensitivity}",
        f"carb_factor: {carb_factor}",
        "glucose_decay_rate: 0.03",
        f"initial_glucose: {initial_glucose}",
        "glucose_absorption_rate: 0.03",
        "insulin_action_duration: 300.0",
        "insulin_peak_time: 75.0",
        "meal_mismatch_epsilon: 1.0",
    ]
    patient_yaml_path.write_text("".join(f"{line}\n" for line in yaml_lines))

    # A simple three-meal day (breakfast / lunch / dinner) as the scenario.
    three_meals = [
        {"start_time": 60, "event_type": "meal", "value": 45, "absorption_delay_minutes": 15, "duration": 60},
        {"start_time": 360, "event_type": "meal", "value": 60, "absorption_delay_minutes": 20, "duration": 90},
        {"start_time": 720, "event_type": "meal", "value": 70, "absorption_delay_minutes": 15, "duration": 90},
    ]
    scenario = {
        "scenario_name": f"Clinic Safe {name.replace('_', ' ').title()}",
        "schema_version": "1.1",
        "scenario_version": "1.0",
        "stress_events": three_meals,
    }
    scenario_path.write_text(json.dumps(scenario, indent=2))

    # Ready-to-paste entry for presets.json, referencing the files just written.
    preset_snippet = {
        "name": name,
        "description": "Custom clinic-safe preset (generated).",
        "patient_config": patient_config_name,
        "duration_minutes": 1440,
        "time_step_minutes": 5,
        "critical_glucose_threshold": 40.0,
        "critical_glucose_duration_minutes": 30,
        "scenario": scenario,
    }

    console.print(f"[green]Preset files created:[/green] {patient_yaml_path} , {scenario_path}")
    console.print("[bold]Add this preset to presets.json if you want it built-in:[/bold]")
    console.print_json(json.dumps(preset_snippet, indent=2))
|
|
815
|
+
|
|
816
|
+
|
|
817
|
+
@app.command("run-wizard")
def run_wizard():
    """Interactive wizard to run a preset quickly."""
    console = Console()
    available = [p.get("name", "") for p in _load_presets() if p.get("name")]
    if not available:
        console.print("[bold red]No presets available.[/bold red]")
        raise typer.Exit(code=1)

    # Prompt order is part of the interactive contract; keep it stable.
    preset_choice = typer.prompt("Preset name", default=available[0])
    algo_path = Path(typer.prompt("Algorithm path", default="algorithms/example_algorithm.py"))
    raw_seed = typer.prompt("Seed (blank for auto)", default="", show_default=False)
    raw_out = typer.prompt("Output directory (blank for default)", default="", show_default=False)

    # Every --safety-* override is left unset so the preset's defaults apply.
    safety_overrides = {
        key: None
        for key in (
            "safety_min_glucose",
            "safety_max_glucose",
            "safety_max_glucose_delta_per_5_min",
            "safety_hypoglycemia_threshold",
            "safety_severe_hypoglycemia_threshold",
            "safety_hyperglycemia_threshold",
            "safety_max_insulin_per_bolus",
            "safety_glucose_rate_alarm",
            "safety_max_insulin_per_hour",
            "safety_max_iob",
            "safety_trend_stop",
            "safety_hypo_cutoff",
            "safety_critical_glucose_threshold",
            "safety_critical_glucose_duration_minutes",
        )
    }
    presets_run(
        name=preset_choice,
        algo=algo_path,
        output_dir=Path(raw_out) if raw_out.strip() else None,
        compare_baselines=True,
        seed=int(raw_seed) if raw_seed.strip() else None,
        **safety_overrides,
    )
|
|
855
|
+
|
|
856
|
+
|
|
857
|
+
@profiles_app.command("create")
def profiles_create(
    name: Annotated[str, typer.Option(help="Profile name (file stem)")],
    output_dir: Annotated[Path, typer.Option(help="Output directory for the profile YAML")] = Path("./patient_profiles"),
    isf: Annotated[float, typer.Option(help="Insulin Sensitivity Factor (mg/dL per unit)")] = 50.0,
    icr: Annotated[float, typer.Option(help="Insulin-to-carb ratio (grams per unit)")] = 10.0,
    basal_rate: Annotated[float, typer.Option(help="Basal insulin rate (U/hr)")] = 0.8,
    initial_glucose: Annotated[float, typer.Option(help="Initial glucose (mg/dL)")] = 120.0,
    dawn_strength: Annotated[float, typer.Option(help="Dawn phenomenon strength (mg/dL per hour)")] = 0.0,
    dawn_start: Annotated[float, typer.Option(help="Dawn phenomenon start hour (0-23)")] = 4.0,
    dawn_end: Annotated[float, typer.Option(help="Dawn phenomenon end hour (0-24)")] = 8.0,
):
    """Create a patient profile YAML you can pass to --patient-config-path."""
    console = Console()
    output_dir.mkdir(parents=True, exist_ok=True)
    output_path = output_dir / f"{name}.yaml"

    # Map CLI options onto the PatientProfile field names.
    profile_kwargs = {
        "isf": isf,
        "icr": icr,
        "basal_rate": basal_rate,
        "initial_glucose": initial_glucose,
        "dawn_phenomenon_strength": dawn_strength,
        "dawn_start_hour": dawn_start,
        "dawn_end_hour": dawn_end,
    }
    profile = PatientProfile(**profile_kwargs)
    # sort_keys=False keeps the field order defined by to_patient_config().
    with output_path.open("w") as sink:
        yaml.safe_dump(profile.to_patient_config(), sink, sort_keys=False)

    console.print(f"[green]Patient profile saved:[/green] {output_path}")
    console.print("Use it with:")
    console.print(f"  iints run --algo algorithms/example_algorithm.py --patient-config-path {output_path}")
|
|
889
|
+
|
|
890
|
+
|
|
891
|
+
@scenarios_app.command("generate")
def scenarios_generate(
    name: Annotated[str, typer.Option(help="Scenario name")] = "Generated Scenario",
    output_path: Annotated[Path, typer.Option(help="Output JSON path")] = Path("./scenarios/generated_scenario.json"),
    duration_minutes: Annotated[int, typer.Option(help="Scenario duration in minutes")] = 1440,
    seed: Annotated[Optional[int], typer.Option(help="Random seed")] = None,
    meal_count: Annotated[int, typer.Option(help="Number of meal events")] = 3,
    meal_min_grams: Annotated[float, typer.Option(help="Min meal size (g carbs)")] = 30.0,
    meal_max_grams: Annotated[float, typer.Option(help="Max meal size (g carbs)")] = 80.0,
    exercise_count: Annotated[int, typer.Option(help="Number of exercise events")] = 0,
    sensor_error_count: Annotated[int, typer.Option(help="Number of sensor error events")] = 0,
):
    """Generate a random stress-test scenario JSON."""
    # All CLI options map 1:1 onto the generator configuration.
    generator_settings = ScenarioGeneratorConfig(
        name=name,
        duration_minutes=duration_minutes,
        seed=seed,
        meal_count=meal_count,
        meal_min_grams=meal_min_grams,
        meal_max_grams=meal_max_grams,
        exercise_count=exercise_count,
        sensor_error_count=sensor_error_count,
    )
    generated = generate_random_scenario(generator_settings)
    output_path.parent.mkdir(parents=True, exist_ok=True)
    output_path.write_text(json.dumps(generated, indent=2))
    typer.echo(f"Scenario saved: {output_path}")
|
|
918
|
+
|
|
919
|
+
|
|
920
|
+
@scenarios_app.command("wizard")
def scenarios_wizard():
    """Interactive scenario generator."""
    # Prompt order is part of the interactive contract; keep it stable.
    scenario_name = typer.prompt("Scenario name", default="Generated Scenario")
    total_minutes = int(typer.prompt("Duration (minutes)", default="1440"))
    meals = int(typer.prompt("Meal events", default="3"))
    meal_low = float(typer.prompt("Meal min grams", default="30"))
    meal_high = float(typer.prompt("Meal max grams", default="80"))
    workouts = int(typer.prompt("Exercise events", default="0"))
    sensor_faults = int(typer.prompt("Sensor error events", default="0"))
    destination = Path(typer.prompt("Output JSON path", default="scenarios/generated_scenario.json"))

    # No seed is collected by the wizard; the generator picks its own.
    generated = generate_random_scenario(
        ScenarioGeneratorConfig(
            name=scenario_name,
            duration_minutes=total_minutes,
            meal_count=meals,
            meal_min_grams=meal_low,
            meal_max_grams=meal_high,
            exercise_count=workouts,
            sensor_error_count=sensor_faults,
        )
    )
    destination.parent.mkdir(parents=True, exist_ok=True)
    destination.write_text(json.dumps(generated, indent=2))
    typer.echo(f"Scenario saved: {destination}")
|
|
945
|
+
|
|
946
|
+
|
|
947
|
+
@scenarios_app.command("migrate")
def scenarios_migrate(
    input_path: Annotated[Path, typer.Argument(help="Scenario JSON to migrate")],
    output_path: Annotated[Optional[Path], typer.Option(help="Output path (default: overwrite input)")] = None,
):
    """Migrate a scenario JSON to the latest schema version."""
    console = Console()
    if not input_path.is_file():
        console.print(f"[bold red]Error: Scenario '{input_path}' not found.[/bold red]")
        raise typer.Exit(code=1)
    try:
        payload = json.loads(input_path.read_text())
    except json.JSONDecodeError as exc:
        console.print(f"[bold red]Invalid JSON: {exc}[/bold red]")
        raise typer.Exit(code=1)
    # Without an explicit output path the migration overwrites the input file.
    destination = output_path if output_path is not None else input_path
    destination.write_text(json.dumps(migrate_scenario_dict(payload), indent=2))
    console.print(f"[green]Migrated scenario saved to {destination}[/green]")
|
|
966
|
+
@app.command()
|
|
967
|
+
def run(
|
|
968
|
+
algo: Annotated[Path, typer.Option(help="Path to the algorithm Python file")],
|
|
969
|
+
patient_config_name: Annotated[str, typer.Option(help="Name of the patient configuration (e.g., 'default_patient' or 'patient_559_config')")] = "default_patient",
|
|
970
|
+
patient_config_path: Annotated[Optional[Path], typer.Option(help="Path to a patient config YAML (overrides --patient-config-name)")] = None,
|
|
971
|
+
scenario_path: Annotated[
|
|
972
|
+
Optional[Path],
|
|
973
|
+
typer.Option(
|
|
974
|
+
"--scenario",
|
|
975
|
+
"--scenario-path",
|
|
976
|
+
help="Path to the scenario JSON file (e.g., scenarios/example_scenario.json)",
|
|
977
|
+
),
|
|
978
|
+
] = None,
|
|
979
|
+
duration: Annotated[int, typer.Option(help="Simulation duration in minutes")] = 720, # 12 hours
|
|
980
|
+
time_step: Annotated[int, typer.Option(help="Simulation time step in minutes")] = 5,
|
|
981
|
+
output_dir: Annotated[Optional[Path], typer.Option(help="Directory to save simulation results")] = None,
|
|
982
|
+
compare_baselines: Annotated[bool, typer.Option(help="Run PID and standard pump baselines in the background")] = True,
|
|
983
|
+
seed: Annotated[Optional[int], typer.Option(help="Random seed for deterministic runs")] = None,
|
|
984
|
+
safety_min_glucose: Annotated[Optional[float], typer.Option("--safety-min-glucose", help="Min plausible glucose (mg/dL)")] = None,
|
|
985
|
+
safety_max_glucose: Annotated[Optional[float], typer.Option("--safety-max-glucose", help="Max plausible glucose (mg/dL)")] = None,
|
|
986
|
+
safety_max_glucose_delta_per_5_min: Annotated[Optional[float], typer.Option("--safety-max-glucose-delta-per-5-min", help="Max glucose delta per 5 min (mg/dL)")] = None,
|
|
987
|
+
safety_hypoglycemia_threshold: Annotated[Optional[float], typer.Option("--safety-hypo-threshold", help="Hypoglycemia threshold (mg/dL)")] = None,
|
|
988
|
+
safety_severe_hypoglycemia_threshold: Annotated[Optional[float], typer.Option("--safety-severe-hypo-threshold", help="Severe hypoglycemia threshold (mg/dL)")] = None,
|
|
989
|
+
safety_hyperglycemia_threshold: Annotated[Optional[float], typer.Option("--safety-hyper-threshold", help="Hyperglycemia threshold (mg/dL)")] = None,
|
|
990
|
+
safety_max_insulin_per_bolus: Annotated[Optional[float], typer.Option("--safety-max-bolus", help="Max insulin per bolus (U)")] = None,
|
|
991
|
+
safety_glucose_rate_alarm: Annotated[Optional[float], typer.Option("--safety-glucose-rate-alarm", help="Glucose rate alarm (mg/dL/min)")] = None,
|
|
992
|
+
safety_max_insulin_per_hour: Annotated[Optional[float], typer.Option("--safety-max-insulin-per-hour", help="Max insulin per 60 min (U)")] = None,
|
|
993
|
+
safety_max_iob: Annotated[Optional[float], typer.Option("--safety-max-iob", help="Max insulin on board (U)")] = None,
|
|
994
|
+
safety_trend_stop: Annotated[Optional[float], typer.Option("--safety-trend-stop", help="Negative trend cutoff (mg/dL/min)")] = None,
|
|
995
|
+
safety_hypo_cutoff: Annotated[Optional[float], typer.Option("--safety-hypo-cutoff", help="Hard hypo cutoff (mg/dL)")] = None,
|
|
996
|
+
safety_critical_glucose_threshold: Annotated[Optional[float], typer.Option("--safety-critical-glucose", help="Critical glucose threshold (mg/dL)")] = None,
|
|
997
|
+
safety_critical_glucose_duration_minutes: Annotated[Optional[int], typer.Option("--safety-critical-duration", help="Critical glucose duration (minutes)")] = None,
|
|
998
|
+
):
|
|
999
|
+
"""
|
|
1000
|
+
Run an IINTS-AF simulation using a specified algorithm and patient configuration.
|
|
1001
|
+
"""
|
|
1002
|
+
console = Console() # Define console locally for each command to prevent Rich issues
|
|
1003
|
+
console.print(f"[bold blue]Starting IINTS-AF simulation with algorithm: {algo.name}[/bold blue]")
|
|
1004
|
+
console.print(f"Patient configuration: {patient_config_name}")
|
|
1005
|
+
console.print(f"Simulation duration: {duration} minutes, time step: {time_step} minutes")
|
|
1006
|
+
|
|
1007
|
+
# 1. Load Algorithm
|
|
1008
|
+
if not algo.is_file():
|
|
1009
|
+
console.print(f"[bold red]Error: Algorithm file '{algo}' not found.[/bold red]")
|
|
1010
|
+
raise typer.Exit(code=1)
|
|
1011
|
+
|
|
1012
|
+
module_name = algo.stem
|
|
1013
|
+
# Use importlib.util to load the module directly from the file path
|
|
1014
|
+
spec = importlib.util.spec_from_file_location(module_name, algo)
|
|
1015
|
+
if spec is None:
|
|
1016
|
+
console.print(f"[bold red]Error: Could not load module spec for {algo}[/bold red]")
|
|
1017
|
+
raise typer.Exit(code=1)
|
|
1018
|
+
|
|
1019
|
+
module = importlib.util.module_from_spec(spec)
|
|
1020
|
+
|
|
1021
|
+
# Manually inject 'iints' package into the loaded module's global namespace
|
|
1022
|
+
# This ensures 'from iints import ...' works within the algorithm file
|
|
1023
|
+
module.iints = iints # type: ignore
|
|
1024
|
+
|
|
1025
|
+
sys.modules[module_name] = module
|
|
1026
|
+
try:
|
|
1027
|
+
if spec.loader: # Ensure loader is not None
|
|
1028
|
+
spec.loader.exec_module(module)
|
|
1029
|
+
else:
|
|
1030
|
+
raise ImportError(f"Could not load module loader for {algo}")
|
|
1031
|
+
except Exception as e:
|
|
1032
|
+
console.print(f"[bold red]Error loading algorithm module {algo}: {e}[/bold red]")
|
|
1033
|
+
raise typer.Exit(code=1)
|
|
1034
|
+
|
|
1035
|
+
algorithm_instance = None
|
|
1036
|
+
for name_in_module, obj in module.__dict__.items(): # Renamed 'name' to 'name_in_module' to avoid conflict
|
|
1037
|
+
if isinstance(obj, type) and issubclass(obj, iints.InsulinAlgorithm) and obj is not iints.InsulinAlgorithm:
|
|
1038
|
+
algorithm_instance = obj() # Instantiate the algorithm
|
|
1039
|
+
console.print(f"Loaded algorithm: [green]{algorithm_instance.get_algorithm_metadata().name}[/green]")
|
|
1040
|
+
break
|
|
1041
|
+
|
|
1042
|
+
if algorithm_instance is None:
|
|
1043
|
+
console.print(f"[bold red]Error: No subclass of InsulinAlgorithm found in {algo}[/bold red]")
|
|
1044
|
+
raise typer.Exit(code=1)
|
|
1045
|
+
|
|
1046
|
+
# 2. Get Device
|
|
1047
|
+
device_manager = iints.DeviceManager()
|
|
1048
|
+
device = device_manager.get_device()
|
|
1049
|
+
console.print(f"Using compute device: [blue]{device}[/blue]")
|
|
1050
|
+
|
|
1051
|
+
# 3. Instantiate Patient Model
|
|
1052
|
+
try:
|
|
1053
|
+
if patient_config_path:
|
|
1054
|
+
if not patient_config_path.is_file():
|
|
1055
|
+
console.print(f"[bold red]Error: Patient config file '{patient_config_path}' not found.[/bold red]")
|
|
1056
|
+
raise typer.Exit(code=1)
|
|
1057
|
+
validated_patient_params = load_patient_config(patient_config_path).model_dump()
|
|
1058
|
+
patient_label = patient_config_path.stem
|
|
1059
|
+
else:
|
|
1060
|
+
validated_patient_params = load_patient_config_by_name(patient_config_name).model_dump()
|
|
1061
|
+
patient_label = patient_config_name
|
|
1062
|
+
|
|
1063
|
+
patient_model = iints.PatientModel(**validated_patient_params)
|
|
1064
|
+
console.print(f"Using patient model: {patient_model.__class__.__name__} with config [cyan]{patient_label}[/cyan]")
|
|
1065
|
+
except ValidationError as e:
|
|
1066
|
+
console.print("[bold red]Patient config validation failed:[/bold red]")
|
|
1067
|
+
for line in format_validation_error(e):
|
|
1068
|
+
console.print(f"- {line}")
|
|
1069
|
+
raise typer.Exit(code=1)
|
|
1070
|
+
except TypeError as e:
|
|
1071
|
+
console.print(f"[bold red]Error instantiating PatientModel with parameters from {patient_config_name}: {e}[/bold red]")
|
|
1072
|
+
console.print("[bold red]Please check that patient configuration keys match PatientModel constructor arguments.[/bold red]")
|
|
1073
|
+
raise typer.Exit(code=1)
|
|
1074
|
+
except Exception as e:
|
|
1075
|
+
console.print(f"[bold red]Error loading patient config {patient_config_name}: {e}[/bold red]")
|
|
1076
|
+
raise typer.Exit(code=1)
|
|
1077
|
+
|
|
1078
|
+
|
|
1079
|
+
# 4. Load Scenario Data (if provided)
|
|
1080
|
+
stress_events = []
|
|
1081
|
+
stress_event_payloads: List[Dict[str, Any]] = []
|
|
1082
|
+
scenario_model = None
|
|
1083
|
+
scenario_payload: Optional[Dict[str, Any]] = None
|
|
1084
|
+
if scenario_path:
|
|
1085
|
+
if not scenario_path.is_file():
|
|
1086
|
+
console.print(f"[bold red]Error: Scenario file '{scenario_path}' not found.[/bold red]")
|
|
1087
|
+
raise typer.Exit(code=1)
|
|
1088
|
+
|
|
1089
|
+
try:
|
|
1090
|
+
scenario_model = load_scenario(scenario_path)
|
|
1091
|
+
scenario_payload = scenario_model.model_dump()
|
|
1092
|
+
stress_event_payloads = scenario_to_payloads(scenario_model)
|
|
1093
|
+
stress_events = build_stress_events(stress_event_payloads)
|
|
1094
|
+
console.print(
|
|
1095
|
+
f"Loaded {len(stress_events)} stress events from scenario: [magenta]{scenario_path.name}[/magenta]"
|
|
1096
|
+
)
|
|
1097
|
+
for warning in scenario_warnings(scenario_model):
|
|
1098
|
+
console.print(f"[yellow]Warning:[/yellow] {warning}")
|
|
1099
|
+
except ValidationError as e:
|
|
1100
|
+
console.print("[bold red]Scenario validation failed:[/bold red]")
|
|
1101
|
+
for line in format_validation_error(e):
|
|
1102
|
+
console.print(f"- {line}")
|
|
1103
|
+
raise typer.Exit(code=1)
|
|
1104
|
+
|
|
1105
|
+
# 5. Run Simulation
|
|
1106
|
+
safety_config = _build_safety_config_from_options(
|
|
1107
|
+
min_glucose=safety_min_glucose,
|
|
1108
|
+
max_glucose=safety_max_glucose,
|
|
1109
|
+
max_glucose_delta_per_5_min=safety_max_glucose_delta_per_5_min,
|
|
1110
|
+
hypoglycemia_threshold=safety_hypoglycemia_threshold,
|
|
1111
|
+
severe_hypoglycemia_threshold=safety_severe_hypoglycemia_threshold,
|
|
1112
|
+
hyperglycemia_threshold=safety_hyperglycemia_threshold,
|
|
1113
|
+
max_insulin_per_bolus=safety_max_insulin_per_bolus,
|
|
1114
|
+
glucose_rate_alarm=safety_glucose_rate_alarm,
|
|
1115
|
+
max_insulin_per_hour=safety_max_insulin_per_hour,
|
|
1116
|
+
max_iob=safety_max_iob,
|
|
1117
|
+
trend_stop=safety_trend_stop,
|
|
1118
|
+
hypo_cutoff=safety_hypo_cutoff,
|
|
1119
|
+
critical_glucose_threshold=safety_critical_glucose_threshold,
|
|
1120
|
+
critical_glucose_duration_minutes=safety_critical_glucose_duration_minutes,
|
|
1121
|
+
)
|
|
1122
|
+
|
|
1123
|
+
resolved_seed = resolve_seed(seed)
|
|
1124
|
+
run_id = generate_run_id(resolved_seed)
|
|
1125
|
+
output_dir = resolve_output_dir(output_dir, run_id)
|
|
1126
|
+
|
|
1127
|
+
effective_safety_config = safety_config or SafetyConfig()
|
|
1128
|
+
simulator = iints.Simulator(
|
|
1129
|
+
patient_model=patient_model,
|
|
1130
|
+
algorithm=algorithm_instance,
|
|
1131
|
+
time_step=time_step,
|
|
1132
|
+
seed=resolved_seed,
|
|
1133
|
+
safety_config=effective_safety_config,
|
|
1134
|
+
)
|
|
1135
|
+
|
|
1136
|
+
for event in stress_events:
|
|
1137
|
+
simulator.add_stress_event(event)
|
|
1138
|
+
|
|
1139
|
+
simulation_results_df, safety_report = simulator.run_batch(duration)
|
|
1140
|
+
|
|
1141
|
+
# 6. Output Results
|
|
1142
|
+
config_payload: Dict[str, Any] = {
|
|
1143
|
+
"run_type": "single",
|
|
1144
|
+
"algorithm": {
|
|
1145
|
+
"class": f"{algorithm_instance.__class__.__module__}.{algorithm_instance.__class__.__name__}",
|
|
1146
|
+
"metadata": algorithm_instance.get_algorithm_metadata().to_dict(),
|
|
1147
|
+
},
|
|
1148
|
+
"patient_config": validated_patient_params,
|
|
1149
|
+
"scenario": scenario_payload,
|
|
1150
|
+
"duration_minutes": duration,
|
|
1151
|
+
"time_step_minutes": time_step,
|
|
1152
|
+
"seed": resolved_seed,
|
|
1153
|
+
"compare_baselines": compare_baselines,
|
|
1154
|
+
"export_audit": False,
|
|
1155
|
+
"generate_report": True,
|
|
1156
|
+
"safety_config": asdict(effective_safety_config),
|
|
1157
|
+
}
|
|
1158
|
+
config_path = output_dir / "config.json"
|
|
1159
|
+
write_json(config_path, config_payload)
|
|
1160
|
+
run_metadata = build_run_metadata(run_id, resolved_seed, config_payload, output_dir)
|
|
1161
|
+
run_metadata_path = output_dir / "run_metadata.json"
|
|
1162
|
+
write_json(run_metadata_path, run_metadata)
|
|
1163
|
+
|
|
1164
|
+
results_file = output_dir / "results.csv"
|
|
1165
|
+
|
|
1166
|
+
simulation_results_df.to_csv(results_file, index=False)
|
|
1167
|
+
|
|
1168
|
+
console.print(f"\nSimulation completed. Results saved to: {results_file}")
|
|
1169
|
+
console.print(f"Run metadata: {run_metadata_path}")
|
|
1170
|
+
console.print("\n--- Safety Report ---")
|
|
1171
|
+
for key, value in safety_report.items():
|
|
1172
|
+
console.print(f"{key}: {value}")
|
|
1173
|
+
|
|
1174
|
+
console.print("\nDisplaying head of simulation results:")
|
|
1175
|
+
console.print(Panel(str(simulation_results_df.head()))) # Use Panel for rich output
|
|
1176
|
+
|
|
1177
|
+
if compare_baselines:
|
|
1178
|
+
comparison = run_baseline_comparison(
|
|
1179
|
+
patient_params=validated_patient_params,
|
|
1180
|
+
stress_event_payloads=stress_event_payloads,
|
|
1181
|
+
duration=duration,
|
|
1182
|
+
time_step=time_step,
|
|
1183
|
+
primary_label=algorithm_instance.get_algorithm_metadata().name,
|
|
1184
|
+
primary_results=simulation_results_df,
|
|
1185
|
+
primary_safety=safety_report,
|
|
1186
|
+
seed=resolved_seed,
|
|
1187
|
+
)
|
|
1188
|
+
safety_report["baseline_comparison"] = comparison
|
|
1189
|
+
baseline_paths = write_baseline_comparison(comparison, output_dir / "baseline")
|
|
1190
|
+
console.print(f"Baseline comparison saved to: {baseline_paths}")
|
|
1191
|
+
|
|
1192
|
+
# Generate full report (using the new iints.generate_report function)
|
|
1193
|
+
report_output_path = output_dir / "report.pdf"
|
|
1194
|
+
iints.generate_report(simulation_results_df, str(report_output_path), safety_report)
|
|
1195
|
+
|
|
1196
|
+
manifest_files = {
|
|
1197
|
+
"config": config_path,
|
|
1198
|
+
"run_metadata": run_metadata_path,
|
|
1199
|
+
"results_csv": results_file,
|
|
1200
|
+
"report_pdf": report_output_path,
|
|
1201
|
+
}
|
|
1202
|
+
if compare_baselines:
|
|
1203
|
+
manifest_files["baseline_json"] = output_dir / "baseline" / "baseline_comparison.json"
|
|
1204
|
+
manifest_files["baseline_csv"] = output_dir / "baseline" / "baseline_comparison.csv"
|
|
1205
|
+
run_manifest = build_run_manifest(output_dir, manifest_files)
|
|
1206
|
+
run_manifest_path = output_dir / "run_manifest.json"
|
|
1207
|
+
write_json(run_manifest_path, run_manifest)
|
|
1208
|
+
console.print(f"Run manifest: {run_manifest_path}")
|
|
1209
|
+
|
|
1210
|
+
|
|
1211
|
+
@app.command("run-full")
def run_full(
    algo: Annotated[Path, typer.Option(help="Path to the algorithm Python file")],
    patient_config_name: Annotated[str, typer.Option(help="Name of the patient configuration (e.g., 'default_patient')")] = "default_patient",
    patient_config_path: Annotated[Optional[Path], typer.Option(help="Path to a patient config YAML (overrides --patient-config-name)")] = None,
    scenario_path: Annotated[Optional[Path], typer.Option(help="Path to the scenario JSON file")] = None,
    duration: Annotated[int, typer.Option(help="Simulation duration in minutes")] = 720,
    time_step: Annotated[int, typer.Option(help="Simulation time step in minutes")] = 5,
    output_dir: Annotated[Optional[Path], typer.Option(help="Directory to save results + audit + report")] = None,
    seed: Annotated[Optional[int], typer.Option(help="Random seed for deterministic runs")] = None,
    safety_min_glucose: Annotated[Optional[float], typer.Option("--safety-min-glucose", help="Min plausible glucose (mg/dL)")] = None,
    safety_max_glucose: Annotated[Optional[float], typer.Option("--safety-max-glucose", help="Max plausible glucose (mg/dL)")] = None,
    safety_max_glucose_delta_per_5_min: Annotated[Optional[float], typer.Option("--safety-max-glucose-delta-per-5-min", help="Max glucose delta per 5 min (mg/dL)")] = None,
    safety_hypoglycemia_threshold: Annotated[Optional[float], typer.Option("--safety-hypo-threshold", help="Hypoglycemia threshold (mg/dL)")] = None,
    safety_severe_hypoglycemia_threshold: Annotated[Optional[float], typer.Option("--safety-severe-hypo-threshold", help="Severe hypoglycemia threshold (mg/dL)")] = None,
    safety_hyperglycemia_threshold: Annotated[Optional[float], typer.Option("--safety-hyper-threshold", help="Hyperglycemia threshold (mg/dL)")] = None,
    safety_max_insulin_per_bolus: Annotated[Optional[float], typer.Option("--safety-max-bolus", help="Max insulin per bolus (U)")] = None,
    safety_glucose_rate_alarm: Annotated[Optional[float], typer.Option("--safety-glucose-rate-alarm", help="Glucose rate alarm (mg/dL/min)")] = None,
    safety_max_insulin_per_hour: Annotated[Optional[float], typer.Option("--safety-max-insulin-per-hour", help="Max insulin per 60 min (U)")] = None,
    safety_max_iob: Annotated[Optional[float], typer.Option("--safety-max-iob", help="Max insulin on board (U)")] = None,
    safety_trend_stop: Annotated[Optional[float], typer.Option("--safety-trend-stop", help="Negative trend cutoff (mg/dL/min)")] = None,
    safety_hypo_cutoff: Annotated[Optional[float], typer.Option("--safety-hypo-cutoff", help="Hard hypo cutoff (mg/dL)")] = None,
    safety_critical_glucose_threshold: Annotated[Optional[float], typer.Option("--safety-critical-glucose", help="Critical glucose threshold (mg/dL)")] = None,
    safety_critical_glucose_duration_minutes: Annotated[Optional[int], typer.Option("--safety-critical-duration", help="Critical glucose duration (minutes)")] = None,
) -> None:
    """One-line runner: results CSV + audit + PDF + baseline comparison.

    Loads the algorithm from ``algo``, resolves the patient configuration
    (explicit YAML path wins over the named config), builds an optional
    SafetyConfig from the CLI overrides, and delegates the entire pipeline to
    ``iints.run_full``. Exits with code 1 on a missing patient config file.
    """
    console = Console()
    # Helper validates the file and instantiates the InsulinAlgorithm subclass
    # (prints its own errors via `console` and exits on failure — see helper).
    algorithm_instance = _load_algorithm_instance(algo, console)

    # An explicit --patient-config-path overrides --patient-config-name;
    # `iints.run_full` accepts either a Path or a config name string.
    patient_config: Union[str, Path]
    if patient_config_path:
        if not patient_config_path.is_file():
            console.print(f"[bold red]Error: Patient config file '{patient_config_path}' not found.[/bold red]")
            raise typer.Exit(code=1)
        patient_config = patient_config_path
    else:
        patient_config = patient_config_name

    # Collapse the individual --safety-* overrides into a single config object
    # (helper presumably returns None when no override is set — TODO confirm).
    safety_config = _build_safety_config_from_options(
        min_glucose=safety_min_glucose,
        max_glucose=safety_max_glucose,
        max_glucose_delta_per_5_min=safety_max_glucose_delta_per_5_min,
        hypoglycemia_threshold=safety_hypoglycemia_threshold,
        severe_hypoglycemia_threshold=safety_severe_hypoglycemia_threshold,
        hyperglycemia_threshold=safety_hyperglycemia_threshold,
        max_insulin_per_bolus=safety_max_insulin_per_bolus,
        glucose_rate_alarm=safety_glucose_rate_alarm,
        max_insulin_per_hour=safety_max_insulin_per_hour,
        max_iob=safety_max_iob,
        trend_stop=safety_trend_stop,
        hypo_cutoff=safety_hypo_cutoff,
        critical_glucose_threshold=safety_critical_glucose_threshold,
        critical_glucose_duration_minutes=safety_critical_glucose_duration_minutes,
    )

    # Single entry point that runs the simulation and writes all artifacts;
    # returns a mapping of artifact labels to output locations.
    outputs = iints.run_full(
        algorithm=algorithm_instance,
        scenario=str(scenario_path) if scenario_path else None,
        patient_config=patient_config,
        duration_minutes=duration,
        time_step=time_step,
        seed=seed,
        output_dir=output_dir,
        safety_config=safety_config,
    )

    console.print("[green]Run complete.[/green]")
    # Each artifact is optional — only report the ones actually produced.
    if "results_csv" in outputs:
        console.print(f"Results CSV: {outputs['results_csv']}")
    if "report_pdf" in outputs:
        console.print(f"Report PDF: {outputs['report_pdf']}")
    if "audit" in outputs:
        console.print(f"Audit: {outputs['audit']}")
    if "baseline_files" in outputs:
        console.print(f"Baseline files: {outputs['baseline_files']}")
    if "run_metadata_path" in outputs:
        console.print(f"Run metadata: {outputs['run_metadata_path']}")
    if "run_manifest_path" in outputs:
        console.print(f"Run manifest: {outputs['run_manifest_path']}")
    if "profiling_path" in outputs:
        console.print(f"Profiling report: {outputs['profiling_path']}")
    if "run_manifest_signature" in outputs:
        console.print(f"Run manifest signature: {outputs['run_manifest_signature']}")
|
|
1294
|
+
|
|
1295
|
+
|
|
1296
|
+
@app.command("run-parallel")
def run_parallel(
    algo: Annotated[Path, typer.Option(help="Path to the algorithm Python file")],
    scenarios_dir: Annotated[Optional[Path], typer.Option(help="Directory with scenario JSON files")] = None,
    scenario_paths: Annotated[List[Path], typer.Option("--scenario-path", help="Scenario JSON path (repeatable)")] = [],
    patient_config_name: Annotated[str, typer.Option(help="Patient config name")] = "default_patient",
    patient_config_path: Annotated[Optional[Path], typer.Option(help="Patient config YAML path")] = None,
    patient_configs_dir: Annotated[Optional[Path], typer.Option(help="Directory of patient YAML configs")] = None,
    duration: Annotated[int, typer.Option(help="Simulation duration in minutes")] = 720,
    time_step: Annotated[int, typer.Option(help="Simulation time step in minutes")] = 5,
    output_dir: Annotated[Path, typer.Option(help="Root directory for batch outputs")] = Path("./results/batch"),
    max_workers: Annotated[Optional[int], typer.Option(help="Max parallel workers")] = None,
    seed: Annotated[Optional[int], typer.Option(help="Base seed for deterministic runs")] = None,
    compare_baselines: Annotated[bool, typer.Option(help="Run PID + standard pump baselines")] = False,
    export_audit: Annotated[bool, typer.Option(help="Export audit trails")] = False,
    generate_report: Annotated[bool, typer.Option(help="Generate PDF reports")] = False,
    safety_min_glucose: Annotated[Optional[float], typer.Option("--safety-min-glucose")] = None,
    safety_max_glucose: Annotated[Optional[float], typer.Option("--safety-max-glucose")] = None,
    safety_max_glucose_delta_per_5_min: Annotated[Optional[float], typer.Option("--safety-max-glucose-delta-per-5-min")] = None,
    safety_hypoglycemia_threshold: Annotated[Optional[float], typer.Option("--safety-hypo-threshold")] = None,
    safety_severe_hypoglycemia_threshold: Annotated[Optional[float], typer.Option("--safety-severe-hypo-threshold")] = None,
    safety_hyperglycemia_threshold: Annotated[Optional[float], typer.Option("--safety-hyper-threshold")] = None,
    safety_max_insulin_per_bolus: Annotated[Optional[float], typer.Option("--safety-max-bolus")] = None,
    safety_glucose_rate_alarm: Annotated[Optional[float], typer.Option("--safety-glucose-rate-alarm")] = None,
    safety_max_insulin_per_hour: Annotated[Optional[float], typer.Option("--safety-max-insulin-per-hour")] = None,
    safety_max_iob: Annotated[Optional[float], typer.Option("--safety-max-iob")] = None,
    safety_trend_stop: Annotated[Optional[float], typer.Option("--safety-trend-stop")] = None,
    safety_hypo_cutoff: Annotated[Optional[float], typer.Option("--safety-hypo-cutoff")] = None,
    safety_critical_glucose_threshold: Annotated[Optional[float], typer.Option("--safety-critical-glucose")] = None,
    safety_critical_glucose_duration_minutes: Annotated[Optional[int], typer.Option("--safety-critical-duration")] = None,
) -> None:
    """Run many scenarios in parallel across CPU cores.

    Builds the cross product of scenario files x patient configs, assigns each
    job a deterministic seed (base seed + job index), fans the jobs out over a
    ProcessPoolExecutor, and writes a ``batch_summary.csv`` of per-job results.
    A failed job is recorded in the summary with its error string rather than
    aborting the batch.
    """
    console = Console()
    base_seed = resolve_seed(seed)
    # At least one scenario source is required.
    if scenarios_dir is None and not scenario_paths:
        console.print("[bold red]Provide --scenarios-dir or at least one --scenario-path.[/bold red]")
        raise typer.Exit(code=1)

    # Collect scenario files: everything matching *.json in the directory
    # (sorted for determinism), plus any explicitly repeated --scenario-path.
    scenarios: List[Path] = []
    if scenarios_dir:
        if not scenarios_dir.is_dir():
            console.print(f"[bold red]Scenarios directory not found: {scenarios_dir}[/bold red]")
            raise typer.Exit(code=1)
        scenarios.extend(sorted(scenarios_dir.glob("*.json")))
    scenarios.extend(scenario_paths)
    # Silently drop paths that do not exist as files.
    scenarios = [path for path in scenarios if path.is_file()]
    if not scenarios:
        console.print("[bold red]No scenario JSON files found.[/bold red]")
        raise typer.Exit(code=1)

    # Resolve patient configs; precedence: directory of YAMLs > single YAML
    # path > named config. Labels (file stem / name) key the output folders.
    patient_configs: List[Union[str, Path]] = []
    patient_labels: List[str] = []
    if patient_configs_dir:
        if not patient_configs_dir.is_dir():
            console.print(f"[bold red]Patient config directory not found: {patient_configs_dir}[/bold red]")
            raise typer.Exit(code=1)
        for path in sorted(patient_configs_dir.glob("*.yaml")):
            patient_configs.append(path)
            patient_labels.append(path.stem)
    elif patient_config_path:
        if not patient_config_path.is_file():
            console.print(f"[bold red]Patient config file not found: {patient_config_path}[/bold red]")
            raise typer.Exit(code=1)
        patient_configs.append(patient_config_path)
        patient_labels.append(patient_config_path.stem)
    else:
        patient_configs.append(patient_config_name)
        patient_labels.append(patient_config_name)

    # Only pass through safety values the user actually set; unset options
    # stay at the worker's defaults.
    safety_overrides = {
        "min_glucose": safety_min_glucose,
        "max_glucose": safety_max_glucose,
        "max_glucose_delta_per_5_min": safety_max_glucose_delta_per_5_min,
        "hypoglycemia_threshold": safety_hypoglycemia_threshold,
        "severe_hypoglycemia_threshold": safety_severe_hypoglycemia_threshold,
        "hyperglycemia_threshold": safety_hyperglycemia_threshold,
        "max_insulin_per_bolus": safety_max_insulin_per_bolus,
        "glucose_rate_alarm": safety_glucose_rate_alarm,
        "max_insulin_per_hour": safety_max_insulin_per_hour,
        "max_iob": safety_max_iob,
        "trend_stop": safety_trend_stop,
        "hypo_cutoff": safety_hypo_cutoff,
        "critical_glucose_threshold": safety_critical_glucose_threshold,
        "critical_glucose_duration_minutes": safety_critical_glucose_duration_minutes,
    }
    safety_overrides = {k: v for k, v in safety_overrides.items() if v is not None}

    output_dir.mkdir(parents=True, exist_ok=True)
    # One job per (scenario, patient) pair. Paths are stringified so the job
    # dict pickles cleanly across process boundaries; the schema here must
    # match what `_run_parallel_job` expects (defined elsewhere in this file).
    jobs: List[Dict[str, Any]] = []
    idx = 0
    for scenario in scenarios:
        for patient_config, patient_label in zip(patient_configs, patient_labels):
            # Deterministic, distinct seed per job.
            job_seed = base_seed + idx
            run_output_dir = output_dir / f"{scenario.stem}__{patient_label}"
            jobs.append(
                {
                    "algo": str(algo),
                    "scenario_path": str(scenario),
                    "patient_config": patient_config,
                    "patient_label": patient_label,
                    "output_dir": str(run_output_dir),
                    "duration_minutes": duration,
                    "time_step": time_step,
                    "seed": job_seed,
                    "compare_baselines": compare_baselines,
                    "export_audit": export_audit,
                    "generate_report": generate_report,
                    "safety_overrides": safety_overrides,
                }
            )
            idx += 1

    console.print(f"Launching {len(jobs)} parallel jobs...")
    results: List[Dict[str, Any]] = []
    with concurrent.futures.ProcessPoolExecutor(max_workers=max_workers) as executor:
        future_map = {executor.submit(_run_parallel_job, job): job for job in jobs}
        for future in concurrent.futures.as_completed(future_map):
            job = future_map[future]
            try:
                result = future.result()
            except Exception as exc:
                # A crashed job becomes a placeholder row carrying the error,
                # so the batch summary still covers every job.
                result = {
                    "scenario": Path(job["scenario_path"]).stem,
                    "patient": job["patient_label"],
                    "output_dir": job["output_dir"],
                    "results_csv": "",
                    "report_pdf": "",
                    "terminated_early": False,
                    "total_violations": 0,
                    "error": str(exc),
                }
            results.append(result)

    # Persist one summary row per job (completion order, not submission order).
    summary_df = pd.DataFrame(results)
    summary_path = output_dir / "batch_summary.csv"
    summary_df.to_csv(summary_path, index=False)
    console.print(f"[green]Batch summary saved:[/green] {summary_path}")
|
|
1433
|
+
|
|
1434
|
+
@app.command()
def report(
    results_csv: Annotated[Path, typer.Option(help="Path to a simulation results CSV")],
    output_path: Annotated[Path, typer.Option(help="Output PDF path")] = Path("./results/clinical_report.pdf"),
    safety_report_path: Annotated[Optional[Path], typer.Option(help="Optional safety report JSON path")] = None,
    audit_output_dir: Annotated[Optional[Path], typer.Option(help="Optional audit output directory")] = None,
    bundle_dir: Annotated[Optional[Path], typer.Option(help="If set, write PDF + plots + audit into this folder")] = None,
) -> None:
    """Generate a clinical PDF report (and optional audit summary) from a results CSV.

    When ``bundle_dir`` is given it overrides both ``output_path`` and
    ``audit_output_dir`` and additionally exports plots into
    ``<bundle_dir>/plots``. When ``audit_output_dir`` is set, the audit-related
    columns of the results CSV are exported as JSONL + CSV together with an
    override-count summary JSON.
    """
    console = Console()
    if not results_csv.is_file():
        console.print(f"[bold red]Error: Results file '{results_csv}' not found.[/bold red]")
        raise typer.Exit(code=1)

    results_df = pd.read_csv(results_csv)
    # Optional safety report feeds into the PDF; defaults to empty.
    safety_report: Dict[str, Any] = {}
    if safety_report_path:
        if not safety_report_path.is_file():
            console.print(f"[bold red]Error: Safety report file '{safety_report_path}' not found.[/bold red]")
            raise typer.Exit(code=1)
        safety_report = json.loads(safety_report_path.read_text())

    if bundle_dir:
        # Bundle mode: all artifacts live under one folder, overriding the
        # explicit --output-path / --audit-output-dir options.
        bundle_dir.mkdir(parents=True, exist_ok=True)
        output_path = bundle_dir / "clinical_report.pdf"
        audit_output_dir = bundle_dir / "audit"
        plots_dir = bundle_dir / "plots"
        generator = iints.ClinicalReportGenerator()
        generator.export_plots(results_df, str(plots_dir))
    output_path.parent.mkdir(parents=True, exist_ok=True)
    iints.generate_report(results_df, str(output_path), safety_report)
    console.print(f"PDF report saved to: [link=file://{output_path}]{output_path}[/link]")

    if audit_output_dir:
        audit_output_dir.mkdir(parents=True, exist_ok=True)
        # Candidate audit columns; only those actually present in the results
        # CSV are exported, so partial CSVs still work.
        audit_columns = [
            "time_minutes",
            "glucose_actual_mgdl",
            "glucose_to_algo_mgdl",
            "algo_recommended_insulin_units",
            "delivered_insulin_units",
            "safety_reason",
            "safety_triggered",
            "supervisor_latency_ms",
        ]
        available = [c for c in audit_columns if c in results_df.columns]
        audit_df = results_df[available].copy()

        jsonl_path = audit_output_dir / "audit_trail.jsonl"
        csv_path = audit_output_dir / "audit_trail.csv"
        summary_path = audit_output_dir / "audit_summary.json"

        audit_df.to_json(jsonl_path, orient="records", lines=True)
        audit_df.to_csv(csv_path, index=False)

        # Rows where the safety supervisor overrode the algorithm; an empty
        # frame slice keeps the summary well-formed when the column is absent.
        overrides = audit_df[audit_df.get("safety_triggered", False) == True] if "safety_triggered" in audit_df.columns else audit_df.iloc[0:0]
        reasons = overrides["safety_reason"].value_counts().to_dict() if "safety_reason" in overrides.columns else {}
        summary = {
            "total_steps": int(len(audit_df)),
            "total_overrides": int(len(overrides)),
            "top_reasons": reasons,
        }
        summary_path.write_text(json.dumps(summary, indent=2))
        console.print(f"Audit exports saved to: {audit_output_dir}")
|
|
1498
|
+
|
|
1499
|
+
|
|
1500
|
+
@app.command()
def validate(
    scenario_path: Annotated[Path, typer.Option(help="Path to a scenario JSON file")],
    patient_config_path: Annotated[Optional[Path], typer.Option(help="Optional patient config YAML to validate")] = None,
) -> None:
    """Validate a scenario JSON for out-of-range values and missing keys.

    Optionally also validates a patient config YAML. Any JSON/YAML parse error
    or pydantic ValidationError is printed and exits with code 1; non-fatal
    scenario issues are printed as warnings.
    """
    console = Console()
    if not scenario_path.is_file():
        console.print(f"[bold red]Error: Scenario file '{scenario_path}' not found.[/bold red]")
        raise typer.Exit(code=1)

    try:
        scenario = json.loads(scenario_path.read_text())
        scenario_model = validate_scenario_dict(scenario)
        # Soft issues (schema-valid but suspicious values) are warnings only.
        for warning in scenario_warnings(scenario_model):
            console.print(f"[yellow]Warning:[/yellow] {warning}")
    except json.JSONDecodeError as e:
        console.print(f"[bold red]Error: Invalid JSON - {e}[/bold red]")
        raise typer.Exit(code=1)
    except ValidationError as e:
        console.print("[bold red]Scenario validation failed:[/bold red]")
        for line in format_validation_error(e):
            console.print(f"- {line}")
        raise typer.Exit(code=1)

    # Patient config validation is optional and runs after the scenario passes.
    if patient_config_path:
        if not patient_config_path.is_file():
            console.print(f"[bold red]Error: Patient config '{patient_config_path}' not found.[/bold red]")
            raise typer.Exit(code=1)
        try:
            patient_config = yaml.safe_load(patient_config_path.read_text())
            validate_patient_config_dict(patient_config)
        except yaml.YAMLError as e:
            console.print(f"[bold red]Error: Invalid YAML - {e}[/bold red]")
            raise typer.Exit(code=1)
        except ValidationError as e:
            console.print("[bold red]Patient config validation failed:[/bold red]")
            for line in format_validation_error(e):
                console.print(f"- {line}")
            raise typer.Exit(code=1)

    console.print("[green]Scenario validation passed.[/green]")
|
|
1542
|
+
|
|
1543
|
+
|
|
1544
|
+
@data_app.command("list")
def data_list():
    """Render the official dataset registry as a table.

    One row per registry entry with its id, name, access requirements, and
    source; missing registry keys render as empty cells.
    """
    console = Console()
    registry_entries = load_dataset_registry()

    table = Table(title="IINTS-AF Official Datasets", show_header=True, header_style="bold cyan")
    # (header, style) pairs define the column layout in display order.
    for header, style in (("ID", "green"), ("Name", "white"), ("Access", "magenta"), ("Source", "yellow")):
        table.add_column(header, style=style)

    for entry in registry_entries:
        # Degrade gracefully when a registry entry omits a field.
        table.add_row(*(entry.get(field, "") for field in ("id", "name", "access", "source")))

    console.print(table)
|
|
1562
|
+
|
|
1563
|
+
|
|
1564
|
+
@data_app.command("info")
def data_info(
    dataset_id: Annotated[str, typer.Argument(help="Dataset id (see `iints data list`)")],
):
    """Show metadata and access info for a dataset.

    Prints the full registry entry as pretty JSON, then any citation text
    and/or BibTeX that the entry carries. Exits with code 1 for unknown ids.
    """
    console = Console()
    try:
        dataset = get_dataset(dataset_id)
    except DatasetRegistryError as e:
        console.print(f"[bold red]{e}[/bold red]")
        raise typer.Exit(code=1)

    console.print_json(json.dumps(dataset, indent=2))

    citation = dataset.get("citation", {})
    if not citation:
        return
    citation_text = citation.get("text")
    citation_bibtex = citation.get("bibtex")
    if citation_text:
        console.print("\n[bold]Citation (text)[/bold]")
        console.print(citation_text)
    if citation_bibtex:
        console.print("\n[bold]Citation (BibTeX)[/bold]")
        console.print(citation_bibtex)
|
|
1586
|
+
|
|
1587
|
+
|
|
1588
|
+
@data_app.command("cite")
def data_cite(
    dataset_id: Annotated[str, typer.Argument(help="Dataset id (see `iints data list`)")],
):
    """Print BibTeX citation for a dataset.

    Prefers the BibTeX form; falls back to the plain-text citation, and
    warns when the registry has neither. Exits with code 1 when the
    dataset id is unknown.
    """
    console = Console()
    try:
        dataset = get_dataset(dataset_id)
    except DatasetRegistryError as e:
        console.print(f"[bold red]{e}[/bold red]")
        raise typer.Exit(code=1)

    citation = dataset.get("citation", {})
    # BibTeX takes precedence over the plain-text citation when both exist.
    preferred = citation.get("bibtex") or citation.get("text")
    if preferred:
        console.print(preferred)
    else:
        console.print("[yellow]No citation available for this dataset.[/yellow]")
|
|
1609
|
+
|
|
1610
|
+
|
|
1611
|
+
@data_app.command("fetch")
def data_fetch(
    dataset_id: Annotated[str, typer.Argument(help="Dataset id (see `iints data list`)")],
    output_dir: Annotated[Optional[Path], typer.Option(help="Output directory (default: data_packs/official/<id>)")] = None,
    extract: Annotated[bool, typer.Option(help="Extract zip files if present")] = True,
    verify: Annotated[bool, typer.Option(help="Verify SHA-256 if available and emit SHA256SUMS.txt")] = True,
):
    """Download a dataset (public-download only).

    Datasets whose access level is "request" or "manual" cannot be
    fetched automatically; for those the command prints the upstream
    source and the expected local directory, then returns.
    """
    console = Console()
    try:
        dataset = get_dataset(dataset_id)
    except DatasetRegistryError as e:
        console.print(f"[bold red]{e}[/bold red]")
        raise typer.Exit(code=1)

    # Default destination is the repo-local data_packs/official tree.
    target = output_dir if output_dir is not None else Path("data_packs") / "official" / dataset_id
    target = target.expanduser()
    # Anchor relative paths at the current working directory.
    if target.is_absolute():
        target = target.resolve()
    else:
        target = (Path.cwd() / target).resolve()

    access = dataset.get("access", "manual")
    landing = dataset.get("landing_page", "")
    if access in {"request", "manual"}:
        # Gated dataset: tell the user where to get it and where to put it.
        console.print("[yellow]Manual download required for this dataset.[/yellow]")
        if landing:
            console.print(f"Source: {landing}")
        console.print("After downloading, place files in:")
        console.print(f" {target}")
        return

    try:
        downloaded = fetch_dataset(dataset_id, output_dir=target, extract=extract, verify=verify)
        console.print(f"[green]Downloaded {len(downloaded)} file(s) to {target}[/green]")
    except DatasetFetchError as e:
        console.print(f"[bold red]{e}[/bold red]")
        raise typer.Exit(code=1)
|
|
1650
|
+
|
|
1651
|
+
|
|
1652
|
+
# ---------------------------------------------------------------------------
# P1-4: Research pipeline CLI commands
# ---------------------------------------------------------------------------

# Sub-application grouping dataset-preparation and quality-report commands.
# Registered on the main app under the `iints research ...` namespace.
research_app = typer.Typer(help="Research pipeline: dataset preparation and quality reporting.")
app.add_typer(research_app, name="research")
|
|
1658
|
+
|
|
1659
|
+
|
|
1660
|
+
@research_app.command("prepare-azt1d")
def research_prepare_azt1d(
    input_dir: Annotated[Path, typer.Option(help="Root directory containing AZT1D Subject folders")] = Path("data_packs/public/azt1d/AZT1D 2025/CGM Records"),
    output: Annotated[Path, typer.Option(help="Output dataset path (CSV or Parquet)")] = Path("data_packs/public/azt1d/processed/azt1d_merged.csv"),
    report: Annotated[Path, typer.Option(help="Quality report output path")] = Path("data_packs/public/azt1d/quality_report.json"),
    time_step: Annotated[int, typer.Option(help="Expected CGM sample interval (minutes)")] = 5,
    max_gap_multiplier: Annotated[float, typer.Option(help="Segment-break gap multiplier")] = 2.5,
    dia_minutes: Annotated[float, typer.Option(help="Insulin action duration (minutes)")] = 240.0,
    peak_minutes: Annotated[float, typer.Option(help="IOB peak time (minutes, OpenAPS bilinear)")] = 75.0,
    carb_absorb_minutes: Annotated[float, typer.Option(help="Carb absorption duration (minutes)")] = 120.0,
    max_basal: Annotated[float, typer.Option(help="Clip basal values above this (U/hr)")] = 20.0,
    max_bolus: Annotated[float, typer.Option(help="Clip bolus values above this (U)")] = 30.0,
    max_carbs: Annotated[float, typer.Option(help="Clip carb grams above this")] = 200.0,
    basal_is_rate: Annotated[bool, typer.Option(help="Treat Basal column as U/hr (convert to U/step)")] = True,
):
    """
    Prepare the AZT1D CGM dataset for LSTM predictor training.

    Reads per-subject CSVs, applies basal-rate conversion (U/hr → U/step),
    derives IOB/COB using the OpenAPS bilinear model, adds time-of-day
    cyclical features, and writes the merged dataset plus a quality report.

    Example
    -------
    iints research prepare-azt1d --input-dir data_packs/public/azt1d/... --output merged.parquet
    """
    console = Console()
    if not input_dir.exists():
        console.print(f"[bold red]Input directory not found: {input_dir}[/bold red]")
        raise typer.Exit(code=1)

    import subprocess
    import sys

    # The research scripts live in the repository's `research/` directory,
    # five levels up from this module. Computed once and reused below
    # (the original evaluated this path expression twice).
    script = Path(__file__).parent.parent.parent.parent.parent / "research" / "prepare_azt1d.py"

    cmd = [
        sys.executable,
        str(script),
        "--input", str(input_dir),
        "--output", str(output),
        "--report", str(report),
        "--time-step", str(time_step),
        "--max-gap-multiplier", str(max_gap_multiplier),
        "--dia-minutes", str(dia_minutes),
        "--peak-minutes", str(peak_minutes),
        "--carb-absorb-minutes", str(carb_absorb_minutes),
        "--max-basal", str(max_basal),
        "--max-bolus", str(max_bolus),
        "--max-carbs", str(max_carbs),
    ]
    if not basal_is_rate:
        cmd.append("--no-basal-is-rate")
    try:
        import importlib.util as _ilu
        # Prefer in-process execution when the research script is importable;
        # fall back to a subprocess otherwise.
        spec = _ilu.spec_from_file_location("_prepare_azt1d", script)
        if spec is not None and spec.loader is not None:
            old_argv = sys.argv[:]
            sys.argv = cmd[1:]  # strip the python interpreter from argv
            try:
                mod = _ilu.module_from_spec(spec)
                spec.loader.exec_module(mod)  # type: ignore[union-attr]
                mod.main()  # type: ignore[attr-defined]
            finally:
                sys.argv = old_argv
        else:
            subprocess.run(cmd, check=True)
    except Exception as exc:
        console.print(f"[bold red]prepare-azt1d failed: {exc}[/bold red]")
        raise typer.Exit(code=1) from exc

    console.print(f"[green]Dataset written to:[/green] {output}")
    console.print(f"[green]Quality report :[/green] {report}")
|
|
1734
|
+
|
|
1735
|
+
|
|
1736
|
+
@research_app.command("prepare-ohio")
def research_prepare_ohio(
    input_dir: Annotated[Path, typer.Option(help="Root directory containing OhioT1DM patient_* folders")] = Path("data_packs/public/ohio_t1dm"),
    output: Annotated[Path, typer.Option(help="Output dataset path (CSV or Parquet)")] = Path("data_packs/public/ohio_t1dm/processed/ohio_t1dm_merged.csv"),
    report: Annotated[Path, typer.Option(help="Quality report output path")] = Path("data_packs/public/ohio_t1dm/quality_report.json"),
    time_step: Annotated[int, typer.Option(help="Expected CGM sample interval (minutes)")] = 5,
    max_gap_multiplier: Annotated[float, typer.Option(help="Segment-break gap multiplier")] = 2.5,
    dia_minutes: Annotated[float, typer.Option(help="Insulin action duration (minutes)")] = 240.0,
    peak_minutes: Annotated[float, typer.Option(help="IOB peak time (minutes, OpenAPS bilinear)")] = 75.0,
    carb_absorb_minutes: Annotated[float, typer.Option(help="Carb absorption duration (minutes)")] = 120.0,
    max_insulin: Annotated[float, typer.Option(help="Clip insulin units above this")] = 30.0,
    max_carbs: Annotated[float, typer.Option(help="Clip carb grams above this")] = 200.0,
    icr_default: Annotated[float, typer.Option(help="Fallback ICR (g/U)")] = 10.0,
    isf_default: Annotated[float, typer.Option(help="Fallback ISF (mg/dL per U)")] = 50.0,
    basal_default: Annotated[float, typer.Option(help="Fallback basal rate (U/hr)")] = 0.0,
    meal_window_min: Annotated[float, typer.Option(help="Meal→insulin matching window (minutes)")] = 30.0,
    isf_window_min: Annotated[float, typer.Option(help="ISF estimation window (minutes)")] = 60.0,
    min_meal_carbs: Annotated[float, typer.Option(help="Minimum carbs to consider a meal (g)")] = 5.0,
    min_bolus: Annotated[float, typer.Option(help="Minimum insulin to consider a bolus (U)")] = 0.1,
):
    """
    Prepare the OhioT1DM dataset for LSTM predictor training.

    Reads per-patient CSVs, derives IOB/COB using the OpenAPS bilinear model,
    estimates effective ISF/ICR/basal per subject, adds time-of-day features,
    and writes the merged dataset plus a quality report.

    Example
    -------
    iints research prepare-ohio --input-dir data_packs/public/ohio_t1dm --output ohio.parquet
    """
    console = Console()
    if not input_dir.exists():
        console.print(f"[bold red]Input directory not found: {input_dir}[/bold red]")
        raise typer.Exit(code=1)

    import subprocess
    import sys

    # Research script path computed once (the original duplicated this
    # expression for the spec lookup below).
    script = Path(__file__).parent.parent.parent.parent.parent / "research" / "prepare_ohio_t1dm.py"

    cmd = [
        sys.executable,
        str(script),
        "--input", str(input_dir),
        "--output", str(output),
        "--report", str(report),
        "--time-step", str(time_step),
        "--max-gap-multiplier", str(max_gap_multiplier),
        "--dia-minutes", str(dia_minutes),
        "--peak-minutes", str(peak_minutes),
        "--carb-absorb-minutes", str(carb_absorb_minutes),
        "--max-insulin", str(max_insulin),
        "--max-carbs", str(max_carbs),
        "--icr-default", str(icr_default),
        "--isf-default", str(isf_default),
        "--basal-default", str(basal_default),
        "--meal-window-min", str(meal_window_min),
        "--isf-window-min", str(isf_window_min),
        "--min-meal-carbs", str(min_meal_carbs),
        "--min-bolus", str(min_bolus),
    ]
    try:
        import importlib.util as _ilu
        # Prefer in-process execution when the research script is importable;
        # fall back to a subprocess otherwise.
        spec = _ilu.spec_from_file_location("_prepare_ohio_t1dm", script)
        if spec is not None and spec.loader is not None:
            old_argv = sys.argv[:]
            sys.argv = cmd[1:]  # strip the python interpreter from argv
            try:
                mod = _ilu.module_from_spec(spec)
                spec.loader.exec_module(mod)  # type: ignore[union-attr]
                mod.main()  # type: ignore[attr-defined]
            finally:
                sys.argv = old_argv
        else:
            subprocess.run(cmd, check=True)
    except Exception as exc:
        console.print(f"[bold red]prepare-ohio failed: {exc}[/bold red]")
        raise typer.Exit(code=1) from exc

    console.print(f"[green]Dataset written to:[/green] {output}")
    console.print(f"[green]Quality report :[/green] {report}")
|
|
1818
|
+
|
|
1819
|
+
|
|
1820
|
+
@research_app.command("prepare-hupa")
def research_prepare_hupa(
    input_dir: Annotated[Path, typer.Option(help="Root directory containing HUPA-UCM CSV files")] = Path("data_packs/public/hupa_ucm"),
    output: Annotated[Path, typer.Option(help="Output dataset path (CSV or Parquet)")] = Path("data_packs/public/hupa_ucm/processed/hupa_ucm_merged.csv"),
    report: Annotated[Path, typer.Option(help="Quality report output path")] = Path("data_packs/public/hupa_ucm/quality_report.json"),
    time_step: Annotated[int, typer.Option(help="Expected CGM sample interval (minutes)")] = 5,
    max_gap_multiplier: Annotated[float, typer.Option(help="Segment-break gap multiplier")] = 2.5,
    dia_minutes: Annotated[float, typer.Option(help="Insulin action duration (minutes)")] = 240.0,
    peak_minutes: Annotated[float, typer.Option(help="IOB peak time (minutes, OpenAPS bilinear)")] = 75.0,
    carb_absorb_minutes: Annotated[float, typer.Option(help="Carb absorption duration (minutes)")] = 120.0,
    max_insulin: Annotated[float, typer.Option(help="Clip insulin units above this")] = 30.0,
    max_carbs: Annotated[float, typer.Option(help="Clip carb grams above this")] = 200.0,
    carb_serving_grams: Annotated[float, typer.Option(help="Carb serving size (g) for carb_input")] = 10.0,
    basal_is_rate: Annotated[bool, typer.Option(help="Treat basal_rate as U/hr (convert to U/step)")] = False,
    icr_default: Annotated[float, typer.Option(help="Fallback ICR (g/U)")] = 10.0,
    isf_default: Annotated[float, typer.Option(help="Fallback ISF (mg/dL per U)")] = 50.0,
    basal_default: Annotated[float, typer.Option(help="Fallback basal rate (U/hr)")] = 0.0,
    meal_window_min: Annotated[float, typer.Option(help="Meal→insulin matching window (minutes)")] = 30.0,
    isf_window_min: Annotated[float, typer.Option(help="ISF estimation window (minutes)")] = 60.0,
    min_meal_carbs: Annotated[float, typer.Option(help="Minimum carbs to consider a meal (g)")] = 5.0,
    min_bolus: Annotated[float, typer.Option(help="Minimum insulin to consider a bolus (U)")] = 0.1,
):
    """
    Prepare the HUPA-UCM dataset for LSTM predictor training.

    Parses per-patient CSVs, derives IOB/COB, estimates ISF/ICR/basal per
    subject, and writes the merged dataset plus a quality report.

    Example
    -------
    iints research prepare-hupa --input-dir data_packs/public/hupa_ucm --output hupa.parquet
    """
    console = Console()
    if not input_dir.exists():
        console.print(f"[bold red]Input directory not found: {input_dir}[/bold red]")
        raise typer.Exit(code=1)

    import subprocess
    import sys

    # Research script path computed once (the original duplicated this
    # expression for the spec lookup below).
    script = Path(__file__).parent.parent.parent.parent.parent / "research" / "prepare_hupa_ucm.py"

    cmd = [
        sys.executable,
        str(script),
        "--input", str(input_dir),
        "--output", str(output),
        "--report", str(report),
        "--time-step", str(time_step),
        "--max-gap-multiplier", str(max_gap_multiplier),
        "--dia-minutes", str(dia_minutes),
        "--peak-minutes", str(peak_minutes),
        "--carb-absorb-minutes", str(carb_absorb_minutes),
        "--max-insulin", str(max_insulin),
        "--max-carbs", str(max_carbs),
        "--carb-serving-grams", str(carb_serving_grams),
        "--icr-default", str(icr_default),
        "--isf-default", str(isf_default),
        "--basal-default", str(basal_default),
        "--meal-window-min", str(meal_window_min),
        "--isf-window-min", str(isf_window_min),
        "--min-meal-carbs", str(min_meal_carbs),
        "--min-bolus", str(min_bolus),
    ]
    # The HUPA script (unlike azt1d) requires an explicit flag either way.
    cmd.append("--basal-is-rate" if basal_is_rate else "--no-basal-is-rate")
    try:
        import importlib.util as _ilu
        # Prefer in-process execution when the research script is importable;
        # fall back to a subprocess otherwise.
        spec = _ilu.spec_from_file_location("_prepare_hupa_ucm", script)
        if spec is not None and spec.loader is not None:
            old_argv = sys.argv[:]
            sys.argv = cmd[1:]  # strip the python interpreter from argv
            try:
                mod = _ilu.module_from_spec(spec)
                spec.loader.exec_module(mod)  # type: ignore[union-attr]
                mod.main()  # type: ignore[attr-defined]
            finally:
                sys.argv = old_argv
        else:
            subprocess.run(cmd, check=True)
    except Exception as exc:
        console.print(f"[bold red]prepare-hupa failed: {exc}[/bold red]")
        raise typer.Exit(code=1) from exc

    console.print(f"[green]Dataset written to:[/green] {output}")
    console.print(f"[green]Quality report :[/green] {report}")
|
|
1908
|
+
|
|
1909
|
+
|
|
1910
|
+
@research_app.command("quality")
def research_quality(
    report: Annotated[Path, typer.Option(help="Path to quality_report.json produced by prepare-azt1d")] = Path("data_packs/public/azt1d/quality_report.json"),
):
    """
    Display a quality report produced by `iints research prepare-azt1d`.

    Shows dataset summary statistics, subject count, glucose range, and
    pipeline parameters in a formatted table.

    Example
    -------
    iints research quality --report data_packs/public/azt1d/quality_report.json
    """
    console = Console()
    if not report.exists():
        console.print(f"[bold red]Report not found: {report}[/bold red]")
        console.print("Run [bold]iints research prepare-azt1d[/bold] first.")
        raise typer.Exit(code=1)

    try:
        data = json.loads(report.read_text())
    except Exception as exc:
        console.print(f"[bold red]Failed to parse report: {exc}[/bold red]")
        raise typer.Exit(code=1) from exc

    # Fixed: the title was an f-string with no placeholders (ruff F541).
    table = Table(title="AZT1D Dataset Quality Report", show_header=True, header_style="bold cyan")
    table.add_column("Field", style="green")
    table.add_column("Value", style="white")

    # (report key, human-readable label) pairs, in display order.
    display_keys = [
        ("source", "Source directory"),
        ("records_total", "Total records"),
        ("subjects", "Subject count"),
        ("subject_ids", "Subject IDs"),
        ("start_time", "Start time"),
        ("end_time", "End time"),
        ("glucose_mean", "Glucose mean (mg/dL)"),
        ("glucose_std", "Glucose std (mg/dL)"),
        ("glucose_min", "Glucose min (mg/dL)"),
        ("glucose_max", "Glucose max (mg/dL)"),
        ("insulin_mean", "Insulin mean (U/step)"),
        ("carb_mean", "Carb mean (g/step)"),
        ("iob_model", "IOB model"),
        ("basal_is_rate", "Basal as U/hr"),
        ("time_step_minutes", "Time step (min)"),
        ("dia_minutes", "DIA (min)"),
        ("peak_minutes", "Peak time (min)"),
        ("carb_absorb_minutes", "Carb absorb (min)"),
    ]

    for key, label in display_keys:
        if key in data:
            val = data[key]
            # Lists join into a comma-separated string; floats get a fixed
            # 3-decimal format; everything else falls back to str().
            if isinstance(val, list):
                val = ", ".join(str(v) for v in val)
            elif isinstance(val, float):
                val = f"{val:.3f}"
            else:
                val = str(val)
            table.add_row(label, val)

    console.print(table)
|
|
1973
|
+
|
|
1974
|
+
|
|
1975
|
+
@research_app.command("export-onnx")
def research_export_onnx(
    model: Annotated[Path, typer.Option(help="Predictor checkpoint (.pt)")] = Path("models/hupa_finetuned_v2/predictor.pt"),
    out: Annotated[Path, typer.Option(help="Output ONNX file path")] = Path("models/predictor.onnx"),
):
    """
    Export a trained predictor to ONNX for edge/Jetson deployment.

    Delegates to the repository's `research/export_predictor.py` script,
    run as a subprocess with the current interpreter.
    """
    console = Console()
    if not model.exists():
        console.print(f"[bold red]Model not found: {model}[/bold red]")
        raise typer.Exit(code=1)

    import subprocess
    import sys

    script = Path(__file__).parent.parent.parent.parent.parent / "research" / "export_predictor.py"
    export_cmd = [
        sys.executable,
        str(script),
        "--model", str(model),
        "--out", str(out),
    ]
    try:
        subprocess.run(export_cmd, check=True)
    except Exception as exc:
        console.print(f"[bold red]export-onnx failed: {exc}[/bold red]")
        raise typer.Exit(code=1)

    console.print(f"[green]ONNX written to:[/green] {out}")
|
|
2002
|
+
|
|
2003
|
+
|
|
2004
|
+
@app.command("import-data")
def import_data(
    input_csv: Annotated[Path, typer.Option(help="Path to CGM CSV file")],
    output_dir: Annotated[Path, typer.Option(help="Output directory for scenario + standard CSV")] = Path("./results/imported"),
    data_format: Annotated[str, typer.Option(help="Data format preset: generic, dexcom, libre")] = "generic",
    scenario_name: Annotated[str, typer.Option(help="Scenario name")] = "Imported CGM Scenario",
    scenario_version: Annotated[str, typer.Option(help="Scenario version")] = "1.0",
    time_unit: Annotated[str, typer.Option(help="Timestamp unit: minutes or seconds")] = "minutes",
    carb_threshold: Annotated[float, typer.Option(help="Minimum carbs (g) to create a meal event")] = 0.1,
    scenario_path: Annotated[Optional[Path], typer.Option(help="Optional output scenario path")] = None,
    data_path: Annotated[Optional[Path], typer.Option(help="Optional output standard CSV path")] = None,
    mapping: Annotated[List[str], typer.Option("--map", help="Column mapping key=value (e.g., timestamp=Time, glucose=SGV)")] = [],
):
    """Import real-world CGM CSV into the IINTS standard schema + scenario JSON."""
    console = Console()
    if not input_csv.is_file():
        console.print(f"[bold red]Error: Input CSV '{input_csv}' not found.[/bold red]")
        raise typer.Exit(code=1)

    column_map = _parse_column_mapping(mapping, console)

    try:
        result = scenario_from_csv(
            input_csv,
            scenario_name=scenario_name,
            scenario_version=scenario_version,
            data_format=data_format,
            column_map=column_map or None,
            time_unit=time_unit,
            carb_threshold=carb_threshold,
        )
    except Exception as e:
        console.print(f"[bold red]Import failed: {e}[/bold red]")
        raise typer.Exit(code=1)

    output_dir.mkdir(parents=True, exist_ok=True)
    # Fall back to conventional filenames inside the output directory
    # when explicit destinations were not given.
    scenario_file = scenario_path or output_dir / "scenario.json"
    csv_file = data_path or output_dir / "cgm_standard.csv"

    scenario_file.write_text(json.dumps(result.scenario, indent=2))
    export_standard_csv(result.dataframe, csv_file)

    console.print(f"[green]Scenario saved:[/green] {scenario_file}")
    console.print(f"[green]Standard CSV saved:[/green] {csv_file}")
    console.print(f"Rows: {len(result.dataframe)} | Meal events: {len(result.scenario.get('stress_events', []))}")
|
|
2049
|
+
|
|
2050
|
+
|
|
2051
|
+
@app.command("import-wizard")
def import_wizard():
    """Interactive wizard to import real-world CGM CSVs.

    Prompts for the input file, format preset, and column mapping (with
    guessed defaults), previews the first rows, then writes scenario.json
    and cgm_standard.csv to the chosen output directory.
    """
    console = Console()
    input_csv = Path(typer.prompt("Path to CGM CSV"))
    if not input_csv.is_file():
        console.print(f"[bold red]Error: Input CSV '{input_csv}' not found.[/bold red]")
        raise typer.Exit(code=1)

    data_format = typer.prompt("Data format (generic/dexcom/libre)", default="generic")

    # Read only the header row to discover the columns.
    columns = list(pd.read_csv(input_csv, nrows=1).columns)
    guesses = guess_column_mapping(columns, data_format=data_format)

    console.print(f"[bold]Detected columns:[/bold] {', '.join(columns)}")
    try:
        preview = pd.read_csv(input_csv, nrows=5)
        console.print(Panel(preview.to_string(index=False), title="Preview (first 5 rows)"))
    except Exception as exc:
        # Preview is best-effort; a malformed file still gets a chance below.
        console.print(f"[yellow]Preview unavailable:[/yellow] {exc}")

    # Guessed columns become prompt defaults; blank answers drop the key.
    answers = {
        "timestamp": typer.prompt("Timestamp column", default=guesses.get("timestamp") or columns[0]),
        "glucose": typer.prompt("Glucose column", default=guesses.get("glucose") or ""),
        "carbs": typer.prompt("Carbs column (optional)", default=guesses.get("carbs") or ""),
        "insulin": typer.prompt("Insulin column (optional)", default=guesses.get("insulin") or ""),
    }
    mapping = {key: value.strip() for key, value in answers.items() if value.strip()}

    time_unit = typer.prompt("Timestamp unit (minutes/seconds)", default="minutes")
    scenario_name = typer.prompt("Scenario name", default="Imported CGM Scenario")
    output_dir = Path(typer.prompt("Output directory", default="results/imported"))

    try:
        result = scenario_from_csv(
            input_csv,
            scenario_name=scenario_name,
            data_format=data_format,
            column_map=mapping or None,
            time_unit=time_unit,
        )
    except Exception as exc:
        console.print(f"[bold red]Import failed: {exc}[/bold red]")
        raise typer.Exit(code=1)

    output_dir.mkdir(parents=True, exist_ok=True)
    scenario_path = output_dir / "scenario.json"
    data_path = output_dir / "cgm_standard.csv"

    scenario_path.write_text(json.dumps(result.scenario, indent=2))
    export_standard_csv(result.dataframe, data_path)

    console.print(f"[green]Scenario saved:[/green] {scenario_path}")
    console.print(f"[green]Standard CSV saved:[/green] {data_path}")
    console.print(f"Rows: {len(result.dataframe)} | Meal events: {len(result.scenario.get('stress_events', []))}")
|
|
2112
|
+
|
|
2113
|
+
|
|
2114
|
+
@app.command("import-demo")
def import_demo(
    output_dir: Annotated[Path, typer.Option(help="Output directory for scenario + CSV")] = Path("./results/demo_import"),
    scenario_name: Annotated[str, typer.Option(help="Scenario name")] = "Demo CGM Scenario",
    export_raw: Annotated[bool, typer.Option(help="Export the raw demo CSV into output dir")] = True,
):
    """Generate a ready-to-run scenario from the bundled demo CGM data pack."""
    console = Console()
    # Pipeline: demo pack -> standard schema -> scenario JSON.
    standard_df = import_cgm_dataframe(load_demo_dataframe(), data_format="generic", source="demo")
    scenario = scenario_from_dataframe(standard_df, scenario_name=scenario_name)

    output_dir.mkdir(parents=True, exist_ok=True)
    scenario_path = output_dir / "scenario.json"
    data_path = output_dir / "cgm_standard.csv"

    scenario_path.write_text(json.dumps(scenario, indent=2))
    export_standard_csv(standard_df, data_path)
    if export_raw:
        export_demo_csv(output_dir / "demo_cgm.csv")

    console.print(f"[green]Scenario saved:[/green] {scenario_path}")
    console.print(f"[green]Standard CSV saved:[/green] {data_path}")
    if export_raw:
        console.print(f"[green]Raw demo CSV saved:[/green] {output_dir / 'demo_cgm.csv'}")
|
|
2139
|
+
|
|
2140
|
+
|
|
2141
|
+
@app.command("import-nightscout")
def import_nightscout_cmd(
    url: Annotated[str, typer.Option(help="Nightscout base URL")],
    output_dir: Annotated[Path, typer.Option(help="Output directory for scenario + CSV")] = Path("./results/nightscout_import"),
    api_secret: Annotated[Optional[str], typer.Option(help="API secret (if required)")] = None,
    token: Annotated[Optional[str], typer.Option(help="API token (if required)")] = None,
    start: Annotated[Optional[str], typer.Option(help="Start time (ISO string)")] = None,
    end: Annotated[Optional[str], typer.Option(help="End time (ISO string)")] = None,
    limit: Annotated[Optional[int], typer.Option(help="Limit number of entries")] = None,
    scenario_name: Annotated[str, typer.Option(help="Scenario name")] = "Nightscout Import",
):
    """Import CGM entries from Nightscout into a scenario + standard CSV."""
    console = Console()
    ns_config = NightscoutConfig(
        url=url,
        api_secret=api_secret,
        token=token,
        start=start,
        end=end,
        limit=limit,
    )
    try:
        result = import_nightscout(ns_config, scenario_name=scenario_name)
    except ImportError as exc:
        # Presumably a missing optional dependency — surface as-is.
        console.print(f"[bold red]{exc}[/bold red]")
        raise typer.Exit(code=1)
    except Exception as exc:
        console.print(f"[bold red]Nightscout import failed: {exc}[/bold red]")
        raise typer.Exit(code=1)

    output_dir.mkdir(parents=True, exist_ok=True)
    scenario_path = output_dir / "scenario.json"
    data_path = output_dir / "cgm_standard.csv"
    scenario_path.write_text(json.dumps(result.scenario, indent=2))
    export_standard_csv(result.dataframe, data_path)
    console.print(f"[green]Scenario saved:[/green] {scenario_path}")
    console.print(f"[green]Standard CSV saved:[/green] {data_path}")
|
|
2178
|
+
|
|
2179
|
+
|
|
2180
|
+
@app.command("import-tidepool")
def import_tidepool_cmd(
    base_url: Annotated[str, typer.Option(help="Tidepool API base URL")] = "https://api.tidepool.org",
    token: Annotated[Optional[str], typer.Option(help="Bearer token")] = None,
):
    """Skeleton Tidepool client for future cloud imports."""
    console = Console()
    client = TidepoolClient(base_url=base_url, token=token)
    try:
        # NOTE(review): reaches into the private _headers() helper purely to
        # check that credentials are usable; swap for a public auth-check
        # API on TidepoolClient once one exists.
        _ = client._headers()
    except Exception as exc:
        console.print(f"[bold red]{exc}[/bold red]")
        raise typer.Exit(code=1)
    console.print("[yellow]Tidepool client skeleton is initialized. Auth flow and endpoints are TODO.[/yellow]")
|
|
2194
|
+
|
|
2195
|
+
@app.command("check-deps")
def check_deps():
    """Check optional dependencies and report readiness."""
    console = Console()

    def _installed(module: str) -> bool:
        # A dependency counts as present if an import spec can be located.
        return importlib.util.find_spec(module) is not None

    # (component, available?, notes) triples — one row per feature area.
    rows = (
        ("Simulator", True, "Core simulation engine"),
        ("Metrics", _installed("numpy") and _installed("pandas"), "Clinical metrics"),
        ("Reporting", _installed("matplotlib") and _installed("fpdf"), "PDF reports"),
        ("Validation", _installed("pydantic"), "Schema validation"),
        ("Deep Learning", _installed("torch"), "AI models (install with `pip install iints[torch]`)"),
    )

    table = Table(title="IINTS Dependency Check", show_lines=False)
    table.add_column("Component", style="cyan")
    table.add_column("Status", style="green")
    table.add_column("Notes")
    for component, available, notes in rows:
        table.add_row(component, "OK" if available else "Missing", notes)

    console.print(table)
|
|
2221
|
+
|
|
2222
|
+
|
|
2223
|
+
@algorithms_app.command("list")
def algorithms_list():
    """List available algorithm plugins and built-ins."""
    console = Console()
    table = Table(title="IINTS Algorithms", show_header=True, header_style="bold cyan")
    # Column headers and their display styles, in output order.
    for header, colour in (
        ("Name", "green"),
        ("Source", "magenta"),
        ("Status", "yellow"),
        ("Class", "white"),
    ):
        table.add_column(header, style=colour)
    for plugin in list_algorithm_plugins():
        table.add_row(plugin.name, plugin.source, plugin.status, plugin.class_path)
    console.print(table)
|
|
2241
|
+
|
|
2242
|
+
|
|
2243
|
+
@algorithms_app.command("info")
def algorithms_info(
    name: Annotated[str, typer.Argument(help="Algorithm display name")],
):
    """Show metadata for a specific algorithm."""
    console = Console()
    wanted = name.lower()
    # Case-insensitive lookup; first match wins (names are expected unique).
    entry = next(
        (candidate for candidate in list_algorithm_plugins() if candidate.name.lower() == wanted),
        None,
    )
    if entry is None:
        console.print(f"[bold red]Algorithm '{name}' not found.[/bold red]")
        raise typer.Exit(code=1)
    console.print(f"[bold]Name:[/bold] {entry.name}")
    console.print(f"[bold]Source:[/bold] {entry.source}")
    console.print(f"[bold]Status:[/bold] {entry.status}")
    console.print(f"[bold]Class:[/bold] {entry.class_path}")
    if entry.error:
        console.print(f"[bold red]Error:[/bold red] {entry.error}")
    if entry.metadata:
        console.print_json(json.dumps(entry.metadata.to_dict(), indent=2))
|
|
2263
|
+
@docs_app.command("algo")
def docs_algo(
    algo_path: Annotated[Path, typer.Option(help="Path to the algorithm Python file to document")],
):
    """
    Generates a technical summary (auto-documentation) for a specified InsulinAlgorithm.

    Dynamically imports the file at *algo_path*, locates the first subclass of
    ``iints.InsulinAlgorithm`` in the module namespace, instantiates it to read
    its metadata, and renders a panel with the metadata plus the class and
    ``predict_insulin`` docstrings.

    Exits with code 1 when the file is missing, fails to import, or contains no
    InsulinAlgorithm subclass.
    """
    console = Console()
    console.print(f"[bold blue]Generating Auto-Documentation for Algorithm: {algo_path.name}[/bold blue]")

    if not algo_path.is_file():
        console.print(f"[bold red]Error: Algorithm file '{algo_path}' not found.[/bold red]")
        raise typer.Exit(code=1)

    # Load the algorithm module dynamically from its file path.
    module_name = algo_path.stem
    spec = importlib.util.spec_from_file_location(module_name, algo_path)
    if spec is None:
        console.print(f"[bold red]Error: Could not load module spec for {algo_path}[/bold red]")
        raise typer.Exit(code=1)

    module = importlib.util.module_from_spec(spec)
    module.iints = iints  # type: ignore # Inject iints package so the user file can reference it
    sys.modules[module_name] = module
    try:
        if spec.loader:  # Ensure loader is not None
            spec.loader.exec_module(module)
        else:
            raise ImportError(f"Could not load module loader for {algo_path}")
    except Exception as e:
        console.print(f"[bold red]Error loading algorithm module {algo_path}: {e}[/bold red]")
        raise typer.Exit(code=1)

    # Find the first InsulinAlgorithm subclass present in the module namespace.
    # (Was iterating .items() with an unused key; only the values are needed.)
    algorithm_class = None
    algorithm_instance = None
    for obj in module.__dict__.values():
        if isinstance(obj, type) and issubclass(obj, iints.InsulinAlgorithm) and obj is not iints.InsulinAlgorithm:
            algorithm_class = obj
            algorithm_instance = obj()  # Instantiate to get metadata
            break

    # Explicit runtime check instead of a bare `assert` (asserts vanish under -O).
    if algorithm_instance is None or algorithm_class is None:
        console.print(f"[bold red]Error: No subclass of InsulinAlgorithm found in {algo_path}[/bold red]")
        raise typer.Exit(code=1)

    # Extract metadata and documentation strings, with readable fallbacks.
    metadata = algorithm_instance.get_algorithm_metadata()
    class_doc = algorithm_class.__doc__ or "No class docstring available."
    predict_insulin_doc = algorithm_class.predict_insulin.__doc__ or "No docstring for predict_insulin method."

    console.print(Panel(
        f"[bold blue]Algorithm Overview[/bold blue]\n\n"
        f"[green]Name:[/green] {metadata.name}\n"
        f"[green]Version:[/green] {metadata.version}\n"
        f"[green]Author:[/green] {metadata.author}\n"
        f"[green]Type:[/green] {metadata.algorithm_type}\n"
        f"[green]Description:[/green] {metadata.description}\n"
        f"[green]Requires Training:[/green] {metadata.requires_training}\n"
        f"[green]Supported Scenarios:[/green] {', '.join(metadata.supported_scenarios)}\n\n"
        f"[bold blue]Class Documentation[/bold blue]\n"
        f"{class_doc}\n\n"
        f"[bold blue]predict_insulin Method Documentation[/bold blue]\n"
        f"{predict_insulin_doc}",
        title=f"Auto-Doc: {metadata.name}",
        border_style="blue"
    ))
|
|
2336
|
+
|
|
2337
|
+
@app.command()
def benchmark(
    algo_to_benchmark: Annotated[Path, typer.Option(help="Path to the AI algorithm Python file to benchmark")],
    # standard_pump_config: Annotated[str, typer.Option(help="Name of the standard pump patient config (e.g., 'default')")] = "default", # This will be loaded implicitly for the standard pump
    patient_configs_dir: Annotated[Path, typer.Option(help="Directory containing patient configuration YAML files")] = Path("src/iints/data/virtual_patients"),
    scenarios_dir: Annotated[Path, typer.Option(help="Directory containing scenario JSON files")] = Path("scenarios"),
    duration: Annotated[int, typer.Option(help="Simulation duration in minutes for each run")] = 720, # 12 hours
    time_step: Annotated[int, typer.Option(help="Simulation time step in minutes")] = 5,
    output_dir: Annotated[Optional[Path], typer.Option(help="Directory to save all benchmark results")] = None,
    seed: Annotated[Optional[int], typer.Option(help="Base seed for deterministic runs")] = None,
):
    """
    Runs a series of simulations to benchmark an AI algorithm against a standard pump
    across multiple patient configurations and scenarios.

    For every (patient YAML, scenario JSON) pair, two simulations are run with the
    same per-job seed — one with the user-supplied algorithm, one with
    StandardPumpAlgorithm — and their metrics are collected into a comparison
    table and a benchmark_summary.csv, plus config/metadata/manifest JSON files
    in the run's output directory. Exits with code 1 on missing inputs or when
    the AI algorithm file cannot be loaded.
    """
    console = Console()
    # Derive a deterministic run identity: resolved seed -> run id -> output dir.
    resolved_seed = resolve_seed(seed)
    run_id = generate_run_id(resolved_seed)
    output_dir = resolve_output_dir(output_dir, run_id)
    console.print(f"[bold blue]Starting IINTS-AF Benchmark Suite[/bold blue]")
    console.print(f"AI Algorithm: [green]{algo_to_benchmark.name}[/green]")
    # console.print(f"Standard Pump Config: [yellow]{standard_pump_config}[/yellow]") # Removed, as standard pump uses patient params
    console.print(f"Patient Configs from: [cyan]{patient_configs_dir}[/cyan]")
    console.print(f"Scenarios from: [magenta]{scenarios_dir}[/magenta]")
    console.print(f"Duration: {duration} min, Time Step: {time_step} min")
    console.print(f"Run ID: {run_id}")
    console.print(f"Output directory: {output_dir}")

    # Validate all filesystem inputs up front before any work is done.
    if not algo_to_benchmark.is_file():
        console.print(f"[bold red]Error: AI Algorithm file '{algo_to_benchmark}' not found.[/bold red]")
        raise typer.Exit(code=1)
    if not patient_configs_dir.is_dir():
        console.print(f"[bold red]Error: Patient configurations directory '{patient_configs_dir}' not found.[/bold red]")
        raise typer.Exit(code=1)
    if not scenarios_dir.is_dir():
        console.print(f"[bold red]Error: Scenarios directory '{scenarios_dir}' not found.[/bold red]")
        raise typer.Exit(code=1)

    # Ensure output directory exists
    output_dir.mkdir(parents=True, exist_ok=True)

    # Persist the exact run configuration for reproducibility.
    config_payload = {
        "run_type": "benchmark",
        "run_id": run_id,
        "algorithm_path": str(algo_to_benchmark),
        "patient_configs_dir": str(patient_configs_dir),
        "scenarios_dir": str(scenarios_dir),
        "duration_minutes": duration,
        "time_step_minutes": time_step,
        "seed": resolved_seed,
    }
    config_path = output_dir / "config.json"
    write_json(config_path, config_payload)
    run_metadata = build_run_metadata(run_id, resolved_seed, config_payload, output_dir)
    run_metadata_path = output_dir / "run_metadata.json"
    write_json(run_metadata_path, run_metadata)

    # Load AI Algorithm: dynamic import of the user-supplied Python file.
    ai_algo_instance = None
    module_name_ai = algo_to_benchmark.stem
    spec_ai = importlib.util.spec_from_file_location(module_name_ai, algo_to_benchmark)
    if spec_ai is None:
        console.print(f"[bold red]Error: Could not load module spec for AI algorithm {algo_to_benchmark}[/bold red]")
        raise typer.Exit(code=1)
    module_ai = importlib.util.module_from_spec(spec_ai)
    module_ai.iints = iints # type: ignore # Inject iints package
    sys.modules[module_name_ai] = module_ai
    try:
        if spec_ai.loader: # Ensure loader is not None
            spec_ai.loader.exec_module(module_ai)
        else:
            raise ImportError(f"Could not load module loader for AI algorithm {algo_to_benchmark}")
    except Exception as e:
        console.print(f"[bold red]Error loading AI algorithm module {algo_to_benchmark}: {e}[/bold red]")
        raise typer.Exit(code=1)

    # Pick the first InsulinAlgorithm subclass found in the imported module.
    for name_in_module, obj in module_ai.__dict__.items():
        if isinstance(obj, type) and issubclass(obj, iints.InsulinAlgorithm) and obj is not iints.InsulinAlgorithm:
            ai_algo_instance = obj()
            break
    if ai_algo_instance is None:
        console.print(f"[bold red]Error: No subclass of InsulinAlgorithm found in AI algorithm {algo_to_benchmark}[/bold red]")
        raise typer.Exit(code=1)
    console.print(f"Loaded AI Algorithm: [green]{ai_algo_instance.get_algorithm_metadata().name}[/green]")

    # Get compute device
    device_manager = iints.DeviceManager()
    device = device_manager.get_device()
    console.print(f"Using compute device: [blue]{device}[/blue]")

    # Collect patient configurations and scenarios
    patient_config_files = list(patient_configs_dir.glob("*.yaml"))
    scenario_files = list(scenarios_dir.glob("*.json"))

    if not patient_config_files:
        console.print(f"[bold red]Error: No patient configuration files found in '{patient_configs_dir}'[/bold red]")
        raise typer.Exit(code=1)
    if not scenario_files:
        console.print(f"[bold red]Error: No scenario files found in '{scenarios_dir}'[/bold red]")
        raise typer.Exit(code=1)

    benchmark_results = []
    # run_index offsets the base seed so each (patient, scenario) job gets a
    # distinct but reproducible seed; it only advances on completed jobs.
    run_index = 0

    # Iterate through patients and scenarios
    for patient_config_file in patient_config_files:
        patient_config_name = patient_config_file.stem
        try:
            with open(patient_config_file, 'r') as f:
                patient_params = yaml.safe_load(f)
            console.print(f"\n[bold underline]Benchmarking Patient: {patient_config_name}[/bold underline]")
        except yaml.YAMLError as e:
            console.print(f"[bold red]Error parsing patient configuration '{patient_config_file.name}': {e}[/bold red]")
            continue # Skip this patient

        for scenario_file in scenario_files:
            scenario_name = scenario_file.stem
            console.print(f" [bold]Scenario: {scenario_name}[/bold]")

            # Load Scenario Data (validated)
            try:
                scenario_model = load_scenario(scenario_file)
                payloads = scenario_to_payloads(scenario_model)
                stress_events = build_stress_events(payloads)
            except ValidationError as e:
                console.print(f"[bold red] Scenario validation failed: {scenario_file.name}[/bold red]")
                for line in format_validation_error(e):
                    console.print(f" - {line}")
                continue
            except Exception as e:
                console.print(f"[bold red] Error loading scenario '{scenario_file.name}': {e}[/bold red]")
                continue # Skip this scenario

            job_seed = resolved_seed + run_index
            # --- Run AI Algorithm Simulation ---
            console.print(f" Running [green]{ai_algo_instance.get_algorithm_metadata().name}[/green]...")
            patient_model_ai = iints.PatientModel(**patient_params) # New instance for each run
            simulator_ai = iints.Simulator(
                patient_model=patient_model_ai,
                algorithm=ai_algo_instance,
                time_step=time_step,
                seed=job_seed,
            )
            for event in stress_events:
                simulator_ai.add_stress_event(event)

            try:
                results_df_ai, safety_report_ai = simulator_ai.run_batch(duration)
                metrics_ai = iints.generate_benchmark_metrics(results_df_ai)
            except Exception as e:
                console.print(f"[bold red] AI Simulation failed: {e}[/bold red]")
                # Provide dummy metrics for failed simulations to allow table generation
                metrics_ai = {"TIR (%)": float('nan'), "Hypoglycemia (<70 mg/dL) (%)": float('nan'),
                              "Hyperglycemia (>180 mg/dL) (%)": float('nan'), "Avg Glucose (mg/dL)": float('nan')}
                # NOTE(review): key 'num_violations' does not match the
                # 'total_violations' lookup done when building benchmark_results
                # below; the .get() default yields NaN either way, so behavior is
                # unchanged, but the dummy key looks dead — confirm and align.
                safety_report_ai = {'num_violations': float('nan')} # Dummy report

            # --- Run Standard Pump Algorithm Simulation ---
            console.print(f" Running [yellow]Standard Pump[/yellow]...") # Use name directly
            # The standard pump also needs patient-specific parameters for ISF, ICR, basal rate, etc.
            # We'll pass the patient_params directly to the StandardPumpAlgorithm constructor.
            standard_pump_algo_instance = iints.StandardPumpAlgorithm(settings=patient_params)
            patient_model_std = iints.PatientModel(**patient_params) # New instance for each run
            simulator_std = iints.Simulator(
                patient_model=patient_model_std,
                algorithm=standard_pump_algo_instance,
                time_step=time_step,
                seed=job_seed,
            )
            for event in stress_events:
                simulator_std.add_stress_event(event)

            try:
                results_df_std, safety_report_std = simulator_std.run_batch(duration)
                metrics_std = iints.generate_benchmark_metrics(results_df_std)
            except Exception as e:
                console.print(f"[bold red] Standard Pump Simulation failed: {e}[/bold red]")
                # Provide dummy metrics for failed simulations
                metrics_std = {"TIR (%)": float('nan'), "Hypoglycemia (<70 mg/dL) (%)": float('nan'),
                               "Hyperglycemia (>180 mg/dL) (%)": float('nan'), "Avg Glucose (mg/dL)": float('nan')}
                # NOTE(review): same key mismatch as the AI dummy report above.
                safety_report_std = {'num_violations': float('nan')} # Dummy report


            # Store results
            # One flat row per (patient, scenario): AI metrics prefixed "AI ",
            # standard-pump metrics prefixed "Std ".
            benchmark_results.append({
                "run_id": run_id,
                "seed": job_seed,
                "Patient": patient_config_name,
                "Scenario": scenario_name,
                "AI Algo": ai_algo_instance.get_algorithm_metadata().name,
                **{f"AI {k}": v for k, v in metrics_ai.items()},
                **{f"AI Safety Violations": safety_report_ai.get('total_violations', float('nan'))},
                "Standard Algo": standard_pump_algo_instance.get_algorithm_metadata().name,
                **{f"Std {k}": v for k, v in metrics_std.items()},
                **{f"Std Safety Violations": safety_report_std.get('total_violations', float('nan'))},
            })
            run_index += 1

    console.print("\n[bold green]Benchmark Suite Completed![/bold green]")

    # Print Comparison Table
    if benchmark_results:
        results_df = pd.DataFrame(benchmark_results)

        table = Table(title="IINTS-AF Benchmark Results", show_header=True, header_style="bold magenta")

        # Add columns dynamically
        table.add_column("Patient", style="cyan", no_wrap=True)
        table.add_column("Scenario", style="cyan", no_wrap=True)

        # Assuming AI Algo and Standard Algo names are consistent across results
        ai_algo_name = benchmark_results[0]["AI Algo"]
        std_algo_name = benchmark_results[0]["Standard Algo"]

        # Get a sample of metric keys (excluding 'AI Algo', 'Standard Algo')
        # Fix: Filter out non-metric keys like 'AI Algo'
        # Strips the "AI " prefix to recover the bare metric names; the same
        # names are re-prefixed with "AI "/"Std " when reading row values below.
        sample_metrics_keys_raw = [k.replace('AI ', '') for k in benchmark_results[0].keys() if k.startswith('AI ') and 'Algo' not in k and 'Violations' not in k]

        for metric_name_raw in sample_metrics_keys_raw:
            table.add_column(f"{ai_algo_name} {metric_name_raw}", style="green")
            table.add_column(f"{std_algo_name} {metric_name_raw}", style="yellow")

        table.add_column(f"{ai_algo_name} Safety Violations", style="red")
        table.add_column(f"{std_algo_name} Safety Violations", style="red")

        # Row cells must be appended in exactly the same order the columns were
        # declared: Patient, Scenario, (AI, Std) per metric, then safety counts.
        for _, row in results_df.iterrows():
            row_data = [str(row["Patient"]), str(row["Scenario"])]
            for metric_name_raw in sample_metrics_keys_raw:
                ai_val = row[f'AI {metric_name_raw}']
                std_val = row[f'Std {metric_name_raw}']

                # Percent-style metrics get a trailing '%'; NaN renders as "N/A".
                ai_formatted = f"{ai_val:.2f}%" if "%" in metric_name_raw and not pd.isna(ai_val) else (f"{ai_val:.2f}" if not pd.isna(ai_val) else "N/A")
                std_formatted = f"{std_val:.2f}%" if "%" in metric_name_raw and not pd.isna(std_val) else (f"{std_val:.2f}" if not pd.isna(std_val) else "N/A")

                row_data.append(ai_formatted)
                row_data.append(std_formatted)

            ai_safety_violations = row['AI Safety Violations']
            std_safety_violations = row['Std Safety Violations']
            row_data.append(f"{ai_safety_violations:.0f}" if not pd.isna(ai_safety_violations) else "N/A")
            row_data.append(f"{std_safety_violations:.0f}" if not pd.isna(std_safety_violations) else "N/A")
            table.add_row(*row_data)

        console.print(table)

        results_csv = output_dir / "benchmark_summary.csv"
        results_df.to_csv(results_csv, index=False)
        console.print(f"[green]Benchmark summary saved:[/green] {results_csv}")

        # Manifest ties together every artifact written during this run and is
        # optionally signed for integrity verification.
        manifest_files = {
            "config": config_path,
            "run_metadata": run_metadata_path,
            "benchmark_summary": results_csv,
        }
        run_manifest = build_run_manifest(output_dir, manifest_files)
        run_manifest_path = output_dir / "run_manifest.json"
        write_json(run_manifest_path, run_manifest)
        console.print(f"Run manifest: {run_manifest_path}")
        signature_path = maybe_sign_manifest(run_manifest_path)
        if signature_path:
            console.print(f"Run manifest signature: {signature_path}")
    else:
        console.print("[yellow]No benchmark results were generated.[/yellow]")
|