iints-sdk-python35 0.0.18__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- iints/__init__.py +183 -0
- iints/analysis/__init__.py +12 -0
- iints/analysis/algorithm_xray.py +387 -0
- iints/analysis/baseline.py +92 -0
- iints/analysis/clinical_benchmark.py +198 -0
- iints/analysis/clinical_metrics.py +551 -0
- iints/analysis/clinical_tir_analyzer.py +136 -0
- iints/analysis/diabetes_metrics.py +43 -0
- iints/analysis/edge_efficiency.py +33 -0
- iints/analysis/edge_performance_monitor.py +315 -0
- iints/analysis/explainability.py +94 -0
- iints/analysis/explainable_ai.py +232 -0
- iints/analysis/hardware_benchmark.py +221 -0
- iints/analysis/metrics.py +117 -0
- iints/analysis/population_report.py +188 -0
- iints/analysis/reporting.py +345 -0
- iints/analysis/safety_index.py +311 -0
- iints/analysis/sensor_filtering.py +54 -0
- iints/analysis/validator.py +273 -0
- iints/api/__init__.py +0 -0
- iints/api/base_algorithm.py +307 -0
- iints/api/registry.py +103 -0
- iints/api/template_algorithm.py +195 -0
- iints/assets/iints_logo.png +0 -0
- iints/cli/__init__.py +0 -0
- iints/cli/cli.py +2598 -0
- iints/core/__init__.py +1 -0
- iints/core/algorithms/__init__.py +0 -0
- iints/core/algorithms/battle_runner.py +138 -0
- iints/core/algorithms/correction_bolus.py +95 -0
- iints/core/algorithms/discovery.py +92 -0
- iints/core/algorithms/fixed_basal_bolus.py +58 -0
- iints/core/algorithms/hybrid_algorithm.py +92 -0
- iints/core/algorithms/lstm_algorithm.py +138 -0
- iints/core/algorithms/mock_algorithms.py +162 -0
- iints/core/algorithms/pid_controller.py +88 -0
- iints/core/algorithms/standard_pump_algo.py +64 -0
- iints/core/device.py +0 -0
- iints/core/device_manager.py +64 -0
- iints/core/devices/__init__.py +3 -0
- iints/core/devices/models.py +160 -0
- iints/core/patient/__init__.py +9 -0
- iints/core/patient/bergman_model.py +341 -0
- iints/core/patient/models.py +285 -0
- iints/core/patient/patient_factory.py +117 -0
- iints/core/patient/profile.py +41 -0
- iints/core/safety/__init__.py +12 -0
- iints/core/safety/config.py +37 -0
- iints/core/safety/input_validator.py +95 -0
- iints/core/safety/supervisor.py +39 -0
- iints/core/simulation/__init__.py +0 -0
- iints/core/simulation/scenario_parser.py +61 -0
- iints/core/simulator.py +874 -0
- iints/core/supervisor.py +367 -0
- iints/data/__init__.py +53 -0
- iints/data/adapter.py +142 -0
- iints/data/column_mapper.py +398 -0
- iints/data/datasets.json +132 -0
- iints/data/demo/__init__.py +1 -0
- iints/data/demo/demo_cgm.csv +289 -0
- iints/data/importer.py +275 -0
- iints/data/ingestor.py +162 -0
- iints/data/nightscout.py +128 -0
- iints/data/quality_checker.py +550 -0
- iints/data/registry.py +166 -0
- iints/data/tidepool.py +38 -0
- iints/data/universal_parser.py +813 -0
- iints/data/virtual_patients/clinic_safe_baseline.yaml +9 -0
- iints/data/virtual_patients/clinic_safe_hyper_challenge.yaml +9 -0
- iints/data/virtual_patients/clinic_safe_hypo_prone.yaml +9 -0
- iints/data/virtual_patients/clinic_safe_midnight.yaml +9 -0
- iints/data/virtual_patients/clinic_safe_pizza.yaml +9 -0
- iints/data/virtual_patients/clinic_safe_stress_meal.yaml +9 -0
- iints/data/virtual_patients/default_patient.yaml +11 -0
- iints/data/virtual_patients/patient_559_config.yaml +11 -0
- iints/emulation/__init__.py +80 -0
- iints/emulation/legacy_base.py +414 -0
- iints/emulation/medtronic_780g.py +337 -0
- iints/emulation/omnipod_5.py +367 -0
- iints/emulation/tandem_controliq.py +393 -0
- iints/highlevel.py +451 -0
- iints/learning/__init__.py +3 -0
- iints/learning/autonomous_optimizer.py +194 -0
- iints/learning/learning_system.py +122 -0
- iints/metrics.py +34 -0
- iints/population/__init__.py +11 -0
- iints/population/generator.py +131 -0
- iints/population/runner.py +327 -0
- iints/presets/__init__.py +28 -0
- iints/presets/presets.json +114 -0
- iints/research/__init__.py +30 -0
- iints/research/config.py +68 -0
- iints/research/dataset.py +319 -0
- iints/research/losses.py +73 -0
- iints/research/predictor.py +329 -0
- iints/scenarios/__init__.py +3 -0
- iints/scenarios/generator.py +92 -0
- iints/templates/__init__.py +0 -0
- iints/templates/default_algorithm.py +91 -0
- iints/templates/scenarios/__init__.py +0 -0
- iints/templates/scenarios/chaos_insulin_stacking.json +29 -0
- iints/templates/scenarios/chaos_runaway_ai.json +25 -0
- iints/templates/scenarios/example_scenario.json +35 -0
- iints/templates/scenarios/exercise_stress.json +30 -0
- iints/utils/__init__.py +3 -0
- iints/utils/plotting.py +50 -0
- iints/utils/run_io.py +152 -0
- iints/validation/__init__.py +133 -0
- iints/validation/schemas.py +94 -0
- iints/visualization/__init__.py +34 -0
- iints/visualization/cockpit.py +691 -0
- iints/visualization/uncertainty_cloud.py +612 -0
- iints_sdk_python35-0.0.18.dist-info/METADATA +225 -0
- iints_sdk_python35-0.0.18.dist-info/RECORD +118 -0
- iints_sdk_python35-0.0.18.dist-info/WHEEL +5 -0
- iints_sdk_python35-0.0.18.dist-info/entry_points.txt +10 -0
- iints_sdk_python35-0.0.18.dist-info/licenses/LICENSE +28 -0
- iints_sdk_python35-0.0.18.dist-info/top_level.txt +1 -0
|
@@ -0,0 +1,329 @@
|
|
|
1
|
+
from __future__ import annotations
|
|
2
|
+
|
|
3
|
+
from pathlib import Path
|
|
4
|
+
from typing import Optional, Tuple, Protocol, Sequence, TYPE_CHECKING
|
|
5
|
+
|
|
6
|
+
import numpy as np
|
|
7
|
+
|
|
8
|
+
_IMPORT_ERROR: Optional[BaseException]
|
|
9
|
+
try:
|
|
10
|
+
import torch
|
|
11
|
+
from torch import nn
|
|
12
|
+
except Exception as exc: # pragma: no cover
|
|
13
|
+
torch = None # type: ignore
|
|
14
|
+
nn = None # type: ignore
|
|
15
|
+
_IMPORT_ERROR = exc
|
|
16
|
+
else:
|
|
17
|
+
_IMPORT_ERROR = None
|
|
18
|
+
|
|
19
|
+
|
|
20
|
+
# ---------------------------------------------------------------------------
|
|
21
|
+
# LSTM predictor
|
|
22
|
+
# ---------------------------------------------------------------------------
|
|
23
|
+
|
|
24
|
+
if TYPE_CHECKING:
    import torch  # pragma: no cover
    from torch import nn  # pragma: no cover

    class LSTMPredictor(nn.Module):
        """Typing-only stub mirroring the runtime ``LSTMPredictor`` interface."""

        def __init__(
            self,
            input_size: int,
            hidden_size: int = 64,
            num_layers: int = 2,
            dropout: float = 0.1,
            horizon_steps: int = 12,
        ) -> None: ...

        def forward(self, x: "torch.Tensor") -> "torch.Tensor": ...

        def predict_with_uncertainty(
            self,
            x: "torch.Tensor",
            n_samples: int = 50,
        ) -> Tuple["torch.Tensor", "torch.Tensor"]: ...
else:
    if nn is None:  # pragma: no cover
        class LSTMPredictor:  # type: ignore[no-redef]
            """Placeholder that raises at construction when torch is missing."""

            def __init__(self, *args: object, **kwargs: object) -> None:
                raise ImportError(
                    "Torch is required for LSTMPredictor. Install with `pip install iints-sdk-python35[research]`."
                ) from _IMPORT_ERROR
    else:
        class LSTMPredictor(nn.Module):  # type: ignore[misc,no-redef]
            """LSTM glucose forecaster with an MLP head and MC-Dropout support.

            Parameters
            ----------
            input_size : int
                Number of input features per time step.
            hidden_size : int
                LSTM hidden width; also the head's hidden layer width.
            num_layers : int
                Stacked LSTM layers. Inter-layer LSTM dropout only applies
                when ``num_layers > 1`` (torch emits a warning otherwise).
            dropout : float
                Dropout probability used in both the LSTM and the head.
            horizon_steps : int
                Number of future steps predicted per sample.
            """

            def __init__(
                self,
                input_size: int,
                hidden_size: int = 64,
                num_layers: int = 2,
                dropout: float = 0.1,
                horizon_steps: int = 12,
            ) -> None:
                if torch is None or nn is None:  # pragma: no cover
                    raise ImportError(
                        "Torch is required for LSTMPredictor. Install with `pip install iints-sdk-python35[research]`."
                    ) from _IMPORT_ERROR
                super().__init__()
                self.horizon_steps = horizon_steps
                self.lstm = nn.LSTM(
                    input_size=input_size,
                    hidden_size=hidden_size,
                    num_layers=num_layers,
                    dropout=dropout if num_layers > 1 else 0.0,
                    batch_first=True,
                )
                self.head = nn.Sequential(
                    nn.Linear(hidden_size, hidden_size),
                    nn.ReLU(),
                    nn.Dropout(p=dropout),  # P3-12: dropout in head for MC Dropout inference
                    nn.Linear(hidden_size, horizon_steps),
                )

            def forward(self, x: "torch.Tensor") -> "torch.Tensor":
                """Map a [B, T, F] batch to [B, horizon_steps] predictions."""
                # Summarize each sequence with the last layer's final hidden state.
                _, (hidden, _) = self.lstm(x)
                last_hidden = hidden[-1]
                return self.head(last_hidden)

            # P3-12: Monte Carlo Dropout inference
            def predict_with_uncertainty(
                self,
                x: "torch.Tensor",
                n_samples: int = 50,
            ) -> Tuple["torch.Tensor", "torch.Tensor"]:
                """
                Run MC Dropout inference to estimate predictive uncertainty.

                Activates dropout at inference time and runs ``n_samples`` forward
                passes. Returns the mean prediction and standard deviation across
                samples as a proxy for aleatoric + epistemic uncertainty.

                Parameters
                ----------
                x : torch.Tensor of shape [B, T, F]
                    Input batch.
                n_samples : int
                    Number of stochastic forward passes.

                Returns
                -------
                mean : torch.Tensor of shape [B, horizon_steps]
                std : torch.Tensor of shape [B, horizon_steps]
                """
                if torch is None:  # pragma: no cover
                    raise ImportError("Torch required.") from _IMPORT_ERROR
                # Keep dropout active during inference (the point of MC Dropout).
                self.train()
                try:
                    with torch.no_grad():
                        preds = torch.stack(
                            [self.forward(x) for _ in range(n_samples)], dim=0
                        )
                finally:
                    # BUGFIX: previously an exception in a forward pass left the
                    # module stuck in train mode; always restore eval mode.
                    self.eval()
                return preds.mean(dim=0), preds.std(dim=0)
|
|
120
|
+
|
|
121
|
+
|
|
122
|
+
# ---------------------------------------------------------------------------
|
|
123
|
+
# P3-11: Baseline predictors
|
|
124
|
+
# ---------------------------------------------------------------------------
|
|
125
|
+
|
|
126
|
+
class BaselinePredictor(Protocol):
    """Structural interface for naive glucose-forecasting baselines.

    Any object providing ``predict`` and ``name`` with these signatures
    satisfies the protocol; no inheritance is required.
    """

    def predict(self, X: np.ndarray) -> np.ndarray: ...

    def name(self) -> str: ...
|
|
132
|
+
|
|
133
|
+
|
|
134
|
+
class LastValueBaseline:
    """Persistence baseline: repeat the latest glucose reading.

    Every future step is forecast as the most recently observed glucose
    value (``y_hat[t+k] = y[t]`` for k = 1..horizon). Any learned model
    must at minimum outperform this.
    """

    def __init__(self, horizon_steps: int) -> None:
        self.horizon_steps = horizon_steps

    def predict(self, X: np.ndarray) -> np.ndarray:
        """Forecast by repeating each sequence's final glucose value.

        Parameters
        ----------
        X : np.ndarray of shape [N, T, F]
            Feature sequences. Glucose is assumed to be feature index 0
            along the last axis (the AZT1D pipeline convention).

        Returns
        -------
        np.ndarray of shape [N, horizon_steps]
        """
        latest = X[:, -1, 0]  # shape [N]: final glucose reading per sequence
        repeated = np.repeat(latest[:, None], self.horizon_steps, axis=1)
        return repeated.astype(np.float32)

    def name(self) -> str:
        return "LastValue"
|
|
165
|
+
|
|
166
|
+
|
|
167
|
+
class LinearTrendBaseline:
    """Least-squares linear-extrapolation baseline.

    A straight line is fitted to each sequence's glucose history and
    projected ``horizon_steps`` steps ahead. This tracks short-term rises
    and falls (e.g. a post-meal climb) without any insulin or carb
    knowledge, making it a tougher reference than plain persistence.
    """

    def __init__(self, horizon_steps: int, time_step_minutes: float = 5.0) -> None:
        self.horizon_steps = horizon_steps
        # NOTE(review): retained for interface parity; the fit works on step
        # indices, so this value is not used in predict(). Confirm intent.
        self.time_step_minutes = time_step_minutes

    def predict(self, X: np.ndarray) -> np.ndarray:
        """Extrapolate each sequence's fitted glucose trend line.

        Parameters
        ----------
        X : np.ndarray of shape [N, T, F]
            Feature sequences. First feature (index 0) must be glucose.

        Returns
        -------
        np.ndarray of shape [N, horizon_steps]
        """
        n_seq, history_len, _ = X.shape
        glucose = X[:, :, 0]  # [N, T]
        idx_hist = np.arange(history_len, dtype=np.float32)
        idx_future = np.arange(
            history_len, history_len + self.horizon_steps, dtype=np.float32
        )

        # Vectorized least-squares fit over the whole batch.
        center = idx_hist.mean()
        spread = ((idx_hist - center) ** 2).sum()

        if spread < 1e-8:
            # Degenerate history (single time point): fall back to persistence.
            flat = np.empty((n_seq, self.horizon_steps), dtype=np.float32)
            flat[:] = glucose[:, -1, None]
            return flat

        slope = (glucose * (idx_hist - center)).sum(axis=1) / spread  # [N]
        offset = glucose.mean(axis=1) - slope * center  # [N]
        line = offset[:, None] + slope[:, None] * idx_future[None, :]
        return line.astype(np.float32)

    def name(self) -> str:
        return "LinearTrend"
|
|
216
|
+
|
|
217
|
+
|
|
218
|
+
def evaluate_baselines(
    X: np.ndarray,
    y: np.ndarray,
    horizon_steps: int,
    time_step_minutes: float = 5.0,
) -> dict:
    """
    Compute MAE and RMSE for both baseline predictors.

    Parameters
    ----------
    X : np.ndarray [N, T, F]
        Feature sequences; feature index 0 must be glucose.
    y : np.ndarray [N, horizon_steps]
        Ground-truth future glucose values.
    horizon_steps : int
        Number of future steps each baseline predicts.
    time_step_minutes : float
        Sampling interval, forwarded to ``LinearTrendBaseline``.

    Returns
    -------
    dict keyed by each baseline's ``name()`` — i.e. ``"LastValue"`` and
    ``"LinearTrend"`` — each mapping to ``{"mae": float, "rmse": float}``.

    .. note::
        The previous docstring claimed the keys were ``"last_value"`` and
        ``"linear_trend"``; the code has always keyed results by
        ``baseline.name()``, which this docstring now reflects.
    """
    results: dict = {}
    baselines: Sequence[BaselinePredictor] = [
        LastValueBaseline(horizon_steps),
        LinearTrendBaseline(horizon_steps, time_step_minutes),
    ]
    for baseline in baselines:
        preds = baseline.predict(X)
        mae = float(np.mean(np.abs(preds - y)))
        rmse = float(np.sqrt(np.mean((preds - y) ** 2)))
        results[baseline.name()] = {"mae": mae, "rmse": rmse}
    return results
|
|
250
|
+
|
|
251
|
+
|
|
252
|
+
# ---------------------------------------------------------------------------
|
|
253
|
+
# Service / loading helpers
|
|
254
|
+
# ---------------------------------------------------------------------------
|
|
255
|
+
|
|
256
|
+
class PredictorService:
    """Bundle a trained LSTMPredictor with its checkpoint config and scaler.

    Provides scaled numpy-in / numpy-out inference, hiding the torch tensor
    plumbing from callers.
    """

    def __init__(self, model: "LSTMPredictor", config: dict) -> None:
        self.model = model
        self.config = config
        self.feature_columns = list(config.get("feature_columns", []))
        self.history_steps = int(config.get("history_steps", 1))
        self.horizon_steps = int(config.get("horizon_steps", 1))

        # Rebuild the feature scaler if the checkpoint carried one.
        from iints.research.dataset import FeatureScaler

        raw_scaler = config.get("scaler")
        if raw_scaler:
            self.scaler: Optional[FeatureScaler] = FeatureScaler.from_dict(raw_scaler)
        else:
            self.scaler = None

    def predict(self, x: np.ndarray) -> np.ndarray:
        """Run a deterministic (eval-mode) forward pass on a numpy batch."""
        if torch is None:  # pragma: no cover
            raise ImportError(
                "Torch is required for predictor inference. Install with `pip install iints-sdk-python35[research]`."
            ) from _IMPORT_ERROR
        if self.scaler is not None:
            x = self.scaler.transform(x)
        self.model.eval()
        with torch.no_grad():
            batch = torch.from_numpy(x.astype(np.float32))
            return self.model(batch).cpu().numpy()

    def predict_with_uncertainty(
        self, x: np.ndarray, n_samples: int = 50
    ) -> Tuple[np.ndarray, np.ndarray]:
        """MC Dropout inference — returns (mean, std) arrays."""
        if torch is None:  # pragma: no cover
            raise ImportError("Torch required.") from _IMPORT_ERROR
        if self.scaler is not None:
            x = self.scaler.transform(x)
        batch = torch.from_numpy(x.astype(np.float32))
        mean_t, std_t = self.model.predict_with_uncertainty(batch, n_samples=n_samples)
        return mean_t.detach().cpu().numpy(), std_t.detach().cpu().numpy()
|
|
295
|
+
|
|
296
|
+
|
|
297
|
+
def load_predictor(model_path: Path) -> Tuple["LSTMPredictor", dict]:
    """Load a trained LSTMPredictor checkpoint from disk.

    The checkpoint payload is expected to contain a ``"config"`` dict with
    the model hyperparameters (``input_size``, ``hidden_size``,
    ``num_layers``, ``dropout``, ``horizon_steps``) and a ``"state_dict"``
    with the trained weights; missing keys raise ``KeyError``.

    Parameters
    ----------
    model_path : Path
        Path to the checkpoint file saved by the training pipeline.

    Returns
    -------
    (model, config)
        The model in eval mode and the raw config dict from the checkpoint.

    Raises
    ------
    ImportError
        If torch is not installed.
    """
    if torch is None or nn is None:  # pragma: no cover
        raise ImportError(
            "Torch is required for predictor loading. Install with `pip install iints-sdk-python35[research]`."
        ) from _IMPORT_ERROR
    # NOTE(review): weights_only=False deserializes arbitrary pickled objects
    # (required here because the payload includes a plain config dict, and
    # presumably scaler state); only load checkpoints from trusted sources.
    payload = torch.load(model_path, map_location="cpu", weights_only=False)
    config = payload["config"]
    model = LSTMPredictor(
        input_size=config["input_size"],
        hidden_size=config["hidden_size"],
        num_layers=config["num_layers"],
        dropout=config["dropout"],
        horizon_steps=config["horizon_steps"],
    )
    model.load_state_dict(payload["state_dict"])
    # Default to deterministic inference mode for callers.
    model.eval()
    return model, config
|
|
314
|
+
|
|
315
|
+
|
|
316
|
+
def load_predictor_service(model_path: Path) -> PredictorService:
    """Load a checkpoint and wrap it in a ready-to-use PredictorService."""
    loaded = load_predictor(model_path)
    return PredictorService(*loaded)
|
|
319
|
+
|
|
320
|
+
|
|
321
|
+
def predict_batch(model: "LSTMPredictor", x: np.ndarray) -> np.ndarray:
    """Run a single no-grad forward pass of *model* over a numpy batch.

    Parameters
    ----------
    model : LSTMPredictor
        Trained model (any callable torch module with matching input works).
    x : np.ndarray
        Input batch; cast to float32 before inference.

    Returns
    -------
    np.ndarray
        Model outputs moved to CPU memory.

    Raises
    ------
    ImportError
        If torch is not installed.
    """
    if torch is None:  # pragma: no cover
        raise ImportError(
            "Torch is required for predictor inference. Install with `pip install iints-sdk-python35[research]`."
        ) from _IMPORT_ERROR
    batch = torch.from_numpy(x.astype(np.float32))
    with torch.no_grad():
        result = model(batch)
    return result.cpu().numpy()
|
|
@@ -0,0 +1,92 @@
|
|
|
1
|
+
from __future__ import annotations
|
|
2
|
+
|
|
3
|
+
from dataclasses import dataclass
|
|
4
|
+
from typing import Any, Dict, List, Optional
|
|
5
|
+
import random
|
|
6
|
+
|
|
7
|
+
from iints.validation.schemas import LATEST_SCHEMA_VERSION
|
|
8
|
+
|
|
9
|
+
|
|
10
|
+
@dataclass
class ScenarioGeneratorConfig:
    """Configuration knobs for random scenario generation.

    Consumed by ``generate_random_scenario``; all ranges are inclusive
    bounds for the random draws. Field order is part of the positional
    ``__init__`` signature and must not change.
    """

    # --- identity / metadata ---
    name: str  # scenario_name in the emitted dict
    schema_version: str = LATEST_SCHEMA_VERSION  # validation schema to target
    version: str = "1.0"  # scenario_version in the emitted dict
    description: str = "Generated scenario"
    duration_minutes: int = 1440  # simulated span; event times fall in [0, duration-1)
    seed: Optional[int] = None  # None → nondeterministic; set for reproducibility

    # --- meal events ---
    meal_count: int = 3
    meal_min_grams: float = 30.0  # carbs per meal, uniform draw bounds
    meal_max_grams: float = 80.0
    meal_delay_min: int = 10  # absorption delay (minutes), randint bounds
    meal_delay_max: int = 60
    meal_duration_min: int = 30  # meal absorption duration (minutes)
    meal_duration_max: int = 120

    # --- exercise events ---
    exercise_count: int = 0
    exercise_intensity_min: float = 0.2  # intensity in [0, 1], uniform draw bounds
    exercise_intensity_max: float = 0.8
    exercise_duration_min: int = 30  # minutes
    exercise_duration_max: int = 90

    # --- sensor-error events ---
    sensor_error_count: int = 0
    sensor_error_min: float = 40.0  # faulty CGM reading (mg/dL), uniform draw bounds
    sensor_error_max: float = 300.0
|
|
36
|
+
|
|
37
|
+
|
|
38
|
+
def _unique_times(rng: random.Random, max_time: int, count: int) -> List[int]:
|
|
39
|
+
if count <= 0:
|
|
40
|
+
return []
|
|
41
|
+
if max_time <= count:
|
|
42
|
+
return sorted(rng.sample(range(max_time + 1), k=max_time + 1))[:count]
|
|
43
|
+
return sorted(rng.sample(range(max_time), k=count))
|
|
44
|
+
|
|
45
|
+
|
|
46
|
+
def generate_random_scenario(config: ScenarioGeneratorConfig) -> Dict[str, Any]:
    """Build a scenario dict with randomly placed meal/exercise/sensor events.

    All randomness is driven by ``config.seed``, so identical configs yield
    identical scenarios. Draw order (meals, then exercise, then sensor
    errors; value → delay → duration within each meal) is fixed to keep
    seeded output stable.
    """
    rng = random.Random(config.seed)
    latest_start = max(config.duration_minutes - 1, 1)
    events: List[Dict[str, Any]] = []

    for start in _unique_times(rng, latest_start, config.meal_count):
        events.append(
            {
                "start_time": int(start),
                "event_type": "meal",
                "value": float(rng.uniform(config.meal_min_grams, config.meal_max_grams)),
                "absorption_delay_minutes": int(
                    rng.randint(config.meal_delay_min, config.meal_delay_max)
                ),
                "duration": int(
                    rng.randint(config.meal_duration_min, config.meal_duration_max)
                ),
            }
        )

    for start in _unique_times(rng, latest_start, config.exercise_count):
        events.append(
            {
                "start_time": int(start),
                "event_type": "exercise",
                "value": float(
                    rng.uniform(config.exercise_intensity_min, config.exercise_intensity_max)
                ),
                "duration": int(
                    rng.randint(config.exercise_duration_min, config.exercise_duration_max)
                ),
            }
        )

    for start in _unique_times(rng, latest_start, config.sensor_error_count):
        events.append(
            {
                "start_time": int(start),
                "event_type": "sensor_error",
                "value": float(rng.uniform(config.sensor_error_min, config.sensor_error_max)),
            }
        )

    # Present events chronologically regardless of event type.
    events.sort(key=lambda ev: ev["start_time"])

    return {
        "scenario_name": config.name,
        "schema_version": config.schema_version,
        "scenario_version": config.version,
        "description": config.description,
        "stress_events": events,
    }
|
|
File without changes
|
|
@@ -0,0 +1,91 @@
|
|
|
1
|
+
from iints import InsulinAlgorithm, AlgorithmInput, AlgorithmResult, AlgorithmMetadata
|
|
2
|
+
from typing import Dict, Any
|
|
3
|
+
|
|
4
|
+
class {{ALGO_NAME}}(InsulinAlgorithm):
    """Starter template for a custom insulin-dosing algorithm.

    The ``{{ALGO_NAME}}`` / ``{{AUTHOR_NAME}}`` placeholders are substituted
    by the SDK's scaffolding tooling before this file becomes valid Python.
    The starter logic is deliberately conservative (hypoglycemia-averse).
    """

    def __init__(self, settings: Dict[str, Any] = None):
        super().__init__(settings)
        self.set_algorithm_metadata(AlgorithmMetadata(
            name="{{ALGO_NAME}}",
            author="{{AUTHOR_NAME}}",
            description="A new custom insulin algorithm.",
            algorithm_type="rule_based"  # Change as appropriate
        ))
        # Initialize any specific state or parameters for your algorithm here

    def predict_insulin(self, data: AlgorithmInput) -> Dict[str, Any]:
        """Compute one dosing decision for the current simulation step.

        Returns a dict with total/bolus/basal/correction/meal insulin
        components (units). NOTE(review): ``self.isf``, ``self.icr``,
        ``self.state``, and ``self._log_reason`` are presumably provided by
        ``InsulinAlgorithm`` — confirm against the base class.
        """
        # --- SAFETY-FIRST STARTER LOGIC ---
        # This template is intentionally conservative to avoid hypoglycemia.

        # Reset the per-step explanation trail.
        self.why_log = []

        current_glucose = data.current_glucose
        iob = data.insulin_on_board
        carbs = data.carb_intake

        # Glucose rate of change (mg/dL per minute), using previous step's
        # reading persisted in self.state; max() guards divide-by-zero.
        previous_glucose = self.state.get("previous_glucose", current_glucose)
        glucose_trend = (current_glucose - previous_glucose) / max(data.time_step, 1)
        self.state["previous_glucose"] = current_glucose

        total_insulin = 0.0
        bolus_insulin = 0.0
        basal_insulin = 0.0
        correction_bolus = 0.0
        meal_bolus = 0.0

        # Hard safety cutoff: deliver nothing when glucose is already low.
        if current_glucose < 90:
            self._log_reason("Glucose below 90 mg/dL; holding insulin.", "safety_cutoff", current_glucose)
            return {
                "total_insulin_delivered": 0.0,
                "bolus_insulin": 0.0,
                "basal_insulin": 0.0,
                "correction_bolus": 0.0,
                "meal_bolus": 0.0,
            }

        # If glucose is falling quickly, avoid correction bolus
        if glucose_trend < -1.0:
            self._log_reason(
                f"Glucose dropping at {glucose_trend:.2f} mg/dL/min; skipping correction bolus.",
                "safety_trend",
                glucose_trend,
            )
        else:
            # Conservative correction only if quite high: target 140, capped at 0.5 U.
            if current_glucose > 180:
                correction_bolus = (current_glucose - 140) / self.isf
                correction_bolus = min(max(correction_bolus, 0.0), 0.5)
                total_insulin += correction_bolus
                self._log_reason(
                    f"Conservative correction bolus {correction_bolus:.2f} U.",
                    "correction",
                    current_glucose,
                )

        # Meal bolus via carb ratio, capped at 2.0 U.
        if carbs > 0:
            meal_bolus = min(carbs / self.icr, 2.0)
            total_insulin += meal_bolus
            self._log_reason(
                f"Meal bolus {meal_bolus:.2f} U for {carbs:.0f} g carbs.",
                "meal_bolus",
                carbs,
            )

        # Optional: cap total insulin based on IOB to avoid stacking.
        if iob > 2.0:
            total_insulin = min(total_insulin, 0.2)
            self._log_reason("High IOB; capping total insulin to 0.2 U.", "iob_cap", iob)

        total_insulin = max(0.0, total_insulin)
        bolus_insulin = total_insulin

        self._log_reason(f"Final insulin decision: {total_insulin:.2f} units", "decision", total_insulin)

        return {
            "total_insulin_delivered": total_insulin,
            "bolus_insulin": bolus_insulin,
            "basal_insulin": basal_insulin,
            "correction_bolus": correction_bolus,
            "meal_bolus": meal_bolus,
        }
|
|
File without changes
|
|
@@ -0,0 +1,29 @@
|
|
|
1
|
+
{
|
|
2
|
+
"scenario_name": "Chaos: Insulin Stacking",
|
|
3
|
+
"schema_version": "1.1",
|
|
4
|
+
"scenario_version": "1.0",
|
|
5
|
+
"description": "Red-team scenario with repeated false-high CGM values to induce stacked boluses. Use with StackingAIAlgorithm.",
|
|
6
|
+
"stress_events": [
|
|
7
|
+
{
|
|
8
|
+
"start_time": 30,
|
|
9
|
+
"event_type": "sensor_error",
|
|
10
|
+
"value": 260
|
|
11
|
+
},
|
|
12
|
+
{
|
|
13
|
+
"start_time": 35,
|
|
14
|
+
"event_type": "sensor_error",
|
|
15
|
+
"value": 260
|
|
16
|
+
},
|
|
17
|
+
{
|
|
18
|
+
"start_time": 40,
|
|
19
|
+
"event_type": "sensor_error",
|
|
20
|
+
"value": 260
|
|
21
|
+
},
|
|
22
|
+
{
|
|
23
|
+
"start_time": 60,
|
|
24
|
+
"event_type": "meal",
|
|
25
|
+
"value": 40,
|
|
26
|
+
"reported_value": 40
|
|
27
|
+
}
|
|
28
|
+
]
|
|
29
|
+
}
|
|
@@ -0,0 +1,25 @@
|
|
|
1
|
+
{
|
|
2
|
+
"scenario_name": "Chaos: Runaway AI",
|
|
3
|
+
"schema_version": "1.1",
|
|
4
|
+
"scenario_version": "1.0",
|
|
5
|
+
"description": "Red-team scenario with falling glucose during exercise. Use with RunawayAIAlgorithm.",
|
|
6
|
+
"stress_events": [
|
|
7
|
+
{
|
|
8
|
+
"start_time": 30,
|
|
9
|
+
"event_type": "meal",
|
|
10
|
+
"value": 60,
|
|
11
|
+
"reported_value": 60
|
|
12
|
+
},
|
|
13
|
+
{
|
|
14
|
+
"start_time": 120,
|
|
15
|
+
"event_type": "exercise",
|
|
16
|
+
"value": 0.8,
|
|
17
|
+
"duration": 60
|
|
18
|
+
},
|
|
19
|
+
{
|
|
20
|
+
"start_time": 200,
|
|
21
|
+
"event_type": "sensor_error",
|
|
22
|
+
"value": 180
|
|
23
|
+
}
|
|
24
|
+
]
|
|
25
|
+
}
|
|
@@ -0,0 +1,35 @@
|
|
|
1
|
+
{
|
|
2
|
+
"scenario_name": "Standard Meal Challenge",
|
|
3
|
+
"schema_version": "1.1",
|
|
4
|
+
"scenario_version": "1.0",
|
|
5
|
+
"description": "A standard day with three meals and a missed bolus event.",
|
|
6
|
+
"stress_events": [
|
|
7
|
+
{
|
|
8
|
+
"start_time": 60,
|
|
9
|
+
"event_type": "meal",
|
|
10
|
+
"value": 45,
|
|
11
|
+
"absorption_delay_minutes": 15,
|
|
12
|
+
"duration": 60
|
|
13
|
+
},
|
|
14
|
+
{
|
|
15
|
+
"start_time": 360,
|
|
16
|
+
"event_type": "meal",
|
|
17
|
+
"value": 70,
|
|
18
|
+
"absorption_delay_minutes": 20,
|
|
19
|
+
"duration": 90
|
|
20
|
+
},
|
|
21
|
+
{
|
|
22
|
+
"start_time": 720,
|
|
23
|
+
"event_type": "meal",
|
|
24
|
+
"value": 85,
|
|
25
|
+
"absorption_delay_minutes": 15,
|
|
26
|
+
"duration": 120
|
|
27
|
+
},
|
|
28
|
+
{
|
|
29
|
+
"start_time": 1000,
|
|
30
|
+
"event_type": "exercise",
|
|
31
|
+
"value": 0.5,
|
|
32
|
+
"duration": 45
|
|
33
|
+
}
|
|
34
|
+
]
|
|
35
|
+
}
|
|
@@ -0,0 +1,30 @@
|
|
|
1
|
+
{
|
|
2
|
+
"scenario_name": "Exercise Sensitivity Shift",
|
|
3
|
+
"schema_version": "1.1",
|
|
4
|
+
"scenario_version": "1.0",
|
|
5
|
+
"description": "Exercise raises insulin sensitivity mid-run. Includes ratio_change event.",
|
|
6
|
+
"stress_events": [
|
|
7
|
+
{
|
|
8
|
+
"start_time": 60,
|
|
9
|
+
"event_type": "meal",
|
|
10
|
+
"value": 60,
|
|
11
|
+
"absorption_delay_minutes": 10,
|
|
12
|
+
"duration": 60
|
|
13
|
+
},
|
|
14
|
+
{
|
|
15
|
+
"start_time": 180,
|
|
16
|
+
"event_type": "exercise",
|
|
17
|
+
"value": 0.6,
|
|
18
|
+
"duration": 45
|
|
19
|
+
},
|
|
20
|
+
{
|
|
21
|
+
"start_time": 180,
|
|
22
|
+
"event_type": "ratio_change",
|
|
23
|
+
"isf": 90,
|
|
24
|
+
"icr": 18,
|
|
25
|
+
"basal_rate": 0.3,
|
|
26
|
+
"dia_minutes": 240,
|
|
27
|
+
"duration": 180
|
|
28
|
+
}
|
|
29
|
+
]
|
|
30
|
+
}
|
iints/utils/__init__.py
ADDED