iints-sdk-python35 0.0.18__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- iints/__init__.py +183 -0
- iints/analysis/__init__.py +12 -0
- iints/analysis/algorithm_xray.py +387 -0
- iints/analysis/baseline.py +92 -0
- iints/analysis/clinical_benchmark.py +198 -0
- iints/analysis/clinical_metrics.py +551 -0
- iints/analysis/clinical_tir_analyzer.py +136 -0
- iints/analysis/diabetes_metrics.py +43 -0
- iints/analysis/edge_efficiency.py +33 -0
- iints/analysis/edge_performance_monitor.py +315 -0
- iints/analysis/explainability.py +94 -0
- iints/analysis/explainable_ai.py +232 -0
- iints/analysis/hardware_benchmark.py +221 -0
- iints/analysis/metrics.py +117 -0
- iints/analysis/population_report.py +188 -0
- iints/analysis/reporting.py +345 -0
- iints/analysis/safety_index.py +311 -0
- iints/analysis/sensor_filtering.py +54 -0
- iints/analysis/validator.py +273 -0
- iints/api/__init__.py +0 -0
- iints/api/base_algorithm.py +307 -0
- iints/api/registry.py +103 -0
- iints/api/template_algorithm.py +195 -0
- iints/assets/iints_logo.png +0 -0
- iints/cli/__init__.py +0 -0
- iints/cli/cli.py +2598 -0
- iints/core/__init__.py +1 -0
- iints/core/algorithms/__init__.py +0 -0
- iints/core/algorithms/battle_runner.py +138 -0
- iints/core/algorithms/correction_bolus.py +95 -0
- iints/core/algorithms/discovery.py +92 -0
- iints/core/algorithms/fixed_basal_bolus.py +58 -0
- iints/core/algorithms/hybrid_algorithm.py +92 -0
- iints/core/algorithms/lstm_algorithm.py +138 -0
- iints/core/algorithms/mock_algorithms.py +162 -0
- iints/core/algorithms/pid_controller.py +88 -0
- iints/core/algorithms/standard_pump_algo.py +64 -0
- iints/core/device.py +0 -0
- iints/core/device_manager.py +64 -0
- iints/core/devices/__init__.py +3 -0
- iints/core/devices/models.py +160 -0
- iints/core/patient/__init__.py +9 -0
- iints/core/patient/bergman_model.py +341 -0
- iints/core/patient/models.py +285 -0
- iints/core/patient/patient_factory.py +117 -0
- iints/core/patient/profile.py +41 -0
- iints/core/safety/__init__.py +12 -0
- iints/core/safety/config.py +37 -0
- iints/core/safety/input_validator.py +95 -0
- iints/core/safety/supervisor.py +39 -0
- iints/core/simulation/__init__.py +0 -0
- iints/core/simulation/scenario_parser.py +61 -0
- iints/core/simulator.py +874 -0
- iints/core/supervisor.py +367 -0
- iints/data/__init__.py +53 -0
- iints/data/adapter.py +142 -0
- iints/data/column_mapper.py +398 -0
- iints/data/datasets.json +132 -0
- iints/data/demo/__init__.py +1 -0
- iints/data/demo/demo_cgm.csv +289 -0
- iints/data/importer.py +275 -0
- iints/data/ingestor.py +162 -0
- iints/data/nightscout.py +128 -0
- iints/data/quality_checker.py +550 -0
- iints/data/registry.py +166 -0
- iints/data/tidepool.py +38 -0
- iints/data/universal_parser.py +813 -0
- iints/data/virtual_patients/clinic_safe_baseline.yaml +9 -0
- iints/data/virtual_patients/clinic_safe_hyper_challenge.yaml +9 -0
- iints/data/virtual_patients/clinic_safe_hypo_prone.yaml +9 -0
- iints/data/virtual_patients/clinic_safe_midnight.yaml +9 -0
- iints/data/virtual_patients/clinic_safe_pizza.yaml +9 -0
- iints/data/virtual_patients/clinic_safe_stress_meal.yaml +9 -0
- iints/data/virtual_patients/default_patient.yaml +11 -0
- iints/data/virtual_patients/patient_559_config.yaml +11 -0
- iints/emulation/__init__.py +80 -0
- iints/emulation/legacy_base.py +414 -0
- iints/emulation/medtronic_780g.py +337 -0
- iints/emulation/omnipod_5.py +367 -0
- iints/emulation/tandem_controliq.py +393 -0
- iints/highlevel.py +451 -0
- iints/learning/__init__.py +3 -0
- iints/learning/autonomous_optimizer.py +194 -0
- iints/learning/learning_system.py +122 -0
- iints/metrics.py +34 -0
- iints/population/__init__.py +11 -0
- iints/population/generator.py +131 -0
- iints/population/runner.py +327 -0
- iints/presets/__init__.py +28 -0
- iints/presets/presets.json +114 -0
- iints/research/__init__.py +30 -0
- iints/research/config.py +68 -0
- iints/research/dataset.py +319 -0
- iints/research/losses.py +73 -0
- iints/research/predictor.py +329 -0
- iints/scenarios/__init__.py +3 -0
- iints/scenarios/generator.py +92 -0
- iints/templates/__init__.py +0 -0
- iints/templates/default_algorithm.py +91 -0
- iints/templates/scenarios/__init__.py +0 -0
- iints/templates/scenarios/chaos_insulin_stacking.json +29 -0
- iints/templates/scenarios/chaos_runaway_ai.json +25 -0
- iints/templates/scenarios/example_scenario.json +35 -0
- iints/templates/scenarios/exercise_stress.json +30 -0
- iints/utils/__init__.py +3 -0
- iints/utils/plotting.py +50 -0
- iints/utils/run_io.py +152 -0
- iints/validation/__init__.py +133 -0
- iints/validation/schemas.py +94 -0
- iints/visualization/__init__.py +34 -0
- iints/visualization/cockpit.py +691 -0
- iints/visualization/uncertainty_cloud.py +612 -0
- iints_sdk_python35-0.0.18.dist-info/METADATA +225 -0
- iints_sdk_python35-0.0.18.dist-info/RECORD +118 -0
- iints_sdk_python35-0.0.18.dist-info/WHEEL +5 -0
- iints_sdk_python35-0.0.18.dist-info/entry_points.txt +10 -0
- iints_sdk_python35-0.0.18.dist-info/licenses/LICENSE +28 -0
- iints_sdk_python35-0.0.18.dist-info/top_level.txt +1 -0
iints/data/ingestor.py
ADDED
|
@@ -0,0 +1,162 @@
|
|
|
1
|
+
import pandas as pd
|
|
2
|
+
from pathlib import Path
|
|
3
|
+
from typing import Dict, Any, Union, Optional
|
|
4
|
+
import yaml
|
|
5
|
+
|
|
6
|
+
class DataIngestor:
    """
    Standardized Data Bridge for ingesting various diabetes datasets into a
    universal IINTS-AF format.

    Every loader returns a DataFrame with the columns in ``UNIVERSAL_SCHEMA``,
    and :meth:`get_patient_model` validates basic data quality before handing
    the frame to callers.
    """

    # Target column layout every loader must emit.  'carbs' and 'insulin'
    # may be null; pandas will infer float for mixed null/number columns.
    UNIVERSAL_SCHEMA = {
        "timestamp": float,
        "glucose": float,
        "carbs": float,
        "insulin": float,
        "source": str,
    }

    def __init__(self, data_dir: Optional[str] = None):
        # Base directory for data files; resolution of relative paths is
        # left to callers (they pass full paths to get_patient_model).
        self.data_dir = data_dir or "./data"

    def _conform_to_schema(self, df: pd.DataFrame) -> pd.DataFrame:
        """Return *df* restricted to the universal columns, in order, adding
        any missing ones as NA so downstream code sees a stable layout."""
        required_cols = list(self.UNIVERSAL_SCHEMA.keys())
        for col in required_cols:
            if col not in df.columns:
                df[col] = pd.NA
        return df[required_cols]

    def _load_ohio_t1dm_csv(self, file_path: Path) -> pd.DataFrame:
        """
        Loads and transforms an Ohio T1DM dataset CSV into the universal schema.

        Expected columns: 'timestamp', 'glucose', 'carbs', 'insulin'.  The
        'timestamp' column is assumed to already be numeric (minutes from
        start) — TODO confirm against the actual export; datetimes would need
        conversion upstream.
        """
        df = pd.read_csv(file_path)
        df['source'] = 'public_ohio_t1dm'
        return self._conform_to_schema(df)

    def _validate_schema(self, df: pd.DataFrame, schema: Dict[str, type]) -> None:
        """
        Validates *df* against the expected schema and basic quality rules.

        Raises:
            ValueError: if a required column is missing, a non-null glucose or
                insulin value is out of physiological range, there are no
                glucose readings at all, or any timestamp is missing.
        """
        for col in schema:
            if col not in df.columns:
                raise ValueError(f"Missing required column: {col}")

        # Range checks ignore nulls: sensor gaps are legitimate (the schema
        # allows null carbs/insulin), but a PRESENT value outside the
        # plausible range is a data-quality error.  The previous
        # implementation compared NaN values directly, so any gap in an
        # otherwise-valid column spuriously tripped the range error.
        glucose = df['glucose'].dropna()
        if glucose.empty:
            raise ValueError("Glucose column contains no values.")
        if not glucose.between(20, 600).all():
            raise ValueError("Glucose values outside acceptable range (20-600 mg/dL)")

        insulin = df['insulin'].dropna()
        if not insulin.between(0, 50).all():
            raise ValueError("Insulin values outside acceptable range (0-50 units)")

        if df['timestamp'].isnull().any():
            raise ValueError("Missing values in 'timestamp' column.")

    def get_patient_model(self, file_path: Union[str, Path], data_type: str) -> pd.DataFrame:
        """
        Loads patient data from a file and returns it as a standardized DataFrame.

        Args:
            file_path (Union[str, Path]): Path to the data file. Can be
                extension-less for 'model' type.
            data_type (str): Type of the data source (e.g., 'ohio_t1dm',
                'iints_standard_csv', 'model').

        Returns:
            pd.DataFrame: A DataFrame conforming to the universal IINTS-AF schema.

        Raises:
            ValueError: If the data_type is not supported or validation fails.
            FileNotFoundError: If the data file cannot be found.
        """
        file_path = Path(file_path)

        # 'model' data is stored as records-oriented JSON; allow callers to
        # pass an extension-less path.
        if data_type in ('model', 'iints_standard_json') and not file_path.suffix:
            file_path = file_path.with_suffix('.json')

        if not file_path.is_file():
            raise FileNotFoundError(f"Data file not found: {file_path}")

        if data_type == 'ohio_t1dm':
            df = self._load_ohio_t1dm_csv(file_path)
        elif data_type == 'iints_standard_csv':
            # Assumed to already be in the universal schema format.
            df = pd.read_csv(file_path)
        elif data_type in ('model', 'iints_standard_json'):
            df = pd.read_json(file_path, orient='records')
            if 'source' not in df.columns:
                df['source'] = 'iints_standard_json'
            df = self._conform_to_schema(df)
        else:
            raise ValueError(f"Unsupported data type: {data_type}")

        # Validate the loaded data against the universal schema.
        self._validate_schema(df, self.UNIVERSAL_SCHEMA)
        return df
|
|
128
|
+
|
|
129
|
+
if __name__ == "__main__":
    # Example usage: look for a timeseries.csv under
    # data_packs/public/ohio_t1dm/patient_XXX/ and run it through the ingestor.
    ohio_data_path = Path("data_packs/public/ohio_t1dm")

    # Guard the directory scan: iterdir() raises FileNotFoundError when the
    # demo directory is absent, which previously crashed the example with a
    # traceback instead of printing the friendly message below.
    patient_dirs = []
    if ohio_data_path.is_dir():
        patient_dirs = [d for d in ohio_data_path.iterdir() if d.is_dir() and d.name.startswith("patient_")]

    if patient_dirs:
        # Pick the first patient directory that actually contains data.
        sample_timeseries_file = None
        for patient_dir in patient_dirs:
            candidate = patient_dir / "timeseries.csv"
            if candidate.is_file():
                sample_timeseries_file = candidate
                break

        if sample_timeseries_file:
            print(f"Loading sample Ohio T1DM data from: {sample_timeseries_file}")
            ingestor = DataIngestor()
            try:
                df = ingestor.get_patient_model(sample_timeseries_file, 'ohio_t1dm')
                print("Data loaded successfully and validated:")
                print(df.head())
                df.info()
            except FileNotFoundError as e:
                print(f"Error loading data: {e}")
            except ValueError as e:
                print(f"Data quality issue detected during validation for {sample_timeseries_file.name}: {e}")
            except Exception as e:
                # Broad catch is deliberate: this is a demo entry point and
                # should report, not crash.
                print(f"An unexpected error occurred: {e}")
        else:
            print(f"No 'timeseries.csv' found in any patient directory within {ohio_data_path}. Cannot run example.")
    else:
        print(f"No patient directories found in {ohio_data_path}. Cannot run example.")
|
iints/data/nightscout.py
ADDED
|
@@ -0,0 +1,128 @@
|
|
|
1
|
+
from __future__ import annotations
|
|
2
|
+
|
|
3
|
+
from dataclasses import dataclass
|
|
4
|
+
from typing import Any, Dict, Iterable, List, Optional
|
|
5
|
+
import asyncio
|
|
6
|
+
|
|
7
|
+
import pandas as pd
|
|
8
|
+
|
|
9
|
+
from iints.data.importer import ImportResult, import_cgm_dataframe, scenario_from_dataframe
|
|
10
|
+
|
|
11
|
+
|
|
12
|
+
@dataclass
class NightscoutConfig:
    """Connection and query settings for pulling CGM entries from a
    Nightscout site.

    Used by :func:`fetch_nightscout_dataframe` / :func:`import_nightscout`.
    """

    # Base URL of the Nightscout instance.
    url: str
    # Authentication: either the site's API secret or an access token may be
    # supplied; both default to None (sufficient for public read-only sites).
    api_secret: Optional[str] = None
    token: Optional[str] = None
    # Optional time window applied after fetching (parsed with pd.to_datetime).
    start: Optional[str] = None  # ISO string or date-like
    end: Optional[str] = None
    # Optional cap on the number of rows kept (head of the fetched frame).
    limit: Optional[int] = None
|
|
20
|
+
|
|
21
|
+
|
|
22
|
+
def _require_nightscout():
|
|
23
|
+
try:
|
|
24
|
+
import py_nightscout as nightscout # type: ignore
|
|
25
|
+
return nightscout
|
|
26
|
+
except Exception:
|
|
27
|
+
try:
|
|
28
|
+
import nightscout as nightscout # type: ignore
|
|
29
|
+
return nightscout
|
|
30
|
+
except Exception as exc: # pragma: no cover - optional dependency
|
|
31
|
+
raise ImportError(
|
|
32
|
+
"py-nightscout is required. Install with `pip install iints-sdk-python35[nightscout]`."
|
|
33
|
+
) from exc
|
|
34
|
+
|
|
35
|
+
|
|
36
|
+
async def _fetch_entries_async(config: NightscoutConfig) -> List[Any]:
    """Fetch raw sensor-glucose (SGV) entries from a Nightscout site.

    Probes the optional client library defensively so a missing/changed API
    surface produces a clear RuntimeError rather than an AttributeError.
    """
    client_mod = _require_nightscout()
    if not hasattr(client_mod, "Api"):
        raise RuntimeError("py-nightscout API wrapper missing 'Api' class.")

    api = client_mod.Api(  # type: ignore[attr-defined]
        config.url,
        api_secret=config.api_secret,
        token=config.token,
    )
    if not hasattr(api, "get_sgvs"):
        raise RuntimeError("py-nightscout client missing 'get_sgvs' method.")

    entries = await api.get_sgvs()
    return [] if not entries else list(entries)
|
|
49
|
+
|
|
50
|
+
|
|
51
|
+
def _entry_get(entry: Any, key: str) -> Any:
|
|
52
|
+
if isinstance(entry, dict):
|
|
53
|
+
return entry.get(key)
|
|
54
|
+
return getattr(entry, key, None)
|
|
55
|
+
|
|
56
|
+
|
|
57
|
+
def _entries_to_dataframe(entries: Iterable[Any]) -> pd.DataFrame:
|
|
58
|
+
rows: List[Dict[str, Any]] = []
|
|
59
|
+
for entry in entries:
|
|
60
|
+
glucose = _entry_get(entry, "sgv") or _entry_get(entry, "glucose")
|
|
61
|
+
if glucose is None:
|
|
62
|
+
continue
|
|
63
|
+
ts_raw = (
|
|
64
|
+
_entry_get(entry, "date")
|
|
65
|
+
or _entry_get(entry, "dateString")
|
|
66
|
+
or _entry_get(entry, "timestamp")
|
|
67
|
+
)
|
|
68
|
+
if ts_raw is None:
|
|
69
|
+
continue
|
|
70
|
+
if isinstance(ts_raw, (int, float)):
|
|
71
|
+
timestamp = pd.to_datetime(ts_raw, unit="ms", errors="coerce")
|
|
72
|
+
else:
|
|
73
|
+
timestamp = pd.to_datetime(ts_raw, errors="coerce")
|
|
74
|
+
if pd.isna(timestamp):
|
|
75
|
+
continue
|
|
76
|
+
rows.append(
|
|
77
|
+
{
|
|
78
|
+
"timestamp": timestamp,
|
|
79
|
+
"glucose": float(glucose),
|
|
80
|
+
"carbs": 0.0,
|
|
81
|
+
"insulin": 0.0,
|
|
82
|
+
}
|
|
83
|
+
)
|
|
84
|
+
return pd.DataFrame(rows)
|
|
85
|
+
|
|
86
|
+
|
|
87
|
+
def fetch_nightscout_dataframe(config: NightscoutConfig) -> pd.DataFrame:
    """Download Nightscout SGV entries and return them as a DataFrame.

    Applies the config's optional start/end window and row limit, then
    resets the index. Returns an empty frame when no entries were fetched.
    """
    raw_entries = asyncio.run(_fetch_entries_async(config))
    frame = _entries_to_dataframe(raw_entries)
    if frame.empty:
        return frame

    # Window filters are best-effort: unparseable bounds are ignored.
    if config.start:
        lower = pd.to_datetime(config.start, errors="coerce")
        if not pd.isna(lower):
            frame = frame[frame["timestamp"] >= lower]
    if config.end:
        upper = pd.to_datetime(config.end, errors="coerce")
        if not pd.isna(upper):
            frame = frame[frame["timestamp"] <= upper]

    # A falsy limit (None or 0) means "no cap".
    if config.limit:
        frame = frame.head(int(config.limit))

    return frame.reset_index(drop=True)
|
|
104
|
+
|
|
105
|
+
|
|
106
|
+
def import_nightscout(
    config: NightscoutConfig,
    scenario_name: str = "Nightscout Import",
    scenario_version: str = "1.0",
    carb_threshold: float = 0.1,
) -> ImportResult:
    """Fetch Nightscout CGM data and package it as an :class:`ImportResult`.

    The raw frame is normalized through ``import_cgm_dataframe`` and then
    turned into a simulation scenario via ``scenario_from_dataframe``.

    Raises:
        ValueError: when the fetch yields no CGM entries.
    """
    raw_df = fetch_nightscout_dataframe(config)
    if raw_df.empty:
        raise ValueError("No Nightscout CGM entries found for the given parameters.")

    standard_df = import_cgm_dataframe(
        raw_df,
        data_format="generic",
        time_unit="minutes",
        source="nightscout",
    )
    scenario = scenario_from_dataframe(
        standard_df,
        scenario_name=scenario_name,
        scenario_version=scenario_version,
        carb_threshold=carb_threshold,
    )
    return ImportResult(dataframe=standard_df, scenario=scenario)
|