iints-sdk-python35 0.0.18__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- iints/__init__.py +183 -0
- iints/analysis/__init__.py +12 -0
- iints/analysis/algorithm_xray.py +387 -0
- iints/analysis/baseline.py +92 -0
- iints/analysis/clinical_benchmark.py +198 -0
- iints/analysis/clinical_metrics.py +551 -0
- iints/analysis/clinical_tir_analyzer.py +136 -0
- iints/analysis/diabetes_metrics.py +43 -0
- iints/analysis/edge_efficiency.py +33 -0
- iints/analysis/edge_performance_monitor.py +315 -0
- iints/analysis/explainability.py +94 -0
- iints/analysis/explainable_ai.py +232 -0
- iints/analysis/hardware_benchmark.py +221 -0
- iints/analysis/metrics.py +117 -0
- iints/analysis/population_report.py +188 -0
- iints/analysis/reporting.py +345 -0
- iints/analysis/safety_index.py +311 -0
- iints/analysis/sensor_filtering.py +54 -0
- iints/analysis/validator.py +273 -0
- iints/api/__init__.py +0 -0
- iints/api/base_algorithm.py +307 -0
- iints/api/registry.py +103 -0
- iints/api/template_algorithm.py +195 -0
- iints/assets/iints_logo.png +0 -0
- iints/cli/__init__.py +0 -0
- iints/cli/cli.py +2598 -0
- iints/core/__init__.py +1 -0
- iints/core/algorithms/__init__.py +0 -0
- iints/core/algorithms/battle_runner.py +138 -0
- iints/core/algorithms/correction_bolus.py +95 -0
- iints/core/algorithms/discovery.py +92 -0
- iints/core/algorithms/fixed_basal_bolus.py +58 -0
- iints/core/algorithms/hybrid_algorithm.py +92 -0
- iints/core/algorithms/lstm_algorithm.py +138 -0
- iints/core/algorithms/mock_algorithms.py +162 -0
- iints/core/algorithms/pid_controller.py +88 -0
- iints/core/algorithms/standard_pump_algo.py +64 -0
- iints/core/device.py +0 -0
- iints/core/device_manager.py +64 -0
- iints/core/devices/__init__.py +3 -0
- iints/core/devices/models.py +160 -0
- iints/core/patient/__init__.py +9 -0
- iints/core/patient/bergman_model.py +341 -0
- iints/core/patient/models.py +285 -0
- iints/core/patient/patient_factory.py +117 -0
- iints/core/patient/profile.py +41 -0
- iints/core/safety/__init__.py +12 -0
- iints/core/safety/config.py +37 -0
- iints/core/safety/input_validator.py +95 -0
- iints/core/safety/supervisor.py +39 -0
- iints/core/simulation/__init__.py +0 -0
- iints/core/simulation/scenario_parser.py +61 -0
- iints/core/simulator.py +874 -0
- iints/core/supervisor.py +367 -0
- iints/data/__init__.py +53 -0
- iints/data/adapter.py +142 -0
- iints/data/column_mapper.py +398 -0
- iints/data/datasets.json +132 -0
- iints/data/demo/__init__.py +1 -0
- iints/data/demo/demo_cgm.csv +289 -0
- iints/data/importer.py +275 -0
- iints/data/ingestor.py +162 -0
- iints/data/nightscout.py +128 -0
- iints/data/quality_checker.py +550 -0
- iints/data/registry.py +166 -0
- iints/data/tidepool.py +38 -0
- iints/data/universal_parser.py +813 -0
- iints/data/virtual_patients/clinic_safe_baseline.yaml +9 -0
- iints/data/virtual_patients/clinic_safe_hyper_challenge.yaml +9 -0
- iints/data/virtual_patients/clinic_safe_hypo_prone.yaml +9 -0
- iints/data/virtual_patients/clinic_safe_midnight.yaml +9 -0
- iints/data/virtual_patients/clinic_safe_pizza.yaml +9 -0
- iints/data/virtual_patients/clinic_safe_stress_meal.yaml +9 -0
- iints/data/virtual_patients/default_patient.yaml +11 -0
- iints/data/virtual_patients/patient_559_config.yaml +11 -0
- iints/emulation/__init__.py +80 -0
- iints/emulation/legacy_base.py +414 -0
- iints/emulation/medtronic_780g.py +337 -0
- iints/emulation/omnipod_5.py +367 -0
- iints/emulation/tandem_controliq.py +393 -0
- iints/highlevel.py +451 -0
- iints/learning/__init__.py +3 -0
- iints/learning/autonomous_optimizer.py +194 -0
- iints/learning/learning_system.py +122 -0
- iints/metrics.py +34 -0
- iints/population/__init__.py +11 -0
- iints/population/generator.py +131 -0
- iints/population/runner.py +327 -0
- iints/presets/__init__.py +28 -0
- iints/presets/presets.json +114 -0
- iints/research/__init__.py +30 -0
- iints/research/config.py +68 -0
- iints/research/dataset.py +319 -0
- iints/research/losses.py +73 -0
- iints/research/predictor.py +329 -0
- iints/scenarios/__init__.py +3 -0
- iints/scenarios/generator.py +92 -0
- iints/templates/__init__.py +0 -0
- iints/templates/default_algorithm.py +91 -0
- iints/templates/scenarios/__init__.py +0 -0
- iints/templates/scenarios/chaos_insulin_stacking.json +29 -0
- iints/templates/scenarios/chaos_runaway_ai.json +25 -0
- iints/templates/scenarios/example_scenario.json +35 -0
- iints/templates/scenarios/exercise_stress.json +30 -0
- iints/utils/__init__.py +3 -0
- iints/utils/plotting.py +50 -0
- iints/utils/run_io.py +152 -0
- iints/validation/__init__.py +133 -0
- iints/validation/schemas.py +94 -0
- iints/visualization/__init__.py +34 -0
- iints/visualization/cockpit.py +691 -0
- iints/visualization/uncertainty_cloud.py +612 -0
- iints_sdk_python35-0.0.18.dist-info/METADATA +225 -0
- iints_sdk_python35-0.0.18.dist-info/RECORD +118 -0
- iints_sdk_python35-0.0.18.dist-info/WHEEL +5 -0
- iints_sdk_python35-0.0.18.dist-info/entry_points.txt +10 -0
- iints_sdk_python35-0.0.18.dist-info/licenses/LICENSE +28 -0
- iints_sdk_python35-0.0.18.dist-info/top_level.txt +1 -0
|
@@ -0,0 +1,232 @@
|
|
|
1
|
+
#!/usr/bin/env python3
|
|
2
|
+
"""
|
|
3
|
+
Explainable AI Audit Trail - IINTS-AF
|
|
4
|
+
Clinical decision transparency system for medical AI validation
|
|
5
|
+
"""
|
|
6
|
+
|
|
7
|
+
import json
|
|
8
|
+
from datetime import datetime, timedelta
|
|
9
|
+
from pathlib import Path
|
|
10
|
+
import numpy as np
|
|
11
|
+
|
|
12
|
+
class ClinicalAuditTrail:
    """Explainable AI system for clinical decision transparency.

    Maintains an in-memory log of insulin-dosing decisions, each annotated
    with a human-readable clinical rationale, an immediate-risk assessment
    and a decision category. The trail can be summarised or exported as
    JSON for clinical review.
    """

    def __init__(self):
        # Chronological list of audit-entry dicts produced by log_decision().
        self.audit_log = []
        # Reserved for per-decision context; not currently populated.
        self.decision_context = {}

    def log_decision(self, timestamp, glucose_current, glucose_trend, insulin_decision,
                     algorithm_confidence, safety_override=False, context=None):
        """Log AI decision with clinical reasoning.

        Args:
            timestamp: Time of the decision (any JSON-serialisable value).
            glucose_current: Current glucose reading (mg/dL).
            glucose_trend: Recent glucose readings, oldest first; assumed to
                be sampled at 5-minute intervals.
            insulin_decision: Insulin adjustment in units (negative values
                are treated as basal reductions).
            algorithm_confidence: Algorithm confidence, expected in [0, 1].
            safety_override: True when a safety supervisor modified the decision.
            context: Optional dict with flags such as 'meal_detected',
                'exercise_detected' or 'sensor_noise'.

        Returns:
            dict: The audit entry appended to the trail.
        """
        # Glucose rate of change (mg/dL per minute).
        glucose_velocity = self._calculate_velocity(glucose_trend)

        # Human-readable explanation of the decision.
        reasoning = self._generate_clinical_reasoning(
            glucose_current, glucose_velocity, insulin_decision,
            algorithm_confidence, safety_override, context
        )

        audit_entry = {
            'timestamp': timestamp,
            'glucose_mg_dL': glucose_current,
            'glucose_velocity_per_min': glucose_velocity,
            'insulin_decision_U': insulin_decision,
            'algorithm_confidence': algorithm_confidence,
            'safety_override': safety_override,
            'clinical_reasoning': reasoning,
            'risk_assessment': self._assess_immediate_risk(glucose_current, glucose_velocity),
            'decision_category': self._categorize_decision(insulin_decision, glucose_current)
        }

        self.audit_log.append(audit_entry)
        return audit_entry

    def _calculate_velocity(self, glucose_trend):
        """Return the glucose rate of change in mg/dL per minute.

        Uses up to the last 3 readings (a 10-15 minute window at the
        assumed 5-minute sampling interval). Returns 0.0 when fewer than
        two readings are available.
        """
        if not glucose_trend or len(glucose_trend) < 2:
            return 0.0

        # Last 3 readings; the guard above guarantees at least 2 remain,
        # so no further length check is needed here.
        recent_readings = glucose_trend[-3:]

        # Slope in mg/dL per 5-minute interval.
        time_intervals = len(recent_readings) - 1
        glucose_change = recent_readings[-1] - recent_readings[0]
        velocity_per_5min = glucose_change / time_intervals

        # Convert to per-minute.
        return velocity_per_5min / 5.0

    def _generate_clinical_reasoning(self, glucose, velocity, insulin, confidence, override, context):
        """Generate a human-readable clinical rationale for one decision.

        Builds sentence fragments covering glucose status, trend, insulin
        action, AI confidence, safety overrides and contextual flags, then
        joins them into a single period-terminated string.
        """
        reasoning_parts = []

        # Glucose status assessment (70-180 mg/dL treated as target range).
        if glucose < 70:
            reasoning_parts.append(f"Hypoglycemia detected ({glucose:.1f} mg/dL)")
        elif glucose > 180:
            reasoning_parts.append(f"Hyperglycemia detected ({glucose:.1f} mg/dL)")
        else:
            reasoning_parts.append(f"Glucose in target range ({glucose:.1f} mg/dL)")

        # Trend analysis: >2 mg/dL/min is rapid, >1 moderate, else stable.
        if abs(velocity) > 2.0:
            direction = "rising" if velocity > 0 else "falling"
            reasoning_parts.append(f"Rapid glucose {direction} at {abs(velocity):.1f} mg/dL/min")
        elif abs(velocity) > 1.0:
            direction = "increasing" if velocity > 0 else "decreasing"
            reasoning_parts.append(f"Moderate glucose {direction} trend")
        else:
            reasoning_parts.append("Stable glucose trend")

        # Insulin decision reasoning.
        if insulin > 0:
            if glucose > 150 and velocity > 1.0:
                reasoning_parts.append(f"Corrective bolus {insulin:.2f}U for hyperglycemia with rising trend")
            elif glucose > 180:
                reasoning_parts.append(f"Correction bolus {insulin:.2f}U for hyperglycemia")
            else:
                reasoning_parts.append(f"Preventive insulin {insulin:.2f}U based on predictive model")
        elif insulin < 0:
            reasoning_parts.append(f"Basal reduction {abs(insulin):.2f}U to prevent hypoglycemia")
        else:
            reasoning_parts.append("No insulin adjustment - maintaining current therapy")

        # Confidence assessment (only flagged outside the 0.7-0.9 band).
        if confidence < 0.7:
            reasoning_parts.append(f"Low AI confidence ({confidence:.2f}) - conservative approach")
        elif confidence > 0.9:
            reasoning_parts.append(f"High AI confidence ({confidence:.2f}) - optimal conditions")

        # Safety override explanation.
        if override:
            reasoning_parts.append("SAFETY OVERRIDE: Decision modified by clinical safety supervisor")

        # Context-specific reasoning.
        if context:
            if context.get('meal_detected'):
                reasoning_parts.append("Meal bolus component included")
            if context.get('exercise_detected'):
                reasoning_parts.append("Exercise adjustment applied")
            if context.get('sensor_noise'):
                reasoning_parts.append("Sensor reliability considered")

        return ". ".join(reasoning_parts) + "."

    def _assess_immediate_risk(self, glucose, velocity):
        """Assess immediate clinical risk from current glucose and trend.

        Linearly extrapolates glucose 30 minutes ahead and classifies the
        worse of current vs predicted values into a risk string.
        """
        # Predict glucose in 30 minutes (simple linear extrapolation).
        predicted_glucose = glucose + (velocity * 30)

        if glucose < 54 or predicted_glucose < 54:
            return "CRITICAL - Severe hypoglycemia risk"
        elif glucose < 70 or predicted_glucose < 70:
            return "HIGH - Hypoglycemia risk"
        elif glucose > 300 or predicted_glucose > 300:
            return "HIGH - Severe hyperglycemia risk"
        elif glucose > 250 or predicted_glucose > 250:
            return "MODERATE - Hyperglycemia risk"
        elif 70 <= predicted_glucose <= 180:
            return "LOW - Target range maintained"
        else:
            return "MODERATE - Glucose excursion predicted"

    def _categorize_decision(self, insulin, glucose):
        """Categorize the type of clinical decision.

        Dose magnitude takes precedence over glucose level: any bolus or
        basal change is categorised by size/sign first.
        """
        if insulin > 1.0:
            return "CORRECTIVE_BOLUS"
        elif insulin > 0.1:
            return "MICRO_BOLUS"
        elif insulin < -0.1:
            return "BASAL_REDUCTION"
        elif glucose < 70:
            return "HYPOGLYCEMIA_MANAGEMENT"
        elif glucose > 180:
            return "HYPERGLYCEMIA_MANAGEMENT"
        else:
            return "MAINTENANCE_THERAPY"

    def generate_clinical_summary(self, hours=24):
        """Generate a clinical summary string for the most recent period.

        Args:
            hours: Lookback window; entries are assumed to arrive at
                5-minute intervals (12 per hour), so the last
                ``hours * 12`` entries are summarised.

        Returns:
            str: Multi-line summary, or a placeholder when no decisions
            have been recorded.
        """
        if not self.audit_log:
            return "No clinical decisions recorded"

        recent_entries = self.audit_log[-int(hours * 12):]  # 12 entries per hour (5-min intervals)

        # Tally decision types, risk levels and total insulin delivered.
        decision_counts = {}
        risk_levels = {}
        total_insulin = 0

        for entry in recent_entries:
            decision_type = entry['decision_category']
            # Risk strings look like "LEVEL - description"; keep the level.
            risk_level = entry['risk_assessment'].split(' - ')[0]

            decision_counts[decision_type] = decision_counts.get(decision_type, 0) + 1
            risk_levels[risk_level] = risk_levels.get(risk_level, 0) + 1
            total_insulin += entry['insulin_decision_U']

        summary = f"Clinical Decision Summary ({hours}h period):\n"
        summary += f"Total insulin delivered: {total_insulin:.2f}U\n"
        summary += f"Decision types: {dict(decision_counts)}\n"
        summary += f"Risk distribution: {dict(risk_levels)}\n"

        return summary

    def export_audit_trail(self, filepath):
        """Export the audit trail as JSON for clinical review.

        Args:
            filepath: Destination path for the JSON file.

        Returns:
            The same filepath, for caller convenience.
        """
        audit_data = {
            'export_timestamp': datetime.now().isoformat(),
            'total_decisions': len(self.audit_log),
            'audit_entries': self.audit_log,
            # default=str below stringifies non-JSON values (e.g. datetimes).
            'clinical_summary': self.generate_clinical_summary()
        }

        with open(filepath, 'w') as f:
            json.dump(audit_data, f, indent=2, default=str)

        return filepath
|
|
201
|
+
|
|
202
|
+
def main():
    """Demonstrate the explainable-AI audit trail on sample decisions."""
    audit = ClinicalAuditTrail()

    # Each scenario: (glucose, trend, insulin, confidence, override, context).
    test_scenarios = [
        (120, [115, 118, 120], 0.2, 0.85, False, None),  # Stable
        (185, [170, 178, 185], 1.5, 0.92, False, {'meal_detected': True}),  # Rising
        (65, [75, 70, 65], -0.8, 0.78, True, None),  # Falling with override
    ]

    print("Explainable AI Clinical Audit Trail")
    print("=" * 50)

    for index, scenario in enumerate(test_scenarios):
        glucose, trend, insulin, confidence, override, context = scenario
        timestamp = datetime.now() + timedelta(minutes=index * 5)

        entry = audit.log_decision(
            timestamp, glucose, trend, insulin, confidence, override, context
        )

        print(f"\nDecision {index+1}:")
        print(f"Time: {timestamp.strftime('%H:%M')}")
        print(f"Clinical Reasoning: {entry['clinical_reasoning']}")
        print(f"Risk Assessment: {entry['risk_assessment']}")
        print(f"Decision Category: {entry['decision_category']}")

    print(f"\n{audit.generate_clinical_summary(1)}")


if __name__ == "__main__":
    main()
|
|
@@ -0,0 +1,221 @@
|
|
|
1
|
+
import time
|
|
2
|
+
import subprocess
|
|
3
|
+
import threading
|
|
4
|
+
import psutil
|
|
5
|
+
import json
|
|
6
|
+
from typing import Dict, List, Optional
|
|
7
|
+
from dataclasses import dataclass, asdict
|
|
8
|
+
|
|
9
|
+
@dataclass
class PerformanceMetrics:
    """Snapshot of system resource usage at a single point in time.

    CPU/memory fields are always populated; the remaining fields are
    optional and only filled in where platform support exists (e.g. on
    Jetson hardware).
    """
    timestamp: float  # Unix timestamp of the sample (seconds)
    cpu_usage: float  # CPU utilisation percentage (0-100)
    memory_usage: float  # RAM utilisation percentage (0-100)
    gpu_usage: Optional[float] = None  # GPU utilisation; None if unavailable
    gpu_memory: Optional[float] = None  # GPU memory usage; None if unavailable
    temperature: Optional[float] = None  # Degrees Celsius; None if unavailable
    power_consumption: Optional[float] = None  # None if unavailable
|
|
18
|
+
|
|
19
|
+
class HardwareBenchmark:
|
|
20
|
+
"""Hardware performance monitoring for Jetson and other platforms."""
|
|
21
|
+
|
|
22
|
+
def __init__(self, sample_interval=1.0):
|
|
23
|
+
self.sample_interval = sample_interval
|
|
24
|
+
self.metrics_history = []
|
|
25
|
+
self.monitoring = False
|
|
26
|
+
self.monitor_thread = None
|
|
27
|
+
self.is_jetson = self._detect_jetson()
|
|
28
|
+
|
|
29
|
+
def _detect_jetson(self) -> bool:
|
|
30
|
+
"""Detect if running on Jetson platform."""
|
|
31
|
+
try:
|
|
32
|
+
with open('/proc/device-tree/model', 'r') as f:
|
|
33
|
+
model = f.read().strip()
|
|
34
|
+
return 'jetson' in model.lower()
|
|
35
|
+
except:
|
|
36
|
+
return False
|
|
37
|
+
|
|
38
|
+
def _get_tegrastats_metrics(self) -> Optional[Dict]:
|
|
39
|
+
"""Parse tegrastats output for Jetson-specific metrics."""
|
|
40
|
+
if not self.is_jetson:
|
|
41
|
+
return None
|
|
42
|
+
|
|
43
|
+
try:
|
|
44
|
+
result = subprocess.run(['tegrastats', '--interval', '100'],
|
|
45
|
+
capture_output=True, text=True, timeout=2)
|
|
46
|
+
if result.returncode == 0:
|
|
47
|
+
# Parse tegrastats output (simplified)
|
|
48
|
+
lines = result.stdout.strip().split('\n')
|
|
49
|
+
if lines:
|
|
50
|
+
# Example parsing - adapt based on actual tegrastats format
|
|
51
|
+
line = lines[-1] # Get last line
|
|
52
|
+
# This is a simplified parser - real implementation would be more robust
|
|
53
|
+
return {"raw_tegrastats": line}
|
|
54
|
+
except:
|
|
55
|
+
pass
|
|
56
|
+
return None
|
|
57
|
+
|
|
58
|
+
def _collect_metrics(self) -> PerformanceMetrics:
|
|
59
|
+
"""Collect current performance metrics."""
|
|
60
|
+
timestamp = time.time()
|
|
61
|
+
|
|
62
|
+
# CPU and Memory (cross-platform)
|
|
63
|
+
cpu_usage = psutil.cpu_percent(interval=0.1)
|
|
64
|
+
memory = psutil.virtual_memory()
|
|
65
|
+
memory_usage = memory.percent
|
|
66
|
+
|
|
67
|
+
# GPU metrics (Jetson-specific)
|
|
68
|
+
gpu_usage = None
|
|
69
|
+
gpu_memory = None
|
|
70
|
+
temperature = None
|
|
71
|
+
power_consumption = None
|
|
72
|
+
|
|
73
|
+
if self.is_jetson:
|
|
74
|
+
tegra_metrics = self._get_tegrastats_metrics()
|
|
75
|
+
if tegra_metrics:
|
|
76
|
+
# Parse tegrastats data here
|
|
77
|
+
# This is platform-specific and would need proper parsing
|
|
78
|
+
pass
|
|
79
|
+
|
|
80
|
+
# Try to get temperature
|
|
81
|
+
try:
|
|
82
|
+
with open('/sys/class/thermal/thermal_zone0/temp', 'r') as f:
|
|
83
|
+
temp_millicelsius = int(f.read().strip())
|
|
84
|
+
temperature = temp_millicelsius / 1000.0
|
|
85
|
+
except:
|
|
86
|
+
pass
|
|
87
|
+
|
|
88
|
+
return PerformanceMetrics(
|
|
89
|
+
timestamp=timestamp,
|
|
90
|
+
cpu_usage=cpu_usage,
|
|
91
|
+
memory_usage=memory_usage,
|
|
92
|
+
gpu_usage=gpu_usage,
|
|
93
|
+
gpu_memory=gpu_memory,
|
|
94
|
+
temperature=temperature,
|
|
95
|
+
power_consumption=power_consumption
|
|
96
|
+
)
|
|
97
|
+
|
|
98
|
+
def _monitor_loop(self):
|
|
99
|
+
"""Background monitoring loop."""
|
|
100
|
+
while self.monitoring:
|
|
101
|
+
metrics = self._collect_metrics()
|
|
102
|
+
self.metrics_history.append(metrics)
|
|
103
|
+
|
|
104
|
+
# Keep only last 1000 samples to prevent memory issues
|
|
105
|
+
if len(self.metrics_history) > 1000:
|
|
106
|
+
self.metrics_history.pop(0)
|
|
107
|
+
|
|
108
|
+
time.sleep(self.sample_interval)
|
|
109
|
+
|
|
110
|
+
def start_monitoring(self):
|
|
111
|
+
"""Start background performance monitoring."""
|
|
112
|
+
if not self.monitoring:
|
|
113
|
+
self.monitoring = True
|
|
114
|
+
self.monitor_thread = threading.Thread(target=self._monitor_loop, daemon=True)
|
|
115
|
+
self.monitor_thread.start()
|
|
116
|
+
|
|
117
|
+
def stop_monitoring(self):
|
|
118
|
+
"""Stop background performance monitoring."""
|
|
119
|
+
self.monitoring = False
|
|
120
|
+
if self.monitor_thread:
|
|
121
|
+
self.monitor_thread.join(timeout=2)
|
|
122
|
+
|
|
123
|
+
def benchmark_algorithm(self, algorithm, test_data, iterations=100):
|
|
124
|
+
"""Benchmark algorithm performance."""
|
|
125
|
+
self.start_monitoring()
|
|
126
|
+
|
|
127
|
+
start_time = time.time()
|
|
128
|
+
start_metrics = self._collect_metrics()
|
|
129
|
+
|
|
130
|
+
# Run algorithm iterations
|
|
131
|
+
for i in range(iterations):
|
|
132
|
+
# Simulate algorithm execution
|
|
133
|
+
if hasattr(algorithm, 'calculate_insulin'):
|
|
134
|
+
algorithm.calculate_insulin(
|
|
135
|
+
current_glucose=test_data.get('glucose', 120),
|
|
136
|
+
time_step=test_data.get('time_step', 5),
|
|
137
|
+
carb_intake=test_data.get('carbs', 0)
|
|
138
|
+
)
|
|
139
|
+
|
|
140
|
+
end_time = time.time()
|
|
141
|
+
end_metrics = self._collect_metrics()
|
|
142
|
+
|
|
143
|
+
self.stop_monitoring()
|
|
144
|
+
|
|
145
|
+
# Calculate performance statistics
|
|
146
|
+
total_time = end_time - start_time
|
|
147
|
+
avg_time_per_iteration = total_time / iterations
|
|
148
|
+
|
|
149
|
+
# Get metrics during benchmark
|
|
150
|
+
benchmark_metrics = [m for m in self.metrics_history
|
|
151
|
+
if start_time <= m.timestamp <= end_time]
|
|
152
|
+
|
|
153
|
+
if benchmark_metrics:
|
|
154
|
+
avg_cpu = sum(m.cpu_usage for m in benchmark_metrics) / len(benchmark_metrics)
|
|
155
|
+
avg_memory = sum(m.memory_usage for m in benchmark_metrics) / len(benchmark_metrics)
|
|
156
|
+
max_cpu = max(m.cpu_usage for m in benchmark_metrics)
|
|
157
|
+
max_memory = max(m.memory_usage for m in benchmark_metrics)
|
|
158
|
+
else:
|
|
159
|
+
avg_cpu = avg_memory = max_cpu = max_memory = 0
|
|
160
|
+
|
|
161
|
+
return {
|
|
162
|
+
"algorithm_name": algorithm.__class__.__name__,
|
|
163
|
+
"iterations": iterations,
|
|
164
|
+
"total_time_seconds": total_time,
|
|
165
|
+
"avg_time_per_iteration_ms": avg_time_per_iteration * 1000,
|
|
166
|
+
"iterations_per_second": iterations / total_time,
|
|
167
|
+
"cpu_usage": {
|
|
168
|
+
"average": avg_cpu,
|
|
169
|
+
"maximum": max_cpu
|
|
170
|
+
},
|
|
171
|
+
"memory_usage": {
|
|
172
|
+
"average": avg_memory,
|
|
173
|
+
"maximum": max_memory
|
|
174
|
+
},
|
|
175
|
+
"platform": "Jetson" if self.is_jetson else "Generic",
|
|
176
|
+
"sample_count": len(benchmark_metrics)
|
|
177
|
+
}
|
|
178
|
+
|
|
179
|
+
def get_current_metrics(self) -> Dict:
|
|
180
|
+
"""Get current system metrics."""
|
|
181
|
+
metrics = self._collect_metrics()
|
|
182
|
+
return asdict(metrics)
|
|
183
|
+
|
|
184
|
+
def get_metrics_summary(self) -> Dict:
|
|
185
|
+
"""Get summary of collected metrics."""
|
|
186
|
+
if not self.metrics_history:
|
|
187
|
+
return {"error": "No metrics collected"}
|
|
188
|
+
|
|
189
|
+
cpu_values = [m.cpu_usage for m in self.metrics_history]
|
|
190
|
+
memory_values = [m.memory_usage for m in self.metrics_history]
|
|
191
|
+
|
|
192
|
+
return {
|
|
193
|
+
"sample_count": len(self.metrics_history),
|
|
194
|
+
"duration_seconds": self.metrics_history[-1].timestamp - self.metrics_history[0].timestamp,
|
|
195
|
+
"cpu_usage": {
|
|
196
|
+
"average": sum(cpu_values) / len(cpu_values),
|
|
197
|
+
"minimum": min(cpu_values),
|
|
198
|
+
"maximum": max(cpu_values)
|
|
199
|
+
},
|
|
200
|
+
"memory_usage": {
|
|
201
|
+
"average": sum(memory_values) / len(memory_values),
|
|
202
|
+
"minimum": min(memory_values),
|
|
203
|
+
"maximum": max(memory_values)
|
|
204
|
+
},
|
|
205
|
+
"platform": "Jetson" if self.is_jetson else "Generic"
|
|
206
|
+
}
|
|
207
|
+
|
|
208
|
+
def export_metrics(self, filename: str):
|
|
209
|
+
"""Export metrics to JSON file."""
|
|
210
|
+
data = {
|
|
211
|
+
"platform": "Jetson" if self.is_jetson else "Generic",
|
|
212
|
+
"sample_interval": self.sample_interval,
|
|
213
|
+
"metrics": [asdict(m) for m in self.metrics_history]
|
|
214
|
+
}
|
|
215
|
+
|
|
216
|
+
with open(filename, 'w') as f:
|
|
217
|
+
json.dump(data, f, indent=2)
|
|
218
|
+
|
|
219
|
+
def clear_metrics(self):
|
|
220
|
+
"""Clear collected metrics."""
|
|
221
|
+
self.metrics_history.clear()
|
|
@@ -0,0 +1,117 @@
|
|
|
1
|
+
import pandas as pd
|
|
2
|
+
from typing import Dict, Any, Tuple
|
|
3
|
+
|
|
4
|
+
def calculate_tir(df: pd.DataFrame, lower_bound: float = 70.0, upper_bound: float = 180.0) -> float:
    """Calculate Time In Range (TIR) for glucose values.

    Args:
        df: Simulation results; must contain a 'glucose_actual_mgdl' column.
        lower_bound: Lower bound of the target range (mg/dL), inclusive.
        upper_bound: Upper bound of the target range (mg/dL), inclusive.

    Returns:
        Percentage of time in range (0-100), or NaN when the glucose
        column is missing (keeps benchmark aggregation robust).
    """
    if 'glucose_actual_mgdl' not in df.columns:
        return float('nan')

    glucose = df['glucose_actual_mgdl']
    # Series.between is inclusive on both ends by default, matching
    # the >= lower and <= upper comparison.
    within_range = glucose.between(lower_bound, upper_bound)
    return within_range.mean() * 100
|
|
23
|
+
|
|
24
|
+
def calculate_hypoglycemia(df: pd.DataFrame, threshold: float = 70.0) -> float:
    """Calculate the percentage of time spent in hypoglycemia.

    Args:
        df: Simulation results; must contain a 'glucose_actual_mgdl' column.
        threshold: Glucose threshold for hypoglycemia (mg/dL), exclusive.

    Returns:
        Percentage of time below the threshold (0-100), or NaN when the
        glucose column is missing.
    """
    if 'glucose_actual_mgdl' not in df.columns:
        return float('nan')

    # Strictly-below comparison, identical to `< threshold`.
    below_threshold = df['glucose_actual_mgdl'].lt(threshold)
    return below_threshold.mean() * 100
|
|
42
|
+
|
|
43
|
+
def calculate_hyperglycemia(df: pd.DataFrame, threshold: float = 180.0) -> float:
    """Calculate the percentage of time spent in hyperglycemia.

    Args:
        df: Simulation results; must contain a 'glucose_actual_mgdl' column.
        threshold: Glucose threshold for hyperglycemia (mg/dL), exclusive.

    Returns:
        Percentage of time above the threshold (0-100), or NaN when the
        glucose column is missing.
    """
    if 'glucose_actual_mgdl' not in df.columns:
        return float('nan')

    # Strictly-above comparison, identical to `> threshold`.
    above_threshold = df['glucose_actual_mgdl'].gt(threshold)
    return above_threshold.mean() * 100
|
|
61
|
+
|
|
62
|
+
def calculate_average_glucose(df: pd.DataFrame) -> float:
    """Calculate the mean glucose value over the simulation.

    Args:
        df: Simulation results; must contain a 'glucose_actual_mgdl' column.

    Returns:
        Average glucose (mg/dL), or NaN when the glucose column is missing.
    """
    if 'glucose_actual_mgdl' not in df.columns:
        return float('nan')

    glucose_series = df['glucose_actual_mgdl']
    return glucose_series.mean()
|
|
77
|
+
|
|
78
|
+
def generate_benchmark_metrics(df: pd.DataFrame) -> Dict[str, float]:
    """Build a dictionary of key benchmark metrics from simulation results.

    Args:
        df: Simulation results DataFrame.

    Returns:
        Mapping from human-readable metric labels to their values; each
        value is NaN when the glucose column is absent.
    """
    # Default clinical thresholds: target 70-180, hypo <70, hyper >180.
    return {
        "TIR (%)": calculate_tir(df),
        "Hypoglycemia (<70 mg/dL) (%)": calculate_hypoglycemia(df),
        "Hyperglycemia (>180 mg/dL) (%)": calculate_hyperglycemia(df),
        "Avg Glucose (mg/dL)": calculate_average_glucose(df),
    }
|
|
95
|
+
|
|
96
|
+
if __name__ == "__main__":
    # Example usage:
    print("Running example for metrics.py")

    # Build a small synthetic glucose trace for demonstration.
    glucose_trace = [100, 110, 150, 200, 170, 80, 60, 50, 90, 120, 140, 160]
    dummy_df = pd.DataFrame({
        'time_minutes': range(0, 60, 5),
        'glucose_actual_mgdl': glucose_trace,
    })

    print("\nDummy DataFrame:")
    print(dummy_df)

    metrics_results = generate_benchmark_metrics(dummy_df)
    print("\nCalculated Metrics:")
    for key, value in metrics_results.items():
        print(f"- {key}: {value:.2f}")

    # Test with custom ranges
    print("\nCalculated Metrics (Custom TIR 80-140):")
    custom_tir = calculate_tir(dummy_df, lower_bound=80, upper_bound=140)
    print(f"- TIR (80-140 mg/dL): {custom_tir:.2f}%")
|