iints-sdk-python35 0.0.18__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- iints/__init__.py +183 -0
- iints/analysis/__init__.py +12 -0
- iints/analysis/algorithm_xray.py +387 -0
- iints/analysis/baseline.py +92 -0
- iints/analysis/clinical_benchmark.py +198 -0
- iints/analysis/clinical_metrics.py +551 -0
- iints/analysis/clinical_tir_analyzer.py +136 -0
- iints/analysis/diabetes_metrics.py +43 -0
- iints/analysis/edge_efficiency.py +33 -0
- iints/analysis/edge_performance_monitor.py +315 -0
- iints/analysis/explainability.py +94 -0
- iints/analysis/explainable_ai.py +232 -0
- iints/analysis/hardware_benchmark.py +221 -0
- iints/analysis/metrics.py +117 -0
- iints/analysis/population_report.py +188 -0
- iints/analysis/reporting.py +345 -0
- iints/analysis/safety_index.py +311 -0
- iints/analysis/sensor_filtering.py +54 -0
- iints/analysis/validator.py +273 -0
- iints/api/__init__.py +0 -0
- iints/api/base_algorithm.py +307 -0
- iints/api/registry.py +103 -0
- iints/api/template_algorithm.py +195 -0
- iints/assets/iints_logo.png +0 -0
- iints/cli/__init__.py +0 -0
- iints/cli/cli.py +2598 -0
- iints/core/__init__.py +1 -0
- iints/core/algorithms/__init__.py +0 -0
- iints/core/algorithms/battle_runner.py +138 -0
- iints/core/algorithms/correction_bolus.py +95 -0
- iints/core/algorithms/discovery.py +92 -0
- iints/core/algorithms/fixed_basal_bolus.py +58 -0
- iints/core/algorithms/hybrid_algorithm.py +92 -0
- iints/core/algorithms/lstm_algorithm.py +138 -0
- iints/core/algorithms/mock_algorithms.py +162 -0
- iints/core/algorithms/pid_controller.py +88 -0
- iints/core/algorithms/standard_pump_algo.py +64 -0
- iints/core/device.py +0 -0
- iints/core/device_manager.py +64 -0
- iints/core/devices/__init__.py +3 -0
- iints/core/devices/models.py +160 -0
- iints/core/patient/__init__.py +9 -0
- iints/core/patient/bergman_model.py +341 -0
- iints/core/patient/models.py +285 -0
- iints/core/patient/patient_factory.py +117 -0
- iints/core/patient/profile.py +41 -0
- iints/core/safety/__init__.py +12 -0
- iints/core/safety/config.py +37 -0
- iints/core/safety/input_validator.py +95 -0
- iints/core/safety/supervisor.py +39 -0
- iints/core/simulation/__init__.py +0 -0
- iints/core/simulation/scenario_parser.py +61 -0
- iints/core/simulator.py +874 -0
- iints/core/supervisor.py +367 -0
- iints/data/__init__.py +53 -0
- iints/data/adapter.py +142 -0
- iints/data/column_mapper.py +398 -0
- iints/data/datasets.json +132 -0
- iints/data/demo/__init__.py +1 -0
- iints/data/demo/demo_cgm.csv +289 -0
- iints/data/importer.py +275 -0
- iints/data/ingestor.py +162 -0
- iints/data/nightscout.py +128 -0
- iints/data/quality_checker.py +550 -0
- iints/data/registry.py +166 -0
- iints/data/tidepool.py +38 -0
- iints/data/universal_parser.py +813 -0
- iints/data/virtual_patients/clinic_safe_baseline.yaml +9 -0
- iints/data/virtual_patients/clinic_safe_hyper_challenge.yaml +9 -0
- iints/data/virtual_patients/clinic_safe_hypo_prone.yaml +9 -0
- iints/data/virtual_patients/clinic_safe_midnight.yaml +9 -0
- iints/data/virtual_patients/clinic_safe_pizza.yaml +9 -0
- iints/data/virtual_patients/clinic_safe_stress_meal.yaml +9 -0
- iints/data/virtual_patients/default_patient.yaml +11 -0
- iints/data/virtual_patients/patient_559_config.yaml +11 -0
- iints/emulation/__init__.py +80 -0
- iints/emulation/legacy_base.py +414 -0
- iints/emulation/medtronic_780g.py +337 -0
- iints/emulation/omnipod_5.py +367 -0
- iints/emulation/tandem_controliq.py +393 -0
- iints/highlevel.py +451 -0
- iints/learning/__init__.py +3 -0
- iints/learning/autonomous_optimizer.py +194 -0
- iints/learning/learning_system.py +122 -0
- iints/metrics.py +34 -0
- iints/population/__init__.py +11 -0
- iints/population/generator.py +131 -0
- iints/population/runner.py +327 -0
- iints/presets/__init__.py +28 -0
- iints/presets/presets.json +114 -0
- iints/research/__init__.py +30 -0
- iints/research/config.py +68 -0
- iints/research/dataset.py +319 -0
- iints/research/losses.py +73 -0
- iints/research/predictor.py +329 -0
- iints/scenarios/__init__.py +3 -0
- iints/scenarios/generator.py +92 -0
- iints/templates/__init__.py +0 -0
- iints/templates/default_algorithm.py +91 -0
- iints/templates/scenarios/__init__.py +0 -0
- iints/templates/scenarios/chaos_insulin_stacking.json +29 -0
- iints/templates/scenarios/chaos_runaway_ai.json +25 -0
- iints/templates/scenarios/example_scenario.json +35 -0
- iints/templates/scenarios/exercise_stress.json +30 -0
- iints/utils/__init__.py +3 -0
- iints/utils/plotting.py +50 -0
- iints/utils/run_io.py +152 -0
- iints/validation/__init__.py +133 -0
- iints/validation/schemas.py +94 -0
- iints/visualization/__init__.py +34 -0
- iints/visualization/cockpit.py +691 -0
- iints/visualization/uncertainty_cloud.py +612 -0
- iints_sdk_python35-0.0.18.dist-info/METADATA +225 -0
- iints_sdk_python35-0.0.18.dist-info/RECORD +118 -0
- iints_sdk_python35-0.0.18.dist-info/WHEEL +5 -0
- iints_sdk_python35-0.0.18.dist-info/entry_points.txt +10 -0
- iints_sdk_python35-0.0.18.dist-info/licenses/LICENSE +28 -0
- iints_sdk_python35-0.0.18.dist-info/top_level.txt +1 -0
|
@@ -0,0 +1,43 @@
|
|
|
1
|
+
import numpy as np
|
|
2
|
+
import pandas as pd
|
|
3
|
+
|
|
4
|
+
class DiabetesMetrics:
    """Professional diabetes metrics for algorithm evaluation.

    All glucose inputs are expected in mg/dL as a pandas Series or numpy
    array. Every metric degrades gracefully on empty input instead of
    raising ZeroDivisionError / emitting NaN warnings.
    """

    @staticmethod
    def time_in_range(glucose_values, lower=70, upper=180):
        """Calculate Time In Range (TIR) percentage.

        Args:
            glucose_values: Glucose readings in mg/dL.
            lower: Inclusive lower bound of the target range (mg/dL).
            upper: Inclusive upper bound of the target range (mg/dL).

        Returns:
            Percentage of readings within [lower, upper]; 0.0 for empty input.
        """
        count = len(glucose_values)
        if count == 0:
            # Avoid ZeroDivisionError on an empty trace.
            return 0.0
        in_range = (glucose_values >= lower) & (glucose_values <= upper)
        return (in_range.sum() / count) * 100

    @staticmethod
    def coefficient_of_variation(glucose_values):
        """Calculate CV (%) - variability metric (population std / mean * 100).

        Returns 0.0 for empty input or a zero mean instead of dividing by zero.
        """
        if len(glucose_values) == 0:
            return 0.0
        mean = np.mean(glucose_values)
        if mean == 0:
            # CV is undefined for a zero mean; report 0 rather than inf/NaN.
            return 0.0
        return (np.std(glucose_values) / mean) * 100

    @staticmethod
    def blood_glucose_risk_index(glucose_values, risk_type='high'):
        """Calculate LBGI (risk_type='low') or HBGI (risk_type='high').

        Uses the risk transform risk(bg) = 10 * (1.509 * (ln(bg)^1.084 - 5.381))^2,
        counting only readings below (LBGI) or above (HBGI) the symmetry
        point at 112.5 mg/dL. Returns the mean risk, or 0.0 for empty input.
        """
        def risk_function(bg):
            if risk_type == 'low':
                return 10 * (1.509 * (np.log(bg)**1.084 - 5.381))**2 if bg < 112.5 else 0
            else:  # high
                return 10 * (1.509 * (np.log(bg)**1.084 - 5.381))**2 if bg > 112.5 else 0

        risks = [risk_function(bg) for bg in glucose_values]
        if not risks:
            # np.mean([]) would return NaN with a RuntimeWarning.
            return 0.0
        return np.mean(risks)

    @staticmethod
    def calculate_all_metrics(df, baseline=120):
        """Calculate comprehensive metrics suite.

        Args:
            df: Simulation results containing a 'glucose_actual_mgdl' column.
            baseline: Reserved for baseline comparison (currently unused).

        Returns:
            Dict of summary metrics for the glucose trace.
        """
        glucose = df['glucose_actual_mgdl']

        return {
            "peak_glucose_mgdl": glucose.max(),
            "tir_percentage": DiabetesMetrics.time_in_range(glucose),
            "cv_percentage": DiabetesMetrics.coefficient_of_variation(glucose),
            "lbgi": DiabetesMetrics.blood_glucose_risk_index(glucose, 'low'),
            "hbgi": DiabetesMetrics.blood_glucose_risk_index(glucose, 'high'),
            "mean_glucose": glucose.mean(),
            "glucose_std": glucose.std()
        }
|
|
@@ -0,0 +1,33 @@
|
|
|
1
|
+
from __future__ import annotations
|
|
2
|
+
|
|
3
|
+
from dataclasses import dataclass
|
|
4
|
+
|
|
5
|
+
|
|
6
|
+
@dataclass(frozen=True)
class EnergyEstimate:
    """Energy cost of running one control decision on a device."""

    # Per-decision energy.
    energy_joules: float
    energy_microjoules: float
    # Daily totals at the (clamped) decision rate.
    decisions_per_day: int
    energy_joules_per_day: float
    energy_millijoules_per_day: float


def estimate_energy_per_decision(power_watts: float, latency_ms: float, decisions_per_day: int = 288) -> EnergyEstimate:
    """
    Estimate energy cost per decision on any device.

    Energy (J) = Power (W) * Time (s)

    Args:
        power_watts: Device power draw during a decision; negatives clamp to 0.
        latency_ms: Per-decision latency; negatives clamp to 0.
        decisions_per_day: Control-loop frequency, default 288 (5-minute loop);
            clamped to at least 1.

    Returns:
        EnergyEstimate with per-decision and per-day energy figures.
    """
    latency_s = max(latency_ms, 0.0) / 1000.0
    energy_j = max(power_watts, 0.0) * latency_s
    energy_uj = energy_j * 1_000_000.0
    # Clamp once and reuse so the reported rate always matches the daily totals.
    daily_decisions = max(int(decisions_per_day), 1)
    energy_day_j = energy_j * daily_decisions
    return EnergyEstimate(
        energy_joules=energy_j,
        energy_microjoules=energy_uj,
        decisions_per_day=daily_decisions,
        energy_joules_per_day=energy_day_j,
        energy_millijoules_per_day=energy_day_j * 1_000.0,
    )
|
|
@@ -0,0 +1,315 @@
|
|
|
1
|
+
#!/usr/bin/env python3
|
|
2
|
+
"""
|
|
3
|
+
Edge AI Performance Monitor - IINTS-AF
|
|
4
|
+
Jetson Nano performance validation for medical device standards
|
|
5
|
+
"""
|
|
6
|
+
|
|
7
|
+
import time
|
|
8
|
+
import psutil
|
|
9
|
+
import json
|
|
10
|
+
from pathlib import Path
|
|
11
|
+
from datetime import datetime
|
|
12
|
+
import numpy as np
|
|
13
|
+
|
|
14
|
+
class EdgeAIPerformanceMonitor:
    """Monitor Jetson Nano performance for medical device validation"""

    def __init__(self):
        # Chronological list of measurement sessions (dicts) appended by
        # measure_inference_latency; consumed by generate_performance_report
        # and export_performance_data.
        self.performance_log = []
        # System snapshot captured by start_monitoring; None until then.
        self.baseline_metrics = None
        # Whether start_monitoring has been called for this instance.
        self.monitoring_active = False

    def start_monitoring(self):
        """Start performance monitoring session.

        Captures a baseline system snapshot and prints it. Safe to call
        more than once; each call re-captures the baseline.
        """
        self.monitoring_active = True
        self.baseline_metrics = self._capture_baseline()

        print("Edge AI Performance Monitoring Started")
        print(f"Baseline CPU: {self.baseline_metrics['cpu_percent']:.1f}%")
        print(f"Baseline Memory: {self.baseline_metrics['memory_mb']:.1f} MB")
        print(f"Available Memory: {self.baseline_metrics['available_memory_mb']:.1f} MB")

    def measure_inference_latency(self, inference_function, input_data, iterations=100):
        """Measure AI inference latency with statistical analysis.

        Args:
            inference_function: Callable invoked as inference_function(input_data);
                its return value is discarded (only the call is timed).
            input_data: Opaque payload forwarded to inference_function on every
                iteration.
            iterations: Number of timed calls to attempt.

        Returns:
            The statistics dict from _analyze_performance_stats (or its
            {'error': ...} form when every iteration raised).
        """

        if not self.monitoring_active:
            self.start_monitoring()

        latencies = []
        cpu_usage = []
        memory_usage = []

        print(f"Measuring inference latency over {iterations} iterations...")

        for i in range(iterations):
            # Pre-inference metrics
            # NOTE(review): psutil.cpu_percent() with no interval reports usage
            # since the previous call, so the first delta may be unrepresentative.
            cpu_before = psutil.cpu_percent()
            memory_before = psutil.virtual_memory().used / (1024 * 1024)  # MB

            # Measure inference time
            start_time = time.perf_counter()

            try:
                # Return value intentionally unused; we only time the call.
                result = inference_function(input_data)
            except Exception as e:
                # A failed iteration is skipped rather than aborting the run,
                # so the final stats reflect only successful inferences.
                print(f"Inference error at iteration {i}: {e}")
                continue

            end_time = time.perf_counter()

            # Post-inference metrics
            cpu_after = psutil.cpu_percent()
            memory_after = psutil.virtual_memory().used / (1024 * 1024)  # MB

            # Calculate metrics
            latency_ms = (end_time - start_time) * 1000
            cpu_delta = cpu_after - cpu_before
            memory_delta = memory_after - memory_before

            latencies.append(latency_ms)
            cpu_usage.append(cpu_delta)
            memory_usage.append(memory_delta)

            # Log detailed metrics every 10 iterations
            if (i + 1) % 10 == 0:
                avg_latency = np.mean(latencies[-10:])
                print(f"Iteration {i+1:3d}: Avg latency {avg_latency:.2f}ms")

        # Statistical analysis
        performance_stats = self._analyze_performance_stats(latencies, cpu_usage, memory_usage)

        # Log performance session
        self.performance_log.append({
            'timestamp': datetime.now().isoformat(),
            'test_type': 'inference_latency',
            'iterations': len(latencies),
            'statistics': performance_stats,
            'raw_latencies_ms': latencies,
            'cpu_usage_delta': cpu_usage,
            'memory_usage_delta_mb': memory_usage
        })

        return performance_stats

    def _analyze_performance_stats(self, latencies, cpu_usage, memory_usage):
        """Analyze performance statistics for medical device validation.

        Args:
            latencies: Per-iteration latency samples in milliseconds.
            cpu_usage: Per-iteration CPU%-usage deltas (same length as latencies).
            memory_usage: Per-iteration memory deltas in MB.

        Returns:
            Dict of latency statistics, qualitative classifications, system
            impact, and an embedded-suitability assessment; or
            {'error': 'No valid measurements'} when latencies is empty.
        """

        if not latencies:
            return {'error': 'No valid measurements'}

        latency_stats = {
            'mean_ms': round(np.mean(latencies), 3),
            'median_ms': round(np.median(latencies), 3),
            'std_ms': round(np.std(latencies), 3),
            'min_ms': round(np.min(latencies), 3),
            'max_ms': round(np.max(latencies), 3),
            'p95_ms': round(np.percentile(latencies, 95), 3),
            'p99_ms': round(np.percentile(latencies, 99), 3)
        }

        # Embedded system performance classification
        # Thresholds (10/50/100 ms) are this project's latency tiers, not an
        # external standard.
        mean_latency = latency_stats['mean_ms']
        if mean_latency < 10:
            performance_class = "SUB_10MS_LATENCY - Real-time capable"
        elif mean_latency < 50:
            performance_class = "SUB_50MS_LATENCY - Near real-time suitable"
        elif mean_latency < 100:
            performance_class = "SUB_100MS_LATENCY - Batch processing suitable"
        else:
            performance_class = "OPTIMIZATION_REQUIRED - Exceeds embedded constraints"

        # Consistency analysis (coefficient of variation)
        cv_percent = (latency_stats['std_ms'] / latency_stats['mean_ms']) * 100

        if cv_percent < 5:
            consistency_rating = "HIGHLY CONSISTENT"
        elif cv_percent < 15:
            consistency_rating = "CONSISTENT"
        elif cv_percent < 30:
            consistency_rating = "MODERATELY VARIABLE"
        else:
            consistency_rating = "HIGHLY VARIABLE - Investigate"

        return {
            'latency_statistics': latency_stats,
            'performance_classification': performance_class,
            'consistency_rating': consistency_rating,
            'coefficient_of_variation_percent': round(cv_percent, 2),
            'cpu_impact': {
                'mean_delta_percent': round(np.mean(cpu_usage), 2),
                'max_delta_percent': round(np.max(cpu_usage), 2)
            },
            'memory_impact': {
                'mean_delta_mb': round(np.mean(memory_usage), 2),
                'max_delta_mb': round(np.max(memory_usage), 2)
            },
            'medical_device_assessment': self._assess_embedded_suitability(mean_latency, cv_percent)
        }

    def _assess_embedded_suitability(self, mean_latency, cv_percent):
        """Assess suitability for embedded system deployment.

        Scores the run against four pass/fail criteria and maps the number
        of passes to a qualitative suitability rating.
        """

        # Embedded system criteria
        criteria = {
            'real_time_response': mean_latency < 100,  # < 100ms for real-time
            'consistent_performance': cv_percent < 20,  # < 20% variation
            'low_latency': mean_latency < 50,  # < 50ms preferred
            'high_reliability': cv_percent < 10  # < 10% for high reliability
        }

        passed_criteria = sum(criteria.values())
        total_criteria = len(criteria)

        if passed_criteria == total_criteria:
            suitability = "EMBEDDED_OPTIMAL - Compatible with real-time constraints"
        elif passed_criteria >= 3:
            suitability = "EMBEDDED_SUITABLE - Compatible with near real-time applications"
        elif passed_criteria >= 2:
            suitability = "RESEARCH_GRADE - Suitable for research and development"
        else:
            suitability = "OPTIMIZATION_REQUIRED - Requires performance tuning"

        return {
            'suitability_rating': suitability,
            'criteria_passed': f"{passed_criteria}/{total_criteria}",
            'detailed_criteria': criteria,
            'recommendations': self._generate_optimization_recommendations(criteria)
        }

    def _generate_optimization_recommendations(self, criteria):
        """Generate optimization recommendations based on failed criteria.

        Args:
            criteria: The pass/fail dict built by _assess_embedded_suitability.

        Returns:
            Non-empty list of human-readable recommendation strings.
        """

        recommendations = []

        if not criteria['real_time_response']:
            recommendations.append("Optimize model architecture for faster inference")

        if not criteria['consistent_performance']:
            recommendations.append("Investigate system load variations and thermal throttling")

        if not criteria['low_latency']:
            recommendations.append("Consider model quantization or pruning techniques")

        if not criteria['high_reliability']:
            recommendations.append("Implement performance monitoring and adaptive scheduling")

        if not recommendations:
            recommendations.append("Performance meets embedded system constraints")

        return recommendations

    def _capture_baseline(self):
        """Capture baseline system metrics.

        Blocks for ~1 second because cpu_percent(interval=1) samples over
        that window.
        """

        memory = psutil.virtual_memory()

        return {
            'timestamp': datetime.now().isoformat(),
            'cpu_percent': psutil.cpu_percent(interval=1),
            'memory_mb': memory.used / (1024 * 1024),
            'available_memory_mb': memory.available / (1024 * 1024),
            'memory_percent': memory.percent,
            'cpu_count': psutil.cpu_count(),
            # cpu_freq() can return None on platforms without frequency info.
            'cpu_freq_mhz': psutil.cpu_freq().current if psutil.cpu_freq() else 'Unknown'
        }

    def generate_performance_report(self):
        """Generate comprehensive performance report.

        Returns:
            A formatted multi-line report string for the most recent session,
            or a short message when no sessions have been recorded.

        NOTE(review): assumes the latest session's statistics are the full
        stats dict; a session whose every iteration failed stores
        {'error': ...} instead and this would raise KeyError — confirm
        callers only report successful sessions.
        """

        if not self.performance_log:
            return "No performance data available"

        latest_session = self.performance_log[-1]
        stats = latest_session['statistics']

        report = f"""
EDGE AI PERFORMANCE VALIDATION REPORT
=====================================

Test Configuration:
- Device: Jetson Nano (Edge AI Platform)
- Test Date: {latest_session['timestamp'][:19]}
- Iterations: {latest_session['iterations']}
- Test Type: {latest_session['test_type']}

INFERENCE PERFORMANCE:
- Mean Latency: {stats['latency_statistics']['mean_ms']:.3f} ms
- Median Latency: {stats['latency_statistics']['median_ms']:.3f} ms
- 95th Percentile: {stats['latency_statistics']['p95_ms']:.3f} ms
- 99th Percentile: {stats['latency_statistics']['p99_ms']:.3f} ms
- Standard Deviation: {stats['latency_statistics']['std_ms']:.3f} ms

PERFORMANCE CLASSIFICATION:
- Overall Rating: {stats['performance_classification']}
- Consistency: {stats['consistency_rating']}
- Coefficient of Variation: {stats['coefficient_of_variation_percent']:.2f}%

EMBEDDED SYSTEM ASSESSMENT:
- Suitability: {stats['medical_device_assessment']['suitability_rating']}
- Criteria Passed: {stats['medical_device_assessment']['criteria_passed']}

SYSTEM IMPACT:
- CPU Usage Delta: {stats['cpu_impact']['mean_delta_percent']:.2f}% (avg), {stats['cpu_impact']['max_delta_percent']:.2f}% (max)
- Memory Usage Delta: {stats['memory_impact']['mean_delta_mb']:.2f} MB (avg), {stats['memory_impact']['max_delta_mb']:.2f} MB (max)

RECOMMENDATIONS:
"""

        for rec in stats['medical_device_assessment']['recommendations']:
            report += f"- {rec}\n"

        return report

    def export_performance_data(self, filepath):
        """Export performance data for analysis.

        Writes a JSON document containing the baseline snapshot, every
        recorded session, and the text report to *filepath*.

        Args:
            filepath: Destination path (str or Path).

        Returns:
            The filepath argument, unchanged.
        """

        export_data = {
            'export_timestamp': datetime.now().isoformat(),
            'baseline_metrics': self.baseline_metrics,
            'performance_sessions': self.performance_log,
            'summary_report': self.generate_performance_report()
        }

        with open(filepath, 'w') as f:
            # default=str stringifies anything json can't encode (e.g. Path).
            json.dump(export_data, f, indent=2, default=str)

        return filepath
|
|
277
|
+
|
|
278
|
+
# Mock inference function for testing
|
|
279
|
+
def mock_ai_inference(input_data):
    """Stand-in inference routine for exercising the performance monitor.

    Sleeps for a fixed 8 ms plus an exponentially-distributed jitter, then
    returns a dict with random 'prediction' and 'confidence' values.
    """
    base_latency_s = 0.008  # fixed compute cost (~8 ms)
    time.sleep(base_latency_s)

    # Stochastic scheduling jitter on top of the base latency.
    jitter_s = np.random.exponential(0.002)
    time.sleep(jitter_s)

    return {"prediction": np.random.random(), "confidence": np.random.random()}
|
|
288
|
+
|
|
289
|
+
def main():
    """Test edge AI performance monitoring.

    Runs 50 mock inferences through EdgeAIPerformanceMonitor, prints the
    resulting report, and exports the raw data to results/edge_ai_performance.json.
    """
    monitor = EdgeAIPerformanceMonitor()

    print("IINTS-AF Edge AI Performance Validation")
    print("=" * 45)

    # Test inference performance
    test_input = {"glucose": 150, "trend": [145, 148, 150]}

    # Return value kept for symmetry with the API; the report below reads the
    # same statistics from the monitor's internal session log.
    performance_stats = monitor.measure_inference_latency(
        mock_ai_inference, test_input, iterations=50
    )

    # Generate and display report
    report = monitor.generate_performance_report()
    print(report)

    # Export data
    export_file = Path("results") / "edge_ai_performance.json"
    # exist_ok only guards the leaf directory; parents must already exist.
    export_file.parent.mkdir(exist_ok=True)
    monitor.export_performance_data(export_file)

    print(f"\nPerformance data exported to: {export_file}")

if __name__ == "__main__":
    main()
|
|
@@ -0,0 +1,94 @@
|
|
|
1
|
+
import pandas as pd
|
|
2
|
+
import numpy as np
|
|
3
|
+
from typing import List, Dict, Any, Callable, Optional, Union, cast
|
|
4
|
+
|
|
5
|
+
class ExplainabilityAnalyzer:
    """
    Provides tools for analyzing and explaining the behavior of insulin algorithms.

    This module implements 'AI as explainability tool' by performing sensitivity analysis.
    """
    def __init__(self, simulation_results: pd.DataFrame):
        # Expected columns: 'glucose_actual_mgdl' plus the insulin delivery
        # columns consumed by analyze_insulin_response (some may be absent).
        self.results = simulation_results

    def calculate_glucose_variability(self) -> Dict[str, float]:
        """
        Calculates basic metrics for glucose variability.

        Returns:
            Dict with mean/std/min/max glucose and time-in-range (70-180 mg/dL)
            as a percentage; TIR is 0.0 for an empty trace instead of raising.
        """
        glucose_actual = self.results['glucose_actual_mgdl']
        sample_count = len(glucose_actual)
        # Guard the division: an empty simulation trace would otherwise raise
        # ZeroDivisionError.
        time_in_range = 0.0
        if sample_count:
            time_in_range = (
                ((glucose_actual >= 70) & (glucose_actual <= 180)).sum() / sample_count
            ) * 100
        return {
            "mean_glucose": glucose_actual.mean(),
            "std_dev_glucose": glucose_actual.std(),
            "min_glucose": glucose_actual.min(),
            "max_glucose": glucose_actual.max(),
            "time_in_range_70_180": time_in_range
        }

    def _column_sum(self, column: str) -> float:
        """Sum a delivery column, treating a missing column as zero insulin."""
        if column in self.results.columns:
            return self.results[column].sum()
        return 0.0

    def analyze_insulin_response(self, algorithm_type: str = "") -> Dict[str, Any]:
        """
        Analyzes insulin delivery patterns.

        Columns that are absent from the results (e.g. 'correction_bolus_units'
        for algorithms without correction dosing) contribute 0.0 instead of
        raising KeyError.
        """
        return {
            "total_insulin_delivered": self._column_sum('delivered_insulin_units'),
            "total_basal_insulin": self._column_sum('basal_insulin_units'),
            "total_bolus_insulin": self._column_sum('bolus_insulin_units'),
            "total_correction_bolus": self._column_sum('correction_bolus_units'),
            "algorithm_type": algorithm_type  # For context
        }

    def perform_sensitivity_analysis(self,
                                     algorithm_instance: Any,
                                     parameter_name: str,
                                     original_value: float,
                                     perturbations: List[float],
                                     simulation_run_func: Callable[[Any, List[Any], int], pd.DataFrame],
                                     fixed_events: Optional[List[Any]] = None,
                                     duration_minutes: int = 1440  # 24 hours
                                     ) -> Dict[float, Dict[str, Union[float, str]]]:
        """
        Performs a basic sensitivity analysis by perturbing one algorithm parameter
        and observing the change in glucose metrics.

        Args:
            algorithm_instance (Any): An instance of the algorithm to test.
            parameter_name (str): The name of the parameter to perturb (e.g., 'carb_ratio').
            original_value (float): The original value of the parameter (kept for
                caller context; restoration uses a snapshot of all settings).
            perturbations (List[float]): A list of values to test for the parameter.
            simulation_run_func (Callable): A function that takes an algorithm instance,
                                            a list of fixed events, and duration_minutes,
                                            then returns simulation results (pd.DataFrame).
            fixed_events (List[Any]): Optional list of events to apply in each simulation run.
            duration_minutes (int): Duration for each simulation run.

        Returns:
            Dict[float, Dict[str, float]]: A dictionary where keys are the perturbed parameter values
                                           and values are dictionaries of glucose metrics
                                           (or {"error": message} for failed runs).
        """
        original_settings = algorithm_instance.settings.copy()
        analysis_results = {}

        try:
            for p_value in perturbations:
                # Temporarily modify the parameter
                algorithm_instance.settings[parameter_name] = p_value
                algorithm_instance.reset()  # Ensure algorithm state is reset

                try:
                    # Call the provided simulation run function
                    perturbed_results_df = simulation_run_func(algorithm_instance, fixed_events or [], duration_minutes)
                    temp_analyzer = ExplainabilityAnalyzer(perturbed_results_df)
                    analysis_results[p_value] = temp_analyzer.calculate_glucose_variability()
                except Exception as e:
                    # A failed run is recorded per perturbation instead of
                    # aborting the whole sweep.
                    print(f"Error during sensitivity analysis for {parameter_name}={p_value}: {e}")
                    analysis_results[p_value] = {"error": cast(Any, str(e))}
        finally:
            # Restore original settings even on unexpected errors (e.g.
            # KeyboardInterrupt) so the caller's algorithm is never left
            # with a perturbed configuration.
            algorithm_instance.settings = original_settings
            algorithm_instance.reset()

        return analysis_results  # type: ignore
|