iints-sdk-python35 0.1.7__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (93)
  1. iints/__init__.py +134 -0
  2. iints/analysis/__init__.py +12 -0
  3. iints/analysis/algorithm_xray.py +387 -0
  4. iints/analysis/baseline.py +92 -0
  5. iints/analysis/clinical_benchmark.py +198 -0
  6. iints/analysis/clinical_metrics.py +551 -0
  7. iints/analysis/clinical_tir_analyzer.py +136 -0
  8. iints/analysis/diabetes_metrics.py +43 -0
  9. iints/analysis/edge_performance_monitor.py +315 -0
  10. iints/analysis/explainability.py +94 -0
  11. iints/analysis/explainable_ai.py +232 -0
  12. iints/analysis/hardware_benchmark.py +221 -0
  13. iints/analysis/metrics.py +117 -0
  14. iints/analysis/reporting.py +261 -0
  15. iints/analysis/sensor_filtering.py +54 -0
  16. iints/analysis/validator.py +273 -0
  17. iints/api/__init__.py +0 -0
  18. iints/api/base_algorithm.py +300 -0
  19. iints/api/template_algorithm.py +195 -0
  20. iints/cli/__init__.py +0 -0
  21. iints/cli/cli.py +1286 -0
  22. iints/core/__init__.py +1 -0
  23. iints/core/algorithms/__init__.py +0 -0
  24. iints/core/algorithms/battle_runner.py +138 -0
  25. iints/core/algorithms/correction_bolus.py +86 -0
  26. iints/core/algorithms/discovery.py +92 -0
  27. iints/core/algorithms/fixed_basal_bolus.py +52 -0
  28. iints/core/algorithms/hybrid_algorithm.py +92 -0
  29. iints/core/algorithms/lstm_algorithm.py +138 -0
  30. iints/core/algorithms/mock_algorithms.py +69 -0
  31. iints/core/algorithms/pid_controller.py +88 -0
  32. iints/core/algorithms/standard_pump_algo.py +64 -0
  33. iints/core/device.py +0 -0
  34. iints/core/device_manager.py +64 -0
  35. iints/core/devices/__init__.py +3 -0
  36. iints/core/devices/models.py +155 -0
  37. iints/core/patient/__init__.py +3 -0
  38. iints/core/patient/models.py +246 -0
  39. iints/core/patient/patient_factory.py +117 -0
  40. iints/core/patient/profile.py +41 -0
  41. iints/core/safety/__init__.py +4 -0
  42. iints/core/safety/input_validator.py +87 -0
  43. iints/core/safety/supervisor.py +29 -0
  44. iints/core/simulation/__init__.py +0 -0
  45. iints/core/simulation/scenario_parser.py +61 -0
  46. iints/core/simulator.py +519 -0
  47. iints/core/supervisor.py +275 -0
  48. iints/data/__init__.py +42 -0
  49. iints/data/adapter.py +142 -0
  50. iints/data/column_mapper.py +398 -0
  51. iints/data/demo/__init__.py +1 -0
  52. iints/data/demo/demo_cgm.csv +289 -0
  53. iints/data/importer.py +275 -0
  54. iints/data/ingestor.py +162 -0
  55. iints/data/quality_checker.py +550 -0
  56. iints/data/universal_parser.py +813 -0
  57. iints/data/virtual_patients/clinic_safe_baseline.yaml +9 -0
  58. iints/data/virtual_patients/clinic_safe_hyper_challenge.yaml +9 -0
  59. iints/data/virtual_patients/clinic_safe_hypo_prone.yaml +9 -0
  60. iints/data/virtual_patients/clinic_safe_midnight.yaml +9 -0
  61. iints/data/virtual_patients/clinic_safe_pizza.yaml +9 -0
  62. iints/data/virtual_patients/clinic_safe_stress_meal.yaml +9 -0
  63. iints/data/virtual_patients/default_patient.yaml +11 -0
  64. iints/data/virtual_patients/patient_559_config.yaml +11 -0
  65. iints/emulation/__init__.py +80 -0
  66. iints/emulation/legacy_base.py +414 -0
  67. iints/emulation/medtronic_780g.py +337 -0
  68. iints/emulation/omnipod_5.py +367 -0
  69. iints/emulation/tandem_controliq.py +393 -0
  70. iints/highlevel.py +192 -0
  71. iints/learning/__init__.py +3 -0
  72. iints/learning/autonomous_optimizer.py +194 -0
  73. iints/learning/learning_system.py +122 -0
  74. iints/metrics.py +34 -0
  75. iints/presets/__init__.py +28 -0
  76. iints/presets/presets.json +114 -0
  77. iints/templates/__init__.py +0 -0
  78. iints/templates/default_algorithm.py +56 -0
  79. iints/templates/scenarios/__init__.py +0 -0
  80. iints/templates/scenarios/example_scenario.json +34 -0
  81. iints/utils/__init__.py +3 -0
  82. iints/utils/plotting.py +50 -0
  83. iints/validation/__init__.py +117 -0
  84. iints/validation/schemas.py +72 -0
  85. iints/visualization/__init__.py +34 -0
  86. iints/visualization/cockpit.py +691 -0
  87. iints/visualization/uncertainty_cloud.py +612 -0
  88. iints_sdk_python35-0.1.7.dist-info/METADATA +122 -0
  89. iints_sdk_python35-0.1.7.dist-info/RECORD +93 -0
  90. iints_sdk_python35-0.1.7.dist-info/WHEEL +5 -0
  91. iints_sdk_python35-0.1.7.dist-info/entry_points.txt +2 -0
  92. iints_sdk_python35-0.1.7.dist-info/licenses/LICENSE +28 -0
  93. iints_sdk_python35-0.1.7.dist-info/top_level.txt +1 -0
@@ -0,0 +1,43 @@
1
+ import numpy as np
2
+ import pandas as pd
3
+
4
class DiabetesMetrics:
    """Professional diabetes metrics for algorithm evaluation.

    All glucose values are in mg/dL. Methods accept any array-like input
    (list, numpy array, pandas Series); empty inputs return 0.0 instead of
    raising ZeroDivisionError.
    """

    @staticmethod
    def time_in_range(glucose_values, lower=70, upper=180):
        """Calculate Time In Range (TIR) percentage.

        Args:
            glucose_values: Array-like of glucose readings (mg/dL).
            lower: Lower bound of the target range, inclusive.
            upper: Upper bound of the target range, inclusive.

        Returns:
            Percentage (0-100) of readings inside [lower, upper];
            0.0 for empty input.
        """
        values = np.asarray(glucose_values, dtype=float)
        if values.size == 0:
            # Avoid ZeroDivisionError on empty input.
            return 0.0
        in_range = (values >= lower) & (values <= upper)
        return (in_range.sum() / values.size) * 100

    @staticmethod
    def coefficient_of_variation(glucose_values):
        """Calculate CV (%) - variability metric (std / mean * 100).

        Returns 0.0 for empty input or a zero mean (division guard).
        """
        values = np.asarray(glucose_values, dtype=float)
        if values.size == 0:
            return 0.0
        mean = np.mean(values)
        if mean == 0:
            # Guard against division by zero.
            return 0.0
        return (np.std(values) / mean) * 100

    @staticmethod
    def blood_glucose_risk_index(glucose_values, risk_type='high'):
        """Calculate LBGI (risk_type='low') or HBGI (risk_type='high').

        Uses the symmetrized risk transform f(bg) = 1.509*(ln(bg)^1.084 - 5.381);
        readings below 112.5 mg/dL contribute to the low index, readings above
        contribute to the high index. Returns 0.0 for empty input.
        """
        def risk_function(bg):
            # 112.5 mg/dL is the risk-neutral point of the transformed scale.
            if risk_type == 'low':
                return 10 * (1.509 * (np.log(bg)**1.084 - 5.381))**2 if bg < 112.5 else 0
            else:  # high
                return 10 * (1.509 * (np.log(bg)**1.084 - 5.381))**2 if bg > 112.5 else 0

        values = np.asarray(glucose_values, dtype=float)
        if values.size == 0:
            return 0.0
        risks = [risk_function(bg) for bg in values]
        return np.mean(risks)

    @staticmethod
    def calculate_all_metrics(df, baseline=120):
        """Calculate comprehensive metrics suite.

        Args:
            df: DataFrame containing a 'glucose_actual_mgdl' column.
            baseline: Unused; retained for backward compatibility with callers.

        Returns:
            Dict with peak glucose, TIR, CV, LBGI, HBGI, mean, and std.
        """
        glucose = df['glucose_actual_mgdl']

        return {
            "peak_glucose_mgdl": glucose.max(),
            "tir_percentage": DiabetesMetrics.time_in_range(glucose),
            "cv_percentage": DiabetesMetrics.coefficient_of_variation(glucose),
            "lbgi": DiabetesMetrics.blood_glucose_risk_index(glucose, 'low'),
            "hbgi": DiabetesMetrics.blood_glucose_risk_index(glucose, 'high'),
            "mean_glucose": glucose.mean(),
            "glucose_std": glucose.std()
        }
@@ -0,0 +1,315 @@
1
+ #!/usr/bin/env python3
2
+ """
3
+ Edge AI Performance Monitor - IINTS-AF
4
+ Jetson Nano performance validation for medical device standards
5
+ """
6
+
7
+ import time
8
+ import psutil
9
+ import json
10
+ from pathlib import Path
11
+ from datetime import datetime
12
+ import numpy as np
13
+
14
class EdgeAIPerformanceMonitor:
    """Monitor Jetson Nano performance for medical device validation.

    Measures AI inference latency together with CPU/memory deltas via psutil,
    classifies the results against embedded-system thresholds, and can render
    a human-readable report or export raw data as JSON.
    """

    def __init__(self):
        # One dict per measurement session (appended by measure_inference_latency).
        self.performance_log = []
        # System snapshot captured when monitoring starts; None until then.
        self.baseline_metrics = None
        # Prevents re-capturing the baseline on repeated measurement calls.
        self.monitoring_active = False

    def start_monitoring(self):
        """Start performance monitoring session.

        Captures a baseline system snapshot and prints it. Safe to call
        explicitly; measure_inference_latency calls it lazily otherwise.
        """
        self.monitoring_active = True
        self.baseline_metrics = self._capture_baseline()

        print("Edge AI Performance Monitoring Started")
        print(f"Baseline CPU: {self.baseline_metrics['cpu_percent']:.1f}%")
        print(f"Baseline Memory: {self.baseline_metrics['memory_mb']:.1f} MB")
        print(f"Available Memory: {self.baseline_metrics['available_memory_mb']:.1f} MB")

    def measure_inference_latency(self, inference_function, input_data, iterations=100):
        """Measure AI inference latency with statistical analysis.

        Args:
            inference_function: Callable invoked as inference_function(input_data).
            input_data: Opaque payload forwarded to the callable on every iteration.
            iterations: Number of timed invocations to attempt.

        Returns:
            The statistics dict from _analyze_performance_stats. Iterations that
            raise are skipped (logged to stdout), so fewer than `iterations`
            samples may be recorded.
        """

        if not self.monitoring_active:
            self.start_monitoring()

        latencies = []
        cpu_usage = []
        memory_usage = []

        print(f"Measuring inference latency over {iterations} iterations...")

        for i in range(iterations):
            # Pre-inference metrics.
            # NOTE(review): psutil.cpu_percent() without an interval reports usage
            # since the previous call; the very first sample may be unrepresentative.
            cpu_before = psutil.cpu_percent()
            memory_before = psutil.virtual_memory().used / (1024 * 1024)  # MB

            # Measure inference time with a monotonic high-resolution clock.
            start_time = time.perf_counter()

            try:
                result = inference_function(input_data)
            except Exception as e:
                # Skip failed iterations; they contribute no latency sample.
                print(f"Inference error at iteration {i}: {e}")
                continue

            end_time = time.perf_counter()

            # Post-inference metrics.
            cpu_after = psutil.cpu_percent()
            memory_after = psutil.virtual_memory().used / (1024 * 1024)  # MB

            # Calculate metrics (latency in milliseconds, deltas around the call).
            latency_ms = (end_time - start_time) * 1000
            cpu_delta = cpu_after - cpu_before
            memory_delta = memory_after - memory_before

            latencies.append(latency_ms)
            cpu_usage.append(cpu_delta)
            memory_usage.append(memory_delta)

            # Log detailed metrics every 10 iterations (rolling 10-sample mean).
            if (i + 1) % 10 == 0:
                avg_latency = np.mean(latencies[-10:])
                print(f"Iteration {i+1:3d}: Avg latency {avg_latency:.2f}ms")

        # Statistical analysis over the successful samples only.
        performance_stats = self._analyze_performance_stats(latencies, cpu_usage, memory_usage)

        # Log performance session (raw samples retained for export/offline review).
        self.performance_log.append({
            'timestamp': datetime.now().isoformat(),
            'test_type': 'inference_latency',
            'iterations': len(latencies),
            'statistics': performance_stats,
            'raw_latencies_ms': latencies,
            'cpu_usage_delta': cpu_usage,
            'memory_usage_delta_mb': memory_usage
        })

        return performance_stats

    def _analyze_performance_stats(self, latencies, cpu_usage, memory_usage):
        """Analyze performance statistics for medical device validation.

        Args:
            latencies: List of per-call latencies in milliseconds.
            cpu_usage: List of CPU-percent deltas around each call.
            memory_usage: List of memory deltas (MB) around each call.

        Returns:
            Dict with latency statistics, a latency classification string,
            a consistency rating, CPU/memory impact summaries, and the
            embedded-suitability assessment. Returns an error dict if no
            samples were collected.
        """

        if not latencies:
            return {'error': 'No valid measurements'}

        latency_stats = {
            'mean_ms': round(np.mean(latencies), 3),
            'median_ms': round(np.median(latencies), 3),
            'std_ms': round(np.std(latencies), 3),
            'min_ms': round(np.min(latencies), 3),
            'max_ms': round(np.max(latencies), 3),
            'p95_ms': round(np.percentile(latencies, 95), 3),
            'p99_ms': round(np.percentile(latencies, 99), 3)
        }

        # Embedded system performance classification by mean latency band.
        mean_latency = latency_stats['mean_ms']
        if mean_latency < 10:
            performance_class = "SUB_10MS_LATENCY - Real-time capable"
        elif mean_latency < 50:
            performance_class = "SUB_50MS_LATENCY - Near real-time suitable"
        elif mean_latency < 100:
            performance_class = "SUB_100MS_LATENCY - Batch processing suitable"
        else:
            performance_class = "OPTIMIZATION_REQUIRED - Exceeds embedded constraints"

        # Consistency analysis (coefficient of variation of latency).
        cv_percent = (latency_stats['std_ms'] / latency_stats['mean_ms']) * 100

        if cv_percent < 5:
            consistency_rating = "HIGHLY CONSISTENT"
        elif cv_percent < 15:
            consistency_rating = "CONSISTENT"
        elif cv_percent < 30:
            consistency_rating = "MODERATELY VARIABLE"
        else:
            consistency_rating = "HIGHLY VARIABLE - Investigate"

        return {
            'latency_statistics': latency_stats,
            'performance_classification': performance_class,
            'consistency_rating': consistency_rating,
            'coefficient_of_variation_percent': round(cv_percent, 2),
            'cpu_impact': {
                'mean_delta_percent': round(np.mean(cpu_usage), 2),
                'max_delta_percent': round(np.max(cpu_usage), 2)
            },
            'memory_impact': {
                'mean_delta_mb': round(np.mean(memory_usage), 2),
                'max_delta_mb': round(np.max(memory_usage), 2)
            },
            'medical_device_assessment': self._assess_embedded_suitability(mean_latency, cv_percent)
        }

    def _assess_embedded_suitability(self, mean_latency, cv_percent):
        """Assess suitability for embedded system deployment.

        Args:
            mean_latency: Mean inference latency in milliseconds.
            cv_percent: Latency coefficient of variation in percent.

        Returns:
            Dict with an overall suitability string, pass count, per-criterion
            booleans, and optimization recommendations.
        """

        # Embedded system criteria (thresholds defined by this framework).
        criteria = {
            'real_time_response': mean_latency < 100,  # < 100ms for real-time
            'consistent_performance': cv_percent < 20,  # < 20% variation
            'low_latency': mean_latency < 50,  # < 50ms preferred
            'high_reliability': cv_percent < 10  # < 10% for high reliability
        }

        passed_criteria = sum(criteria.values())
        total_criteria = len(criteria)

        if passed_criteria == total_criteria:
            suitability = "EMBEDDED_OPTIMAL - Compatible with real-time constraints"
        elif passed_criteria >= 3:
            suitability = "EMBEDDED_SUITABLE - Compatible with near real-time applications"
        elif passed_criteria >= 2:
            suitability = "RESEARCH_GRADE - Suitable for research and development"
        else:
            suitability = "OPTIMIZATION_REQUIRED - Requires performance tuning"

        return {
            'suitability_rating': suitability,
            'criteria_passed': f"{passed_criteria}/{total_criteria}",
            'detailed_criteria': criteria,
            'recommendations': self._generate_optimization_recommendations(criteria)
        }

    def _generate_optimization_recommendations(self, criteria):
        """Generate optimization recommendations based on failed criteria.

        Args:
            criteria: The boolean criteria dict from _assess_embedded_suitability.

        Returns:
            Non-empty list of human-readable recommendation strings.
        """

        recommendations = []

        if not criteria['real_time_response']:
            recommendations.append("Optimize model architecture for faster inference")

        if not criteria['consistent_performance']:
            recommendations.append("Investigate system load variations and thermal throttling")

        if not criteria['low_latency']:
            recommendations.append("Consider model quantization or pruning techniques")

        if not criteria['high_reliability']:
            recommendations.append("Implement performance monitoring and adaptive scheduling")

        if not recommendations:
            recommendations.append("Performance meets embedded system constraints")

        return recommendations

    def _capture_baseline(self):
        """Capture baseline system metrics via psutil.

        Returns:
            Dict of timestamp, CPU percent (1s sampled), memory usage/availability
            in MB, CPU count and frequency ('Unknown' when psutil cannot report it).
        """

        memory = psutil.virtual_memory()

        return {
            'timestamp': datetime.now().isoformat(),
            # interval=1 blocks for one second to get a real utilization sample.
            'cpu_percent': psutil.cpu_percent(interval=1),
            'memory_mb': memory.used / (1024 * 1024),
            'available_memory_mb': memory.available / (1024 * 1024),
            'memory_percent': memory.percent,
            'cpu_count': psutil.cpu_count(),
            # cpu_freq() can return None on some platforms.
            'cpu_freq_mhz': psutil.cpu_freq().current if psutil.cpu_freq() else 'Unknown'
        }

    def generate_performance_report(self):
        """Generate comprehensive performance report for the latest session.

        Returns:
            A formatted multi-line report string, or a short notice when no
            session has been recorded yet.
        """

        if not self.performance_log:
            return "No performance data available"

        # Report covers the most recent measurement session only.
        latest_session = self.performance_log[-1]
        stats = latest_session['statistics']

        report = f"""
EDGE AI PERFORMANCE VALIDATION REPORT
=====================================

Test Configuration:
- Device: Jetson Nano (Edge AI Platform)
- Test Date: {latest_session['timestamp'][:19]}
- Iterations: {latest_session['iterations']}
- Test Type: {latest_session['test_type']}

INFERENCE PERFORMANCE:
- Mean Latency: {stats['latency_statistics']['mean_ms']:.3f} ms
- Median Latency: {stats['latency_statistics']['median_ms']:.3f} ms
- 95th Percentile: {stats['latency_statistics']['p95_ms']:.3f} ms
- 99th Percentile: {stats['latency_statistics']['p99_ms']:.3f} ms
- Standard Deviation: {stats['latency_statistics']['std_ms']:.3f} ms

PERFORMANCE CLASSIFICATION:
- Overall Rating: {stats['performance_classification']}
- Consistency: {stats['consistency_rating']}
- Coefficient of Variation: {stats['coefficient_of_variation_percent']:.2f}%

EMBEDDED SYSTEM ASSESSMENT:
- Suitability: {stats['medical_device_assessment']['suitability_rating']}
- Criteria Passed: {stats['medical_device_assessment']['criteria_passed']}

SYSTEM IMPACT:
- CPU Usage Delta: {stats['cpu_impact']['mean_delta_percent']:.2f}% (avg), {stats['cpu_impact']['max_delta_percent']:.2f}% (max)
- Memory Usage Delta: {stats['memory_impact']['mean_delta_mb']:.2f} MB (avg), {stats['memory_impact']['max_delta_mb']:.2f} MB (max)

RECOMMENDATIONS:
"""

        for rec in stats['medical_device_assessment']['recommendations']:
            report += f"- {rec}\n"

        return report

    def export_performance_data(self, filepath):
        """Export performance data for analysis.

        Args:
            filepath: Destination path for the JSON dump.

        Returns:
            The filepath, unchanged, for caller convenience.
        """

        export_data = {
            'export_timestamp': datetime.now().isoformat(),
            'baseline_metrics': self.baseline_metrics,
            'performance_sessions': self.performance_log,
            'summary_report': self.generate_performance_report()
        }

        # default=str makes non-JSON types (e.g. datetimes) serializable.
        with open(filepath, 'w') as f:
            json.dump(export_data, f, indent=2, default=str)

        return filepath
277
+
278
# Mock inference function for testing
def mock_ai_inference(input_data):
    """Mock AI inference function for testing"""
    # Emulate a small neural network: a fixed computation cost plus a
    # stochastic jitter component drawn from an exponential distribution.
    base_latency_s = 0.008  # 8ms base latency
    jitter_s = np.random.exponential(0.002)  # Variable component

    time.sleep(base_latency_s)
    time.sleep(jitter_s)

    return {"prediction": np.random.random(), "confidence": np.random.random()}
288
+
289
def main():
    """Test edge AI performance monitoring"""
    monitor = EdgeAIPerformanceMonitor()

    print("IINTS-AF Edge AI Performance Validation")
    print("=" * 45)

    # Exercise the latency benchmark against the mock inference function.
    sample_payload = {"glucose": 150, "trend": [145, 148, 150]}
    performance_stats = monitor.measure_inference_latency(
        mock_ai_inference, sample_payload, iterations=50
    )

    # Render and display the human-readable validation report.
    print(monitor.generate_performance_report())

    # Persist raw measurements alongside the summary for offline analysis.
    output_path = Path("results") / "edge_ai_performance.json"
    output_path.parent.mkdir(exist_ok=True)
    monitor.export_performance_data(output_path)

    print(f"\nPerformance data exported to: {output_path}")

if __name__ == "__main__":
    main()
@@ -0,0 +1,94 @@
1
+ import pandas as pd
2
+ import numpy as np
3
+ from typing import List, Dict, Any, Callable, Optional, Union, cast
4
+
5
class ExplainabilityAnalyzer:
    """
    Provides tools for analyzing and explaining the behavior of insulin algorithms.
    This module implements 'AI as explainability tool' by performing sensitivity analysis.
    """

    def __init__(self, simulation_results: pd.DataFrame):
        # Simulation output; expected to contain 'glucose_actual_mgdl' plus
        # insulin delivery columns (see analyze_insulin_response).
        self.results = simulation_results

    def calculate_glucose_variability(self) -> Dict[str, float]:
        """
        Calculates basic metrics for glucose variability.

        Returns:
            Dict with mean/std/min/max glucose and time-in-range (70-180 mg/dL)
            as a percentage. Time in range is 0.0 for an empty result set
            (avoids a ZeroDivisionError).
        """
        glucose_actual = self.results['glucose_actual_mgdl']
        n = len(glucose_actual)
        time_in_range = (
            (((glucose_actual >= 70) & (glucose_actual <= 180)).sum() / n) * 100
            if n else 0.0
        )
        return {
            "mean_glucose": glucose_actual.mean(),
            "std_dev_glucose": glucose_actual.std(),
            "min_glucose": glucose_actual.min(),
            "max_glucose": glucose_actual.max(),
            "time_in_range_70_180": time_in_range
        }

    def analyze_insulin_response(self, algorithm_type: str = "") -> Dict[str, Any]:
        """
        Analyzes insulin delivery patterns.

        Args:
            algorithm_type: Optional label stored in the output for context.

        Returns:
            Totals for delivered, basal, bolus, and correction-bolus insulin.
            The 'correction_bolus_units' column is optional; algorithms that do
            not emit it are reported with a 0.0 total instead of raising KeyError.
        """
        total_insulin = self.results['delivered_insulin_units'].sum()
        basal_insulin = self.results['basal_insulin_units'].sum()
        bolus_insulin = self.results['bolus_insulin_units'].sum()
        # Not all algorithms produce a correction-bolus column; default to 0.0.
        if 'correction_bolus_units' in self.results.columns:
            correction_bolus = self.results['correction_bolus_units'].sum()
        else:
            correction_bolus = 0.0

        return {
            "total_insulin_delivered": total_insulin,
            "total_basal_insulin": basal_insulin,
            "total_bolus_insulin": bolus_insulin,
            "total_correction_bolus": correction_bolus,
            "algorithm_type": algorithm_type  # For context
        }

    def perform_sensitivity_analysis(self,
                                     algorithm_instance: Any,
                                     parameter_name: str,
                                     original_value: float,
                                     perturbations: List[float],
                                     simulation_run_func: Callable[[Any, List[Any], int], pd.DataFrame],
                                     fixed_events: Optional[List[Any]] = None,
                                     duration_minutes: int = 1440  # 24 hours
                                     ) -> Dict[float, Dict[str, Union[float, str]]]:
        """
        Performs a basic sensitivity analysis by perturbing one algorithm parameter
        and observing the change in glucose metrics.

        Args:
            algorithm_instance (Any): An instance of the algorithm to test.
            parameter_name (str): The name of the parameter to perturb (e.g., 'carb_ratio').
            original_value (float): The original value of the parameter.
            perturbations (List[float]): A list of values to test for the parameter.
            simulation_run_func (Callable): A function that takes an algorithm instance,
                                            a list of fixed events, and duration_minutes,
                                            then returns simulation results (pd.DataFrame).
            fixed_events (List[Any]): Optional list of events to apply in each simulation run.
            duration_minutes (int): Duration for each simulation run.

        Returns:
            Dict[float, Dict[str, float]]: A dictionary where keys are the perturbed parameter values
                                           and values are dictionaries of glucose metrics (or an
                                           {'error': ...} entry for runs that failed).
        """
        original_settings = algorithm_instance.settings.copy()
        analysis_results = {}

        try:
            for p_value in perturbations:
                # Temporarily modify the parameter and reset algorithm state.
                algorithm_instance.settings[parameter_name] = p_value
                algorithm_instance.reset()

                try:
                    # Call the provided simulation run function.
                    perturbed_results_df = simulation_run_func(algorithm_instance, fixed_events or [], duration_minutes)
                    temp_analyzer = ExplainabilityAnalyzer(perturbed_results_df)
                    analysis_results[p_value] = temp_analyzer.calculate_glucose_variability()
                except Exception as e:
                    print(f"Error during sensitivity analysis for {parameter_name}={p_value}: {e}")
                    analysis_results[p_value] = {"error": cast(Any, str(e))}
        finally:
            # Always restore the caller's settings, even on unexpected errors
            # (e.g. KeyboardInterrupt), so the instance is never left perturbed.
            algorithm_instance.settings = original_settings
            algorithm_instance.reset()

        return analysis_results  # type: ignore
@@ -0,0 +1,232 @@
1
+ #!/usr/bin/env python3
2
+ """
3
+ Explainable AI Audit Trail - IINTS-AF
4
+ Clinical decision transparency system for medical AI validation
5
+ """
6
+
7
+ import json
8
+ from datetime import datetime, timedelta
9
+ from pathlib import Path
10
+ import numpy as np
11
+
12
class ClinicalAuditTrail:
    """Explainable AI system for clinical decision transparency"""

    def __init__(self):
        # Chronological list of audit entries, one dict per logged decision.
        self.audit_log = []
        # Reserved for cross-decision contextual state.
        self.decision_context = {}

    def log_decision(self, timestamp, glucose_current, glucose_trend, insulin_decision,
                     algorithm_confidence, safety_override=False, context=None):
        """Log AI decision with clinical reasoning"""
        # Glucose rate of change derived from the recent trend (mg/dL per minute).
        velocity = self._calculate_velocity(glucose_trend)

        # Assemble the complete audit record for this decision.
        entry = {
            'timestamp': timestamp,
            'glucose_mg_dL': glucose_current,
            'glucose_velocity_per_min': velocity,
            'insulin_decision_U': insulin_decision,
            'algorithm_confidence': algorithm_confidence,
            'safety_override': safety_override,
            'clinical_reasoning': self._generate_clinical_reasoning(
                glucose_current, velocity, insulin_decision,
                algorithm_confidence, safety_override, context
            ),
            'risk_assessment': self._assess_immediate_risk(glucose_current, velocity),
            'decision_category': self._categorize_decision(insulin_decision, glucose_current),
        }

        self.audit_log.append(entry)
        return entry

    def _calculate_velocity(self, glucose_trend):
        """Calculate glucose rate of change"""
        if not glucose_trend:
            return 0.0

        # Window over the last 3 readings (~15 minutes at 5-minute sampling).
        window = glucose_trend[-3:]
        if len(window) < 2:
            return 0.0

        # Endpoint slope in mg/dL per 5-minute interval, then per minute.
        intervals = len(window) - 1
        per_five_minutes = (window[-1] - window[0]) / intervals
        return per_five_minutes / 5.0

    def _generate_clinical_reasoning(self, glucose, velocity, insulin, confidence, override, context):
        """Generate human-readable clinical reasoning"""
        parts = []

        # 1) Glucose status relative to the 70-180 mg/dL target band.
        if glucose < 70:
            parts.append(f"Hypoglycemia detected ({glucose:.1f} mg/dL)")
        elif glucose > 180:
            parts.append(f"Hyperglycemia detected ({glucose:.1f} mg/dL)")
        else:
            parts.append(f"Glucose in target range ({glucose:.1f} mg/dL)")

        # 2) Trend characterization by absolute rate of change.
        speed = abs(velocity)
        if speed > 2.0:
            direction = "rising" if velocity > 0 else "falling"
            parts.append(f"Rapid glucose {direction} at {speed:.1f} mg/dL/min")
        elif speed > 1.0:
            direction = "increasing" if velocity > 0 else "decreasing"
            parts.append(f"Moderate glucose {direction} trend")
        else:
            parts.append("Stable glucose trend")

        # 3) Rationale for the insulin decision itself.
        if insulin > 0:
            if glucose > 150 and velocity > 1.0:
                parts.append(f"Corrective bolus {insulin:.2f}U for hyperglycemia with rising trend")
            elif glucose > 180:
                parts.append(f"Correction bolus {insulin:.2f}U for hyperglycemia")
            else:
                parts.append(f"Preventive insulin {insulin:.2f}U based on predictive model")
        elif insulin < 0:
            parts.append(f"Basal reduction {abs(insulin):.2f}U to prevent hypoglycemia")
        else:
            parts.append("No insulin adjustment - maintaining current therapy")

        # 4) Note unusually low or high algorithm confidence.
        if confidence < 0.7:
            parts.append(f"Low AI confidence ({confidence:.2f}) - conservative approach")
        elif confidence > 0.9:
            parts.append(f"High AI confidence ({confidence:.2f}) - optimal conditions")

        # 5) Safety supervisor intervention, if any.
        if override:
            parts.append("SAFETY OVERRIDE: Decision modified by clinical safety supervisor")

        # 6) Situational context flags supplied by the caller.
        if context:
            if context.get('meal_detected'):
                parts.append("Meal bolus component included")
            if context.get('exercise_detected'):
                parts.append("Exercise adjustment applied")
            if context.get('sensor_noise'):
                parts.append("Sensor reliability considered")

        return ". ".join(parts) + "."

    def _assess_immediate_risk(self, glucose, velocity):
        """Assess immediate clinical risk"""
        # Linear 30-minute projection from the current rate of change.
        projected = glucose + (velocity * 30)

        if glucose < 54 or projected < 54:
            return "CRITICAL - Severe hypoglycemia risk"
        if glucose < 70 or projected < 70:
            return "HIGH - Hypoglycemia risk"
        if glucose > 300 or projected > 300:
            return "HIGH - Severe hyperglycemia risk"
        if glucose > 250 or projected > 250:
            return "MODERATE - Hyperglycemia risk"
        if 70 <= projected <= 180:
            return "LOW - Target range maintained"
        return "MODERATE - Glucose excursion predicted"

    def _categorize_decision(self, insulin, glucose):
        """Categorize the type of clinical decision"""
        # Insulin magnitude takes precedence; glucose band decides the rest.
        if insulin > 1.0:
            return "CORRECTIVE_BOLUS"
        if insulin > 0.1:
            return "MICRO_BOLUS"
        if insulin < -0.1:
            return "BASAL_REDUCTION"
        if glucose < 70:
            return "HYPOGLYCEMIA_MANAGEMENT"
        if glucose > 180:
            return "HYPERGLYCEMIA_MANAGEMENT"
        return "MAINTENANCE_THERAPY"

    def generate_clinical_summary(self, hours=24):
        """Generate clinical summary for specified time period"""
        if not self.audit_log:
            return "No clinical decisions recorded"

        # 12 entries per hour assuming 5-minute decision intervals.
        window = self.audit_log[-int(hours * 12):]

        decision_counts = {}
        risk_levels = {}
        total_insulin = 0

        for item in window:
            category = item['decision_category']
            level = item['risk_assessment'].split(' - ')[0]

            decision_counts[category] = decision_counts.get(category, 0) + 1
            risk_levels[level] = risk_levels.get(level, 0) + 1
            total_insulin += item['insulin_decision_U']

        return (
            f"Clinical Decision Summary ({hours}h period):\n"
            f"Total insulin delivered: {total_insulin:.2f}U\n"
            f"Decision types: {dict(decision_counts)}\n"
            f"Risk distribution: {dict(risk_levels)}\n"
        )

    def export_audit_trail(self, filepath):
        """Export audit trail for clinical review"""
        payload = {
            'export_timestamp': datetime.now().isoformat(),
            'total_decisions': len(self.audit_log),
            'audit_entries': self.audit_log,
            'clinical_summary': self.generate_clinical_summary()
        }

        # default=str covers datetime timestamps stored in audit entries.
        with open(filepath, 'w') as f:
            json.dump(payload, f, indent=2, default=str)

        return filepath
201
+
202
def main():
    """Test explainable AI audit trail"""
    trail = ClinicalAuditTrail()

    # Each tuple: (glucose, trend, insulin, confidence, safety_override, context)
    scenarios = [
        (120, [115, 118, 120], 0.2, 0.85, False, None),  # Stable
        (185, [170, 178, 185], 1.5, 0.92, False, {'meal_detected': True}),  # Rising
        (65, [75, 70, 65], -0.8, 0.78, True, None),  # Falling with override
    ]

    print("Explainable AI Clinical Audit Trail")
    print("=" * 50)

    for index, scenario in enumerate(scenarios):
        glucose, trend, insulin, confidence, override, context = scenario
        when = datetime.now() + timedelta(minutes=index * 5)

        record = trail.log_decision(
            when, glucose, trend, insulin, confidence, override, context
        )

        print(f"\nDecision {index+1}:")
        print(f"Time: {when.strftime('%H:%M')}")
        print(f"Clinical Reasoning: {record['clinical_reasoning']}")
        print(f"Risk Assessment: {record['risk_assessment']}")
        print(f"Decision Category: {record['decision_category']}")

    print(f"\n{trail.generate_clinical_summary(1)}")

if __name__ == "__main__":
    main()