iints-sdk-python35 0.0.18__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (118)
  1. iints/__init__.py +183 -0
  2. iints/analysis/__init__.py +12 -0
  3. iints/analysis/algorithm_xray.py +387 -0
  4. iints/analysis/baseline.py +92 -0
  5. iints/analysis/clinical_benchmark.py +198 -0
  6. iints/analysis/clinical_metrics.py +551 -0
  7. iints/analysis/clinical_tir_analyzer.py +136 -0
  8. iints/analysis/diabetes_metrics.py +43 -0
  9. iints/analysis/edge_efficiency.py +33 -0
  10. iints/analysis/edge_performance_monitor.py +315 -0
  11. iints/analysis/explainability.py +94 -0
  12. iints/analysis/explainable_ai.py +232 -0
  13. iints/analysis/hardware_benchmark.py +221 -0
  14. iints/analysis/metrics.py +117 -0
  15. iints/analysis/population_report.py +188 -0
  16. iints/analysis/reporting.py +345 -0
  17. iints/analysis/safety_index.py +311 -0
  18. iints/analysis/sensor_filtering.py +54 -0
  19. iints/analysis/validator.py +273 -0
  20. iints/api/__init__.py +0 -0
  21. iints/api/base_algorithm.py +307 -0
  22. iints/api/registry.py +103 -0
  23. iints/api/template_algorithm.py +195 -0
  24. iints/assets/iints_logo.png +0 -0
  25. iints/cli/__init__.py +0 -0
  26. iints/cli/cli.py +2598 -0
  27. iints/core/__init__.py +1 -0
  28. iints/core/algorithms/__init__.py +0 -0
  29. iints/core/algorithms/battle_runner.py +138 -0
  30. iints/core/algorithms/correction_bolus.py +95 -0
  31. iints/core/algorithms/discovery.py +92 -0
  32. iints/core/algorithms/fixed_basal_bolus.py +58 -0
  33. iints/core/algorithms/hybrid_algorithm.py +92 -0
  34. iints/core/algorithms/lstm_algorithm.py +138 -0
  35. iints/core/algorithms/mock_algorithms.py +162 -0
  36. iints/core/algorithms/pid_controller.py +88 -0
  37. iints/core/algorithms/standard_pump_algo.py +64 -0
  38. iints/core/device.py +0 -0
  39. iints/core/device_manager.py +64 -0
  40. iints/core/devices/__init__.py +3 -0
  41. iints/core/devices/models.py +160 -0
  42. iints/core/patient/__init__.py +9 -0
  43. iints/core/patient/bergman_model.py +341 -0
  44. iints/core/patient/models.py +285 -0
  45. iints/core/patient/patient_factory.py +117 -0
  46. iints/core/patient/profile.py +41 -0
  47. iints/core/safety/__init__.py +12 -0
  48. iints/core/safety/config.py +37 -0
  49. iints/core/safety/input_validator.py +95 -0
  50. iints/core/safety/supervisor.py +39 -0
  51. iints/core/simulation/__init__.py +0 -0
  52. iints/core/simulation/scenario_parser.py +61 -0
  53. iints/core/simulator.py +874 -0
  54. iints/core/supervisor.py +367 -0
  55. iints/data/__init__.py +53 -0
  56. iints/data/adapter.py +142 -0
  57. iints/data/column_mapper.py +398 -0
  58. iints/data/datasets.json +132 -0
  59. iints/data/demo/__init__.py +1 -0
  60. iints/data/demo/demo_cgm.csv +289 -0
  61. iints/data/importer.py +275 -0
  62. iints/data/ingestor.py +162 -0
  63. iints/data/nightscout.py +128 -0
  64. iints/data/quality_checker.py +550 -0
  65. iints/data/registry.py +166 -0
  66. iints/data/tidepool.py +38 -0
  67. iints/data/universal_parser.py +813 -0
  68. iints/data/virtual_patients/clinic_safe_baseline.yaml +9 -0
  69. iints/data/virtual_patients/clinic_safe_hyper_challenge.yaml +9 -0
  70. iints/data/virtual_patients/clinic_safe_hypo_prone.yaml +9 -0
  71. iints/data/virtual_patients/clinic_safe_midnight.yaml +9 -0
  72. iints/data/virtual_patients/clinic_safe_pizza.yaml +9 -0
  73. iints/data/virtual_patients/clinic_safe_stress_meal.yaml +9 -0
  74. iints/data/virtual_patients/default_patient.yaml +11 -0
  75. iints/data/virtual_patients/patient_559_config.yaml +11 -0
  76. iints/emulation/__init__.py +80 -0
  77. iints/emulation/legacy_base.py +414 -0
  78. iints/emulation/medtronic_780g.py +337 -0
  79. iints/emulation/omnipod_5.py +367 -0
  80. iints/emulation/tandem_controliq.py +393 -0
  81. iints/highlevel.py +451 -0
  82. iints/learning/__init__.py +3 -0
  83. iints/learning/autonomous_optimizer.py +194 -0
  84. iints/learning/learning_system.py +122 -0
  85. iints/metrics.py +34 -0
  86. iints/population/__init__.py +11 -0
  87. iints/population/generator.py +131 -0
  88. iints/population/runner.py +327 -0
  89. iints/presets/__init__.py +28 -0
  90. iints/presets/presets.json +114 -0
  91. iints/research/__init__.py +30 -0
  92. iints/research/config.py +68 -0
  93. iints/research/dataset.py +319 -0
  94. iints/research/losses.py +73 -0
  95. iints/research/predictor.py +329 -0
  96. iints/scenarios/__init__.py +3 -0
  97. iints/scenarios/generator.py +92 -0
  98. iints/templates/__init__.py +0 -0
  99. iints/templates/default_algorithm.py +91 -0
  100. iints/templates/scenarios/__init__.py +0 -0
  101. iints/templates/scenarios/chaos_insulin_stacking.json +29 -0
  102. iints/templates/scenarios/chaos_runaway_ai.json +25 -0
  103. iints/templates/scenarios/example_scenario.json +35 -0
  104. iints/templates/scenarios/exercise_stress.json +30 -0
  105. iints/utils/__init__.py +3 -0
  106. iints/utils/plotting.py +50 -0
  107. iints/utils/run_io.py +152 -0
  108. iints/validation/__init__.py +133 -0
  109. iints/validation/schemas.py +94 -0
  110. iints/visualization/__init__.py +34 -0
  111. iints/visualization/cockpit.py +691 -0
  112. iints/visualization/uncertainty_cloud.py +612 -0
  113. iints_sdk_python35-0.0.18.dist-info/METADATA +225 -0
  114. iints_sdk_python35-0.0.18.dist-info/RECORD +118 -0
  115. iints_sdk_python35-0.0.18.dist-info/WHEEL +5 -0
  116. iints_sdk_python35-0.0.18.dist-info/entry_points.txt +10 -0
  117. iints_sdk_python35-0.0.18.dist-info/licenses/LICENSE +28 -0
  118. iints_sdk_python35-0.0.18.dist-info/top_level.txt +1 -0
@@ -0,0 +1,398 @@
1
+ #!/usr/bin/env python3
2
+ """
3
+ Column Mapper - IINTS-AF
4
+ Maps various column names to standard IINTS format
5
+
6
+ Supports 50+ column name variations from different data sources.
7
+ """
8
+
9
+ from typing import Dict, List, Optional, Tuple, Any
10
+ from dataclasses import dataclass
11
+ import re
12
+ import pandas as pd # Required for type hints like pd.DataFrame
13
+
14
+
15
@dataclass
class ColumnMapping:
    """Outcome of mapping raw column names onto the standard IINTS fields.

    Attributes:
        mapped_columns: Standard field name -> original column name.
        unmapped_columns: Original columns that matched no standard field.
        confidence: Completeness score in [0, 1] for the mapping.
        warnings: Human-readable notes (missing fields, duplicate matches).
    """
    # standard field -> source column that supplies it
    mapped_columns: Dict[str, str]
    # source columns left over after mapping
    unmapped_columns: List[str]
    # weighted completeness score, clamped to [0, 1]
    confidence: float
    # diagnostic messages produced while mapping
    warnings: List[str]
22
+
23
+
24
class ColumnMapper:
    """
    Maps various column name aliases to the standard IINTS format.

    Standard format: [timestamp, glucose, carbs, insulin]

    Supported sources:
    - Ohio T1DM: timestamp, glucose_mg_dl, carbs, insulin
    - OpenAPS/Nightscout: dateString, sg, carbs, insulin
    - Dexcom: timestamp, glucose, meal_carbs, bolus
    - CareLink: Glucose, Carbohydrates, Insulin
    - Custom CSV: Any variation

    Matching strategy, in priority order:
    1. exact match against normalized aliases;
    2. substring match, restricted to aliases of 3+ characters so that tiny
       aliases like 't' or 'bg' cannot fire on unrelated names
       (e.g. 'heart_rate' must NOT map to 'timestamp' via 't');
    3. keyword fallback ('glucose'/'bg', 'carb', 'insulin'/'bolus',
       'time'/'date').
    """

    # Comprehensive column aliases for each standard field. Entries may use
    # any case/spacing; both sides are normalized before comparison.
    COLUMN_ALIASES = {
        'timestamp': [
            'timestamp', 'time', 'datetime', 'date', 'dateTime',
            'dateString', 'ts', 't', 'time_minutes', 'time_min',
            'minutes', 'elapsed_time', 'unix_time',
            'clock_time', 'reading_time', 'measurement_time',
            'created_at', 'recorded_at', 'sensor_timestamp'
        ],
        'glucose': [
            # NOTE: duplicate 'glucose_mgdl' entry removed.
            'glucose', 'bg', 'glucose_mg_dl', 'glucose_mgdl',
            'sg', 'sensor_glucose', 'cbg', 'blood_glucose',
            'glucose_value', 'glucose_reading', 'glucose_level',
            'glucose_concentration', 'Glucose',
            'BG', 'blood_glucose_mg_dl', 'interstitial_glucose',
            'meter_glucose', 'sensor_glucose_mg_dl'
        ],
        'carbs': [
            'carbs', 'carbohydrates', 'cho', 'carb_intake',
            'meal_carbs', 'carbs_grams', 'carbohydrate_intake',
            'carbs_g', 'Carbs', 'carbs_consumed', 'food_carbs',
            'meal_carbohydrates', 'cho grams', 'Carbohydrates',
            'CHO', 'carb_input', 'carbs_input'
        ],
        'insulin': [
            'insulin', 'insulin_delivered', 'insulin_units',
            'bolus', 'total_insulin', 'insulin_dose', 'insulin_value',
            'bolus_insulin', 'basal_insulin', 'Insulin', 'insulin_total',
            'delivered_insulin', 'IOB', 'insulin_on_board', 'correction_bolus',
            'meal_bolus', 'Insulin (U)', 'insulin_units_u'
        ]
    }

    # Data source detection patterns
    SOURCE_PATTERNS = {
        'ohio_t1dm': {
            'columns': ['glucose_mg_dl', 'timestamp', 'carbs', 'insulin'],
            'delimiter': ','
        },
        'openaps_nightscout': {
            'columns': ['dateString', 'sg', 'carbs', 'insulin'],
            'delimiter': ','
        },
        'dexcom': {
            'columns': ['timestamp', 'glucose', 'meal_carbs', 'bolus'],
            'delimiter': ','
        },
        'carelink': {
            'columns': ['Glucose', 'Carbohydrates', 'Insulin'],
            'delimiter': ','
        },
        'tandem': {
            'columns': ['Date', 'Sensor Glucose', 'Carbs', 'Insulin'],
            'delimiter': ','
        }
    }

    # Keyword groups for the last-resort fallback, checked in this order
    # (mirrors the original check order: glucose, carbs, insulin, time).
    _FALLBACK_KEYWORDS = (
        ('glucose', 'bg'),
        ('carb',),
        ('insulin', 'bolus'),
        ('time', 'date'),
    )

    # Lazily built {field: [normalized aliases]} lookup, shared by instances.
    _NORMALIZED_ALIASES: Optional[Dict[str, List[str]]] = None

    def __init__(self):
        # Per-instance memo of raw column name -> normalized name.
        self.mappings_cache: Dict[str, str] = {}

    # ------------------------------------------------------------------ #
    # Normalization helpers
    # ------------------------------------------------------------------ #

    @classmethod
    def _normalize(cls, name: str) -> str:
        """Lowercase *name*, collapse spaces/dashes/slashes to '_', drop parens."""
        normalized = re.sub(r'[\s\-/]+', '_', name.lower())
        normalized = re.sub(r'[()]', '', normalized)
        return normalized.strip('_')

    @classmethod
    def _alias_lookup(cls) -> Dict[str, List[str]]:
        """Return {standard_field: normalized, de-duplicated aliases}, built once.

        Normalizing the alias lists fixes the original exact-match step, which
        compared an already-lowercased name against raw aliases such as
        'Glucose', 'BG', 'IOB', 'cho grams' and 'Insulin (U)' that could
        never match.
        """
        if cls._NORMALIZED_ALIASES is None:
            cls._NORMALIZED_ALIASES = {
                field: list(dict.fromkeys(cls._normalize(a) for a in aliases))
                for field, aliases in cls.COLUMN_ALIASES.items()
            }
        return cls._NORMALIZED_ALIASES

    # ------------------------------------------------------------------ #
    # Public API
    # ------------------------------------------------------------------ #

    def detect_source(self, columns: List[str]) -> Optional[str]:
        """
        Detect the data source based on column names.

        Args:
            columns: List of column names from the data file

        Returns:
            Detected source name, or None if no pattern matches
        """
        columns_lower = [c.lower() for c in columns]

        for source, pattern in self.SOURCE_PATTERNS.items():
            expected = pattern['columns']
            matches = sum(
                1 for expected_col in expected
                if any(expected_col.lower() in col for col in columns_lower)
            )
            # 75% of the pattern's columns must be present (substring match).
            if matches >= len(expected) * 0.75:
                return source

        return None

    def normalize_column_name(self, column_name: str) -> str:
        """
        Normalize a column name to lowercase with underscores.

        Results are memoized per instance in ``mappings_cache``.

        Args:
            column_name: Original column name

        Returns:
            Normalized column name
        """
        if column_name not in self.mappings_cache:
            self.mappings_cache[column_name] = self._normalize(column_name)
        return self.mappings_cache[column_name]

    def find_standard_mapping(self, column_name: str) -> Optional[str]:
        """
        Find which standard field this column maps to.

        Args:
            column_name: Column name to map

        Returns:
            Standard field name ('timestamp', 'glucose', 'carbs', 'insulin')
            or None if no match is found
        """
        normalized = self.normalize_column_name(column_name)
        lookup = self._alias_lookup()

        # 1. Exact match against normalized aliases (highest priority).
        for standard_field, aliases in lookup.items():
            if normalized in aliases:
                return standard_field

        # 2. Substring match. Aliases shorter than 3 characters ('t', 'ts',
        #    'bg', 'sg') are exact-match-only: the original code let the 't'
        #    alias capture names like 'heart_rate' and 'Activity'.
        for standard_field, aliases in lookup.items():
            for alias in aliases:
                if len(alias) >= 3 and (alias in normalized or normalized in alias):
                    return standard_field

        # 3. Keyword fallback: field order follows COLUMN_ALIASES; a field
        #    matches when the name and one of that field's aliases share a
        #    keyword from the same group.
        for standard_field, aliases in lookup.items():
            for group in self._FALLBACK_KEYWORDS:
                if any(k in normalized for k in group) and any(
                    k in alias for alias in aliases for k in group
                ):
                    return standard_field

        return None

    def map_columns(self, columns: List[str]) -> 'ColumnMapping':
        """
        Map a list of columns to standard IINTS format.

        Args:
            columns: List of column names from data file

        Returns:
            ColumnMapping with results and confidence score
        """
        mapped_columns: Dict[str, str] = {}
        unmapped_columns: List[str] = []
        warnings: List[str] = []

        for column in columns:
            standard_field = self.find_standard_mapping(column)
            if standard_field is None:
                unmapped_columns.append(column)
            elif standard_field not in mapped_columns:
                # First match wins for each standard field.
                mapped_columns[standard_field] = column
            else:
                warnings.append(
                    f"Skipping duplicate mapping for '{standard_field}': "
                    f"'{column}' (already mapped to '{mapped_columns[standard_field]}')"
                )

        required_fields = ['timestamp', 'glucose']
        optional_fields = ['carbs', 'insulin']

        mapped_required = sum(1 for f in required_fields if f in mapped_columns)
        mapped_optional = sum(1 for f in optional_fields if f in mapped_columns)

        # Required fields dominate the score (70/30 weighting).
        confidence = (
            (mapped_required / len(required_fields)) * 0.7 +
            (mapped_optional / len(optional_fields)) * 0.3
        )

        for field in optional_fields:
            if field not in mapped_columns:
                warnings.append(f"Optional field '{field}' not found in data")

        for field in required_fields:
            if field not in mapped_columns:
                warnings.append(f"CRITICAL: Required field '{field}' not found!")
                confidence -= 0.2  # extra penalty on top of the missing weight

        return ColumnMapping(
            mapped_columns=mapped_columns,
            unmapped_columns=unmapped_columns,
            confidence=max(0, confidence),
            warnings=warnings,
        )

    def apply_mapping(self, df: 'pd.DataFrame', mapping: 'ColumnMapping') -> 'pd.DataFrame':
        """
        Apply column mapping to a DataFrame, renaming columns to standard format.

        Args:
            df: Pandas DataFrame
            mapping: ColumnMapping result

        Returns:
            A new DataFrame with standard column names first, missing standard
            columns filled with NaN, and all other columns preserved after them.
        """
        # Work on a copy so the caller's DataFrame is never mutated.
        result_df = df.copy()

        # mapped_columns is {standard: original}; rename wants {original: standard}.
        rename_dict = {v: k for k, v in mapping.mapped_columns.items()}
        result_df = result_df.rename(columns=rename_dict)

        standard_order = ['timestamp', 'glucose', 'carbs', 'insulin']

        # Add missing standard columns with NaN values.
        for field in standard_order:
            if field not in result_df.columns:
                result_df[field] = float('nan')

        # Standard columns first, everything else after, original order kept.
        existing_cols = [c for c in standard_order if c in result_df.columns]
        other_cols = [c for c in result_df.columns if c not in standard_order]
        return result_df[existing_cols + other_cols]

    def get_recommended_parser(self, source: str) -> str:
        """
        Get recommended parser configuration for a detected source.

        Args:
            source: Detected source name

        Returns:
            Parser configuration string
        """
        if source in self.SOURCE_PATTERNS:
            pattern = self.SOURCE_PATTERNS[source]
            return f"use_parser('{source}', delimiter='{pattern['delimiter']}')"

        return "use_parser('auto')"

    def get_source_info(self, source: str) -> Dict:
        """
        Get information about a detected data source.

        Args:
            source: Source name

        Returns:
            Dictionary with name/columns/delimiter/known_format keys
        """
        if source in self.SOURCE_PATTERNS:
            return {
                'name': source,
                'columns': self.SOURCE_PATTERNS[source]['columns'],
                'delimiter': self.SOURCE_PATTERNS[source]['delimiter'],
                'known_format': True
            }

        return {
            'name': source,
            'columns': [],
            'delimiter': ',',
            'known_format': False
        }
320
+
321
+
322
def demo_column_mapping():
    """Walk the ColumnMapper through several well-known CGM data layouts."""
    banner = "=" * 70
    rule = "-" * 50

    print(banner)
    print("COLUMN MAPPER DEMONSTRATION")
    print(banner)

    cm = ColumnMapper()

    def _show(columns, result, with_unmapped=False, with_warnings=False):
        # Shared reporting for each test case; flags control optional sections.
        print(f"Input columns: {columns}")
        print(f"Mapped: {result.mapped_columns}")
        if with_unmapped:
            print(f"Unmapped: {result.unmapped_columns}")
        print(f"Confidence: {result.confidence:.1%}")
        if with_warnings and result.warnings:
            print("Warnings:")
            for warning in result.warnings:
                print(f"   {warning}")

    # Test case 1: Ohio T1DM format
    print("\n Test Case 1: Ohio T1DM Format")
    print(rule)
    ohio_columns = ['timestamp', 'glucose_mg_dl', 'carbs', 'insulin', 'heart_rate']
    _show(ohio_columns, cm.map_columns(ohio_columns),
          with_unmapped=True, with_warnings=True)

    # Test case 2: OpenAPS/Nightscout format
    print("\n Test Case 2: OpenAPS/Nightscout Format")
    print(rule)
    openaps_columns = ['dateString', 'sg', 'carbs', 'insulin', 'iob', 'cob']
    _show(openaps_columns, cm.map_columns(openaps_columns))

    # Test case 3: Dexcom format
    print("\n Test Case 3: Dexcom Format")
    print(rule)
    dexcom_columns = ['timestamp', 'glucose', 'meal_carbs', 'bolus', 'isig']
    _show(dexcom_columns, cm.map_columns(dexcom_columns))

    # Test case 4: Custom format with variations
    print("\n Test Case 4: Custom Format (Variations)")
    print(rule)
    custom_columns = ['Time (min)', 'BG', 'CHO grams', 'Insulin (U)', 'Activity']
    _show(custom_columns, cm.map_columns(custom_columns), with_warnings=True)

    # Source detection against known layouts
    print("\n Source Detection")
    print(rule)
    detection_cases = [
        (ohio_columns, 'ohio_t1dm'),
        (openaps_columns, 'openaps_nightscout'),
        (dexcom_columns, 'dexcom'),
    ]
    for test_columns, expected_source in detection_cases:
        detected = cm.detect_source(test_columns)
        status = "✓" if detected == expected_source else "✗"
        print(f"{status} Columns: {test_columns[:3]}... → Detected: {detected}")

    print("\n" + banner)
    print("COLUMN MAPPER DEMONSTRATION COMPLETE")
    print(banner)
394
+
395
+
396
# Run the demonstration when this module is executed directly as a script.
if __name__ == "__main__":
    demo_column_mapping()
398
+
@@ -0,0 +1,132 @@
1
+ [
2
+ {
3
+ "id": "sample",
4
+ "name": "IINTS Sample CGM (Bundled)",
5
+ "source": "IINTS-AF",
6
+ "access": "bundled",
7
+ "license": "Demo-only (bundled with SDK)",
8
+ "description": "Tiny CGM sample for quickstart and offline demos.",
9
+ "bundled_path": "demo/demo_cgm.csv",
10
+ "sha256": "e869e033887e595ab4c41cb14cde6f2328cc9d55e0fb2c741e7fbe3e0ae3af96",
11
+ "citation": {
12
+ "text": "IINTS-AF Team. IINTS Sample CGM (bundled). Accessed 2026-02-16.",
13
+ "bibtex": "@misc{iints_sample_cgm, title={IINTS Sample CGM (bundled)}, author={IINTS-AF Team}, year={2026}, note={Bundled with IINTS-AF SDK, accessed 2026-02-16}}"
14
+ }
15
+ },
16
+ {
17
+ "id": "aide_t1d",
18
+ "name": "AIDE T1D Public Dataset",
19
+ "source": "Jaeb Center for Health Research",
20
+ "access": "public-download",
21
+ "license": "Public dataset (see Jaeb public datasets page)",
22
+ "description": "Automated Insulin Delivery in Elderly with Type 1 Diabetes (AIDE T1D) public dataset.",
23
+ "landing_page": "https://public.jaeb.org/datasets/",
24
+ "download_urls": [
25
+ "https://live-jchrpublicdatasets.s3.amazonaws.com/Diabetes/Public%20Datasets/AIDET1D_Public_Dataset.zip"
26
+ ],
27
+ "sha256": null,
28
+ "sha256_note": "Checksum not published by source; SHA256SUMS.txt will be generated after download.",
29
+ "citation": {
30
+ "text": "Jaeb Center for Health Research. Automated Insulin Delivery in Elderly with Type 1 Diabetes (AIDE T1D) Public Dataset. Public Study Websites. Accessed 2026-02-16. https://public.jaeb.org/datasets/",
31
+ "bibtex": "@misc{jaeb_aide_t1d, title={Automated Insulin Delivery in Elderly with Type 1 Diabetes (AIDE T1D) Public Dataset}, author={Jaeb Center for Health Research}, howpublished={Public Study Websites}, note={Accessed 2026-02-16}, url={https://public.jaeb.org/datasets/}}"
32
+ }
33
+ },
34
+ {
35
+ "id": "pedap",
36
+ "name": "PEDAP Public Dataset",
37
+ "source": "Jaeb Center for Health Research",
38
+ "access": "public-download",
39
+ "license": "Public dataset (see Jaeb public datasets page)",
40
+ "description": "Pediatric Artificial Pancreas (PEDAP) public dataset (Release 5).",
41
+ "landing_page": "https://public.jaeb.org/datasets/",
42
+ "download_urls": [
43
+ "https://live-jchrpublicdatasets.s3.amazonaws.com/Diabetes/Public%20Datasets/PEDAP%20Public%20Dataset%20-%20Release%205%20-%202025-05-12.zip"
44
+ ],
45
+ "sha256": null,
46
+ "sha256_note": "Checksum not published by source; SHA256SUMS.txt will be generated after download.",
47
+ "citation": {
48
+ "text": "Jaeb Center for Health Research. Pediatric Artificial Pancreas (PEDAP) Public Dataset, Release 5. Public Study Websites. Accessed 2026-02-16. https://public.jaeb.org/datasets/",
49
+ "bibtex": "@misc{jaeb_pedap_2025, title={Pediatric Artificial Pancreas (PEDAP) Public Dataset, Release 5}, author={Jaeb Center for Health Research}, howpublished={Public Study Websites}, note={Accessed 2026-02-16}, url={https://public.jaeb.org/datasets/}}"
50
+ }
51
+ },
52
+ {
53
+ "id": "azt1d",
54
+ "name": "AZT1D: A Real-World Dataset for Type 1 Diabetes",
55
+ "source": "Mendeley Data",
56
+ "access": "manual",
57
+ "license": "CC BY 4.0",
58
+ "description": "Real-world T1D dataset (CGM + insulin + meals) from 25 individuals on AID systems.",
59
+ "landing_page": "https://data.mendeley.com/datasets/gk9m674wcx/1",
60
+ "doi": "10.17632/gk9m674wcx.1",
61
+ "citation": {
62
+ "text": "Khamesian S, Arefeen A, Thompson BM, Grando A, Ghasemzadeh H. AZT1D: A Real-World Dataset for Type 1 Diabetes. Mendeley Data, v1, 2025. doi:10.17632/gk9m674wcx.1",
63
+ "bibtex": "@misc{azt1d_2025, title={AZT1D: A Real-World Dataset for Type 1 Diabetes}, author={Khamesian, Saman and Arefeen, Asiful and Thompson, Bithika M. and Grando, Adela and Ghasemzadeh, Hassan}, year={2025}, doi={10.17632/gk9m674wcx.1}, publisher={Mendeley Data}}"
64
+ }
65
+ },
66
+ {
67
+ "id": "hupa_ucm",
68
+ "name": "HUPA-UCM Diabetes Dataset",
69
+ "source": "Mendeley Data",
70
+ "access": "manual",
71
+ "license": "CC BY 4.0",
72
+ "description": "Free-living T1D dataset with CGM, insulin, meals, and activity data.",
73
+ "landing_page": "https://data.mendeley.com/datasets/3hbcscwz44/1",
74
+ "doi": "10.17632/3hbcscwz44.1",
75
+ "citation": {
76
+ "text": "Hidalgo JI, Alvarado J, Botella M, Aramendi A, Velasco JM, Garnica O. HUPA-UCM Diabetes Dataset. Mendeley Data, v1, 2024. doi:10.17632/3hbcscwz44.1",
77
+ "bibtex": "@misc{hupa_ucm_2024, title={HUPA-UCM Diabetes Dataset}, author={Hidalgo, J. Ignacio and Alvarado, Jorge and Botella, Marta and Aramendi, Aranzazu and Velasco, J. Manuel and Garnica, Oscar}, year={2024}, doi={10.17632/3hbcscwz44.1}, publisher={Mendeley Data}}"
78
+ }
79
+ },
80
+ {
81
+ "id": "openaps_data_commons",
82
+ "name": "OpenAPS Data Commons",
83
+ "source": "OpenAPS",
84
+ "access": "request",
85
+ "license": "Data use agreement (see OpenAPS)",
86
+ "description": "Community-contributed open APS data commons.",
87
+ "landing_page": "https://openaps.org/outcomes/data-commons/",
88
+ "citation": {
89
+ "text": "OpenAPS. OpenAPS Data Commons. Accessed 2026-02-16. https://openaps.org/outcomes/data-commons/",
90
+ "bibtex": "@misc{openaps_datacommons, title={OpenAPS Data Commons}, author={OpenAPS}, note={Accessed 2026-02-16}, url={https://openaps.org/outcomes/data-commons/}}"
91
+ }
92
+ },
93
+ {
94
+ "id": "tidepool_bigdata",
95
+ "name": "Tidepool Big Data Donation",
96
+ "source": "Tidepool",
97
+ "access": "request",
98
+ "license": "Research collaboration / approval required",
99
+ "description": "Large-scale real-world diabetes data donation program.",
100
+ "landing_page": "https://www.tidepool.org/bigdata",
101
+ "citation": {
102
+ "text": "Tidepool. Big Data Donation Project. Accessed 2026-02-16. https://www.tidepool.org/bigdata",
103
+ "bibtex": "@misc{tidepool_bddp, title={Big Data Donation Project}, author={Tidepool}, note={Accessed 2026-02-16}, url={https://www.tidepool.org/bigdata}}"
104
+ }
105
+ },
106
+ {
107
+ "id": "niddk_central",
108
+ "name": "NIDDK Central Repository",
109
+ "source": "NIDDK",
110
+ "access": "request",
111
+ "license": "Repository access agreement",
112
+ "description": "NIH/NIDDK centralized repository of clinical diabetes studies.",
113
+ "landing_page": "https://repository.niddk.nih.gov/",
114
+ "citation": {
115
+ "text": "NIDDK Central Repository. Accessed 2026-02-16. https://repository.niddk.nih.gov/",
116
+ "bibtex": "@misc{niddk_central_repo, title={NIDDK Central Repository}, author={NIDDK}, note={Accessed 2026-02-16}, url={https://repository.niddk.nih.gov/}}"
117
+ }
118
+ },
119
+ {
120
+ "id": "t1d_exchange",
121
+ "name": "T1D Exchange Clinic Registry",
122
+ "source": "Jaeb Center / T1D Exchange",
123
+ "access": "request",
124
+ "license": "Data request / approval required",
125
+ "description": "Large clinical registry for type 1 diabetes research.",
126
+ "landing_page": "https://datacatalog.med.nyu.edu/dataset/10129",
127
+ "citation": {
128
+ "text": "NYU Data Catalog. T1D Exchange Clinic Registry. Accessed 2026-02-16. https://datacatalog.med.nyu.edu/dataset/10129",
129
+ "bibtex": "@misc{t1d_exchange_registry, title={T1D Exchange Clinic Registry}, author={NYU Data Catalog}, note={Accessed 2026-02-16}, url={https://datacatalog.med.nyu.edu/dataset/10129}}"
130
+ }
131
+ }
132
+ ]
@@ -0,0 +1 @@
1
+ # Demo CGM data pack for IINTS-AF