paradigma 1.0.3__py3-none-any.whl → 1.0.4__py3-none-any.whl

This diff shows the content of publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their respective public registries.
paradigma/testing.py CHANGED
@@ -1,54 +1,100 @@
 import json
-import numpy as np
 import os
-import pandas as pd
 from pathlib import Path
-import tsdf
 from typing import List
 
+import numpy as np
+import pandas as pd
+import tsdf
+
 from paradigma.classification import ClassifierPackage
-from paradigma.config import IMUConfig, PPGConfig, GaitConfig, TremorConfig, PulseRateConfig
+from paradigma.config import (
+    GaitConfig,
+    IMUConfig,
+    PPGConfig,
+    PulseRateConfig,
+    TremorConfig,
+)
 from paradigma.constants import DataColumns, TimeUnit
-from paradigma.pipelines.gait_pipeline import extract_gait_features, detect_gait, \
-    extract_arm_activity_features, filter_gait
-from paradigma.pipelines.tremor_pipeline import extract_tremor_features, detect_tremor, \
-    aggregate_tremor
-from paradigma.pipelines.pulse_rate_pipeline import extract_signal_quality_features, signal_quality_classification, \
-    aggregate_pulse_rate
+from paradigma.pipelines.gait_pipeline import (
+    detect_gait,
+    extract_arm_activity_features,
+    extract_gait_features,
+    filter_gait,
+)
+from paradigma.pipelines.pulse_rate_pipeline import (
+    aggregate_pulse_rate,
+    extract_signal_quality_features,
+)
+from paradigma.pipelines.tremor_pipeline import (
+    aggregate_tremor,
+    detect_tremor,
+    extract_tremor_features,
+)
 from paradigma.preprocessing import preprocess_imu_data, preprocess_ppg_data
-from paradigma.util import read_metadata, write_df_data, get_end_iso8601, merge_predictions_with_timestamps
-
-
-def preprocess_imu_data_io(path_to_input: str | Path, path_to_output: str | Path,
-                           config: IMUConfig, sensor: str, watch_side: str) -> None:
+from paradigma.util import (
+    get_end_iso8601,
+    merge_predictions_with_timestamps,
+    read_metadata,
+    write_df_data,
+)
+
+
+def preprocess_imu_data_io(
+    path_to_input: str | Path,
+    path_to_output: str | Path,
+    config: IMUConfig,
+    sensor: str,
+    watch_side: str,
+) -> None:
     # Load data
-    metadata_time, metadata_values = read_metadata(str(path_to_input), str(config.meta_filename),
-                                                   str(config.time_filename), str(config.values_filename))
-    df = tsdf.load_dataframe_from_binaries([metadata_time, metadata_values], tsdf.constants.ConcatenationType.columns)
+    metadata_time, metadata_values = read_metadata(
+        str(path_to_input),
+        str(config.meta_filename),
+        str(config.time_filename),
+        str(config.values_filename),
+    )
+    df = tsdf.load_dataframe_from_binaries(
+        [metadata_time, metadata_values], tsdf.constants.ConcatenationType.columns
+    )
 
     # Preprocess data
     df = preprocess_imu_data(df=df, config=config, sensor=sensor, watch_side=watch_side)
 
     # Store data
-    for sensor, units in zip(['accelerometer', 'gyroscope'], ['g', config.rotation_units]):
+    for sensor, units in zip(
+        ["accelerometer", "gyroscope"], ["g", config.rotation_units]
+    ):
         if any(sensor in col for col in df.columns):
             df_sensor = df[[DataColumns.TIME] + [x for x in df.columns if sensor in x]]
 
             metadata_values.channels = [x for x in df.columns if sensor in x]
-            metadata_values.units = list(np.repeat(units, len(metadata_values.channels)))
+            metadata_values.units = list(
+                np.repeat(units, len(metadata_values.channels))
+            )
             metadata_values.scale_factors = []
-            metadata_values.file_name = f'{sensor}_values.bin'
+            metadata_values.file_name = f"{sensor}_values.bin"
 
-            metadata_time.file_name = f'{sensor}_time.bin'
+            metadata_time.file_name = f"{sensor}_time.bin"
             metadata_time.units = [TimeUnit.RELATIVE_S]
 
-            write_df_data(metadata_time, metadata_values, path_to_output, f'{sensor}_meta.json', df_sensor)
-
-
-def preprocess_ppg_data_io(path_to_input_ppg: str | Path, path_to_input_imu: str | Path,
-                           output_path: str | Path, ppg_config: PPGConfig,
-                           imu_config: IMUConfig) -> None:
-    """
+            write_df_data(
+                metadata_time,
+                metadata_values,
+                path_to_output,
+                f"{sensor}_meta.json",
+                df_sensor,
+            )
+
+
+def preprocess_ppg_data_io(
+    path_to_input_ppg: str | Path,
+    path_to_input_imu: str | Path,
+    output_path: str | Path,
+    ppg_config: PPGConfig,
+    imu_config: IMUConfig,
+) -> None:
+    """
     Preprocess PPG and IMU data by resampling, filtering, and aligning the data segments.
 
     Parameters
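
Note: everything in this first hunk is mechanical reformatting consistent with Black-style formatting and isort-style import grouping: imports are sorted into standard-library, third-party, and first-party groups, string literals move from single to double quotes, and long signatures and calls are exploded one argument per line. Behavior is unchanged, so existing callers are unaffected. A minimal sketch of a call under the new `preprocess_imu_data_io` signature (the paths, the default `IMUConfig()` construction, and the `watch_side` value are assumptions for illustration, not taken from this diff):

    from paradigma.config import IMUConfig
    from paradigma.testing import preprocess_imu_data_io

    preprocess_imu_data_io(
        path_to_input="data/raw/imu",        # hypothetical TSDF input directory
        path_to_output="data/preprocessed",  # hypothetical output directory
        config=IMUConfig(),                  # assumed default-constructible
        sensor="accelerometer",
        watch_side="left",                   # assumed accepted value
    )
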
@@ -67,176 +113,234 @@ def preprocess_ppg_data_io(path_to_input_ppg: str | Path, path_to_input_imu: str
     Returns
     -------
     None
-    """
+    """
 
     # Load PPG data
-    # Load data
-    metadata_time_ppg, metadata_values_ppg = read_metadata(path_to_input_ppg, ppg_config.meta_filename,
-                                                           ppg_config.time_filename, ppg_config.values_filename)
-    df_ppg = tsdf.load_dataframe_from_binaries([metadata_time_ppg, metadata_values_ppg], tsdf.constants.ConcatenationType.columns)
+    # Load data
+    metadata_time_ppg, metadata_values_ppg = read_metadata(
+        path_to_input_ppg,
+        ppg_config.meta_filename,
+        ppg_config.time_filename,
+        ppg_config.values_filename,
+    )
+    df_ppg = tsdf.load_dataframe_from_binaries(
+        [metadata_time_ppg, metadata_values_ppg],
+        tsdf.constants.ConcatenationType.columns,
+    )
 
     # Load IMU data
-    metadata_time_imu, metadata_values_imu = read_metadata(path_to_input_imu, imu_config.meta_filename,
-                                                           imu_config.time_filename, imu_config.values_filename)
-    df_imu = tsdf.load_dataframe_from_binaries([metadata_time_imu, metadata_values_imu], tsdf.constants.ConcatenationType.columns)
+    metadata_time_imu, metadata_values_imu = read_metadata(
+        path_to_input_imu,
+        imu_config.meta_filename,
+        imu_config.time_filename,
+        imu_config.values_filename,
+    )
+    df_imu = tsdf.load_dataframe_from_binaries(
+        [metadata_time_imu, metadata_values_imu],
+        tsdf.constants.ConcatenationType.columns,
+    )
 
     # Drop the gyroscope columns from the IMU data
-    cols_to_drop = df_imu.filter(regex='^gyroscope_').columns
-    df_acc = df_imu.drop(cols_to_drop, axis=1)
+    colnames_to_drop = df_imu.filter(regex="^gyroscope_").columns
+    df_acc = df_imu.drop(colnames_to_drop, axis=1)
 
     # Preprocess data
     df_ppg_proc, df_acc_proc = preprocess_ppg_data(
-        df_ppg=df_ppg,
-        df_acc=df_acc,
-        ppg_config=ppg_config,
+        df_ppg=df_ppg,
+        df_acc=df_acc,
+        ppg_config=ppg_config,
         imu_config=imu_config,
         start_time_ppg=metadata_time_ppg.start_iso8601,
-        start_time_imu=metadata_time_imu.start_iso8601
+        start_time_imu=metadata_time_imu.start_iso8601,
     )
 
     # Store data
     metadata_values_imu.channels = list(imu_config.d_channels_accelerometer.keys())
     metadata_values_imu.units = list(imu_config.d_channels_accelerometer.values())
-    metadata_values_imu.file_name = 'accelerometer_values.bin'
+    metadata_values_imu.file_name = "accelerometer_values.bin"
     metadata_time_imu.units = [TimeUnit.ABSOLUTE_MS]
-    metadata_time_imu.file_name = 'accelerometer_time.bin'
-    write_df_data(metadata_time_imu, metadata_values_imu, output_path, 'accelerometer_meta.json', df_acc_proc)
+    metadata_time_imu.file_name = "accelerometer_time.bin"
+    write_df_data(
+        metadata_time_imu,
+        metadata_values_imu,
+        output_path,
+        "accelerometer_meta.json",
+        df_acc_proc,
+    )
 
     metadata_values_ppg.channels = list(ppg_config.d_channels_ppg.keys())
     metadata_values_ppg.units = list(ppg_config.d_channels_ppg.values())
-    metadata_values_ppg.file_name = 'PPG_values.bin'
+    metadata_values_ppg.file_name = "PPG_values.bin"
     metadata_time_ppg.units = [TimeUnit.ABSOLUTE_MS]
-    metadata_time_ppg.file_name = 'PPG_time.bin'
-    write_df_data(metadata_time_ppg, metadata_values_ppg, output_path, 'PPG_meta.json', df_ppg_proc)
+    metadata_time_ppg.file_name = "PPG_time.bin"
+    write_df_data(
+        metadata_time_ppg,
+        metadata_values_ppg,
+        output_path,
+        "PPG_meta.json",
+        df_ppg_proc,
+    )
 
 
 def extract_gait_features_io(
-    config: GaitConfig,
-    path_to_input: str | Path,
-    path_to_output: str | Path
-) -> None:
+    config: GaitConfig, path_to_input: str | Path, path_to_output: str | Path
+) -> None:
     # Load data
-    metadata_time, metadata_values = read_metadata(path_to_input, config.meta_filename, config.time_filename, config.values_filename)
-    df = tsdf.load_dataframe_from_binaries([metadata_time, metadata_values], tsdf.constants.ConcatenationType.columns)
+    metadata_time, metadata_values = read_metadata(
+        path_to_input,
+        config.meta_filename,
+        config.time_filename,
+        config.values_filename,
+    )
+    df = tsdf.load_dataframe_from_binaries(
+        [metadata_time, metadata_values], tsdf.constants.ConcatenationType.columns
+    )
 
     # Extract gait features
     df_features = extract_gait_features(df=df, config=config)
 
     # Store data
-    end_iso8601 = get_end_iso8601(start_iso8601=metadata_time.start_iso8601,
-                                  window_length_seconds=int(df_features[DataColumns.TIME][-1:].values[0] + config.window_length_s))
+    end_iso8601 = get_end_iso8601(
+        start_iso8601=metadata_time.start_iso8601,
+        window_length_seconds=int(
+            df_features[DataColumns.TIME][-1:].values[0] + config.window_length_s
+        ),
+    )
 
-    metadata_values.file_name = 'gait_values.bin'
-    metadata_time.file_name = 'gait_time.bin'
+    metadata_values.file_name = "gait_values.bin"
+    metadata_time.file_name = "gait_time.bin"
     metadata_values.end_iso8601 = end_iso8601
     metadata_time.end_iso8601 = end_iso8601
-
+
     metadata_values.channels = list(config.d_channels_values.keys())
     metadata_values.units = list(config.d_channels_values.values())
 
     metadata_time.channels = [DataColumns.TIME]
     metadata_time.units = [TimeUnit.RELATIVE_S]
 
-    write_df_data(metadata_time, metadata_values, path_to_output, 'gait_meta.json', df_features)
+    write_df_data(
+        metadata_time, metadata_values, path_to_output, "gait_meta.json", df_features
+    )
 
 
 def detect_gait_io(
-    config: GaitConfig,
-    path_to_input: str | Path,
-    path_to_output: str | Path,
-    full_path_to_classifier_package: str | Path,
-) -> None:
-
+    config: GaitConfig,
+    path_to_input: str | Path,
+    path_to_output: str | Path,
+    full_path_to_classifier_package: str | Path,
+) -> None:
+
     # Load the data
-    config.set_filenames('gait')
+    config.set_filenames("gait")
 
-    metadata_time, metadata_values = read_metadata(path_to_input, config.meta_filename, config.time_filename, config.values_filename)
-    df = tsdf.load_dataframe_from_binaries([metadata_time, metadata_values], tsdf.constants.ConcatenationType.columns)
+    metadata_time, metadata_values = read_metadata(
+        path_to_input,
+        config.meta_filename,
+        config.time_filename,
+        config.values_filename,
+    )
+    df = tsdf.load_dataframe_from_binaries(
+        [metadata_time, metadata_values], tsdf.constants.ConcatenationType.columns
+    )
 
     clf_package = ClassifierPackage.load(full_path_to_classifier_package)
 
-    df[DataColumns.PRED_GAIT_PROBA] = detect_gait(
-        df=df,
-        clf_package=clf_package
-    )
+    df[DataColumns.PRED_GAIT_PROBA] = detect_gait(df=df, clf_package=clf_package)
 
     # Prepare the metadata
-    metadata_values.file_name = 'gait_values.bin'
-    metadata_time.file_name = 'gait_time.bin'
+    metadata_values.file_name = "gait_values.bin"
+    metadata_time.file_name = "gait_time.bin"
 
     metadata_values.channels = [DataColumns.PRED_GAIT_PROBA]
-    metadata_values.units = ['probability']
+    metadata_values.units = ["probability"]
 
     metadata_time.channels = [DataColumns.TIME]
     metadata_time.units = [TimeUnit.RELATIVE_S]
 
-    write_df_data(metadata_time, metadata_values, path_to_output, 'gait_meta.json', df)
+    write_df_data(metadata_time, metadata_values, path_to_output, "gait_meta.json", df)
 
 
 def extract_arm_activity_features_io(
-    config: GaitConfig,
-    path_to_timestamp_input: str | Path,
-    path_to_prediction_input: str | Path,
-    full_path_to_classifier_package: str | Path,
-    path_to_output: str | Path
-) -> None:
+    config: GaitConfig,
+    path_to_timestamp_input: str | Path,
+    path_to_prediction_input: str | Path,
+    full_path_to_classifier_package: str | Path,
+    path_to_output: str | Path,
+) -> None:
     # Load accelerometer and gyroscope data
     dfs = []
-    for sensor in ['accelerometer', 'gyroscope']:
+    for sensor in ["accelerometer", "gyroscope"]:
         config.set_sensor(sensor)
-        meta_ts_filename = f'{sensor}_meta.json'
-        values_ts_filename = f'{sensor}_values.bin'
-        time_ts_filename = f'{sensor}_time.bin'
+        meta_ts_filename = f"{sensor}_meta.json"
+        values_ts_filename = f"{sensor}_values.bin"
+        time_ts_filename = f"{sensor}_time.bin"
 
-        metadata_ts_dict = tsdf.load_metadata_from_path(os.path.join(path_to_timestamp_input, meta_ts_filename))
+        metadata_ts_dict = tsdf.load_metadata_from_path(
+            os.path.join(path_to_timestamp_input, meta_ts_filename)
+        )
         metadata_ts_time = metadata_ts_dict[time_ts_filename]
         metadata_ts_values = metadata_ts_dict[values_ts_filename]
-        dfs.append(tsdf.load_dataframe_from_binaries([metadata_ts_time, metadata_ts_values], tsdf.constants.ConcatenationType.columns))
+        dfs.append(
+            tsdf.load_dataframe_from_binaries(
+                [metadata_ts_time, metadata_ts_values],
+                tsdf.constants.ConcatenationType.columns,
+            )
+        )
 
     df_ts = pd.merge(dfs[0], dfs[1], on=DataColumns.TIME)
 
     # Load gait predictions
-    meta_pred_filename = 'gait_meta.json'
-    values_pred_filename = 'gait_values.bin'
-    time_pred_filename = 'gait_time.bin'
+    meta_pred_filename = "gait_meta.json"
+    values_pred_filename = "gait_values.bin"
+    time_pred_filename = "gait_time.bin"
 
-    metadata_pred_dict = tsdf.load_metadata_from_path(os.path.join(path_to_prediction_input, meta_pred_filename))
+    metadata_pred_dict = tsdf.load_metadata_from_path(
+        os.path.join(path_to_prediction_input, meta_pred_filename)
+    )
     metadata_pred_time = metadata_pred_dict[time_pred_filename]
     metadata_pred_values = metadata_pred_dict[values_pred_filename]
 
-    df_pred_gait = tsdf.load_dataframe_from_binaries([metadata_pred_time, metadata_pred_values], tsdf.constants.ConcatenationType.columns)
+    df_pred_gait = tsdf.load_dataframe_from_binaries(
+        [metadata_pred_time, metadata_pred_values],
+        tsdf.constants.ConcatenationType.columns,
+    )
 
     clf_package = ClassifierPackage.load(full_path_to_classifier_package)
 
-    gait_preprocessing_config = GaitConfig(step='gait')
+    gait_preprocessing_config = GaitConfig(step="gait")
 
     df = merge_predictions_with_timestamps(
-        df_ts=df_ts,
-        df_predictions=df_pred_gait,
+        df_ts=df_ts,
+        df_predictions=df_pred_gait,
         pred_proba_colname=DataColumns.PRED_GAIT_PROBA,
         window_length_s=gait_preprocessing_config.window_length_s,
-        fs=gait_preprocessing_config.sampling_frequency
+        fs=gait_preprocessing_config.sampling_frequency,
     )
 
     # Add a column for predicted gait based on a fitted threshold
-    df[DataColumns.PRED_GAIT] = (df[DataColumns.PRED_GAIT_PROBA] >= clf_package.threshold).astype(int)
+    df[DataColumns.PRED_GAIT] = (
+        df[DataColumns.PRED_GAIT_PROBA] >= clf_package.threshold
+    ).astype(int)
 
     # Filter the DataFrame to only include predicted gait (1)
-    df = df.loc[df[DataColumns.PRED_GAIT]==1].reset_index(drop=True)
+    df = df.loc[df[DataColumns.PRED_GAIT] == 1].reset_index(drop=True)
 
     # Extract arm activity features
-    config = GaitConfig(step='arm_activity')
+    config = GaitConfig(step="arm_activity")
     df_features = extract_arm_activity_features(
-        df=df,
+        df=df,
         config=config,
     )
 
-    end_iso8601 = get_end_iso8601(metadata_ts_values.start_iso8601, df_features[DataColumns.TIME][-1:].values[0] + config.window_length_s)
+    end_iso8601 = get_end_iso8601(
+        metadata_ts_values.start_iso8601,
+        df_features[DataColumns.TIME][-1:].values[0] + config.window_length_s,
+    )
 
     metadata_ts_values.end_iso8601 = end_iso8601
-    metadata_ts_values.file_name = 'arm_activity_values.bin'
+    metadata_ts_values.file_name = "arm_activity_values.bin"
     metadata_ts_time.end_iso8601 = end_iso8601
-    metadata_ts_time.file_name = 'arm_activity_time.bin'
+    metadata_ts_time.file_name = "arm_activity_time.bin"
 
     metadata_ts_values.channels = list(config.d_channels_values.keys())
     metadata_ts_values.units = list(config.d_channels_values.values())
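
Note: the gait `*_io` wrappers above chain together: `extract_gait_features_io` windows the preprocessed IMU data, `detect_gait_io` adds gait probabilities from a `ClassifierPackage`, `extract_arm_activity_features_io` keeps only gait-positive windows and computes arm-activity features, and `filter_gait_io` (next hunk) scores those features. A sketch of the assumed call order (directory layout and classifier-package paths are hypothetical; `GaitConfig(step="gait")` is taken from the code above):

    config = GaitConfig(step="gait")
    extract_gait_features_io(config, "out/preprocessed", "out/gait")
    detect_gait_io(config, "out/gait", "out/gait", "clf/gait_detection.pkl")
    extract_arm_activity_features_io(
        config,
        "out/preprocessed",       # timestamps (accelerometer/gyroscope TSDF)
        "out/gait",               # gait predictions
        "clf/arm_activity.pkl",   # hypothetical classifier package
        "out/arm_activity",
    )
    filter_gait_io(config, "out/arm_activity", "out/arm_activity", "clf/arm_activity.pkl")
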
@@ -244,103 +348,154 @@ def extract_arm_activity_features_io(
     metadata_ts_time.channels = [DataColumns.TIME]
     metadata_ts_time.units = [TimeUnit.RELATIVE_S]
 
-    write_df_data(metadata_ts_time, metadata_ts_values, path_to_output, 'arm_activity_meta.json', df_features)
+    write_df_data(
+        metadata_ts_time,
+        metadata_ts_values,
+        path_to_output,
+        "arm_activity_meta.json",
+        df_features,
+    )
 
 
 def filter_gait_io(
-    config: GaitConfig,
-    path_to_input: str | Path,
-    path_to_output: str | Path,
-    full_path_to_classifier_package: str | Path,
-) -> None:
+    config: GaitConfig,
+    path_to_input: str | Path,
+    path_to_output: str | Path,
+    full_path_to_classifier_package: str | Path,
+) -> None:
     # Load the data
-    config.set_filenames('arm_activity')
+    config.set_filenames("arm_activity")
 
-    metadata_time, metadata_values = read_metadata(path_to_input, config.meta_filename, config.time_filename, config.values_filename)
-    df = tsdf.load_dataframe_from_binaries([metadata_time, metadata_values], tsdf.constants.ConcatenationType.columns)
+    metadata_time, metadata_values = read_metadata(
+        path_to_input,
+        config.meta_filename,
+        config.time_filename,
+        config.values_filename,
+    )
+    df = tsdf.load_dataframe_from_binaries(
+        [metadata_time, metadata_values], tsdf.constants.ConcatenationType.columns
+    )
 
     clf_package = ClassifierPackage.load(filepath=full_path_to_classifier_package)
 
     df[DataColumns.PRED_NO_OTHER_ARM_ACTIVITY_PROBA] = filter_gait(
-        df=df,
-        clf_package=clf_package
+        df=df, clf_package=clf_package
     )
 
     # Prepare the metadata
-    metadata_values.file_name = 'arm_activity_values.bin'
-    metadata_time.file_name = 'arm_activity_time.bin'
+    metadata_values.file_name = "arm_activity_values.bin"
+    metadata_time.file_name = "arm_activity_time.bin"
 
     metadata_values.channels = [DataColumns.PRED_NO_OTHER_ARM_ACTIVITY_PROBA]
-    metadata_values.units = ['probability']
+    metadata_values.units = ["probability"]
 
     metadata_time.channels = [DataColumns.TIME]
     metadata_time.units = [TimeUnit.RELATIVE_S]
 
-    write_df_data(metadata_time, metadata_values, path_to_output, 'arm_activity_meta.json', df)
+    write_df_data(
+        metadata_time, metadata_values, path_to_output, "arm_activity_meta.json", df
+    )
 
 
-def extract_tremor_features_io(input_path: str | Path, output_path: str | Path, config: TremorConfig) -> None:
+def extract_tremor_features_io(
+    input_path: str | Path, output_path: str | Path, config: TremorConfig
+) -> None:
     # Load data
-    metadata_time, metadata_values = read_metadata(input_path, config.meta_filename, config.time_filename, config.values_filename)
-    df = tsdf.load_dataframe_from_binaries([metadata_time, metadata_values], tsdf.constants.ConcatenationType.columns)
+    metadata_time, metadata_values = read_metadata(
+        input_path, config.meta_filename, config.time_filename, config.values_filename
+    )
+    df = tsdf.load_dataframe_from_binaries(
+        [metadata_time, metadata_values], tsdf.constants.ConcatenationType.columns
+    )
 
     # Extract tremor features
     df_windowed = extract_tremor_features(df, config)
 
     # Store data
-    end_iso8601 = get_end_iso8601(start_iso8601=metadata_time.start_iso8601,
-                                  window_length_seconds=int(df_windowed[DataColumns.TIME][-1:].values[0] + config.window_length_s))
+    end_iso8601 = get_end_iso8601(
+        start_iso8601=metadata_time.start_iso8601,
+        window_length_seconds=int(
+            df_windowed[DataColumns.TIME][-1:].values[0] + config.window_length_s
+        ),
+    )
 
     metadata_values.end_iso8601 = end_iso8601
-    metadata_values.file_name = 'tremor_values.bin'
+    metadata_values.file_name = "tremor_values.bin"
     metadata_time.end_iso8601 = end_iso8601
-    metadata_time.file_name = 'tremor_time.bin'
+    metadata_time.file_name = "tremor_time.bin"
 
     metadata_values.channels = list(config.d_channels_values.keys())
     metadata_values.units = list(config.d_channels_values.values())
 
     metadata_time.channels = [DataColumns.TIME]
-    metadata_time.units = ['relative_time_ms']
+    metadata_time.units = ["relative_time_ms"]
+
+    write_df_data(
+        metadata_time, metadata_values, output_path, "tremor_meta.json", df_windowed
+    )
 
-    write_df_data(metadata_time, metadata_values, output_path, 'tremor_meta.json', df_windowed)
 
+def detect_tremor_io(
+    input_path: str | Path,
+    output_path: str | Path,
+    path_to_classifier_input: str | Path,
+    config: TremorConfig,
+) -> None:
 
-def detect_tremor_io(input_path: str | Path, output_path: str | Path, path_to_classifier_input: str | Path, config: TremorConfig) -> None:
-
     # Load the data
-    config.set_filenames('tremor')
+    config.set_filenames("tremor")
 
-    metadata_time, metadata_values = read_metadata(input_path, config.meta_filename, config.time_filename, config.values_filename)
-    df = tsdf.load_dataframe_from_binaries([metadata_time, metadata_values], tsdf.constants.ConcatenationType.columns)
+    metadata_time, metadata_values = read_metadata(
+        input_path, config.meta_filename, config.time_filename, config.values_filename
+    )
+    df = tsdf.load_dataframe_from_binaries(
+        [metadata_time, metadata_values], tsdf.constants.ConcatenationType.columns
+    )
 
     df = detect_tremor(df, config, path_to_classifier_input)
 
     # Prepare the metadata
-    metadata_values.file_name = 'tremor_values.bin'
-    metadata_time.file_name = 'tremor_time.bin'
+    metadata_values.file_name = "tremor_values.bin"
+    metadata_time.file_name = "tremor_time.bin"
 
     metadata_values.channels = list(config.d_channels_values.keys())
     metadata_values.units = list(config.d_channels_values.values())
 
     metadata_time.channels = [DataColumns.TIME]
-    metadata_time.units = ['relative_time_ms']
+    metadata_time.units = ["relative_time_ms"]
+
+    write_df_data(metadata_time, metadata_values, output_path, "tremor_meta.json", df)
 
-    write_df_data(metadata_time, metadata_values, output_path, 'tremor_meta.json', df)
 
+def aggregate_tremor_io(
+    path_to_feature_input: str | Path,
+    path_to_prediction_input: str | Path,
+    output_path: str | Path,
+    config: TremorConfig,
+) -> None:
 
-def aggregate_tremor_io(path_to_feature_input: str | Path, path_to_prediction_input: str | Path, output_path: str | Path, config: TremorConfig) -> None:
-
     # Load the features & predictions
-    metadata_time, metadata_values = read_metadata(path_to_feature_input, config.meta_filename, config.time_filename, config.values_filename)
-    df_features = tsdf.load_dataframe_from_binaries([metadata_time, metadata_values], tsdf.constants.ConcatenationType.columns)
+    metadata_time, metadata_values = read_metadata(
+        path_to_feature_input,
+        config.meta_filename,
+        config.time_filename,
+        config.values_filename,
+    )
+    df_features = tsdf.load_dataframe_from_binaries(
+        [metadata_time, metadata_values], tsdf.constants.ConcatenationType.columns
+    )
 
-    metadata_dict = tsdf.load_metadata_from_path(path_to_prediction_input / config.meta_filename)
+    metadata_dict = tsdf.load_metadata_from_path(
+        path_to_prediction_input / config.meta_filename
+    )
     metadata_time = metadata_dict[config.time_filename]
     metadata_values = metadata_dict[config.values_filename]
-    df_predictions = tsdf.load_dataframe_from_binaries([metadata_time, metadata_values], tsdf.constants.ConcatenationType.columns)
+    df_predictions = tsdf.load_dataframe_from_binaries(
+        [metadata_time, metadata_values], tsdf.constants.ConcatenationType.columns
+    )
 
     # Subset features
-    df_features = df_features[['tremor_power', 'below_tremor_power']]
+    df_features = df_features[["tremor_power", "below_tremor_power"]]
 
     # Concatenate predictions and tremor power
     df = pd.concat([df_predictions, df_features], axis=1)
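
Note: the tremor wrappers mirror the gait ones: extract features, classify, aggregate. One detail worth flagging: `aggregate_tremor_io` joins paths with the `/` operator (`path_to_prediction_input / config.meta_filename` and, in the next hunk, `output_path / "tremor_aggregates.json"`), so despite the `str | Path` annotations those arguments must be `pathlib.Path` objects. A sketch of the assumed sequence (paths and classifier location are hypothetical):

    from pathlib import Path

    config = TremorConfig()  # assumed default-constructible
    extract_tremor_features_io("out/preprocessed", "out/tremor", config)
    detect_tremor_io("out/tremor", "out/tremor", "clf/tremor", config)
    # Path objects required here because of the / joins noted above
    aggregate_tremor_io(Path("out/tremor"), Path("out/tremor"), Path("out"), config)
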
@@ -349,11 +504,16 @@ def aggregate_tremor_io(path_to_feature_input: str | Path, path_to_prediction_in
     d_aggregates = aggregate_tremor(df, config)
 
     # Save output
-    with open(output_path / "tremor_aggregates.json", 'w') as json_file:
+    with open(output_path / "tremor_aggregates.json", "w") as json_file:
         json.dump(d_aggregates, json_file, indent=4)
 
 
-def extract_signal_quality_features_io(input_path: str | Path, output_path: str | Path, ppg_config: PulseRateConfig, acc_config: PulseRateConfig) -> pd.DataFrame:
+def extract_signal_quality_features_io(
+    input_path: str | Path,
+    output_path: str | Path,
+    ppg_config: PulseRateConfig,
+    acc_config: PulseRateConfig,
+) -> pd.DataFrame:
     """
     Extract signal quality features from the PPG signal and save them to a file.
 
@@ -373,37 +533,64 @@ def extract_signal_quality_features_io(input_path: str | Path, output_path: str
     df_windowed : pd.DataFrame
         The DataFrame containing the extracted signal quality features.
 
-    """
+    """
     # Load PPG data
-    metadata_time, metadata_values = read_metadata(input_path, ppg_config.meta_filename, ppg_config.time_filename, ppg_config.values_filename)
-    df_ppg = tsdf.load_dataframe_from_binaries([metadata_time, metadata_values], tsdf.constants.ConcatenationType.columns)
-
+    metadata_time, metadata_values = read_metadata(
+        input_path,
+        ppg_config.meta_filename,
+        ppg_config.time_filename,
+        ppg_config.values_filename,
+    )
+    df_ppg = tsdf.load_dataframe_from_binaries(
+        [metadata_time, metadata_values], tsdf.constants.ConcatenationType.columns
+    )
+
     # Load IMU data
-    metadata_time, metadata_values = read_metadata(input_path, acc_config.meta_filename, acc_config.time_filename, acc_config.values_filename)
-    df_acc = tsdf.load_dataframe_from_binaries([metadata_time, metadata_values], tsdf.constants.ConcatenationType.columns)
+    metadata_time, metadata_values = read_metadata(
+        input_path,
+        acc_config.meta_filename,
+        acc_config.time_filename,
+        acc_config.values_filename,
+    )
+    df_acc = tsdf.load_dataframe_from_binaries(
+        [metadata_time, metadata_values], tsdf.constants.ConcatenationType.columns
+    )
 
     # Extract signal quality features
-    df_windowed = extract_signal_quality_features(df_ppg, df_acc, ppg_config, acc_config)
-
+    df_windowed = extract_signal_quality_features(
+        df_ppg, df_acc, ppg_config, acc_config
+    )
+
     # Save the extracted features
-    #TO BE ADDED
+    # TO BE ADDED
    return df_windowed
 
 
-def signal_quality_classification_io(input_path: str | Path, output_path: str | Path, path_to_classifier_input: str | Path, config: PulseRateConfig) -> None:
-
-    # Load the data
-    metadata_time, metadata_values = read_metadata(input_path, config.meta_filename, config.time_filename, config.values_filename)
-    df_windowed = tsdf.load_dataframe_from_binaries([metadata_time, metadata_values], tsdf.constants.ConcatenationType.columns)
+# def signal_quality_classification_io(
+#     input_path: str | Path,
+#     output_path: str | Path,
+#     path_to_classifier_input: str | Path,
+#     config: PulseRateConfig,
+# ) -> None:
+
+#     # Load the data
+#     metadata_time, metadata_values = read_metadata(
+#         input_path, config.meta_filename, config.time_filename, config.values_filename
+#     )
+#     df_windowed = tsdf.load_dataframe_from_binaries(
+#         [metadata_time, metadata_values], tsdf.constants.ConcatenationType.columns
+#     )
 
-    df_sqa = signal_quality_classification(df_windowed, config, path_to_classifier_input)
+#     df_sqa = signal_quality_classification(
+#         df_windowed, config, path_to_classifier_input
+#     )
 
 
 def aggregate_pulse_rate_io(
-    full_path_to_input: str | Path,
-    full_path_to_output: str | Path,
-    aggregates: List[str] = ['mode', '99p']
-) -> None:
+    full_path_to_input: str | Path,
+    full_path_to_output: str | Path,
+    aggregates: List[str] = ["mode", "99p"],
+) -> None:
     """
     Extract pulse rate from the PPG signal and save the aggregated pulse rate estimates to a file.
 
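
Note: this hunk is not purely cosmetic. 1.0.4 comments out `signal_quality_classification_io` entirely, and the matching `signal_quality_classification` import is dropped from the `pulse_rate_pipeline` import block at the top of the file, so `from paradigma.testing import signal_quality_classification_io` will raise an ImportError after upgrading. Assuming the pipeline function itself is unchanged, the commented-out body suggests the direct replacement is roughly (classifier path hypothetical):

    from paradigma.pipelines.pulse_rate_pipeline import signal_quality_classification

    # df_windowed as returned by extract_signal_quality_features_io above;
    # config is the PulseRateConfig, "clf/ppg_quality" a hypothetical classifier path
    df_sqa = signal_quality_classification(df_windowed, config, "clf/ppg_quality")
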
@@ -418,13 +605,13 @@ def aggregate_pulse_rate_io(
     """
 
     # Load the pulse rate estimates
-    with open(full_path_to_input, 'r') as f:
+    with open(full_path_to_input, "r") as f:
         df_pr = json.load(f)
-
+
     # Aggregate the pulse rate estimates
-    pr_values = df_pr['pulse_rate'].values
+    pr_values = df_pr["pulse_rate"].values
     df_pr_aggregates = aggregate_pulse_rate(pr_values, aggregates)
 
     # Save the aggregated pulse rate estimates
-    with open(full_path_to_output, 'w') as json_file:
-        json.dump(df_pr_aggregates, json_file, indent=4)
+    with open(full_path_to_output, "w") as json_file:
+        json.dump(df_pr_aggregates, json_file, indent=4)
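
Note: the `aggregate_pulse_rate_io` change is formatting-only, but the body it touches reads the input with `json.load` and then accesses `df_pr["pulse_rate"].values`; a plain dict or list returned by `json.load` has no `.values` attribute, so the function appears to assume pandas-like data. A caller-side sketch consistent with that assumption (file name hypothetical):

    import json
    import pandas as pd

    # Wrapping the parsed JSON in a DataFrame makes ["pulse_rate"].values valid
    with open("out/pulse_rate_estimates.json", "r") as f:
        df_pr = pd.DataFrame(json.load(f))
    pr_values = df_pr["pulse_rate"].values
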