paradigma 0.4.2__py3-none-any.whl → 0.4.3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
paradigma/config.py CHANGED
@@ -195,11 +195,12 @@ class TremorConfig(IMUConfig):
195
195
  # Feature extraction
196
196
  # -----------------
197
197
  self.window_type = 'hann'
198
-
199
- # Power spectral density
200
198
  self.overlap_fraction: float = 0.8
201
- self.segment_length_s: float = 3
199
+ self.segment_length_psd_s: float = 3
200
+ self.segment_length_spectrogram_s: float = 2
202
201
  self.spectral_resolution: float = 0.25
202
+
203
+ # PSD based features
203
204
  self.fmin_peak_search: float = 1
204
205
  self.fmax_peak_search: float = 25
205
206
  self.fmin_below_rest_tremor: float = 0.5
@@ -416,11 +416,13 @@ def compute_spectral_entropy(
416
416
  def compute_mfccs(
417
417
  total_power_array: np.ndarray,
418
418
  config,
419
+ total_power_type: str = 'psd',
419
420
  mel_scale: bool = True,
420
- multiplication_factor: float = 1
421
+ multiplication_factor: float = 1,
422
+ rounding_method: str = 'floor'
421
423
  ) -> np.ndarray:
422
424
  """
423
- Generate Mel Frequency Cepstral Coefficients (MFCCs) from the total power spectral density of the signal.
425
+ Generate Mel Frequency Cepstral Coefficients (MFCCs) from the total power spectral density or spectrogram of the signal.
424
426
 
425
427
  MFCCs are commonly used features in signal processing for tasks like audio and
426
428
  vibration analysis. In this version, we adjusted the MFCCs to the human activity
@@ -434,6 +436,9 @@ def compute_mfccs(
434
436
  total_power_array : np.ndarray
435
437
  2D array of shape (n_windows, n_frequencies) containing the total power
436
438
  of the signal for each window.
439
+ OR
440
+ 3D array of shape (n_windows, n_frequencies, n_segments) containing the total spectrogram
441
+ of the signal for each window.
437
442
  config : object
438
443
  Configuration object containing the following attributes:
439
444
  - window_length_s : int
@@ -448,11 +453,15 @@ def compute_mfccs(
448
453
  Number of triangular filters in the filterbank (default: 20).
449
454
  - mfcc_n_coefficients : int
450
455
  Number of coefficients to extract (default: 12).
456
+ total_power_type : str, optional
457
+ The type of the total power array. Supported values are 'psd' and 'spectrogram' (default: 'psd').
451
458
  mel_scale : bool, optional
452
459
  Whether to use the mel scale for the filterbank (default: True).
453
460
  multiplication_factor : float, optional
454
461
  Multiplication factor for the Mel scale conversion (default: 1). For tremor, the recommended
455
462
  value is 1. For gait, this is 4.
463
+ rounding_method : str, optional
464
+ The method used to round the filter points. Supported values are 'round' and 'floor' (default: 'floor').
456
465
 
457
466
  Returns
458
467
  -------
@@ -466,9 +475,19 @@ def compute_mfccs(
466
475
  - The function includes filterbank normalization to ensure proper scaling.
467
476
  - DCT filters are constructed to minimize spectral leakage.
468
477
  """
478
+
479
+ # Check if total_power_type is either 'psd' or 'spectrogram'
480
+ if total_power_type not in ['psd', 'spectrogram']:
481
+ raise ValueError("total_power_type should be set to either 'psd' or 'spectrogram'")
482
+
469
483
  # Compute window length in samples
470
484
  window_length = config.window_length_s * config.sampling_frequency
471
485
 
486
+ # Determine the length of subwindows used in the spectrogram computation
487
+ if total_power_type == 'spectrogram':
488
+ nr_subwindows = total_power_array.shape[2]
489
+ window_length = int(window_length/(nr_subwindows - (nr_subwindows - 1) * config.overlap_fraction))
490
+
472
491
  # Generate filter points
473
492
  if mel_scale:
474
493
  freqs = np.linspace(
@@ -483,10 +502,16 @@ def compute_mfccs(
483
502
  config.mfcc_high_frequency,
484
503
  num=config.mfcc_n_dct_filters + 2
485
504
  )
505
+
506
+ if rounding_method == 'round':
507
+ filter_points = np.round(
508
+ window_length / config.sampling_frequency * freqs
509
+ ).astype(int) + 1
486
510
 
487
- filter_points = np.floor(
488
- window_length / config.sampling_frequency * freqs
489
- ).astype(int) + 1
511
+ elif rounding_method == 'floor':
512
+ filter_points = np.floor(
513
+ window_length / config.sampling_frequency * freqs
514
+ ).astype(int) + 1
490
515
 
491
516
  # Construct triangular filterbank
492
517
  filters = np.zeros((len(filter_points) - 2, int(window_length / 2 + 1)))
@@ -500,8 +525,11 @@ def compute_mfccs(
500
525
  )
501
526
 
502
527
  # Apply filterbank to total power
503
- power_filtered = np.dot(total_power_array, filters.T)
504
-
528
+ if total_power_type == 'spectrogram':
529
+ power_filtered = np.tensordot(total_power_array, filters.T, axes=(1,0))
530
+ elif total_power_type == 'psd':
531
+ power_filtered = np.dot(total_power_array, filters.T)
532
+
505
533
  # Convert power to logarithmic scale
506
534
  log_power_filtered = np.log10(power_filtered + 1e-10)
507
535
 
@@ -519,6 +547,9 @@ def compute_mfccs(
519
547
  # Compute MFCCs
520
548
  mfccs = np.dot(log_power_filtered, dct_filters.T)
521
549
 
550
+ if total_power_type == 'spectrogram':
551
+ mfccs = np.mean(mfccs, axis=1)
552
+
522
553
  return mfccs
523
554
 
524
555
 
@@ -350,31 +350,20 @@ def filter_gait(
350
350
 
351
351
 
352
352
  def quantify_arm_swing(
353
- df_timestamps: pd.DataFrame,
354
- df_predictions: pd.DataFrame,
355
- classification_threshold: float,
356
- window_length_s: float,
353
+ df: pd.DataFrame,
357
354
  max_segment_gap_s: float,
358
355
  min_segment_length_s: float,
359
356
  fs: int,
360
- dfs_to_quantify: List[str] | str = ['unfiltered', 'filtered'],
357
+ filtered: bool = False,
361
358
  ) -> Tuple[dict[str, pd.DataFrame], dict]:
362
359
  """
363
360
  Quantify arm swing parameters for segments of motion based on gyroscope data.
364
361
 
365
362
  Parameters
366
363
  ----------
367
- df_timestamps : pd.DataFrame
368
- A DataFrame containing the raw sensor data, including gyroscope columns.
369
-
370
- df_predictions : pd.DataFrame
371
- A DataFrame containing the predicted probabilities for no other arm activity per window.
372
-
373
- classification_threshold : float
374
- The threshold used to classify no other arm activity based on the predicted probabilities.
375
-
376
- window_length_s : float
377
- The length of the window used for feature extraction.
364
+ df : pd.DataFrame
365
+ A DataFrame containing the raw sensor data, including gyroscope columns. Should include a column
366
+ for predicted no other arm activity based on a fitted threshold if filtered is True.
378
367
 
379
368
  max_segment_gap_s : float
380
369
  The maximum gap allowed between segments.
@@ -385,45 +374,16 @@ def quantify_arm_swing(
385
374
  fs : int
386
375
  The sampling frequency of the sensor data.
387
376
 
388
- dfs_to_quantify : List[str] | str, optional
389
- The DataFrames to quantify arm swing parameters for. Options are 'unfiltered' and 'filtered', with 'unfiltered' being predicted gait, and
390
- 'filtered' being predicted gait without other arm activities.
377
+ filtered : bool, optional, default=False
378
+ If `True`, the gyroscope data is filtered to only include predicted no other arm activity.
391
379
 
392
380
  Returns
393
381
  -------
394
- Tuple[dict, dict]
395
- A tuple containing a dictionary with quantified arm swing parameters for dfs_to_quantify,
396
- and a dictionary containing metadata for each segment.
382
+ Tuple[pd.DataFrame, dict]
383
+ A tuple containing a dataframe with quantified arm swing parameters and a dictionary containing
384
+ metadata for each segment.
397
385
  """
398
- if not any(df_predictions[DataColumns.PRED_NO_OTHER_ARM_ACTIVITY_PROBA] >= classification_threshold):
399
- raise ValueError("No gait without other arm activity detected in the input data.")
400
-
401
- if isinstance(dfs_to_quantify, str):
402
- dfs_to_quantify = [dfs_to_quantify]
403
- elif not isinstance(dfs_to_quantify, list):
404
- raise ValueError("dfs_to_quantify must be either 'unfiltered', 'filtered', or a list containing both.")
405
-
406
- valid_values = {'unfiltered', 'filtered'}
407
- if set(dfs_to_quantify) - valid_values:
408
- raise ValueError(
409
- f"Invalid value in dfs_to_quantify: {dfs_to_quantify}. "
410
- f"Valid options are 'unfiltered', 'filtered', or both in a list."
411
- )
412
386
 
413
- # Merge arm activity predictions with timestamps
414
- df = merge_predictions_with_timestamps(
415
- df_ts=df_timestamps,
416
- df_predictions=df_predictions,
417
- pred_proba_colname=DataColumns.PRED_NO_OTHER_ARM_ACTIVITY_PROBA,
418
- window_length_s=window_length_s,
419
- fs=fs
420
- )
421
-
422
- # Add a column for predicted no other arm activity based on a fitted threshold
423
- df[DataColumns.PRED_NO_OTHER_ARM_ACTIVITY] = (
424
- df[DataColumns.PRED_NO_OTHER_ARM_ACTIVITY_PROBA] >= classification_threshold
425
- ).astype(int)
426
-
427
387
  # Group consecutive timestamps into segments, with new segments starting after a pre-specified gap.
428
388
  # Segments are made based on predicted gait
429
389
  df[DataColumns.SEGMENT_NR] = create_segments(
@@ -444,111 +404,98 @@ def quantify_arm_swing(
444
404
  raise ValueError("No segments found in the input data.")
445
405
 
446
406
  # If no arm swing data is remaining, return an empty dictionary
447
- if df.loc[df[DataColumns.PRED_NO_OTHER_ARM_ACTIVITY]==1].empty:
407
+ if filtered and df.loc[df[DataColumns.PRED_NO_OTHER_ARM_ACTIVITY]==1].empty:
408
+ raise ValueError("No gait without other arm activities to quantify.")
448
409
 
449
- if 'filtered' in dfs_to_quantify and len(dfs_to_quantify) == 1:
450
- raise ValueError("No gait without other arm activities to quantify.")
451
-
452
- dfs_to_quantify = [x for x in dfs_to_quantify if x != 'filtered']
410
+ df[DataColumns.SEGMENT_CAT] = categorize_segments(df=df, fs=fs)
453
411
 
454
- df[DataColumns.SEGMENT_CAT] = categorize_segments(
455
- df=df,
456
- fs=fs
457
- )
412
+ # Group and process segments
413
+ arm_swing_quantified = []
414
+ segment_meta = {}
415
+
416
+ if filtered:
417
+ # Filter the DataFrame to only include predicted no other arm activity (1)
418
+ df = df.loc[df[DataColumns.PRED_NO_OTHER_ARM_ACTIVITY]==1].reset_index(drop=True)
419
+
420
+ # Group consecutive timestamps into segments, with new segments starting after a pre-specified gap
421
+ # Now segments are based on predicted gait without other arm activity for subsequent processes
422
+ df[DataColumns.SEGMENT_NR] = create_segments(
423
+ time_array=df[DataColumns.TIME],
424
+ max_segment_gap_s=max_segment_gap_s
425
+ )
426
+
427
+ pred_colname_pca = DataColumns.PRED_NO_OTHER_ARM_ACTIVITY
428
+ else:
429
+ pred_colname_pca = None
458
430
 
459
431
  df[DataColumns.VELOCITY] = pca_transform_gyroscope(
460
432
  df=df,
461
433
  y_gyro_colname=DataColumns.GYROSCOPE_Y,
462
434
  z_gyro_colname=DataColumns.GYROSCOPE_Z,
463
- pred_colname=DataColumns.PRED_NO_OTHER_ARM_ACTIVITY
435
+ pred_colname=pred_colname_pca
464
436
  )
465
437
 
466
- # Group and process segments
467
- arm_swing_quantified = {}
468
- segment_meta = {}
438
+ for segment_nr, group in df.groupby(DataColumns.SEGMENT_NR, sort=False):
439
+ segment_cat = group[DataColumns.SEGMENT_CAT].iloc[0]
440
+ time_array = group[DataColumns.TIME].to_numpy()
441
+ velocity_array = group[DataColumns.VELOCITY].to_numpy()
469
442
 
470
- # If both unfiltered and filtered gait are to be quantified, start with the unfiltered data
471
- # and subset to get filtered data afterwards.
472
- dfs_to_quantify = sorted(dfs_to_quantify, reverse=True)
473
-
474
- for df_name in dfs_to_quantify:
475
- if df_name == 'filtered':
476
- # Filter the DataFrame to only include predicted no other arm activity (1)
477
- df_focus = df.loc[df[DataColumns.PRED_NO_OTHER_ARM_ACTIVITY]==1].copy().reset_index(drop=True)
478
-
479
- # Group consecutive timestamps into segments, with new segments starting after a pre-specified gap
480
- # Now segments are based on predicted gait without other arm activity for subsequent processes
481
- df_focus[DataColumns.SEGMENT_NR] = create_segments(
482
- time_array=df_focus[DataColumns.TIME],
483
- max_segment_gap_s=max_segment_gap_s
484
- )
485
- else:
486
- df_focus = df.copy()
487
-
488
- arm_swing_quantified[df_name] = []
489
- segment_meta[df_name] = {}
443
+ # Integrate the angular velocity to obtain an estimation of the angle
444
+ angle_array = compute_angle(
445
+ time_array=time_array,
446
+ velocity_array=velocity_array,
447
+ )
490
448
 
491
- for segment_nr, group in df_focus.groupby(DataColumns.SEGMENT_NR, sort=False):
492
- segment_cat = group[DataColumns.SEGMENT_CAT].iloc[0]
493
- time_array = group[DataColumns.TIME].to_numpy()
494
- velocity_array = group[DataColumns.VELOCITY].to_numpy()
449
+ # Detrend angle using moving average
450
+ angle_array = remove_moving_average_angle(
451
+ angle_array=angle_array,
452
+ fs=fs,
453
+ )
495
454
 
496
- # Integrate the angular velocity to obtain an estimation of the angle
497
- angle_array = compute_angle(
498
- time_array=time_array,
499
- velocity_array=velocity_array,
500
- )
455
+ segment_meta[segment_nr] = {
456
+ 'time_s': len(angle_array) / fs,
457
+ DataColumns.SEGMENT_CAT: segment_cat
458
+ }
501
459
 
502
- # Detrend angle using moving average
503
- angle_array = remove_moving_average_angle(
460
+ if angle_array.size > 0:
461
+ angle_extrema_indices, _, _ = extract_angle_extremes(
504
462
  angle_array=angle_array,
505
- fs=fs,
463
+ sampling_frequency=fs,
464
+ max_frequency_activity=1.75
506
465
  )
507
466
 
508
- segment_meta[df_name][segment_nr] = {
509
- 'time_s': len(angle_array) / fs,
510
- DataColumns.SEGMENT_CAT: segment_cat
511
- }
512
-
513
- if angle_array.size > 0:
514
- angle_extrema_indices, _, _ = extract_angle_extremes(
515
- angle_array=angle_array,
516
- sampling_frequency=fs,
517
- max_frequency_activity=1.75
518
- )
519
-
520
- if len(angle_extrema_indices) > 1: # Requires at minimum 2 peaks
521
- try:
522
- rom = compute_range_of_motion(
523
- angle_array=angle_array,
524
- extrema_indices=angle_extrema_indices,
525
- )
526
- except Exception as e:
527
- # Handle the error, set RoM to NaN, and log the error
528
- print(f"Error computing range of motion for segment {segment_nr}: {e}")
529
- rom = np.array([np.nan])
530
-
531
- try:
532
- pav = compute_peak_angular_velocity(
533
- velocity_array=velocity_array,
534
- angle_extrema_indices=angle_extrema_indices
535
- )
536
- except Exception as e:
537
- # Handle the error, set pav to NaN, and log the error
538
- print(f"Error computing peak angular velocity for segment {segment_nr}: {e}")
539
- pav = np.array([np.nan])
540
-
541
- df_params_segment = pd.DataFrame({
542
- DataColumns.SEGMENT_NR: segment_nr,
543
- DataColumns.RANGE_OF_MOTION: rom,
544
- DataColumns.PEAK_VELOCITY: pav
545
- })
546
-
547
- arm_swing_quantified[df_name].append(df_params_segment)
548
-
549
- arm_swing_quantified[df_name] = pd.concat(arm_swing_quantified[df_name], ignore_index=True)
467
+ if len(angle_extrema_indices) > 1: # Requires at minimum 2 peaks
468
+ try:
469
+ rom = compute_range_of_motion(
470
+ angle_array=angle_array,
471
+ extrema_indices=angle_extrema_indices,
472
+ )
473
+ except Exception as e:
474
+ # Handle the error, set RoM to NaN, and log the error
475
+ print(f"Error computing range of motion for segment {segment_nr}: {e}")
476
+ rom = np.array([np.nan])
477
+
478
+ try:
479
+ pav = compute_peak_angular_velocity(
480
+ velocity_array=velocity_array,
481
+ angle_extrema_indices=angle_extrema_indices
482
+ )
483
+ except Exception as e:
484
+ # Handle the error, set pav to NaN, and log the error
485
+ print(f"Error computing peak angular velocity for segment {segment_nr}: {e}")
486
+ pav = np.array([np.nan])
487
+
488
+ df_params_segment = pd.DataFrame({
489
+ DataColumns.SEGMENT_NR: segment_nr,
490
+ DataColumns.RANGE_OF_MOTION: rom,
491
+ DataColumns.PEAK_VELOCITY: pav
492
+ })
493
+
494
+ arm_swing_quantified.append(df_params_segment)
495
+
496
+ arm_swing_quantified = pd.concat(arm_swing_quantified, ignore_index=True)
550
497
 
551
- return {df_name: arm_swing_quantified[df_name] for df_name in dfs_to_quantify}, segment_meta
498
+ return arm_swing_quantified, segment_meta
552
499
 
553
500
 
554
501
  def aggregate_arm_swing_params(df_arm_swing_params: pd.DataFrame, segment_meta: dict, aggregates: List[str] = ['median']) -> dict:
@@ -1,5 +1,3 @@
1
- import tsdf
2
- import json
3
1
  import pandas as pd
4
2
  import numpy as np
5
3
  from pathlib import Path
@@ -12,7 +10,7 @@ from paradigma.config import TremorConfig
12
10
  from paradigma.feature_extraction import compute_mfccs, compute_power_in_bandwidth, compute_total_power, extract_frequency_peak, \
13
11
  extract_tremor_power
14
12
  from paradigma.segmenting import tabulate_windows, WindowedDataExtractor
15
- from paradigma.util import get_end_iso8601, write_df_data, read_metadata, aggregate_parameter
13
+ from paradigma.util import aggregate_parameter
16
14
 
17
15
 
18
16
  def extract_tremor_features(df: pd.DataFrame, config: TremorConfig) -> pd.DataFrame:
@@ -182,25 +180,38 @@ def aggregate_tremor(df: pd.DataFrame, config: TremorConfig):
182
180
  df_filtered = df.loc[df.pred_arm_at_rest == 1]
183
181
  nr_windows_rest = df_filtered.shape[0] # number of windows without non-tremor arm movement
184
182
 
183
+ if nr_windows_rest == 0: # if no windows without non-tremor arm movement are detected
184
+ raise Warning('No windows without non-tremor arm movement are detected.')
185
+
185
186
  # calculate tremor time
186
- perc_windows_tremor= np.sum(df_filtered['pred_tremor_checked']) / nr_windows_rest * 100 # as percentage of total measured time without non-tremor arm movement
187
+ n_windows_tremor = np.sum(df_filtered['pred_tremor_checked'])
188
+ perc_windows_tremor = n_windows_tremor / nr_windows_rest * 100 # as percentage of total measured time without non-tremor arm movement
187
189
 
188
- # calculate aggregated tremor power measures
189
- tremor_power = df_filtered.loc[df_filtered['pred_tremor_checked'] == 1, 'tremor_power']
190
- tremor_power = np.log10(tremor_power+1) # convert to log scale
191
- aggregated_tremor_power = {}
190
+ aggregated_tremor_power = {} # initialize dictionary to store aggregated tremor power measures
192
191
 
193
- for aggregate in config.aggregates_tremor_power:
194
- aggregate_name = f"{aggregate}_tremor_power"
195
- if aggregate == 'mode':
196
- # calculate modal tremor power
197
- bin_edges = np.linspace(0, 6, 301)
198
- kde = gaussian_kde(tremor_power)
199
- kde_values = kde(bin_edges)
200
- max_index = np.argmax(kde_values)
201
- aggregated_tremor_power['modal_tremor_power'] = bin_edges[max_index]
202
- else: # calculate te other aggregates (e.g. median and 90th percentile) of tremor power
203
- aggregated_tremor_power[aggregate_name] = aggregate_parameter(tremor_power, aggregate)
192
+ if n_windows_tremor == 0: # if no tremor is detected, the tremor power measures are set to NaN
193
+
194
+ aggregated_tremor_power['median_tremor_power'] = np.nan
195
+ aggregated_tremor_power['modal_tremor_power'] = np.nan
196
+ aggregated_tremor_power['90p_tremor_power'] = np.nan
197
+
198
+ else:
199
+
200
+ # calculate aggregated tremor power measures
201
+ tremor_power = df_filtered.loc[df_filtered['pred_tremor_checked'] == 1, 'tremor_power']
202
+ tremor_power = np.log10(tremor_power+1) # convert to log scale
203
+
204
+ for aggregate in config.aggregates_tremor_power:
205
+ aggregate_name = f"{aggregate}_tremor_power"
206
+ if aggregate == 'mode':
207
+ # calculate modal tremor power
208
+ bin_edges = np.linspace(0, 6, 301)
209
+ kde = gaussian_kde(tremor_power)
210
+ kde_values = kde(bin_edges)
211
+ max_index = np.argmax(kde_values)
212
+ aggregated_tremor_power['modal_tremor_power'] = bin_edges[max_index]
213
+ else: # calculate the other aggregates (e.g. median and 90th percentile) of tremor power
214
+ aggregated_tremor_power[aggregate_name] = aggregate_parameter(tremor_power, aggregate)
204
215
 
205
216
  # store aggregates in json format
206
217
  d_aggregates = {
@@ -246,13 +257,14 @@ def extract_spectral_domain_features(data: np.ndarray, config) -> pd.DataFrame:
246
257
 
247
258
  # Initialize parameters
248
259
  sampling_frequency = config.sampling_frequency
249
- segment_length_s = config.segment_length_s
260
+ segment_length_psd_s = config.segment_length_psd_s
261
+ segment_length_spectrogram_s = config.segment_length_spectrogram_s
250
262
  overlap_fraction = config.overlap_fraction
251
263
  spectral_resolution = config.spectral_resolution
252
264
  window_type = 'hann'
253
265
 
254
266
  # Compute the power spectral density
255
- segment_length_n = sampling_frequency * segment_length_s
267
+ segment_length_n = sampling_frequency * segment_length_psd_s
256
268
  overlap_n = segment_length_n * overlap_fraction
257
269
  window = signal.get_window(window_type, segment_length_n, fftbins=False)
258
270
  nfft = sampling_frequency / spectral_resolution
@@ -269,8 +281,24 @@ def extract_spectral_domain_features(data: np.ndarray, config) -> pd.DataFrame:
269
281
  axis=1
270
282
  )
271
283
 
272
- # Compute total power in the PSD (over the three axes)
284
+ # Compute the spectrogram
285
+ segment_length_n = sampling_frequency * segment_length_spectrogram_s
286
+ overlap_n = segment_length_n * overlap_fraction
287
+ window = signal.get_window(window_type, segment_length_n)
288
+
289
+ f, t, S1 = signal.stft(
290
+ x=data,
291
+ fs=sampling_frequency,
292
+ window=window,
293
+ nperseg=segment_length_n,
294
+ noverlap=overlap_n,
295
+ boundary=None,
296
+ axis=1
297
+ )
298
+
299
+ # Compute total power in the PSD and the total spectrogram (summed over the three axes)
273
300
  total_psd = compute_total_power(psd)
301
+ total_spectrogram = np.sum(np.abs(S1)*sampling_frequency, axis=2)
274
302
 
275
303
  # Compute the MFCC's
276
304
  config.mfcc_low_frequency = config.fmin_mfcc
@@ -279,8 +307,10 @@ def extract_spectral_domain_features(data: np.ndarray, config) -> pd.DataFrame:
279
307
  config.mfcc_n_coefficients = config.n_coefficients_mfcc
280
308
 
281
309
  mfccs = compute_mfccs(
282
- total_power_array=total_psd,
310
+ total_power_array=total_spectrogram,
283
311
  config=config,
312
+ total_power_type='spectrogram',
313
+ rounding_method='round',
284
314
  multiplication_factor=1
285
315
  )
286
316
 
@@ -191,7 +191,7 @@ def preprocess_imu_data(df: pd.DataFrame, config: IMUConfig, sensor: str, watch_
191
191
  )
192
192
 
193
193
  # Invert the IMU data if the watch was worn on the right wrist
194
- df = invert_watch_side(df, watch_side)
194
+ df = invert_watch_side(df, watch_side, sensor)
195
195
 
196
196
  if sensor in ['accelerometer', 'both']:
197
197
 
paradigma/util.py CHANGED
@@ -285,7 +285,7 @@ def convert_units_gyroscope(data: np.ndarray, units: str) -> np.ndarray:
285
285
  raise ValueError(f"Unsupported unit: {units}")
286
286
 
287
287
 
288
- def invert_watch_side(df: pd.DataFrame, side: str) -> np.ndarray:
288
+ def invert_watch_side(df: pd.DataFrame, side: str, sensor='both') -> np.ndarray:
289
289
  """
290
290
  Invert the data based on the watch side.
291
291
 
@@ -295,6 +295,8 @@ def invert_watch_side(df: pd.DataFrame, side: str) -> np.ndarray:
295
295
  The data.
296
296
  side : str
297
297
  The watch side (left or right).
298
+ sensor: str
299
+ The sensor(s) to invert: 'accelerometer', 'gyroscope', or 'both'
298
300
 
299
301
  Returns
300
302
  -------
@@ -304,10 +306,15 @@ def invert_watch_side(df: pd.DataFrame, side: str) -> np.ndarray:
304
306
  """
305
307
  if side not in ["left", "right"]:
306
308
  raise ValueError(f"Unsupported side: {side}")
309
+ if sensor not in ['accelerometer', 'gyroscope', 'both']:
310
+ raise ValueError(f"Unsupported sensor: {sensor}")
311
+
307
312
  elif side == "right":
308
- df[DataColumns.GYROSCOPE_Y] *= -1
309
- df[DataColumns.GYROSCOPE_Z] *= -1
310
- df[DataColumns.ACCELEROMETER_X] *= -1
313
+ if sensor in ['gyroscope', 'both']:
314
+ df[DataColumns.GYROSCOPE_Y] *= -1
315
+ df[DataColumns.GYROSCOPE_Z] *= -1
316
+ if sensor in ['accelerometer', 'both']:
317
+ df[DataColumns.ACCELEROMETER_X] *= -1
311
318
 
312
319
  return df
313
320
 
@@ -1,3 +1,5 @@
1
+ # License
2
+
1
3
  Apache License
2
4
  Version 2.0, January 2004
3
5
  http://www.apache.org/licenses/
@@ -0,0 +1,135 @@
1
+ Metadata-Version: 2.3
2
+ Name: paradigma
3
+ Version: 0.4.3
4
+ Summary: Paradigma - a toolbox for Digital Biomarkers for Parkinson's Disease
5
+ License: Apache-2.0
6
+ Author: Erik Post
7
+ Author-email: erik.post@radboudumc.nl
8
+ Requires-Python: >=3.11,<4.0
9
+ Classifier: License :: OSI Approved :: Apache Software License
10
+ Classifier: Programming Language :: Python :: 3
11
+ Classifier: Programming Language :: Python :: 3.11
12
+ Classifier: Programming Language :: Python :: 3.12
13
+ Classifier: Programming Language :: Python :: 3.13
14
+ Requires-Dist: pandas (>=2.1.4,<3.0.0)
15
+ Requires-Dist: python-dateutil (>=2.9.0.post0,<3.0.0)
16
+ Requires-Dist: pytype (>=2024.4.11,<2025.0.0)
17
+ Requires-Dist: scikit-learn (>=1.3.2,<1.6.1)
18
+ Requires-Dist: tsdf (>=0.5.2,<0.6.0)
19
+ Description-Content-Type: text/markdown
20
+
21
+ <p align="center">
22
+ <img src="https://raw.githubusercontent.com/biomarkersParkinson/paradigma/main/docs/source/_static/img/paradigma-logo-banner.png" alt="ParaDigMa logo"/>
23
+ </p>
24
+
25
+ | Badges | |
26
+ |:----:|----|
27
+ | **Packages and Releases** | [![Latest release](https://img.shields.io/github/release/biomarkersparkinson/paradigma.svg)](https://github.com/biomarkersparkinson/paradigma/releases/latest) [![PyPI](https://img.shields.io/pypi/v/paradigma.svg)](https://pypi.python.org/pypi/paradigma/) [![Static Badge](https://img.shields.io/badge/RSD-paradigma-lib)](https://research-software-directory.org/software/paradigma) |
28
+ | **DOI** | [![DOI](https://zenodo.org/badge/DOI/10.5281/zenodo.13838392.svg)](https://doi.org/10.5281/zenodo.13838392) |
29
+ | **Build Status** | [![](https://img.shields.io/badge/python-3.10+-blue.svg)](https://www.python.org/downloads/) [![Build and test](https://github.com/biomarkersParkinson/paradigma/actions/workflows/build-and-test.yml/badge.svg)](https://github.com/biomarkersParkinson/paradigma/actions/workflows/build-and-test.yml) [![pages-build-deployment](https://github.com/biomarkersParkinson/paradigma/actions/workflows/pages/pages-build-deployment/badge.svg)](https://github.com/biomarkersParkinson/paradigma/actions/workflows/pages/pages-build-deployment) |
30
+ | **License** | [![GitHub license](https://img.shields.io/github/license/biomarkersParkinson/paradigma)](https://github.com/biomarkersparkinson/paradigma/blob/main/LICENSE) |
31
+ <!-- | **Fairness** | [![fair-software.eu](https://img.shields.io/badge/fair--software.eu-%E2%97%8F%20%20%E2%97%8F%20%20%E2%97%8F%20%20%E2%97%8F%20%20%E2%97%8F-green)](https://fair-software.eu) [![OpenSSF Best Practices](https://bestpractices.coreinfrastructure.org/projects/8083/badge)](https://www.bestpractices.dev/projects/8083) | -->
32
+
33
+ ## Overview
34
+ The Parkinson's disease Digital Markers (ParaDigMa) toolbox is a Python
35
+ software package designed for processing real-life wrist sensor data
36
+ to extract digital measures of motor and non-motor signs of Parkinson's disease (PD).
37
+
38
+ Specifically, the toolbox is designed to process accelerometer, gyroscope and
39
+ photoplethysmography (PPG) signals, collected during passive monitoring in daily life.
40
+ It contains three data processing pipelines: (1) arm swing during gait, (2) tremor,
41
+ and (3) pulse rate. These pipelines are scientifically validated for their
42
+ use in persons with PD. Furthermore, the toolbox contains general functionalities for
43
+ signal processing and feature extraction, such as filtering, peak detection, and
44
+ spectral analysis.
45
+
46
+ The toolbox is accompanied by a set of example scripts and notebooks for
47
+ each processing pipeline that demonstrate how to use the toolbox for extracting
48
+ digital measures. In addition, the toolbox is designed to be modular, enabling
49
+ researchers to easily extend the toolbox with new algorithms and functionalities.
50
+
51
+ ## Features
52
+ The components of ParaDigMa are shown in the diagram below.
53
+
54
+ <p align="center">
55
+ <img src="https://raw.githubusercontent.com/biomarkersParkinson/paradigma/main/docs/source/_static/img/pipeline-architecture.png" alt="Pipeline architeecture"/>
56
+ </p>
57
+ The three colored, shaded columns represent the individual pipelines. Processes of the pipelines are represented by blue ellipses, and input/output data by rectangular boxes. The input/output of each step is indicated by yellow horizontal bars denoting the type of data (e.g., 3. Extracted features). Arrows indicate the sequential order of the processes of the pipeline. <br/> <br/>
58
+ ParaDigMa can best be understood by categorizing the sequential processes:
59
+
60
+ | Process | Description |
61
+ | ---- | ---- |
62
+ | Preprocessing | Preparing raw sensor signals for further processing |
63
+ | Feature extraction | Extracting features based on windowed sensor signals |
64
+ | Classification | Detecting segments of interest using validated classifiers (e.g., gait segments) |
65
+ | Quantification | Extracting specific measures from the detected segments (e.g., arm swing measures) |
66
+ | Aggregation | Aggregating the measures over a specific time period (e.g., week-level aggregates) |
67
+
68
+ <br/>
69
+ ParaDigMa contains the following validated processing pipelines (each using the processes described above):
70
+
71
+ | Pipeline | Input | Output classification | Output quantification | Output week-level aggregation |
72
+ | ---- | ---- | ---- | ---- | ---- |
73
+ | **Arm swing during gait** | Wrist accelerometer and gyroscope data | Gait probability, gait without other arm activities probability | Arm swing range of motion (RoM) | Typical & maximum arm swing RoM |
74
+ | **Tremor** | Wrist gyroscope data | Tremor probability | Tremor power | % tremor time, typical & maximum tremor power |
75
+ | **Pulse rate** | Wrist PPG and accelerometer data | PPG signal quality | Pulse rate | Resting & maximum pulse rate |
76
+
77
+ ## Installation
78
+
79
+ The package is available in PyPI and requires [Python 3.11](https://www.python.org/downloads/) or higher. It can be installed using:
80
+
81
+ ```bash
82
+ pip install paradigma
83
+ ```
84
+
85
+ ## Usage
86
+
87
+ ### Tutorials & documentation
88
+ See our tutorials for example scripts on how to use the toolbox to extract digital measures from wrist sensor signals.
89
+ The API reference contains detailed documentation of all toolbox modules and functions.
90
+ The user guides provide additional information about specific topics (e.g. the required orientation of the wrist sensor).
91
+
92
+ ### Sensor data requirements
93
+ The ParaDigMa toolbox is designed for the analysis of passive monitoring data collected using a wrist sensor in persons with PD.
94
+
95
+ Specific requirements include:
96
+ | Pipeline | Sensor Configuration | Context of Use |
97
+ |------------------------|--------------------------------------------------------------------------------------------------------------------------|------------------------------------------------------------------------------------------------------------|
98
+ | **All** | - Sensor position: wrist-band on most or least affected side (validated for both, but different sensitivity for measuring disease progression for tremor and arm swing during gait). <br> - Sensor orientation: orientation as described in [Coordinate System](https://biomarkersparkinson.github.io/paradigma/guides/coordinate_system.html). | - Population: persons with PD. <br> - Data collection protocol: passive monitoring in daily life. |
99
+ | **Arm swing during gait** | - Accelerometer: minimum sampling rate of 100 Hz, minimum range of ± 4 g. <br> - Gyroscope: minimum sampling rate of 100 Hz, minimum range of ± 1000 degrees/sec. | - Population: no walking aid, no severe dyskinesia in the watch-sided arm. <br> - Compliance: for weekly measures: at least three compliant days (with ≥10 hours of data between 8 am and 10 pm), and at least 2 minutes of arm swing. |
100
+ | **Tremor** | - Gyroscope: minimum sampling rate of 100 Hz, minimum range of ± 1000 degrees/sec. | - Compliance: for weekly measures: at least three compliant days (with ≥10 hours of data between 8 am and 10 pm). |
101
+ | **Pulse rate** | - PPG*: minimum sampling rate of 30 Hz, green LED. <br> - Accelerometer: minimum sampling rate of 100 Hz, minimum range of ± 4 g. | - Population: no rhythm disorders (e.g. atrial fibrillation, atrial flutter). <br> - Compliance: for weekly measures: minimum average of 12 hours of data per day. |
102
+
103
+ \* The processing of PPG signals is currently based on the blood volume pulse (arbitrary units) obtained from the Verily Study Watch, and we are currently testing the applicability of the pipeline to other PPG devices.
104
+
105
+ > [!WARNING]
106
+ > While the toolbox is designed to work on any wrist sensor device which fulfills the requirements,
107
> we have currently verified its performance on data from the Gait-up Physilog 4 (arm swing during gait & tremor) and the Verily Study Watch (all pipelines). Furthermore, the specifications above are the minimally validated requirements. For example, while ParaDigMa works with accelerometer and gyroscope data sampled at 50 Hz, its effect on subsequent processes has not been empirically validated.
108
+ <br/>
109
+
110
+ We have included support for [TSDF](https://biomarkersparkinson.github.io/tsdf/) as format for loading and storing sensor data. TSDF enables efficient data storage with added metadata. However, ParaDigMa does not require a particular method of data storage and retrieval. Please see our tutorial [Data preparation](https://biomarkersparkinson.github.io/paradigma/tutorials/data_preparation.html) for examples of loading TSDF and other data formats into memory, and for preparing raw sensor data as input for the processing pipelines.
111
+
112
+ ## Scientific validation
113
+
114
+ The pipelines were developed and validated using data from the Parkinson@Home Validation study [[Evers et al. 2020]](https://pmc.ncbi.nlm.nih.gov/articles/PMC7584982/)
115
+ and the Personalized Parkinson Project [[Bloem et al. 2019]](https://pubmed.ncbi.nlm.nih.gov/31315608/). Details and validation of the different pipelines shall be shared in upcoming scientific publications.
116
+
117
+ ## Contributing
118
+
119
+ We welcome contributions! Please check out our [contributing guidelines](https://biomarkersparkinson.github.io/paradigma/contributing.html).
120
+ Please note that this project is released with a [Code of Conduct](https://biomarkersparkinson.github.io/paradigma/conduct.html). By contributing to this project, you agree to abide by its terms.
121
+
122
+ ## License
123
+
124
+ It is licensed under the terms of the Apache License 2.0 license. See [License](https://biomarkersparkinson.github.io/paradigma/license.html) for more details.
125
+
126
+ ## Acknowledgements
127
+
128
+ The core team of ParaDigMa consists of Erik Post, Kars Veldkamp, Nienke Timmermans, Diogo Coutinho Soriano, Peter Kok, Vedran Kasalica and Luc Evers.
129
+ Advisors to the project are Max Little, Jordan Raykov, Twan van Laarhoven, Hayriye Cagnan, and Bas Bloem.
130
+ The initial release of ParaDigMa was funded by the Michael J Fox Foundation (grant #020425) and the Dutch Research Council (grant #ASDI.2020.060 & grant #2023.010).
131
+ ParaDigMa was created with [`cookiecutter`](https://cookiecutter.readthedocs.io/en/latest/) and the `py-pkgs-cookiecutter` [template](https://github.com/py-pkgs/py-pkgs-cookiecutter).
132
+
133
+ ## Contact
134
+ Questions, issues or suggestions about ParaDigMa? Please reach out to erik.post@radboudumc.nl, or open an issue in the GitHub repository.
135
+
@@ -2,21 +2,21 @@ paradigma/__init__.py,sha256=vCLqo7vOEgcnYs10gUVYvEFfi8y-jBi7w1YKRoqn95k,127
2
2
  paradigma/assets/gait_detection_clf_package.pkl,sha256=8jCbuM_4dkilSjOEk9ss7bJbSppgzXe72y0X4BCnzCU,11497247
3
3
  paradigma/assets/gait_filtering_clf_package.pkl,sha256=lAaLyhmXdV4X_drmYt0EM6wGwSo80yhpxtncWGq4RfQ,3915
4
4
  paradigma/assets/ppg_quality_clf_package.pkl,sha256=vUcM4v8gZwWAmDVK7E4UcHhVnhlEg27RSB71oPGloSc,1292
5
- paradigma/assets/tremor_detection_clf_package.pkl,sha256=bZ7xrqTsCLg0CKBxVejgnnh8hnKSqNsf-bSQG7D2aqY,1613
5
+ paradigma/assets/tremor_detection_clf_package.pkl,sha256=S-KsK1EcUBJX6oGGBo8GqU0AhNZThA6Qe-cs0QPcWw4,1475
6
6
  paradigma/classification.py,sha256=sBJSePvwHZNPUQuLdx-pncfnDzMq-1naomsCxSJneWY,2921
7
- paradigma/config.py,sha256=4F6FsY5HI6d-L_vB6zehyTAJK3X-U0F6IrLwxN9M-dw,11102
7
+ paradigma/config.py,sha256=72KkIEVV1v5dD9ZJDPI-mFNvorA8nBADEcA0A-jviHU,11163
8
8
  paradigma/constants.py,sha256=JlrD4Zx66g7myQALYAc4Gw_y6yW5EipZuvwj9_fjjpI,3543
9
- paradigma/feature_extraction.py,sha256=pXfVoiuElrMzLCCZu1gj1uYT5v7vb3vULUjkxeJNWXQ,34566
9
+ paradigma/feature_extraction.py,sha256=v_AwbBmvYo21XbULkOV6Ob_sZ1iboyXdDRRAsmCBh-Q,36061
10
10
  paradigma/pipelines/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
11
- paradigma/pipelines/gait_pipeline.py,sha256=2mk7Tyfd5rXV2jGZamX5awpRnyJAsrq4kiJoeZVn4fM,29064
11
+ paradigma/pipelines/gait_pipeline.py,sha256=guz6RZlM0muarxG_GtOMf117XqV0YMNPrK2KlyIP4Jg,26426
12
12
  paradigma/pipelines/heart_rate_pipeline.py,sha256=0-D9KcW9nwE5sgXsWHONkeKrsX6qZ5BYqjDttoffwL8,17726
13
13
  paradigma/pipelines/heart_rate_utils.py,sha256=aV2mTMWrFWHZD0KpHqy3IIC1onZykbppyp7_OUWxFTU,26764
14
- paradigma/pipelines/tremor_pipeline.py,sha256=hPqLf26MRperwlqpNgrwayByoP3aWwnWc-tQMSJNZGw,13314
15
- paradigma/preprocessing.py,sha256=DQ-lgmta1tng0neuWftwmXG3yesW6dxOsYvjSV2OFRk,13498
14
+ paradigma/pipelines/tremor_pipeline.py,sha256=B5uZB3IP5pwb30PE4xztRbdYmZt4JQj193BRksC9N94,14590
15
+ paradigma/preprocessing.py,sha256=-Vt_awvJe8MGqXACqWp7R6LWq6XFOcAVUyd0anNaytc,13506
16
16
  paradigma/segmenting.py,sha256=Jrz2JQX5eSfR9jBfpBhc6QV0SFmPVT5O6T8MyL0sdSw,13874
17
17
  paradigma/testing.py,sha256=DSbWeYl5HuZ-bNyOKwgwMHQGG8KlTabvGTR1Yzd-9CY,17955
18
- paradigma/util.py,sha256=6G-z6OXv8T2PS8RFjkd6IfDywdzTjvSl1pfNwqnT8i0,14431
19
- paradigma-0.4.2.dist-info/LICENSE,sha256=Lda8kIVC2kbmlSeYaUWwUwV75Q-q31idYvo18HUTfiw,9807
20
- paradigma-0.4.2.dist-info/METADATA,sha256=6fv07UQ2NQxQB8aE9vE3ECHIYqc65Ps8vl4KUPZCgIc,6416
21
- paradigma-0.4.2.dist-info/WHEEL,sha256=IYZQI976HJqqOpQU6PHkJ8fb3tMNBFjg-Cn-pwAbaFM,88
22
- paradigma-0.4.2.dist-info/RECORD,,
18
+ paradigma/util.py,sha256=MEoe0zWigxwqy6aVd8zKdHifiuUTc9Mqyrh4xsy1oHY,14759
19
+ paradigma-0.4.3.dist-info/LICENSE,sha256=bKdwckQhMGrkC7Ug3zvZpI556dNG0vQiPYZWPDRD7rs,9818
20
+ paradigma-0.4.3.dist-info/METADATA,sha256=RFKmsnr-p0coG1lDKJVpFXP2Wca9O_8WPKlUenmW0_E,11288
21
+ paradigma-0.4.3.dist-info/WHEEL,sha256=XbeZDeTWKc1w7CSIyre5aMDU_-PohRwTQceYnisIYYY,88
22
+ paradigma-0.4.3.dist-info/RECORD,,
@@ -1,4 +1,4 @@
1
1
  Wheel-Version: 1.0
2
- Generator: poetry-core 2.0.1
2
+ Generator: poetry-core 2.1.1
3
3
  Root-Is-Purelib: true
4
4
  Tag: py3-none-any
@@ -1,138 +0,0 @@
1
- Metadata-Version: 2.3
2
- Name: paradigma
3
- Version: 0.4.2
4
- Summary: Paradigma - a toolbox for Digital Biomarkers for Parkinson's Disease
5
- License: Apache-2.0
6
- Author: Erik Post
7
- Author-email: erik.post@radboudumc.nl
8
- Requires-Python: >=3.11,<4.0
9
- Classifier: License :: OSI Approved :: Apache Software License
10
- Classifier: Programming Language :: Python :: 3
11
- Classifier: Programming Language :: Python :: 3.11
12
- Classifier: Programming Language :: Python :: 3.12
13
- Classifier: Programming Language :: Python :: 3.13
14
- Requires-Dist: pandas (>=2.1.4,<3.0.0)
15
- Requires-Dist: python-dateutil (>=2.9.0.post0,<3.0.0)
16
- Requires-Dist: pytype (>=2024.4.11,<2025.0.0)
17
- Requires-Dist: scikit-learn (>=1.3.2,<1.6.1)
18
- Requires-Dist: tsdf (>=0.5.2,<0.6.0)
19
- Description-Content-Type: text/markdown
20
-
21
- <p align="center">
22
- <img src="https://raw.githubusercontent.com/biomarkersParkinson/paradigma/main/docs/source/_static/img/paradigma-logo-banner.png" alt="ParaDigMa logo"/>
23
- </p>
24
-
25
- | Badges | |
26
- |:----:|----|
27
- | **Packages and Releases** | [![Latest release](https://img.shields.io/github/release/biomarkersparkinson/paradigma.svg)](https://github.com/biomarkersparkinson/paradigma/releases/latest) [![PyPI](https://img.shields.io/pypi/v/paradigma.svg)](https://pypi.python.org/pypi/paradigma/) [![Static Badge](https://img.shields.io/badge/RSD-paradigma-lib)](https://research-software-directory.org/software/paradigma) |
28
- | **DOI** | [![DOI](https://zenodo.org/badge/DOI/10.5281/zenodo.13838392.svg)](https://doi.org/10.5281/zenodo.13838392) |
29
- | **Build Status** | [![](https://img.shields.io/badge/python-3.10+-blue.svg)](https://www.python.org/downloads/) [![Build and test](https://github.com/biomarkersParkinson/paradigma/actions/workflows/build-and-test.yml/badge.svg)](https://github.com/biomarkersParkinson/paradigma/actions/workflows/build-and-test.yml) [![pages-build-deployment](https://github.com/biomarkersParkinson/paradigma/actions/workflows/pages/pages-build-deployment/badge.svg)](https://github.com/biomarkersParkinson/paradigma/actions/workflows/pages/pages-build-deployment) |
30
- | **License** | [![GitHub license](https://img.shields.io/github/license/biomarkersParkinson/paradigma)](https://github.com/biomarkersparkinson/paradigma/blob/main/LICENSE) |
31
- <!-- | **Fairness** | [![fair-software.eu](https://img.shields.io/badge/fair--software.eu-%E2%97%8F%20%20%E2%97%8F%20%20%E2%97%8F%20%20%E2%97%8F%20%20%E2%97%8F-green)](https://fair-software.eu) [![OpenSSF Best Practices](https://bestpractices.coreinfrastructure.org/projects/8083/badge)](https://www.bestpractices.dev/projects/8083) | -->
32
-
33
- ## Introduction
34
- The Parkinson's Disease Digital Markers (ParaDigMa) toolbox is a Python
35
- software package designed for processing passively collected wrist
36
- sensor data to extract digital measures of motor and non-motor signs
37
- of Parkinson's disease (PD).
38
-
39
- Specifically, the toolbox contains three data processing pipelines:
40
- (1) arm swing during gait, (2) tremor, and (3) heart rate analysis.
41
- Furthermore, the toolbox contains general functionalities for signal
42
- processing and feature extraction, such as filtering, peak detection,
43
- and spectral analysis. The toolbox is designed to be user-friendly and
44
- modular, enabling researchers to easily extend the toolbox with new
45
- algorithms and functionalities. The toolbox is accompanied by a set of
46
- example scripts and notebooks for each domain that demonstrate how to use
47
- the toolbox for processing sensor data and extracting digital measures.
48
-
49
- It contains functionalities for processing the following sensor types:
50
-
51
- - Inertial Measurement Units (accelerometer, gyroscope)
52
- - Photoplethysmogram (PPG)
53
-
54
- ## More about ParaDigMa
55
- The components of ParaDigMa are visually shown in the diagram below.
56
-
57
- <p align="center">
58
- <img src="https://raw.githubusercontent.com/biomarkersParkinson/paradigma/main/docs/source/_static/img/pipeline-architecture.png" alt="Pipeline architecture"/>
59
- </p>
60
-
61
- #### Processes
62
- ParaDigMa can best be understood by categorizing the sequential processes:
63
-
64
- | Process | Description |
65
- | ---- | ---- |
66
- | Preprocessing | Ensuring that the sensor data is ready for further processing |
67
- | Feature extraction | Creating features based on windowed views of the timestamps |
68
- | Classification | Making predictions using developed and validated classifiers |
69
- | Quantification | Selecting specific features of interest |
70
- | Aggregation | Aggregating the features at a specified time-level |
71
-
72
- #### Domain requirements
73
- ParaDigMa can be used to extract aggregations related to a single or multiple domain(s). Each domain has its specific data requirements. Strict requirements for the domain are marked by X, soft requirements (for some additional functionalities) are marked by O.
74
-
75
- | | Gait | Tremor | Heart Rate |
76
- |----------|:-----------:|:-----------:|:-----------:|
77
- | **Accelerometer** | X | | O |
78
- | **Gyroscope** | X | X | |
79
- | **PPG** | | | X |
80
-
81
-
82
-
83
- ## Installation
84
-
85
- The package is available in PyPi and requires [Python 3.10](https://www.python.org/downloads/) or higher. It can be installed using:
86
-
87
- ```bash
88
- pip install paradigma
89
- ```
90
-
91
- ## Usage
92
-
93
- See our [extended documentation](https://biomarkersparkinson.github.io/paradigma/).
94
-
95
- ## Development
96
-
97
- ### Installation
98
-
99
- The package requires Python 3.11 or higher. Use [Poetry](https://python-poetry.org/docs/#installation) to set up the environment and install the dependencies:
100
-
101
- ```bash
102
- poetry install
103
- ```
104
-
105
- ### Testing
106
-
107
- ```bash
108
- poetry run pytest
109
- ```
110
-
111
- ### Type checking
112
-
113
- ```bash
114
- poetry run pytype .
115
- ```
116
-
117
- ### Building documentation
118
-
119
- ```bash
120
- poetry run make html --directory docs/
121
- ```
122
-
123
- ## Contributing
124
-
125
- Interested in contributing? Check out the contributing guidelines. Please note that this project is released with a Code of Conduct. By contributing to this project, you agree to abide by its terms.
126
-
127
- ## License
128
-
129
- The core team of ParaDigMa consists of Erik Post, Kars Veldkamp, Nienke Timmermans, Diogo Coutinho Soriano, Luc Evers,
130
- Peter Kok and Vedran Kasalica. Advisors to the project are Max Little, Jordan Raykov, Twan van Laarhoven, Hayriye Cagnan, and Bas Bloem. It is licensed under the terms of the Apache License 2.0 license.
131
-
132
- ## Credits
133
-
134
- ParaDigMa was created with [`cookiecutter`](https://cookiecutter.readthedocs.io/en/latest/) and the `py-pkgs-cookiecutter` [template](https://github.com/py-pkgs/py-pkgs-cookiecutter).
135
-
136
- ## Contact
137
-
138
- For more information or questions about ParaDigMa, please reach out to erik.post@radboudumc.nl.