paradigma 1.0.4__py3-none-any.whl → 1.1.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,5 +1,3 @@
- from typing import List, Tuple
-
  import numpy as np
  import pandas as pd
  from scipy.integrate import cumulative_trapezoid
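
Dropping the `typing` import goes hand in hand with the annotation changes further down in this diff (`List[int]` → `list[int]`, `Tuple[...]` → `tuple[...]`): since Python 3.9 (PEP 585) the built-in `list` and `tuple` can be subscripted directly. A minimal illustration, not taken from the package:

```python
from typing import List, Tuple  # no longer needed on Python >= 3.9


def old_style(values: List[int]) -> Tuple[int, int]:
    return min(values), max(values)


def new_style(values: list[int]) -> tuple[int, int]:
    return min(values), max(values)


print(old_style([3, 1, 2]))  # (1, 3)
print(new_style([3, 1, 2]))  # (1, 3)
```
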
@@ -43,7 +41,8 @@ def compute_statistics(
  Raises
  ------
  ValueError
- If the specified `statistic` is not supported or if the input data has an invalid shape.
+ If the specified `statistic` is not supported or if the input data
+ has an invalid shape.
  """
  if statistic not in [
  "mean",
@@ -121,13 +120,14 @@ def compute_power_in_bandwidth(
  fmax: float,
  include_max: bool = True,
  spectral_resolution: float = 1,
- cumulative_sum_method: str = "trapz",
+ cumulative_sum_method: str = "trapezoid",
  ) -> np.ndarray:
  """
  Compute the logarithmic power within specified frequency bands for each sensor axis.

- This function integrates the power spectral density (PSD) over user-defined frequency
- bands and computes the logarithm of the resulting power for each axis of the sensor.
+ This function integrates the power spectral density (PSD) over
+ user-defined frequency bands and computes the logarithm of the
+ resulting power for each axis of the sensor.

  Parameters
  ----------
@@ -135,8 +135,9 @@ def compute_power_in_bandwidth(
  A 1D array of shape (n_frequencies,) containing the frequencies corresponding
  to the PSD values.
  psd : np.ndarray
- A 2D array of shape (n_windows, n_frequencies) or 3D array of shape (n_windows, n_frequencies, n_axes)
- representing the power spectral density (PSD) of the sensor data.
+ A 2D array of shape (n_windows, n_frequencies) or 3D array of shape
+ (n_windows, n_frequencies, n_axes) representing the power spectral
+ density (PSD) of the sensor data.
  fmin : float
  The lower bound of the frequency band in Hz.
  fmax : float
@@ -146,9 +147,10 @@ def compute_power_in_bandwidth(
  spectral_resolution : float, optional
  The spectral resolution of the PSD in Hz (default: 1).
  cumulative_sum_method : str, optional
- The method used to integrate the PSD over the frequency band. Supported values are:
- - 'trapz': Trapezoidal rule.
- - 'sum': Simple summation (default: 'trapz').
+ The method used to integrate the PSD over the frequency band
+ (default: 'trapezoid'). Supported values are:
+ - 'trapezoid': Trapezoidal rule.
+ - 'sum': Simple summation.

  Returns
  -------
@@ -168,21 +170,22 @@ def compute_power_in_bandwidth(
  elif psd.ndim == 3:
  masked_psd = psd[:, band_mask, :]

- if cumulative_sum_method == "trapz":
- band_power = spectral_resolution * np.trapz(
+ if cumulative_sum_method == "trapezoid":
+ band_power = spectral_resolution * np.trapezoid(
  masked_psd, freqs[band_mask], axis=1
  )
  elif cumulative_sum_method == "sum":
  band_power = spectral_resolution * np.sum(masked_psd, axis=1)
  else:
- raise ValueError("cumulative_sum_method must be 'trapz' or 'sum'.")
+ raise ValueError("cumulative_sum_method must be 'trapezoid' or 'sum'.")

  return band_power


  def compute_total_power(psd: np.ndarray) -> np.ndarray:
  """
- Compute the total power by summing the power spectral density (PSD) across frequency bins.
+ Compute the total power by summing the power spectral density (PSD)
+ across frequency bins.

  This function calculates the total power for each window and each sensor axis by
  summing the PSD values across all frequency bins.
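
The new `cumulative_sum_method="trapezoid"` default and the `np.trapz` → `np.trapezoid` call above track NumPy's 2.x rename of the trapezoidal integration routine; the computation itself is unchanged. A small sketch of the band-power step with made-up PSD values in place of real sensor data:

```python
import numpy as np

# Toy PSD: 4 windows x 61 frequency bins (0-30 Hz at 0.5 Hz resolution).
rng = np.random.default_rng(0)
freqs = np.arange(0, 30.5, 0.5)
psd = rng.random((4, freqs.size))

fmin, fmax = 3.0, 7.0
band_mask = (freqs >= fmin) & (freqs <= fmax)
masked_psd = psd[:, band_mask]

# np.trapezoid is the NumPy 2.x name for np.trapz; both integrate y over x.
band_power_trapezoid = np.trapezoid(masked_psd, freqs[band_mask], axis=1)
band_power_sum = np.sum(masked_psd, axis=1)  # the 'sum' alternative

print(band_power_trapezoid.shape)  # (4,)
```
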
@@ -209,7 +212,10 @@ def extract_tremor_power(
  fmax: float = 7,
  spectral_resolution: float = 0.25,
  ) -> np.ndarray:
- """Computes the tremor power (1.25 Hz around the peak within the tremor frequency band)
+ """Computes the tremor power.
+
+ Tremor power is 1.25 Hz around the peak within the tremor
+ frequency band.

  Parameters
  ----------
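
The reworded summary defines tremor power as the power in a 1.25 Hz band around the PSD peak inside the tremor frequency band. The function body is not part of this hunk, so the helper below is only one plausible reading of that definition (the 3 Hz lower bound is an assumption; only `fmax=7` is visible above):

```python
import numpy as np


def tremor_power_sketch(freqs, psd_window, fmin=3.0, fmax=7.0):
    """Illustrative only: integrate a 1.25 Hz band centred on the PSD peak
    found between fmin and fmax. Not the package implementation."""
    band = (freqs >= fmin) & (freqs <= fmax)
    peak_freq = freqs[band][np.argmax(psd_window[band])]
    around_peak = (freqs >= peak_freq - 0.625) & (freqs <= peak_freq + 0.625)
    return np.trapezoid(psd_window[around_peak], freqs[around_peak])


freqs = np.arange(0, 15, 0.25)
psd_window = np.exp(-((freqs - 5.0) ** 2))  # synthetic peak near 5 Hz
print(round(tremor_power_sketch(freqs, psd_window), 3))
```
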
@@ -254,10 +260,12 @@ def compute_dominant_frequency(
  fmax: float | None = None,
  ) -> np.ndarray:
  """
- Compute the dominant frequency within a specified frequency range for each window and sensor axis.
+ Compute the dominant frequency within a specified frequency range for
+ each window and sensor axis.

- The dominant frequency is defined as the frequency corresponding to the maximum power in the
- power spectral density (PSD) within the specified range.
+ The dominant frequency is defined as the frequency corresponding to the
+ maximum power in the power spectral density (PSD) within the specified
+ range.

  Parameters
  ----------
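
As the rewrapped docstring states, the dominant frequency is simply the frequency of the maximum PSD value inside the selected range. A toy illustration of that definition:

```python
import numpy as np

freqs = np.linspace(0, 10, 101)
# Two windows with peaks near 2 Hz and 6 Hz respectively.
psd = np.vstack([
    np.exp(-((freqs - 2.0) ** 2) / 0.1),
    np.exp(-((freqs - 6.0) ** 2) / 0.1),
])

fmin, fmax = 1.0, 8.0
in_range = (freqs >= fmin) & (freqs <= fmax)
dominant = freqs[in_range][np.argmax(psd[:, in_range], axis=1)]
print(dominant)  # approximately [2. 6.]
```
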
@@ -275,10 +283,10 @@ def compute_dominant_frequency(
  Returns
  -------
  np.ndarray
- - If `psd` is 2D: A 1D array of shape (n_windows,) containing the dominant frequency
- for each window.
- - If `psd` is 3D: A 2D array of shape (n_windows, n_axes) containing the dominant
- frequency for each window and each axis.
+ - If `psd` is 2D: A 1D array of shape (n_windows,) containing the
+ dominant frequency for each window.
+ - If `psd` is 3D: A 2D array of shape (n_windows, n_axes) containing
+ the dominant frequency for each window and each axis.

  Raises
  ------
@@ -286,7 +294,8 @@ def compute_dominant_frequency(
  If `fmin` or `fmax` is outside the bounds of the `freqs` array.
  If `psd` is not a 2D or 3D array.
  """
- # Set default values for fmin and fmax to the minimum and maximum frequencies if not provided
+ # Set default values for fmin and fmax to the minimum and maximum
+ # frequencies if not provided
  if fmin is None:
  fmin = freqs[0]
  if fmax is None:
@@ -331,7 +340,8 @@ def extract_frequency_peak(
  fmax: float | None = None,
  include_max: bool = True,
  ) -> pd.Series:
- """Extract the frequency of the peak in the power spectral density within the specified frequency band.
+ """Extract the frequency of the peak in the power spectral density
+ within the specified frequency band.

  Parameters
  ----------
@@ -340,9 +350,11 @@ def extract_frequency_peak(
  psd: pd.Series
  The total power spectral density of the gyroscope signal
  fmin: float
- The lower bound of the frequency band in Hz (default: None). If not provided, the minimum frequency is used.
+ The lower bound of the frequency band in Hz (default: None).
+ If not provided, the minimum frequency is used.
  fmax: float
- The upper bound of the frequency band in Hz (default: None). If not provided, the maximum frequency is used.
+ The upper bound of the frequency band in Hz (default: None).
+ If not provided, the maximum frequency is used.
  include_max: bool
  Whether to include the maximum frequency in the search range (default: True)

@@ -373,7 +385,8 @@ def compute_relative_power(
  freqs: np.ndarray, psd: np.ndarray, config: PulseRateConfig
  ) -> list:
  """
- Calculate relative power within the dominant frequency band in the physiological range (0.75 - 3 Hz).
+ Calculate relative power within the dominant frequency band in the
+ physiological range (0.75 - 3 Hz).

  Parameters
  ----------
@@ -382,17 +395,19 @@ def compute_relative_power(
  psd: np.ndarray
  The power spectral density of the signal.
  config: PulseRateConfig
- The configuration object containing the parameters for the feature extraction. The following
- attributes are used:
+ The configuration object containing the parameters for the feature
+ extraction. The following attributes are used:
  - freq_band_physio: tuple
  The frequency band for physiological pulse rate (default: (0.75, 3)).
  - bandwidth: float
- The bandwidth around the peak frequency to consider for relative power calculation (default: 0.5).
+ The bandwidth around the peak frequency to consider for
+ relative power calculation (default: 0.5).

  Returns
  -------
  list
- The relative power within the dominant frequency band in the physiological range (0.75 - 3 Hz).
+ The relative power within the dominant frequency band in the
+ physiological range (0.75 - 3 Hz).

  """
  hr_range_mask = (freqs >= config.freq_band_physio[0]) & (
@@ -410,7 +425,7 @@ def compute_relative_power(
  for peak_freq in peak_freqs
  ]
  rel_power = [
- np.trapz(psd[j, idx], freqs[idx]) / np.trapz(psd[j, :], freqs)
+ np.trapezoid(psd[j, idx], freqs[idx]) / np.trapezoid(psd[j, :], freqs)
  for j, idx in enumerate(dom_band_idx)
  ]
  return rel_power
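
Both `np.trapz` calls in the relative-power ratio become `np.trapezoid`; the quantity is unchanged: the integral of the PSD in a narrow band around each peak divided by the integral over the full range. A toy version of that ratio, with synthetic data and an assumed 0.5 Hz bandwidth standing in for `PulseRateConfig`:

```python
import numpy as np

freqs = np.linspace(0.5, 4.0, 141)
psd_window = np.exp(-((freqs - 1.2) ** 2) / 0.01) + 0.05  # sharp peak near 1.2 Hz

peak_freq = freqs[np.argmax(psd_window)]
bandwidth = 0.5  # mirrors the documented default
band_idx = (freqs >= peak_freq - bandwidth / 2) & (freqs <= peak_freq + bandwidth / 2)

rel_power = np.trapezoid(psd_window[band_idx], freqs[band_idx]) / np.trapezoid(
    psd_window, freqs
)
print(round(rel_power, 2))  # fraction of total power concentrated around the peak
```
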
@@ -449,11 +464,13 @@ def compute_mfccs(
  rounding_method: str = "floor",
  ) -> np.ndarray:
  """
- Generate Mel Frequency Cepstral Coefficients (MFCCs) from the total power spectral density or spectrogram of the signal.
+ Generate Mel Frequency Cepstral Coefficients (MFCCs) from the total
+ power spectral density or spectrogram of the signal.

  MFCCs are commonly used features in signal processing for tasks like audio and
  vibration analysis. In this version, we adjusted the MFFCs to the human activity
- range according to: https://www.sciencedirect.com/science/article/abs/pii/S016516841500331X#f0050.
+ range according to:
+ https://www.sciencedirect.com/science/article/abs/pii/S016516841500331X#f0050
  This function calculates MFCCs by applying a filterbank
  (in either the mel scale or linear scale) to the total power of the signal,
  followed by a Discrete Cosine Transform (DCT) to obtain coefficients.
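
The docstring above describes the MFCC pipeline as a filterbank applied to the total power, followed by a DCT. A compact generic sketch of that pipeline, using a plain triangular filterbank on a linear axis; the package's own filterbank (mel-scaled and adjusted to the human activity range) differs in detail:

```python
import numpy as np
from scipy.fft import dct


def mfcc_sketch(total_power, freqs, n_filters=20, n_coefficients=12):
    """Illustrative pipeline only, not the package implementation."""
    # Triangular filters with centres spread evenly over the frequency axis.
    edges = np.linspace(freqs[0], freqs[-1], n_filters + 2)
    filterbank = np.zeros((n_filters, freqs.size))
    for i in range(n_filters):
        left, centre, right = edges[i], edges[i + 1], edges[i + 2]
        rising = (freqs >= left) & (freqs <= centre)
        falling = (freqs > centre) & (freqs <= right)
        filterbank[i, rising] = (freqs[rising] - left) / (centre - left)
        filterbank[i, falling] = (right - freqs[falling]) / (right - centre)

    filter_energies = total_power @ filterbank.T       # (n_windows, n_filters)
    log_energies = np.log10(filter_energies + 1e-10)   # avoid log(0)
    return dct(log_energies, type=2, axis=1, norm="ortho")[:, 1:n_coefficients + 1]


freqs = np.linspace(0, 25, 251)
total_power = np.random.default_rng(1).random((5, freqs.size))
print(mfcc_sketch(total_power, freqs).shape)  # (5, 12)
```
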
@@ -461,11 +478,11 @@ def compute_mfccs(
  Parameters
  ----------
  total_power_array : np.ndarray
- 2D array of shape (n_windows, n_frequencies) containing the total power
- of the signal for each window.
+ 2D array of shape (n_windows, n_frequencies) containing the total
+ power of the signal for each window.
  OR
- 3D array of shape (n_windows, n_frequencies, n_segments) containing the total spectrogram
- of the signal for each window.
+ 3D array of shape (n_windows, n_frequencies, n_segments) containing
+ the total spectrogram of the signal for each window.
  config : object
  Configuration object containing the following attributes:
  - window_length_s : int
@@ -481,14 +498,16 @@ def compute_mfccs(
  - mfcc_n_coefficients : int
  Number of coefficients to extract (default: 12).
  total_power_type : str, optional
- The type of the total power array. Supported values are 'psd' and 'spectrogram' (default: 'psd').
+ The type of the total power array. Supported values are 'psd' and
+ 'spectrogram' (default: 'psd').
  mel_scale : bool, optional
  Whether to use the mel scale for the filterbank (default: True).
  multiplication_factor : float, optional
- Multiplication factor for the Mel scale conversion (default: 1). For tremor, the recommended
- value is 1. For gait, this is 4.
+ Multiplication factor for the Mel scale conversion (default: 1).
+ For tremor, the recommended value is 1. For gait, this is 4.
  rounding_method : str, optional
- The method used to round the filter points. Supported values are 'round' and 'floor' (default: 'floor').
+ The method used to round the filter points. Supported values are
+ 'round' and 'floor' (default: 'floor').

  Returns
  -------
@@ -598,8 +617,8 @@ def melscale(x: np.ndarray, multiplication_factor: float = 1) -> np.ndarray:
  x : np.ndarray
  Linear frequency values to be converted to the Mel scale.
  multiplication_factor : float, optional
- Multiplication factor for the Mel scale conversion (default: 1). For tremor, the recommended
- value is 1. For gait, this is 4.
+ Multiplication factor for the Mel scale conversion (default: 1).
+ For tremor, the recommended value is 1. For gait, this is 4.

  Returns
  -------
@@ -616,7 +635,8 @@ def inverse_melscale(x: np.ndarray, multiplication_factor: float = 1) -> np.ndar
  Maps values from the Mel scale back to linear frequencies.

  This function performs the inverse transformation of the Mel scale,
- converting perceptual frequency values to their corresponding linear frequency values.
+ converting perceptual frequency values to their corresponding linear
+ frequency values.

  Parameters
  ----------
@@ -672,7 +692,8 @@ def pca_transform_gyroscope(

  def compute_angle(time_array: np.ndarray, velocity_array: np.ndarray) -> np.ndarray:
  """
- Compute the angle from the angular velocity using cumulative trapezoidal integration.
+ Compute the angle from the angular velocity using cumulative
+ trapezoidal integration.

  Parameters
  ----------
@@ -684,7 +705,8 @@ def compute_angle(time_array: np.ndarray, velocity_array: np.ndar
  Returns
  -------
  np.ndarray
- The estimated angle based on the cumulative trapezoidal integration of the angular velocity.
+ The estimated angle based on the cumulative trapezoidal integration
+ of the angular velocity.
  """
  # Perform integration and apply absolute value
  angle_array = cumulative_trapezoid(y=velocity_array, x=time_array, initial=0)
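
The integration step shown above is SciPy's `cumulative_trapezoid`. A quick check of what that call produces on synthetic angular velocity; the final absolute-value step follows the comment in the hunk and is an assumption about the rest of the function body:

```python
import numpy as np
from scipy.integrate import cumulative_trapezoid

fs = 100.0
time_array = np.arange(0, 2, 1 / fs)
velocity_array = 2 * np.pi * np.cos(2 * np.pi * time_array)

angle_array = cumulative_trapezoid(y=velocity_array, x=time_array, initial=0)
angle_array = np.abs(angle_array)  # mirrors the "apply absolute value" comment

# Integrating 2*pi*cos(2*pi*t) gives sin(2*pi*t), so the peak should be ~1.
print(round(angle_array.max(), 3))
```
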
@@ -721,7 +743,7 @@ def extract_angle_extremes(
  angle_array: np.ndarray,
  sampling_frequency: float,
  max_frequency_activity: float = 1.75,
- ) -> tuple[List[int], List[int], List[int]]:
+ ) -> tuple[list[int], list[int], list[int]]:
  """
  Extract extrema (minima and maxima) indices from the angle array.

@@ -810,7 +832,7 @@ def extract_angle_extremes(


  def compute_range_of_motion(
- angle_array: np.ndarray, extrema_indices: List[int]
+ angle_array: np.ndarray, extrema_indices: list[int]
  ) -> np.ndarray:
  """
  Compute the range of motion of a time series based on the angle extrema.
@@ -848,7 +870,7 @@ def compute_range_of_motion(

  def compute_peak_angular_velocity(
  velocity_array: np.ndarray,
- angle_extrema_indices: List[int],
+ angle_extrema_indices: list[int],
  ) -> np.ndarray:
  """
  Compute the peak angular velocity of a time series based on the angle extrema.
@@ -890,10 +912,10 @@ def compute_peak_angular_velocity(

  def compute_forward_backward_peak_angular_velocity(
  velocity_array: np.ndarray,
- angle_extrema_indices: List[int],
- minima_indices: List[int],
- maxima_indices: List[int],
- ) -> Tuple[np.ndarray, np.ndarray]:
+ angle_extrema_indices: list[int],
+ minima_indices: list[int],
+ maxima_indices: list[int],
+ ) -> tuple[np.ndarray, np.ndarray]:
  """
  Compute the peak angular velocity of a time series based on the angle extrema.

@@ -911,7 +933,8 @@ def compute_forward_backward_peak_angular_velocity(
  Returns
  -------
  Tuple[np.ndarray, np.ndarray]
- A tuple containing the forward and backward peak angular velocities for minima and maxima.
+ A tuple containing the forward and backward peak angular velocities
+ for minima and maxima.
  """
  if np.any(np.array(angle_extrema_indices) < 0) or np.any(
  np.array(angle_extrema_indices) >= len(velocity_array)
@@ -938,7 +961,8 @@ def compute_forward_backward_peak_angular_velocity(
  next_peak_idx = angle_extrema_indices[i + 1]
  segment = velocity_array[current_peak_idx:next_peak_idx]

- # Check if the current peak is a minimum or maximum and calculate peak velocity accordingly
+ # Check if the current peak is a minimum or maximum and
+ # calculate peak velocity accordingly
  if current_peak_idx in minima_indices:
  forward_pav.append(np.max(np.abs(segment)))
  elif current_peak_idx in maxima_indices:
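
The rewrapped comment belongs to the loop over consecutive angle extrema: each velocity segment contributes a forward peak when it starts at a minimum and, presumably, a backward peak when it starts at a maximum (the `maxima` branch body is cut off by this hunk). A hedged sketch of that reading, with hypothetical extrema indices chosen for a pure sine:

```python
import numpy as np


def forward_backward_pav_sketch(velocity, extrema, minima, maxima):
    """Illustrative reading of the loop shown in the diff, not the package code."""
    forward_pav, backward_pav = [], []
    for i in range(len(extrema) - 1):
        current_peak_idx, next_peak_idx = extrema[i], extrema[i + 1]
        segment = velocity[current_peak_idx:next_peak_idx]
        if current_peak_idx in minima:
            forward_pav.append(np.max(np.abs(segment)))
        elif current_peak_idx in maxima:
            backward_pav.append(np.max(np.abs(segment)))
    return np.asarray(forward_pav), np.asarray(backward_pav)


t = np.linspace(0, 2, 201)
velocity = np.sin(2 * np.pi * t)
# For an angle proportional to -cos(2*pi*t), minima sit at t = 0, 1, 2 and
# maxima at t = 0.5, 1.5; the indices below are those grid positions.
minima, maxima = [0, 100, 200], [50, 150]
extrema = sorted(minima + maxima)
fwd, bwd = forward_backward_pav_sketch(velocity, extrema, minima, maxima)
print(fwd, bwd)  # peak |velocity| per forward / backward half-swing, ~1 each
```
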
@@ -975,8 +999,14 @@ def compute_signal_to_noise_ratio(ppg_windowed: np.ndarray) -> np.ndarray:

  def compute_auto_correlation(ppg_windowed: np.ndarray, fs: int) -> np.ndarray:
  """
- Compute the biased autocorrelation of the PPG signal. The autocorrelation is computed up to 3 seconds. The highest peak value is selected as the autocorrelation value. If no peaks are found, the value is set to 0.
- The biased autocorrelation is computed using the biased_autocorrelation function. It differs from the unbiased autocorrelation in that the normalization factor is the length of the original signal, and boundary effects are considered. This results in a smoother autocorrelation function.
+ Compute the biased autocorrelation of the PPG signal. The autocorrelation
+ is computed up to 3 seconds. The highest peak value is selected as the
+ autocorrelation value. If no peaks are found, the value is set to 0.
+ The biased autocorrelation is computed using the biased_autocorrelation
+ function. It differs from the unbiased autocorrelation in that the
+ normalization factor is the length of the original signal, and boundary
+ effects are considered. This results in a smoother autocorrelation
+ function.

  Parameters
  ----------
@@ -991,26 +1021,25 @@ def compute_auto_correlation(ppg_windowed: np.ndarray, fs: int) -> np.ndarray:
  The autocorrelation of the PPG signal.
  """

- auto_correlations = biased_autocorrelation(
- ppg_windowed, fs * 3
- ) # compute the biased autocorrelation of the PPG signal up to 3 seconds
- peaks = [
- find_peaks(x, height=0.01)[0] for x in auto_correlations
- ] # find the peaks of the autocorrelation
+ # Compute the biased autocorrelation of the PPG signal up to 3 seconds
+ auto_correlations = biased_autocorrelation(ppg_windowed, fs * 3)
+ # Find the peaks of the autocorrelation
+ peaks = [find_peaks(x, height=0.01)[0] for x in auto_correlations]
+ # Sort the peak values in descending order
  sorted_peak_values = [
  np.sort(auto_correlations[i, indices])[::-1] for i, indices in enumerate(peaks)
- ] # sort the peak values in descending order
- auto_correlations = [
- x[0] if len(x) > 0 else 0 for x in sorted_peak_values
- ] # get the highest peak value if there are any peaks, otherwise set to 0
+ ]
+ # Get the highest peak value if there are any peaks, otherwise set to 0
+ auto_correlations = [x[0] if len(x) > 0 else 0 for x in sorted_peak_values]

  return np.asarray(auto_correlations)


  def biased_autocorrelation(ppg_windowed: np.ndarray, max_lag: int) -> np.ndarray:
  """
- Compute the biased autocorrelation of a signal (similar to matlabs autocorr function), where the normalization factor
- is the length of the original signal, and boundary effects are considered.
+ Compute the biased autocorrelation of a signal (similar to matlab's
+ autocorr function), where the normalization factor is the length of the
+ original signal, and boundary effects are considered.

  Parameters
  ----------
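
The `N` → `n_samples` rename in the next hunk does not change the normalization: the sum of overlapping products at each lag is divided by the full window length (biased) rather than by the number of overlapping points (unbiased), which is what keeps the tail of the autocorrelation smooth. A compact comparison on a single toy window:

```python
import numpy as np

rng = np.random.default_rng(2)
x = np.sin(2 * np.pi * 1.5 * np.arange(0, 4, 0.02)) + 0.1 * rng.standard_normal(200)
x = x - x.mean()
n_samples, max_lag = x.size, 60

biased = np.array(
    [np.sum(x[: n_samples - k] * x[k:]) / n_samples for k in range(max_lag + 1)]
)
unbiased = np.array(
    [np.sum(x[: n_samples - k] * x[k:]) / (n_samples - k) for k in range(max_lag + 1)]
)

# Normalise by lag 0, as the package does with autocorr_values[:, 0].
biased /= biased[0]
unbiased /= unbiased[0]
print(round(biased[40], 3), round(unbiased[40], 3))  # the biased estimate is damped at larger lags
```
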
@@ -1028,15 +1057,17 @@ def biased_autocorrelation(ppg_windowed: np.ndarray, max_lag: int) -> np.ndarray
  zero_mean_ppg = ppg_windowed - np.mean(
  ppg_windowed, axis=1, keepdims=True
  ) # Remove the mean of the signal to make it zero-mean
- N = zero_mean_ppg.shape[1]
+ n_samples = zero_mean_ppg.shape[1]
  autocorr_values = np.zeros((zero_mean_ppg.shape[0], max_lag + 1))

  for lag in range(max_lag + 1):
  # Compute autocorrelation for current lag
- overlapping_points = zero_mean_ppg[:, : N - lag] * zero_mean_ppg[:, lag:]
+ overlapping_points = (
+ zero_mean_ppg[:, : n_samples - lag] * zero_mean_ppg[:, lag:]
+ )
  autocorr_values[:, lag] = (
- np.sum(overlapping_points, axis=1) / N
- ) # Divide by N (biased normalization)
+ np.sum(overlapping_points, axis=1) / n_samples
+ ) # Divide by n_samples (biased normalization)

  return (
  autocorr_values / autocorr_values[:, 0, np.newaxis]