physiodsp 0.1.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- physiodsp/__init__.py +0 -0
- physiodsp/_version.py +1 -0
- physiodsp/activity/__init__.py +5 -0
- physiodsp/activity/activity_recognition.py +0 -0
- physiodsp/activity/activity_score.py +311 -0
- physiodsp/activity/energy_expenditure.py +0 -0
- physiodsp/activity/enmo.py +74 -0
- physiodsp/activity/pim.py +43 -0
- physiodsp/activity/step_count.py +0 -0
- physiodsp/activity/time_above_thr.py +64 -0
- physiodsp/activity/zero_crossing.py +103 -0
- physiodsp/balance_tests/__init__.py +0 -0
- physiodsp/balance_tests/sway.py +157 -0
- physiodsp/base.py +65 -0
- physiodsp/dsp/__init__.py +0 -0
- physiodsp/dsp/convolution.py +24 -0
- physiodsp/ecg/__init__.py +0 -0
- physiodsp/ecg/peak_detector.py +78 -0
- physiodsp/hrv/__init__.py +0 -0
- physiodsp/hrv/hrv_score.py +89 -0
- physiodsp/sensors/__init__.py +0 -0
- physiodsp/sensors/ecg.py +10 -0
- physiodsp/sensors/hrv.py +9 -0
- physiodsp/sensors/imu/__init__.py +0 -0
- physiodsp/sensors/imu/accelerometer.py +10 -0
- physiodsp/sensors/imu/base.py +26 -0
- physiodsp/sensors/imu/gyroscope.py +10 -0
- physiodsp/sensors/imu/magnetometer.py +10 -0
- physiodsp-0.1.0.dist-info/METADATA +223 -0
- physiodsp-0.1.0.dist-info/RECORD +33 -0
- physiodsp-0.1.0.dist-info/WHEEL +5 -0
- physiodsp-0.1.0.dist-info/licenses/LICENSE +674 -0
- physiodsp-0.1.0.dist-info/top_level.txt +1 -0
physiodsp/__init__.py
ADDED
|
File without changes
|
physiodsp/_version.py
ADDED
|
@@ -0,0 +1 @@
|
|
|
1
|
+
__version__ = "0.1.0"  # package version (kept in sync with the wheel / dist-info metadata)
|
|
File without changes
|
|
@@ -0,0 +1,311 @@
|
|
|
1
|
+
from typing import Literal
|
|
2
|
+
|
|
3
|
+
import numpy as np
|
|
4
|
+
from pandas import DataFrame
|
|
5
|
+
from pydantic import BaseModel, Field, PositiveInt, PositiveFloat
|
|
6
|
+
|
|
7
|
+
from physiodsp.base import BaseAlgorithm
|
|
8
|
+
|
|
9
|
+
|
|
10
|
+
class ActivityScoreSettings(BaseModel):
    """Configuration settings for Activity Score algorithm.

    Holds the personalization window, the per-factor weights, and the
    per-factor targets used by :class:`ActivityScore`. The four weights are
    expected to sum to 1.0; this is enforced by ActivityScore._validate_weights(),
    not by this model.
    """

    # Baseline window (number of days to use for personalization)
    baseline_window_days: PositiveInt = Field(default=30, description="Number of days to use for computing baselines")

    # Factor weights (must sum to 1.0)
    step_weight: PositiveFloat = Field(default=0.25, description="Weight for step count factor")
    sleep_weight: PositiveFloat = Field(default=0.35, description="Weight for sleep factor")
    training_weight: PositiveFloat = Field(default=0.25, description="Weight for training time factor")
    resting_weight: PositiveFloat = Field(default=0.15, description="Weight for resting time factor")

    # Step targets (personalized) — used only when no baseline history is supplied
    baseline_daily_steps: PositiveInt = Field(default=8000, description="User's baseline daily steps")
    step_ceiling: PositiveInt = Field(default=15000, description="Maximum steps for optimal score")

    # Sleep targets (hours)
    min_sleep_hours: PositiveFloat = Field(default=6.0, description="Minimum healthy sleep duration")
    optimal_sleep_hours: PositiveFloat = Field(default=8.0, description="Optimal sleep duration")
    max_sleep_hours: PositiveFloat = Field(default=10.0, description="Maximum sleep before penalty")

    # Training targets (minutes)
    min_training_minutes: PositiveInt = Field(default=0, description="Minimum training minutes per day")
    optimal_training_minutes: PositiveInt = Field(default=60, description="Optimal training minutes per day")
    max_training_minutes: PositiveInt = Field(default=120, description="Maximum training before overtraining penalty")

    # Resting targets (minutes)
    min_resting_minutes: PositiveInt = Field(default=480, description="Minimum resting minutes (8 hours)")
    optimal_resting_minutes: PositiveInt = Field(default=540, description="Optimal resting minutes (9 hours)")
    max_resting_minutes: PositiveInt = Field(default=720, description="Maximum resting minutes (12 hours)")

    # Scoring method — "gaussian" uses a bell curve around the baseline;
    # any other value falls back to a linear ramp toward the ceiling.
    scoring_method: Literal["gaussian", "sigmoid", "linear"] = Field(
        default="gaussian",
        description="Method to map normalized values to scores"
    )
|
|
46
|
+
|
|
47
|
+
|
|
48
|
+
class ActivityScore(BaseAlgorithm):
    """
    Activity Score Algorithm - Personalized Daily Activity Assessment

    Combines multiple health metrics (steps, sleep, training, resting) into a single
    0-100 score tailored to individual user baselines and targets.

    Based on principles from Oura Ring and WHOOP band scoring algorithms.
    """

    _algorithm_name = "ActivityScore"
    _version = "0.1.0"

    def __init__(self, settings: ActivityScoreSettings = ActivityScoreSettings()) -> None:
        """Store settings and validate them.

        Raises:
            ValueError: If the four factor weights do not sum to 1.0 (+/- 0.01).
        """
        self.settings = settings
        self._validate_weights()
        # Populated by run()
        self.user_stats = None
        self.daily_scores = None
        self.baseline_stats = None

    def _validate_weights(self) -> None:
        """Validate that the four factor weights sum to 1.0 (within 0.01)."""
        total_weight = (
            self.settings.step_weight +
            self.settings.sleep_weight +
            self.settings.training_weight +
            self.settings.resting_weight
        )
        if not np.isclose(total_weight, 1.0, atol=0.01):
            raise ValueError(f"Weights must sum to 1.0, got {total_weight}")

    def _compute_baseline_stats(self, baseline_data: DataFrame) -> dict:
        """
        Compute personalized baseline statistics from historical data.

        Args:
            baseline_data: DataFrame with the previous N-1 days; must contain
                'steps', 'sleep_hours', 'training_minutes', 'resting_minutes'.

        Returns:
            Dictionary with median/std per metric (plus 75th percentile for steps).

        NOTE(review): with a single baseline row, pandas ``std()`` returns NaN;
        the per-factor scorers happen to tolerate NaN via max()/min() clamping,
        but behaviour for very short histories should be confirmed.
        """
        return {
            'steps_median': baseline_data['steps'].median(),
            'steps_std': baseline_data['steps'].std(),
            'steps_p75': baseline_data['steps'].quantile(0.75),
            'sleep_median': baseline_data['sleep_hours'].median(),
            'sleep_std': baseline_data['sleep_hours'].std(),
            'training_median': baseline_data['training_minutes'].median(),
            'training_std': baseline_data['training_minutes'].std(),
            'resting_median': baseline_data['resting_minutes'].median(),
            'resting_std': baseline_data['resting_minutes'].std()
        }

    def _score_steps(self, daily_steps: int, baseline_stats: dict = None) -> float:
        """
        Score daily step count (0-100).

        Gaussian distribution centered at user's median baseline; any other
        scoring_method falls back to a linear ramp capped at the ceiling.
        """
        if baseline_stats is None:
            baseline = self.settings.baseline_daily_steps
            ceiling = self.settings.step_ceiling
        else:
            # Use personalized baseline from historical data
            baseline = baseline_stats['steps_median']
            # Ceiling is 75th percentile or 1.5x median, whichever is higher
            ceiling = max(baseline * 1.5, baseline_stats['steps_p75'])

        if self.settings.scoring_method == "gaussian":
            if baseline == 0:
                return 0.0
            normalized = daily_steps / baseline
            peak_normalized = ceiling / baseline
            # Bell curve peaking at the baseline; the ceiling sits ~2 sigma out.
            std_dev = (peak_normalized - 1.0) / 2.0
            score = 100 * np.exp(-((normalized - 1.0) ** 2) / (2 * std_dev ** 2))
            score = np.clip(score, 0, 100)
        else:
            if ceiling == 0:
                return 0.0
            score = min(100, (daily_steps / ceiling) * 100)

        return float(score)

    def _score_sleep(self, sleep_hours: float, baseline_stats: dict = None) -> float:
        """
        Score sleep duration (0-100).

        Centered at user's median sleep, with personalized thresholds.

        NOTE(review): there is a discontinuity at ``min_sleep``: just below it
        the score is ~50, at exactly ``min_sleep`` the ramp branch yields 0.
        Left unchanged here pending confirmation of intended behaviour.
        """
        if baseline_stats is None:
            optimal = self.settings.optimal_sleep_hours
            min_sleep = self.settings.min_sleep_hours
            max_sleep = self.settings.max_sleep_hours
        else:
            median_sleep = baseline_stats['sleep_median']
            std_sleep = baseline_stats['sleep_std'] or 0.5  # fall back when std == 0
            optimal = median_sleep
            min_sleep = max(4.0, median_sleep - std_sleep * 1.5)
            max_sleep = min(11.0, median_sleep + std_sleep * 1.5)

        if sleep_hours < min_sleep:
            # Severe deficit: scale down from 50 toward 0
            deficit = (min_sleep - sleep_hours) / min_sleep
            score = max(0, 50 - deficit * 50)
        elif sleep_hours <= optimal:
            # Linear ramp up to 100 at the optimum
            score = ((sleep_hours - min_sleep) / (optimal - min_sleep)) * 100
        elif sleep_hours <= max_sleep:
            # Mild penalty for oversleeping (down to 70 at max_sleep)
            score = 100 - ((sleep_hours - optimal) / (max_sleep - optimal)) * 30
        else:
            # Heavy oversleep penalty: -10 per extra hour beyond max_sleep
            excess = sleep_hours - max_sleep
            score = max(0, 70 - excess * 10)

        return float(np.clip(score, 0, 100))

    def _score_training(self, training_minutes: int, baseline_stats: dict = None) -> float:
        """
        Score training time (0-100).

        Rewards consistent moderate training relative to user's baseline and
        penalizes overtraining beyond the optimal duration.
        """
        if baseline_stats is None:
            optimal = self.settings.optimal_training_minutes
            max_train = self.settings.max_training_minutes
        else:
            median_training = baseline_stats['training_median']
            std_training = baseline_stats['training_std'] or 15  # fall back when std == 0
            optimal = max(30, median_training)
            max_train = optimal + std_training * 2

        if training_minutes < 5:
            # Essentially no training: neutral score
            score = 50
        elif training_minutes <= optimal:
            # Linear ramp from 50 up to 100 at the optimum
            score = 50 + (training_minutes / optimal) * 50
        elif training_minutes <= max_train:
            # Overtraining penalty: fall linearly from 100 at `optimal`
            # down to 90 at `max_train`.
            # Bug fix: the original formula (100 + excess_ratio * 10 - 10)
            # *increased* with excess training, i.e. it rewarded overtraining
            # instead of penalizing it.
            excess_ratio = (training_minutes - optimal) / (max_train - optimal)
            score = 100 - excess_ratio * 10
        else:
            # Beyond max_train: continue the decline from 90 (continuous with
            # the previous branch) at 0.5 points per extra minute.
            excess = training_minutes - max_train
            score = max(0, 90 - excess * 0.5)

        return float(np.clip(score, 0, 100))

    def _score_resting(self, resting_minutes: int, baseline_stats: dict = None) -> float:
        """
        Score resting/recovery time (0-100).

        Rewards recovery relative to user's baseline.

        NOTE(review): like _score_sleep, the ramp branch yields 0 at exactly
        ``min_hours`` while just below it the score is ~50; left unchanged
        pending confirmation of intended behaviour.
        """
        resting_hours = resting_minutes / 60

        if baseline_stats is None:
            optimal_hours = self.settings.optimal_resting_minutes / 60
            min_hours = self.settings.min_resting_minutes / 60
            max_hours = self.settings.max_resting_minutes / 60
        else:
            median_resting = baseline_stats['resting_median'] / 60
            std_resting = baseline_stats['resting_std'] / 60 or 0.5  # fall back when std == 0
            optimal_hours = median_resting
            min_hours = max(6.0, median_resting - std_resting * 1.5)
            max_hours = min(12.0, median_resting + std_resting * 1.5)

        if resting_hours < min_hours:
            deficit = (min_hours - resting_hours)
            score = max(0, 50 - deficit * 15)
        elif resting_hours <= optimal_hours:
            score = ((resting_hours - min_hours) / (optimal_hours - min_hours)) * 100
        elif resting_hours <= max_hours:
            excess_hours = resting_hours - optimal_hours
            max_excess = max_hours - optimal_hours
            score = 100 - (excess_hours / max_excess) * 25
        else:
            excess_hours = resting_hours - max_hours
            score = max(0, 75 - excess_hours * 10)

        return float(np.clip(score, 0, 100))

    def run(self, daily_activity_data: DataFrame):
        """
        Calculate Activity Score for the most recent day using baseline statistics.

        Args:
            daily_activity_data: DataFrame with all available data, sorted by date (oldest to newest).
                Must contain columns:
                - 'date' or index as date
                - 'steps': Daily step count
                - 'sleep_hours': Total sleep duration in hours
                - 'training_minutes': Total training time in minutes
                - 'resting_minutes': Total resting/recovery time in minutes

                Should have at least baseline_window_days of data.

        Returns:
            self with biomarker containing score for the most recent day

        Raises:
            ValueError: If required columns are missing or fewer than 2 rows
                are provided.
        """
        if not all(col in daily_activity_data.columns for col in ['steps', 'sleep_hours', 'training_minutes', 'resting_minutes']):
            raise ValueError("DataFrame must contain: steps, sleep_hours, training_minutes, resting_minutes")

        if len(daily_activity_data) < 2:
            raise ValueError(f"Need at least 2 days of data, got {len(daily_activity_data)}")

        # Ensure we have the right number of baseline days
        window_size = min(self.settings.baseline_window_days, len(daily_activity_data) - 1)

        # Split into baseline (all but last) and current day (last)
        baseline_data = daily_activity_data.iloc[:-1].tail(window_size)
        current_day_data = daily_activity_data.iloc[-1]

        # Compute personalized baseline statistics
        self.baseline_stats = self._compute_baseline_stats(baseline_data)

        # Calculate individual scores using personalized baselines
        step_score = self._score_steps(current_day_data['steps'], self.baseline_stats)
        sleep_score = self._score_sleep(current_day_data['sleep_hours'], self.baseline_stats)
        training_score = self._score_training(current_day_data['training_minutes'], self.baseline_stats)
        resting_score = self._score_resting(current_day_data['resting_minutes'], self.baseline_stats)

        # Weighted combination
        activity_score = (
            (step_score * self.settings.step_weight) +
            (sleep_score * self.settings.sleep_weight) +
            (training_score * self.settings.training_weight) +
            (resting_score * self.settings.resting_weight)
        )

        # Prefer an explicit 'date' column; otherwise fall back to the index.
        if 'date' in daily_activity_data.columns:
            score_date = current_day_data['date']
        else:
            score_date = daily_activity_data.index[-1]

        # Create output DataFrame (one row for the most recent day)
        score_record = {
            'date': score_date,
            'activity_score': round(activity_score, 1),
            'step_score': round(step_score, 1),
            'sleep_score': round(sleep_score, 1),
            'training_score': round(training_score, 1),
            'resting_score': round(resting_score, 1),
            'steps': current_day_data['steps'],
            'sleep_hours': current_day_data['sleep_hours'],
            'training_minutes': current_day_data['training_minutes'],
            'resting_minutes': current_day_data['resting_minutes'],
            'baseline_days_used': window_size
        }

        self.biomarker_agg = DataFrame([score_record])
        self.daily_scores = self.biomarker_agg.copy()

        return self

    def get_activity_score_interpretation(self, activity_score: float) -> str:
        """
        Return interpretation of Activity Score.

        Args:
            activity_score: Activity score (0-100)

        Returns:
            Interpretation string
        """
        if activity_score >= 85:
            return "Excellent - Outstanding activity and recovery balance"
        elif activity_score >= 70:
            return "Good - Healthy activity levels with adequate recovery"
        elif activity_score >= 50:
            return "Fair - Room for improvement in activity or recovery"
        elif activity_score >= 30:
            return "Poor - Significant imbalance in activity or recovery"
        else:
            return "Critical - Urgent attention needed to activity and recovery"
|
|
File without changes
|
|
@@ -0,0 +1,74 @@
|
|
|
1
|
+
from pandas import Series, DataFrame
|
|
2
|
+
from pydantic import BaseModel, Field, PositiveInt
|
|
3
|
+
|
|
4
|
+
from physiodsp.base import BaseAlgorithm
|
|
5
|
+
from physiodsp.sensors.imu.accelerometer import AccelerometerData
|
|
6
|
+
|
|
7
|
+
|
|
8
|
+
class ENMOSettings(BaseModel):
    """Configuration settings for the ENMO (Euclidean Norm Minus One) algorithm."""

    # Length of each non-overlapping processing window, in seconds
    window_len: PositiveInt = Field(default=1, description="processing window length in seconds")

    # Length of the coarser window used by aggregate(), in seconds
    aggregation_window: PositiveInt = Field(default=60, description="aggregation window length in seconds")
|
|
13
|
+
|
|
14
|
+
|
|
15
|
+
class ENMO(BaseAlgorithm):
    """Euclidean Norm Minus One (ENMO) activity metric."""

    _algorithm_name = "ENMO"
    _version = "0.1.0"

    def __init__(self,
                 settings: ENMOSettings = ENMOSettings()
                 ) -> None:
        self.settings = settings
        self._window_len = settings.window_len
        self._aggregation_window = settings.aggregation_window
        return None

    def run(self, accelerometer: AccelerometerData):
        """Run the ENMO algorithm on accelerometer data.

        ENMO is calculated as the vector magnitude of the three accelerometer
        axes minus 1g, with negative values set to zero. The algorithm computes
        the average ENMO over non-overlapping windows of a specified length.

        Args:
            accelerometer (AccelerometerData): Triaxial accelerometer data.

        Returns:
            ENMO: Instance of the ENMO algorithm with computed values.
        """
        # Magnitude minus gravity, clipped at zero (negatives carry no activity)
        clipped = accelerometer.magnitude - 1
        clipped[clipped < 0] = 0

        # Non-overlapping, left-closed windows of window_len seconds.
        samples_per_window = int(self._window_len * accelerometer.fs)
        window_kwargs = dict(
            window=samples_per_window,
            step=samples_per_window,
            min_periods=samples_per_window,
            closed="left",
        )

        # Per-window end timestamp (max) and mean ENMO; the first rolling
        # output is the incomplete leading window and is discarded.
        self.timestamps = Series(accelerometer.timestamps).rolling(**window_kwargs).max()[1:]
        self.values = Series(clipped).rolling(**window_kwargs).mean()[1:]

        self.biomarker = DataFrame(
            list(zip(self.timestamps, self.values)),
            columns=['timestamps', 'values']
        )

        return self

    def aggregate(self,
                  method: str = 'mean'
                  ):
        # Delegate to the shared BaseAlgorithm aggregation over (timestamps, values).
        super().aggregate(
            self.timestamps,
            self.values,
            method
        )
        return self
|
|
@@ -0,0 +1,43 @@
|
|
|
1
|
+
from numpy import abs
|
|
2
|
+
from pandas import DataFrame
|
|
3
|
+
|
|
4
|
+
from physiodsp.base import BaseAlgorithm
|
|
5
|
+
from physiodsp.sensors.imu.base import IMUData
|
|
6
|
+
|
|
7
|
+
|
|
8
|
+
class PIMAlgorithm(BaseAlgorithm):
    """Proportional Integration Mode (PIM).

    Computes per-sample absolute acceleration per axis in estimate(), then
    sums (or otherwise aggregates) it over fixed time buckets in aggregate().
    """

    _algorithm_name = "PIMAlgorithm"
    _version = "0.1.0"
    _aggregation_window = 5  # seconds per aggregation bucket

    def __init__(self) -> None:
        # Populated by estimate()
        self.data = None
        self.values_x = None
        self.values_y = None
        self.values_z = None
        return None

    def estimate(self, data: IMUData):
        """Compute the per-sample absolute acceleration for each axis.

        Args:
            data (IMUData): Triaxial IMU data with x/y/z and timestamps.

        Returns:
            PIMAlgorithm: self, with values_x/y/z populated.
        """
        # Bug fix: keep a reference to the input — aggregate() reads
        # self.data.timestamps, which was never set by the original code.
        self.data = data
        self.values_x = abs(data.x)
        self.values_y = abs(data.y)
        self.values_z = abs(data.z)

        return self

    def aggregate(self,
                  method: str = 'sum'
                  ):
        """Aggregate the per-sample values into _aggregation_window buckets.

        Args:
            method (str): pandas GroupBy aggregation method (default 'sum').

        Returns:
            PIMAlgorithm: self, with biomarker_agg populated.
        """
        df = DataFrame(
            list(zip(self.data.timestamps, self.values_x, self.values_y, self.values_z)),
            columns=['timestamps', 'x', 'y', 'z']
        )

        # Bug fix: the original read self.aggregation_window, which does not
        # exist — the class attribute is _aggregation_window.
        df['timestamps'] = df[
            'timestamps'].apply(lambda x: x // self._aggregation_window)

        df_agg = df.groupby('timestamps')[["x", "y", "z"]].agg(method).reset_index(drop=False)

        self.biomarker_agg = df_agg

        return self
|
|
File without changes
|
|
@@ -0,0 +1,64 @@
|
|
|
1
|
+
from numpy import abs, concatenate
|
|
2
|
+
from pandas import DataFrame
|
|
3
|
+
from pydantic import BaseModel, Field, PositiveInt, PositiveFloat
|
|
4
|
+
|
|
5
|
+
from physiodsp.base import BaseAlgorithm
|
|
6
|
+
from physiodsp.sensors.imu.base import IMUData
|
|
7
|
+
|
|
8
|
+
|
|
9
|
+
class TimeAboveThrSettings(BaseModel):
    """Configuration settings for the Time Above Threshold algorithm."""

    # Length of each non-overlapping processing window, in seconds
    window_len: PositiveInt = Field(default=1, description="processing window length in seconds")

    # Length of the coarser window used by aggregate(), in seconds
    aggregation_window: PositiveInt = Field(default=60, description="aggregation window length in seconds")

    # Absolute acceleration threshold (in g) a sample must reach to count
    threshold: PositiveFloat = Field(default=0.1, description="threshold in g")
|
|
16
|
+
|
|
17
|
+
|
|
18
|
+
class TimeAboveThr(BaseAlgorithm):
    """Time Above Threshold Algorithm.

    Counts, per axis and per processing window, how many samples have an
    absolute value at or above a configurable threshold (in g).
    """

    _algorithm_name = "TimeAboveThrAlgorithm"
    _version = "0.1.0"

    def __init__(self,
                 settings: TimeAboveThrSettings = TimeAboveThrSettings()
                 ) -> None:
        self.settings = settings
        self._window_len = settings.window_len
        self._aggregation_window = settings.aggregation_window
        return None

    def run(self, data: IMUData):
        """Count above-threshold samples per axis in non-overlapping windows.

        Args:
            data (IMUData): Triaxial IMU data with to_matrix(), timestamps and fs.

        Returns:
            TimeAboveThr: self, with self.values holding per-window counts
            (columns: timestamps, x, y, z).
        """
        imu_matrix = data.to_matrix()
        # 1 where |sample| >= threshold, else 0, per axis
        above_thr = (abs(imu_matrix) >= self.settings.threshold).astype(int)
        # Add timestamp column
        above_thr = concatenate([data.timestamps.reshape(-1, 1), above_thr], axis=1)

        # Non-overlapping, left-closed windows; the first (incomplete) rolling
        # output is discarded. Counts are window sums of the 0/1 flags.
        self.values = DataFrame({"timestamps": above_thr[:, 0], "x": above_thr[:, 1], "y": above_thr[:, 2], "z": above_thr[:, 3]}).rolling(
            window=int(self._window_len * data.fs),
            step=int(self._window_len * data.fs),
            min_periods=int(self._window_len * data.fs),
            closed="left"
        ).agg({"timestamps": "max", "x": "sum", "y": "sum", "z": "sum"})[1:]

        return self

    def aggregate(self,
                  method: str = 'sum'
                  ):
        """Aggregate per-window counts into _aggregation_window buckets.

        Bug fix: the original body (copied from PIMAlgorithm) referenced
        self.data and self.values_x/y/z, none of which run() ever sets —
        calling it raised AttributeError. run() stores a DataFrame in
        self.values; aggregate now consumes that, mirroring
        ZeroCrossing.aggregate.

        Args:
            method (str): pandas GroupBy aggregation method (default 'sum').

        Returns:
            TimeAboveThr: self, with biomarker_agg populated.
        """
        df = self.values.copy()

        # Snap each window timestamp down to the start of its aggregation bucket
        df['timestamps'] = df[
            'timestamps'].apply(lambda x: (x // self._aggregation_window) * self._aggregation_window)

        df_agg = df.groupby('timestamps')[["x", "y", "z"]].agg(method).reset_index(drop=False)

        self.biomarker_agg = df_agg

        return self
|
|
@@ -0,0 +1,103 @@
|
|
|
1
|
+
from numpy import diff, abs
|
|
2
|
+
from pandas import DataFrame
|
|
3
|
+
from pydantic import BaseModel, Field, PositiveInt, PositiveFloat
|
|
4
|
+
from scipy.signal import butter, sosfilt, sosfilt_zi
|
|
5
|
+
|
|
6
|
+
from physiodsp.base import BaseAlgorithm
|
|
7
|
+
from physiodsp.sensors.imu.base import IMUData
|
|
8
|
+
|
|
9
|
+
|
|
10
|
+
class ZeroCrossingSettings(BaseModel):
    """Configuration settings for the Zero Crossing algorithm."""

    # Length of each non-overlapping processing window, in seconds
    window_len: PositiveInt = Field(default=1, description="processing window length in seconds")

    # Length of the coarser window used by aggregate(), in seconds
    aggregation_window: PositiveInt = Field(default=60, description="aggregation window length in seconds")

    # Typo fix: description previously read "ero crossing threshold in g"
    zero_crossing_thr: PositiveFloat = Field(default=0.05, description="zero crossing threshold in g")

    filter_order: PositiveInt = Field(default=4, description="Butterworth filter order for bandpass filtering")

    filter_low_freq: PositiveFloat = Field(default=0.3, description="Lower cutoff frequency in Hz")

    filter_high_freq: PositiveFloat = Field(default=3.5, description="Upper cutoff frequency in Hz")
|
|
23
|
+
|
|
24
|
+
|
|
25
|
+
class ZeroCrossing(BaseAlgorithm):
    """Zero Crossing Algorithm.

    Bandpass-filters triaxial IMU data, then counts per-axis crossings of the
    configured threshold level within non-overlapping processing windows.
    """

    _algorithm_name = "ZeroCrossingAlgorithm"
    _version = "0.1.0"

    def __init__(self,
                 settings: ZeroCrossingSettings = ZeroCrossingSettings(),
                 ) -> None:

        self.settings = settings
        self._window_len = settings.window_len
        self._aggregation_window = settings.aggregation_window
        self.zero_crossing_thr = settings.zero_crossing_thr
        return None

    def __preprocess_imu(self, imu_matrix, fs: int):
        """
        Apply bandpass Butterworth filter to IMU data.

        Reference: 0.3-3.5 Hz pass band is commonly used for human movement analysis
        to isolate low-frequency body motion while removing drift and high-frequency noise.
        Args:
            imu_matrix: (N, 3) array with columns [x, y, z]
            fs: Sampling frequency of the IMU data in Hz

        Returns:
            Filtered (N, 3) array
        """
        # Design bandpass filter using second-order sections for numerical stability
        sos = butter(self.settings.filter_order,
                     [self.settings.filter_low_freq, self.settings.filter_high_freq],
                     btype='band',
                     fs=fs,
                     output='sos')

        # Initialize filter states for each axis
        zi = sosfilt_zi(sos)

        # Apply filter to each axis using sosfilt (not filtfilt).
        # Scaling zi by the first sample initializes the filter at steady state
        # for that value, reducing the startup transient.
        # NOTE(review): assumes imu_matrix has a float dtype — assignment into
        # filtered_matrix would truncate if the input were integer; confirm
        # what IMUData.to_matrix() returns.
        filtered_matrix = imu_matrix.copy()
        for i in range(imu_matrix.shape[1]):
            filtered_matrix[:, i], _ = sosfilt(sos, imu_matrix[:, i], zi=zi * imu_matrix[0, i])

        return filtered_matrix

    def run(self, data: IMUData):
        """Count per-axis threshold crossings in non-overlapping windows.

        Args:
            data: Triaxial IMU data providing to_matrix(), timestamps and fs.

        Returns:
            ZeroCrossing: self, with self.values / self.biomarker populated
            (columns: timestamps, x, y, z).
        """

        # Apply bandpass filter to IMU data
        imu_matrix = data.to_matrix()
        imu_matrix_filtered = self.__preprocess_imu(imu_matrix, fs=data.fs)
        # Compute zero crossings for each axis: a "crossing" is any transition
        # of the signal across the threshold level (in either direction), found
        # by differencing the boolean (signal >= thr) sequence.
        zcr = abs(diff((imu_matrix_filtered >= self.zero_crossing_thr).astype(int), axis=0))

        # Non-overlapping, left-closed windows of window_len seconds; the first
        # (incomplete) rolling output is discarded. Crossings are summed per
        # window, timestamps take the window's max.
        self.values = DataFrame({"timestamps": data.timestamps[1:], "x": zcr[:, 0], "y": zcr[:, 1], "z": zcr[:, 2]}).rolling(
            window=int(self._window_len * data.fs),
            step=int(self._window_len * data.fs),
            min_periods=int(self._window_len * data.fs),
            closed="left"
        ).agg({"timestamps": "max", "x": "sum", "y": "sum", "z": "sum"})[1:]

        self.biomarker = self.values.copy()

        return self

    def aggregate(self,
                  method: str = 'sum'
                  ):
        """Aggregate per-window crossing counts into _aggregation_window buckets.

        Args:
            method: pandas GroupBy aggregation method (default 'sum').

        Returns:
            ZeroCrossing: self, with biomarker_agg populated.
        """

        df = self.values.copy()

        # Snap each window timestamp down to the start of its aggregation bucket
        df['timestamps'] = df[
            'timestamps'].apply(lambda x: (x // self._aggregation_window) * self._aggregation_window)

        df_agg = df.groupby('timestamps')[["x", "y", "z"]].agg(method).reset_index(drop=False)

        self.biomarker_agg = df_agg

        return self
|
|
File without changes
|