autogaita 1.5.2__py3-none-any.whl → 1.5.5__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,367 +0,0 @@
- from autogaita.dlc.dlc_main import dlc
- from autogaita.common2D.common2D_1_preparation import some_prep
- from autogaita.common2D.common2D_2_sc_extraction import extract_stepcycles
- from autogaita.common2D.common2D_utils import (
-     check_cycle_out_of_bounds,
-     check_cycle_duplicates,
-     check_cycle_order,
-     check_differing_angle_joint_coords,
-     check_tracking_xy_thresholds,
-     check_tracking_SLEAP_nans,
- )
- from hypothesis import given, strategies as st, settings, HealthCheck
- import os
- import numpy as np
- import pytest
-
-
- # %%................................ fixtures ........................................
- @pytest.fixture
- def extract_data_using_some_prep(extract_info, extract_folderinfo, extract_cfg):
-     data = some_prep("DLC", extract_info, extract_folderinfo, extract_cfg)
-     return data
-
-
- @pytest.fixture
- def extract_info(tmp_path):
-     info = {}
-     info["mouse_num"] = 15
-     info["run_num"] = 3
-     info["name"] = "ID " + str(info["mouse_num"]) + " - Run " + str(info["run_num"])
-     info["results_dir"] = os.path.join(tmp_path, info["name"])
-     return info
-
-
- @pytest.fixture
- def extract_folderinfo():
-     folderinfo = {}
-     folderinfo["root_dir"] = "tests/test_data/dlc_data"
-     folderinfo["sctable_filename"] = (
-         "correct_annotation_table.xlsx" # has to be an excel file
-     )
-     folderinfo["data_string"] = "SIMINewOct"
-     folderinfo["beam_string"] = "BeamTraining"
-     folderinfo["premouse_string"] = "Mouse"
-     folderinfo["postmouse_string"] = "25mm"
-     folderinfo["prerun_string"] = "run"
-     folderinfo["postrun_string"] = "6DLC"
-     return folderinfo
-
-
- @pytest.fixture
- def extract_cfg():
-     cfg = {}
-     cfg["sampling_rate"] = 100
-     cfg["subtract_beam"] = True
-     cfg["dont_show_plots"] = True
-     cfg["convert_to_mm"] = True
-     cfg["pixel_to_mm_ratio"] = 3.76
-     cfg["x_sc_broken_threshold"] = 200 # optional cfg
-     cfg["y_sc_broken_threshold"] = 50
-     cfg["x_acceleration"] = True
-     cfg["angular_acceleration"] = True
-     cfg["save_to_xls"] = True
-     cfg["bin_num"] = 25
-     cfg["plot_SE"] = True
-     cfg["standardise_y_at_SC_level"] = False
-     cfg["standardise_y_to_a_joint"] = True
-     cfg["y_standardisation_joint"] = ["Knee"] # "Hind paw tao"]
-     cfg["plot_joint_number"] = 3
-     cfg["color_palette"] = "viridis"
-     cfg["legend_outside"] = True
-     cfg["invert_y_axis"] = True
-     cfg["flip_gait_direction"] = True
-     cfg["analyse_average_x"] = True
-     cfg["standardise_x_coordinates"] = True
-     cfg["x_standardisation_joint"] = ["Hind paw tao"]
-     cfg["coordinate_standardisation_xls"] = ""
-     cfg["hind_joints"] = ["Hind paw tao", "Ankle", "Knee", "Hip", "Iliac Crest"]
-     cfg["fore_joints"] = [
-         "Front paw tao ",
-         "Wrist ",
-         "Elbow ",
-         "Lower Shoulder ",
-         "Upper Shoulder ",
-     ]
-     cfg["beam_col_left"] = ["BeamLeft"] # BEAM_COL_LEFT & _RIGHT must be lists of len=1
-     cfg["beam_col_right"] = ["BeamRight"]
-     cfg["beam_hind_jointadd"] = ["Tail base ", "Tail center ", "Tail tip "]
-     cfg["beam_fore_jointadd"] = ["Nose ", "Ear base "]
-     cfg["angles"] = {
-         "name": ["Ankle ", "Knee ", "Hip "],
-         "lower_joint": ["Hind paw tao ", "Ankle ", "Knee "],
-         "upper_joint": ["Knee ", "Hip ", "Iliac Crest "],
-     }
-     return cfg
-
-
- # %%.............................. test golden path ..................................
-
-
- def test_golden_path_extract_stepcycles(
-     extract_data_using_some_prep, extract_info, extract_folderinfo, extract_cfg
- ):
-     expected_cycles = [[284, 317], [318, 359], [413, 441]]
-     assert (
-         extract_stepcycles(
-             "DLC",
-             extract_data_using_some_prep,
-             extract_info,
-             extract_folderinfo,
-             extract_cfg,
-         )
-         == expected_cycles
-     )
-
-
- # %%................... test Annotation Table checks & handle_issues .................
-
-
- def test_file_not_found_error_in_extract_stepcycles(
-     extract_data_using_some_prep, extract_info, extract_folderinfo, extract_cfg
- ):
-     extract_folderinfo["root_dir"] = ""
-     with pytest.raises(FileNotFoundError) as excinfo:
-         extract_stepcycles(
-             "DLC",
-             extract_data_using_some_prep,
-             extract_info,
-             extract_folderinfo,
-             extract_cfg,
-         )
-     assert "No Annotation Table found!" in str(excinfo.value)
-
-
- def test_handle_issues_1_all_SCs_invalid_because_all_cycles_empty_in_dlc(
-     extract_info, extract_folderinfo, extract_cfg
- ):
-     # call dlc and not extract_stepcycles since handle_issues call we test is in dlc
-     extract_folderinfo["root_dir"] = os.path.join(
-         extract_folderinfo["root_dir"], "flawed_files"
-     )
-     extract_folderinfo["sctable_filename"] = "flawed_table_all_ID_15_SCs_invalid"
-     dlc(extract_info, extract_folderinfo, extract_cfg)
-     with open(os.path.join(extract_info["results_dir"], "Issues.txt")) as f:
-         content = f.read()
-     assert ("Skipped since all SCs invalid!" in content) & (
-         "not in data/video" in content
-     )
-
-
- def test_handle_issues_2_no_scs_of_given_mouse_and_run_in_extract_stepcycles(
-     extract_data_using_some_prep, extract_info, extract_folderinfo, extract_cfg
- ):
-     extract_info["mouse_num"] = 12
-     extract_info["run_num"] = 1
-     extract_stepcycles(
-         "DLC",
-         extract_data_using_some_prep,
-         extract_info,
-         extract_folderinfo,
-         extract_cfg,
-     )
-     with open(os.path.join(extract_info["results_dir"], "Issues.txt")) as f:
-         content = f.read()
-     assert "Skipped since no SCs in Annotation Table!" in content
-
-
- def test_handle_issues_2_wrong_run_number_in_extract_stepcycles(
-     extract_data_using_some_prep, extract_info, extract_folderinfo, extract_cfg
- ):
-     extract_info["run_num"] = 123456789101112
-     extract_stepcycles(
-         "DLC",
-         extract_data_using_some_prep,
-         extract_info,
-         extract_folderinfo,
-         extract_cfg,
-     )
-     with open(os.path.join(extract_info["results_dir"], "Issues.txt")) as f:
-         content = f.read()
-     assert "Skipped since no SCs in Annotation Table!" in content
-
-
- def test_handle_issues_3_wrong_mouse_number_in_extract_stepcycles(
-     extract_data_using_some_prep, extract_info, extract_folderinfo, extract_cfg
- ):
-     extract_info["mouse_num"] = 123456789101112
-     extract_stepcycles(
-         "DLC",
-         extract_data_using_some_prep,
-         extract_info,
-         extract_folderinfo,
-         extract_cfg,
-     )
-     with open(os.path.join(extract_info["results_dir"], "Issues.txt")) as f:
-         content = f.read()
-     assert "ID not in Annotation Table!" in content
-
-
- def test_handle_issues_4_bad_annotation_table_columns_in_extract_stepcycles(
-     extract_data_using_some_prep, extract_info, extract_folderinfo, extract_cfg
- ):
-     extract_folderinfo["root_dir"] = os.path.join(
-         extract_folderinfo["root_dir"], "flawed_files"
-     )
-     extract_folderinfo["sctable_filename"] = "flawed_table_bad_column_names_table"
-     extract_stepcycles(
-         "DLC",
-         extract_data_using_some_prep,
-         extract_info,
-         extract_folderinfo,
-         extract_cfg,
-     )
-     with open(os.path.join(extract_info["results_dir"], "Issues.txt")) as f:
-         content = f.read()
-     assert "Annotation Table's Column Names are wrong!" in content
-
-
- def test_handle_issues_5_double_ID_in_annotation_table_in_extract_stepcycles(
-     extract_data_using_some_prep, extract_info, extract_folderinfo, extract_cfg
- ):
-     extract_folderinfo["root_dir"] = os.path.join(
-         extract_folderinfo["root_dir"], "flawed_files"
-     )
-     extract_folderinfo["sctable_filename"] = "flawed_table_double_ID_15_table"
-     extract_stepcycles(
-         "DLC",
-         extract_data_using_some_prep,
-         extract_info,
-         extract_folderinfo,
-         extract_cfg,
-     )
-     with open(os.path.join(extract_info["results_dir"], "Issues.txt")) as f:
-         content = f.read()
-     assert "ID found more than once in Annotation Table!" in content
-
-
- # ..................... test clean all_cycles local functions ........................
-
-
- @given(
-     all_cycles=st.lists(
-         st.lists(
-             st.one_of(st.integers(), st.floats(), st.text()), min_size=2, max_size=2
-         )
-     )
- )
- def test_clean_cycles_1a_cycle_out_of_bounds_in_extract_stepcycles(all_cycles):
-     all_cycles = check_cycle_out_of_bounds(all_cycles)
-     flat_cycles = flatten_all_cycles(all_cycles)
-     if all_cycles: # can be None
-         assert all(isinstance(idx, (int, np.integer)) for idx in flat_cycles)
-
-
- # Note for following cases that within extract_stepcycles a check for cycle-idxs being in data.index assigns all_cycles[s] to [None, None] - so we have to use that here
- cases = (
-     (
-         [[1, 100], [None, None], [200, 300]],
-         [[1, 100], [200, 300]],
-     ),
-     ([[None, None], [None, None], [None, None]], None),
- ) # fmt: skip
- @pytest.mark.parametrize("all_cycles, expected_cycles", cases)
- def test_clean_cycles_1b_cycle_out_of_bounds_in_extract_stepcycles(
-     all_cycles, expected_cycles
- ):
-     assert expected_cycles == check_cycle_out_of_bounds(all_cycles)
-
-
- cases = (
-     (
-         [[11, 12], [12, 14], [14, 110], [110, 210]],
-         [[11, 12], [13, 14], [15, 110], [111, 210]],
-     ),
-     ([[1, 2], [2, 3], [3, 4], [4, 5]], [[1, 2], [3, 3], [4, 4], [5, 5]]),
- ) # fmt: skip
- @pytest.mark.parametrize("all_cycles, expected_cycles", cases)
- def test_clean_cycles_2_cycle_duplicates_in_extract_stepcycles(
-     all_cycles, expected_cycles
- ):
-     assert expected_cycles == check_cycle_duplicates(all_cycles)
-
-
- @given(all_cycles=st.lists(st.lists(st.integers(), min_size=2, max_size=2)))
- @settings(suppress_health_check=[HealthCheck.function_scoped_fixture])
- def test_clean_cycles_3_cycle_order(all_cycles, extract_info):
-     """Note here that we manually create results_dir's tmp_path if required: it's usually created within dlc, and hypothesis complains without the if-not condition because the path keeps being created for new cases."""
-     if not os.path.exists(extract_info["results_dir"]):
-         os.makedirs(extract_info["results_dir"])
-     all_cycles = check_cycle_order(all_cycles, extract_info)
-     flat_cycles = flatten_all_cycles(all_cycles)
-     if all_cycles: # can be None
-         assert flat_cycles == sorted(flat_cycles)
-
-
- def test_clean_cycles_4_differing_angle_joint_coords(
-     extract_data_using_some_prep, extract_info, extract_cfg
- ):
-     data = extract_data_using_some_prep.copy()
-     all_cycles = [[111, 222], [333, 444]]
-     data.loc[123, "Hind paw tao x"] = 5.5
-     data.loc[123, "Hind paw tao y"] = 2.1
-     data.loc[123, "Ankle x"] = 5.5
-     data.loc[123, "Ankle y"] = 2.1
-     all_cycles = check_differing_angle_joint_coords(
-         all_cycles, data, extract_info, extract_cfg
-     )
-     assert all_cycles == [[333, 444]]
-     with open(os.path.join(extract_info["results_dir"], "Issues.txt")) as f:
-         content = f.read()
-     assert "SC #1" in content
-     assert "Lower joint: Hind paw tao" in content
-     data.loc[345, "Hip x"] = 10.1
-     data.loc[345, "Hip y"] = 0.333
-     data.loc[345, "Iliac Crest x"] = 10.1
-     data.loc[345, "Iliac Crest y"] = 0.333
-     assert (
-         check_differing_angle_joint_coords(all_cycles, data, extract_info, extract_cfg)
-         is None
-     )
-
-
- def test_clean_cycles_5_DLC_tracking(
-     extract_data_using_some_prep, extract_info, extract_cfg
- ):
-     """Note that we know that very early on (2-150) the mouse was not in the frame yet, so DLC is broken and these SCs will be excluded. The other 3 of case 2 are the correct SCs of this ID/run, of which none should be excluded!"""
-     all_cycles_of_the_two_cases = (
-         [[2, 50], [52, 100], [102, 150]],
-         [[2, 50], [52, 100], [102, 150], [284, 317], [318, 359], [413, 441]],
-     )
-     expected_cycles = (None, [[284, 317], [318, 359], [413, 441]])
-     for c, this_cases_all_cycles in enumerate(all_cycles_of_the_two_cases):
-         assert expected_cycles[c] == check_tracking_xy_thresholds(
-             this_cases_all_cycles,
-             extract_data_using_some_prep,
-             extract_info,
-             extract_cfg,
-         )
-
-
- def test_clean_cycles_6_SLEAP_nan_removals(
-     extract_data_using_some_prep, extract_info, extract_cfg
- ):
-     """We just pretend that this is SLEAP data - whatever"""
-     extract_cfg["joints"] = ["Hind paw tao ", "Ankle "]
-     extract_cfg["angles"] = {
-         "name": ["Elbow "],
-         "lower_joint": ["Wrist "],
-         "upper_joint": ["Lower Shoulder "],
-     }
-     data = extract_data_using_some_prep.copy()
-     all_cycles = [[111, 222], [333, 444], [555, 666]]
-     data.loc[123, "Hind paw tao y"] = np.nan
-     data.loc[444, "Elbow x"] = np.nan
-     clean_cycles = check_tracking_SLEAP_nans(
-         all_cycles,
-         data,
-         extract_info,
-         extract_cfg,
-     )
-     assert clean_cycles == [[555, 666]]
-
-
- # ............................... helper functions ...................................
- def flatten_all_cycles(all_cycles):
-     if all_cycles:
-         return [idx for cycle in all_cycles for idx in cycle]
@@ -1,245 +0,0 @@
- from autogaita.common2D.common2D_1_preparation import some_prep
- from autogaita.common2D.common2D_2_sc_extraction import extract_stepcycles
- from autogaita.common2D.common2D_3_analysis import (
-     analyse_and_export_stepcycles,
-     add_step_separators,
-     add_angles,
-     add_x_velocities,
-     add_angular_velocities,
- )
- from hypothesis import given
- import hypothesis.strategies as st
- from hypothesis.extra.numpy import arrays
- import os
- import numpy as np
- import pandas as pd
- import pandas.testing as pdt
- import pytest
-
-
- # %%................................ fixtures ........................................
- @pytest.fixture
- def extract_info(tmp_path):
-     info = {}
-     info["mouse_num"] = 15
-     info["run_num"] = 3
-     info["name"] = "ID " + str(info["mouse_num"]) + " - Run " + str(info["run_num"])
-     info["results_dir"] = os.path.join(tmp_path, info["name"])
-     return info
-
-
- @pytest.fixture
- def extract_folderinfo():
-     folderinfo = {}
-     folderinfo["root_dir"] = "tests/test_data/dlc_data"
-     folderinfo["sctable_filename"] = (
-         "correct_annotation_table.xlsx" # has to be an excel file
-     )
-     folderinfo["data_string"] = "SIMINewOct"
-     folderinfo["beam_string"] = "BeamTraining"
-     folderinfo["premouse_string"] = "Mouse"
-     folderinfo["postmouse_string"] = "25mm"
-     folderinfo["prerun_string"] = "run"
-     folderinfo["postrun_string"] = "6DLC"
-     return folderinfo
-
-
- @pytest.fixture
- def extract_cfg():
-     cfg = {}
-     cfg["sampling_rate"] = 100
-     cfg["subtract_beam"] = True
-     cfg["dont_show_plots"] = True
-     cfg["convert_to_mm"] = True
-     cfg["pixel_to_mm_ratio"] = 3.76
-     cfg["x_sc_broken_threshold"] = 200 # optional cfg
-     cfg["y_sc_broken_threshold"] = 50
-     cfg["x_acceleration"] = True
-     cfg["angular_acceleration"] = True
-     cfg["save_to_xls"] = True
-     cfg["bin_num"] = 25
-     cfg["plot_SE"] = True
-     cfg["standardise_y_at_SC_level"] = False
-     cfg["standardise_y_to_a_joint"] = True
-     cfg["y_standardisation_joint"] = ["Knee"] # "Hind paw tao"]
-     cfg["plot_joint_number"] = 3
-     cfg["color_palette"] = "viridis"
-     cfg["legend_outside"] = True
-     cfg["invert_y_axis"] = True
-     cfg["flip_gait_direction"] = True
-     cfg["analyse_average_x"] = True
-     cfg["standardise_x_coordinates"] = True
-     cfg["x_standardisation_joint"] = ["Hind paw tao"]
-     cfg["coordinate_standardisation_xls"] = ""
-     cfg["hind_joints"] = ["Hind paw tao", "Ankle", "Knee", "Hip", "Iliac Crest"]
-     cfg["fore_joints"] = [
-         "Front paw tao ",
-         "Wrist ",
-         "Elbow ",
-         "Lower Shoulder ",
-         "Upper Shoulder ",
-     ]
-     cfg["beam_col_left"] = ["BeamLeft"] # BEAM_COL_LEFT & _RIGHT must be lists of len=1
-     cfg["beam_col_right"] = ["BeamRight"]
-     cfg["beam_hind_jointadd"] = ["Tail base ", "Tail center ", "Tail tip "]
-     cfg["beam_fore_jointadd"] = ["Nose ", "Ear base "]
-     cfg["angles"] = {
-         "name": ["Ankle ", "Knee ", "Hip "],
-         "lower_joint": ["Hind paw tao ", "Ankle ", "Knee "],
-         "upper_joint": ["Knee ", "Hip ", "Iliac Crest "],
-     }
-     return cfg
-
-
- # %%....... main analysis: sc-lvl y-norm, features, df-creation & export ..........
-
-
- def test_height_standardisation_no_beam():
-     """Unit test normalising heights if there is no beam"""
-     # Create a sample DataFrame
-     data = {
-         "first_y": [-10, -5, 0, 10],
-         "second_y": [10, 20, 30, 40],
-         "third_y": [5, 15, 25, 35],
-     }
-     step = pd.DataFrame(data)
-     y_cols = [col for col in step.columns if col.endswith("y")]
-     this_y_min = step[y_cols].min().min()
-     step[y_cols] -= this_y_min
-     # Expected result
-     expected_result = {
-         "first_y": [0, 5, 10, 20],
-         "second_y": [20, 30, 40, 50],
-         "third_y": [15, 25, 35, 45],
-     }
-     expected_step = pd.DataFrame(expected_result)
-     # Compare the result with the expected result
-     pdt.assert_frame_equal(step, expected_step)
-
-
- cases = [
-     ((0, 0), (1, 0), (0, 1), 90),
-     ((0, 0), (1, 0), (0, 0.5), 90),
-     ((0, 0), (1, 0), (2, 0), 0)
- ] # fmt: skip
- @pytest.mark.parametrize("angle_x_y, lower_x_y, upper_x_y, expected_angle", cases)
- def test_angles(angle_x_y, lower_x_y, upper_x_y, expected_angle):
-     step = (
-         pd.Series(
-             {
-                 "angle x": angle_x_y[0],
-                 "angle y": angle_x_y[1],
-                 "lower x": lower_x_y[0],
-                 "lower y": lower_x_y[1],
-                 "upper x": upper_x_y[0],
-                 "upper y": upper_x_y[1],
-             }
-         )
-         .to_frame()
-         .T
-     )
-     info = {}
-     cfg = {}
-     cfg["angles"] = {
-         "name": ["angle "],
-         "lower_joint": ["lower "],
-         "upper_joint": ["upper "],
-     }
-     step = add_angles(step, info, cfg)
-     assert step["angle Angle"].values == expected_angle
-
-
- def test_angles_not_depending_on_x_standardisation_and_gait_direction_flipping(
-     extract_info, extract_folderinfo, extract_cfg
- ):
-     # 1. preparation
-     data = some_prep("DLC", extract_info, extract_folderinfo, extract_cfg)
-     all_cycles = extract_stepcycles(
-         "DLC", data, extract_info, extract_folderinfo, extract_cfg
-     )
-     # 2. x standardisation
-     results = analyse_and_export_stepcycles(data, all_cycles, extract_info, extract_cfg)
-     all_steps_data = results["all_steps_data"]
-     x_standardised_steps_data = results["x_standardised_steps_data"]
-     angle_cols = [col for col in all_steps_data.columns if col.endswith("Angle")]
-     for angle_col in angle_cols:
-         pdt.assert_series_equal(
-             all_steps_data[angle_col], x_standardised_steps_data[angle_col]
-         )
-     # 3. gait direction flipping
-     extract_cfg["flip_gait_direction"] = False
-     non_flipped_data = some_prep("DLC", extract_info, extract_folderinfo, extract_cfg)
-     non_flipped_results = analyse_and_export_stepcycles(
-         non_flipped_data, all_cycles, extract_info, extract_cfg
-     )
-     non_flipped_all_steps_data = non_flipped_results["all_steps_data"]
-     for angle_col in angle_cols:
-         pdt.assert_series_equal(
-             all_steps_data[angle_col], non_flipped_all_steps_data[angle_col]
-         )
-
-
- def test_velocities():
-     """Unit test of how velocities are added
-     A Note
-     ------
-     If you ever consider testing if velocities are expected after flipping x-values using flip_mouse_body - DON'T!
-     => We only flip if gait direction is right=>left in the first place and thus: velocities are CORRECT and comparable to non-flipped left=>right videos.
-     => NU - write a test that confirms this formally
-     """
-     data = {
-         "Sample x": [0.0, 2.0, 4.0, 8.0, 8.0, 4.0, 2.0, 0.0, -2.0, -10.0],
-         "Sample Angle": [-10.0, -8.0, -7.0, -4.0, 0.0, 4.0, 10.0, 20.0, 10.0, 0.0],
-     }
-     step = pd.DataFrame(data)
-     cfg = {
-         "hind_joints": ["Sample "],
-         "x_acceleration": True,
-         "angular_acceleration": True,
-     }
-     step = add_x_velocities(step, cfg)
-     step = add_angular_velocities(step, cfg)
-     # expected values were obtained by calling np.gradient on arrays above
-     expected_values = {
-         "Sample Velocity": [2.0, 2.0, 3.0, 2.0, -2.0, -3.0, -2.0, -2.0, -5.0, -8.0],
-         "Sample Acceleration": [0.0, 0.5, 0.0, -2.5, -2.5, 0.0, 0.5, -1.5, -3.0, -3.0],
-         "Sample Angle Velocity": [2.0, 1.5, 2.0, 3.5, 4.0, 5.0, 8.0, 0.0, -10.0, -10.0],
-         "Sample Angle Acceleration": [
-             -0.5,
-             0.0,
-             1.0,
-             1.0,
-             0.75,
-             2.0,
-             -2.5,
-             -9.0,
-             -5.0,
-             0.0,
-         ],
-     }
-     for key in expected_values.keys():
-         assert all(expected_values[key] == step[key])
-
-
- def test_step_separators():
-     """Unit test of how step separators are added to df"""
-     df_no_separators = pd.DataFrame(
-         {"A": [1, 2, 3, 4, 5], "B": ["yes", "no", "yes", "no", "yes"]}
-     )
-     nanvector = df_no_separators.loc[[1]]
-     nanvector[:] = np.nan
-     numvector = df_no_separators.loc[[1]]
-     numvector[:] = 1
-     df_manually_added_separators = pd.DataFrame(
-         {
-             "A": [1, 2, 3, 4, 5, np.nan, 1, np.nan],
-             "B": ["yes", "no", "yes", "no", "yes", np.nan, 1, np.nan],
-         }
-     )
-     df_manually_added_separators.index = [0, 1, 2, 3, 4, 1, 1, 1]
-     df_func_added_separators = add_step_separators(
-         df_no_separators, nanvector, numvector
-     )
-     pd.testing.assert_frame_equal(
-         df_func_added_separators, df_manually_added_separators
-     )