autogaita 1.5.2__py3-none-any.whl → 1.5.5__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,426 @@
+ # %% imports
+ from autogaita.resources.utils import write_issues_to_textfile
+ from autogaita.common2D.common2D_1_preparation import (
+     check_and_expand_cfg,
+     flip_mouse_body,
+ )
+ from autogaita.dlc.dlc_utils import prepare_DLC_df
+ from autogaita.common2D.common2D_constants import FILE_ID_STRING_ADDITIONS
+ import os
+ import shutil
+ import json
+ import pandas as pd
+ import numpy as np
+
+ # %% constants
+ from autogaita.resources.constants import (
+     ISSUES_TXT_FILENAME,
+     TIME_COL,
+     CONFIG_JSON_FILENAME,
+ )
+ from autogaita.common2D.common2D_constants import (
+     DIRECTION_DLC_THRESHOLD,
+ )
+
+ # %% workflow step #1 - preparation
+
+
+ def some_prep(info, folderinfo, cfg):
+     """Preparation of the data & cfg file for dlc analyses"""
+
+     # ............................ unpack stuff ......................................
+     # => DON'T unpack (joint) cfg-keys that are tested later by check_and_expand_cfg
+     name = info["name"]
+     results_dir = info["results_dir"]
+     data_string = folderinfo["data_string"]
+     beam_string = folderinfo["beam_string"]
+     sampling_rate = cfg["sampling_rate"]
+     subtract_beam = cfg["subtract_beam"]
+     convert_to_mm = cfg["convert_to_mm"]
+     pixel_to_mm_ratio = cfg["pixel_to_mm_ratio"]
+     standardise_y_at_SC_level = cfg["standardise_y_at_SC_level"]
+     invert_y_axis = cfg["invert_y_axis"]
+     flip_gait_direction = cfg["flip_gait_direction"]
+     analyse_average_x = cfg["analyse_average_x"]
+     standardise_x_coordinates = cfg["standardise_x_coordinates"]
+     standardise_y_to_a_joint = cfg["standardise_y_to_a_joint"]
+
+     # ............................. move data ........................................
+     # => see if we can delete a previous run's results folder if it exists. If not,
+     #    it's a bit ugly since we only update results if filenames match...
+     # => for example, if angle acceleration is not wanted in the current run but was
+     #    stored in a previous run, the previous run's figure remains in the folder
+     # => inform the user and leave this as is
+     if os.path.exists(results_dir):
+         try:
+             shutil.rmtree(results_dir)
+             move_data_to_folders(info, folderinfo)
+         except OSError:
+             move_data_to_folders(info, folderinfo)
+             unable_to_rm_resdir_error = (
+                 "\n***********\n! WARNING !\n***********\n"
+                 + "Unable to remove previous Results subfolder of ID: "
+                 + name
+                 + "!\n Results will only be updated if filenames match!"
+             )
+             print(unable_to_rm_resdir_error)
+             write_issues_to_textfile(unable_to_rm_resdir_error, info)
+     else:
+         move_data_to_folders(info, folderinfo)
+
+     # ....... initialise Issues.txt & quick check for file existence .................
+     # Issues.txt - delete if saved in a previous run
+     issues_txt_path = os.path.join(results_dir, ISSUES_TXT_FILENAME)
+     if os.path.exists(issues_txt_path):
+         os.remove(issues_txt_path)
+     # read data & beam
+     if not os.listdir(results_dir):
+         no_files_error = (
+             "\n******************\n! CRITICAL ERROR !\n******************\n"
+             + "Unable to identify ANY RELEVANT FILES for "
+             + name
+             + "!\nThis is likely due to issues with unique file name identifiers - "
+             + "check capitalisation!"
+         )
+         write_issues_to_textfile(no_files_error, info)
+         print(no_files_error)
+         return
+
+     # ............................ import data .......................................
+     datadf = pd.DataFrame(data=None)  # prep stuff for error handling
+     datadf_duplicate_error = ""
+     if subtract_beam:
+         if data_string == beam_string:
+             beam_and_data_string_error_message = (
+                 "\n******************\n! CRITICAL ERROR !\n******************\n"
+                 + "Your data & baseline (beam) identifiers ([G] in our "
+                 + "file naming convention) are identical. "
+                 + "\nNote that they must be different! \nTry again"
+             )
+             write_issues_to_textfile(beam_and_data_string_error_message, info)
+             return
+         beamdf = pd.DataFrame(data=None)
+         beamdf_duplicate_error = ""
+     for filename in os.listdir(results_dir):  # import
+         if filename.endswith(".csv"):
+             if data_string in filename:
+                 if datadf.empty:
+                     datadf = pd.read_csv(os.path.join(results_dir, filename))
+                 else:
+                     datadf_duplicate_error = (
+                         "\n******************\n! CRITICAL ERROR !\n******************\n"
+                         + "Two DATA csv-files found for "
+                         + name
+                         + "!\nPlease ensure your root directory only has one datafile "
+                         + "per video!"
+                     )
+             if subtract_beam:
+                 if beam_string in filename:
+                     if beamdf.empty:
+                         beamdf = pd.read_csv(os.path.join(results_dir, filename))
+                     else:
+                         beamdf_duplicate_error = (
+                             "\n******************\n! CRITICAL ERROR !\n***************"
+                             + "***\nTwo BEAM csv-files found for "
+                             + name
+                             + "!\nPlease ensure your root directory only has one "
+                             + "beamfile per video!"
+                         )
+     # handle import errors
+     # => append to empty strings to handle multiple issues at once seamlessly
+     import_error_message = ""
+     if datadf_duplicate_error:
+         import_error_message += datadf_duplicate_error
+     if datadf.empty:
+         import_error_message += (  # if pd didn't raise errors but dfs are still empty
+             "\n******************\n! CRITICAL ERROR !\n******************\n"
+             + "Unable to load a DATA csv file for "
+             + name
+             + "!\nTry again!"
+         )
+     if subtract_beam:
+         if beamdf_duplicate_error:
+             import_error_message += beamdf_duplicate_error
+         if beamdf.empty:
+             import_error_message += (
+                 "\n******************\n! CRITICAL ERROR !\n******************\n"
+                 + "Unable to load a BEAM csv file for "
+                 + name
+                 + "!\nTry again!"
+             )
+     if import_error_message:  # see if there were any issues with import, if so: stop
+         print(import_error_message)
+         write_issues_to_textfile(import_error_message, info)
+         return
+
+     # .... finalise import: rename cols, get rid of unnecessary elements, float it ...
+     datadf = prepare_DLC_df(datadf)
+     if subtract_beam:  # beam df
+         beamdf = prepare_DLC_df(beamdf)
+         data = pd.concat([datadf, beamdf], axis=1)
+     else:
+         data = datadf.copy(deep=True)
+
+     # ................ final data checks, conversions & additions ....................
+     # IMPORTANT - MAIN TESTS OF USER-INPUT VALIDITY OCCUR HERE!
+     # => UNPACK VARS FROM CFG THAT ARE TESTED BY check_and_expand HERE, NOT EARLIER!
+     cfg = check_and_expand_cfg(data, cfg, info)
+     if cfg is None:  # some critical error occurred
+         return
+     hind_joints = cfg["hind_joints"]
+     fore_joints = cfg["fore_joints"]
+     angles = cfg["angles"]
+     beam_hind_jointadd = cfg["beam_hind_jointadd"]
+     beam_fore_jointadd = cfg["beam_fore_jointadd"]
+     direction_joint = cfg["direction_joint"]
+     # important to unpack to vars and not to cfg since cfg is overwritten in multiruns!
+     x_standardisation_joint = cfg["x_standardisation_joint"][0]
+     y_standardisation_joint = cfg["y_standardisation_joint"][0]
+     # store config json file @ group path
+     # !!! NU - do this @ mouse path!
+     group_path = results_dir.split(name)[0]
+     config_json_path = os.path.join(group_path, CONFIG_JSON_FILENAME)
+     config_vars_to_json = {
+         "sampling_rate": sampling_rate,
+         "convert_to_mm": convert_to_mm,
+         "standardise_y_at_SC_level": standardise_y_at_SC_level,
+         "analyse_average_x": analyse_average_x,
+         "standardise_x_coordinates": standardise_x_coordinates,
+         "x_standardisation_joint": x_standardisation_joint,
+         "standardise_y_to_a_joint": standardise_y_to_a_joint,
+         "y_standardisation_joint": y_standardisation_joint,
+         "hind_joints": hind_joints,
+         "fore_joints": fore_joints,
+         "angles": angles,
+         "tracking_software": "DLC",
+     }
+     # note - "w" overwrites/truncates the file, thus no need to remove it if it exists
+     with open(config_json_path, "w") as config_json_file:
+         json.dump(config_vars_to_json, config_json_file, indent=4)
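+     # for orientation (illustrative values - only the keys are fixed by the dict
+     # above), the resulting config.json might look like:
+     # {"sampling_rate": 100, "convert_to_mm": true, ..., "tracking_software": "DLC"}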
+     # a little test to see if columns make sense, i.e., same number of x/y/likelihood
+     x_col_count = len([c for c in data.columns if c.endswith(" x")])
+     y_col_count = len([c for c in data.columns if c.endswith(" y")])
+     likelihood_col_count = len([c for c in data.columns if c.endswith(" likelihood")])
+     if x_col_count == y_col_count == likelihood_col_count:
+         pass
+     else:
+         cols_are_weird_message = (
+             "\n***********\n! WARNING !\n***********\n"
+             + "We detected an unequal number of columns ending with x, y or "
+             + "likelihood!\nCounts were:\n"
+             + "x: "
+             + str(x_col_count)
+             + ", y: "
+             + str(y_col_count)
+             + ", likelihood: "
+             + str(likelihood_col_count)
+             + "!\n\n"
+             + "We continue with the analysis but we strongly suggest you have another "
+             + "look at your dataset, this should not happen.\n"
+         )
+         print(cols_are_weird_message)
+         write_issues_to_textfile(cols_are_weird_message, info)
+     # if wanted: fix that DeepLabCut inverts y
+     if invert_y_axis:
+         for col in data.columns:
+             if col.endswith(" y"):
+                 data[col] = data[col] * -1
+     # if we don't have a beam to subtract, standardise y to a joint's or global ymin = 0
+     if not subtract_beam:
+         y_min = float("inf")
+         y_cols = [col for col in data.columns if col.endswith("y")]
+         if standardise_y_to_a_joint:
+             y_min = data[y_standardisation_joint + "y"].min()
+         else:
+             y_min = data[y_cols].min().min()
+         data[y_cols] -= y_min
+     # convert pixels to millimeters
+     if convert_to_mm:
+         for column in data.columns:
+             if not column.endswith("likelihood"):
+                 data[column] = data[column] / pixel_to_mm_ratio
+     # quick warning if cfg is set to not flip gait direction but to standardise x
+     if not flip_gait_direction and standardise_x_coordinates:
+         message = (
+             "\n***********\n! WARNING !\n***********\n"
+             + "You are standardising x-coordinates without standardising the direction "
+             + "of gait (e.g. all walking from right to left)."
+             + "\nThis can be correct if you are doing things like treadmill walking "
+             + "but can lead to unexpected behaviour otherwise!"
+             + "\nMake sure you know what you are doing!"
+         )
+         print(message)
+         write_issues_to_textfile(message, info)
+     # check gait direction & DLC file validity
+     data = check_gait_direction(data, direction_joint, flip_gait_direction, info)
+     if data is None:  # this means the DLC file is broken
+         return
+     # subtract the beam from the joints to standardise y
+     # => bc. we simulate that all mice run from left to right, we can write:
+     #    (note that we also flip beam x columns, but never y-columns!)
+     # => & bc. we multiplied y values by -1 earlier, it's a negative number minus a
+     #    negative number, pushing values towards zero (see the example just below)
+     # => using list(set()) to ensure that we don't have duplicate values (in case
+     #    users provided them in both cfg vars by mistake)
+     # => beam_col_left and beam_col_right are provided by users
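+     # e.g. (illustrative numbers): inverted joint y = -3, inverted beam y = -5
+     # => -3 - (-5) = 2, i.e. y now expresses the joint's height relative to the beam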
+     if subtract_beam:
+         # note beam_col_left/right are always lists in cfg!
+         beam_col_left = cfg["beam_col_left"][0]
+         beam_col_right = cfg["beam_col_right"][0]
+         for joint in list(set(hind_joints + beam_hind_jointadd)):
+             data[joint + "y"] = data[joint + "y"] - data[beam_col_left + "y"]
+         for joint in list(set(fore_joints + beam_fore_jointadd)):
+             data[joint + "y"] = data[joint + "y"] - data[beam_col_right + "y"]
+         data.drop(columns=list(beamdf.columns), inplace=True)  # beam no longer needed
+     # add Time
+     data[TIME_COL] = data.index * (1 / sampling_rate)
+     # reorder the columns we added
+     cols = [TIME_COL, "Flipped"]
+     data = data[cols + [c for c in data.columns if c not in cols]]
+     return data
+
+
+ # .............................. helper functions ....................................
+
+
+ def move_data_to_folders(info, folderinfo):
+     """Find files, copy data, video, beamdata & beamvideo to new results_dir"""
+     # unpack
+     results_dir = info["results_dir"]
+     postmouse_string = folderinfo["postmouse_string"]
+     postrun_string = folderinfo["postrun_string"]
+     os.makedirs(results_dir)  # important to do this outside of the loop!
+     # check if the user forgot some underscores or dashes in their filenames
+     # => two levels of string additions for the two post-FILE-ID strings
+     # => in theory, if the user has some strange case in which this double for-loop
+     #    is true twice (because one file is called -6DLC and another is called _6DLC
+     #    for some reason), it will break after the first time and always ignore the
+     #    second one - keep this in mind if it comes up, but it should be very unlikely
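+     # e.g., assuming FILE_ID_STRING_ADDITIONS contains something like ["", "-", "_"],
+     # a postmouse_string of "6DLC" yields the candidates "6DLC", "-6DLC" & "_6DLC"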
+     for mouse_string_addition in FILE_ID_STRING_ADDITIONS:
+         candidate_postmouse_string = mouse_string_addition + postmouse_string
+         for run_string_addition in FILE_ID_STRING_ADDITIONS:
+             candidate_postrun_string = run_string_addition + postrun_string
+             found_it = check_this_filename_configuration(
+                 info,
+                 folderinfo,
+                 candidate_postmouse_string,
+                 candidate_postrun_string,
+                 results_dir,
+             )
+             if found_it:  # if our search was successful, stop searching and continue
+                 break
+
+
+ def check_this_filename_configuration(
+     info, folderinfo, postmouse_string, postrun_string, results_dir
+ ):
+     # unpack
+     name = info["name"]
+     mouse_num = info["mouse_num"]
+     run_num = info["run_num"]
+     root_dir = folderinfo["root_dir"]
+     data_string = folderinfo["data_string"]
+     beam_string = folderinfo["beam_string"]
+     premouse_string = folderinfo["premouse_string"]
+     prerun_string = folderinfo["prerun_string"]
+     whichvideo = ""  # initialise
+     found_it = False
+     for filename in os.listdir(root_dir):
+         # the following condition is True for data & beam csv
+         if (
+             (premouse_string + str(mouse_num) + postmouse_string in filename)
+             and (prerun_string + str(run_num) + postrun_string in filename)
+             and (filename.endswith(".csv"))
+         ):
+             found_it = True
+             # copy the csv file to the new subfolder
+             shutil.copy2(
+                 os.path.join(root_dir, filename), os.path.join(results_dir, filename)
+             )
+             # check if there is a video and, if so, copy it too
+             vidname = filename[:-4] + "_labeled.mp4"
+             vidpath = os.path.join(root_dir, vidname)
+             if os.path.exists(vidpath):
+                 shutil.copy2(vidpath, os.path.join(results_dir, vidname))
+             else:
+                 if data_string in vidname:
+                     whichvideo = "Data"
+                 elif beam_string in vidname:
+                     whichvideo = "Beam"
+                 this_message = (
+                     "\n***********\n! WARNING !\n***********\n"
+                     + "No "
+                     + whichvideo
+                     + " video for "
+                     + name
+                     + "!"
+                 )
+                 print(this_message)
+                 write_issues_to_textfile(this_message, info)
+     return found_it
+
+
+ def check_gait_direction(data, direction_joint, flip_gait_direction, info):
+     """Check direction of gait - reverse it if needed
+
+     Note
+     ----
+     This check doubles as a test for broken DLC files;
+     flip_gait_direction is only applied after that test has passed.
+     """
+
+     data["Flipped"] = False
+     enterframe = 0
+     idx = 0
+     flip_error_message = ""
+     while enterframe == 0:  # first find out when the mouse entered the video frame
+         if (
+             np.mean(data[direction_joint + "likelihood"][idx : idx + 5])
+             > DIRECTION_DLC_THRESHOLD
+         ):  # +5 to increase confidence
+             enterframe = idx + 5
+         idx += 1
+         if (idx > len(data)) | (enterframe > len(data)):
+             flip_error_message += (
+                 "\n******************\n! CRITICAL ERROR !\n******************\n"
+                 + "Unable to determine gait direction!"
+                 + "\nThis hints at a critical issue with DLC tracking, e.g., likelihood "
+                 + "\ncolumns being low everywhere or tables being suspiciously short!"
+                 + "\nTo be sure, we cancel everything here."
+                 + "\nPlease check your input DLC csv files for correctness & try again!"
+             )
+             break
+     leaveframe = 0
+     idx = 1
+     while leaveframe == 0:  # see where the mouse left the frame (same logic from back)
+         if (
+             np.mean(data[direction_joint + "likelihood"][-idx - 5 : -idx])
+             > DIRECTION_DLC_THRESHOLD
+         ):
+             leaveframe = len(data) - idx - 5
+         idx += 1
+         if idx > len(data):
+             if not flip_error_message:
+                 flip_error_message += (
+                     "\n******************\n! CRITICAL ERROR !\n******************\n"
+                     + "Unable to determine gait direction!"
+                     + "\nThis hints at a critical issue with DLC tracking, e.g., "
+                     + "likelihood \ncolumns being low everywhere or tables being "
+                     + "suspiciously short!\nTo be sure, we cancel everything here."
+                     + "\nPlease check your input DLC csv files for correctness & try "
+                     + "again!"
+                 )
+             break
+     if flip_error_message:
+         write_issues_to_textfile(flip_error_message, info)
+         print(flip_error_message)
+         return
+     if (
+         data[direction_joint + "x"][enterframe]
+         > data[direction_joint + "x"][leaveframe]
+     ):  # i.e.: the mouse ran from right to left
+         # simulate that the mouse ran from left to right (only if the user wants it)
+         if flip_gait_direction:
+             data = flip_mouse_body(data, info)
+             data["Flipped"] = True
+     return data
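
For orientation, a minimal sketch of how this preparation step might be driven (the import path and all concrete values are illustrative assumptions; the dict keys mirror the unpacking code above, and cfg keys validated later by check_and_expand_cfg, e.g. hind_joints, are omitted):

    from autogaita.dlc.dlc_1_preparation import some_prep  # hypothetical module path

    info = {"name": "Mouse15 - Run3", "results_dir": "Results/Mouse15 - Run3/",
            "mouse_num": 15, "run_num": 3}
    folderinfo = {"root_dir": "my_data/", "data_string": "data", "beam_string": "beam",
                  "premouse_string": "Mouse", "postmouse_string": "_25mm",
                  "prerun_string": "Run", "postrun_string": "-6DLC"}
    cfg = {"sampling_rate": 100, "subtract_beam": True, "convert_to_mm": True,
           "pixel_to_mm_ratio": 3.76, "standardise_y_at_SC_level": True,
           "invert_y_axis": True, "flip_gait_direction": True,
           "analyse_average_x": False, "standardise_x_coordinates": True,
           "standardise_y_to_a_joint": False}
    data = some_prep(info, folderinfo, cfg)  # prepared df, or None on critical errors
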
@@ -0,0 +1,217 @@
+ # %% imports
+ from autogaita.resources.utils import write_issues_to_textfile
+ from autogaita.common2D.common2D_utils import (
+     check_cycle_out_of_bounds,
+     check_cycle_duplicates,
+     check_cycle_order,
+     check_tracking,
+     handle_issues,
+ )
+ import os
+ import pandas as pd
+ import numpy as np
+
+ # %% constants
+ from autogaita.common2D.common2D_constants import (
+     SCXLS_MOUSECOLS,
+     SCXLS_RUNCOLS,
+     SCXLS_SCCOLS,
+     SWINGSTART_COL,
+     STANCEEND_COL,
+ )
+
+ # %% workflow step #2 - SC extraction (reading user-provided SC Table)
+
+
+ def extract_stepcycles(data, info, folderinfo, cfg):
+     """Read XLS file with SC annotations, find correct row & return all_cycles"""
+
+     # ............................... preparation ....................................
+     # unpack
+     mouse_num = info["mouse_num"]
+     run_num = info["run_num"]
+     root_dir = folderinfo["root_dir"]
+     sctable_filename = folderinfo["sctable_filename"]
+     sampling_rate = cfg["sampling_rate"]
+
+     # load the table - try some filename & ending options
+     if os.path.exists(os.path.join(root_dir, sctable_filename)):
+         SCdf_full_filename = os.path.join(root_dir, sctable_filename)
+     elif os.path.exists(os.path.join(root_dir, sctable_filename) + ".xlsx"):
+         SCdf_full_filename = os.path.join(root_dir, sctable_filename) + ".xlsx"
+     elif os.path.exists(os.path.join(root_dir, sctable_filename) + ".xls"):
+         SCdf_full_filename = os.path.join(root_dir, sctable_filename) + ".xls"
+     else:
+         no_sc_table_message = (
+             "No Annotation Table found! sctable_filename has to be @ root_dir"
+         )
+         raise FileNotFoundError(no_sc_table_message)
+     # check if we need to specify the engine (required for xlsx)
+     try:
+         SCdf = pd.read_excel(SCdf_full_filename)
+     except:
+         SCdf = pd.read_excel(SCdf_full_filename, engine="openpyxl")
+
+     # see if table columns are labelled correctly (try a couple to allow user typos)
+     valid_col_flags = [False, False, False]
+     header_columns = ["", "", ""]
+     for h, header in enumerate([SCXLS_MOUSECOLS, SCXLS_RUNCOLS, SCXLS_SCCOLS]):
+         for header_col in header:
+             if header_col in SCdf.columns:
+                 valid_col_flags[h] = True
+                 header_columns[h] = header_col
+                 break
+     if not all(valid_col_flags):
+         handle_issues("wrong_scxls_colnames", info)
+         return
+     # find our info columns & rows
+     mouse_col = SCdf.columns.get_loc(header_columns[0])  # INDEXING! (see list above)
+     run_col = SCdf.columns.get_loc(header_columns[1])
+     sc_col = SCdf.columns.get_loc(header_columns[2])
+     # mouse_row will always be start of this mouse's runs
+     mouse_row = SCdf.index[SCdf[header_columns[0]] == mouse_num]
+     # this mouse was not included in sc xls
+     if len(mouse_row) == 0:
+         handle_issues("no_mouse", info)
+         return
+     # this mouse was included more than once
+     if len(mouse_row) > 1:
+         handle_issues("double_mouse", info)
+         return
+
+     next_mouse_idx = mouse_row  # search idx of first row of next mouse
+
+     # .............................. main xls read ...................................
+     # if the while-condition is False, we arrived at the next mouse/the end & don't
+     # update next_mouse_idx
+     # 3 conditions (continue if true):
+     # 1) first row of this mouse
+     # 2) None means a different run of this mouse or an empty row
+     # 3) last line of the SC Table
+     # ==> important that there are parentheses around the mouse-num & nan conditions!!!
+     while (
+         (SCdf.iloc[next_mouse_idx, mouse_col].values[0] == mouse_num)
+         | (np.isnan(SCdf.iloc[next_mouse_idx, mouse_col].values[0]))
+     ) & (next_mouse_idx[0] != len(SCdf) - 1):
+         next_mouse_idx += 1  # this becomes first idx of next mouse's runs
+     # slicing is exclusive, so indexing the first row of the next mouse means we
+     # include (!) the last row of the correct mouse
+     if next_mouse_idx[0] != (len(SCdf) - 1):
+         mouse_runs = SCdf.iloc[int(mouse_row[0]) : int(next_mouse_idx[0]), run_col]
+     else:
+         # SPECIAL CASE: the last row of SCdf is a mouse with only one run!!!
+         # ==> E.g.: SCdf's last idx is 25.
+         #     SCdf.iloc[25:25, run_col] == empty Series (slicing is exclusive)
+         # NOTE THAT: if this mouse had two runs, e.g. 24 & 25:
+         #     SCdf.iloc[24:25, run_col] == correct Series, because 25 is treated
+         #     as SCdf.iloc[24:, run_col]
+         # TO BE SURE: if our while loop broke out bc. we arrived at SCdf's end,
+         #     just index with a colon: iloc[mouse_row:]
+         mouse_runs = SCdf.iloc[int(mouse_row[0]) :, run_col]
+     if run_num not in mouse_runs.values:
+         handle_issues("no_scs", info)
+         return  # return None and stop everything
+     # find out the total number of scs & see if it matches user-provided values
+     # => also exclude the run if no scs were found
+     info_row = mouse_runs[mouse_runs == run_num].index  # where is this run
+     sc_num = 0
+     for column in SCdf.columns:
+         if STANCEEND_COL in column:
+             if np.isnan(SCdf[column][info_row].values[0]) == False:
+                 sc_num += 1
+     if sc_num == 0:
+         handle_issues("no_scs", info)
+         return
+     user_scnum = SCdf.iloc[info_row, sc_col].values[0]  # sanity check input
+     if user_scnum != sc_num:  # warn the user, take the values we found
+         this_message = (
+             "\n***********\n! WARNING !\n***********\n"
+             + "Mismatch between stepcycle number of SC Number column & "
+             + "entries in swing/stance latency columns!"
+             + "\nUsing all valid swing/stance entries."
+         )
+         print(this_message)
+         write_issues_to_textfile(this_message, info)
+
+     # ........................... idxs to all_cycles .................................
+     # use the value we found, loop over all SCs & throw them all into all_cycles
+     all_cycles = [[None, None] for s in range(sc_num)]  # prefill sc_num x 2 list
+     for s in range(sc_num):
+         if s == 0:
+             start_col = SCdf.columns.get_loc(SWINGSTART_COL)
+             end_col = SCdf.columns.get_loc(STANCEEND_COL)
+         else:
+             # str(s) because colnames match s for s>0!
+             start_col = SCdf.columns.get_loc(SWINGSTART_COL + "." + str(s))
+             end_col = SCdf.columns.get_loc(STANCEEND_COL + "." + str(s))
+         user_scnum += 1
+         # extract the SC times
+         start_in_s = float(SCdf.iloc[info_row, start_col].values[0])
+         end_in_s = float(SCdf.iloc[info_row, end_col].values[0])
+         # see if we are rounding to fix inaccurate user input
+         # => account for python's float precision leading to inaccuracies
+         # => two important steps here (sanity_check vals are only used for these checks)
+         # 1. round to the 10th decimal to fix python making
+         #    3211.999999999999999995 out of 3212
+         sanity_check_start = round(start_in_s * sampling_rate, 10)
+         sanity_check_end = round(end_in_s * sampling_rate, 10)
+         # 2. compare abs(sanity check vals) % 1 to 1e-7 just to be 1000% sure
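+         # worked example (illustrative numbers): start_in_s = 1.234 at 100 Hz gives
+         # a sanity-check value of 123.4; abs(123.4 % 1) ≈ 0.4 > 1e-7, so we warn
+         # below & int() floors the latency to frame 123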
+         if (abs(sanity_check_start % 1) > 1e-7) | (abs(sanity_check_end % 1) > 1e-7):
+             round_message = (
+                 "\n***********\n! WARNING !\n***********\n"
+                 + "SC latencies of "
+                 + str(start_in_s)
+                 + "s to "
+                 + str(end_in_s)
+                 + "s were not provided in units of the frame rate!"
+                 + "\nWe thus use the previous possible frame(s)."
+                 + "\nDouble check if this worked as expected or fix annotation table!"
+             )
+             print(round_message)
+             write_issues_to_textfile(round_message, info)
+         # assign to all_cycles (note int() rounds down!)
+         try:
+             all_cycles[s] = [
+                 int(start_in_s * sampling_rate),
+                 int(end_in_s * sampling_rate),
+             ]
+         except:
+             assign_error_message = (
+                 "\n***********\n! WARNING !\n***********\n"
+                 + "Unable to assign SC latencies of: "
+                 + str(start_in_s)
+                 + "s to "
+                 + str(end_in_s)
+                 + "s!\nThis could indicate that the Swing/Stance columns of your "
+                 + "Annotation Table are not named correctly."
+                 + "\nPlease double-check & re-run!"
+             )
+             print(assign_error_message)
+             write_issues_to_textfile(assign_error_message, info)
+         # check if we are in data-bounds
+         if (all_cycles[s][0] in data.index) & (all_cycles[s][1] in data.index):
+             pass
+         else:
+             all_cycles[s] = [None, None]  # so they can be cleaned later
+             this_message = (
+                 "\n***********\n! WARNING !\n***********\n"
+                 + "SC latencies of: "
+                 + str(start_in_s)
+                 + "s to "
+                 + str(end_in_s)
+                 + "s not in data/video range!"
+                 + "\nSkipping!"
+             )
+             print(this_message)
+             write_issues_to_textfile(this_message, info)
+
+     # ............................ clean all_cycles ..................................
+     # check if we skipped latencies because they were out of data-bounds
+     all_cycles = check_cycle_out_of_bounds(all_cycles)
+     if all_cycles:  # can be None if all SCs were out of bounds
+         # check if there are any duplicates (e.g., SC2's start-lat == SC1's end-lat)
+         all_cycles = check_cycle_duplicates(all_cycles)
+         # check if the user input progressively later latencies
+         all_cycles = check_cycle_order(all_cycles, info)
+         # check if tracking broke for any SCs - if so, remove them
+         all_cycles = check_tracking(data, info, all_cycles, cfg)
+     return all_cycles
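
For reference, a toy sketch of the all_cycles structure this function builds and then hands to the cleaning helpers (frame indices are illustrative; the helper roles follow the comments above):

    # with sampling_rate = 100, the extraction loop might e.g. produce:
    all_cycles = [
        [120, 185],    # SC 1: [swing-start, stance-end] frames via int(seconds * rate)
        [None, None],  # SC 2: latencies outside the data/video range, skipped above
        [185, 250],    # SC 3: starts at SC 1's end frame (a duplicate boundary)
    ]
    # check_cycle_out_of_bounds removes the [None, None] placeholder,
    # check_cycle_duplicates handles the shared 185-frame boundary,
    # check_cycle_order verifies latencies get progressively later, and
    # check_tracking drops cycles whose DLC tracking broke down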