accusleepy 0.6.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (42) hide show
  1. accusleepy/__init__.py +0 -0
  2. accusleepy/__main__.py +4 -0
  3. accusleepy/bouts.py +142 -0
  4. accusleepy/brain_state_set.py +89 -0
  5. accusleepy/classification.py +285 -0
  6. accusleepy/config.json +24 -0
  7. accusleepy/constants.py +46 -0
  8. accusleepy/fileio.py +179 -0
  9. accusleepy/gui/__init__.py +0 -0
  10. accusleepy/gui/icons/brightness_down.png +0 -0
  11. accusleepy/gui/icons/brightness_up.png +0 -0
  12. accusleepy/gui/icons/double_down_arrow.png +0 -0
  13. accusleepy/gui/icons/double_up_arrow.png +0 -0
  14. accusleepy/gui/icons/down_arrow.png +0 -0
  15. accusleepy/gui/icons/home.png +0 -0
  16. accusleepy/gui/icons/question.png +0 -0
  17. accusleepy/gui/icons/save.png +0 -0
  18. accusleepy/gui/icons/up_arrow.png +0 -0
  19. accusleepy/gui/icons/zoom_in.png +0 -0
  20. accusleepy/gui/icons/zoom_out.png +0 -0
  21. accusleepy/gui/images/primary_window.png +0 -0
  22. accusleepy/gui/images/viewer_window.png +0 -0
  23. accusleepy/gui/images/viewer_window_annotated.png +0 -0
  24. accusleepy/gui/main.py +1494 -0
  25. accusleepy/gui/manual_scoring.py +1096 -0
  26. accusleepy/gui/mplwidget.py +386 -0
  27. accusleepy/gui/primary_window.py +2577 -0
  28. accusleepy/gui/primary_window.ui +3831 -0
  29. accusleepy/gui/resources.qrc +16 -0
  30. accusleepy/gui/resources_rc.py +6710 -0
  31. accusleepy/gui/text/config_guide.txt +27 -0
  32. accusleepy/gui/text/main_guide.md +167 -0
  33. accusleepy/gui/text/manual_scoring_guide.md +23 -0
  34. accusleepy/gui/viewer_window.py +610 -0
  35. accusleepy/gui/viewer_window.ui +926 -0
  36. accusleepy/models.py +108 -0
  37. accusleepy/multitaper.py +661 -0
  38. accusleepy/signal_processing.py +469 -0
  39. accusleepy/temperature_scaling.py +157 -0
  40. accusleepy-0.6.0.dist-info/METADATA +106 -0
  41. accusleepy-0.6.0.dist-info/RECORD +42 -0
  42. accusleepy-0.6.0.dist-info/WHEEL +4 -0
accusleepy/__init__.py ADDED
File without changes
accusleepy/__main__.py ADDED
@@ -0,0 +1,4 @@
# Entry point for `python -m accusleepy`: launches the primary AccuSleep window.
from accusleepy.gui.main import run_primary_window

if __name__ == "__main__":
    run_primary_window()
accusleepy/bouts.py ADDED
@@ -0,0 +1,142 @@
1
+ import re
2
+ from dataclasses import dataclass
3
+ from operator import attrgetter
4
+
5
+ import numpy as np
6
+
7
+
8
+ @dataclass
9
+ class Bout:
10
+ """Stores information about a brain state bout"""
11
+
12
+ length: int # length, in number of epochs
13
+ start_index: int # index where bout starts
14
+ end_index: int # index where bout ends
15
+ surrounding_state: int # brain state on both sides of the bout
16
+
17
+
18
+ def find_last_adjacent_bout(sorted_bouts: list[Bout], bout_index: int) -> int:
19
+ """Find index of last consecutive same-length bout
20
+
21
+ When running the post-processing step that enforces a minimum duration
22
+ for brain state bouts, there is a special case when bouts below the
23
+ duration threshold occur consecutively. This function performs a
24
+ recursive search for the index of a bout at the end of such a sequence.
25
+ When initially called, bout_index will always be 0. If, for example, the
26
+ first three bouts in the list are consecutive, the function will return 2.
27
+
28
+ :param sorted_bouts: list of brain state bouts, sorted by start time
29
+ :param bout_index: index of the bout in question
30
+ :return: index of the last consecutive same-length bout
31
+ """
32
+ # if we're at the end of the bout list, stop
33
+ if bout_index == len(sorted_bouts) - 1:
34
+ return bout_index
35
+
36
+ # if there is an adjacent bout
37
+ if sorted_bouts[bout_index].end_index == sorted_bouts[bout_index + 1].start_index:
38
+ # look for more adjacent bouts using that one as a starting point
39
+ return find_last_adjacent_bout(sorted_bouts, bout_index + 1)
40
+ else:
41
+ return bout_index
42
+
43
+
44
+ def enforce_min_bout_length(
45
+ labels: np.array, epoch_length: int | float, min_bout_length: int | float
46
+ ) -> np.array:
47
+ """Ensure brain state bouts meet the min length requirement
48
+
49
+ As a post-processing step for sleep scoring, we can require that any
50
+ bout (continuous period) of a brain state have a minimum duration.
51
+ This function sets any bout shorter than the minimum duration to the
52
+ surrounding brain state (if the states on the left and right sides
53
+ are the same). In the case where there are consecutive short bouts,
54
+ it either creates a transition at the midpoint or removes all short
55
+ bouts, depending on whether the number is even or odd. For example:
56
+ ...AAABABAAA... -> ...AAAAAAAAA...
57
+ ...AAABABABBB... -> ...AAAAABBBBB...
58
+
59
+ :param labels: brain state labels (digits in the 0-9 range)
60
+ :param epoch_length: epoch length, in seconds
61
+ :param min_bout_length: minimum bout length, in seconds
62
+ :return: updated brain state labels
63
+ """
64
+ # if recording is very short, don't change anything
65
+ if labels.size < 3:
66
+ return labels
67
+
68
+ if epoch_length == min_bout_length:
69
+ return labels
70
+
71
+ # get minimum number of epochs in a bout
72
+ min_epochs = int(np.ceil(min_bout_length / epoch_length))
73
+ # get set of states in the labels
74
+ brain_states = set(labels.tolist())
75
+
76
+ while True: # so true
77
+ # convert labels to a string for regex search
78
+ # There is probably a regex that can find all patterns like ab+a
79
+ # without consuming each "a" but I haven't found it :(
80
+ label_string = "".join(labels.astype(str))
81
+
82
+ bouts = list()
83
+
84
+ for state in brain_states:
85
+ for other_state in brain_states:
86
+ if state == other_state:
87
+ continue
88
+ # get start and end indices of each bout
89
+ expression = (
90
+ f"(?<={other_state}){state}{{1,{min_epochs - 1}}}(?={other_state})"
91
+ )
92
+ matches = re.finditer(expression, label_string)
93
+ spans = [match.span() for match in matches]
94
+
95
+ # if some bouts were found
96
+ for span in spans:
97
+ bouts.append(
98
+ Bout(
99
+ length=span[1] - span[0],
100
+ start_index=span[0],
101
+ end_index=span[1],
102
+ surrounding_state=other_state,
103
+ )
104
+ )
105
+
106
+ if len(bouts) == 0:
107
+ break
108
+
109
+ # only keep the shortest bouts
110
+ min_length_in_list = np.min([bout.length for bout in bouts])
111
+ bouts = [i for i in bouts if i.length == min_length_in_list]
112
+ # sort by start index
113
+ sorted_bouts = sorted(bouts, key=attrgetter("start_index"))
114
+
115
+ while len(sorted_bouts) > 0:
116
+ # get row index of latest adjacent bout (of same length)
117
+ last_adjacent_bout_index = find_last_adjacent_bout(sorted_bouts, 0)
118
+ # if there's an even number of adjacent bouts
119
+ if (last_adjacent_bout_index + 1) % 2 == 0:
120
+ midpoint = sorted_bouts[
121
+ round((last_adjacent_bout_index + 1) / 2)
122
+ ].start_index
123
+ labels[sorted_bouts[0].start_index : midpoint] = sorted_bouts[
124
+ 0
125
+ ].surrounding_state
126
+ labels[midpoint : sorted_bouts[last_adjacent_bout_index].end_index] = (
127
+ sorted_bouts[last_adjacent_bout_index].surrounding_state
128
+ )
129
+ else:
130
+ labels[
131
+ sorted_bouts[0].start_index : sorted_bouts[
132
+ last_adjacent_bout_index
133
+ ].end_index
134
+ ] = sorted_bouts[0].surrounding_state
135
+
136
+ # delete the bouts we just fixed
137
+ if last_adjacent_bout_index == len(sorted_bouts) - 1:
138
+ sorted_bouts = []
139
+ else:
140
+ sorted_bouts = sorted_bouts[(last_adjacent_bout_index + 1) :]
141
+
142
+ return labels
@@ -0,0 +1,89 @@
1
+ from dataclasses import dataclass
2
+
3
+ import numpy as np
4
+
5
+ BRAIN_STATES_KEY = "brain_states"
6
+
7
+
8
+ @dataclass
9
+ class BrainState:
10
+ """Convenience class for a brain state and its attributes"""
11
+
12
+ name: str # friendly name
13
+ digit: int # number 0-9 - used as keyboard shortcut and in label files
14
+ is_scored: bool # whether a classification model should score this state
15
+ frequency: int | float # typical relative frequency, between 0 and 1
16
+
17
+
18
+ class BrainStateSet:
19
+ def __init__(self, brain_states: list[BrainState], undefined_label: int):
20
+ """Initialize set of brain states
21
+
22
+ :param brain_states: list of BrainState objects
23
+ :param undefined_label: label for undefined epochs
24
+ """
25
+ self.brain_states = brain_states
26
+
27
+ # The user can choose any subset of the digits 0-9 to represent
28
+ # brain states, but not all of them are necessarily intended to be
29
+ # scored by a classifier, and pytorch requires that all input
30
+ # labels are in the 0-n range for training and inference.
31
+ # So, we have to have a distinction between "brain states" (as
32
+ # represented in label files and keyboard inputs) and "classes"
33
+ # (AccuSleep's internal representation).
34
+
35
+ # map digits to classes, and vice versa
36
+ self.digit_to_class = {undefined_label: None}
37
+ self.class_to_digit = dict()
38
+ # relative frequencies of each class
39
+ self.mixture_weights = list()
40
+
41
+ i = 0
42
+ for brain_state in self.brain_states:
43
+ if brain_state.digit == undefined_label:
44
+ raise Exception(
45
+ f"Digit for {brain_state.name} matches 'undefined' label"
46
+ )
47
+ if brain_state.is_scored:
48
+ self.digit_to_class[brain_state.digit] = i
49
+ self.class_to_digit[i] = brain_state.digit
50
+ self.mixture_weights.append(brain_state.frequency)
51
+ i += 1
52
+ else:
53
+ self.digit_to_class[brain_state.digit] = None
54
+
55
+ self.n_classes = i
56
+
57
+ self.mixture_weights = np.array(self.mixture_weights)
58
+ if np.sum(self.mixture_weights) != 1:
59
+ raise Exception("Typical frequencies for scored brain states must sum to 1")
60
+
61
+ def convert_digit_to_class(self, digits: np.array) -> np.array:
62
+ """Convert array of digits to their corresponding classes
63
+
64
+ :param digits: array of digits
65
+ :return: array of classes
66
+ """
67
+ return np.array([self.digit_to_class[i] for i in digits])
68
+
69
+ def convert_class_to_digit(self, classes: np.array) -> np.array:
70
+ """Convert array of classes to their corresponding digits
71
+
72
+ :param classes: array of classes
73
+ :return: array of digits
74
+ """
75
+ return np.array([self.class_to_digit[i] for i in classes])
76
+
77
+ def to_output_dict(self) -> dict:
78
+ """Return dictionary of brain states"""
79
+ return {
80
+ BRAIN_STATES_KEY: [
81
+ {
82
+ "name": b.name,
83
+ "digit": b.digit,
84
+ "is_scored": b.is_scored,
85
+ "frequency": b.frequency,
86
+ }
87
+ for b in self.brain_states
88
+ ]
89
+ }
@@ -0,0 +1,285 @@
1
+ import os
2
+
3
+ import numpy as np
4
+ import pandas as pd
5
+ import torch
6
+ import torch.optim as optim
7
+ from torch import nn
8
+ from torch.utils.data import DataLoader, Dataset
9
+ from torchvision.io import read_image
10
+ from tqdm import trange
11
+
12
+ import accusleepy.constants as c
13
+ from accusleepy.brain_state_set import BrainStateSet
14
+ from accusleepy.models import SSANN
15
+ from accusleepy.signal_processing import (
16
+ create_eeg_emg_image,
17
+ format_img,
18
+ get_mixture_values,
19
+ mixture_z_score_img,
20
+ )
21
+
# hyperparameters for SSANN training (see train_ssann)
BATCH_SIZE = 64  # images per training batch
LEARNING_RATE = 1e-3  # SGD learning rate
MOMENTUM = 0.9  # SGD momentum
TRAINING_EPOCHS = 6  # number of passes through the training set
26
+
27
+
28
class AccuSleepImageDataset(Dataset):
    """Dataset of AccuSleep training images listed in an annotations CSV.

    Each row of the annotations file provides an image filename (relative
    to img_dir) and its brain state label.
    """

    def __init__(
        self, annotations_file, img_dir, transform=None, target_transform=None
    ):
        # table with one row per image: filename column and label column
        self.img_labels = pd.read_csv(annotations_file)
        self.img_dir = img_dir
        # optional callables applied to the image / label on access
        self.transform = transform
        self.target_transform = target_transform

    def __len__(self):
        # one sample per annotation row
        return len(self.img_labels)

    def __getitem__(self, index):
        # load the image for this row from disk
        filename = self.img_labels.at[index, c.FILENAME_COL]
        image = read_image(str(os.path.join(self.img_dir, filename)))
        label = self.img_labels.at[index, c.LABEL_COL]
        # apply any user-supplied transforms
        if self.transform:
            image = self.transform(image)
        if self.target_transform:
            label = self.target_transform(label)
        return image, label
53
+
54
+
55
+ def get_device():
56
+ """Get accelerator, if one is available"""
57
+ return (
58
+ torch.accelerator.current_accelerator().type
59
+ if torch.accelerator.is_available()
60
+ else "cpu"
61
+ )
62
+
63
+
64
def create_dataloader(
    annotations_file: str, img_dir: str, shuffle: bool = True
) -> DataLoader:
    """Create DataLoader for a dataset of training or calibration images

    :param annotations_file: file with information on each training image
    :param img_dir: training image location
    :param shuffle: reshuffle data for every epoch
    :return: DataLoader for the data
    """
    dataset = AccuSleepImageDataset(
        annotations_file=annotations_file,
        img_dir=img_dir,
    )
    # batch size is fixed at the module level
    return DataLoader(dataset, batch_size=BATCH_SIZE, shuffle=shuffle)
80
+
81
+
82
def train_ssann(
    annotations_file: str,
    img_dir: str,
    mixture_weights: np.array,
    n_classes: int,
) -> SSANN:
    """Train a SSANN classification model for sleep scoring

    :param annotations_file: file with information on each training image
    :param img_dir: training image location
    :param mixture_weights: typical relative frequencies of brain states
    :param n_classes: number of classes the model will learn
    :return: trained Sleep Scoring Artificial Neural Network model
    """
    dataloader = create_dataloader(annotations_file=annotations_file, img_dir=img_dir)

    # put a fresh model on the compute device in training mode
    device = get_device()
    model = SSANN(n_classes=n_classes)
    model.to(device)
    model.train()

    # weight each class by the inverse of its typical frequency to
    # counteract class imbalance
    class_weights = torch.tensor((mixture_weights**-1).astype("float32")).to(device)
    loss_fn = nn.CrossEntropyLoss(weight=class_weights)
    optimizer = optim.SGD(model.parameters(), lr=LEARNING_RATE, momentum=MOMENTUM)

    # standard minibatch SGD training loop
    for _ in trange(TRAINING_EPOCHS):
        for inputs, targets in dataloader:
            inputs, targets = inputs.to(device), targets.to(device)
            optimizer.zero_grad()
            batch_loss = loss_fn(model(inputs), targets)
            batch_loss.backward()
            optimizer.step()

    return model
122
+
123
+
124
def score_recording(
    model: SSANN,
    eeg: np.array,
    emg: np.array,
    mixture_means: np.array,
    mixture_sds: np.array,
    sampling_rate: int | float,
    epoch_length: int | float,
    epochs_per_img: int,
    brain_state_set: BrainStateSet,
) -> tuple[np.array, np.array]:
    """Use classification model to get brain state labels for a recording

    This assumes signals have been preprocessed to contain an integer
    number of epochs.

    :param model: classification model
    :param eeg: EEG signal
    :param emg: EMG signal
    :param mixture_means: mixture means, for calibration
    :param mixture_sds: mixture standard deviations, for calibration
    :param sampling_rate: sampling rate, in Hz
    :param epoch_length: epoch length, in seconds
    :param epochs_per_img: number of epochs for the model to consider
    :param brain_state_set: set of brain state options
    :return: brain state labels, confidence scores
    """
    # prepare model for inference
    device = get_device()
    model = model.to(device)
    model.eval()

    # create and scale eeg+emg spectrogram
    img = create_eeg_emg_image(eeg, emg, sampling_rate, epoch_length)
    img = mixture_z_score_img(
        img,
        mixture_means=mixture_means,
        mixture_sds=mixture_sds,
        brain_state_set=brain_state_set,
    )
    img = format_img(img=img, epochs_per_img=epochs_per_img, add_padding=True)

    # create dataset for inference: one sliding window per epoch
    images = [
        img[:, i : (i + epochs_per_img)].astype("float32")
        for i in range(img.shape[1] - epochs_per_img + 1)
    ]
    images = torch.from_numpy(np.array(images))
    images = images[:, None, :, :]  # add channel dimension
    images = images.to(device)

    # perform classification
    with torch.no_grad():
        outputs = model(images)
        logits, predicted = torch.max(outputs, 1)

    labels = brain_state_set.convert_class_to_digit(predicted.cpu().numpy())
    # squash the winning logit through a sigmoid to get a 0-1 confidence score
    confidence_scores = 1 / (1 + np.e ** (-logits.cpu().numpy()))

    return labels, confidence_scores
183
+
184
+
185
def example_real_time_scoring_function(
    model: SSANN,
    eeg: np.array,
    emg: np.array,
    mixture_means: np.array,
    mixture_sds: np.array,
    sampling_rate: int | float,
    epoch_length: int | float,
    epochs_per_img: int,
    brain_state_set: BrainStateSet,
) -> int:
    """Example function that could be used for real-time scoring

    This function demonstrates how you could use a model trained in
    "real-time" mode (current epoch on the right side of each image)
    to score incoming data. By passing a segment of EEG/EMG data
    into this function, the most recent epoch will be scored. For
    example, if the model expects 9 epochs worth of data and the
    epoch length is 5 seconds, you would pass in 45 seconds of data
    and would obtain the brain state of the most recent 5 seconds.

    Note:
    - The EEG and EMG signals must have length equal to
        sampling_rate * epoch_length * <number of epochs per image>.
    - The number of samples per epoch must be an integer.
    - This is just a demonstration, you should customize this for
        your application and there are probably ways to make it
        run faster.

    :param model: classification model
    :param eeg: EEG signal
    :param emg: EMG signal
    :param mixture_means: mixture means, for calibration
    :param mixture_sds: mixture standard deviations, for calibration
    :param sampling_rate: sampling rate, in Hz
    :param epoch_length: epoch length, in seconds
    :param epochs_per_img: number of epochs shown to the model at once
    :param brain_state_set: set of brain state options
    :return: brain state label
    """
    # prepare the model for inference
    # (this could be done once, outside the function)
    device = get_device()
    model = model.to(device)
    model.eval()

    # build the calibrated eeg+emg spectrogram for this data segment
    spectrogram = create_eeg_emg_image(eeg, emg, sampling_rate, epoch_length)
    spectrogram = mixture_z_score_img(
        spectrogram,
        mixture_means=mixture_means,
        mixture_sds=mixture_sds,
        brain_state_set=brain_state_set,
    )
    spectrogram = format_img(
        img=spectrogram, epochs_per_img=epochs_per_img, add_padding=False
    )

    # single-image batch with a channel dimension, on the compute device
    batch = torch.from_numpy(np.array([spectrogram.astype("float32")]))
    batch = batch[:, None, :, :].to(device)

    # classify the most recent epoch
    with torch.no_grad():
        _, predicted = torch.max(model(batch), 1)

    return int(brain_state_set.convert_class_to_digit(predicted.cpu().numpy())[0])
253
+
254
+
255
def create_calibration_file(
    filename: str,
    eeg: np.array,
    emg: np.array,
    labels: np.array,
    sampling_rate: int | float,
    epoch_length: int | float,
    brain_state_set: BrainStateSet,
) -> None:
    """Create file of calibration data for a subject

    This assumes signals have been preprocessed to contain an integer
    number of epochs.

    :param filename: filename for the calibration file
    :param eeg: EEG signal
    :param emg: EMG signal
    :param labels: brain state labels, as digits
    :param sampling_rate: sampling rate, in Hz
    :param epoch_length: epoch length, in seconds
    :param brain_state_set: set of brain state options
    """
    # compute per-class mixture statistics from the recording's spectrogram
    img = create_eeg_emg_image(eeg, emg, sampling_rate, epoch_length)
    mixture_means, mixture_sds = get_mixture_values(
        img=img,
        labels=brain_state_set.convert_digit_to_class(labels),
        brain_state_set=brain_state_set,
    )
    # write one row per spectrogram feature: its mixture mean and SD
    calibration = pd.DataFrame(
        {c.MIXTURE_MEAN_COL: mixture_means, c.MIXTURE_SD_COL: mixture_sds}
    )
    calibration.to_csv(filename, index=False)
accusleepy/config.json ADDED
@@ -0,0 +1,24 @@
1
+ {
2
+ "brain_states": [
3
+ {
4
+ "name": "REM",
5
+ "digit": 1,
6
+ "is_scored": true,
7
+ "frequency": 0.1
8
+ },
9
+ {
10
+ "name": "Wake",
11
+ "digit": 2,
12
+ "is_scored": true,
13
+ "frequency": 0.35
14
+ },
15
+ {
16
+ "name": "NREM",
17
+ "digit": 3,
18
+ "is_scored": true,
19
+ "frequency": 0.55
20
+ }
21
+ ],
22
+ "default_epoch_length": 2.5,
23
+ "save_confidence_setting": true
24
+ }
@@ -0,0 +1,46 @@
# Shared constants: file formats, column names, and processing parameters.

# probably don't change these unless you really need to
UNDEFINED_LABEL = -1  # can't be the same as a brain state's digit, must be an integer
# calibration file columns
MIXTURE_MEAN_COL = "mixture_mean"
MIXTURE_SD_COL = "mixture_sd"
# recording file columns
EEG_COL = "eeg"
EMG_COL = "emg"
# label file columns
BRAIN_STATE_COL = "brain_state"
CONFIDENCE_SCORE_COL = "confidence_score"


# really don't change these
# config file location
CONFIG_FILE = "config.json"
# number of times to include the EMG power in a training image
EMG_COPIES = 9
# minimum spectrogram window length, in seconds
MIN_WINDOW_LEN = 5
# frequency above which to downsample EEG spectrograms
DOWNSAMPLING_START_FREQ = 20
# upper frequency cutoff for EEG spectrograms
UPPER_FREQ = 50
# classification model types
DEFAULT_MODEL_TYPE = "default"  # current epoch is centered
REAL_TIME_MODEL_TYPE = "real-time"  # current epoch on the right
# valid filetypes
RECORDING_FILE_TYPES = [".parquet", ".csv"]
LABEL_FILE_TYPE = ".csv"
CALIBRATION_FILE_TYPE = ".csv"
MODEL_FILE_TYPE = ".pth"
# annotation file columns
FILENAME_COL = "filename"
LABEL_COL = "label"
# recording list file name and type
RECORDING_LIST_NAME = "recording_list"
RECORDING_LIST_FILE_TYPE = ".json"
# key for default epoch length in config
DEFAULT_EPOCH_LENGTH_KEY = "default_epoch_length"
# key used for default confidence score behavior in config
DEFAULT_CONFIDENCE_SETTING_KEY = "save_confidence_setting"
# filename used to store info about training image datasets
ANNOTATIONS_FILENAME = "annotations.csv"
# filename for annotation file for the calibration set
CALIBRATION_ANNOTATION_FILENAME = "calibration_set.csv"