sl-shared-assets 6.1.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- sl_shared_assets/__init__.py +120 -0
- sl_shared_assets/command_line_interfaces/__init__.py +3 -0
- sl_shared_assets/command_line_interfaces/configure.py +318 -0
- sl_shared_assets/data_classes/__init__.py +121 -0
- sl_shared_assets/data_classes/configuration_data.py +939 -0
- sl_shared_assets/data_classes/dataset_data.py +385 -0
- sl_shared_assets/data_classes/processing_data.py +385 -0
- sl_shared_assets/data_classes/runtime_data.py +237 -0
- sl_shared_assets/data_classes/session_data.py +400 -0
- sl_shared_assets/data_classes/surgery_data.py +138 -0
- sl_shared_assets/data_transfer/__init__.py +12 -0
- sl_shared_assets/data_transfer/checksum_tools.py +125 -0
- sl_shared_assets/data_transfer/transfer_tools.py +181 -0
- sl_shared_assets/py.typed +0 -0
- sl_shared_assets-6.1.1.dist-info/METADATA +830 -0
- sl_shared_assets-6.1.1.dist-info/RECORD +19 -0
- sl_shared_assets-6.1.1.dist-info/WHEEL +4 -0
- sl_shared_assets-6.1.1.dist-info/entry_points.txt +2 -0
- sl_shared_assets-6.1.1.dist-info/licenses/LICENSE +674 -0
|
@@ -0,0 +1,385 @@
|
|
|
1
|
+
"""This module provides the assets for running data processing pipelines."""
|
|
2
|
+
|
|
3
|
+
import os
|
|
4
|
+
import copy
|
|
5
|
+
from enum import IntEnum, StrEnum
|
|
6
|
+
from pathlib import Path # noqa: TC003
|
|
7
|
+
from dataclasses import field, dataclass
|
|
8
|
+
|
|
9
|
+
import xxhash
|
|
10
|
+
from filelock import FileLock
|
|
11
|
+
from ataraxis_base_utilities import console
|
|
12
|
+
from ataraxis_data_structures import YamlConfig
|
|
13
|
+
|
|
14
|
+
|
|
15
|
+
class ProcessingPipelines(StrEnum):
    """Defines the data processing pipelines currently supported by the Sun lab data workflow.

    Each member's value is the short lowercase pipeline name used to identify the pipeline at runtime.
    """

    MANIFEST = "manifest"
    """The project manifest generation pipeline. This pipeline generates a .feather file that stores the snapshot of the
    target project's data processing state. The created manifest file is then used as the entry-point for all other
    data processing pipelines other than the ADOPTION pipeline."""
    ADOPTION = "adoption"
    """The session data adoption pipeline. This pipeline copies session's data stored in the shared Sun lab
    data directory on the remote compute server to the user's working directory. This is the entry-point for all
    further interactions with the data, as it gives the calling user the permission to modify the copied data."""
    CHECKSUM = "checksum"
    """The session's raw data integrity checksum verification pipeline. This pipeline verifies or regenerates
    the session's data integrity checksum."""
    BEHAVIOR = "behavior"
    """The behavior data processing pipeline. This pipeline parses the non-video behavior data from the .npz log
    archives generated by the sl-experiment library during the session's data acquisition."""
    VIDEO = "video"
    """The video data processing pipeline. This pipeline identifies and extracts the animal's pose and movement data
    from the frames recorded by behavior video cameras during the session's data acquisition."""
    SUITE2P = "suite2p"
    """The suite2p calcium imaging data processing pipeline. This pipeline motion-corrects, identifies, and extracts
    the calcium fluorescence traces from cells recorded with 2-Photon Random Access Mesoscope (2P-RAM)
    during the session's data acquisition."""
    MULTIDAY = "multiday"
    """The suite2p multi-day cell tracking pipeline. This pipeline uses the output of the 'suite2p' pipeline for each
    of the processed sessions to identify and track the activity of stably present cells across days."""
    FORGING = "forging"
    """The dataset assembly (forging) pipeline. This pipeline integrates the single-day and multi-day data from all
    available sources for each processed session into a unified analysis Dataset structure. The assembled
    dataset then serves as an entry-point for all further data analysis tasks."""
|
|
46
|
+
|
|
47
|
+
|
|
48
|
+
class ManagingTrackers(StrEnum):
    """Defines the tracker files used by the data managing pipelines currently supported by the Sun lab data
    workflow.

    Each member's value is the .yaml tracker file name used by the corresponding pipeline.
    """

    CHECKSUM = "checksum.yaml"
    """The tracker file used by the checksum resolution pipeline."""
    MANIFEST = "manifest.yaml"
    """The tracker file used by the project manifest generation pipeline."""
|
|
57
|
+
|
|
58
|
+
|
|
59
|
+
class ProcessingTrackers(StrEnum):
    """Defines the tracker files used by data processing pipelines currently supported by the Sun lab data
    workflow.

    Each member's value is the .yaml tracker file name used by the corresponding pipeline.
    """

    SUITE2P = "suite2p.yaml"
    """The tracker file used by the suite2p processing pipeline."""
    BEHAVIOR = "behavior.yaml"
    """The tracker file used by the behavior extraction pipeline."""
    VIDEO = "video.yaml"
    """The tracker file used by the video (DeepLabCut) processing pipeline."""
|
|
70
|
+
|
|
71
|
+
|
|
72
|
+
class DatasetTrackers(StrEnum):
    """Defines the tracker files used by dataset forging pipelines currently supported by the Sun lab data
    workflow.

    Each member's value is the .yaml tracker file name used by the corresponding pipeline.
    """

    FORGING = "forging.yaml"
    """The tracker file used by the dataset forging pipeline."""
    MULTIDAY = "multiday.yaml"
    """The tracker file used by the multi-day suite2p registration pipeline."""
|
|
81
|
+
|
|
82
|
+
|
|
83
|
+
class ProcessingStatus(IntEnum):
    """Defines the status codes used by the ProcessingTracker instances to communicate the runtime state of each
    job making up the managed data processing pipeline.

    The integer values are what gets serialized to the tracker's .YAML file.
    """

    SCHEDULED = 0
    """The job is scheduled for execution."""
    RUNNING = 1
    """The job is currently being executed."""
    SUCCEEDED = 2
    """The job has been completed."""
    FAILED = 3
    """The job encountered a runtime error and was not completed."""
|
|
96
|
+
|
|
97
|
+
|
|
98
|
+
@dataclass
class JobState:
    """Stores the metadata and the current runtime status of a single job in the processing pipeline."""

    status: ProcessingStatus = ProcessingStatus.SCHEDULED
    """The current status of the job. Stored as an integer when serialized to .YAML and converted back to a
    ProcessingStatus member on load."""
    slurm_job_id: int | None = None
    """The SLURM-assigned job ID, if running on a SLURM cluster."""
|
|
106
|
+
|
|
107
|
+
|
|
108
|
+
@dataclass()
class ProcessingTracker(YamlConfig):
    """Tracks the state of a data processing pipeline and provides tools for communicating this state between multiple
    processes and host-machines.

    Note:
        All modifications to the tracker file require the acquisition of the .lock file, which ensures exclusive
        access to the tracker's data, allowing multiple independent processes (jobs) to safely work with the same
        tracker file.
    """

    file_path: Path
    """The path to the .YAML file used to cache the tracker's data on disk."""
    jobs: dict[str, JobState] = field(default_factory=dict)
    """Maps the unique identifiers of the jobs that make up the processing pipeline to their current state and
    metadata."""
    lock_path: str = field(init=False)
    """The path to the .LOCK file used to ensure thread-safe access to the tracker's data."""

    def __post_init__(self) -> None:
        """Resolves the .LOCK file for the managed tracker .YAML file."""
        # Generates the .lock file path for the target tracker .yaml file.
        if self.file_path is not None:
            self.lock_path = str(self.file_path.with_suffix(self.file_path.suffix + ".lock"))
        else:
            self.lock_path = ""

        # Converts integer status values back to ProcessingStatus enumeration instances. The conversion to integers is
        # necessary for .YAML saving compatibility.
        for job_state in self.jobs.values():
            if isinstance(job_state.status, int):
                job_state.status = ProcessingStatus(job_state.status)

    @staticmethod
    def generate_job_id(session_path: Path, job_name: str) -> str:
        """Generates a unique hexadecimal job identifier based on the session's data path and the job's name using the
        xxHash64 checksum generator.

        Args:
            session_path: The path to the processed session's data directory.
            job_name: The unique name for the processing job.

        Returns:
            The unique hexadecimal identifier for the target job.
        """
        # Combines session path and job name into a single string for hashing
        combined = f"{session_path.resolve()}:{job_name}"
        # Generates and returns the xxHash64 hash
        return xxhash.xxh64(combined.encode("utf-8")).hexdigest()

    @staticmethod
    def _get_slurm_job_id() -> int | None:
        """Retrieves the SLURM-assigned job's ID from the environment, if available.

        Returns:
            The SLURM-assigned job's ID if running in a SLURM environment, None otherwise.
        """
        # Checks both environment variable spellings used by SLURM.
        slurm_id = os.environ.get("SLURM_JOB_ID") or os.environ.get("SLURM_JOBID")
        return int(slurm_id) if slurm_id else None

    def _load_state(self) -> None:
        """Reads the processing pipeline's runtime state from the cached .YAML file."""
        if self.file_path.exists():
            # Loads the data for the state values but does not replace the file path or lock attributes.
            instance: ProcessingTracker = self.from_yaml(self.file_path)
            self.jobs = copy.deepcopy(instance.jobs)
        else:
            # Otherwise, if the tracker file does not exist, generates a new .yaml file using default instance values
            # and saves it to disk using the specified tracker file path.
            self._save_state()

    def _save_state(self) -> None:
        """Caches the current processing state stored inside the instance's attributes as a .YAML file."""
        # Resets the lock_path and file_path to None and jobs to a dictionary of integers before dumping the data to
        # .YAML to avoid issues with loading it back.
        temp_file_path, temp_lock_path, temp_jobs = self.file_path, self.lock_path, self.jobs

        # Converts enums to int for YAML serialization
        converted_jobs = {}
        for job_id, job_state in self.jobs.items():
            converted_jobs[job_id] = JobState(
                status=int(job_state.status),  # type: ignore[arg-type]
                slurm_job_id=job_state.slurm_job_id,
            )

        try:
            self.file_path = None  # type: ignore[assignment]
            self.lock_path = None  # type: ignore[assignment]
            self.jobs = converted_jobs
            self.to_yaml(file_path=temp_file_path)
        finally:
            # Restores the original attribute values even if serialization fails.
            self.file_path, self.lock_path, self.jobs = temp_file_path, temp_lock_path, temp_jobs

    def _verify_job(self, job_id: str) -> None:
        """Verifies that the tracker is configured to track the specified job.

        Args:
            job_id: The unique identifier of the job to verify.

        Raises:
            ValueError: If the specified job ID is not found in the managed tracker file.
        """
        if job_id not in self.jobs:
            message = (
                f"The ProcessingTracker instance is not configured to track the state of the job with ID "
                f"'{job_id}'. The instance is currently configured to track jobs with IDs: "
                f"{', '.join(self.jobs.keys())}."
            )
            console.error(message=message, error=ValueError)
            # Fallback to appease mypy, should not be reachable
            raise ValueError(message)  # pragma: no cover

    def _set_job_status(self, job_id: str, status: ProcessingStatus, *, capture_slurm_id: bool = False) -> None:
        """Atomically loads the tracker's state, updates the target job's status, and caches the state back to disk.

        This helper centralizes the lock / load / verify / update / save sequence shared by all status mutators.

        Args:
            job_id: The unique identifier of the job to update.
            status: The new status to assign to the job.
            capture_slurm_id: Determines whether to refresh the job's SLURM-assigned ID from the environment.

        Raises:
            TimeoutError: If the .LOCK file for the tracker .YAML file cannot be acquired within the timeout period.
            ValueError: If the specified job ID is not found in the managed tracker file.
        """
        lock = FileLock(self.lock_path)
        with lock.acquire(timeout=10.0):
            # Loads tracker state from the .yaml file
            self._load_state()

            # Verifies that the tracker is configured to track the specified job
            self._verify_job(job_id)

            # Updates the job's status and, optionally, captures the SLURM-assigned job ID
            job_info = self.jobs[job_id]
            job_info.status = status
            if capture_slurm_id:
                job_info.slurm_job_id = self._get_slurm_job_id()

            self._save_state()

    def initialize_jobs(self, job_ids: list[str]) -> None:
        """Configures the tracker with the list of jobs to be executed during the pipeline's runtime.

        Args:
            job_ids: The list of unique identifiers for all jobs that make up the tracked pipeline.

        Raises:
            TimeoutError: If the .LOCK file for the tracker .YAML file cannot be acquired within the timeout period.
        """
        lock = FileLock(self.lock_path)
        with lock.acquire(timeout=10.0):
            # Loads tracker's state from the .yaml file
            self._load_state()

            # Initialize all jobs as SCHEDULED if they don't already exist
            for job_id in job_ids:
                if job_id not in self.jobs:
                    self.jobs[job_id] = JobState(status=ProcessingStatus.SCHEDULED)

            self._save_state()

    def start_job(self, job_id: str) -> None:
        """Marks the target job as running and captures the SLURM-assigned job's ID from the environment, if called
        under the SLURM job manager.

        Args:
            job_id: The unique identifier of the job to mark as started.

        Raises:
            TimeoutError: If the .LOCK file for the tracker .YAML file cannot be acquired within the timeout period.
            ValueError: If the specified job ID is not found in the managed tracker file.
        """
        self._set_job_status(job_id, ProcessingStatus.RUNNING, capture_slurm_id=True)

    def complete_job(self, job_id: str) -> None:
        """Marks a target job as successfully completed.

        Args:
            job_id: The unique identifier of the job to mark as complete.

        Raises:
            TimeoutError: If the .LOCK file for the tracker .YAML file cannot be acquired within the timeout period.
            ValueError: If the specified job ID is not found in the managed tracker file.
        """
        self._set_job_status(job_id, ProcessingStatus.SUCCEEDED)

    def fail_job(self, job_id: str) -> None:
        """Marks the target job as failed.

        Args:
            job_id: The unique identifier of the job to mark as failed.

        Raises:
            TimeoutError: If the .LOCK file for the tracker .YAML file cannot be acquired within the timeout period.
            ValueError: If the specified job ID is not found in the managed tracker file.
        """
        self._set_job_status(job_id, ProcessingStatus.FAILED)

    def get_job_status(self, job_id: str) -> ProcessingStatus:
        """Queries the current runtime status of the target job.

        Args:
            job_id: The unique identifier of the job for which to query the runtime status.

        Returns:
            The current runtime status of the job.

        Raises:
            TimeoutError: If the .LOCK file for the tracker .YAML file cannot be acquired within the timeout period.
            ValueError: If the specified job ID is not found in the managed tracker file.
        """
        lock = FileLock(self.lock_path)
        with lock.acquire(timeout=10.0):
            self._load_state()

            # Verifies that the tracker is configured to track the specified job
            self._verify_job(job_id)

            return self.jobs[job_id].status

    def reset(self) -> None:
        """Resets the tracker file to the default state.

        Raises:
            TimeoutError: If the .LOCK file for the tracker .YAML file cannot be acquired within the timeout period.
        """
        lock = FileLock(self.lock_path)
        with lock.acquire(timeout=10.0):
            # Loads tracker state from the .yaml file.
            self._load_state()

            # Resets the tracker file to the default state.
            self.jobs.clear()
            self._save_state()

    @property
    def complete(self) -> bool:
        """Returns True if the tracked processing pipeline has been completed successfully.

        Notes:
            The pipeline is considered complete if all jobs have been marked as succeeded. An empty tracker is
            never considered complete.
        """
        lock = FileLock(self.lock_path)
        with lock.acquire(timeout=10.0):
            self._load_state()
            if not self.jobs:
                return False
            return all(job.status == ProcessingStatus.SUCCEEDED for job in self.jobs.values())

    @property
    def encountered_error(self) -> bool:
        """Returns True if the tracked processing pipeline has been terminated due to a runtime error.

        Note:
            The pipeline is considered to have encountered an error if any job has been marked as failed.
        """
        lock = FileLock(self.lock_path)
        with lock.acquire(timeout=10.0):
            self._load_state()
            return any(job.status == ProcessingStatus.FAILED for job in self.jobs.values())
|
|
@@ -0,0 +1,237 @@
|
|
|
1
|
+
"""This module provides the assets used by data acquisition systems to store a subset of the acquired data."""
|
|
2
|
+
|
|
3
|
+
from dataclasses import dataclass # pragma: no cover
|
|
4
|
+
|
|
5
|
+
from ataraxis_data_structures import YamlConfig # pragma: no cover
|
|
6
|
+
|
|
7
|
+
|
|
8
|
+
@dataclass()
class MesoscopeHardwareState(YamlConfig):  # pragma: no cover
    """Stores configuration parameters (states) of the Mesoscope-VR system hardware modules used during training or
    experiment runtimes.

    Notes:
        All hardware parameters are stored using the appropriate datatypes and rounding methods that ensure
        their complete equivalence to the values used by the data acquisition system during runtime.

        All fields default to None, which marks the corresponding parameter as not recorded for the session.
    """

    cm_per_pulse: float | None = None
    """The conversion factor used to translate encoder pulses into centimeters."""
    maximum_brake_strength: float | None = None
    """The braking torque, in Newton centimeters, applied by the brake to the edge of the running wheel when it is
    maximally engaged."""
    minimum_brake_strength: float | None = None
    """The braking torque, in Newton centimeters, applied by the brake to the edge of the running wheel when it is
    completely disengaged."""
    lick_threshold: int | None = None
    """Determines the threshold, in 12-bit Analog to Digital Converter (ADC) units reported by the lick sensor, for
    considering the reported signal a lick."""
    valve_scale_coefficient: float | None = None
    """The scale coefficient of the power law equation that describes the relationship between the time the valve is
    kept open and the dispensed water volume."""
    valve_nonlinearity_exponent: float | None = None
    """The nonlinearity exponent of the power law equation that describes the relationship between the time the valve
    is kept open and the dispensed water volume."""
    torque_per_adc_unit: float | None = None
    """The conversion factor used to translate torque values reported by the sensor as 12-bit Analog to Digital
    Converter (ADC) units into Newton centimeters (N·cm)."""
    screens_initially_on: bool | None = None
    """Stores the initial state of the Virtual Reality screens at the beginning of the session's runtime."""
    recorded_mesoscope_ttl: bool | None = None
    """Tracks whether the session recorded brain activity data with the mesoscope."""
    delivered_gas_puffs: bool | None = None
    """Tracks whether the session delivered any gas puffs to the animal."""
    system_state_codes: dict[str, int] | None = None
    """Maps integer state-codes used by the Mesoscope-VR system to communicate its states (system states) to
    human-readable state names."""
|
|
47
|
+
|
|
48
|
+
|
|
49
|
+
@dataclass()
class LickTrainingDescriptor(YamlConfig):  # pragma: no cover
    """Stores the task and outcome information specific to lick training sessions that use the Mesoscope-VR system."""

    experimenter: str
    """The ID of the experimenter running the session."""
    mouse_weight_g: float
    """The weight of the animal, in grams, at the beginning of the session."""
    minimum_reward_delay_s: int = 6
    """The minimum delay, in seconds, that can separate the delivery of two consecutive water rewards."""
    maximum_reward_delay_s: int = 18
    """The maximum delay, in seconds, that can separate the delivery of two consecutive water rewards."""
    maximum_water_volume_ml: float = 1.0
    """The maximum volume of water the system is allowed to dispense during training."""
    maximum_training_time_min: int = 20
    """The maximum time, in minutes, the system is allowed to run the training."""
    maximum_unconsumed_rewards: int = 1
    """The maximum number of consecutive rewards that can be delivered without the animal consuming them. If
    the animal receives this many rewards without licking (consuming) them, reward delivery is paused until the animal
    consumes the delivered rewards."""
    water_reward_size_ul: float = 5.0
    """The volume of water, in microliters, dispensed to the animal when it achieves the required running speed and
    duration thresholds."""
    reward_tone_duration_ms: int = 300
    """The duration, in milliseconds, of the auditory tone played to the animal when it receives water rewards."""
    dispensed_water_volume_ml: float = 0.0
    """The total water volume, in milliliters, dispensed during runtime. This excludes the water volume
    dispensed during the paused (idle) state."""
    pause_dispensed_water_volume_ml: float = 0.0
    """The total water volume, in milliliters, dispensed during the paused (idle) state."""
    experimenter_given_water_volume_ml: float = 0.0
    """The additional volume of water, in milliliters, administered by the experimenter to the animal after the session.
    """
    preferred_session_water_volume_ml: float = 0.0
    """The volume of water, in milliliters, the animal should be receiving during the session runtime if its
    performance matches experimenter-specified threshold."""
    incomplete: bool = True
    """Tracks whether the session's data is complete and eligible for unsupervised data processing."""
    experimenter_notes: str = "Replace this with your notes."
    """Stores the experimenter's notes made during runtime."""
|
|
89
|
+
|
|
90
|
+
|
|
91
|
+
@dataclass()
class RunTrainingDescriptor(YamlConfig):  # pragma: no cover
    """Stores the task and outcome information specific to run training sessions that use the Mesoscope-VR system."""

    experimenter: str
    """The ID of the experimenter running the session."""
    mouse_weight_g: float
    """The weight of the animal, in grams, at the beginning of the session."""
    final_run_speed_threshold_cm_s: float = 1.5
    """The running speed threshold, in centimeters per second, at the end of training."""
    final_run_duration_threshold_s: float = 1.5
    """The running duration threshold, in seconds, at the end of training."""
    initial_run_speed_threshold_cm_s: float = 0.8
    """The initial running speed threshold, in centimeters per second."""
    initial_run_duration_threshold_s: float = 1.5
    """The initial running duration threshold, in seconds."""
    increase_threshold_ml: float = 0.1
    """The threshold volume of water delivered to the animal, in milliliters, that triggers the increase in the running
    speed and duration thresholds."""
    run_speed_increase_step_cm_s: float = 0.05
    """The value, in centimeters per second, used by the system to increment the running speed threshold each
    time the animal receives 'increase_threshold' volume of water."""
    run_duration_increase_step_s: float = 0.1
    """The value, in seconds, used by the system to increment the duration threshold each time the animal
    receives 'increase_threshold' volume of water."""
    maximum_water_volume_ml: float = 1.0
    """The maximum volume of water the system is allowed to dispense during training."""
    maximum_training_time_min: int = 40
    """The maximum time, in minutes, the system is allowed to run the training."""
    maximum_unconsumed_rewards: int = 1
    """The maximum number of consecutive rewards that can be delivered without the animal consuming them. If
    the animal receives this many rewards without licking (consuming) them, reward delivery is paused until the animal
    consumes the delivered rewards."""
    maximum_idle_time_s: float = 0.3
    """The maximum time, in seconds, the animal can dip below the running speed threshold to still receive the
    reward. This allows animals that 'run' by taking a series of large steps, briefly dipping below speed threshold at
    the end of each step, to still get water rewards."""
    water_reward_size_ul: float = 5.0
    """The volume of water, in microliters, dispensed to the animal when it achieves the required running speed and
    duration thresholds."""
    reward_tone_duration_ms: int = 300
    """The duration, in milliseconds, of the auditory tone played to the animal when it receives water rewards."""
    dispensed_water_volume_ml: float = 0.0
    """The total water volume, in milliliters, dispensed during runtime. This excludes the water volume
    dispensed during the paused (idle) state."""
    pause_dispensed_water_volume_ml: float = 0.0
    """The total water volume, in milliliters, dispensed during the paused (idle) state."""
    experimenter_given_water_volume_ml: float = 0.0
    """The additional volume of water, in milliliters, administered by the experimenter to the animal after the session.
    """
    preferred_session_water_volume_ml: float = 0.0
    """The volume of water, in milliliters, the animal should be receiving during the session runtime if its
    performance matches experimenter-specified threshold."""
    incomplete: bool = True
    """Tracks whether the session's data is complete and eligible for unsupervised data processing."""
    experimenter_notes: str = "Replace this with your notes."
    """Stores the experimenter's notes made during runtime."""
|
|
148
|
+
|
|
149
|
+
|
|
150
|
+
@dataclass()
class MesoscopeExperimentDescriptor(YamlConfig):  # pragma: no cover
    """Stores the task and outcome information specific to experiment sessions that use the Mesoscope-VR system."""

    experimenter: str
    """The ID of the experimenter running the session."""
    mouse_weight_g: float
    """The weight of the animal, in grams, at the beginning of the session."""
    maximum_unconsumed_rewards: int = 1
    """The maximum number of consecutive rewards that can be delivered without the animal consuming them. If
    the animal receives this many rewards without licking (consuming) them, reward delivery is paused until the animal
    consumes the delivered rewards."""
    dispensed_water_volume_ml: float = 0.0
    """The total water volume, in milliliters, dispensed during runtime. This excludes the water volume
    dispensed during the paused (idle) state."""
    pause_dispensed_water_volume_ml: float = 0.0
    """The total water volume, in milliliters, dispensed during the paused (idle) state."""
    experimenter_given_water_volume_ml: float = 0.0
    """The additional volume of water, in milliliters, administered by the experimenter to the animal after the session.
    """
    preferred_session_water_volume_ml: float = 0.0
    """The volume of water, in milliliters, the animal should be receiving during the session runtime if its
    performance matches experimenter-specified threshold."""
    incomplete: bool = True
    """Tracks whether the session's data is complete and eligible for unsupervised data processing."""
    experimenter_notes: str = "Replace this with your notes."
    """Stores the experimenter's notes made during runtime."""
|
|
177
|
+
|
|
178
|
+
|
|
179
|
+
@dataclass()
class WindowCheckingDescriptor(YamlConfig):  # pragma: no cover
    """Stores the outcome information specific to window checking sessions that use the Mesoscope-VR system."""

    experimenter: str
    """The ID of the experimenter running the session."""
    surgery_quality: int = 0
    """The quality of the cranial window and surgical intervention on a scale from 0 (non-usable) to
    3 (high-tier publication grade) inclusive."""
    incomplete: bool = True
    """Tracks whether the session's data is complete and eligible for unsupervised data processing."""
    experimenter_notes: str = "Replace this with your notes."
    """Stores the experimenter's notes made during runtime."""
|
|
192
|
+
|
|
193
|
+
|
|
194
|
+
@dataclass()
class ZaberPositions(YamlConfig):  # pragma: no cover
    """Stores Zaber motor positions reused between data acquisition sessions that use the Mesoscope-VR system.

    All positions are stored as absolute values in native motor units.
    """

    headbar_z: int = 0
    """The absolute position, in native motor units, of the HeadBar z-axis motor."""
    headbar_pitch: int = 0
    """The absolute position, in native motor units, of the HeadBar pitch-axis motor."""
    headbar_roll: int = 0
    """The absolute position, in native motor units, of the HeadBar roll-axis motor."""
    lickport_z: int = 0
    """The absolute position, in native motor units, of the LickPort z-axis motor."""
    lickport_y: int = 0
    """The absolute position, in native motor units, of the LickPort y-axis motor."""
    lickport_x: int = 0
    """The absolute position, in native motor units, of the LickPort x-axis motor."""
    wheel_x: int = 0
    """The absolute position, in native motor units, of the running wheel platform x-axis motor."""
|
|
212
|
+
|
|
213
|
+
|
|
214
|
+
@dataclass()
class MesoscopePositions(YamlConfig):  # pragma: no cover
    """Stores the positions of real and virtual Mesoscope imaging axes reused between experiment sessions that use the
    Mesoscope-VR system.
    """

    mesoscope_x: float = 0.0
    """The Mesoscope objective's X-axis position, in micrometers."""
    mesoscope_y: float = 0.0
    """The Mesoscope objective's Y-axis position, in micrometers."""
    mesoscope_roll: float = 0.0
    """The Mesoscope objective's Roll-axis position, in degrees."""
    mesoscope_z: float = 0.0
    """The Mesoscope objective's Z-axis position, in micrometers."""
    mesoscope_fast_z: float = 0.0
    """The ScanImage's FastZ (virtual Z-axis) position, in micrometers."""
    mesoscope_tip: float = 0.0
    """The ScanImage's Tip position, in degrees."""
    mesoscope_tilt: float = 0.0
    """The ScanImage's Tilt position, in degrees."""
    laser_power_mw: float = 0.0
    """The laser excitation power at the sample, in milliwatts."""
    red_dot_alignment_z: float = 0.0
    """The Mesoscope objective's Z-axis position, in micrometers, used for the red-dot alignment procedure."""
|