sl-shared-assets 1.0.0rc18__py3-none-any.whl → 1.0.0rc20__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of sl-shared-assets might be problematic.

Files changed (27)
  1. sl_shared_assets/__init__.pyi +71 -0
  2. sl_shared_assets/cli.pyi +28 -0
  3. sl_shared_assets/data_classes/__init__.pyi +61 -0
  4. sl_shared_assets/data_classes/configuration_data.pyi +37 -0
  5. sl_shared_assets/data_classes/runtime_data.py +12 -0
  6. sl_shared_assets/data_classes/runtime_data.pyi +148 -0
  7. sl_shared_assets/data_classes/session_data.py +12 -9
  8. sl_shared_assets/data_classes/session_data.pyi +544 -0
  9. sl_shared_assets/data_classes/surgery_data.pyi +89 -0
  10. sl_shared_assets/server/__init__.pyi +8 -0
  11. sl_shared_assets/server/job.pyi +94 -0
  12. sl_shared_assets/server/server.pyi +95 -0
  13. sl_shared_assets/suite2p/__init__.pyi +4 -0
  14. sl_shared_assets/suite2p/multi_day.py +7 -8
  15. sl_shared_assets/suite2p/multi_day.pyi +104 -0
  16. sl_shared_assets/suite2p/single_day.py +5 -4
  17. sl_shared_assets/suite2p/single_day.pyi +220 -0
  18. sl_shared_assets/tools/__init__.pyi +5 -0
  19. sl_shared_assets/tools/ascension_tools.pyi +68 -0
  20. sl_shared_assets/tools/packaging_tools.pyi +52 -0
  21. sl_shared_assets/tools/transfer_tools.pyi +53 -0
  22. {sl_shared_assets-1.0.0rc18.dist-info → sl_shared_assets-1.0.0rc20.dist-info}/METADATA +1 -1
  23. sl_shared_assets-1.0.0rc20.dist-info/RECORD +40 -0
  24. sl_shared_assets-1.0.0rc18.dist-info/RECORD +0 -23
  25. {sl_shared_assets-1.0.0rc18.dist-info → sl_shared_assets-1.0.0rc20.dist-info}/WHEEL +0 -0
  26. {sl_shared_assets-1.0.0rc18.dist-info → sl_shared_assets-1.0.0rc20.dist-info}/entry_points.txt +0 -0
  27. {sl_shared_assets-1.0.0rc18.dist-info → sl_shared_assets-1.0.0rc20.dist-info}/licenses/LICENSE +0 -0
sl_shared_assets/server/job.pyi
@@ -0,0 +1,94 @@
+ from pathlib import Path
+
+ from _typeshed import Incomplete
+ from simple_slurm import Slurm
+
+ class Job:
+     """Aggregates the data of a single SLURM-managed job to be executed on the Sun lab BioHPC cluster.
+
+     This class provides the API for constructing any server-side job in the Sun lab. Internally, it wraps an instance
+     of a Slurm class to package the job data into the format expected by the SLURM job manager. All jobs managed by this
+     class instance should be submitted to an initialized Server class 'submit_job' method to be executed on the server.
+
+     Notes:
+         The initialization method of the class contains the arguments for configuring the SLURM and Conda environments
+         used by the job. Do not submit additional SLURM or Conda commands via the 'add_command' method, as this may
+         produce unexpected behavior.
+
+         Each job can be conceptualized as a sequence of shell instructions to execute on the remote compute server. For
+         the lab, that means that the bulk of the command consists of calling various CLIs exposed by data processing or
+         analysis pipelines, installed in the Conda environment on the server. Other than that, the job contains commands
+         for activating the target conda environment and, in some cases, doing other preparatory or cleanup work. The
+         source code of a 'remote' job is typically identical to what a human operator would type in a 'local' terminal
+         to run the same job on their PC.
+
+         A key feature of server-side jobs is that they are executed on virtual machines managed by SLURM. Since the
+         server has a lot more compute and memory resources than likely needed by individual jobs, each job typically
+         requests a subset of these resources. Upon being executed, SLURM creates an isolated environment with the
+         requested resources and runs the job in that environment.
+
+         Since all jobs are expected to use the CLIs from python packages (pre)installed on the BioHPC server, make sure
+         that the target environment is installed and configured before submitting jobs to the server. See notes in
+         ReadMe to learn more about configuring server-side conda environments.
+
+     Args:
+         job_name: The descriptive name of the SLURM job to be created. Primarily, this name is used in terminal
+             printouts to identify the job to human operators.
+         output_log: The absolute path to the .txt file on the processing server, where to store the standard output
+             data of the job.
+         error_log: The absolute path to the .txt file on the processing server, where to store the standard error
+             data of the job.
+         working_directory: The absolute path to the directory where temporary job files will be stored. During runtime,
+             classes from this library use that directory to store files such as the job's shell script. All such files
+             are automatically removed from the directory at the end of a non-errors runtime.
+         conda_environment: The name of the conda environment to activate on the server before running the job logic. The
+             environment should contain the necessary Python packages and CLIs to support running the job's logic.
+         cpus_to_use: The number of CPUs to use for the job.
+         ram_gb: The amount of RAM to allocate for the job, in Gigabytes.
+         time_limit: The maximum time limit for the job, in minutes. If the job is still running at the end of this time
+             period, it will be forcibly terminated. It is highly advised to always set adequate maximum runtime limits
+             to prevent jobs from hogging the server in case of runtime or algorithm errors.
+
+     Attributes:
+         remote_script_path: Stores the path to the script file relative to the root of the remote server that runs the
+             command.
+         job_id: Stores the unique job identifier assigned by the SLURM manager to this job, when it is accepted for
+             execution. This field initialized to None and is overwritten by the Server class that submits the job.
+         job_name: Stores the descriptive name of the SLURM job.
+         _command: Stores the managed SLURM command object.
+     """
+
+     remote_script_path: Incomplete
+     job_id: str | None
+     job_name: str
+     _command: Slurm
+     def __init__(
+         self,
+         job_name: str,
+         output_log: Path,
+         error_log: Path,
+         working_directory: Path,
+         conda_environment: str,
+         cpus_to_use: int = 10,
+         ram_gb: int = 10,
+         time_limit: int = 60,
+     ) -> None: ...
+     def __repr__(self) -> str:
+         """Returns the string representation of the Job instance."""
+     def add_command(self, command: str) -> None:
+         """Adds the input command string to the end of the managed SLURM job command list.
+
+         This method is a wrapper around simple_slurm's 'add_cmd' method. It is used to iteratively build the shell
+         command sequence of the job.
+
+         Args:
+             command: The command string to add to the command list, e.g.: 'python main.py --input 1'.
+         """
+     @property
+     def command_script(self) -> str:
+         """Translates the managed job data into a shell-script-writable string and returns it to caller.
+
+         This method is used by the Server class to translate the job into the format that can be submitted to and
+         executed on the remote compute server. Do not call this method manually unless you know what you are doing.
+         The returned string is safe to dump into a .sh (shell script) file and move to the BioHPC server for execution.
+         """
sl_shared_assets/server/server.pyi
@@ -0,0 +1,95 @@
+ from pathlib import Path
+ from dataclasses import dataclass
+
+ from simple_slurm import Slurm as Slurm
+ from paramiko.client import SSHClient as SSHClient
+ from ataraxis_data_structures import YamlConfig
+
+ from .job import Job as Job
+
+ def generate_server_credentials(
+     output_directory: Path, username: str, password: str, host: str = "cbsuwsun.biohpc.cornell.edu"
+ ) -> None:
+     """Generates a new server_credentials.yaml file under the specified directory, using input information.
+
+     This function provides a convenience interface for generating new BioHPC server credential files. Generally, this is
+     only used when setting up new host-computers in the lab.
+     """
+ @dataclass()
+ class ServerCredentials(YamlConfig):
+     """This class stores the hostname and credentials used to log into the BioHPC cluster to run Sun lab processing
+     pipelines.
+
+     Primarily, this is used as part of the sl-experiment library runtime to start data processing once it is
+     transferred to the BioHPC server during preprocessing. However, the same file can be used together with the Server
+     class API to run any computation jobs on the lab's BioHPC server.
+     """
+
+     username: str = ...
+     password: str = ...
+     host: str = ...
+
+ class Server:
+     """Encapsulates access to the Sun lab BioHPC processing server.
+
+     This class provides the API that allows accessing the BioHPC server to create and submit various SLURM-managed jobs
+     to the server. It functions as the central interface used by all processing pipelines in the lab to execute costly
+     data processing on the server.
+
+     Notes:
+         All lab processing pipelines expect the data to be stored on the server and all processing logic to be packaged
+         and installed into dedicated conda environments on the server.
+
+         This class assumes that the target server has SLURM job manager installed and accessible to the user whose
+         credentials are used to connect to the server as part of this class instantiation.
+
+     Args:
+         credentials_path: The path to the locally stored .yaml file that contains the server hostname and access
+             credentials.
+
+     Attributes:
+         _open: Tracks whether the connection to the server is open or not.
+         _client: Stores the initialized SSHClient instance used to interface with the server.
+     """
+
+     _open: bool
+     _credentials: ServerCredentials
+     _client: SSHClient
+     def __init__(self, credentials_path: Path) -> None: ...
+     def __del__(self) -> None:
+         """If the instance is connected to the server, terminates the connection before the instance is destroyed."""
+     def submit_job(self, job: Job) -> Job:
+         """Submits the input job to the managed BioHPC server via SLURM job manager.
+
+         This method submits various jobs for execution via SLURM-managed BioHPC cluster. As part of its runtime, the
+         method translates the Job object into the shell script, moves the script to the target working directory on
+         the server, and instructs the server to execute the shell script (via SLURM).
+
+         Args:
+             job: The Job object that contains all job data.
+
+         Returns:
+             The job object whose 'job_id' attribute had been modified with the job ID, if the job was successfully
+             submitted.
+
+         Raises:
+             RuntimeError: If job submission to the server fails.
+         """
+     def job_complete(self, job: Job) -> bool:
+         """Returns True if the job managed by the input Job instance has been completed or terminated its runtime due
+         to an error.
+
+         If the job is still running or is waiting inside the execution queue, returns False.
+
+         Args:
+             job: The Job object whose status needs to be checked.
+
+         Raises:
+             ValueError: If the input Job object does not contain a valid job_id, suggesting that it has not been
+                 submitted to the server.
+         """
+     def close(self) -> None:
+         """Closes the SSH connection to the server.
+
+         This method has to be called before destroying the class instance to ensure proper resource cleanup.
+         """
sl_shared_assets/suite2p/__init__.pyi
@@ -0,0 +1,4 @@
+ from .multi_day import MultiDayS2PConfiguration as MultiDayS2PConfiguration
+ from .single_day import SingleDayS2PConfiguration as SingleDayS2PConfiguration
+
+ __all__ = ["MultiDayS2PConfiguration", "SingleDayS2PConfiguration"]
sl_shared_assets/suite2p/multi_day.py
@@ -3,11 +3,11 @@ extends the original suite2p code to support tracking the same objects (cells) a
  (original) and multi-day (extended) pipelines are available as part of the Sun lab maintained sl-suite2p package."""

  from typing import Any
- from dataclasses import field, asdict, dataclass
  from pathlib import Path
+ from dataclasses import field, asdict, dataclass
+
  import numpy as np
  from ataraxis_base_utilities import ensure_directory_exists
-
  from ataraxis_data_structures import YamlConfig


@@ -60,8 +60,7 @@ class Hardware:

  @dataclass()
  class CellDetection:
-     """Stores parameters for selecting single-day-registered cells (ROIs) to be tracked across multiple sessions (days).
-     """
+     """Stores parameters for selecting single-day-registered cells (ROIs) to be tracked across multiple sessions (days)."""

      probability_threshold: float = 0.85
      """The minimum required probability score assigned to the cell (ROI) by the single-day suite2p classifier. Cells
@@ -85,8 +84,7 @@ class CellDetection:

  @dataclass()
  class Registration:
-     """Stores parameters for aligning (registering) the sessions from multiple days to the same visual (sampling) space.
-     """
+     """Stores parameters for aligning (registering) the sessions from multiple days to the same visual (sampling) space."""

      image_type: str = "enhanced"
      """The type of suite2p-generated reference image to use for across-day registration. Supported options are
@@ -192,7 +190,8 @@ class MultiDayS2PConfiguration(YamlConfig):
          """
          ensure_directory_exists(output_directory) # Creates the directory, if necessary
          file_path = output_directory.joinpath("ops.npy") # Computes the output path
-         np.save(file_path, self.to_ops(), allow_pickle=True) # Dumps the configuration data to 'ops.npy' file.
+         # Dumps the configuration data to 'ops.npy' file.
+         np.save(file_path, self.to_ops(), allow_pickle=True) # type: ignore

      def to_config(self, output_directory: Path) -> None:
          """Saves the managed configuration data as a 'multi_day_s2p_configuration.yaml' file under the target
@@ -210,7 +209,7 @@ class MultiDayS2PConfiguration(YamlConfig):
              saved.
          """
          ensure_directory_exists(output_directory) # Creates the directory, if necessary
-         file_path = output_directory.joinpath("multi_day_s2p_configuration.yaml") # Computes the output path
+         file_path = output_directory.joinpath("multi_day_s2p_configuration.yaml")  # Computes the output path

          # Note, this uses the same configuration name as the SessionData class, making it automatically compatible with
          # Sun lab data structure.
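The functional content of these hunks is small: the imports are reordered, the inline comment on the np.save call moves onto its own line, and a 'type: ignore' is added. Because to_npy() stores a Python dictionary with allow_pickle=True, reading the resulting file back follows the usual NumPy pattern; a short sketch, with a hypothetical path (the 'ops.npy' name comes from the code above):

    from pathlib import Path

    import numpy as np

    ops_path = Path("/tmp/multi_day_demo/ops.npy")  # hypothetical location

    # np.save(..., allow_pickle=True) stores the dict as a 0-d object array, so loading
    # requires allow_pickle=True and .item() to unwrap it back into a dictionary.
    ops = np.load(ops_path, allow_pickle=True).item()
    print(sorted(ops))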
sl_shared_assets/suite2p/multi_day.pyi
@@ -0,0 +1,104 @@
+ from typing import Any
+ from pathlib import Path
+ from dataclasses import field, dataclass
+
+ from _typeshed import Incomplete
+ from ataraxis_data_structures import YamlConfig
+
+ @dataclass()
+ class IO:
+     """Stores parameters that control data input and output during various stages of the pipeline."""
+
+     session_ids: list[str] = field(default_factory=list)
+     session_folders: list[str] = field(default_factory=list)
+
+ @dataclass()
+ class Hardware:
+     """Stores parameters that control how the suite2p interacts with the hardware of the host-computer to accelerate
+     processing speed."""
+
+     parallelize_registration: bool = ...
+     registration_workers: int = ...
+     parallelize_extraction: bool = ...
+     parallel_sessions: int = ...
+
+ @dataclass()
+ class CellDetection:
+     """Stores parameters for selecting single-day-registered cells (ROIs) to be tracked across multiple sessions (days)."""
+
+     probability_threshold: float = ...
+     maximum_size: int = ...
+     mesoscope_stripe_borders: list[int] = field(default_factory=list)
+     stripe_margin: int = ...
+
+ @dataclass()
+ class Registration:
+     """Stores parameters for aligning (registering) the sessions from multiple days to the same visual (sampling) space."""
+
+     image_type: str = ...
+     grid_sampling_factor: float = ...
+     scale_sampling: int = ...
+     speed_factor: float = ...
+
+ @dataclass()
+ class Clustering:
+     """Stores parameters for tracking (clustering) cell (ROI) masks across multiple registered sessions (days)."""
+
+     criterion: str = ...
+     threshold: float = ...
+     mask_prevalence: int = ...
+     pixel_prevalence: int = ...
+     step_sizes: list[int] = field(default_factory=Incomplete)
+     bin_size: int = ...
+     maximum_distance: int = ...
+     minimum_size: int = ...
+
+ @dataclass()
+ class MultiDayS2PConfiguration(YamlConfig):
+     """Aggregates all parameters for the multi-day suite2p pipeline used to track cells across multiple days
+     (sessions) and extract their activity.
+
+     These settings are used to configure the multi-day suite2p extraction pipeline, which is based on the reference
+     implementation here: https://github.com/sprustonlab/multiday-suite2p-public. This class behaves similar to the
+     SingleDayS2PConfiguration class. It can be saved and loaded from a .YAML file and translated to dictionary or
+     ops.npy format, expected by the multi-day sl-suite2p pipeline.
+     """
+
+     io: IO = field(default_factory=IO)
+     hardware: Hardware = field(default_factory=Hardware)
+     cell_detection: CellDetection = field(default_factory=CellDetection)
+     registration: Registration = field(default_factory=Registration)
+     clustering: Clustering = field(default_factory=Clustering)
+     def to_npy(self, output_directory: Path) -> None:
+         """Saves the managed configuration data as an 'ops.npy' file under the target directory.
+
+         This method is mostly called by internal sl-suite2p functions to translate the user-specified configuration
+         file into the format used by suite2p pipelines.
+
+         Notes:
+             If the target output directory does not exist when this method is called, it will be created.
+
+         Args:
+             output_directory: The path to the directory where the 'ops.npy' file should be saved.
+         """
+     def to_config(self, output_directory: Path) -> None:
+         """Saves the managed configuration data as a 'multi_day_s2p_configuration.yaml' file under the target
+         directory.
+
+         This method is typically used to dump the 'default' configuration parameters to disk as a user-editable
+         .yaml file. The user is then expected to modify these parameters as needed, before the class data is loaded and
+         used by the suite2p pipeline.
+
+         Notes:
+             If the target output directory does not exist when this method is called, it will be created.
+
+         Args:
+             output_directory: The path to the directory where the 'multi_day_s2p_configuration.yaml' file should be
+                 saved.
+         """
+     def to_ops(self) -> dict[str, Any]:
+         """Converts the class instance to a dictionary and returns it to caller.
+
+         This method is mostly called by internal sl-suite2p functions to translate the default configuration parameters
+         to the dictionary format used by suite2p pipelines.
+         """
sl_shared_assets/suite2p/single_day.py
@@ -4,11 +4,11 @@ is used as the first step of the multi-day brain activity processing pipeline us
  (original) and multi-day (extended) pipelines are available as part of the Sun lab maintained sl-suite2p package."""

  from typing import Any
- from dataclasses import field, asdict, dataclass
  from pathlib import Path
+ from dataclasses import field, asdict, dataclass
+
  import numpy as np
  from ataraxis_base_utilities import ensure_directory_exists
-
  from ataraxis_data_structures import YamlConfig


@@ -519,7 +519,8 @@ class SingleDayS2PConfiguration(YamlConfig):
          """
          ensure_directory_exists(output_directory) # Creates the directory, if necessary
          file_path = output_directory.joinpath("ops.npy") # Computes the output path
-         np.save(file_path, self.to_ops(), allow_pickle=True) # Dumps the configuration data to 'ops.npy' file.
+         # Dumps the configuration data to 'ops.npy' file.
+         np.save(file_path, self.to_ops(), allow_pickle=True) # type: ignore

      def to_config(self, output_directory: Path) -> None:
          """Saves the managed configuration data as a 'single_day_s2p_configuration.yaml' file under the target
@@ -537,7 +538,7 @@ class SingleDayS2PConfiguration(YamlConfig):
              saved.
          """
          ensure_directory_exists(output_directory) # Creates the directory, if necessary
-         file_path = output_directory.joinpath("single_day_s2p_configuration.yaml") # Computes the output path
+         file_path = output_directory.joinpath("single_day_s2p_configuration.yaml")  # Computes the output path

          # Note, this uses the same configuration name as the SessionData class, making it automatically compatible with
          # Sun lab data structure.
sl_shared_assets/suite2p/single_day.pyi
@@ -0,0 +1,220 @@
+ from typing import Any
+ from pathlib import Path
+ from dataclasses import field, dataclass
+
+ from _typeshed import Incomplete
+ from ataraxis_data_structures import YamlConfig
+
+ @dataclass
+ class Main:
+     """Stores global parameters that broadly define the suite2p single-day processing configuration."""
+
+     nplanes: int = ...
+     nchannels: int = ...
+     functional_chan: int = ...
+     tau: float = ...
+     force_sktiff: bool = ...
+     fs: float = ...
+     do_bidiphase: bool = ...
+     bidiphase: int = ...
+     bidi_corrected: bool = ...
+     frames_include: int = ...
+     multiplane_parallel: bool = ...
+     parallel_planes: int = ...
+     ignore_flyback: list[int] = field(default_factory=list)
+
+ @dataclass
+ class FileIO:
+     """Stores general I/O parameters that specify input data location, format, and working and output directories."""
+
+     fast_disk: str = ...
+     delete_bin: bool = ...
+     mesoscan: bool = ...
+     bruker: bool = ...
+     bruker_bidirectional: bool = ...
+     h5py: list[str] = field(default_factory=list)
+     h5py_key: str = ...
+     nwb_file: str = ...
+     nwb_driver: str = ...
+     nwb_series: str = ...
+     save_path0: str = ...
+     save_folder: str = ...
+     data_path: list[str] = field(default_factory=list)
+     look_one_level_down: bool = ...
+     subfolders: list[str] = field(default_factory=list)
+     move_bin: bool = ...
+
+ @dataclass
+ class Output:
+     """Stores parameters for aggregating and saving the processing results of each plane as a unified directory or
+     file."""
+
+     save_nwb: bool = ...
+     save_mat: bool = ...
+     combined: bool = ...
+     aspect: float = ...
+
+ @dataclass
+ class Registration:
+     """Stores parameters for rigid registration, which is used to correct motion artifacts between frames by
+     counter-shifting the entire frame."""
+
+     do_registration: bool = ...
+     align_by_chan: int = ...
+     nimg_init: int = ...
+     batch_size: int = ...
+     maxregshift: float = ...
+     smooth_sigma: float = ...
+     smooth_sigma_time: float = ...
+     keep_movie_raw: bool = ...
+     two_step_registration: bool = ...
+     reg_tif: bool = ...
+     reg_tif_chan2: bool = ...
+     th_badframes: float = ...
+     norm_frames: bool = ...
+     force_refImg: bool = ...
+     pad_fft: bool = ...
+     do_regmetrics: bool = ...
+     reg_metric_n_pc: int = ...
+
+ @dataclass
+ class OnePRegistration:
+     """Stores parameters for additional pre-registration processing used to improve the registration of 1-photon
+     datasets."""
+
+     one_p_reg: bool = ...
+     spatial_hp_reg: int = ...
+     pre_smooth: float = ...
+     spatial_taper: float = ...
+
+ @dataclass
+ class NonRigid:
+     """Stores parameters for non-rigid registration, which is used to improve motion registration in complex
+     datasets by dividing frames into subregions and shifting each subregion independently of other subregions."""
+
+     nonrigid: bool = ...
+     block_size: list[int] = field(default_factory=Incomplete)
+     snr_thresh: float = ...
+     maxregshiftNR: float = ...
+
+ @dataclass
+ class ROIDetection:
+     """Stores parameters for cell ROI detection."""
+
+     preclassify: float = ...
+     roidetect: bool = ...
+     sparse_mode: bool = ...
+     spatial_scale: int = ...
+     connected: bool = ...
+     threshold_scaling: float = ...
+     spatial_hp_detect: int = ...
+     max_overlap: float = ...
+     high_pass: int = ...
+     smooth_masks: bool = ...
+     max_iterations: int = ...
+     nbinned: int = ...
+     denoise: bool = ...
+
+ @dataclass
+ class CellposeDetection:
+     """Stores parameters for the Cellpose algorithm, which can optionally be used to improve cell ROI extraction."""
+
+     anatomical_only: int = ...
+     diameter: int = ...
+     cellprob_threshold: float = ...
+     flow_threshold: float = ...
+     spatial_hp_cp: int = ...
+     pretrained_model: str = ...
+
+ @dataclass
+ class SignalExtraction:
+     """Stores parameters for extracting fluorescence signals from ROIs and surrounding neuropil regions."""
+
+     neuropil_extract: bool = ...
+     allow_overlap: bool = ...
+     min_neuropil_pixels: int = ...
+     inner_neuropil_radius: int = ...
+     lam_percentile: int = ...
+
+ @dataclass
+ class SpikeDeconvolution:
+     """Stores parameters for deconvolve fluorescence signals to infer spike trains."""
+
+     spikedetect: bool = ...
+     neucoeff: float = ...
+     baseline: str = ...
+     win_baseline: float = ...
+     sig_baseline: float = ...
+     prctile_baseline: float = ...
+
+ @dataclass
+ class Classification:
+     """Stores parameters for classifying detected ROIs as real cells or artifacts."""
+
+     soma_crop: bool = ...
+     use_builtin_classifier: bool = ...
+     classifier_path: str = ...
+
+ @dataclass
+ class Channel2:
+     """Stores parameters for processing the second channel in multichannel datasets."""
+
+     chan2_thres: float = ...
+
+ @dataclass
+ class SingleDayS2PConfiguration(YamlConfig):
+     """Aggregates all user-addressable parameters of the single-day suite2p pipeline used to discover cell ROIs and
+     extract their fluorescence data.
+
+     This class is used during single-day processing to instruct suite2p on how to process the data. This class is based
+     on the 'default_ops' from the original suite2p package. As part of the suite2p refactoring performed in sl-suite2p
+     package, the 'default_ops' has been replaced with this class instance. Compared to the 'original' ops, it allows
+     saving configuration parameters as a .YAML file, which offers a better way of viewing and editing the parameters and
+     running suite2p pipeline on remote compute servers.
+     """
+
+     main: Main = field(default_factory=Main)
+     file_io: FileIO = field(default_factory=FileIO)
+     output: Output = field(default_factory=Output)
+     registration: Registration = field(default_factory=Registration)
+     one_p_registration: OnePRegistration = field(default_factory=OnePRegistration)
+     non_rigid: NonRigid = field(default_factory=NonRigid)
+     roi_detection: ROIDetection = field(default_factory=ROIDetection)
+     cellpose_detection: CellposeDetection = field(default_factory=CellposeDetection)
+     signal_extraction: SignalExtraction = field(default_factory=SignalExtraction)
+     spike_deconvolution: SpikeDeconvolution = field(default_factory=SpikeDeconvolution)
+     classification: Classification = field(default_factory=Classification)
+     channel2: Channel2 = field(default_factory=Channel2)
+     def to_npy(self, output_directory: Path) -> None:
+         """Saves the managed configuration data as an 'ops.npy' file under the target directory.
+
+         This method is mostly called by internal sl-suite2p functions to translate the user-specified configuration
+         file into the format used by suite2p pipelines.
+
+         Notes:
+             If the target output directory does not exist when this method is called, it will be created.
+
+         Args:
+             output_directory: The path to the directory where the 'ops.npy' file should be saved.
+         """
+     def to_config(self, output_directory: Path) -> None:
+         """Saves the managed configuration data as a 'single_day_s2p_configuration.yaml' file under the target
+         directory.
+
+         This method is typically used to dump the 'default' configuration parameters to disk as a user-editable
+         .yaml file. The user is then expected to modify these parameters as needed, before the class data is loaded and
+         used by the suite2p pipeline.
+
+         Notes:
+             If the target output directory does not exist when this method is called, it will be created.
+
+         Args:
+             output_directory: The path to the directory where the 'single_day_s2p_configuration.yaml' file should be
+                 saved.
+         """
+     def to_ops(self) -> dict[str, Any]:
+         """Converts the class instance to a dictionary and returns it to caller.
+
+         This method is mostly called by internal sl-suite2p functions to translate the default configuration parameters
+         to the dictionary format used by suite2p pipelines.
+         """
sl_shared_assets/tools/__init__.pyi
@@ -0,0 +1,5 @@
+ from .transfer_tools import transfer_directory as transfer_directory
+ from .ascension_tools import ascend_tyche_data as ascend_tyche_data
+ from .packaging_tools import calculate_directory_checksum as calculate_directory_checksum
+
+ __all__ = ["transfer_directory", "calculate_directory_checksum", "ascend_tyche_data"]