sl-shared-assets 5.0.1__py3-none-any.whl → 5.1.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of sl-shared-assets might be problematic.
- sl_shared_assets/__init__.py +4 -0
- sl_shared_assets/command_line_interfaces/manage.py +53 -0
- sl_shared_assets/data_classes/__init__.py +3 -3
- sl_shared_assets/data_classes/configuration_data.py +105 -138
- sl_shared_assets/data_classes/runtime_data.py +2 -4
- sl_shared_assets/data_classes/session_data.py +116 -86
- sl_shared_assets/data_classes/surgery_data.py +44 -44
- sl_shared_assets/server/__init__.py +1 -2
- sl_shared_assets/server/job.py +43 -50
- sl_shared_assets/server/pipeline.py +108 -119
- sl_shared_assets/server/server.py +45 -104
- sl_shared_assets/tools/__init__.py +4 -0
- sl_shared_assets/tools/packaging_tools.py +1 -1
- sl_shared_assets/tools/project_management_tools.py +67 -12
- {sl_shared_assets-5.0.1.dist-info → sl_shared_assets-5.1.0.dist-info}/METADATA +1 -1
- sl_shared_assets-5.1.0.dist-info/RECORD +23 -0
- sl_shared_assets-5.0.1.dist-info/RECORD +0 -23
- {sl_shared_assets-5.0.1.dist-info → sl_shared_assets-5.1.0.dist-info}/WHEEL +0 -0
- {sl_shared_assets-5.0.1.dist-info → sl_shared_assets-5.1.0.dist-info}/entry_points.txt +0 -0
- {sl_shared_assets-5.0.1.dist-info → sl_shared_assets-5.1.0.dist-info}/licenses/LICENSE +0 -0
sl_shared_assets/server/server.py
@@ -1,7 +1,6 @@
-"""This module provides the
-
-
-resources.
+"""This module provides the API for submitting jobs to compute servers and clusters (managed via SLURM) and
+monitoring the running jobs status. Many Sun lab data workflow pipelines use this interface for accessing shared
+compute resources.
 """
 
 import stat
@@ -33,7 +32,7 @@ def generate_server_credentials(
     working_root: str = "/local/storage",
     shared_directory_name: str = "sun_data",
 ) -> None:
-    """Generates
+    """Generates the server access credentials .yaml file under the specified directory, using input information.
 
     This function provides a convenience interface for generating new server access credential files. Depending on
     configuration, it either creates user access credentials files or service access credentials files.
@@ -76,13 +75,7 @@ def generate_server_credentials(
 
 @dataclass()
 class ServerCredentials(YamlConfig):
-    """This class stores the
-    pipelines.
-
-    Primarily, this is used as part of the sl-experiment library runtime to start data processing once it is
-    transferred to the BioHPC server during preprocessing. However, the same file can be used together with the Server
-    class API to run any computation jobs on the lab's BioHPC server.
-    """
+    """This class stores the information used to interface with Sun lab's remote compute servers."""
 
     username: str = "YourNetID"
     """The username to use for server authentication."""
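As a usage illustration, a minimal sketch follows. It assumes ServerCredentials is re-exported at the package root and that the YamlConfig base class provides to_yaml()/from_yaml() helpers; neither assumption is confirmed by this diff.

# Hypothetical sketch: creating and reloading a credentials file for the Server API.
# The import path and the to_yaml()/from_yaml() method names are assumptions.
from pathlib import Path

from sl_shared_assets import ServerCredentials  # assumed re-export location

credentials = ServerCredentials(username="my_net_id")  # remaining fields keep their defaults
credentials.to_yaml(file_path=Path("server_credentials.yaml"))  # assumed YamlConfig method

# Later, the saved file can be reloaded for use with the Server class:
credentials = ServerCredentials.from_yaml(file_path=Path("server_credentials.yaml"))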
@@ -124,16 +117,13 @@ class ServerCredentials(YamlConfig):
 
 
 class Server:
-    """
+    """Establishes and maintains a bidirectional interface that allows working with a remote compute server.
 
-    This class provides the API that allows accessing the remote processing server
-    SLURM-managed jobs to the server. It functions as the central interface
-    lab to execute costly data processing on the server.
+    This class provides the API that allows accessing the remote processing server. Primarily, the class is used to
+    submit SLURM-managed jobs to the server and monitor their execution status. It functions as the central interface
+    used by many data workflow pipelines in the lab to execute costly data processing on the server.
 
     Notes:
-        All lab processing pipelines expect the data to be stored on the server and all processing logic to be packaged
-        and installed into dedicated conda environments on the server.
-
         This class assumes that the target server has SLURM job manager installed and accessible to the user whose
         credentials are used to connect to the server as part of this class instantiation.
 
@@ -196,54 +186,6 @@ class Server:
         """If the instance is connected to the server, terminates the connection before the instance is destroyed."""
         self.close()
 
-    def create_job(
-        self,
-        job_name: str,
-        conda_environment: str,
-        cpus_to_use: int = 10,
-        ram_gb: int = 10,
-        time_limit: int = 60,
-    ) -> Job:
-        """Creates and returns a new Job instance.
-
-        Use this method to generate Job objects for all headless jobs that need to be run on the remote server. The
-        generated Job is a precursor that requires further configuration by the user before it can be submitted to the
-        server for execution.
-
-        Args:
-            job_name: The descriptive name of the SLURM job to be created. Primarily, this name is used in terminal
-                printouts to identify the job to human operators.
-            conda_environment: The name of the conda environment to activate on the server before running the job logic.
-                The environment should contain the necessary Python packages and CLIs to support running the job's
-                logic.
-            cpus_to_use: The number of CPUs to use for the job.
-            ram_gb: The amount of RAM to allocate for the job, in Gigabytes.
-            time_limit: The maximum time limit for the job, in minutes. If the job is still running at the end of this
-                time period, it will be forcibly terminated. It is highly advised to always set adequate maximum runtime
-                limits to prevent jobs from hogging the server in case of runtime or algorithm errors.
-
-        Returns:
-            The initialized Job instance pre-filled with SLURM configuration data and conda activation commands. Modify
-            the returned instance with any additional commands as necessary for the job to fulfill its intended
-            purpose. Note, the Job requires submission via submit_job() to be executed by the server.
-        """
-        # Statically configures the working directory to be stored under:
-        # user working root / job_logs / job_name_timestamp
-        timestamp = get_timestamp()
-        working_directory = Path(self.user_working_root.joinpath("job_logs", f"{job_name}_{timestamp}"))
-        self.create_directory(remote_path=working_directory, parents=True)
-
-        return Job(
-            job_name=job_name,
-            output_log=working_directory.joinpath("stdout.txt"),
-            error_log=working_directory.joinpath("stderr.txt"),
-            working_directory=working_directory,
-            conda_environment=conda_environment,
-            cpus_to_use=cpus_to_use,
-            ram_gb=ram_gb,
-            time_limit=time_limit,
-        )
-
     def launch_jupyter_server(
         self,
         job_name: str,
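Since 5.1.0 removes create_job(), callers that relied on it now need to construct Job instances themselves. The sketch below reproduces the deleted helper's behavior using only the calls visible in the removed code; the Job constructor may have changed in 5.1.0, so treat this as a migration sketch rather than the library's documented API.

# Migration sketch based on the deleted 5.0.1 create_job() body above. The 'Job' and 'Server'
# import locations are assumptions; the timestamp line stands in for the removed get_timestamp().
from datetime import datetime, timezone
from pathlib import Path

from sl_shared_assets.server import Job, Server  # assumed import locations


def make_job(server: Server, job_name: str, conda_environment: str) -> Job:
    # Working directory layout copied from the removed helper: user working root / job_logs / name_timestamp.
    timestamp = datetime.now(timezone.utc).strftime("%Y-%m-%d-%H-%M-%S-%f")
    working_directory = Path(server.user_working_root.joinpath("job_logs", f"{job_name}_{timestamp}"))
    server.create_directory(remote_path=working_directory, parents=True)
    return Job(
        job_name=job_name,
        output_log=working_directory.joinpath("stdout.txt"),
        error_log=working_directory.joinpath("stderr.txt"),
        working_directory=working_directory,
        conda_environment=conda_environment,
        cpus_to_use=10,
        ram_gb=10,
        time_limit=60,  # minutes; the removed helper advised always setting an adequate limit
    )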
@@ -255,31 +197,24 @@
         port: int = 0,
         jupyter_args: str = "",
     ) -> JupyterJob:
-        """Launches a Jupyter notebook server on the target remote
+        """Launches a remote Jupyter notebook session (server) on the target remote compute server.
 
-
-        create_job(), this method automatically submits the job for execution as part of its runtime. Therefore, the
-        returned JupyterJob instance should only be used to query information about how to connect to the remote
-        Jupyter server.
+        This method allows running interactive Jupyter sessions on the remote server under SLURM control.
 
         Args:
-            job_name: The descriptive name of the Jupyter SLURM job to be created.
-                terminal printouts to identify the job to human operators.
+            job_name: The descriptive name of the Jupyter SLURM job to be created.
             conda_environment: The name of the conda environment to activate on the server before running the job logic.
                 The environment should contain the necessary Python packages and CLIs to support running the job's
                 logic. For Jupyter jobs, this necessarily includes the Jupyter notebook and jupyterlab packages.
             port: The connection port number for the Jupyter server. If set to 0 (default), a random port number between
-                8888 and 9999
+                8888 and 9999 is assigned to this connection to reduce the possibility of colliding with other
                 user sessions.
-            notebook_directory: The directory to
-                access to items stored
-                root
-            cpus_to_use: The number of CPUs to allocate to the Jupyter server.
-
-
-                avoid interfering with headless data processing jobs.
-            time_limit: The maximum Jupyter server uptime, in minutes. Set this to the expected duration of your jupyter
-                session.
+            notebook_directory: The root directory where to run the Jupyter notebook. During runtime, the notebook will
+                only have access to items stored under this directory. For most runtimes, this should be set to the
+                user's root working directory.
+            cpus_to_use: The number of CPUs to allocate to the Jupyter server.
+            ram_gb: The amount of RAM, in GB, to allocate to the Jupyter server.
+            time_limit: The maximum Jupyter server uptime, in minutes.
             jupyter_args: Stores additional arguments to pass to jupyter notebook initialization command.
 
         Returns:
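A hedged sketch of launching an interactive session with the arguments documented above; it continues the earlier sketches, so the connected 'server' object is assumed, and only keyword names visible in the docstring are used.

# Hypothetical sketch: all keyword names come from the Args section above; the values are illustrative.
from pathlib import Path

jupyter_job = server.launch_jupyter_server(
    job_name="exploratory_analysis",
    conda_environment="analysis_env",  # must include the notebook and jupyterlab packages
    port=0,  # 0 selects a random port between 8888 and 9999
    notebook_directory=Path("/local/storage/my_net_id"),  # hypothetical user working root
    cpus_to_use=4,
    ram_gb=16,
    time_limit=240,  # minutes of Jupyter server uptime
)
# Per the docstring, the returned JupyterJob describes how to connect to the remote session.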
@@ -320,28 +255,37 @@
         return self.submit_job(job) # type: ignore[return-value]
 
     def submit_job(self, job: Job | JupyterJob, verbose: bool = True) -> Job | JupyterJob:
-        """Submits the input job to the managed
+        """Submits the input job to the managed remote compute server via the SLURM job manager.
 
-        This method
-
-        the server, and instructs the server to execute the shell script (via SLURM).
+        This method functions as the entry point for all headless jobs that are executed on the remote compute
+        server.
 
         Args:
-            job: The Job
-            verbose: Determines whether to notify the user about non-error states of the job submission
-                this is disabled when batch-submitting jobs (for example, as part of running a processing pipeline) and
-                enabled when submitting single jobs.
+            job: The initialized Job instance that contains remote job's data.
+            verbose: Determines whether to notify the user about non-error states of the job submission process.
 
         Returns:
-            The job object whose 'job_id' attribute had been modified
-            submitted.
+            The job object whose 'job_id' attribute had been modified to include the SLURM-assigned job ID if the job
+            was successfully submitted.
 
         Raises:
-            RuntimeError: If job
+            RuntimeError: If the job cannot be submitted to the server for any reason.
         """
         if verbose:
             console.echo(message=f"Submitting '{job.job_name}' job to the remote server {self.host}...")
 
+        # If the Job object already has a job ID, this indicates that the job has already been submitted to the server.
+        # In this case returns it to the caller with no further modifications.
+        if job.job_id is not None:
+            console.echo(
+                message=(
+                    f"The '{job.job_name}' job has already been submitted to the server. No further actions have "
+                    f"been taken as part of this submission cycle."
+                ),
+                level=LogLevel.WARNING,
+            )
+            return job
+
         # Generates a temporary shell script on the local machine. Uses tempfile to automatically remove the
         # local script as soon as it is uploaded to the server.
         with tempfile.TemporaryDirectory() as temp_dir:
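The new guard makes submission idempotent, which the short sketch below (continuing the examples above) illustrates.

# Continuing the sketches above: resubmitting the same Job is now a warning, not a duplicate job.
job = server.submit_job(job, verbose=False)  # first call uploads the script and sets job.job_id
job = server.submit_job(job, verbose=False)  # second call warns and returns the job unchanged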
@@ -439,7 +383,7 @@
         """Returns True if the job managed by the input Job instance has been completed or terminated its runtime due
         to an error.
 
-        If the job is still running or
+        If the job is still running or queued for runtime, the method returns False.
 
         Args:
             job: The Job object whose status needs to be checked.
@@ -467,9 +411,9 @@
     def abort_job(self, job: Job | JupyterJob) -> None:
         """Aborts the target job if it is currently running on the server.
 
-
-
-
+        If the job is currently running, this method forcibly terminates its runtime. If the job is queued for
+        execution, this method removes it from the SLURM queue. If the job is already terminated, this method will do
+        nothing.
 
         Args:
             job: The Job object that needs to be aborted.
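Together, the completion check and abort_job() support a simple client-side watchdog, sketched below; 'job_complete' is an assumed name for the method whose docstring appears above, as the diff shows only the docstring.

# Hedged watchdog sketch built from the two methods documented above.
import time

deadline = time.monotonic() + 60 * 60  # give up after one hour of polling
while not server.job_complete(job):  # assumed method name for the completion check
    if time.monotonic() > deadline:
        server.abort_job(job)  # terminates a running job or removes a queued one
        break
    time.sleep(30)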
@@ -597,7 +541,7 @@
         sftp.close()
 
     def _recursive_remove(self, sftp: paramiko.SFTPClient, remote_path: Path) -> None:
-        """Recursively removes
+        """Recursively removes the specified remote directory and all its contents.
 
         This worker method is used by the user-facing remove() method to recursively remove non-empty directories.
 
@@ -627,10 +571,7 @@
             console.echo(f"Unable to remove the specified directory {remote_path}: {e!s}", level=LogLevel.WARNING)
 
     def create_directory(self, remote_path: Path, parents: bool = True) -> None:
-        """Creates the specified directory tree on the managed remote server
-
-        This method creates directories on the remote server, with options to create parent directories and handle
-        existing directories gracefully.
+        """Creates the specified directory tree on the managed remote server.
 
         Args:
             remote_path: The absolute path to the directory to create on the remote server, relative to the server
sl_shared_assets/tools/__init__.py
@@ -5,6 +5,8 @@ from .transfer_tools import delete_directory, transfer_directory
 from .packaging_tools import calculate_directory_checksum
 from .project_management_tools import (
     ProjectManifest,
+    acquire_lock,
+    release_lock,
     archive_session,
     prepare_session,
     resolve_checksum,
@@ -13,11 +15,13 @@ from .project_management_tools import (
 
 __all__ = [
     "ProjectManifest",
+    "acquire_lock",
     "archive_session",
     "calculate_directory_checksum",
     "delete_directory",
     "generate_project_manifest",
     "prepare_session",
+    "release_lock",
     "resolve_checksum",
     "transfer_directory",
 ]
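After this change, the lock helpers are part of the package's public surface:

# New public imports added by this release (re-exported via the __all__ update above; they also
# remain importable from sl_shared_assets.tools.project_management_tools directly):
from sl_shared_assets.tools import acquire_lock, release_lock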
sl_shared_assets/tools/packaging_tools.py
@@ -13,7 +13,7 @@ import xxhash
 
 # Defines a 'blacklist' set of files. Primarily, this list contains the service files that may change after the session
 # data has been acquired. Therefore, it does not make sense to include them in the checksum, as they do not reflect the
-# data that should remain permanently unchanged.
+# data that should remain permanently unchanged.
 _excluded_files = {
     "ax_checksum.txt",
     "ubiquitin.bin",
sl_shared_assets/tools/project_management_tools.py
@@ -1,6 +1,5 @@
-"""This module provides tools for managing the data of any Sun lab project. Tools from this module
-
-processing pipelines."""
+"""This module provides tools for managing the data of any Sun lab project. Tools from this module are primarily used
+to support data processing pipelines that make up the Sun lab data workflow and run on the remote compute server."""
 
 from pathlib import Path
 from datetime import datetime
@@ -24,6 +23,62 @@ from .transfer_tools import delete_directory, transfer_directory
 from .packaging_tools import calculate_directory_checksum
 
 
+def acquire_lock(
+    session_path: Path, manager_id: int, processed_data_root: Path | None = None, reset_lock: bool = False
+) -> None:
+    """Acquires the target session's data lock for the specified manager process.
+
+    Calling this function locks the target session's data to make it accessible only for the specified manager process.
+
+    Notes:
+        Each time this function is called, the release_lock() function must also be called to release the lock file.
+
+    Args:
+        session_path: The path to the session directory to be locked.
+        manager_id: The unique identifier of the manager process that acquires the lock.
+        reset_lock: Determines whether to reset the lock file before executing the runtime. This allows recovering
+            from deadlocked runtimes, but otherwise should not be used to ensure that the lock performs its intended
+            function of limiting access to session's data.
+        processed_data_root: The path to the root directory used to store the processed data from all Sun lab projects,
+            if different from the 'session_path' root.
+    """
+
+    # Resolves the session directory hierarchy
+    session = SessionData.load(session_path=session_path, processed_data_root=processed_data_root)
+
+    # Instantiates the lock instance for the session
+    lock = SessionLock(file_path=session.tracking_data.session_lock_path)
+
+    # If requested, forcibly resets the lock state before re-acquiring the lock for the specified manager
+    if reset_lock:
+        lock.force_release()
+
+    # Acquires the lock for the specified manager.
+    lock.acquire(manager_id=manager_id)
+
+
+def release_lock(session_path: Path, manager_id: int, processed_data_root: Path | None = None) -> None:
+    """Releases the target session's data lock if it is owned by the specified manager process.
+
+    Calling this function unlocks the session's data, making it possible for other manager processes to acquire the
+    lock and work with the session's data. This step has to be performed by every manager process as part of its
+    shutdown sequence if the manager called the acquire_lock() function.
+
+    Args:
+        session_path: The path to the session directory to be unlocked.
+        manager_id: The unique identifier of the manager process that releases the lock.
+        processed_data_root: The path to the root directory used to store the processed data from all Sun lab projects,
+            if different from the 'session_path' root.
+    """
+
+    # Resolves the session directory hierarchy
+    session = SessionData.load(session_path=session_path, processed_data_root=processed_data_root)
+
+    # Releases the lock for the target session
+    lock = SessionLock(file_path=session.tracking_data.session_lock_path)
+    lock.release(manager_id=manager_id)
+
+
 def resolve_checksum(
     session_path: Path,
     manager_id: int,
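The docstrings above prescribe a strict pairing of acquire_lock() and release_lock(). A minimal sketch of that discipline follows, with an illustrative session path and manager ID.

# Sketch of the acquire/release pairing required by the docstrings above. The session path and
# manager ID are illustrative; release_lock() runs in 'finally' so crashes cannot leave the
# session permanently locked (reset_lock=True remains the documented escape hatch for deadlocks).
import os
from pathlib import Path

from sl_shared_assets.tools import acquire_lock, release_lock

session = Path("/server/sun_data/my_project/animal_1/session_1")  # hypothetical session directory
manager = os.getpid()  # any unique identifier for the manager process

acquire_lock(session_path=session, manager_id=manager)
try:
    ...  # work with the session's data while it is locked to this manager
finally:
    release_lock(session_path=session, manager_id=manager)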
@@ -54,7 +109,7 @@ def resolve_checksum(
         reset_tracker: Determines whether to reset the tracker file before executing the runtime. This allows
             recovering from deadlocked runtimes, but otherwise should not be used to ensure runtime safety.
         regenerate_checksum: Determines whether to update the checksum stored in the ax_checksum.txt file before
-            carrying out the verification. In this case, the verification necessarily succeeds and the session's
+            carrying out the verification. In this case, the verification necessarily succeeds, and the session's
             reference checksum is changed to reflect the current state of the session data.
     """
 
@@ -64,9 +119,9 @@ def resolve_checksum(
         processed_data_root=processed_data_root,
     )
 
-    #
+    # Ensures that the manager process is holding the session lock
     lock = SessionLock(file_path=session_data.tracking_data.session_lock_path)
-    lock.
+    lock.check_owner(manager_id=manager_id)
 
     # Initializes the ProcessingTracker instance
     tracker = ProcessingTracker(
|
|
|
155
210
|
processed_data_root=processed_data_root,
|
|
156
211
|
)
|
|
157
212
|
|
|
158
|
-
#
|
|
213
|
+
# Ensures that the manager process is holding the session lock
|
|
159
214
|
lock = SessionLock(file_path=session_data.tracking_data.session_lock_path)
|
|
160
|
-
lock.
|
|
215
|
+
lock.check_owner(manager_id=manager_id)
|
|
161
216
|
|
|
162
217
|
# Initializes the ProcessingTracker instances for preparation and archiving pipelines.
|
|
163
218
|
preparation_tracker = ProcessingTracker(
|
|
@@ -260,10 +315,10 @@ def archive_session(
|
|
|
260
315
|
Args:
|
|
261
316
|
session_path: The path to the session directory to be processed.
|
|
262
317
|
manager_id: The unique identifier of the manager process that manages the runtime.
|
|
263
|
-
processed_data_root: The path to the root directory used to store the processed data from all Sun lab projects,
|
|
264
|
-
if different from the 'session_path' root.
|
|
265
318
|
reset_tracker: Determines whether to reset the tracker file before executing the runtime. This allows
|
|
266
319
|
recovering from deadlocked runtimes, but otherwise should not be used to ensure runtime safety.
|
|
320
|
+
processed_data_root: The path to the root directory used to store the processed data from all Sun lab projects,
|
|
321
|
+
if different from the 'session_path' root.
|
|
267
322
|
|
|
268
323
|
Notes:
|
|
269
324
|
This function inverses the result of running the prepare_session() function.
|
|
@@ -274,9 +329,9 @@ def archive_session(
|
|
|
274
329
|
processed_data_root=processed_data_root,
|
|
275
330
|
)
|
|
276
331
|
|
|
277
|
-
#
|
|
332
|
+
# Ensures that the manager process is holding the session lock
|
|
278
333
|
lock = SessionLock(file_path=session_data.tracking_data.session_lock_path)
|
|
279
|
-
lock.
|
|
334
|
+
lock.check_owner(manager_id=manager_id)
|
|
280
335
|
|
|
281
336
|
# Initializes the ProcessingTracker instances for preparation and archiving pipelines.
|
|
282
337
|
preparation_tracker = ProcessingTracker(
|
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
Metadata-Version: 2.4
|
|
2
2
|
Name: sl-shared-assets
|
|
3
|
-
Version: 5.0
|
|
3
|
+
Version: 5.1.0
|
|
4
4
|
Summary: Provides data acquisition and processing assets shared between Sun (NeuroAI) lab libraries.
|
|
5
5
|
Project-URL: Homepage, https://github.com/Sun-Lab-NBB/sl-shared-assets
|
|
6
6
|
Project-URL: Documentation, https://sl-shared-assets-api-docs.netlify.app/
|
|
sl_shared_assets-5.1.0.dist-info/RECORD
@@ -0,0 +1,23 @@
+sl_shared_assets/__init__.py,sha256=b4dg24AJKMTNxp050maE2vlqNu57nGHrzvyQUEFTwMM,2753
+sl_shared_assets/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+sl_shared_assets/command_line_interfaces/__init__.py,sha256=dbcCiuBCe1uqz6d1D-dFpSkRwYP603tEGkc1RbZcI6w,126
+sl_shared_assets/command_line_interfaces/configure.py,sha256=lAWhLzJ7UmdvUZlges9Hg5pnblQuXSDlgzr12y41108,6140
+sl_shared_assets/command_line_interfaces/manage.py,sha256=hFsj5r2UTiFwdr3N6FHOGYRnXBmdcJ4N_bpjsqcE5iA,10766
+sl_shared_assets/data_classes/__init__.py,sha256=7Cslz_z0K2VmgXjOR1u-NBySoWYfUNawBHh_1xLiFZ0,2092
+sl_shared_assets/data_classes/configuration_data.py,sha256=9gVo7bQShCMqAuau8d9BML6RrQyA4wg8ZFol1RJfJ7g,41334
+sl_shared_assets/data_classes/runtime_data.py,sha256=z7C40ZeNOePqRZD4zE3KbmK2yvbox1N-6flvcEE2d_4,17400
+sl_shared_assets/data_classes/session_data.py,sha256=gfzdFnBXwawWXQWiwZ3UXQeYajauKErkhnBph5mwXtA,48890
+sl_shared_assets/data_classes/surgery_data.py,sha256=-rENeSjAPmoIc-fp_skI4-EW_uxbK0Ps9aTLPWLkTSQ,7624
+sl_shared_assets/server/__init__.py,sha256=cIZFNE8pPPujp_FxIt6yT22L-4V4XBb1e_x4cnTrtSI,685
+sl_shared_assets/server/job.py,sha256=_u27gXwSfg3ztCemAtiskC9KnDfqF-RvWlTYYk0wHvQ,18704
+sl_shared_assets/server/pipeline.py,sha256=Uqg210L6N71OHTy4--8iocRz_bV5cBSUOVZnOk_u1zg,32157
+sl_shared_assets/server/server.py,sha256=FEBx9isTswndAKFaZ6MM4qyFp75FcE2ZWUiwc_uycMI,32825
+sl_shared_assets/tools/__init__.py,sha256=gMyKO3ZA70pmG7Hh8fIA5CLrkOXtN5pQk2o4NrKUjXM,813
+sl_shared_assets/tools/packaging_tools.py,sha256=9kwUSQQRCOycpNA7ovthCTXYX1HCC7p1iDnCYz2K2B8,6978
+sl_shared_assets/tools/project_management_tools.py,sha256=I1EqSapoQU2zspoMc_OxOrUMw_iYIqX6uIfmZZ3paGU,43205
+sl_shared_assets/tools/transfer_tools.py,sha256=6lwk4zyHWWB0QeWQageiQ1pPIGUV0IhBNNzsbgQ_qH4,9026
+sl_shared_assets-5.1.0.dist-info/METADATA,sha256=l_Ww8_R1Mw13mkxGrxzlWmPYPa8TRy6dSqtsz85yhEk,46884
+sl_shared_assets-5.1.0.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+sl_shared_assets-5.1.0.dist-info/entry_points.txt,sha256=19NzFPG5CcW-4RhEacTcX3J2TbrvmjOE4tlLCH2O5wI,161
+sl_shared_assets-5.1.0.dist-info/licenses/LICENSE,sha256=OXLcl0T2SZ8Pmy2_dmlvKuetivmyPd5m1q-Gyd-zaYY,35149
+sl_shared_assets-5.1.0.dist-info/RECORD,,
sl_shared_assets-5.0.1.dist-info/RECORD
@@ -1,23 +0,0 @@
-sl_shared_assets/__init__.py,sha256=97oYSDN15AVCv-nk1MzIs_7FfsQP8ENR1wkJYQc6vEY,2677
-sl_shared_assets/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-sl_shared_assets/command_line_interfaces/__init__.py,sha256=dbcCiuBCe1uqz6d1D-dFpSkRwYP603tEGkc1RbZcI6w,126
-sl_shared_assets/command_line_interfaces/configure.py,sha256=lAWhLzJ7UmdvUZlges9Hg5pnblQuXSDlgzr12y41108,6140
-sl_shared_assets/command_line_interfaces/manage.py,sha256=aGLn_iOrAGTXLFvhrjex4qXo7Crutmp8e8dWQ8YWuy4,8738
-sl_shared_assets/data_classes/__init__.py,sha256=SiOY1JjK_W7lc-EkeZ4O8SCepMPsC5OILUkPe6geU1Q,2119
-sl_shared_assets/data_classes/configuration_data.py,sha256=X6p5V15Verpu6ycO_fJhGOCyNp-IacVLktERc5eYF4k,45394
-sl_shared_assets/data_classes/runtime_data.py,sha256=ruo5KZ1VSfGwSg_gtSyWTbLBFcfG7Az455cKxiUpDoc,17565
-sl_shared_assets/data_classes/session_data.py,sha256=q34M6xDl_ntnvIG_tdDlGrkSXbHR1MzMGjhNBwM2rkk,47038
-sl_shared_assets/data_classes/surgery_data.py,sha256=riwyULMdZ8pDXFLdyNByNSeMk1-mVTo1FcCl5FrThas,7495
-sl_shared_assets/server/__init__.py,sha256=6REoeUvHW39V1Uwal_Af4uZ8J6UYDmR2N-saZoau30E,869
-sl_shared_assets/server/job.py,sha256=92nCVaLYqEJH7-xaS0bZmIVoGp-nJ5gh6dnwyRyZ9gI,19406
-sl_shared_assets/server/pipeline.py,sha256=0Peuq499BxKA3GaebQyhl_9VLepuaJQuwwt2vuM5ULg,33503
-sl_shared_assets/server/server.py,sha256=BUELEQbO-sx-9E2epGURCYSGN-I9WAD2dRETMWEPcRU,36483
-sl_shared_assets/tools/__init__.py,sha256=Pf8_MqZOLiityR112Txkermxf1bBLHYdEdw-raeY9Fc,737
-sl_shared_assets/tools/packaging_tools.py,sha256=hvOrdRwQCoe199G_XHhIpbKtIx-I0ad9O4SLZwhgj7U,7035
-sl_shared_assets/tools/project_management_tools.py,sha256=AcZpkzcXyAv4aBn-LIXawQgd7le7-vtZ9dl5MvMFmkg,40365
-sl_shared_assets/tools/transfer_tools.py,sha256=6lwk4zyHWWB0QeWQageiQ1pPIGUV0IhBNNzsbgQ_qH4,9026
-sl_shared_assets-5.0.1.dist-info/METADATA,sha256=kH_5OOIkoJbGKbiBoAG8_Ghg5iSisLLsxAV3NjTPY4E,46884
-sl_shared_assets-5.0.1.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
-sl_shared_assets-5.0.1.dist-info/entry_points.txt,sha256=19NzFPG5CcW-4RhEacTcX3J2TbrvmjOE4tlLCH2O5wI,161
-sl_shared_assets-5.0.1.dist-info/licenses/LICENSE,sha256=OXLcl0T2SZ8Pmy2_dmlvKuetivmyPd5m1q-Gyd-zaYY,35149
-sl_shared_assets-5.0.1.dist-info/RECORD,,

{sl_shared_assets-5.0.1.dist-info → sl_shared_assets-5.1.0.dist-info}/WHEEL: file without changes
{sl_shared_assets-5.0.1.dist-info → sl_shared_assets-5.1.0.dist-info}/entry_points.txt: file without changes
{sl_shared_assets-5.0.1.dist-info → sl_shared_assets-5.1.0.dist-info}/licenses/LICENSE: file without changes