executorlib 0.0.8__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- executorlib/__init__.py +248 -0
- executorlib/_version.py +716 -0
- executorlib/backend/__init__.py +0 -0
- executorlib/backend/cache_parallel.py +57 -0
- executorlib/backend/cache_serial.py +6 -0
- executorlib/backend/interactive_parallel.py +99 -0
- executorlib/backend/interactive_serial.py +74 -0
- executorlib/base/__init__.py +0 -0
- executorlib/base/executor.py +167 -0
- executorlib/cache/__init__.py +0 -0
- executorlib/cache/backend.py +75 -0
- executorlib/cache/executor.py +121 -0
- executorlib/cache/queue_spawner.py +109 -0
- executorlib/cache/shared.py +249 -0
- executorlib/cache/subprocess_spawner.py +65 -0
- executorlib/interactive/__init__.py +0 -0
- executorlib/interactive/executor.py +329 -0
- executorlib/interactive/flux.py +135 -0
- executorlib/interactive/shared.py +657 -0
- executorlib/interactive/slurm.py +109 -0
- executorlib/standalone/__init__.py +21 -0
- executorlib/standalone/command.py +14 -0
- executorlib/standalone/hdf.py +116 -0
- executorlib/standalone/inputcheck.py +201 -0
- executorlib/standalone/interactive/__init__.py +0 -0
- executorlib/standalone/interactive/backend.py +98 -0
- executorlib/standalone/interactive/communication.py +213 -0
- executorlib/standalone/interactive/spawner.py +174 -0
- executorlib/standalone/plot.py +134 -0
- executorlib/standalone/queue.py +19 -0
- executorlib/standalone/serialize.py +82 -0
- executorlib/standalone/thread.py +42 -0
- executorlib-0.0.8.dist-info/LICENSE +29 -0
- executorlib-0.0.8.dist-info/METADATA +230 -0
- executorlib-0.0.8.dist-info/RECORD +37 -0
- executorlib-0.0.8.dist-info/WHEEL +5 -0
- executorlib-0.0.8.dist-info/top_level.txt +1 -0
|
@@ -0,0 +1,109 @@
|
|
|
1
|
+
import os
|
|
2
|
+
from typing import Optional
|
|
3
|
+
|
|
4
|
+
from executorlib.standalone.interactive.spawner import SubprocessSpawner
|
|
5
|
+
|
|
6
|
+
# Launcher binary used to start tasks inside an existing SLURM allocation.
SLURM_COMMAND = "srun"
|
|
7
|
+
|
|
8
|
+
|
|
9
|
+
def validate_max_workers(max_workers: int, cores: int, threads_per_core: int):
    """
    Validate that the requested resources fit into the current SLURM allocation.

    The size of the allocation is read from the SLURM_NTASKS and
    SLURM_CPUS_PER_TASK environment variables set by SLURM.

    Args:
        max_workers (int): number of workers to be spawned
        cores (int): number of MPI ranks per worker
        threads_per_core (int): number of threads per core

    Raises:
        ValueError: if more cores are requested than the allocation provides
    """
    available = int(os.environ["SLURM_NTASKS"]) * int(os.environ["SLURM_CPUS_PER_TASK"])
    requested = max_workers * cores * threads_per_core
    if requested > available:
        raise ValueError(
            f"The number of requested cores is larger than the available cores {available} < {requested}"
        )
|
|
21
|
+
|
|
22
|
+
|
|
23
|
+
class SrunSpawner(SubprocessSpawner):
    def __init__(
        self,
        cwd: Optional[str] = None,
        cores: int = 1,
        threads_per_core: int = 1,
        gpus_per_core: int = 0,
        openmpi_oversubscribe: bool = False,
        slurm_cmd_args: Optional[list[str]] = None,
    ):
        """
        Srun interface implementation.

        Args:
            cwd (str, optional): The current working directory. Defaults to None.
            cores (int, optional): The number of cores to use. Defaults to 1.
            threads_per_core (int, optional): The number of threads per core. Defaults to 1.
            gpus_per_core (int, optional): The number of GPUs per core. Defaults to 0.
            openmpi_oversubscribe (bool, optional): Whether to oversubscribe the cores. Defaults to False.
            slurm_cmd_args (list[str], optional): Additional command line arguments. Defaults to None.
        """
        super().__init__(
            cwd=cwd,
            cores=cores,
            openmpi_oversubscribe=openmpi_oversubscribe,
            threads_per_core=threads_per_core,
        )
        self._gpus_per_core = gpus_per_core
        # Fix: the original used a mutable default argument (slurm_cmd_args=[]),
        # which is shared between all instances relying on the default. Copy into
        # a fresh list per instance instead.
        self._slurm_cmd_args = list(slurm_cmd_args) if slurm_cmd_args is not None else []

    def generate_command(self, command_lst: list[str]) -> list[str]:
        """
        Generate the command list for the Srun interface.

        Args:
            command_lst (list[str]): The command list.

        Returns:
            list[str]: The generated command list, prefixed with the srun launcher.
        """
        command_prepend_lst = generate_slurm_command(
            cores=self._cores,
            cwd=self._cwd,
            threads_per_core=self._threads_per_core,
            gpus_per_core=self._gpus_per_core,
            openmpi_oversubscribe=self._openmpi_oversubscribe,
            slurm_cmd_args=self._slurm_cmd_args,
        )
        return super().generate_command(
            command_lst=command_prepend_lst + command_lst,
        )
|
|
74
|
+
|
|
75
|
+
|
|
76
|
+
def generate_slurm_command(
    cores: int,
    cwd: Optional[str],
    threads_per_core: int = 1,
    gpus_per_core: int = 0,
    openmpi_oversubscribe: bool = False,
    slurm_cmd_args: Optional[list[str]] = None,
) -> list[str]:
    """
    Generate the command list for the SLURM interface.

    Args:
        cores (int): The number of cores.
        cwd (str): The current working directory.
        threads_per_core (int, optional): The number of threads per core. Defaults to 1.
        gpus_per_core (int, optional): The number of GPUs per core. Defaults to 0.
        openmpi_oversubscribe (bool, optional): Whether to oversubscribe the cores. Defaults to False.
        slurm_cmd_args (list[str], optional): Additional command line arguments. Defaults to None.

    Returns:
        list[str]: The generated command list.
    """
    command_prepend_lst = [SLURM_COMMAND, "-n", str(cores)]
    if cwd is not None:
        command_prepend_lst += ["-D", cwd]
    if threads_per_core > 1:
        # Bug fix: the original emitted "--cpus-per-task<N>" without the "=",
        # producing an invalid srun flag (e.g. "--cpus-per-task2").
        command_prepend_lst += ["--cpus-per-task=" + str(threads_per_core)]
    if gpus_per_core > 0:
        command_prepend_lst += ["--gpus-per-task=" + str(gpus_per_core)]
    if openmpi_oversubscribe:
        command_prepend_lst += ["--oversubscribe"]
    # None (the default, replacing the original mutable default []) and an empty
    # list are both treated as "no extra arguments".
    if slurm_cmd_args:
        command_prepend_lst += slurm_cmd_args
    return command_prepend_lst
|
|
@@ -0,0 +1,21 @@
|
|
|
1
|
+
from executorlib.standalone.interactive.communication import (
|
|
2
|
+
SocketInterface,
|
|
3
|
+
interface_bootup,
|
|
4
|
+
interface_connect,
|
|
5
|
+
interface_receive,
|
|
6
|
+
interface_send,
|
|
7
|
+
interface_shutdown,
|
|
8
|
+
)
|
|
9
|
+
from executorlib.standalone.interactive.spawner import MpiExecSpawner
|
|
10
|
+
from executorlib.standalone.thread import RaisingThread
|
|
11
|
+
|
|
12
|
+
# Public API of executorlib.standalone: re-export the socket communication
# helpers, the MPI spawner and the exception-propagating thread wrapper.
__all__ = [
    "SocketInterface",
    "interface_bootup",
    "interface_connect",
    "interface_send",
    "interface_shutdown",
    "interface_receive",
    "RaisingThread",
    "MpiExecSpawner",
]
|
|
@@ -0,0 +1,14 @@
|
|
|
1
|
+
import os
|
|
2
|
+
|
|
3
|
+
|
|
4
|
+
def get_command_path(executable: str) -> str:
    """
    Resolve the absolute path of a backend executable script.

    Args:
        executable (str): name of the backend script, e.g. mpiexec.py or serial.py

    Returns:
        str: absolute path to the script inside the sibling backend/ directory
    """
    backend_directory = os.path.join(__file__, "..", "..", "backend")
    return os.path.abspath(os.path.join(backend_directory, executable))
|
|
@@ -0,0 +1,116 @@
|
|
|
1
|
+
import os
|
|
2
|
+
from typing import Any, List, Optional, Tuple
|
|
3
|
+
|
|
4
|
+
import cloudpickle
|
|
5
|
+
import h5py
|
|
6
|
+
import numpy as np
|
|
7
|
+
|
|
8
|
+
# Mapping from the keys used in task dictionaries to the HDF5 group names
# under which the corresponding values are stored.
group_dict = dict(
    fn="function",
    args="input_args",
    kwargs="input_kwargs",
    output="output",
    runtime="runtime",
    queue_id="queue_id",
)
|
|
16
|
+
|
|
17
|
+
|
|
18
|
+
def dump(file_name: Optional[str], data_dict: dict) -> None:
    """
    Dump data dictionary into HDF5 file

    Args:
        file_name (str, optional): file name of the HDF5 file as absolute path;
            None disables writing entirely.
        data_dict (dict): dictionary containing the python function to be executed {"fn": ..., "args": (), "kwargs": {}}
    """
    if file_name is None:
        return
    with h5py.File(file_name, "a") as hdf:
        for key, value in data_dict.items():
            # only keys with a known HDF5 group name are persisted
            group = group_dict.get(key)
            if group is not None:
                hdf.create_dataset(
                    name="/" + group,
                    data=np.void(cloudpickle.dumps(value)),
                )
|
|
34
|
+
|
|
35
|
+
|
|
36
|
+
def load(file_name: str) -> dict:
    """
    Load data dictionary from HDF5 file

    Args:
        file_name (str): file name of the HDF5 file as absolute path

    Returns:
        dict: dictionary containing the python function to be executed {"fn": ..., "args": (), "kwargs": {}}

    Raises:
        TypeError: if no function is stored in the file
    """
    with h5py.File(file_name, "r") as hdf:
        if "function" not in hdf:
            raise TypeError("Function not found in HDF5 file.")
        result = {"fn": cloudpickle.loads(np.void(hdf["/function"]))}
        result["args"] = (
            cloudpickle.loads(np.void(hdf["/input_args"])) if "input_args" in hdf else ()
        )
        result["kwargs"] = (
            cloudpickle.loads(np.void(hdf["/input_kwargs"])) if "input_kwargs" in hdf else {}
        )
        return result
|
|
61
|
+
|
|
62
|
+
|
|
63
|
+
def get_output(file_name: str) -> Tuple[bool, Any]:
    """
    Check if output is available in the HDF5 file

    Args:
        file_name (str): file name of the HDF5 file as absolute path

    Returns:
        Tuple[bool, object]: boolean flag indicating if output is available and the output object itself
    """
    with h5py.File(file_name, "r") as hdf:
        if "output" not in hdf:
            return False, None
        return True, cloudpickle.loads(np.void(hdf["/output"]))
|
|
78
|
+
|
|
79
|
+
|
|
80
|
+
def get_runtime(file_name: str) -> float:
    """
    Get run time from HDF5 file

    Args:
        file_name (str): file name of the HDF5 file as absolute path

    Returns:
        float: run time from the execution of the python function, 0.0 when
            no runtime dataset is present
    """
    with h5py.File(file_name, "r") as hdf:
        return cloudpickle.loads(np.void(hdf["/runtime"])) if "runtime" in hdf else 0.0
|
|
95
|
+
|
|
96
|
+
|
|
97
|
+
def get_queue_id(file_name: Optional[str]) -> Optional[int]:
    """
    Read the queuing-system job id stored in an HDF5 file.

    Args:
        file_name (str, optional): file name of the HDF5 file as absolute path;
            None is allowed and simply yields None.

    Returns:
        int, optional: the stored queue id, or None when unavailable
    """
    if file_name is None:
        return None
    with h5py.File(file_name, "r") as hdf:
        if "queue_id" in hdf:
            return cloudpickle.loads(np.void(hdf["/queue_id"]))
    return None
|
|
103
|
+
|
|
104
|
+
|
|
105
|
+
def get_cache_data(cache_directory: str) -> List[dict]:
    """
    Collect the content of all HDF5 files in a cache directory.

    Args:
        cache_directory (str): directory containing the HDF5 cache files

    Returns:
        List[dict]: one dictionary per file with the stored datasets plus the
            file name under the key "filename"
    """
    results = []
    for entry in os.listdir(cache_directory):
        with h5py.File(os.path.join(cache_directory, entry), "r") as hdf:
            content = {
                key: cloudpickle.loads(np.void(hdf["/" + key]))
                for key in group_dict.values()
                if key in hdf
            }
        content["filename"] = entry
        results.append(content)
    return results
|
|
@@ -0,0 +1,201 @@
|
|
|
1
|
+
import inspect
|
|
2
|
+
import multiprocessing
|
|
3
|
+
import os.path
|
|
4
|
+
from concurrent.futures import Executor
|
|
5
|
+
from typing import Callable, List, Optional
|
|
6
|
+
|
|
7
|
+
|
|
8
|
+
def check_oversubscribe(oversubscribe: bool) -> None:
    """
    Check if oversubscribe is True and raise a ValueError if it is.
    """
    if oversubscribe:
        # Fix: the original concatenated the two sentences without a space
        # ("...backend.Please use...").
        raise ValueError(
            "Oversubscribing is not supported for the executorlib.flux.PyFLuxExecutor backend. "
            "Please use oversubscribe=False instead of oversubscribe=True."
        )
|
|
17
|
+
|
|
18
|
+
|
|
19
|
+
def check_command_line_argument_lst(command_line_argument_lst: List[str]) -> None:
    """Raise a ValueError when additional command line arguments are supplied, as the SLURM backend does not support them."""
    if command_line_argument_lst:
        raise ValueError(
            "The command_line_argument_lst parameter is not supported for the SLURM backend."
        )
|
|
27
|
+
|
|
28
|
+
|
|
29
|
+
def check_gpus_per_worker(gpus_per_worker: int) -> None:
    """
    Check if gpus_per_worker is not 0 and raise a TypeError if it is.
    """
    if gpus_per_worker != 0:
        # Fix: the original concatenated the two sentences without a space
        # ("...backend.Please use...").
        raise TypeError(
            "GPU assignment is not supported for the executorlib.mpi.PyMPIExecutor backend. "
            f"Please use gpus_per_worker=0 instead of gpus_per_worker={gpus_per_worker}."
        )
|
|
40
|
+
|
|
41
|
+
|
|
42
|
+
def check_executor(executor: Executor) -> None:
    """Raise a ValueError when an executor instance is given, as only the flux backend accepts one."""
    if executor is None:
        return
    raise ValueError(
        "The executor parameter is only supported for the flux framework backend."
    )
|
|
50
|
+
|
|
51
|
+
|
|
52
|
+
def check_nested_flux_executor(nested_flux_executor: bool) -> None:
    """Raise a ValueError when nested flux executors are requested outside the flux backend."""
    if not nested_flux_executor:
        return
    raise ValueError(
        "The nested_flux_executor parameter is only supported for the flux framework backend."
    )
|
|
60
|
+
|
|
61
|
+
|
|
62
|
+
def check_resource_dict(function: Callable) -> None:
    """Raise a ValueError when the callable declares a 'resource_dict' parameter, a name executorlib reserves for internal use."""
    parameter_names = inspect.signature(function).parameters.keys()
    if "resource_dict" in parameter_names:
        raise ValueError(
            "The parameter resource_dict is used internally in executorlib, "
            "so it cannot be used as a parameter in the submitted functions."
        )
|
|
71
|
+
|
|
72
|
+
|
|
73
|
+
def check_resource_dict_is_empty(resource_dict: dict) -> None:
    """Raise a ValueError when per-task resource requirements are given while block allocation is enabled."""
    if resource_dict:
        raise ValueError(
            "When block_allocation is enabled, the resource requirements have to be defined on the executor level."
        )
|
|
81
|
+
|
|
82
|
+
|
|
83
|
+
def check_refresh_rate(refresh_rate: float) -> None:
    """Raise a ValueError when a non-default refresh rate is given, as it only applies with dependency tracking enabled."""
    if refresh_rate == 0.01:
        return
    raise ValueError(
        "The sleep_interval parameter is only used when disable_dependencies=False."
    )
|
|
91
|
+
|
|
92
|
+
|
|
93
|
+
def check_plot_dependency_graph(plot_dependency_graph: bool) -> None:
    """Raise a ValueError when graph plotting is requested while dependency tracking is disabled."""
    if not plot_dependency_graph:
        return
    raise ValueError(
        "The plot_dependency_graph parameter is only used when disable_dependencies=False."
    )
|
|
101
|
+
|
|
102
|
+
|
|
103
|
+
def check_pmi(backend: Optional[str], pmi: Optional[str]) -> None:
    """Validate the pmi parameter against the selected backend and raise a ValueError for unsupported combinations."""
    if backend is None:
        # without a backend there is nothing to validate against
        return
    if backend != "flux_allocation":
        if pmi is not None:
            raise ValueError(
                "The pmi parameter is currently only implemented for flux."
            )
    elif pmi not in ["pmix", "pmi1", "pmi2", None]:
        raise ValueError(
            "The pmi parameter supports [pmix, pmi1, pmi2], but not: " + str(pmi)
        )
|
|
116
|
+
|
|
117
|
+
|
|
118
|
+
def check_init_function(
    block_allocation: bool, init_function: Optional[Callable]
) -> None:
    """
    Check if block_allocation is False and init_function is not None, and raise a ValueError if it is.
    """
    if not block_allocation and init_function is not None:
        # Fix: the original raised ValueError("") with an empty, unhelpful message.
        raise ValueError(
            "The init_function parameter is only supported when block_allocation=True."
        )
|
|
126
|
+
|
|
127
|
+
|
|
128
|
+
def check_max_workers_and_cores(
    max_workers: Optional[int], max_cores: Optional[int]
) -> None:
    """Raise a ValueError when worker or core limits are given, as the pysqa based backend controls neither."""
    checks = (
        (max_workers, "The number of workers cannot be controlled with the pysqa based backend."),
        (max_cores, "The number of cores cannot be controlled with the pysqa based backend."),
    )
    for value, message in checks:
        if value is not None:
            raise ValueError(message)
|
|
139
|
+
|
|
140
|
+
|
|
141
|
+
def check_hostname_localhost(hostname_localhost: Optional[bool]) -> None:
    """Raise a ValueError when hostname_localhost is set, as the pysqa based backend cannot use it."""
    if hostname_localhost is None:
        return
    raise ValueError(
        "The option to connect to hosts based on their hostname is not available with the pysqa based backend."
    )
|
|
146
|
+
|
|
147
|
+
|
|
148
|
+
def check_flux_executor_pmi_mode(flux_executor_pmi_mode: Optional[str]) -> None:
    """Raise a ValueError when a flux pmi mode is given, as the pysqa based backend does not support it."""
    if flux_executor_pmi_mode is None:
        return
    raise ValueError(
        "The option to specify the flux pmi mode is not available with the pysqa based backend."
    )
|
|
153
|
+
|
|
154
|
+
|
|
155
|
+
def check_flux_log_files(flux_log_files: Optional[bool]) -> None:
    """Raise a ValueError when flux log files are requested outside the flux backend."""
    if not flux_log_files:
        return
    raise ValueError(
        "The flux_log_files parameter is only supported for the flux framework backend."
    )
|
|
163
|
+
|
|
164
|
+
|
|
165
|
+
def check_pysqa_config_directory(pysqa_config_directory: Optional[str]) -> None:
    """Raise a ValueError when a pysqa configuration directory is given to a non-pysqa backend."""
    if pysqa_config_directory is None:
        return
    raise ValueError(
        "pysqa_config_directory parameter is only supported for pysqa backend."
    )
|
|
173
|
+
|
|
174
|
+
|
|
175
|
+
def validate_number_of_cores(
    max_cores: Optional[int] = None,
    max_workers: Optional[int] = None,
    cores_per_worker: Optional[int] = 1,
    set_local_cores: bool = False,
) -> int:
    """
    Derive the number of workers from the given resource limits.

    Args:
        max_cores (int, optional): total number of cores available
        max_workers (int, optional): explicit number of workers; takes precedence
        cores_per_worker (int, optional): cores each worker occupies
        set_local_cores (bool, optional): fall back to the local CPU count when
            neither limit is given

    Returns:
        int: the number of workers

    Raises:
        ValueError: when neither max_cores nor max_workers is defined and the
            local fallback is disabled
    """
    # An explicit worker count always wins (the original's elif branch).
    if max_workers is not None:
        return int(max_workers)
    if max_cores is not None and cores_per_worker is not None:
        return int(max_cores / cores_per_worker)
    if max_cores is None and not set_local_cores:
        raise ValueError(
            "Block allocation requires a fixed set of computational resources. Neither max_cores nor max_workers are defined."
        )
    return multiprocessing.cpu_count()
|
|
195
|
+
|
|
196
|
+
|
|
197
|
+
def check_file_exists(file_name: Optional[str]):
    """Raise a ValueError when the cache file name is unset or the file is missing from the file system."""
    if file_name is None:
        raise ValueError("file_name is not set.")
    if os.path.exists(file_name):
        return
    raise ValueError("file_name is not written to the file system.")
|
|
File without changes
|
|
@@ -0,0 +1,98 @@
|
|
|
1
|
+
import inspect
|
|
2
|
+
from typing import Any, Callable, Optional
|
|
3
|
+
|
|
4
|
+
|
|
5
|
+
def call_funct(
    input_dict: dict, funct: Optional[Callable] = None, memory: Optional[dict] = None
) -> Any:
    """
    Evaluate the function described by a task dictionary.

    Args:
        input_dict (dict): dictionary containing the function 'fn', its arguments 'args' and keyword arguments 'kwargs'
        funct (Callable, optional): evaluator applied to (fn, *args, **kwargs);
            defaults to calling fn directly
        memory (dict, optional): previously stored variables that are injected as
            additional keyword arguments when fn accepts them

    Returns:
        Any: result of the function call
    """
    if funct is None:
        # default evaluator: call fn with the remaining arguments
        def funct(*args, **kwargs):
            return args[0].__call__(*args[1:], **kwargs)

    accepted_args = inspect.getfullargspec(input_dict["fn"]).args
    if memory is not None:
        # pull missing keyword arguments out of the stored memory
        input_dict["kwargs"].update(
            _update_dict_delta(
                dict_input=memory,
                dict_output=input_dict["kwargs"],
                keys_possible_lst=accepted_args,
            )
        )
    return funct(input_dict["fn"], *input_dict["args"], **input_dict["kwargs"])
|
|
34
|
+
|
|
35
|
+
|
|
36
|
+
def parse_arguments(argument_lst: list[str]) -> dict:
    """
    Parse the command line arguments of the backend process.

    Args:
        argument_lst (list): list of arguments as strings

    Returns:
        dict: parsed values for the ZMQ port ("zmqport") and the host name
            ("host", defaulting to "localhost")
    """
    flag_map = {
        "zmqport": "--zmqport",
        "host": "--host",
    }
    return update_default_dict_from_arguments(
        argument_lst=argument_lst,
        argument_dict=flag_map,
        default_dict={"host": "localhost"},
    )
|
|
54
|
+
|
|
55
|
+
|
|
56
|
+
def update_default_dict_from_arguments(
    argument_lst: list[str], argument_dict: dict, default_dict: dict
) -> dict:
    """
    Overwrite default values with the token that follows each recognised command line flag.

    Args:
        argument_lst (list[str]): List of arguments as strings
        argument_dict (dict): Dictionary mapping argument names to their corresponding command line flags
        default_dict (dict): Default dictionary to be updated (modified in place)

    Returns:
        dict: Updated default dictionary (the same object as default_dict)
    """
    for key, flag in argument_dict.items():
        if flag in argument_lst:
            # the value is the token right after the flag
            default_dict[key] = argument_lst[argument_lst.index(flag) + 1]
    return default_dict
|
|
78
|
+
|
|
79
|
+
|
|
80
|
+
def _update_dict_delta(
    dict_input: dict, dict_output: dict, keys_possible_lst: list[str]
) -> dict:
    """
    Select the entries of dict_input whose keys are allowed and not already present in dict_output.

    Args:
        dict_input (dict): Input dictionary
        dict_output (dict): Output dictionary whose existing keys are preserved
        keys_possible_lst (list[str]): List of possible keys to be updated

    Returns:
        dict: the selected key/value pairs
    """
    selected = {}
    for key, value in dict_input.items():
        if key in keys_possible_lst and key not in dict_output:
            selected[key] = value
    return selected