executorlib-1.2.1.tar.gz → executorlib-1.4.0.tar.gz

This diff compares the contents of two publicly released versions of the package, as published to one of the supported registries. It is provided for informational purposes only.
Files changed (46)
  1. {executorlib-1.2.1 → executorlib-1.4.0}/PKG-INFO +3 -3
  2. {executorlib-1.2.1 → executorlib-1.4.0}/executorlib/__init__.py +2 -0
  3. {executorlib-1.2.1 → executorlib-1.4.0}/executorlib/_version.py +2 -2
  4. {executorlib-1.2.1 → executorlib-1.4.0}/executorlib/executor/base.py +11 -1
  5. {executorlib-1.2.1 → executorlib-1.4.0}/executorlib/executor/flux.py +24 -26
  6. {executorlib-1.2.1 → executorlib-1.4.0}/executorlib/executor/single.py +13 -14
  7. {executorlib-1.2.1 → executorlib-1.4.0}/executorlib/executor/slurm.py +24 -26
  8. {executorlib-1.2.1 → executorlib-1.4.0}/pyproject.toml +3 -3
  9. {executorlib-1.2.1 → executorlib-1.4.0}/.gitignore +0 -0
  10. {executorlib-1.2.1 → executorlib-1.4.0}/LICENSE +0 -0
  11. {executorlib-1.2.1 → executorlib-1.4.0}/README.md +0 -0
  12. {executorlib-1.2.1 → executorlib-1.4.0}/executorlib/api.py +0 -0
  13. {executorlib-1.2.1 → executorlib-1.4.0}/executorlib/backend/__init__.py +0 -0
  14. {executorlib-1.2.1 → executorlib-1.4.0}/executorlib/backend/cache_parallel.py +0 -0
  15. {executorlib-1.2.1 → executorlib-1.4.0}/executorlib/backend/cache_serial.py +0 -0
  16. {executorlib-1.2.1 → executorlib-1.4.0}/executorlib/backend/interactive_parallel.py +0 -0
  17. {executorlib-1.2.1 → executorlib-1.4.0}/executorlib/backend/interactive_serial.py +0 -0
  18. {executorlib-1.2.1 → executorlib-1.4.0}/executorlib/executor/__init__.py +0 -0
  19. {executorlib-1.2.1 → executorlib-1.4.0}/executorlib/standalone/__init__.py +0 -0
  20. {executorlib-1.2.1 → executorlib-1.4.0}/executorlib/standalone/cache.py +0 -0
  21. {executorlib-1.2.1 → executorlib-1.4.0}/executorlib/standalone/command.py +0 -0
  22. {executorlib-1.2.1 → executorlib-1.4.0}/executorlib/standalone/inputcheck.py +0 -0
  23. {executorlib-1.2.1 → executorlib-1.4.0}/executorlib/standalone/interactive/__init__.py +0 -0
  24. {executorlib-1.2.1 → executorlib-1.4.0}/executorlib/standalone/interactive/arguments.py +0 -0
  25. {executorlib-1.2.1 → executorlib-1.4.0}/executorlib/standalone/interactive/backend.py +0 -0
  26. {executorlib-1.2.1 → executorlib-1.4.0}/executorlib/standalone/interactive/communication.py +0 -0
  27. {executorlib-1.2.1 → executorlib-1.4.0}/executorlib/standalone/interactive/spawner.py +0 -0
  28. {executorlib-1.2.1 → executorlib-1.4.0}/executorlib/standalone/plot.py +0 -0
  29. {executorlib-1.2.1 → executorlib-1.4.0}/executorlib/standalone/queue.py +0 -0
  30. {executorlib-1.2.1 → executorlib-1.4.0}/executorlib/standalone/serialize.py +0 -0
  31. {executorlib-1.2.1 → executorlib-1.4.0}/executorlib/task_scheduler/__init__.py +0 -0
  32. {executorlib-1.2.1 → executorlib-1.4.0}/executorlib/task_scheduler/base.py +0 -0
  33. {executorlib-1.2.1 → executorlib-1.4.0}/executorlib/task_scheduler/file/__init__.py +0 -0
  34. {executorlib-1.2.1 → executorlib-1.4.0}/executorlib/task_scheduler/file/backend.py +0 -0
  35. {executorlib-1.2.1 → executorlib-1.4.0}/executorlib/task_scheduler/file/hdf.py +0 -0
  36. {executorlib-1.2.1 → executorlib-1.4.0}/executorlib/task_scheduler/file/queue_spawner.py +0 -0
  37. {executorlib-1.2.1 → executorlib-1.4.0}/executorlib/task_scheduler/file/shared.py +0 -0
  38. {executorlib-1.2.1 → executorlib-1.4.0}/executorlib/task_scheduler/file/subprocess_spawner.py +0 -0
  39. {executorlib-1.2.1 → executorlib-1.4.0}/executorlib/task_scheduler/file/task_scheduler.py +0 -0
  40. {executorlib-1.2.1 → executorlib-1.4.0}/executorlib/task_scheduler/interactive/__init__.py +0 -0
  41. {executorlib-1.2.1 → executorlib-1.4.0}/executorlib/task_scheduler/interactive/blockallocation.py +0 -0
  42. {executorlib-1.2.1 → executorlib-1.4.0}/executorlib/task_scheduler/interactive/dependency.py +0 -0
  43. {executorlib-1.2.1 → executorlib-1.4.0}/executorlib/task_scheduler/interactive/fluxspawner.py +0 -0
  44. {executorlib-1.2.1 → executorlib-1.4.0}/executorlib/task_scheduler/interactive/onetoone.py +0 -0
  45. {executorlib-1.2.1 → executorlib-1.4.0}/executorlib/task_scheduler/interactive/shared.py +0 -0
  46. {executorlib-1.2.1 → executorlib-1.4.0}/executorlib/task_scheduler/interactive/slurmspawner.py +0 -0
{executorlib-1.2.1 → executorlib-1.4.0}/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: executorlib
-Version: 1.2.1
+Version: 1.4.0
 Summary: Up-scale python functions for high performance computing (HPC) with executorlib.
 Project-URL: Homepage, https://github.com/pyiron/executorlib
 Project-URL: Documentation, https://executorlib.readthedocs.io
@@ -56,12 +56,12 @@ Requires-Dist: ipython<=9.0.2,>=7.33.0; extra == 'all'
 Requires-Dist: mpi4py<=4.0.1,>=3.1.4; extra == 'all'
 Requires-Dist: networkx<=3.4.2,>=2.8.8; extra == 'all'
 Requires-Dist: pygraphviz<=1.14,>=1.10; extra == 'all'
-Requires-Dist: pysqa==0.2.4; extra == 'all'
+Requires-Dist: pysqa==0.2.5; extra == 'all'
 Provides-Extra: cache
 Requires-Dist: h5py<=3.13.0,>=3.6.0; extra == 'cache'
 Provides-Extra: cluster
 Requires-Dist: h5py<=3.13.0,>=3.6.0; extra == 'cluster'
-Requires-Dist: pysqa==0.2.4; extra == 'cluster'
+Requires-Dist: pysqa==0.2.5; extra == 'cluster'
 Provides-Extra: graph
 Requires-Dist: networkx<=3.4.2,>=2.8.8; extra == 'graph'
 Requires-Dist: pygraphviz<=1.14,>=1.10; extra == 'graph'
{executorlib-1.2.1 → executorlib-1.4.0}/executorlib/__init__.py
@@ -1,3 +1,4 @@
+from executorlib.executor.base import BaseExecutor
 from executorlib.executor.flux import (
     FluxClusterExecutor,
     FluxJobExecutor,
@@ -13,6 +14,7 @@ from . import _version

 __all__: list[str] = [
     "get_cache_data",
+    "BaseExecutor",
     "FluxJobExecutor",
     "FluxClusterExecutor",
     "SingleNodeExecutor",

{executorlib-1.2.1 → executorlib-1.4.0}/executorlib/_version.py
@@ -17,5 +17,5 @@ __version__: str
 __version_tuple__: VERSION_TUPLE
 version_tuple: VERSION_TUPLE

-__version__ = version = '1.2.1'
-__version_tuple__ = version_tuple = (1, 2, 1)
+__version__ = version = '1.4.0'
+__version_tuple__ = version_tuple = (1, 4, 0)
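
Beyond the version bump, the `__init__.py` hunk above makes `BaseExecutor` part of the public top-level API. A minimal sketch of what that enables, assuming executorlib 1.4.0 and default constructor arguments (the `run_square` helper is illustrative, not part of the package):

```python
# Sketch only: assumes executorlib >= 1.4.0, where BaseExecutor is re-exported
# from the package root as shown in the hunk above.
from executorlib import BaseExecutor, SingleNodeExecutor


def run_square(pool: BaseExecutor) -> int:
    # Any executorlib executor (single-node, SLURM, or flux based) should be
    # accepted here, since they all subclass BaseExecutor.
    return pool.submit(lambda x: x**2, 4).result()


with SingleNodeExecutor() as exe:
    assert isinstance(exe, BaseExecutor)
    print(run_square(exe))  # 16
```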
{executorlib-1.2.1 → executorlib-1.4.0}/executorlib/executor/base.py
@@ -1,4 +1,5 @@
 import queue
+from abc import ABC
 from concurrent.futures import (
     Executor as FutureExecutor,
 )
@@ -10,7 +11,7 @@ from typing import Callable, Optional
 from executorlib.task_scheduler.base import TaskSchedulerBase


-class ExecutorBase(FutureExecutor):
+class BaseExecutor(FutureExecutor, ABC):
     """
     Interface class for the executor.

@@ -111,6 +112,15 @@ class ExecutorBase(FutureExecutor):
         """
         return len(self._task_scheduler)

+    def __bool__(self):
+        """
+        Overwrite length to always return True
+
+        Returns:
+            bool: Always return True
+        """
+        return True
+
     def __exit__(self, *args, **kwargs) -> None:
         """
         Exit method called when exiting the context manager.
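
Two things change in `base.py`: the renamed class is now an abstract base class (`ABC`), and it gains a `__bool__`. The `__bool__` matters because the class also defines `__len__` (visible in the context lines above): a Python object that defines `__len__` but not `__bool__` is falsy whenever its length is zero, so an idle executor with an empty task queue would fail an `if executor:` check. A minimal stand-in sketch of that pitfall (not executorlib code):

```python
# Stand-in classes demonstrating the truthiness pitfall; not executorlib code.
class LenOnly:
    def __len__(self) -> int:
        return 0  # e.g. no tasks currently queued


class LenAndBool(LenOnly):
    def __bool__(self) -> bool:
        return True  # truthiness no longer tied to queue length


print(bool(LenOnly()))     # False -- `if executor:` would treat it as absent
print(bool(LenAndBool()))  # True
```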
{executorlib-1.2.1 → executorlib-1.4.0}/executorlib/executor/flux.py
@@ -1,6 +1,6 @@
 from typing import Callable, Optional, Union

-from executorlib.executor.base import ExecutorBase
+from executorlib.executor.base import BaseExecutor
 from executorlib.standalone.inputcheck import (
     check_command_line_argument_lst,
     check_init_function,
@@ -17,13 +17,13 @@ from executorlib.task_scheduler.interactive.dependency import DependencyTaskSche
 from executorlib.task_scheduler.interactive.onetoone import OneProcessTaskScheduler


-class FluxJobExecutor(ExecutorBase):
+class FluxJobExecutor(BaseExecutor):
     """
-    The executorlib.Executor leverages either the message passing interface (MPI), the SLURM workload manager or
+    The executorlib.FluxJobExecutor leverages either the message passing interface (MPI), the SLURM workload manager or
     preferable the flux framework for distributing python functions within a given resource allocation. In contrast to
-    the mpi4py.futures.MPIPoolExecutor the executorlib.Executor can be executed in a serial python process and does not
-    require the python script to be executed with MPI. It is even possible to execute the executorlib.Executor directly
-    in an interactive Jupyter notebook.
+    the mpi4py.futures.MPIPoolExecutor the executorlib.FluxJobExecutor can be executed in a serial python process and
+    does not require the python script to be executed with MPI. It is even possible to execute the
+    executorlib.FluxJobExecutor directly in an interactive Jupyter notebook.

     Args:
         max_workers (int): for backwards compatibility with the standard library, max_workers also defines the number of
@@ -65,7 +65,7 @@ class FluxJobExecutor(ExecutorBase):
     Examples:
         ```
         >>> import numpy as np
-        >>> from executorlib.executor.flux import FluxJobExecutor
+        >>> from executorlib import FluxJobExecutor
         >>>
         >>> def calc(i, j, k):
         >>>     from mpi4py import MPI
@@ -102,12 +102,11 @@ class FluxJobExecutor(ExecutorBase):
         plot_dependency_graph_filename: Optional[str] = None,
     ):
         """
-        Instead of returning a executorlib.Executor object this function returns either a executorlib.mpi.PyMPIExecutor,
-        executorlib.slurm.PySlurmExecutor or executorlib.flux.PyFluxExecutor depending on which backend is available. The
-        executorlib.flux.PyFluxExecutor is the preferred choice while the executorlib.mpi.PyMPIExecutor is primarily used
-        for development and testing. The executorlib.flux.PyFluxExecutor requires flux-core from the flux-framework to be
-        installed and in addition flux-sched to enable GPU scheduling. Finally, the executorlib.slurm.PySlurmExecutor
-        requires the SLURM workload manager to be installed on the system.
+        The executorlib.FluxJobExecutor leverages either the message passing interface (MPI), the SLURM workload manager
+        or preferable the flux framework for distributing python functions within a given resource allocation. In
+        contrast to the mpi4py.futures.MPIPoolExecutor the executorlib.FluxJobExecutor can be executed in a serial
+        python process and does not require the python script to be executed with MPI. It is even possible to execute
+        the executorlib.FluxJobExecutor directly in an interactive Jupyter notebook.

         Args:
             max_workers (int): for backwards compatibility with the standard library, max_workers also defines the
@@ -202,13 +201,13 @@ class FluxJobExecutor(ExecutorBase):
         )


-class FluxClusterExecutor(ExecutorBase):
+class FluxClusterExecutor(BaseExecutor):
     """
-    The executorlib.Executor leverages either the message passing interface (MPI), the SLURM workload manager or
-    preferable the flux framework for distributing python functions within a given resource allocation. In contrast to
-    the mpi4py.futures.MPIPoolExecutor the executorlib.Executor can be executed in a serial python process and does not
-    require the python script to be executed with MPI. It is even possible to execute the executorlib.Executor directly
-    in an interactive Jupyter notebook.
+    The executorlib.FluxClusterExecutor leverages either the message passing interface (MPI), the SLURM workload manager
+    or preferable the flux framework for distributing python functions within a given resource allocation. In contrast
+    to the mpi4py.futures.MPIPoolExecutor the executorlib.FluxClusterExecutor can be executed in a serial python process
+    and does not require the python script to be executed with MPI. It is even possible to execute the
+    executorlib.FluxClusterExecutor directly in an interactive Jupyter notebook.

     Args:
         max_workers (int): for backwards compatibility with the standard library, max_workers also defines the number of
@@ -246,7 +245,7 @@ class FluxClusterExecutor(ExecutorBase):
     Examples:
         ```
         >>> import numpy as np
-        >>> from executorlib.executor.flux import FluxClusterExecutor
+        >>> from executorlib import FluxClusterExecutor
         >>>
         >>> def calc(i, j, k):
         >>>     from mpi4py import MPI
@@ -280,12 +279,11 @@ class FluxClusterExecutor(ExecutorBase):
         plot_dependency_graph_filename: Optional[str] = None,
     ):
         """
-        Instead of returning a executorlib.Executor object this function returns either a executorlib.mpi.PyMPIExecutor,
-        executorlib.slurm.PySlurmExecutor or executorlib.flux.PyFluxExecutor depending on which backend is available. The
-        executorlib.flux.PyFluxExecutor is the preferred choice while the executorlib.mpi.PyMPIExecutor is primarily used
-        for development and testing. The executorlib.flux.PyFluxExecutor requires flux-core from the flux-framework to be
-        installed and in addition flux-sched to enable GPU scheduling. Finally, the executorlib.slurm.PySlurmExecutor
-        requires the SLURM workload manager to be installed on the system.
+        The executorlib.FluxClusterExecutor leverages either the message passing interface (MPI), the SLURM workload
+        manager or preferable the flux framework for distributing python functions within a given resource allocation.
+        In contrast to the mpi4py.futures.MPIPoolExecutor the executorlib.FluxClusterExecutor can be executed in a
+        serial python process and does not require the python script to be executed with MPI. It is even possible to
+        execute the executorlib.FluxClusterExecutor directly in an interactive Jupyter notebook.

         Args:
             max_workers (int): for backwards compatibility with the standard library, max_workers also defines the
{executorlib-1.2.1 → executorlib-1.4.0}/executorlib/executor/single.py
@@ -1,6 +1,6 @@
 from typing import Callable, Optional, Union

-from executorlib.executor.base import ExecutorBase
+from executorlib.executor.base import BaseExecutor
 from executorlib.standalone.inputcheck import (
     check_command_line_argument_lst,
     check_gpus_per_worker,
@@ -17,13 +17,13 @@ from executorlib.task_scheduler.interactive.dependency import DependencyTaskSche
 from executorlib.task_scheduler.interactive.onetoone import OneProcessTaskScheduler


-class SingleNodeExecutor(ExecutorBase):
+class SingleNodeExecutor(BaseExecutor):
     """
-    The executorlib.Executor leverages either the message passing interface (MPI), the SLURM workload manager or
-    preferable the flux framework for distributing python functions within a given resource allocation. In contrast to
-    the mpi4py.futures.MPIPoolExecutor the executorlib.Executor can be executed in a serial python process and does not
-    require the python script to be executed with MPI. It is even possible to execute the executorlib.Executor directly
-    in an interactive Jupyter notebook.
+    The executorlib.SingleNodeExecutor leverages either the message passing interface (MPI), the SLURM workload manager
+    or preferable the flux framework for distributing python functions within a given resource allocation. In contrast
+    to the mpi4py.futures.MPIPoolExecutor the executorlib.SingleNodeExecutor can be executed in a serial python process
+    and does not require the python script to be executed with MPI. It is even possible to execute the
+    executorlib.SingleNodeExecutor directly in an interactive Jupyter notebook.

     Args:
         max_workers (int): for backwards compatibility with the standard library, max_workers also defines the number of
@@ -60,7 +60,7 @@ class SingleNodeExecutor(ExecutorBase):
     Examples:
         ```
         >>> import numpy as np
-        >>> from executorlib.executor.single import SingleNodeExecutor
+        >>> from executorlib import SingleNodeExecutor
         >>>
         >>> def calc(i, j, k):
         >>>     from mpi4py import MPI
@@ -93,12 +93,11 @@ class SingleNodeExecutor(ExecutorBase):
         plot_dependency_graph_filename: Optional[str] = None,
     ):
         """
-        Instead of returning a executorlib.Executor object this function returns either a executorlib.mpi.PyMPIExecutor,
-        executorlib.slurm.PySlurmExecutor or executorlib.flux.PyFluxExecutor depending on which backend is available. The
-        executorlib.flux.PyFluxExecutor is the preferred choice while the executorlib.mpi.PyMPIExecutor is primarily used
-        for development and testing. The executorlib.flux.PyFluxExecutor requires flux-core from the flux-framework to be
-        installed and in addition flux-sched to enable GPU scheduling. Finally, the executorlib.slurm.PySlurmExecutor
-        requires the SLURM workload manager to be installed on the system.
+        The executorlib.SingleNodeExecutor leverages either the message passing interface (MPI), the SLURM workload
+        manager or preferable the flux framework for distributing python functions within a given resource allocation.
+        In contrast to the mpi4py.futures.MPIPoolExecutor the executorlib.SingleNodeExecutor can be executed in a serial
+        python process and does not require the python script to be executed with MPI. It is even possible to execute
+        the executorlib.SingleNodeExecutor directly in an interactive Jupyter notebook.

         Args:
             max_workers (int): for backwards compatibility with the standard library, max_workers also defines the
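
The docstring examples in all four executor modules now use the shorter top-level import. A condensed, hedged version of that usage pattern, assuming default constructor arguments and a plain Python function in place of the MPI-parallel `calc()` shown in the docstrings:

```python
# Condensed from the docstring examples above; assumes executorlib 1.4.0
# defaults and a serial function instead of the mpi4py-based calc().
from executorlib import SingleNodeExecutor


def calc(i, j, k):
    return i + j + k


with SingleNodeExecutor() as exe:
    # submit() follows the concurrent.futures.Executor interface and
    # returns a Future.
    fut = exe.submit(calc, 1, j=2, k=3)
    print(fut.result())  # 6
```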
{executorlib-1.2.1 → executorlib-1.4.0}/executorlib/executor/slurm.py
@@ -1,6 +1,6 @@
 from typing import Callable, Optional, Union

-from executorlib.executor.base import ExecutorBase
+from executorlib.executor.base import BaseExecutor
 from executorlib.standalone.inputcheck import (
     check_init_function,
     check_plot_dependency_graph,
@@ -18,13 +18,13 @@ from executorlib.task_scheduler.interactive.slurmspawner import (
 )


-class SlurmClusterExecutor(ExecutorBase):
+class SlurmClusterExecutor(BaseExecutor):
     """
-    The executorlib.Executor leverages either the message passing interface (MPI), the SLURM workload manager or
-    preferable the flux framework for distributing python functions within a given resource allocation. In contrast to
-    the mpi4py.futures.MPIPoolExecutor the executorlib.Executor can be executed in a serial python process and does not
-    require the python script to be executed with MPI. It is even possible to execute the executorlib.Executor directly
-    in an interactive Jupyter notebook.
+    The executorlib.SlurmClusterExecutor leverages either the message passing interface (MPI), the SLURM workload
+    manager or preferable the flux framework for distributing python functions within a given resource allocation. In
+    contrast to the mpi4py.futures.MPIPoolExecutor the executorlib.SlurmClusterExecutor can be executed in a serial
+    python process and does not require the python script to be executed with MPI. It is even possible to execute the
+    executorlib.SlurmClusterExecutor directly in an interactive Jupyter notebook.

     Args:
         max_workers (int): for backwards compatibility with the standard library, max_workers also defines the number of
@@ -62,7 +62,7 @@ class SlurmClusterExecutor(ExecutorBase):
     Examples:
         ```
         >>> import numpy as np
-        >>> from executorlib.executor.slurm import SlurmClusterExecutor
+        >>> from executorlib import SlurmClusterExecutor
         >>>
         >>> def calc(i, j, k):
         >>>     from mpi4py import MPI
@@ -96,12 +96,11 @@ class SlurmClusterExecutor(ExecutorBase):
         plot_dependency_graph_filename: Optional[str] = None,
     ):
         """
-        Instead of returning a executorlib.Executor object this function returns either a executorlib.mpi.PyMPIExecutor,
-        executorlib.slurm.PySlurmExecutor or executorlib.flux.PyFluxExecutor depending on which backend is available. The
-        executorlib.flux.PyFluxExecutor is the preferred choice while the executorlib.mpi.PyMPIExecutor is primarily used
-        for development and testing. The executorlib.flux.PyFluxExecutor requires flux-core from the flux-framework to be
-        installed and in addition flux-sched to enable GPU scheduling. Finally, the executorlib.slurm.PySlurmExecutor
-        requires the SLURM workload manager to be installed on the system.
+        The executorlib.SlurmClusterExecutor leverages either the message passing interface (MPI), the SLURM workload
+        manager or preferable the flux framework for distributing python functions within a given resource allocation.
+        In contrast to the mpi4py.futures.MPIPoolExecutor the executorlib.SlurmClusterExecutor can be executed in a
+        serial python process and does not require the python script to be executed with MPI. It is even possible to
+        execute the executorlib.SlurmClusterExecutor directly in an interactive Jupyter notebook.

         Args:
             max_workers (int): for backwards compatibility with the standard library, max_workers also defines the
@@ -194,13 +193,13 @@ class SlurmClusterExecutor(ExecutorBase):
         )


-class SlurmJobExecutor(ExecutorBase):
+class SlurmJobExecutor(BaseExecutor):
     """
-    The executorlib.Executor leverages either the message passing interface (MPI), the SLURM workload manager or
+    The executorlib.SlurmJobExecutor leverages either the message passing interface (MPI), the SLURM workload manager or
     preferable the flux framework for distributing python functions within a given resource allocation. In contrast to
-    the mpi4py.futures.MPIPoolExecutor the executorlib.Executor can be executed in a serial python process and does not
-    require the python script to be executed with MPI. It is even possible to execute the executorlib.Executor directly
-    in an interactive Jupyter notebook.
+    the mpi4py.futures.MPIPoolExecutor the executorlib.SlurmJobExecutor can be executed in a serial python process and
+    does not require the python script to be executed with MPI. It is even possible to execute the
+    executorlib.SlurmJobExecutor directly in an interactive Jupyter notebook.

     Args:
         max_workers (int): for backwards compatibility with the standard library, max_workers also defines the number of
@@ -241,7 +240,7 @@ class SlurmJobExecutor(ExecutorBase):
     Examples:
         ```
         >>> import numpy as np
-        >>> from executorlib.executor.slurm import SlurmJobExecutor
+        >>> from executorlib import SlurmJobExecutor
         >>>
         >>> def calc(i, j, k):
         >>>     from mpi4py import MPI
@@ -274,12 +273,11 @@ class SlurmJobExecutor(ExecutorBase):
         plot_dependency_graph_filename: Optional[str] = None,
     ):
         """
-        Instead of returning a executorlib.Executor object this function returns either a executorlib.mpi.PyMPIExecutor,
-        executorlib.slurm.PySlurmExecutor or executorlib.flux.PyFluxExecutor depending on which backend is available. The
-        executorlib.flux.PyFluxExecutor is the preferred choice while the executorlib.mpi.PyMPIExecutor is primarily used
-        for development and testing. The executorlib.flux.PyFluxExecutor requires flux-core from the flux-framework to be
-        installed and in addition flux-sched to enable GPU scheduling. Finally, the executorlib.slurm.PySlurmExecutor
-        requires the SLURM workload manager to be installed on the system.
+        The executorlib.SlurmJobExecutor leverages either the message passing interface (MPI), the SLURM workload
+        manager or preferable the flux framework for distributing python functions within a given resource allocation.
+        In contrast to the mpi4py.futures.MPIPoolExecutor the executorlib.SlurmJobExecutor can be executed in a serial
+        python process and does not require the python script to be executed with MPI. It is even possible to execute
+        the executorlib.SlurmJobExecutor directly in an interactive Jupyter notebook.

         Args:
             max_workers (int): for backwards compatibility with the standard library, max_workers also defines the
{executorlib-1.2.1 → executorlib-1.4.0}/pyproject.toml
@@ -1,7 +1,7 @@
 [build-system]
 requires = [
     "hatchling==1.27.0",
-    "hatch-vcs==0.4.0",
+    "hatch-vcs==0.5.0",
     "cloudpickle>=2.0.0,<=3.1.1",
     "pyzmq>=25.0.0,<=26.4.0",
 ]
@@ -53,12 +53,12 @@ graphnotebook = [
 ]
 mpi = ["mpi4py>=3.1.4,<=4.0.1"]
 cluster = [
-    "pysqa==0.2.4",
+    "pysqa==0.2.5",
     "h5py>=3.6.0,<=3.13.0",
 ]
 all = [
     "mpi4py>=3.1.4,<=4.0.1",
-    "pysqa==0.2.4",
+    "pysqa==0.2.5",
     "h5py>=3.6.0,<=3.13.0",
     "pygraphviz>=1.10,<=1.14",
     "networkx>=2.8.8,<=3.4.2",