parsl 2024.8.12__py3-none-any.whl → 2024.8.19__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- parsl/channels/oauth_ssh/oauth_ssh.py +10 -2
- parsl/channels/ssh/ssh.py +16 -6
- parsl/channels/ssh_il/ssh_il.py +12 -2
- parsl/executors/high_throughput/executor.py +16 -26
- parsl/executors/high_throughput/mpi_executor.py +23 -2
- parsl/executors/high_throughput/mpi_prefix_composer.py +5 -4
- parsl/monitoring/db_manager.py +26 -39
- parsl/monitoring/monitoring.py +8 -3
- parsl/monitoring/router.py +15 -12
- parsl/tests/test_htex/test_resource_spec_validation.py +40 -0
- parsl/tests/test_mpi_apps/test_bad_mpi_config.py +29 -14
- parsl/tests/test_mpi_apps/test_mpi_mode_enabled.py +16 -8
- parsl/tests/test_mpi_apps/test_mpiex.py +2 -3
- parsl/tests/test_mpi_apps/test_resource_spec.py +39 -41
- parsl/version.py +1 -1
- {parsl-2024.8.12.dist-info → parsl-2024.8.19.dist-info}/METADATA +5 -3
- {parsl-2024.8.12.dist-info → parsl-2024.8.19.dist-info}/RECORD +25 -25
- parsl/tests/test_mpi_apps/test_mpi_mode_disabled.py +0 -47
- {parsl-2024.8.12.data → parsl-2024.8.19.data}/scripts/exec_parsl_function.py +0 -0
- {parsl-2024.8.12.data → parsl-2024.8.19.data}/scripts/interchange.py +0 -0
- {parsl-2024.8.12.data → parsl-2024.8.19.data}/scripts/parsl_coprocess.py +0 -0
- {parsl-2024.8.12.data → parsl-2024.8.19.data}/scripts/process_worker_pool.py +0 -0
- {parsl-2024.8.12.dist-info → parsl-2024.8.19.dist-info}/LICENSE +0 -0
- {parsl-2024.8.12.dist-info → parsl-2024.8.19.dist-info}/WHEEL +0 -0
- {parsl-2024.8.12.dist-info → parsl-2024.8.19.dist-info}/entry_points.txt +0 -0
- {parsl-2024.8.12.dist-info → parsl-2024.8.19.dist-info}/top_level.txt +0 -0
parsl/channels/oauth_ssh/oauth_ssh.py
CHANGED
@@ -1,11 +1,15 @@
 import logging
 import socket
 
-import paramiko
-
 from parsl.channels.ssh.ssh import DeprecatedSSHChannel
 from parsl.errors import OptionalModuleMissing
 
+try:
+    import paramiko
+    _ssh_enabled = True
+except (ImportError, NameError, FileNotFoundError):
+    _ssh_enabled = False
+
 try:
     from oauth_ssh.oauth_ssh_token import find_access_token
     from oauth_ssh.ssh_service import SSHService
@@ -38,6 +42,10 @@ class DeprecatedOAuthSSHChannel(DeprecatedSSHChannel):
 
         Raises:
         '''
+        if not _ssh_enabled:
+            raise OptionalModuleMissing(['ssh'],
+                                        "OauthSSHChannel requires the ssh module and config.")
+
         if not _oauth_ssh_enabled:
             raise OptionalModuleMissing(['oauth_ssh'],
                                         "OauthSSHChannel requires oauth_ssh module and config.")
parsl/channels/ssh/ssh.py
CHANGED
@@ -2,8 +2,6 @@ import errno
 import logging
 import os
 
-import paramiko
-
 from parsl.channels.base import Channel
 from parsl.channels.errors import (
     AuthException,
@@ -13,15 +11,24 @@ from parsl.channels.errors import (
     FileCopyException,
     SSHException,
 )
+from parsl.errors import OptionalModuleMissing
 from parsl.utils import RepresentationMixin
 
+try:
+    import paramiko
+    _ssh_enabled = True
+except (ImportError, NameError, FileNotFoundError):
+    _ssh_enabled = False
+
+
 logger = logging.getLogger(__name__)
 
 
-class NoAuthSSHClient(paramiko.SSHClient):
-    def _auth(self, username, *args):
-        self._transport.auth_none(username)
-        return
+if _ssh_enabled:
+    class NoAuthSSHClient(paramiko.SSHClient):
+        def _auth(self, username, *args):
+            self._transport.auth_none(username)
+            return
 
 
 class DeprecatedSSHChannel(Channel, RepresentationMixin):
@@ -53,6 +60,9 @@ class DeprecatedSSHChannel(Channel, RepresentationMixin):
 
         Raises:
         '''
+        if not _ssh_enabled:
+            raise OptionalModuleMissing(['ssh'],
+                                        "SSHChannel requires the ssh module and config.")
 
         self.hostname = hostname
         self.username = username
parsl/channels/ssh_il/ssh_il.py
CHANGED
@@ -1,9 +1,15 @@
 import getpass
 import logging
 
-import paramiko
-
 from parsl.channels.ssh.ssh import DeprecatedSSHChannel
+from parsl.errors import OptionalModuleMissing
+
+try:
+    import paramiko
+    _ssh_enabled = True
+except (ImportError, NameError, FileNotFoundError):
+    _ssh_enabled = False
+
 
 logger = logging.getLogger(__name__)
 
@@ -30,6 +36,10 @@ class DeprecatedSSHInteractiveLoginChannel(DeprecatedSSHChannel):
 
         Raises:
         '''
+        if not _ssh_enabled:
+            raise OptionalModuleMissing(['ssh'],
+                                        "SSHInteractiveLoginChannel requires the ssh module and config.")
+
         self.hostname = hostname
         self.username = username
         self.password = password
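Taken together, the three channel diffs above make paramiko an optional dependency: the import is wrapped in a try/except guard and each deprecated channel raises OptionalModuleMissing at construction time when paramiko is unavailable (the METADATA diff further down moves paramiko into the new "ssh" extra). A minimal sketch of the resulting behaviour, assuming an environment without paramiko installed and a placeholder hostname:

    from parsl.channels.ssh.ssh import DeprecatedSSHChannel
    from parsl.errors import OptionalModuleMissing

    # With paramiko absent, the module still imports; the failure is deferred
    # to channel construction and reported as OptionalModuleMissing.
    try:
        channel = DeprecatedSSHChannel("login.example.org")  # hypothetical host
    except OptionalModuleMissing as err:
        print(f"ssh support not installed: {err}")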
parsl/executors/high_throughput/executor.py
CHANGED
@@ -12,7 +12,6 @@ from typing import Callable, Dict, List, Optional, Sequence, Tuple, Union
 
 import typeguard
 
-import parsl.launchers
 from parsl import curvezmq
 from parsl.addresses import get_all_addresses
 from parsl.app.errors import RemoteExceptionWrapper
@@ -25,8 +24,7 @@ from parsl.executors.high_throughput.manager_selector import (
     RandomManagerSelector,
 )
 from parsl.executors.high_throughput.mpi_prefix_composer import (
-    VALID_LAUNCHERS,
-    validate_resource_spec,
+    InvalidResourceSpecification,
 )
 from parsl.executors.status_handling import BlockProviderExecutor
 from parsl.jobs.states import TERMINAL_STATES, JobState, JobStatus
@@ -224,17 +222,6 @@ class HighThroughputExecutor(BlockProviderExecutor, RepresentationMixin, UsageIn
         Parsl will create names as integers starting with 0.
 
         default: empty list
-
-    enable_mpi_mode: bool
-        If enabled, MPI launch prefixes will be composed for the batch scheduler based on
-        the nodes available in each batch job and the resource_specification dict passed
-        from the app. This is an experimental feature, please refer to the following doc section
-        before use: https://parsl.readthedocs.io/en/stable/userguide/mpi_apps.html
-
-    mpi_launcher: str
-        This field is only used if enable_mpi_mode is set. Select one from the
-        list of supported MPI launchers = ("srun", "aprun", "mpiexec").
-        default: "mpiexec"
     """
 
     @typeguard.typechecked
@@ -263,8 +250,6 @@ class HighThroughputExecutor(BlockProviderExecutor, RepresentationMixin, UsageIn
                 poll_period: int = 10,
                 address_probe_timeout: Optional[int] = None,
                 worker_logdir_root: Optional[str] = None,
-                enable_mpi_mode: bool = False,
-                mpi_launcher: str = "mpiexec",
                 manager_selector: ManagerSelector = RandomManagerSelector(),
                 block_error_handler: Union[bool, Callable[[BlockProviderExecutor, Dict[str, JobStatus]], None]] = True,
                 encrypted: bool = False):
@@ -330,15 +315,6 @@ class HighThroughputExecutor(BlockProviderExecutor, RepresentationMixin, UsageIn
         self.encrypted = encrypted
         self.cert_dir = None
 
-        self.enable_mpi_mode = enable_mpi_mode
-        assert mpi_launcher in VALID_LAUNCHERS, \
-            f"mpi_launcher must be set to one of {VALID_LAUNCHERS}"
-        if self.enable_mpi_mode:
-            assert isinstance(self.provider.launcher, parsl.launchers.SimpleLauncher), \
-                "mpi_mode requires the provider to be configured to use a SimpleLauncher"
-
-        self.mpi_launcher = mpi_launcher
-
         if not launch_cmd:
             launch_cmd = DEFAULT_LAUNCH_CMD
         self.launch_cmd = launch_cmd
@@ -348,6 +324,8 @@ class HighThroughputExecutor(BlockProviderExecutor, RepresentationMixin, UsageIn
         self.interchange_launch_cmd = interchange_launch_cmd
 
     radio_mode = "htex"
+    enable_mpi_mode: bool = False
+    mpi_launcher: str = "mpiexec"
 
     def _warn_deprecated(self, old: str, new: str):
         warnings.warn(
@@ -377,6 +355,18 @@ class HighThroughputExecutor(BlockProviderExecutor, RepresentationMixin, UsageIn
             return "{}/{}".format(self.worker_logdir_root, self.label)
         return self.logdir
 
+    def validate_resource_spec(self, resource_specification: dict):
+        """HTEX does not support *any* resource_specification options and
+        will raise InvalidResourceSpecification is any are passed to it"""
+        if resource_specification:
+            raise InvalidResourceSpecification(
+                set(resource_specification.keys()),
+                ("HTEX does not support the supplied resource_specifications."
+                 "For MPI applications consider using the MPIExecutor. "
+                 "For specifications for core count/memory/walltime, consider using WorkQueueExecutor. ")
+            )
+        return
+
     def initialize_scaling(self):
         """Compose the launch command and scale out the initial blocks.
         """
@@ -660,7 +650,7 @@ class HighThroughputExecutor(BlockProviderExecutor, RepresentationMixin, UsageIn
             Future
         """
 
-        validate_resource_spec(resource_specification, self.enable_mpi_mode)
+        self.validate_resource_spec(resource_specification)
 
         if self.bad_state_is_set:
             raise self.executor_exception
|
@@ -8,8 +8,13 @@ from parsl.executors.high_throughput.executor import (
|
|
8
8
|
GENERAL_HTEX_PARAM_DOCS,
|
9
9
|
HighThroughputExecutor,
|
10
10
|
)
|
11
|
+
from parsl.executors.high_throughput.mpi_prefix_composer import (
|
12
|
+
VALID_LAUNCHERS,
|
13
|
+
validate_resource_spec,
|
14
|
+
)
|
11
15
|
from parsl.executors.status_handling import BlockProviderExecutor
|
12
16
|
from parsl.jobs.states import JobStatus
|
17
|
+
from parsl.launchers import SimpleLauncher
|
13
18
|
from parsl.providers import LocalProvider
|
14
19
|
from parsl.providers.base import ExecutionProvider
|
15
20
|
|
@@ -30,6 +35,11 @@ class MPIExecutor(HighThroughputExecutor):
|
|
30
35
|
max_workers_per_block: int
|
31
36
|
Maximum number of MPI applications to run at once per block
|
32
37
|
|
38
|
+
mpi_launcher: str
|
39
|
+
Select one from the list of supported MPI launchers:
|
40
|
+
("srun", "aprun", "mpiexec").
|
41
|
+
default: "mpiexec"
|
42
|
+
|
33
43
|
{GENERAL_HTEX_PARAM_DOCS}
|
34
44
|
"""
|
35
45
|
|
@@ -60,7 +70,6 @@ class MPIExecutor(HighThroughputExecutor):
|
|
60
70
|
super().__init__(
|
61
71
|
# Hard-coded settings
|
62
72
|
cores_per_worker=1e-9, # Ensures there will be at least an absurd number of workers
|
63
|
-
enable_mpi_mode=True,
|
64
73
|
max_workers_per_node=max_workers_per_block,
|
65
74
|
|
66
75
|
# Everything else
|
@@ -82,9 +91,21 @@ class MPIExecutor(HighThroughputExecutor):
|
|
82
91
|
poll_period=poll_period,
|
83
92
|
address_probe_timeout=address_probe_timeout,
|
84
93
|
worker_logdir_root=worker_logdir_root,
|
85
|
-
mpi_launcher=mpi_launcher,
|
86
94
|
block_error_handler=block_error_handler,
|
87
95
|
encrypted=encrypted
|
88
96
|
)
|
97
|
+
self.enable_mpi_mode = True
|
98
|
+
self.mpi_launcher = mpi_launcher
|
89
99
|
|
90
100
|
self.max_workers_per_block = max_workers_per_block
|
101
|
+
|
102
|
+
if not isinstance(self.provider.launcher, SimpleLauncher):
|
103
|
+
raise TypeError("mpi_mode requires the provider to be configured to use a SimpleLauncher")
|
104
|
+
|
105
|
+
if mpi_launcher not in VALID_LAUNCHERS:
|
106
|
+
raise ValueError(f"mpi_launcher set to:{mpi_launcher} must be set to one of {VALID_LAUNCHERS}")
|
107
|
+
|
108
|
+
self.mpi_launcher = mpi_launcher
|
109
|
+
|
110
|
+
def validate_resource_spec(self, resource_specification: dict):
|
111
|
+
return validate_resource_spec(resource_specification)
|
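MPIExecutor now owns the MPI-specific checks that were removed from HighThroughputExecutor: the provider must use SimpleLauncher (otherwise TypeError) and mpi_launcher must be one of the supported launchers (otherwise ValueError), both enforced in __init__. A hedged configuration sketch with placeholder provider settings:

    from parsl import Config
    from parsl.executors import MPIExecutor
    from parsl.launchers import SimpleLauncher
    from parsl.providers import SlurmProvider

    # SimpleLauncher is required because the srun/aprun/mpiexec prefix is
    # composed per task from the resource specification, not by the provider.
    config = Config(executors=[
        MPIExecutor(
            max_workers_per_block=2,
            mpi_launcher="srun",  # must be one of: srun, aprun, mpiexec
            provider=SlurmProvider(launcher=SimpleLauncher()),
        )
    ])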
parsl/executors/high_throughput/mpi_prefix_composer.py
CHANGED
@@ -21,14 +21,15 @@ class MissingResourceSpecification(Exception):
 class InvalidResourceSpecification(Exception):
     """Exception raised when Invalid input is supplied via resource specification"""
 
-    def __init__(self, invalid_keys: Set[str]):
+    def __init__(self, invalid_keys: Set[str], message: str = ''):
         self.invalid_keys = invalid_keys
+        self.message = message
 
     def __str__(self):
-        return f"Invalid resource specification options supplied: {self.invalid_keys}"
+        return f"Invalid resource specification options supplied: {self.invalid_keys} {self.message}"
 
 
-def validate_resource_spec(resource_spec: Dict[str, str], is_mpi_enabled: bool):
+def validate_resource_spec(resource_spec: Dict[str, str]):
     """Basic validation of keys in the resource_spec
 
     Raises: InvalidResourceSpecification if the resource_spec
@@ -38,7 +39,7 @@ def validate_resource_spec(resource_spec: Dict[str, str], is_mpi_enabled: bool):
 
     # empty resource_spec when mpi_mode is set causes parsl to hang
     # ref issue #3427
-    if is_mpi_enabled and len(user_keys) == 0:
+    if len(user_keys) == 0:
         raise MissingResourceSpecification('MPI mode requires optional parsl_resource_specification keyword argument to be configured')
 
     legal_keys = set(("ranks_per_node",
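InvalidResourceSpecification now carries an optional message alongside the offending keys, which is how the HTEX hint above ends up in the exception text. A quick sketch:

    from parsl.executors.high_throughput.mpi_prefix_composer import (
        InvalidResourceSpecification,
    )

    # The second argument is appended to str(err) after the key set.
    err = InvalidResourceSpecification({"BAD_OPT"}, "Use MPIExecutor for MPI applications.")
    print(err)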
parsl/monitoring/db_manager.py
CHANGED
@@ -1,11 +1,14 @@
 import datetime
 import logging
+import multiprocessing.queues as mpq
 import os
 import queue
 import threading
 import time
 from typing import Any, Dict, List, Optional, Set, Tuple, TypeVar, cast
 
+import typeguard
+
 from parsl.dataflow.states import States
 from parsl.errors import OptionalModuleMissing
 from parsl.log_utils import set_file_logger
@@ -305,15 +308,15 @@ class DatabaseManager:
         self.pending_resource_queue: queue.Queue[MonitoringMessage] = queue.Queue()
 
     def start(self,
-              priority_queue:
-              node_queue:
-              block_queue:
-              resource_queue:
+              priority_queue: mpq.Queue,
+              node_queue: mpq.Queue,
+              block_queue: mpq.Queue,
+              resource_queue: mpq.Queue) -> None:
 
         self._kill_event = threading.Event()
         self._priority_queue_pull_thread = threading.Thread(target=self._migrate_logs_to_internal,
                                                             args=(
-                                                                priority_queue,
+                                                                priority_queue, self._kill_event,),
                                                             name="Monitoring-migrate-priority",
                                                             daemon=True,
                                                             )
@@ -321,7 +324,7 @@ class DatabaseManager:
 
         self._node_queue_pull_thread = threading.Thread(target=self._migrate_logs_to_internal,
                                                         args=(
-                                                            node_queue,
+                                                            node_queue, self._kill_event,),
                                                         name="Monitoring-migrate-node",
                                                         daemon=True,
                                                         )
@@ -329,7 +332,7 @@ class DatabaseManager:
 
         self._block_queue_pull_thread = threading.Thread(target=self._migrate_logs_to_internal,
                                                          args=(
-                                                             block_queue,
+                                                             block_queue, self._kill_event,),
                                                          name="Monitoring-migrate-block",
                                                          daemon=True,
                                                          )
@@ -337,7 +340,7 @@ class DatabaseManager:
 
         self._resource_queue_pull_thread = threading.Thread(target=self._migrate_logs_to_internal,
                                                             args=(
-                                                                resource_queue,
+                                                                resource_queue, self._kill_event,),
                                                             name="Monitoring-migrate-resource",
                                                             daemon=True,
                                                             )
@@ -574,43 +577,26 @@ class DatabaseManager:
             raise RuntimeError("An exception happened sometime during database processing and should have been logged in database_manager.log")
 
     @wrap_with_logs(target="database_manager")
-    def _migrate_logs_to_internal(self, logs_queue: queue.Queue, queue_tag: str, kill_event: threading.Event) -> None:
-        logger.info("Starting
+    def _migrate_logs_to_internal(self, logs_queue: queue.Queue, kill_event: threading.Event) -> None:
+        logger.info("Starting _migrate_logs_to_internal")
 
         while not kill_event.is_set() or logs_queue.qsize() != 0:
-            logger.debug("
-
+            logger.debug("Checking STOP conditions: kill event: %s, queue has entries: %s",
+                         kill_event.is_set(), logs_queue.qsize() != 0)
             try:
                 x, addr = logs_queue.get(timeout=0.1)
             except queue.Empty:
                 continue
             else:
-                if queue_tag == 'priority' and x == 'STOP':
+                if x == 'STOP':
                     self.close()
-                elif queue_tag == 'priority':  # implicitly not 'STOP'
-                    assert isinstance(x, tuple)
-                    assert len(x) == 2
-                    assert x[0] in [MessageType.WORKFLOW_INFO, MessageType.TASK_INFO], \
-                        "_migrate_logs_to_internal can only migrate WORKFLOW_,TASK_INFO message from priority queue, got x[0] == {}".format(x[0])
-                    self._dispatch_to_internal(x)
-                elif queue_tag == 'resource':
-                    assert isinstance(x, tuple), "_migrate_logs_to_internal was expecting a tuple, got {}".format(x)
-                    assert x[0] == MessageType.RESOURCE_INFO, (
-                        "_migrate_logs_to_internal can only migrate RESOURCE_INFO message from resource queue, "
-                        "got tag {}, message {}".format(x[0], x)
-                    )
-                    self._dispatch_to_internal(x)
-                elif queue_tag == 'node':
-                    assert len(x) == 2, "expected message tuple to have exactly two elements"
-                    assert x[0] == MessageType.NODE_INFO, "_migrate_logs_to_internal can only migrate NODE_INFO messages from node queue"
-
-                    self._dispatch_to_internal(x)
-                elif queue_tag == "block":
-                    self._dispatch_to_internal(x)
                 else:
-
+                    self._dispatch_to_internal(x)
 
     def _dispatch_to_internal(self, x: Tuple) -> None:
+        assert isinstance(x, tuple)
+        assert len(x) == 2, "expected message tuple to have exactly two elements"
+
         if x[0] in [MessageType.WORKFLOW_INFO, MessageType.TASK_INFO]:
             self.pending_priority_queue.put(cast(Any, x))
         elif x[0] == MessageType.RESOURCE_INFO:
@@ -719,11 +705,12 @@ class DatabaseManager:
 
 
 @wrap_with_logs(target="database_manager")
-
-
-
-
-
+@typeguard.typechecked
+def dbm_starter(exception_q: mpq.Queue,
+                priority_msgs: mpq.Queue,
+                node_msgs: mpq.Queue,
+                block_msgs: mpq.Queue,
+                resource_msgs: mpq.Queue,
                 db_url: str,
                 logdir: str,
                 logging_level: int) -> None:
parsl/monitoring/monitoring.py
CHANGED
@@ -154,9 +154,14 @@ class MonitoringHub(RepresentationMixin):
         self.router_exit_event = Event()
 
         self.router_proc = ForkProcess(target=router_starter,
-
-
-
+                                       kwargs={"comm_q": comm_q,
+                                               "exception_q": self.exception_q,
+                                               "priority_msgs": self.priority_msgs,
+                                               "node_msgs": self.node_msgs,
+                                               "block_msgs": self.block_msgs,
+                                               "resource_msgs": self.resource_msgs,
+                                               "exit_event": self.router_exit_event,
+                                               "hub_address": self.hub_address,
                                                "udp_port": self.hub_port,
                                                "zmq_port_range": self.hub_port_range,
                                                "logdir": self.logdir,
parsl/monitoring/router.py
CHANGED
@@ -1,15 +1,16 @@
 from __future__ import annotations
 
 import logging
+import multiprocessing.queues as mpq
 import os
 import pickle
-import queue
 import socket
 import threading
 import time
 from multiprocessing.synchronize import Event
-from typing import Optional, Tuple
+from typing import Optional, Tuple
 
+import typeguard
 import zmq
 
 from parsl.log_utils import set_file_logger
@@ -33,10 +34,10 @@ class MonitoringRouter:
                  logdir: str = ".",
                  logging_level: int = logging.INFO,
                  atexit_timeout: int = 3,    # in seconds
-                 priority_msgs:
-                 node_msgs:
-                 block_msgs:
-                 resource_msgs:
+                 priority_msgs: mpq.Queue,
+                 node_msgs: mpq.Queue,
+                 block_msgs: mpq.Queue,
+                 resource_msgs: mpq.Queue,
                  exit_event: Event,
                  ):
         """ Initializes a monitoring configuration class.
@@ -202,12 +203,14 @@ class MonitoringRouter:
 
 
 @wrap_with_logs
-
-
-
-
-
-
+@typeguard.typechecked
+def router_starter(*,
+                   comm_q: mpq.Queue,
+                   exception_q: mpq.Queue,
+                   priority_msgs: mpq.Queue,
+                   node_msgs: mpq.Queue,
+                   block_msgs: mpq.Queue,
+                   resource_msgs: mpq.Queue,
                    exit_event: Event,
 
                    hub_address: str,
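The monitoring changes above replace loosely annotated queue parameters with multiprocessing.queues.Queue (imported as mpq) and add @typeguard.typechecked to dbm_starter and router_starter, so a wrongly typed queue now fails fast at the process boundary. A standalone illustration of that general mechanism, using a hypothetical consume function rather than the parsl internals:

    import multiprocessing
    import multiprocessing.queues as mpq

    import typeguard


    @typeguard.typechecked
    def consume(msgs: mpq.Queue) -> None:
        # Only a multiprocessing queue satisfies the annotation; a plain
        # queue.Queue argument would be rejected by typeguard with a
        # type check error.
        msgs.put("hello")
        print(msgs.get())


    consume(multiprocessing.Queue())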
parsl/tests/test_htex/test_resource_spec_validation.py
ADDED
@@ -0,0 +1,40 @@
+import queue
+from unittest import mock
+
+import pytest
+
+from parsl.executors import HighThroughputExecutor
+from parsl.executors.high_throughput.mpi_prefix_composer import (
+    InvalidResourceSpecification,
+)
+
+
+def double(x):
+    return x * 2
+
+
+@pytest.mark.local
+def test_submit_calls_validate():
+
+    htex = HighThroughputExecutor()
+    htex.outgoing_q = mock.Mock(spec=queue.Queue)
+    htex.validate_resource_spec = mock.Mock(spec=htex.validate_resource_spec)
+
+    res_spec = {}
+    htex.submit(double, res_spec, (5,), {})
+    htex.validate_resource_spec.assert_called()
+
+
+@pytest.mark.local
+def test_resource_spec_validation():
+    htex = HighThroughputExecutor()
+    ret_val = htex.validate_resource_spec({})
+    assert ret_val is None
+
+
+@pytest.mark.local
+def test_resource_spec_validation_bad_keys():
+    htex = HighThroughputExecutor()
+
+    with pytest.raises(InvalidResourceSpecification):
+        htex.validate_resource_spec({"num_nodes": 2})
parsl/tests/test_mpi_apps/test_bad_mpi_config.py
CHANGED
@@ -1,33 +1,48 @@
 import pytest
 
 from parsl import Config
-from parsl.executors import
+from parsl.executors import MPIExecutor
 from parsl.launchers import AprunLauncher, SimpleLauncher, SrunLauncher
 from parsl.providers import SlurmProvider
 
 
 @pytest.mark.local
-def
-    """
+def test_bad_launcher():
+    """TypeError if a launcher other than SimpleLauncher is supplied"""
 
     for launcher in [SrunLauncher(), AprunLauncher()]:
-        with pytest.raises(
+        with pytest.raises(TypeError):
             Config(executors=[
-
-                    enable_mpi_mode=True,
+                MPIExecutor(
                     provider=SlurmProvider(launcher=launcher),
                 )
             ])
 
 
 @pytest.mark.local
-def
+def test_bad_mpi_launcher():
+    """ValueError if an unsupported mpi_launcher is specified"""
+
+    with pytest.raises(ValueError):
+        Config(executors=[
+            MPIExecutor(
+                mpi_launcher="bad_launcher",
+                provider=SlurmProvider(launcher=SimpleLauncher()),
+            )
+        ])
+
+
+@pytest.mark.local
+@pytest.mark.parametrize(
+    "mpi_launcher",
+    ["srun", "aprun", "mpiexec"]
+)
+def test_correct_launcher_with_mpi_mode(mpi_launcher: str):
     """Confirm that SimpleLauncher works with mpi_mode"""
 
-
-
-
-
-
-
-    assert isinstance(config.executors[0].provider.launcher, SimpleLauncher)
+    executor = MPIExecutor(
+        mpi_launcher=mpi_launcher,
+        provider=SlurmProvider(launcher=SimpleLauncher()),
+    )
+
+    assert isinstance(executor.provider.launcher, SimpleLauncher)
parsl/tests/test_mpi_apps/test_mpi_mode_enabled.py
CHANGED
@@ -6,26 +6,34 @@ from typing import Dict
 import pytest
 
 import parsl
-from parsl import bash_app, python_app
+from parsl import Config, bash_app, python_app
+from parsl.executors import MPIExecutor
 from parsl.executors.high_throughput.mpi_prefix_composer import (
     MissingResourceSpecification,
 )
-from parsl.tests.configs.htex_local import fresh_config
+from parsl.launchers import SimpleLauncher
+from parsl.providers import LocalProvider
 
 EXECUTOR_LABEL = "MPI_TEST"
 
 
 def local_setup():
-    config = fresh_config()
-    config.executors[0].label = EXECUTOR_LABEL
-    config.executors[0].max_workers_per_node = 2
-    config.executors[0].enable_mpi_mode = True
-    config.executors[0].mpi_launcher = "mpiexec"
 
     cwd = os.path.abspath(os.path.dirname(__file__))
     pbs_nodefile = os.path.join(cwd, "mocks", "pbs_nodefile")
 
-    config
+    config = Config(
+        executors=[
+            MPIExecutor(
+                label=EXECUTOR_LABEL,
+                max_workers_per_block=2,
+                mpi_launcher="mpiexec",
+                provider=LocalProvider(
+                    worker_init=f"export PBS_NODEFILE={pbs_nodefile}",
+                    launcher=SimpleLauncher()
+                )
+            )
+        ])
 
     parsl.load(config)
 
parsl/tests/test_mpi_apps/test_mpiex.py
CHANGED
@@ -4,7 +4,6 @@ from pathlib import Path
 
 import pytest
 
-import parsl
 from parsl import Config, HighThroughputExecutor
 from parsl.executors.high_throughput.mpi_executor import MPIExecutor
 from parsl.launchers import SimpleLauncher
@@ -42,8 +41,8 @@ def test_docstring():
 def test_init():
     """Ensure all relevant kwargs are copied over from HTEx"""
 
-    new_kwargs = {'max_workers_per_block'}
-    excluded_kwargs = {'available_accelerators', '
+    new_kwargs = {'max_workers_per_block', 'mpi_launcher'}
+    excluded_kwargs = {'available_accelerators', 'cores_per_worker', 'max_workers_per_node',
                        'mem_per_worker', 'cpu_affinity', 'max_workers', 'manager_selector'}
 
     # Get the kwargs from both HTEx and MPIEx
parsl/tests/test_mpi_apps/test_resource_spec.py
CHANGED
@@ -1,18 +1,20 @@
 import contextlib
 import logging
 import os
+import queue
 import typing
 import unittest
 from typing import Dict
+from unittest import mock
 
 import pytest
 
-import parsl
 from parsl.app.app import python_app
+from parsl.executors.high_throughput.executor import HighThroughputExecutor
+from parsl.executors.high_throughput.mpi_executor import MPIExecutor
 from parsl.executors.high_throughput.mpi_prefix_composer import (
     InvalidResourceSpecification,
     MissingResourceSpecification,
-    validate_resource_spec,
 )
 from parsl.executors.high_throughput.mpi_resource_management import (
     get_nodes_in_batchjob,
@@ -20,6 +22,8 @@ from parsl.executors.high_throughput.mpi_resource_management import (
     get_slurm_hosts_list,
     identify_scheduler,
 )
+from parsl.launchers import SimpleLauncher
+from parsl.providers import LocalProvider
 from parsl.tests.configs.htex_local import fresh_config
 
 EXECUTOR_LABEL = "MPI_TEST"
@@ -48,23 +52,6 @@ def get_env_vars(parsl_resource_specification: Dict = {}) -> Dict:
     return parsl_vars
 
 
-@pytest.mark.local
-def test_resource_spec_env_vars():
-    resource_spec = {
-        "num_nodes": 4,
-        "ranks_per_node": 2,
-    }
-
-    assert double(5).result() == 10
-
-    future = get_env_vars(parsl_resource_specification=resource_spec)
-
-    result = future.result()
-    assert isinstance(result, Dict)
-    assert result["PARSL_NUM_NODES"] == str(resource_spec["num_nodes"])
-    assert result["PARSL_RANKS_PER_NODE"] == str(resource_spec["ranks_per_node"])
-
-
 @pytest.mark.local
 @unittest.mock.patch("subprocess.check_output", return_value=b"c203-031\nc203-032\n")
 def test_slurm_mocked_mpi_fetch(subprocess_check):
@@ -83,16 +70,6 @@ def add_to_path(path: os.PathLike) -> typing.Generator[None, None, None]:
         os.environ["PATH"] = old_path
 
 
-@pytest.mark.local
-@pytest.mark.skip
-def test_slurm_mpi_fetch():
-    logging.warning(f"Current pwd : {os.path.dirname(__file__)}")
-    with add_to_path(os.path.dirname(__file__)):
-        logging.warning(f"PATH: {os.environ['PATH']}")
-        nodeinfo = get_slurm_hosts_list()
-    logging.warning(f"Got : {nodeinfo}")
-
-
 @contextlib.contextmanager
 def mock_pbs_nodefile(nodefile: str = "pbs_nodefile") -> typing.Generator[None, None, None]:
     cwd = os.path.abspath(os.path.dirname(__file__))
@@ -122,22 +99,43 @@ def test_top_level():
 
 @pytest.mark.local
 @pytest.mark.parametrize(
-    "resource_spec,
+    "resource_spec, exception",
     (
-
-        ({"
-        ({"
-        ({
-        ({
-        ({"launcher_options": "--debug_foo"}, True, None),
-        ({"num_nodes": 2, "BAD_OPT": 1}, True, InvalidResourceSpecification),
-        ({}, True, MissingResourceSpecification),
+
+        ({"num_nodes": 2, "ranks_per_node": 1}, None),
+        ({"launcher_options": "--debug_foo"}, None),
+        ({"num_nodes": 2, "BAD_OPT": 1}, InvalidResourceSpecification),
+        ({}, MissingResourceSpecification),
     )
 )
-def
+def test_mpi_resource_spec(resource_spec: Dict, exception):
+    """Test validation of resource_specification in MPIExecutor"""
+
+    mpi_ex = MPIExecutor(provider=LocalProvider(launcher=SimpleLauncher()))
+    mpi_ex.outgoing_q = mock.Mock(spec=queue.Queue)
+
     if exception:
         with pytest.raises(exception):
-            validate_resource_spec(resource_spec, is_mpi_enabled)
+            mpi_ex.validate_resource_spec(resource_spec)
     else:
-        result = validate_resource_spec(resource_spec, is_mpi_enabled)
+        result = mpi_ex.validate_resource_spec(resource_spec)
         assert result is None
+
+
+@pytest.mark.local
+@pytest.mark.parametrize(
+    "resource_spec",
+    (
+        {"num_nodes": 2, "ranks_per_node": 1},
+        {"launcher_options": "--debug_foo"},
+        {"BAD_OPT": 1},
+    )
+)
+def test_mpi_resource_spec_passed_to_htex(resource_spec: dict):
+    """HTEX should reject every resource_spec"""
+
+    htex = HighThroughputExecutor()
+    htex.outgoing_q = mock.Mock(spec=queue.Queue)
+
+    with pytest.raises(InvalidResourceSpecification):
+        htex.validate_resource_spec(resource_spec)
parsl/version.py
CHANGED
{parsl-2024.8.12.dist-info → parsl-2024.8.19.dist-info}/METADATA
CHANGED
@@ -1,9 +1,9 @@
 Metadata-Version: 2.1
 Name: parsl
-Version: 2024.8.12
+Version: 2024.8.19
 Summary: Simple data dependent workflows in Python
 Home-page: https://github.com/Parsl/parsl
-Download-URL: https://github.com/Parsl/parsl/archive/2024.08.12.tar.gz
+Download-URL: https://github.com/Parsl/parsl/archive/2024.08.19.tar.gz
 Author: The Parsl Team
 Author-email: parsl@googlegroups.com
 License: Apache 2.0
@@ -25,7 +25,6 @@ Requires-Dist: globus-sdk
 Requires-Dist: dill
 Requires-Dist: tblib
 Requires-Dist: requests
-Requires-Dist: paramiko
 Requires-Dist: psutil>=5.5.1
 Requires-Dist: setproctitle
 Requires-Dist: filelock<4,>=3.13
@@ -57,6 +56,7 @@ Requires-Dist: jsonschema; extra == "all"
 Requires-Dist: proxystore; extra == "all"
 Requires-Dist: radical.pilot==1.60; extra == "all"
 Requires-Dist: radical.utils==1.60; extra == "all"
+Requires-Dist: paramiko; extra == "all"
 Provides-Extra: aws
 Requires-Dist: boto3; extra == "aws"
 Provides-Extra: azure
@@ -87,6 +87,8 @@ Requires-Dist: proxystore; extra == "proxystore"
 Provides-Extra: radical-pilot
 Requires-Dist: radical.pilot==1.60; extra == "radical-pilot"
 Requires-Dist: radical.utils==1.60; extra == "radical-pilot"
+Provides-Extra: ssh
+Requires-Dist: paramiko; extra == "ssh"
 Provides-Extra: visualization
 Requires-Dist: pydot; extra == "visualization"
 Requires-Dist: networkx<2.6,>=2.5; extra == "visualization"
{parsl-2024.8.12.dist-info → parsl-2024.8.19.dist-info}/RECORD
CHANGED
@@ -8,7 +8,7 @@ parsl/multiprocessing.py,sha256=MyaEcEq-Qf860u7V98u-PZrPNdtzOZL_NW6EhIJnmfQ,1937
 parsl/process_loggers.py,sha256=uQ7Gd0W72Jz7rrcYlOMfLsAEhkRltxXJL2MgdduJjEw,1136
 parsl/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 parsl/utils.py,sha256=91FjQiTUY383ueAjkBAgE21My9nba6SP2a2SrbB1r1Q,11250
-parsl/version.py,sha256=
+parsl/version.py,sha256=Cz-5cl59BVU1_SViVhDG0FvWfftidv7huse6Kygyu10,131
 parsl/app/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 parsl/app/app.py,sha256=0gbM4AH2OtFOLsv07I5nglpElcwMSOi-FzdZZfrk7So,8532
 parsl/app/bash.py,sha256=jm2AvePlCT9DZR7H_4ANDWxatp5dN_22FUlT_gWhZ-g,5528
@@ -23,11 +23,11 @@ parsl/channels/errors.py,sha256=Dp0FhtHpygn0IjX8nGurx-WrTJm9aw-Jjz3SSUT-jCc,3283
 parsl/channels/local/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 parsl/channels/local/local.py,sha256=xqH4HnipUN95NgvyB1r33SiqgQKkARgRKmg0_HnumUk,5311
 parsl/channels/oauth_ssh/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-parsl/channels/oauth_ssh/oauth_ssh.py,sha256=
+parsl/channels/oauth_ssh/oauth_ssh.py,sha256=6pj3LQAX89p5Lc8NL1Llq2_noi8GS8BItCuRtDp-iCA,3823
 parsl/channels/ssh/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-parsl/channels/ssh/ssh.py,sha256=
+parsl/channels/ssh/ssh.py,sha256=3PfE3qYQOCr-BZrCseGiMKYFUILFPmW_CgvV63CWI4M,10494
 parsl/channels/ssh_il/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-parsl/channels/ssh_il/ssh_il.py,sha256=
+parsl/channels/ssh_il/ssh_il.py,sha256=acOXJyqCmgC2nl7zrO_uEu3GpJZMN2l-Af5XfmNMLRs,2783
 parsl/concurrent/__init__.py,sha256=TvIVceJYaJAsxedNBF3Vdo9lEQNHH_j3uxJv0zUjP7w,3288
 parsl/configs/ASPIRE1.py,sha256=eKnmz0QD3V522emtXMjS6Ppeooe5lzcBgCE6cxunbYY,1718
 parsl/configs/Azure.py,sha256=CJms3xWmdb-S3CksbHrPF2TfMxJC5I0faqUKCOzVg0k,1268
@@ -79,13 +79,13 @@ parsl/executors/flux/executor.py,sha256=8_xakLUu5zNJAHL0LbeTCFEWqWzRK1eE-3ep4GII
 parsl/executors/flux/flux_instance_manager.py,sha256=5T3Rp7ZM-mlT0Pf0Gxgs5_YmnaPrSF9ec7zvRfLfYJw,2129
 parsl/executors/high_throughput/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 parsl/executors/high_throughput/errors.py,sha256=Sak8e8UpiEcXefUjMHbhyXc4Rn7kJtOoh7L8wreBQdk,1638
-parsl/executors/high_throughput/executor.py,sha256=
+parsl/executors/high_throughput/executor.py,sha256=f6xHuDRklecrxz9luqERyuNpfBXJi0erSd0EuDwQnNQ,37770
 parsl/executors/high_throughput/interchange.py,sha256=upaJht6YnqvJqVF1Ub7GEyRFDtw1v19d0JmCWNXsi6k,31094
 parsl/executors/high_throughput/manager_record.py,sha256=yn3L8TUJFkgm2lX1x0SeS9mkvJowC0s2VIMCFiU7ThM,455
 parsl/executors/high_throughput/manager_selector.py,sha256=uRaEtcbDO2vXf8vjEcm7bfZVdeUlSPTRc3G4oFRO29M,820
 parsl/executors/high_throughput/monitoring_info.py,sha256=HC0drp6nlXQpAop5PTUKNjdXMgtZVvrBL0JzZJebPP4,298
-parsl/executors/high_throughput/mpi_executor.py,sha256=
-parsl/executors/high_throughput/mpi_prefix_composer.py,sha256=
+parsl/executors/high_throughput/mpi_executor.py,sha256=khvGz56A8zU8XAY-R4TtqqiJB8B10mkVTXD_9xtrXgo,4696
+parsl/executors/high_throughput/mpi_prefix_composer.py,sha256=XQAv9MH7pl5rCUOVw1x8qB64n8iT1-smiVLTBSB1Ro0,4878
 parsl/executors/high_throughput/mpi_resource_management.py,sha256=LFBbJ3BnzTcY_v-jNu30uoIB2Enk4cleN4ygY3dncjY,8194
 parsl/executors/high_throughput/probe.py,sha256=TNpGTXb4_DEeg_h-LHu4zEKi1-hffboxvKcZUl2OZGk,2751
 parsl/executors/high_throughput/process_worker_pool.py,sha256=3s-Ouo3ZEhod7hon8euyL37t1DbP5pSVjXyC23DSN_0,43075
@@ -120,13 +120,13 @@ parsl/launchers/base.py,sha256=CblcvPTJiu-MNLWaRtFe29SZQ0BpTOlaY8CGcHdlHIE,538
 parsl/launchers/errors.py,sha256=8YMV_CHpBNVa4eXkGE4x5DaFQlZkDCRCHmBktYcY6TA,467
 parsl/launchers/launchers.py,sha256=VB--fiVv_IQne3DydTMSdGUY0o0g69puAs-Hd3mJ2vo,15464
 parsl/monitoring/__init__.py,sha256=0ywNz6i0lM1xo_7_BIxhETDGeVd2C_0wwD7qgeaMR4c,83
-parsl/monitoring/db_manager.py,sha256=
+parsl/monitoring/db_manager.py,sha256=IXaSIw3k-ij-dS78gnFYrXEHy9fBW1v8Cg2hcV3Erm0,35413
 parsl/monitoring/errors.py,sha256=D6jpYzEzp0d6FmVKGqhvjAxr4ztZfJX2s-aXemH9bBU,148
 parsl/monitoring/message_type.py,sha256=Khn88afNxcOIciKiCK4GLnn90I5BlRTiOL3zK-P07yQ,401
-parsl/monitoring/monitoring.py,sha256=
+parsl/monitoring/monitoring.py,sha256=KtUAu9qQqHHGutfDFTL6-8z7qC7ZuWt2XCza5NBzHWg,13862
 parsl/monitoring/radios.py,sha256=cHdpBOW1ITYvFnOgYjziuZOauq8p7mlSBOvcbIP78mg,6437
 parsl/monitoring/remote.py,sha256=avIWMvejN0LeIXpt_RCXJxGLbsXhapUab2rS5Tmjca4,13739
-parsl/monitoring/router.py,sha256=
+parsl/monitoring/router.py,sha256=IfyhBqcFJE7RazyH3UjkFBZr-HdJXKzS_IIdaYkuPZs,10993
 parsl/monitoring/types.py,sha256=_WGizCTgQVOkJ2dvNfsvHpYBj21Ky3bJsmyIskIx10I,631
 parsl/monitoring/queries/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 parsl/monitoring/queries/pandas.py,sha256=0Z2r0rjTKCemf0eaDkF1irvVHn5g7KC5SYETvQPRxwU,2232
@@ -341,6 +341,7 @@ parsl/tests/test_htex/test_manager_failure.py,sha256=N-obuSZ8f7XA_XcddoN2LWKSVtp
 parsl/tests/test_htex/test_managers_command.py,sha256=Y-eUjtBzwW9erCYdph9bOesbkUvX8QUPqXt27DCgVS8,951
 parsl/tests/test_htex/test_missing_worker.py,sha256=gyp5i7_t-JHyJGtz_eXZKKBY5w8oqLOIxO6cJgGJMtQ,745
 parsl/tests/test_htex/test_multiple_disconnected_blocks.py,sha256=Axn8us43dA722O4PWdqxCJM5f_vinZqjFT1WAEvC_ZM,1995
+parsl/tests/test_htex/test_resource_spec_validation.py,sha256=k1zQ--46bCyhOnt2UTaYnSh0I2UhwX747ISAfy8xPvk,952
 parsl/tests/test_htex/test_worker_failure.py,sha256=Uz-RHI-LK78FMjXUvrUFmo4iYfmpDVBUcBxxRb3UG9M,603
 parsl/tests/test_htex/test_zmq_binding.py,sha256=Bq1HHuMxBE_AcaP1VZ-RqE4euCHO__Du05b2UZ5H1RA,3950
 parsl/tests/test_monitoring/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -354,13 +355,12 @@ parsl/tests/test_monitoring/test_memoization_representation.py,sha256=dknv2nO7pN
 parsl/tests/test_monitoring/test_stdouterr.py,sha256=9FQSfiaMrOpoSwravZuEwmdgUgI7iG0TPRucsYC_NJA,4498
 parsl/tests/test_monitoring/test_viz_colouring.py,sha256=83Qdmn3gM0j7IL6kPDcuIsp_nl4zj-liPijyIN632SY,592
 parsl/tests/test_mpi_apps/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-parsl/tests/test_mpi_apps/test_bad_mpi_config.py,sha256=
-parsl/tests/test_mpi_apps/
-parsl/tests/test_mpi_apps/test_mpi_mode_enabled.py,sha256=pV-htWmPNyY7XKN4Qo-twLmH-qreCgFlYwokgZbTS_g,5304
+parsl/tests/test_mpi_apps/test_bad_mpi_config.py,sha256=QKvEUSrHIBrvqu2fRj1MAqxsYxDfcrdQ7dzWdOZejuU,1320
+parsl/tests/test_mpi_apps/test_mpi_mode_enabled.py,sha256=9RaRgfweywYvcrTvteJXJwt_RSiyWSjBgii5LCnisJg,5461
 parsl/tests/test_mpi_apps/test_mpi_prefix.py,sha256=yJslZvYK3JeL9UgxMwF9DDPR9QD4zJLGVjubD0F-utc,1950
 parsl/tests/test_mpi_apps/test_mpi_scheduler.py,sha256=YdV8A-m67DHk9wxgNpj69wwGEKrFGL20KAC1TzLke3c,6332
-parsl/tests/test_mpi_apps/test_mpiex.py,sha256=
-parsl/tests/test_mpi_apps/test_resource_spec.py,sha256=
+parsl/tests/test_mpi_apps/test_mpiex.py,sha256=N44sOaTOMchmZ3bI_w5h2mjOnS0sGFq8IqzIOpF0MMI,2036
+parsl/tests/test_mpi_apps/test_resource_spec.py,sha256=aJo_1Nr0t-5pzw_rpDWEVp41RcICWG9sAeFUFXXJoW8,3828
 parsl/tests/test_providers/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 parsl/tests/test_providers/test_cobalt_deprecation_warning.py,sha256=UN2W6xJxuLx2euPqArORKFEU2VXez9_PYqq-0rZHanQ,391
 parsl/tests/test_providers/test_local_provider.py,sha256=R96E1eWgHVkvOQ1Au9wj-gfdWKAqGc-qlygFuxpGFQ8,7160
@@ -459,13 +459,13 @@ parsl/usage_tracking/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hS
 parsl/usage_tracking/api.py,sha256=iaCY58Dc5J4UM7_dJzEEs871P1p1HdxBMtNGyVdzc9g,1821
 parsl/usage_tracking/levels.py,sha256=xbfzYEsd55KiZJ-mzNgPebvOH4rRHum04hROzEf41tU,291
 parsl/usage_tracking/usage.py,sha256=qNEJ7nPimqd3Y7OWFLdYmNwJ6XDKlyfV_fTzasxsQw8,8690
-parsl-2024.8.
-parsl-2024.8.
-parsl-2024.8.
-parsl-2024.8.
-parsl-2024.8.
-parsl-2024.8.
-parsl-2024.8.
-parsl-2024.8.
-parsl-2024.8.
-parsl-2024.8.
+parsl-2024.8.19.data/scripts/exec_parsl_function.py,sha256=RUkJ4JSJAjr7YyRZ58zhMdg8cR5dVV9odUl3AuzNf3k,7802
+parsl-2024.8.19.data/scripts/interchange.py,sha256=Gl9h3_MN4Ux2FJZxd2ObfTSZ5T1INYQDhU_bYFezbkE,31081
+parsl-2024.8.19.data/scripts/parsl_coprocess.py,sha256=zrVjEqQvFOHxsLufPi00xzMONagjVwLZbavPM7bbjK4,5722
+parsl-2024.8.19.data/scripts/process_worker_pool.py,sha256=78QKnV5KbY_vcteC6k60gpDE4wEk6hsciet_qzs9QoU,43061
+parsl-2024.8.19.dist-info/LICENSE,sha256=tAkwu8-AdEyGxGoSvJ2gVmQdcicWw3j1ZZueVV74M-E,11357
+parsl-2024.8.19.dist-info/METADATA,sha256=n80gYJIibw3jBmu-BWOnzdedi6iNvsITxO4Zi2rnEmc,4121
+parsl-2024.8.19.dist-info/WHEEL,sha256=eOLhNAGa2EW3wWl_TU484h7q1UNgy0JXjjoqKoxAAQc,92
+parsl-2024.8.19.dist-info/entry_points.txt,sha256=XqnsWDYoEcLbsMcpnYGKLEnSBmaIe1YoM5YsBdJG2tI,176
+parsl-2024.8.19.dist-info/top_level.txt,sha256=PIheYoUFQtF2icLsgOykgU-Cjuwr2Oi6On2jo5RYgRM,6
+parsl-2024.8.19.dist-info/RECORD,,
parsl/tests/test_mpi_apps/test_mpi_mode_disabled.py
DELETED
@@ -1,47 +0,0 @@
-from typing import Dict
-
-import pytest
-
-import parsl
-from parsl import python_app
-from parsl.tests.configs.htex_local import fresh_config
-
-EXECUTOR_LABEL = "MPI_TEST"
-
-
-def local_config():
-    config = fresh_config()
-    config.executors[0].label = EXECUTOR_LABEL
-    config.executors[0].max_workers_per_node = 1
-    config.executors[0].enable_mpi_mode = False
-    return config
-
-
-@python_app
-def get_env_vars(parsl_resource_specification: Dict = {}) -> Dict:
-    import os
-
-    parsl_vars = {}
-    for key in os.environ:
-        if key.startswith("PARSL_"):
-            parsl_vars[key] = os.environ[key]
-    return parsl_vars
-
-
-@pytest.mark.local
-def test_only_resource_specs_set():
-    """Confirm that resource_spec env vars are set while launch prefixes are not
-    when enable_mpi_mode = False"""
-    resource_spec = {
-        "num_nodes": 4,
-        "ranks_per_node": 2,
-    }
-
-    future = get_env_vars(parsl_resource_specification=resource_spec)
-
-    result = future.result()
-    assert isinstance(result, Dict)
-    assert "PARSL_DEFAULT_PREFIX" not in result
-    assert "PARSL_SRUN_PREFIX" not in result
-    assert result["PARSL_NUM_NODES"] == str(resource_spec["num_nodes"])
-    assert result["PARSL_RANKS_PER_NODE"] == str(resource_spec["ranks_per_node"])