parsl 2024.10.7__py3-none-any.whl → 2024.10.21__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- parsl/executors/errors.py +13 -0
- parsl/executors/high_throughput/executor.py +23 -17
- parsl/executors/high_throughput/interchange.py +1 -19
- parsl/executors/high_throughput/manager_selector.py +30 -0
- parsl/executors/high_throughput/mpi_prefix_composer.py +5 -23
- parsl/executors/high_throughput/process_worker_pool.py +1 -1
- parsl/executors/threads.py +3 -2
- parsl/executors/workqueue/executor.py +6 -5
- parsl/providers/kubernetes/kube.py +35 -28
- parsl/tests/conftest.py +1 -1
- parsl/tests/test_error_handling/test_resource_spec.py +10 -14
- parsl/tests/test_htex/test_block_manager_selector_unit.py +20 -0
- parsl/tests/test_htex/test_drain.py +6 -4
- parsl/tests/test_htex/test_manager_selector_by_block.py +53 -0
- parsl/tests/test_htex/test_resource_spec_validation.py +8 -3
- parsl/tests/test_mpi_apps/test_mpi_mode_enabled.py +2 -4
- parsl/tests/test_mpi_apps/test_resource_spec.py +2 -5
- parsl/tests/test_providers/test_kubernetes_provider.py +102 -0
- parsl/tests/test_utils/test_sanitize_dns.py +76 -0
- parsl/utils.py +78 -0
- parsl/version.py +1 -1
- {parsl-2024.10.7.data → parsl-2024.10.21.data}/scripts/interchange.py +1 -19
- {parsl-2024.10.7.data → parsl-2024.10.21.data}/scripts/process_worker_pool.py +1 -1
- {parsl-2024.10.7.dist-info → parsl-2024.10.21.dist-info}/METADATA +3 -4
- {parsl-2024.10.7.dist-info → parsl-2024.10.21.dist-info}/RECORD +31 -27
- {parsl-2024.10.7.data → parsl-2024.10.21.data}/scripts/exec_parsl_function.py +0 -0
- {parsl-2024.10.7.data → parsl-2024.10.21.data}/scripts/parsl_coprocess.py +0 -0
- {parsl-2024.10.7.dist-info → parsl-2024.10.21.dist-info}/LICENSE +0 -0
- {parsl-2024.10.7.dist-info → parsl-2024.10.21.dist-info}/WHEEL +0 -0
- {parsl-2024.10.7.dist-info → parsl-2024.10.21.dist-info}/entry_points.txt +0 -0
- {parsl-2024.10.7.dist-info → parsl-2024.10.21.dist-info}/top_level.txt +0 -0
parsl/executors/errors.py
CHANGED
@@ -1,4 +1,6 @@
 """Exceptions raise by Executors."""
+from typing import Set
+
 from parsl.errors import ParslError
 from parsl.executors.base import ParslExecutor
 
@@ -44,6 +46,17 @@ class UnsupportedFeatureError(ExecutorError):
                              self.current_executor)
 
 
+class InvalidResourceSpecification(ExecutorError):
+    """Error raised when Invalid input is supplied via resource Specification"""
+
+    def __init__(self, invalid_keys: Set[str], message: str = ''):
+        self.invalid_keys = invalid_keys
+        self.message = message
+
+    def __str__(self):
+        return f"Invalid Resource Specification Supplied: {self.invalid_keys}. {self.message}"
+
+
 class ScalingFailed(ExecutorError):
     """Scaling failed due to error in Execution provider."""
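The InvalidResourceSpecification exception added above now lives in parsl.executors.errors and is shared by the HighThroughput, WorkQueue and ThreadPool executors. A rough sketch of how it surfaces to a workflow, assuming a configuration with a HighThroughputExecutor has already been loaded (the app below is illustrative, not part of this release):

from parsl import python_app
from parsl.executors.errors import InvalidResourceSpecification

@python_app
def double(x, parsl_resource_specification={}):
    return x * 2

# HTEX only accepts 'priority', so an unknown key is rejected.
future = double(5, parsl_resource_specification={'colour': 'blue'})
try:
    future.result()
except InvalidResourceSpecification as e:
    print(e.invalid_keys, e.message)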
parsl/executors/high_throughput/executor.py
CHANGED
@@ -16,16 +16,17 @@ from parsl import curvezmq
 from parsl.addresses import get_all_addresses
 from parsl.app.errors import RemoteExceptionWrapper
 from parsl.data_provider.staging import Staging
-from parsl.executors.errors import
+from parsl.executors.errors import (
+    BadMessage,
+    InvalidResourceSpecification,
+    ScalingFailed,
+)
 from parsl.executors.high_throughput import zmq_pipes
 from parsl.executors.high_throughput.errors import CommandClientTimeoutError
 from parsl.executors.high_throughput.manager_selector import (
     ManagerSelector,
     RandomManagerSelector,
 )
-from parsl.executors.high_throughput.mpi_prefix_composer import (
-    InvalidResourceSpecification,
-)
 from parsl.executors.status_handling import BlockProviderExecutor
 from parsl.jobs.states import TERMINAL_STATES, JobState, JobStatus
 from parsl.process_loggers import wrap_with_logs
@@ -145,6 +146,11 @@ GENERAL_HTEX_PARAM_DOCS = """provider : :class:`~parsl.providers.base.ExecutionP
 
     encrypted : bool
         Flag to enable/disable encryption (CurveZMQ). Default is False.
+
+    manager_selector: ManagerSelector
+        Determines what strategy the interchange uses to select managers during task distribution.
+        See API reference under "Manager Selectors" regarding the various manager selectors.
+        Default: 'RandomManagerSelector'
 """  # Documentation for params used by both HTEx and MPIEx
 
 
@@ -340,15 +346,17 @@ class HighThroughputExecutor(BlockProviderExecutor, RepresentationMixin, UsageIn
         return self.logdir
 
     def validate_resource_spec(self, resource_specification: dict):
-        """HTEX
-
+        """HTEX supports the following *Optional* resource specifications:
+        priority: lower value is higher priority"""
         if resource_specification:
-
-
-
-
-
-
+            acceptable_fields = {'priority'}
+            keys = set(resource_specification.keys())
+            invalid_keys = keys - acceptable_fields
+            if invalid_keys:
+                message = "Task resource specification only accepts these types of resources: {}".format(
+                    ', '.join(acceptable_fields))
+                logger.error(message)
+                raise InvalidResourceSpecification(set(invalid_keys), message)
         return
 
     def initialize_scaling(self):
@@ -459,9 +467,7 @@ class HighThroughputExecutor(BlockProviderExecutor, RepresentationMixin, UsageIn
             except pickle.UnpicklingError:
                 raise BadMessage("Message received could not be unpickled")
 
-            if msg['type'] == '
-                continue
-            elif msg['type'] == 'result':
+            if msg['type'] == 'result':
                 try:
                     tid = msg['task_id']
                 except Exception:
@@ -581,7 +587,7 @@ class HighThroughputExecutor(BlockProviderExecutor, RepresentationMixin, UsageIn
     def outstanding(self) -> int:
         """Returns the count of tasks outstanding across the interchange
         and managers"""
-        return self.
+        return len(self.tasks)
 
     @property
     def connected_workers(self) -> int:
@@ -658,7 +664,7 @@ class HighThroughputExecutor(BlockProviderExecutor, RepresentationMixin, UsageIn
         except TypeError:
             raise SerializationError(func.__name__)
 
-        msg = {"task_id": task_id, "buffer": fn_buf}
+        msg = {"task_id": task_id, "resource_spec": resource_specification, "buffer": fn_buf}
 
         # Post task to the outgoing queue
         self.outgoing_q.put(msg)
parsl/executors/high_throughput/interchange.py
CHANGED
@@ -6,7 +6,6 @@ import os
 import pickle
 import platform
 import queue
-import signal
 import sys
 import threading
 import time
@@ -252,13 +251,7 @@ class Interchange:
             try:
                 command_req = self.command_channel.recv_pyobj()
                 logger.debug("Received command request: {}".format(command_req))
-                if command_req == "
-                    outstanding = self.pending_task_queue.qsize()
-                    for manager in self._ready_managers.values():
-                        outstanding += len(manager['tasks'])
-                    reply = outstanding
-
-                elif command_req == "CONNECTED_BLOCKS":
+                if command_req == "CONNECTED_BLOCKS":
                     reply = self.connected_block_history
 
                 elif command_req == "WORKERS":
@@ -319,16 +312,6 @@ class Interchange:
         """ Start the interchange
         """
 
-        # If a user workflow has set its own signal handler for sigterm, that
-        # handler will be inherited by the interchange process because it is
-        # launched as a multiprocessing fork process.
-        # That can interfere with the interchange shutdown mechanism, which is
-        # to receive a SIGTERM and exit immediately.
-        # See Parsl issue #2343 (Threads and multiprocessing cannot be
-        # intermingled without deadlocks) which talks about other fork-related
-        # parent-process-inheritance problems.
-        signal.signal(signal.SIGTERM, signal.SIG_DFL)
-
         logger.info("Starting main interchange method")
 
         if self.hub_address is not None and self.hub_zmq_port is not None:
@@ -549,7 +532,6 @@ class Interchange:
                     monitoring_radio.send(r['payload'])
                 elif r['type'] == 'heartbeat':
                     logger.debug("Manager %r sent heartbeat via results connection", manager_id)
-                    b_messages.append((p_message, r))
                 else:
                     logger.error("Interchange discarding result_queue message of unknown type: %s", r["type"])
parsl/executors/high_throughput/manager_selector.py
CHANGED
@@ -19,7 +19,37 @@ class ManagerSelector(metaclass=ABCMeta):
 
 class RandomManagerSelector(ManagerSelector):
 
+    """Returns a shuffled list of interesting_managers
+
+    By default this strategy is used by the interchange. Works well
+    in distributing workloads equally across all availble compute
+    resources. The random workload strategy is not effective in
+    conjunction with elastic scaling behavior as the even task
+    distribution does not allow the scaling down of blocks, leading
+    to wasted resource consumption.
+    """
+
     def sort_managers(self, ready_managers: Dict[bytes, ManagerRecord], manager_list: Set[bytes]) -> List[bytes]:
         c_manager_list = list(manager_list)
         random.shuffle(c_manager_list)
         return c_manager_list
+
+
+class BlockIdManagerSelector(ManagerSelector):
+
+    """Returns an interesting_managers list sorted by block ID
+
+    Observations:
+    1. BlockID manager selector helps with workloads that see a varying
+       amount of tasks over time. New blocks are prioritized with the
+       blockID manager selector, when used with 'htex_auto_scaling', results
+       in compute cost savings.
+
+    2. Doesn't really work with bag-of-tasks workloads. When all the tasks
+       are put into the queue upfront, all blocks operate at near full
+       utilization for the majority of the workload, which task goes where
+       doesn't really matter.
+    """
+
+    def sort_managers(self, ready_managers: Dict[bytes, ManagerRecord], manager_list: Set[bytes]) -> List[bytes]:
+        return sorted(manager_list, key=lambda x: (ready_managers[x]['block_id'] is not None, ready_managers[x]['block_id']))
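For illustration, a sketch of wiring the new selector into an executor definition; the label and provider settings are placeholder values, not taken from this diff:

from parsl.config import Config
from parsl.executors import HighThroughputExecutor
from parsl.executors.high_throughput.manager_selector import BlockIdManagerSelector
from parsl.providers import LocalProvider

config = Config(
    executors=[
        HighThroughputExecutor(
            label="htex_blockid",
            # Prioritize newer blocks, which (per the docstring above) pairs
            # well with 'htex_auto_scaling' so idle blocks can scale down.
            manager_selector=BlockIdManagerSelector(),
            provider=LocalProvider(init_blocks=1, max_blocks=4),
        )
    ],
)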
parsl/executors/high_throughput/mpi_prefix_composer.py
CHANGED
@@ -1,5 +1,7 @@
 import logging
-from typing import Dict, List,
+from typing import Dict, List, Tuple
+
+from parsl.executors.errors import InvalidResourceSpecification
 
 logger = logging.getLogger(__name__)
 
@@ -8,27 +10,6 @@ VALID_LAUNCHERS = ('srun',
                    'mpiexec')
 
 
-class MissingResourceSpecification(Exception):
-    """Exception raised when input is not supplied a resource specification"""
-
-    def __init__(self, reason: str):
-        self.reason = reason
-
-    def __str__(self):
-        return f"Missing resource specification: {self.reason}"
-
-
-class InvalidResourceSpecification(Exception):
-    """Exception raised when Invalid input is supplied via resource specification"""
-
-    def __init__(self, invalid_keys: Set[str], message: str = ''):
-        self.invalid_keys = invalid_keys
-        self.message = message
-
-    def __str__(self):
-        return f"Invalid resource specification options supplied: {self.invalid_keys} {self.message}"
-
-
 def validate_resource_spec(resource_spec: Dict[str, str]):
     """Basic validation of keys in the resource_spec
 
@@ -40,7 +21,8 @@ def validate_resource_spec(resource_spec: Dict[str, str]):
     # empty resource_spec when mpi_mode is set causes parsl to hang
     # ref issue #3427
     if len(user_keys) == 0:
-        raise
+        raise InvalidResourceSpecification(user_keys,
+                                           'MPI mode requires optional parsl_resource_specification keyword argument to be configured')
 
     legal_keys = set(("ranks_per_node",
                       "num_nodes",
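With this change an empty parsl_resource_specification in MPI mode now fails fast with InvalidResourceSpecification rather than the removed MissingResourceSpecification. A minimal sketch of a valid specification, using keys that the tests elsewhere in this diff treat as legal (the app itself is illustrative, not part of this release):

from parsl import bash_app

@bash_app
def mpi_hello(parsl_resource_specification={}):
    return "hostname"

spec = {"num_nodes": 2, "ranks_per_node": 4}
future = mpi_hello(parsl_resource_specification=spec)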
parsl/executors/high_throughput/process_worker_pool.py
CHANGED
@@ -362,7 +362,7 @@ class Manager:
             if tasks == HEARTBEAT_CODE:
                 logger.debug("Got heartbeat from interchange")
             elif tasks == DRAINED_CODE:
-                logger.info("Got
+                logger.info("Got fully drained message from interchange - setting kill flag")
                 kill_event.set()
             else:
                 task_recv_counter += len(tasks)
parsl/executors/threads.py
CHANGED
@@ -6,7 +6,7 @@ import typeguard
 
 from parsl.data_provider.staging import Staging
 from parsl.executors.base import ParslExecutor
-from parsl.executors.errors import
+from parsl.executors.errors import InvalidResourceSpecification
 from parsl.utils import RepresentationMixin
 
 logger = logging.getLogger(__name__)
@@ -54,7 +54,8 @@ class ThreadPoolExecutor(ParslExecutor, RepresentationMixin):
         if resource_specification:
             logger.error("Ignoring the resource specification. "
                          "Parsl resource specification is not supported in ThreadPool Executor.")
-            raise
+            raise InvalidResourceSpecification(set(resource_specification.keys()),
+                                               "Parsl resource specification is not supported in ThreadPool Executor.")
 
         return self.executor.submit(func, *args, **kwargs)
parsl/executors/workqueue/executor.py
CHANGED
@@ -28,7 +28,7 @@ import parsl.utils as putils
 from parsl.data_provider.files import File
 from parsl.data_provider.staging import Staging
 from parsl.errors import OptionalModuleMissing
-from parsl.executors.errors import ExecutorError
+from parsl.executors.errors import ExecutorError, InvalidResourceSpecification
 from parsl.executors.status_handling import BlockProviderExecutor
 from parsl.executors.workqueue import exec_parsl_function
 from parsl.process_loggers import wrap_with_logs
@@ -419,7 +419,7 @@ class WorkQueueExecutor(BlockProviderExecutor, putils.RepresentationMixin):
                 message = "Task resource specification only accepts these types of resources: {}".format(
                     ', '.join(acceptable_fields))
                 logger.error(message)
-                raise
+                raise InvalidResourceSpecification(keys, message)
 
         # this checks that either all of the required resource types are specified, or
         # that none of them are: the `required_resource_types` are not actually required,
@@ -430,9 +430,10 @@ class WorkQueueExecutor(BlockProviderExecutor, putils.RepresentationMixin):
                 logger.error("Running with `autolabel=False`. In this mode, "
                              "task resource specification requires "
                              "three resources to be specified simultaneously: cores, memory, and disk")
-                raise
-
-
+                raise InvalidResourceSpecification(keys,
+                                                   "Task resource specification requires "
+                                                   "three resources to be specified simultaneously: cores, memory, and disk. "
+                                                   "Try setting autolabel=True if you are unsure of the resource usage")
 
         for k in keys:
             if k == 'cores':
parsl/providers/kubernetes/kube.py
CHANGED
@@ -1,10 +1,5 @@
 import logging
-import
-
-from parsl.providers.kubernetes.template import template_string
-
-logger = logging.getLogger(__name__)
-
+import uuid
 from typing import Any, Dict, List, Optional, Tuple
 
 import typeguard
@@ -12,7 +7,8 @@ import typeguard
 from parsl.errors import OptionalModuleMissing
 from parsl.jobs.states import JobState, JobStatus
 from parsl.providers.base import ExecutionProvider
-from parsl.
+from parsl.providers.kubernetes.template import template_string
+from parsl.utils import RepresentationMixin, sanitize_dns_subdomain_rfc1123
 
 try:
     from kubernetes import client, config
@@ -20,6 +16,8 @@ try:
 except (ImportError, NameError, FileNotFoundError):
     _kubernetes_enabled = False
 
+logger = logging.getLogger(__name__)
+
 translate_table = {
     'Running': JobState.RUNNING,
     'Pending': JobState.PENDING,
@@ -161,7 +159,7 @@ class KubernetesProvider(ExecutionProvider, RepresentationMixin):
         self.resources: Dict[object, Dict[str, Any]]
         self.resources = {}
 
-    def submit(self, cmd_string, tasks_per_node, job_name="parsl"):
+    def submit(self, cmd_string: str, tasks_per_node: int, job_name: str = "parsl.kube"):
         """ Submit a job
         Args:
              - cmd_string  :(String) - Name of the container to initiate
@@ -173,15 +171,19 @@ class KubernetesProvider(ExecutionProvider, RepresentationMixin):
         Returns:
              - job_id: (string) Identifier for the job
         """
+        job_id = uuid.uuid4().hex[:8]
 
-
-
-
-
-
-
-
-
+        pod_name = self.pod_name or job_name
+        try:
+            pod_name = sanitize_dns_subdomain_rfc1123(pod_name)
+        except ValueError:
+            logger.warning(
+                f"Invalid pod name '{pod_name}' for job '{job_id}', falling back to 'parsl.kube'"
+            )
+            pod_name = "parsl.kube"
+        pod_name = pod_name[:253 - 1 - len(job_id)]  # Leave room for the job ID
+        pod_name = pod_name.rstrip(".-")  # Remove trailing dot or hyphen after trim
+        pod_name = f"{pod_name}.{job_id}"
 
         formatted_cmd = template_string.format(command=cmd_string,
                                                worker_init=self.worker_init)
@@ -189,14 +191,14 @@ class KubernetesProvider(ExecutionProvider, RepresentationMixin):
         logger.debug("Pod name: %s", pod_name)
         self._create_pod(image=self.image,
                          pod_name=pod_name,
-
+                         job_id=job_id,
                          cmd_string=formatted_cmd,
                          volumes=self.persistent_volumes,
                          service_account_name=self.service_account_name,
                          annotations=self.annotations)
-        self.resources[
+        self.resources[job_id] = {'status': JobStatus(JobState.RUNNING), 'pod_name': pod_name}
 
-        return
+        return job_id
 
     def status(self, job_ids):
         """ Get the status of a list of jobs identified by the job identifiers
@@ -212,6 +214,9 @@ class KubernetesProvider(ExecutionProvider, RepresentationMixin):
             self._status()
         return [self.resources[jid]['status'] for jid in job_ids]
 
+    def _get_pod_name(self, job_id: str) -> str:
+        return self.resources[job_id]['pod_name']
+
     def cancel(self, job_ids):
         """ Cancels the jobs specified by a list of job ids
         Args:
@@ -221,7 +226,8 @@ class KubernetesProvider(ExecutionProvider, RepresentationMixin):
         """
         for job in job_ids:
             logger.debug("Terminating job/pod: {0}".format(job))
-            self.
+            pod_name = self._get_pod_name(job)
+            self._delete_pod(pod_name)
 
             self.resources[job]['status'] = JobStatus(JobState.CANCELLED)
         rets = [True for i in job_ids]
@@ -242,7 +248,8 @@ class KubernetesProvider(ExecutionProvider, RepresentationMixin):
         for jid in to_poll_job_ids:
             phase = None
             try:
-
+                pod_name = self._get_pod_name(jid)
+                pod = self.kube_client.read_namespaced_pod(name=pod_name, namespace=self.namespace)
             except Exception:
                 logger.exception("Failed to poll pod {} status, most likely because pod was terminated".format(jid))
                 if self.resources[jid]['status'] is JobStatus(JobState.RUNNING):
@@ -257,10 +264,10 @@ class KubernetesProvider(ExecutionProvider, RepresentationMixin):
                 self.resources[jid]['status'] = JobStatus(status)
 
     def _create_pod(self,
-                    image,
-                    pod_name,
-
-                    port=80,
+                    image: str,
+                    pod_name: str,
+                    job_id: str,
+                    port: int = 80,
                     cmd_string=None,
                     volumes=[],
                     service_account_name=None,
@@ -269,7 +276,7 @@ class KubernetesProvider(ExecutionProvider, RepresentationMixin):
         Args:
              - image (string) : Docker image to launch
             - pod_name (string) : Name of the pod
-            -
+            - job_id (string) : Job ID
         KWargs:
             - port (integer) : Container port
         Returns:
@@ -299,7 +306,7 @@ class KubernetesProvider(ExecutionProvider, RepresentationMixin):
         )
         # Configure Pod template container
         container = client.V1Container(
-            name=
+            name=job_id,
             image=image,
             resources=resources,
             ports=[client.V1ContainerPort(container_port=port)],
@@ -322,7 +329,7 @@ class KubernetesProvider(ExecutionProvider, RepresentationMixin):
                                                                    claim_name=volume[0])))
 
         metadata = client.V1ObjectMeta(name=pod_name,
-                                       labels={"
+                                       labels={"parsl-job-id": job_id},
                                        annotations=annotations)
         spec = client.V1PodSpec(containers=[container],
                                 image_pull_secrets=[secret],
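The submit() rewrite above derives the pod name from the job name: an 8-character job ID is drawn from uuid4, the name is sanitized to an RFC 1123 subdomain, truncated to leave room for the ID, and the ID is appended. A worked sketch of that logic outside the provider (the input string and the shown ID are made-up examples):

import uuid
from parsl.utils import sanitize_dns_subdomain_rfc1123

job_name = "My Parsl Job!"
job_id = uuid.uuid4().hex[:8]                          # e.g. '1a2b3c4d', random each run
pod_name = sanitize_dns_subdomain_rfc1123(job_name)    # 'my-parsl-job'
pod_name = pod_name[:253 - 1 - len(job_id)]            # leave room for the job ID
pod_name = pod_name.rstrip(".-")
print(f"{pod_name}.{job_id}")                          # 'my-parsl-job.1a2b3c4d'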
parsl/tests/conftest.py
CHANGED
@@ -58,7 +58,7 @@ def tmpd_cwd_session(pytestconfig):
 
     config = re.sub(r"[^A-z0-9_-]+", "_", pytestconfig.getoption('config')[0])
     cwd = pathlib.Path(os.getcwd())
-    pytest_dir = cwd / "
+    pytest_dir = cwd / "pytest-parsl"
     pytest_dir.mkdir(mode=0o700, parents=True, exist_ok=True)
 
     test_dir_prefix = "parsltest-"
@@ -1,11 +1,9 @@
|
|
1
1
|
import parsl
|
2
2
|
from parsl.app.app import python_app
|
3
3
|
from parsl.executors import WorkQueueExecutor
|
4
|
-
from parsl.executors.errors import
|
4
|
+
from parsl.executors.errors import InvalidResourceSpecification
|
5
5
|
from parsl.executors.high_throughput.executor import HighThroughputExecutor
|
6
|
-
from parsl.executors.
|
7
|
-
InvalidResourceSpecification,
|
8
|
-
)
|
6
|
+
from parsl.executors.threads import ThreadPoolExecutor
|
9
7
|
|
10
8
|
|
11
9
|
@python_app
|
@@ -27,11 +25,10 @@ def test_resource(n=2):
|
|
27
25
|
try:
|
28
26
|
fut.result()
|
29
27
|
except InvalidResourceSpecification:
|
30
|
-
assert
|
31
|
-
|
32
|
-
|
33
|
-
|
34
|
-
assert isinstance(e, ExecutorError)
|
28
|
+
assert (
|
29
|
+
isinstance(executor, HighThroughputExecutor) or
|
30
|
+
isinstance(executor, WorkQueueExecutor) or
|
31
|
+
isinstance(executor, ThreadPoolExecutor))
|
35
32
|
|
36
33
|
# Specify resources with wrong types
|
37
34
|
# 'cpus' is incorrect, should be 'cores'
|
@@ -40,8 +37,7 @@ def test_resource(n=2):
|
|
40
37
|
try:
|
41
38
|
fut.result()
|
42
39
|
except InvalidResourceSpecification:
|
43
|
-
assert
|
44
|
-
|
45
|
-
|
46
|
-
|
47
|
-
assert isinstance(e, ExecutorError)
|
40
|
+
assert (
|
41
|
+
isinstance(executor, HighThroughputExecutor) or
|
42
|
+
isinstance(executor, WorkQueueExecutor) or
|
43
|
+
isinstance(executor, ThreadPoolExecutor))
|
parsl/tests/test_htex/test_block_manager_selector_unit.py
ADDED
@@ -0,0 +1,20 @@
+import pytest
+
+from parsl.executors.high_throughput.manager_record import ManagerRecord
+from parsl.executors.high_throughput.manager_selector import BlockIdManagerSelector
+
+
+@pytest.mark.local
+def test_sort_managers():
+    ready_managers = {
+        b'manager1': {'block_id': 1},
+        b'manager2': {'block_id': None},
+        b'manager3': {'block_id': 3},
+        b'manager4': {'block_id': 2}
+    }
+
+    manager_list = {b'manager1', b'manager2', b'manager3', b'manager4'}
+    expected_sorted_list = [b'manager2', b'manager1', b'manager4', b'manager3']
+    manager_selector = BlockIdManagerSelector()
+    sorted_managers = manager_selector.sort_managers(ready_managers, manager_list)
+    assert sorted_managers == expected_sorted_list
@@ -13,7 +13,9 @@ from parsl.providers import LocalProvider
|
|
13
13
|
# based around the expected drain period: the drain period
|
14
14
|
# is TIME_CONST seconds, and the single executed task will
|
15
15
|
# last twice that many number of seconds.
|
16
|
-
TIME_CONST =
|
16
|
+
TIME_CONST = 4
|
17
|
+
|
18
|
+
CONNECTED_MANAGERS_POLL_MS = 100
|
17
19
|
|
18
20
|
|
19
21
|
def local_config():
|
@@ -52,7 +54,7 @@ def test_drain(try_assert):
|
|
52
54
|
|
53
55
|
# wait till we have a block running...
|
54
56
|
|
55
|
-
try_assert(lambda: len(htex.connected_managers()) == 1)
|
57
|
+
try_assert(lambda: len(htex.connected_managers()) == 1, check_period_ms=CONNECTED_MANAGERS_POLL_MS)
|
56
58
|
|
57
59
|
managers = htex.connected_managers()
|
58
60
|
assert managers[0]['active'], "The manager should be active"
|
@@ -63,7 +65,7 @@ def test_drain(try_assert):
|
|
63
65
|
time.sleep(TIME_CONST)
|
64
66
|
|
65
67
|
# this assert should happen *very fast* after the above delay...
|
66
|
-
try_assert(lambda: htex.connected_managers()[0]['draining'], timeout_ms=500)
|
68
|
+
try_assert(lambda: htex.connected_managers()[0]['draining'], timeout_ms=500, check_period_ms=CONNECTED_MANAGERS_POLL_MS)
|
67
69
|
|
68
70
|
# and the test task should still be running...
|
69
71
|
assert not fut.done(), "The test task should still be running"
|
@@ -76,4 +78,4 @@ def test_drain(try_assert):
|
|
76
78
|
# connected managers.
|
77
79
|
# As with the above draining assert, this should happen very fast after
|
78
80
|
# the task ends.
|
79
|
-
try_assert(lambda: len(htex.connected_managers()) == 0, timeout_ms=500)
|
81
|
+
try_assert(lambda: len(htex.connected_managers()) == 0, timeout_ms=500, check_period_ms=CONNECTED_MANAGERS_POLL_MS)
|
@@ -0,0 +1,53 @@
|
|
1
|
+
import time
|
2
|
+
|
3
|
+
import pytest
|
4
|
+
|
5
|
+
import parsl
|
6
|
+
from parsl.app.app import bash_app, python_app
|
7
|
+
from parsl.channels import LocalChannel
|
8
|
+
from parsl.config import Config
|
9
|
+
from parsl.executors import HighThroughputExecutor
|
10
|
+
from parsl.executors.high_throughput.manager_selector import (
|
11
|
+
BlockIdManagerSelector,
|
12
|
+
ManagerSelector,
|
13
|
+
)
|
14
|
+
from parsl.launchers import WrappedLauncher
|
15
|
+
from parsl.providers import LocalProvider
|
16
|
+
from parsl.usage_tracking.levels import LEVEL_1
|
17
|
+
|
18
|
+
BLOCK_COUNT = 2
|
19
|
+
|
20
|
+
|
21
|
+
@parsl.python_app
|
22
|
+
def get_worker_pid():
|
23
|
+
import os
|
24
|
+
return os.environ.get('PARSL_WORKER_BLOCK_ID')
|
25
|
+
|
26
|
+
|
27
|
+
@pytest.mark.local
|
28
|
+
def test_block_id_selection(try_assert):
|
29
|
+
htex = HighThroughputExecutor(
|
30
|
+
label="htex_local",
|
31
|
+
max_workers_per_node=1,
|
32
|
+
manager_selector=BlockIdManagerSelector(),
|
33
|
+
provider=LocalProvider(
|
34
|
+
channel=LocalChannel(),
|
35
|
+
init_blocks=BLOCK_COUNT,
|
36
|
+
max_blocks=BLOCK_COUNT,
|
37
|
+
min_blocks=BLOCK_COUNT,
|
38
|
+
),
|
39
|
+
)
|
40
|
+
|
41
|
+
config = Config(
|
42
|
+
executors=[htex],
|
43
|
+
usage_tracking=LEVEL_1,
|
44
|
+
)
|
45
|
+
|
46
|
+
with parsl.load(config):
|
47
|
+
blockids = []
|
48
|
+
try_assert(lambda: len(htex.connected_managers()) == BLOCK_COUNT, timeout_ms=20000)
|
49
|
+
for i in range(10):
|
50
|
+
future = get_worker_pid()
|
51
|
+
blockids.append(future.result())
|
52
|
+
|
53
|
+
assert all(blockid == "1" for blockid in blockids)
|
parsl/tests/test_htex/test_resource_spec_validation.py
CHANGED
@@ -4,9 +4,7 @@ from unittest import mock
 import pytest
 
 from parsl.executors import HighThroughputExecutor
-from parsl.executors.
-    InvalidResourceSpecification,
-)
+from parsl.executors.errors import InvalidResourceSpecification
 
 
 def double(x):
@@ -32,6 +30,13 @@ def test_resource_spec_validation():
     assert ret_val is None
 
 
+@pytest.mark.local
+def test_resource_spec_validation_one_key():
+    htex = HighThroughputExecutor()
+    ret_val = htex.validate_resource_spec({"priority": 2})
+    assert ret_val is None
+
+
 @pytest.mark.local
 def test_resource_spec_validation_bad_keys():
     htex = HighThroughputExecutor()
@@ -8,9 +8,7 @@ import pytest
|
|
8
8
|
import parsl
|
9
9
|
from parsl import Config, bash_app, python_app
|
10
10
|
from parsl.executors import MPIExecutor
|
11
|
-
from parsl.executors.
|
12
|
-
MissingResourceSpecification,
|
13
|
-
)
|
11
|
+
from parsl.executors.errors import InvalidResourceSpecification
|
14
12
|
from parsl.launchers import SimpleLauncher
|
15
13
|
from parsl.providers import LocalProvider
|
16
14
|
|
@@ -185,6 +183,6 @@ def test_simulated_load(rounds: int = 100):
|
|
185
183
|
@pytest.mark.local
|
186
184
|
def test_missing_resource_spec():
|
187
185
|
|
188
|
-
with pytest.raises(
|
186
|
+
with pytest.raises(InvalidResourceSpecification):
|
189
187
|
future = mock_app(sleep_dur=0.4)
|
190
188
|
future.result(timeout=10)
|
@@ -10,12 +10,9 @@ from unittest import mock
|
|
10
10
|
import pytest
|
11
11
|
|
12
12
|
from parsl.app.app import python_app
|
13
|
+
from parsl.executors.errors import InvalidResourceSpecification
|
13
14
|
from parsl.executors.high_throughput.executor import HighThroughputExecutor
|
14
15
|
from parsl.executors.high_throughput.mpi_executor import MPIExecutor
|
15
|
-
from parsl.executors.high_throughput.mpi_prefix_composer import (
|
16
|
-
InvalidResourceSpecification,
|
17
|
-
MissingResourceSpecification,
|
18
|
-
)
|
19
16
|
from parsl.executors.high_throughput.mpi_resource_management import (
|
20
17
|
get_nodes_in_batchjob,
|
21
18
|
get_pbs_hosts_list,
|
@@ -105,7 +102,7 @@ def test_top_level():
|
|
105
102
|
({"num_nodes": 2, "ranks_per_node": 1}, None),
|
106
103
|
({"launcher_options": "--debug_foo"}, None),
|
107
104
|
({"num_nodes": 2, "BAD_OPT": 1}, InvalidResourceSpecification),
|
108
|
-
({},
|
105
|
+
({}, InvalidResourceSpecification),
|
109
106
|
)
|
110
107
|
)
|
111
108
|
def test_mpi_resource_spec(resource_spec: Dict, exception):
|
@@ -0,0 +1,102 @@
|
|
1
|
+
import re
|
2
|
+
from unittest import mock
|
3
|
+
|
4
|
+
import pytest
|
5
|
+
|
6
|
+
from parsl.providers.kubernetes.kube import KubernetesProvider
|
7
|
+
from parsl.tests.test_utils.test_sanitize_dns import DNS_SUBDOMAIN_REGEX
|
8
|
+
|
9
|
+
_MOCK_BASE = "parsl.providers.kubernetes.kube"
|
10
|
+
|
11
|
+
|
12
|
+
@pytest.fixture(autouse=True)
|
13
|
+
def mock_kube_config():
|
14
|
+
with mock.patch(f"{_MOCK_BASE}.config") as mock_config:
|
15
|
+
mock_config.load_kube_config.return_value = None
|
16
|
+
yield mock_config
|
17
|
+
|
18
|
+
|
19
|
+
@pytest.fixture
|
20
|
+
def mock_kube_client():
|
21
|
+
mock_client = mock.MagicMock()
|
22
|
+
with mock.patch(f"{_MOCK_BASE}.client.CoreV1Api") as mock_api:
|
23
|
+
mock_api.return_value = mock_client
|
24
|
+
yield mock_client
|
25
|
+
|
26
|
+
|
27
|
+
@pytest.mark.local
|
28
|
+
def test_submit_happy_path(mock_kube_client: mock.MagicMock):
|
29
|
+
image = "test-image"
|
30
|
+
namespace = "test-namespace"
|
31
|
+
cmd_string = "test-command"
|
32
|
+
volumes = [("test-volume", "test-mount-path")]
|
33
|
+
service_account_name = "test-service-account"
|
34
|
+
annotations = {"test-annotation": "test-value"}
|
35
|
+
max_cpu = 2
|
36
|
+
max_mem = "2Gi"
|
37
|
+
init_cpu = 1
|
38
|
+
init_mem = "1Gi"
|
39
|
+
provider = KubernetesProvider(
|
40
|
+
image=image,
|
41
|
+
persistent_volumes=volumes,
|
42
|
+
namespace=namespace,
|
43
|
+
service_account_name=service_account_name,
|
44
|
+
annotations=annotations,
|
45
|
+
max_cpu=max_cpu,
|
46
|
+
max_mem=max_mem,
|
47
|
+
init_cpu=init_cpu,
|
48
|
+
init_mem=init_mem,
|
49
|
+
)
|
50
|
+
|
51
|
+
job_name = "test.job.name"
|
52
|
+
job_id = provider.submit(cmd_string=cmd_string, tasks_per_node=1, job_name=job_name)
|
53
|
+
|
54
|
+
assert job_id in provider.resources
|
55
|
+
assert mock_kube_client.create_namespaced_pod.call_count == 1
|
56
|
+
|
57
|
+
call_args = mock_kube_client.create_namespaced_pod.call_args[1]
|
58
|
+
pod = call_args["body"]
|
59
|
+
container = pod.spec.containers[0]
|
60
|
+
volume = container.volume_mounts[0]
|
61
|
+
|
62
|
+
assert image == container.image
|
63
|
+
assert namespace == call_args["namespace"]
|
64
|
+
assert any(cmd_string in arg for arg in container.args)
|
65
|
+
assert volumes[0] == (volume.name, volume.mount_path)
|
66
|
+
assert service_account_name == pod.spec.service_account_name
|
67
|
+
assert annotations == pod.metadata.annotations
|
68
|
+
assert str(max_cpu) == container.resources.limits["cpu"]
|
69
|
+
assert max_mem == container.resources.limits["memory"]
|
70
|
+
assert str(init_cpu) == container.resources.requests["cpu"]
|
71
|
+
assert init_mem == container.resources.requests["memory"]
|
72
|
+
assert job_id == pod.metadata.labels["parsl-job-id"]
|
73
|
+
assert job_id == container.name
|
74
|
+
assert f"{job_name}.{job_id}" == pod.metadata.name
|
75
|
+
|
76
|
+
|
77
|
+
@pytest.mark.local
|
78
|
+
@mock.patch(f"{_MOCK_BASE}.KubernetesProvider._create_pod")
|
79
|
+
@pytest.mark.parametrize("char", (".", "-"))
|
80
|
+
def test_submit_pod_name_includes_job_id(mock_create_pod: mock.MagicMock, char: str):
|
81
|
+
provider = KubernetesProvider(image="test-image")
|
82
|
+
|
83
|
+
job_name = "a." * 121 + f"a{char}" + "a" * 9
|
84
|
+
assert len(job_name) == 253 # Max length for pod name
|
85
|
+
job_id = provider.submit(cmd_string="test-command", tasks_per_node=1, job_name=job_name)
|
86
|
+
|
87
|
+
expected_pod_name = job_name[:253 - len(job_id) - 2] + f".{job_id}"
|
88
|
+
actual_pod_name = mock_create_pod.call_args[1]["pod_name"]
|
89
|
+
assert re.match(DNS_SUBDOMAIN_REGEX, actual_pod_name)
|
90
|
+
assert expected_pod_name == actual_pod_name
|
91
|
+
|
92
|
+
|
93
|
+
@pytest.mark.local
|
94
|
+
@mock.patch(f"{_MOCK_BASE}.KubernetesProvider._create_pod")
|
95
|
+
@mock.patch(f"{_MOCK_BASE}.logger")
|
96
|
+
@pytest.mark.parametrize("job_name", ("", ".", "-", "a.-.a", "$$$"))
|
97
|
+
def test_submit_invalid_job_name(mock_logger: mock.MagicMock, mock_create_pod: mock.MagicMock, job_name: str):
|
98
|
+
provider = KubernetesProvider(image="test-image")
|
99
|
+
job_id = provider.submit(cmd_string="test-command", tasks_per_node=1, job_name=job_name)
|
100
|
+
assert mock_logger.warning.call_count == 1
|
101
|
+
assert f"Invalid pod name '{job_name}' for job '{job_id}'" in mock_logger.warning.call_args[0][0]
|
102
|
+
assert f"parsl.kube.{job_id}" == mock_create_pod.call_args[1]["pod_name"]
|
@@ -0,0 +1,76 @@
|
|
1
|
+
import random
|
2
|
+
import re
|
3
|
+
|
4
|
+
import pytest
|
5
|
+
|
6
|
+
from parsl.utils import sanitize_dns_label_rfc1123, sanitize_dns_subdomain_rfc1123
|
7
|
+
|
8
|
+
# Ref: https://datatracker.ietf.org/doc/html/rfc1123
|
9
|
+
DNS_LABEL_REGEX = r'^[a-z0-9]([-a-z0-9]{0,61}[a-z0-9])?$'
|
10
|
+
DNS_SUBDOMAIN_REGEX = r'^[a-z0-9]([-a-z0-9]{0,61}[a-z0-9])?(\.[a-z0-9]([-a-z0-9]{0,61}[a-z0-9])?)*$'
|
11
|
+
|
12
|
+
test_labels = [
|
13
|
+
"example-label-123", # Valid label
|
14
|
+
"EXAMPLE", # Case sensitivity
|
15
|
+
"!@#example*", # Remove invalid characters
|
16
|
+
"--leading-and-trailing--", # Leading and trailing hyphens
|
17
|
+
"..leading.and.trailing..", # Leading and tailing dots
|
18
|
+
"multiple..dots", # Consecutive dots
|
19
|
+
"valid--label", # Consecutive hyphens
|
20
|
+
"a" * random.randint(64, 70), # Longer than 63 characters
|
21
|
+
f"{'a' * 62}-a", # Trailing hyphen at max length
|
22
|
+
]
|
23
|
+
|
24
|
+
|
25
|
+
def _generate_test_subdomains(num_subdomains: int):
|
26
|
+
subdomains = []
|
27
|
+
for _ in range(num_subdomains):
|
28
|
+
num_labels = random.randint(1, 5)
|
29
|
+
labels = [test_labels[random.randint(0, num_labels - 1)] for _ in range(num_labels)]
|
30
|
+
subdomain = ".".join(labels)
|
31
|
+
subdomains.append(subdomain)
|
32
|
+
return subdomains
|
33
|
+
|
34
|
+
|
35
|
+
@pytest.mark.local
|
36
|
+
@pytest.mark.parametrize("raw_string", test_labels)
|
37
|
+
def test_sanitize_dns_label_rfc1123(raw_string: str):
|
38
|
+
print(sanitize_dns_label_rfc1123(raw_string))
|
39
|
+
assert re.match(DNS_LABEL_REGEX, sanitize_dns_label_rfc1123(raw_string))
|
40
|
+
|
41
|
+
|
42
|
+
@pytest.mark.local
|
43
|
+
@pytest.mark.parametrize("raw_string", ("", "-", "@", "$$$"))
|
44
|
+
def test_sanitize_dns_label_rfc1123_empty(raw_string: str):
|
45
|
+
with pytest.raises(ValueError) as e_info:
|
46
|
+
sanitize_dns_label_rfc1123(raw_string)
|
47
|
+
assert str(e_info.value) == f"Sanitized DNS label is empty for input '{raw_string}'"
|
48
|
+
|
49
|
+
|
50
|
+
@pytest.mark.local
|
51
|
+
@pytest.mark.parametrize("raw_string", _generate_test_subdomains(10))
|
52
|
+
def test_sanitize_dns_subdomain_rfc1123(raw_string: str):
|
53
|
+
assert re.match(DNS_SUBDOMAIN_REGEX, sanitize_dns_subdomain_rfc1123(raw_string))
|
54
|
+
|
55
|
+
|
56
|
+
@pytest.mark.local
|
57
|
+
@pytest.mark.parametrize("char", ("-", "."))
|
58
|
+
def test_sanitize_dns_subdomain_rfc1123_trailing_non_alphanumeric_at_max_length(char: str):
|
59
|
+
raw_string = (f"{'a' * 61}." * 4) + f".aaaa{char}a"
|
60
|
+
assert re.match(DNS_SUBDOMAIN_REGEX, sanitize_dns_subdomain_rfc1123(raw_string))
|
61
|
+
|
62
|
+
|
63
|
+
@pytest.mark.local
|
64
|
+
@pytest.mark.parametrize("raw_string", ("", ".", "..."))
|
65
|
+
def test_sanitize_dns_subdomain_rfc1123_empty(raw_string: str):
|
66
|
+
with pytest.raises(ValueError) as e_info:
|
67
|
+
sanitize_dns_subdomain_rfc1123(raw_string)
|
68
|
+
assert str(e_info.value) == f"Sanitized DNS subdomain is empty for input '{raw_string}'"
|
69
|
+
|
70
|
+
|
71
|
+
@pytest.mark.local
|
72
|
+
@pytest.mark.parametrize(
|
73
|
+
"raw_string", ("a" * 253, "a" * random.randint(254, 300)), ids=("254 chars", ">253 chars")
|
74
|
+
)
|
75
|
+
def test_sanitize_dns_subdomain_rfc1123_max_length(raw_string: str):
|
76
|
+
assert len(sanitize_dns_subdomain_rfc1123(raw_string)) <= 253
|
parsl/utils.py
CHANGED
@@ -1,6 +1,7 @@
 import inspect
 import logging
 import os
+import re
 import shlex
 import subprocess
 import threading
@@ -380,3 +381,80 @@ class AutoCancelTimer(threading.Timer):
         exc_tb: Optional[TracebackType]
     ) -> None:
         self.cancel()
+
+
+def sanitize_dns_label_rfc1123(raw_string: str) -> str:
+    """Convert input string to a valid RFC 1123 DNS label.
+
+    Parameters
+    ----------
+    raw_string : str
+        String to sanitize.
+
+    Returns
+    -------
+    str
+        Sanitized string.
+
+    Raises
+    ------
+    ValueError
+        If the string is empty after sanitization.
+    """
+    # Convert to lowercase and replace non-alphanumeric characters with hyphen
+    sanitized = re.sub(r'[^a-z0-9]', '-', raw_string.lower())
+
+    # Remove consecutive hyphens
+    sanitized = re.sub(r'-+', '-', sanitized)
+
+    # DNS label cannot exceed 63 characters
+    sanitized = sanitized[:63]
+
+    # Strip after trimming to avoid trailing hyphens
+    sanitized = sanitized.strip("-")
+
+    if not sanitized:
+        raise ValueError(f"Sanitized DNS label is empty for input '{raw_string}'")
+
+    return sanitized
+
+
+def sanitize_dns_subdomain_rfc1123(raw_string: str) -> str:
+    """Convert input string to a valid RFC 1123 DNS subdomain.
+
+    Parameters
+    ----------
+    raw_string : str
+        String to sanitize.
+
+    Returns
+    -------
+    str
+        Sanitized string.
+
+    Raises
+    ------
+    ValueError
+        If the string is empty after sanitization.
+    """
+    segments = raw_string.split('.')
+
+    sanitized_segments = []
+    for segment in segments:
+        if not segment:
+            continue
+        sanitized_segment = sanitize_dns_label_rfc1123(segment)
+        sanitized_segments.append(sanitized_segment)
+
+    sanitized = '.'.join(sanitized_segments)
+
+    # DNS subdomain cannot exceed 253 characters
+    sanitized = sanitized[:253]
+
+    # Strip after trimming to avoid trailing dots or hyphens
+    sanitized = sanitized.strip(".-")
+
+    if not sanitized:
+        raise ValueError(f"Sanitized DNS subdomain is empty for input '{raw_string}'")
+
+    return sanitized
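A few illustrative inputs for the new helpers (the strings are made-up examples; the expected outputs follow from the rules in the docstrings above):

from parsl.utils import sanitize_dns_label_rfc1123, sanitize_dns_subdomain_rfc1123

print(sanitize_dns_label_rfc1123("EXAMPLE_Label--123"))     # 'example-label-123'
print(sanitize_dns_subdomain_rfc1123("..Alpha..Beta_9.."))  # 'alpha.beta-9'

try:
    sanitize_dns_label_rfc1123("$$$")   # nothing survives sanitization
except ValueError as err:
    print(err)                          # Sanitized DNS label is empty for input '$$$'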
parsl/version.py
CHANGED
@@ -6,7 +6,6 @@ import os
|
|
6
6
|
import pickle
|
7
7
|
import platform
|
8
8
|
import queue
|
9
|
-
import signal
|
10
9
|
import sys
|
11
10
|
import threading
|
12
11
|
import time
|
@@ -252,13 +251,7 @@ class Interchange:
|
|
252
251
|
try:
|
253
252
|
command_req = self.command_channel.recv_pyobj()
|
254
253
|
logger.debug("Received command request: {}".format(command_req))
|
255
|
-
if command_req == "
|
256
|
-
outstanding = self.pending_task_queue.qsize()
|
257
|
-
for manager in self._ready_managers.values():
|
258
|
-
outstanding += len(manager['tasks'])
|
259
|
-
reply = outstanding
|
260
|
-
|
261
|
-
elif command_req == "CONNECTED_BLOCKS":
|
254
|
+
if command_req == "CONNECTED_BLOCKS":
|
262
255
|
reply = self.connected_block_history
|
263
256
|
|
264
257
|
elif command_req == "WORKERS":
|
@@ -319,16 +312,6 @@ class Interchange:
|
|
319
312
|
""" Start the interchange
|
320
313
|
"""
|
321
314
|
|
322
|
-
# If a user workflow has set its own signal handler for sigterm, that
|
323
|
-
# handler will be inherited by the interchange process because it is
|
324
|
-
# launched as a multiprocessing fork process.
|
325
|
-
# That can interfere with the interchange shutdown mechanism, which is
|
326
|
-
# to receive a SIGTERM and exit immediately.
|
327
|
-
# See Parsl issue #2343 (Threads and multiprocessing cannot be
|
328
|
-
# intermingled without deadlocks) which talks about other fork-related
|
329
|
-
# parent-process-inheritance problems.
|
330
|
-
signal.signal(signal.SIGTERM, signal.SIG_DFL)
|
331
|
-
|
332
315
|
logger.info("Starting main interchange method")
|
333
316
|
|
334
317
|
if self.hub_address is not None and self.hub_zmq_port is not None:
|
@@ -549,7 +532,6 @@ class Interchange:
|
|
549
532
|
monitoring_radio.send(r['payload'])
|
550
533
|
elif r['type'] == 'heartbeat':
|
551
534
|
logger.debug("Manager %r sent heartbeat via results connection", manager_id)
|
552
|
-
b_messages.append((p_message, r))
|
553
535
|
else:
|
554
536
|
logger.error("Interchange discarding result_queue message of unknown type: %s", r["type"])
|
555
537
|
|
@@ -362,7 +362,7 @@ class Manager:
|
|
362
362
|
if tasks == HEARTBEAT_CODE:
|
363
363
|
logger.debug("Got heartbeat from interchange")
|
364
364
|
elif tasks == DRAINED_CODE:
|
365
|
-
logger.info("Got
|
365
|
+
logger.info("Got fully drained message from interchange - setting kill flag")
|
366
366
|
kill_event.set()
|
367
367
|
else:
|
368
368
|
task_recv_counter += len(tasks)
|
{parsl-2024.10.7.dist-info → parsl-2024.10.21.dist-info}/METADATA
CHANGED
@@ -1,9 +1,9 @@
 Metadata-Version: 2.1
 Name: parsl
-Version: 2024.10.
+Version: 2024.10.21
 Summary: Simple data dependent workflows in Python
 Home-page: https://github.com/Parsl/parsl
-Download-URL: https://github.com/Parsl/parsl/archive/2024.10.
+Download-URL: https://github.com/Parsl/parsl/archive/2024.10.21.tar.gz
 Author: The Parsl Team
 Author-email: parsl@googlegroups.com
 License: Apache 2.0
@@ -11,12 +11,11 @@ Keywords: Workflows,Scientific computing
 Classifier: Development Status :: 5 - Production/Stable
 Classifier: Intended Audience :: Developers
 Classifier: License :: OSI Approved :: Apache Software License
-Classifier: Programming Language :: Python :: 3.8
 Classifier: Programming Language :: Python :: 3.9
 Classifier: Programming Language :: Python :: 3.10
 Classifier: Programming Language :: Python :: 3.11
 Classifier: Programming Language :: Python :: 3.12
-Requires-Python: >=3.
+Requires-Python: >=3.9.0
 License-File: LICENSE
 Requires-Dist: pyzmq>=17.1.2
 Requires-Dist: typeguard!=3.*,<5,>=2.10
@@ -7,8 +7,8 @@ parsl/log_utils.py,sha256=u14Fkl5eDfS4HMpl0JjseNNPdbvPaugWPRQj1_af_Zo,3273
|
|
7
7
|
parsl/multiprocessing.py,sha256=MyaEcEq-Qf860u7V98u-PZrPNdtzOZL_NW6EhIJnmfQ,1937
|
8
8
|
parsl/process_loggers.py,sha256=uQ7Gd0W72Jz7rrcYlOMfLsAEhkRltxXJL2MgdduJjEw,1136
|
9
9
|
parsl/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
10
|
-
parsl/utils.py,sha256=
|
11
|
-
parsl/version.py,sha256=
|
10
|
+
parsl/utils.py,sha256=rMLKeadEsQ9jGwm4ogqiLIXPS3zOAyfznQJXVkJSY8E,13107
|
11
|
+
parsl/version.py,sha256=0V6_ogkULPZVJXRQqKVT9TwsP2SpvX2cDNjSb1ouhPk,131
|
12
12
|
parsl/app/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
13
13
|
parsl/app/app.py,sha256=0gbM4AH2OtFOLsv07I5nglpElcwMSOi-FzdZZfrk7So,8532
|
14
14
|
parsl/app/bash.py,sha256=jm2AvePlCT9DZR7H_4ANDWxatp5dN_22FUlT_gWhZ-g,5528
|
@@ -71,25 +71,25 @@ parsl/dataflow/states.py,sha256=hV6mfv-y4A6xrujeQglcomnfEs7y3Xm2g6JFwC6dvgQ,2612
|
|
71
71
|
parsl/dataflow/taskrecord.py,sha256=-FuujdZQ1y5GSc-PJ91QKGT-Kp0lrg70MFDoxpbWI1Q,3113
|
72
72
|
parsl/executors/__init__.py,sha256=Cg8e-F2NUaBD8A9crDAXKCSdoBEwQVIdgm4FlXd-wvk,476
|
73
73
|
parsl/executors/base.py,sha256=5A59mCXPjYNCep9JgfvIjBdZvGV-1mNVHklr-ZIEojg,5200
|
74
|
-
parsl/executors/errors.py,sha256=
|
74
|
+
parsl/executors/errors.py,sha256=ZxL3nK5samPos8Xixo_jpRtPIiRJfZ5D397_qaXj2g0,2515
|
75
75
|
parsl/executors/status_handling.py,sha256=nxbkiGr6f3xDc0nsUeSrMMxlj7UD32K7nOLCLzfthDs,15416
|
76
|
-
parsl/executors/threads.py,sha256=
|
76
|
+
parsl/executors/threads.py,sha256=_LA5NA3GSvtjDend-1HVpjoDoNHHW13rAD0CET99fjQ,3463
|
77
77
|
parsl/executors/flux/__init__.py,sha256=P9grTTeRPXfqXurFhlSS7XhmE6tTbnCnyQ1f9b-oYHE,136
|
78
78
|
parsl/executors/flux/execute_parsl_task.py,sha256=gRN7F4HhdrKQ-bvn4wXrquBzFOp_9WF88hMIeUaRg5I,1553
|
79
79
|
parsl/executors/flux/executor.py,sha256=8_xakLUu5zNJAHL0LbeTCFEWqWzRK1eE-3ep4GIIIrY,17017
|
80
80
|
parsl/executors/flux/flux_instance_manager.py,sha256=5T3Rp7ZM-mlT0Pf0Gxgs5_YmnaPrSF9ec7zvRfLfYJw,2129
|
81
81
|
parsl/executors/high_throughput/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
82
82
|
parsl/executors/high_throughput/errors.py,sha256=Sak8e8UpiEcXefUjMHbhyXc4Rn7kJtOoh7L8wreBQdk,1638
|
83
|
-
parsl/executors/high_throughput/executor.py,sha256=
|
84
|
-
parsl/executors/high_throughput/interchange.py,sha256=
|
83
|
+
parsl/executors/high_throughput/executor.py,sha256=_dff5USFQq7V89kEXEWd2OqgYJQfq9i1b2e8FYA-zow,37511
|
84
|
+
parsl/executors/high_throughput/interchange.py,sha256=elt48I-3WI4Wf5s7_3ECTw_fqqLPBDA2IzOiC4vqB14,29925
|
85
85
|
parsl/executors/high_throughput/manager_record.py,sha256=yn3L8TUJFkgm2lX1x0SeS9mkvJowC0s2VIMCFiU7ThM,455
|
86
|
-
parsl/executors/high_throughput/manager_selector.py,sha256=
|
86
|
+
parsl/executors/high_throughput/manager_selector.py,sha256=UKcUE6v0tO7PDMTThpKSKxVpOpOUilxDL7UbNgpZCxo,2116
|
87
87
|
parsl/executors/high_throughput/monitoring_info.py,sha256=HC0drp6nlXQpAop5PTUKNjdXMgtZVvrBL0JzZJebPP4,298
|
88
88
|
parsl/executors/high_throughput/mpi_executor.py,sha256=khvGz56A8zU8XAY-R4TtqqiJB8B10mkVTXD_9xtrXgo,4696
|
89
|
-
parsl/executors/high_throughput/mpi_prefix_composer.py,sha256=
|
89
|
+
parsl/executors/high_throughput/mpi_prefix_composer.py,sha256=DmpKugANNa1bdYlqQBLHkrFc15fJpefPPhW9hkAlh1s,4308
|
90
90
|
parsl/executors/high_throughput/mpi_resource_management.py,sha256=LFBbJ3BnzTcY_v-jNu30uoIB2Enk4cleN4ygY3dncjY,8194
|
91
91
|
parsl/executors/high_throughput/probe.py,sha256=TNpGTXb4_DEeg_h-LHu4zEKi1-hffboxvKcZUl2OZGk,2751
|
92
|
-
parsl/executors/high_throughput/process_worker_pool.py,sha256=
|
92
|
+
parsl/executors/high_throughput/process_worker_pool.py,sha256=ndV6uJBd7ErVRZdL9Iy1362m9y3k36zMSe8w3CM6eBg,43074
|
93
93
|
parsl/executors/high_throughput/zmq_pipes.py,sha256=tAjQB3aNVMuTXziN3dbJWre46YpXgliD55qMBbhYTLU,8581
|
94
94
|
parsl/executors/radical/__init__.py,sha256=CKbtV2numw5QvgIBq1htMUrt9TqDCIC2zifyf2svTNU,186
|
95
95
|
parsl/executors/radical/executor.py,sha256=426cMt6d8uJFZ_7Ub1kCslaND4OKtBX5WZdz-0RXjMk,22554
|
@@ -107,7 +107,7 @@ parsl/executors/taskvine/utils.py,sha256=iSrIogeiauL3UNy_9tiZp1cBSNn6fIJkMYQRVi1
|
|
107
107
|
parsl/executors/workqueue/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
108
108
|
parsl/executors/workqueue/errors.py,sha256=XO2naYhAsHHyiOBH6hpObg3mPNDmvMoFqErsj0-v7jc,541
|
109
109
|
parsl/executors/workqueue/exec_parsl_function.py,sha256=RUkJ4JSJAjr7YyRZ58zhMdg8cR5dVV9odUl3AuzNf3k,7802
|
110
|
-
parsl/executors/workqueue/executor.py,sha256=
|
110
|
+
parsl/executors/workqueue/executor.py,sha256=_Jv35gRAzUjC-pyDrSs6sEOFc7MxOFJ5cvWXt9WGRwU,49969
|
111
111
|
parsl/executors/workqueue/parsl_coprocess.py,sha256=cF1UmTgVLoey6QzBcbYgEiEsRidSaFfuO54f1HFw_EM,5737
|
112
112
|
parsl/executors/workqueue/parsl_coprocess_stub.py,sha256=_bJmpPIgL42qM6bVzeEKt1Mn1trSP41rtJguXxPGfHI,735
|
113
113
|
parsl/jobs/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
@@ -176,7 +176,7 @@ parsl/providers/grid_engine/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMp
|
|
176
176
|
parsl/providers/grid_engine/grid_engine.py,sha256=jTQjKaJh4eEXGbhrrCcXFV4AVFo2t39iVpslDR8gF6o,8565
|
177
177
|
parsl/providers/grid_engine/template.py,sha256=a7iViKr8LXcFTPmsf_qQeVK5o_RekOAIlUOF0X1q-2M,273
|
178
178
|
parsl/providers/kubernetes/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
179
|
-
parsl/providers/kubernetes/kube.py,sha256=
|
179
|
+
parsl/providers/kubernetes/kube.py,sha256=ghOKM1gY1UjzMzWAheKsG15u8oUzRkXUyjtpqjkquIo,14952
|
180
180
|
parsl/providers/kubernetes/template.py,sha256=VsRz6cmNaII-y4OdMT6sCwzQy95SJX6NMB0hmmFBhX4,50
|
181
181
|
parsl/providers/local/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
182
182
|
parsl/providers/local/local.py,sha256=pTEcl9NnjRcL8FHcMeMEtJj1IXiAOxZ2Cih97Q5jDPY,11388
|
@@ -200,7 +200,7 @@ parsl/serialize/facade.py,sha256=SpKGSpI8PQb3hhxuKRJUYoQoq284t5np9ouTpogKmtU,679
|
|
200
200
|
parsl/serialize/proxystore.py,sha256=o-ha9QAvVhbN8y9S1itk3W0O75eyHYZw2AvB2xu5_Lg,1624
|
201
201
|
parsl/tests/__init__.py,sha256=VTtJzOzz_x6fWNh8IOnsgFqVbdiJShi2AZH21mcmID4,204
|
202
202
|
parsl/tests/callables_helper.py,sha256=ceP1YYsNtrZgKT6MAIvpgdccEjQ_CpFEOnZBGHKGOx0,30
|
203
|
-
parsl/tests/conftest.py,sha256=
|
203
|
+
parsl/tests/conftest.py,sha256=DjQDWyGFYWZ9uOMPMdEHFApwRgqzChSVZMdTugbLl6M,14810
|
204
204
|
parsl/tests/test_aalst_patterns.py,sha256=lNIxb7nIgh1yX7hR2fr_ck_mxYJxx8ASKK9zHUVqPno,9614
|
205
205
|
parsl/tests/test_callables.py,sha256=97vrIF1_hfDGd81FM1bhR6FemZMWFcALrH6pVHMTCt8,1974
|
206
206
|
parsl/tests/test_curvezmq.py,sha256=yyhlS4vmaZdMitiySoy4l_ih9H1bsPiN-tMdwIh3H20,12431
|
@@ -323,7 +323,7 @@ parsl/tests/test_error_handling/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm
|
|
323
323
|
parsl/tests/test_error_handling/test_fail.py,sha256=xx4TGWfL7le4cQ9nvnUkrlmKQJkskhD0l_3W1xwZSEI,282
|
324
324
|
parsl/tests/test_error_handling/test_python_walltime.py,sha256=rdmGZHIkuann2Njt3i62odKJ0FaODGr7-L96rOXNVYg,950
|
325
325
|
parsl/tests/test_error_handling/test_rand_fail.py,sha256=crFg4GmwdDpvx49_7w5Xt2P7H2R_V9f6i1Ar-QkASuU,3864
|
326
|
-
parsl/tests/test_error_handling/test_resource_spec.py,sha256=
|
326
|
+
parsl/tests/test_error_handling/test_resource_spec.py,sha256=gUS_lN7CcvOh_GeMY8DtZTh6LhizPfrVrYAJpt9XSYM,1428
|
327
327
|
parsl/tests/test_error_handling/test_retries.py,sha256=zJ9D2hrvXQURnK2OIf5LfQFcSDVZ8rhdpp6peGccY7s,2372
|
328
328
|
parsl/tests/test_error_handling/test_retry_handler.py,sha256=8fMHffMBLhRyNreIqkrwamx9TYRZ498uVYNlkcbAoLU,1407
|
329
329
|
parsl/tests/test_error_handling/test_retry_handler_failure.py,sha256=GaGtZZCB9Wb7ieShqTrxUFEUSKy07ZZWytCY4Qixk9Y,552
|
@@ -332,18 +332,20 @@ parsl/tests/test_error_handling/test_wrap_with_logs.py,sha256=aQDPECuhvZWUYQ6ysj
|
|
332
332
|
parsl/tests/test_flowcontrol/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
333
333
|
parsl/tests/test_htex/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
334
334
|
parsl/tests/test_htex/test_basic.py,sha256=OCX4hbXLqxRakjt_pB9F68qJJv8qGOTkpiIzjHkSq1k,451
|
335
|
+
parsl/tests/test_htex/test_block_manager_selector_unit.py,sha256=BeSj8jDeBHUEJVMVXwf0KLBhZ_pnsBEkG4vacldBfEY,737
|
335
336
|
parsl/tests/test_htex/test_command_client_timeout.py,sha256=5tBViUhPT1ejnDDztTcEA690aA2BUxnPY0FpMf-1AXE,2008
|
336
337
|
parsl/tests/test_htex/test_connected_blocks.py,sha256=gaXZSr__pIaLvKY6rF-4r1p_4dO5V28gtxHLT-psEFg,1640
|
337
338
|
parsl/tests/test_htex/test_cpu_affinity_explicit.py,sha256=DVHrRCskDbJIrfB5YSi3ZSbfR4WzijA46aZfZzjNcrU,1382
|
338
339
|
parsl/tests/test_htex/test_disconnected_blocks.py,sha256=3V1Ol9gMS6knjLTgIjB5GrunRSp4ANsJ_2vAvpyMR6c,1858
|
339
340
|
parsl/tests/test_htex/test_disconnected_blocks_failing_provider.py,sha256=eOdipRpKMOkWAXB3UtY1UjqTiwfNs_csNLve8vllG_M,2040
|
340
|
-
parsl/tests/test_htex/test_drain.py,sha256=
|
341
|
+
parsl/tests/test_htex/test_drain.py,sha256=0MW3P71LUas2zmYFwwwat4G7dG8nVdxGeA3Fcy7NcMY,2454
|
341
342
|
parsl/tests/test_htex/test_htex.py,sha256=J1uEGezic8ziPPZsQwfK9iNiTJ53NqXMhIg9CUunjZw,4901
|
342
343
|
parsl/tests/test_htex/test_manager_failure.py,sha256=N-obuSZ8f7XA_XcddoN2LWKSVtpKUZvTHb7BFelS3iQ,1143
|
344
|
+
parsl/tests/test_htex/test_manager_selector_by_block.py,sha256=D7EQVFeQw11w9XEgTeeCORp-HKE3kENpGGEMR9n609w,1386
|
343
345
|
parsl/tests/test_htex/test_managers_command.py,sha256=Y-eUjtBzwW9erCYdph9bOesbkUvX8QUPqXt27DCgVS8,951
|
344
346
|
parsl/tests/test_htex/test_missing_worker.py,sha256=gyp5i7_t-JHyJGtz_eXZKKBY5w8oqLOIxO6cJgGJMtQ,745
|
345
347
|
parsl/tests/test_htex/test_multiple_disconnected_blocks.py,sha256=2vXZoIx4NuAWYuiNoL5Gxr85w72qZ7Kdb3JGh0FufTg,1867
|
346
|
-
parsl/tests/test_htex/test_resource_spec_validation.py,sha256=
|
348
|
+
parsl/tests/test_htex/test_resource_spec_validation.py,sha256=VzOk4rjMNiDcEVLb-3YdlYZND7HRoGACJkTwq8NUTnc,1102
|
347
349
|
parsl/tests/test_htex/test_worker_failure.py,sha256=Uz-RHI-LK78FMjXUvrUFmo4iYfmpDVBUcBxxRb3UG9M,603
|
348
350
|
parsl/tests/test_htex/test_zmq_binding.py,sha256=Bq1HHuMxBE_AcaP1VZ-RqE4euCHO__Du05b2UZ5H1RA,3950
|
349
351
|
parsl/tests/test_monitoring/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
@@ -358,13 +360,14 @@ parsl/tests/test_monitoring/test_stdouterr.py,sha256=9FQSfiaMrOpoSwravZuEwmdgUgI
|
|
358
360
|
parsl/tests/test_monitoring/test_viz_colouring.py,sha256=83Qdmn3gM0j7IL6kPDcuIsp_nl4zj-liPijyIN632SY,592
|
359
361
|
parsl/tests/test_mpi_apps/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
360
362
|
parsl/tests/test_mpi_apps/test_bad_mpi_config.py,sha256=QKvEUSrHIBrvqu2fRj1MAqxsYxDfcrdQ7dzWdOZejuU,1320
|
361
|
-
parsl/tests/test_mpi_apps/test_mpi_mode_enabled.py,sha256=
|
363
|
+
parsl/tests/test_mpi_apps/test_mpi_mode_enabled.py,sha256=_fpiaDq9yEUuBxTiuxLFsBt5r1oX9S-3S-YL5yRB13E,5423
|
362
364
|
parsl/tests/test_mpi_apps/test_mpi_prefix.py,sha256=yJslZvYK3JeL9UgxMwF9DDPR9QD4zJLGVjubD0F-utc,1950
|
363
365
|
parsl/tests/test_mpi_apps/test_mpi_scheduler.py,sha256=YdV8A-m67DHk9wxgNpj69wwGEKrFGL20KAC1TzLke3c,6332
|
364
366
|
parsl/tests/test_mpi_apps/test_mpiex.py,sha256=mlFdHK3A1B6NsEhxTQQX8lhs9qVza36FMG99vNrBRW4,2021
|
365
|
-
parsl/tests/test_mpi_apps/test_resource_spec.py,sha256=
|
367
|
+
parsl/tests/test_mpi_apps/test_resource_spec.py,sha256=5k6HM2jtb6sa7jetpI-Tl1nPQiN33VLaM7YT10c307E,3756
|
366
368
|
parsl/tests/test_providers/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
367
369
|
parsl/tests/test_providers/test_cobalt_deprecation_warning.py,sha256=UN2W6xJxuLx2euPqArORKFEU2VXez9_PYqq-0rZHanQ,391
|
370
|
+
parsl/tests/test_providers/test_kubernetes_provider.py,sha256=AzCHfNz2HJwjP9BfxKH-XPaTHJCLXFErDMlQ_Ir8qRU,3861
|
368
371
|
parsl/tests/test_providers/test_local_provider.py,sha256=R96E1eWgHVkvOQ1Au9wj-gfdWKAqGc-qlygFuxpGFQ8,7160
|
369
372
|
parsl/tests/test_providers/test_pbspro_template.py,sha256=-bi1vags9yyNfpBxtjTqFjzMIg1VVPyf2M958UcXWmA,855
|
370
373
|
parsl/tests/test_providers/test_slurm_instantiate.py,sha256=eW3pEZRIzZO1-eKFrBc7N5uoN5otwghgbqut74Kyqoc,500
|
@@ -455,6 +458,7 @@ parsl/tests/test_threads/test_configs.py,sha256=POwCKY4FMkI5-RSFz77cQYC7R_ISj0iA
|
|
455
458
|
parsl/tests/test_threads/test_lazy_errors.py,sha256=6dJ65py5vUZkc0aRLL-LHuxBBp877Ktghb_jFDZsE98,543
|
456
459
|
parsl/tests/test_utils/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
457
460
|
parsl/tests/test_utils/test_representation_mixin.py,sha256=kUZeIDwA2rlbJ3-beGzLLwf3dOplTMCrWJN87etHcyY,1633
|
461
|
+
parsl/tests/test_utils/test_sanitize_dns.py,sha256=8P_v5a5JLGU76OYf0LtclAwqJxGU0fMh_OZMVkMke3I,2954
|
458
462
|
parsl/tests/unit/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
459
463
|
parsl/tests/unit/test_file.py,sha256=vLycnYcv3bvSzL-FV8WdoibqTyb41BrH1LUYBavobsg,2850
|
460
464
|
parsl/tests/unit/test_usage_tracking.py,sha256=xEfUlbBRpsFdUdOrCsk1Kz5AfmMxJT7f0_esZl8Ft-0,1884
|
@@ -462,13 +466,13 @@ parsl/usage_tracking/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hS
|
|
462
466
|
parsl/usage_tracking/api.py,sha256=iaCY58Dc5J4UM7_dJzEEs871P1p1HdxBMtNGyVdzc9g,1821
|
463
467
|
parsl/usage_tracking/levels.py,sha256=xbfzYEsd55KiZJ-mzNgPebvOH4rRHum04hROzEf41tU,291
|
464
468
|
parsl/usage_tracking/usage.py,sha256=tcoZ2OUjsQVakG8Uu9_HFuEdzpSHyt4JarSRcLGnSMw,8918
|
465
|
-
parsl-2024.10.
|
466
|
-
parsl-2024.10.
|
467
|
-
parsl-2024.10.
|
468
|
-
parsl-2024.10.
|
469
|
-
parsl-2024.10.
|
470
|
-
parsl-2024.10.
|
471
|
-
parsl-2024.10.
|
472
|
-
parsl-2024.10.
|
473
|
-
parsl-2024.10.
|
474
|
-
parsl-2024.10.
|
469
|
+
parsl-2024.10.21.data/scripts/exec_parsl_function.py,sha256=RUkJ4JSJAjr7YyRZ58zhMdg8cR5dVV9odUl3AuzNf3k,7802
|
470
|
+
parsl-2024.10.21.data/scripts/interchange.py,sha256=FcEEmcuMcuFBB_aNOLzaYr5w3Yw9zKJxhtKbIUPVfhI,29912
|
471
|
+
parsl-2024.10.21.data/scripts/parsl_coprocess.py,sha256=zrVjEqQvFOHxsLufPi00xzMONagjVwLZbavPM7bbjK4,5722
|
472
|
+
parsl-2024.10.21.data/scripts/process_worker_pool.py,sha256=4K9vxwFHsz8QURwfq3VvnjEls7rYBxi2q0Gyy1cce5E,43060
|
473
|
+
parsl-2024.10.21.dist-info/LICENSE,sha256=tAkwu8-AdEyGxGoSvJ2gVmQdcicWw3j1ZZueVV74M-E,11357
|
474
|
+
parsl-2024.10.21.dist-info/METADATA,sha256=8hfXCgoISytZwjc6AefQ1vMFLcHWbaOAmBHOJNmg_Ds,4072
|
475
|
+
parsl-2024.10.21.dist-info/WHEEL,sha256=eOLhNAGa2EW3wWl_TU484h7q1UNgy0JXjjoqKoxAAQc,92
|
476
|
+
parsl-2024.10.21.dist-info/entry_points.txt,sha256=XqnsWDYoEcLbsMcpnYGKLEnSBmaIe1YoM5YsBdJG2tI,176
|
477
|
+
parsl-2024.10.21.dist-info/top_level.txt,sha256=PIheYoUFQtF2icLsgOykgU-Cjuwr2Oi6On2jo5RYgRM,6
|
478
|
+
parsl-2024.10.21.dist-info/RECORD,,
|
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes