parsl 2025.5.19__py3-none-any.whl → 2025.6.2__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- parsl/addresses.py +2 -3
- parsl/executors/high_throughput/executor.py +12 -10
- parsl/executors/high_throughput/interchange.py +70 -48
- parsl/executors/workqueue/executor.py +6 -4
- parsl/monitoring/radios/base.py +0 -3
- parsl/providers/pbspro/pbspro.py +22 -4
- parsl/providers/slurm/slurm.py +47 -14
- parsl/tests/conftest.py +15 -4
- parsl/tests/test_error_handling/test_resource_spec.py +2 -2
- parsl/tests/test_htex/test_priority_queue.py +70 -0
- parsl/tests/test_htex/test_resource_spec_validation.py +7 -0
- parsl/tests/test_htex/test_zmq_binding.py +12 -2
- parsl/tests/test_monitoring/test_basic.py +23 -4
- parsl/tests/test_radical/test_mpi_funcs.py +6 -0
- parsl/tests/test_scaling/test_scale_down.py +1 -0
- parsl/tests/unit/test_address.py +1 -0
- parsl/usage_tracking/usage.py +1 -0
- parsl/version.py +1 -1
- {parsl-2025.5.19.data → parsl-2025.6.2.data}/scripts/interchange.py +70 -48
- {parsl-2025.5.19.dist-info → parsl-2025.6.2.dist-info}/METADATA +3 -2
- {parsl-2025.5.19.dist-info → parsl-2025.6.2.dist-info}/RECORD +28 -27
- {parsl-2025.5.19.data → parsl-2025.6.2.data}/scripts/exec_parsl_function.py +0 -0
- {parsl-2025.5.19.data → parsl-2025.6.2.data}/scripts/parsl_coprocess.py +0 -0
- {parsl-2025.5.19.data → parsl-2025.6.2.data}/scripts/process_worker_pool.py +0 -0
- {parsl-2025.5.19.dist-info → parsl-2025.6.2.dist-info}/LICENSE +0 -0
- {parsl-2025.5.19.dist-info → parsl-2025.6.2.dist-info}/WHEEL +0 -0
- {parsl-2025.5.19.dist-info → parsl-2025.6.2.dist-info}/entry_points.txt +0 -0
- {parsl-2025.5.19.dist-info → parsl-2025.6.2.dist-info}/top_level.txt +0 -0
parsl/addresses.py
CHANGED
@@ -161,13 +161,12 @@ def get_any_address() -> str:
 
 def tcp_url(address: str, port: Union[str, int, None] = None) -> str:
     """Construct a tcp url safe for IPv4 and IPv6"""
+    port_suffix = f":{port}" if port else ""
     if address == "*":
-        return "tcp://*"
+        return "tcp://*" + port_suffix
 
     ip_addr = ipaddress.ip_address(address)
 
-    port_suffix = f":{port}" if port else ""
-
     if ip_addr.version == 6 and port_suffix:
         url = f"tcp://[{address}]{port_suffix}"
     else:
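The practical effect of moving port_suffix above the wildcard check is that tcp_url can now attach a port to the "*" wildcard address. A minimal standalone sketch of the resulting behavior (the final non-IPv6 branch is assumed from context, since the diff does not show it):

import ipaddress
from typing import Union

def tcp_url(address: str, port: Union[str, int, None] = None) -> str:
    """Construct a tcp url safe for IPv4 and IPv6"""
    port_suffix = f":{port}" if port else ""
    if address == "*":
        return "tcp://*" + port_suffix

    ip_addr = ipaddress.ip_address(address)

    if ip_addr.version == 6 and port_suffix:
        # IPv6 addresses need brackets so the port separator stays unambiguous
        url = f"tcp://[{address}]{port_suffix}"
    else:
        url = f"tcp://{address}{port_suffix}"
    return url

assert tcp_url("*", 55001) == "tcp://*:55001"            # newly supported case
assert tcp_url("127.0.0.1", 8080) == "tcp://127.0.0.1:8080"
assert tcp_url("::1", 8080) == "tcp://[::1]:8080"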
parsl/executors/high_throughput/executor.py
CHANGED
@@ -8,7 +8,7 @@ import warnings
 from collections import defaultdict
 from concurrent.futures import Future
 from dataclasses import dataclass
-from typing import Callable, Dict, List, Optional, Sequence, …
+from typing import Callable, Dict, List, Optional, Sequence, Tuple, Union
 
 import typeguard
 
@@ -363,7 +363,9 @@ class HighThroughputExecutor(BlockProviderExecutor, RepresentationMixin, UsageIn
 
     def validate_resource_spec(self, resource_specification: dict):
         if resource_specification:
-            …
+            """HTEX supports the following *Optional* resource specifications:
+            priority: lower value is higher priority"""
+            acceptable_fields = {'priority'}  # add new resource spec field names here to make htex accept them
             keys = set(resource_specification.keys())
             invalid_keys = keys - acceptable_fields
             if invalid_keys:
@@ -599,20 +601,20 @@ class HighThroughputExecutor(BlockProviderExecutor, RepresentationMixin, UsageIn
         self._result_queue_thread.start()
         logger.debug("Started result queue thread: %r", self._result_queue_thread)
 
-    def hold_worker(self, worker_id: str) -> None:
-        """Puts a worker on hold, preventing scheduling of additional tasks to it.
+    def _hold_manager(self, manager_id: str) -> None:
+        """Puts a manager on hold, preventing scheduling of additional tasks to it.
 
         This is called "hold" mostly because this only stops scheduling of tasks,
-        and does not actually kill the worker.
+        and does not actually kill the manager or workers.
 
         Parameters
         ----------
 
-        worker_id : str
-            Worker id to be put on hold
+        manager_id : str
+            Manager id to be put on hold
         """
-        self.command_client.run("HOLD_WORKER;{}".format(worker_id))
-        logger.debug("Sent hold request to manager: {}".format(worker_id))
+        self.command_client.run("HOLD_WORKER;{}".format(manager_id))
+        logger.debug("Sent hold request to manager: {}".format(manager_id))
 
     @property
     def outstanding(self) -> int:
@@ -656,7 +658,7 @@ class HighThroughputExecutor(BlockProviderExecutor, RepresentationMixin, UsageIn
         for manager in managers:
             if manager['block_id'] == block_id:
                 logger.debug("Sending hold to manager: {}".format(manager['manager']))
-                self.hold_worker(manager['manager'])
+                self._hold_manager(manager['manager'])
 
     def submit(self, func, resource_specification, *args, **kwargs):
         """Submits work to the outgoing_q.
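The new validate_resource_spec body is whitelist validation: any key outside acceptable_fields is rejected. A minimal sketch of the same pattern (ValueError stands in for parsl's InvalidResourceSpecification, and the standalone function is illustrative):

acceptable_fields = {'priority'}  # the only resource spec key htex now accepts

def validate_resource_spec(resource_specification: dict) -> None:
    invalid_keys = set(resource_specification.keys()) - acceptable_fields
    if invalid_keys:
        raise ValueError(f"Invalid resource specification options: {invalid_keys}")

validate_resource_spec({'priority': 5})   # accepted: lower value = higher priority
# validate_resource_spec({'cores': 2})    # would raise: htex does not schedule by cores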
parsl/executors/high_throughput/interchange.py
CHANGED
@@ -5,13 +5,13 @@ import logging
 import os
 import pickle
 import platform
-import queue
 import sys
 import threading
 import time
 from typing import Any, Dict, List, Optional, Sequence, Set, Tuple, cast
 
 import zmq
+from sortedcontainers import SortedList
 
 from parsl import curvezmq
 from parsl.addresses import tcp_url
@@ -131,7 +131,7 @@ class Interchange:
         self.hub_address = hub_address
         self.hub_zmq_port = hub_zmq_port
 
-        self.pending_task_queue: …
+        self.pending_task_queue: SortedList[Any] = SortedList(key=lambda tup: (tup[0], tup[1]))
 
         # count of tasks that have been received from the submit side
         self.task_counter = 0
@@ -196,13 +196,12 @@ class Interchange:
         eg. [{'task_id':<x>, 'buffer':<buf>} ... ]
         """
         tasks = []
-        …
-                tasks.append(x)
+        try:
+            for _ in range(count):
+                *_, task = self.pending_task_queue.pop()
+                tasks.append(task)
+        except IndexError:
+            pass
 
         return tasks
 
@@ -220,11 +219,11 @@ class Interchange:
     def process_command(self, monitoring_radio: Optional[MonitoringRadioSender]) -> None:
         """ Command server to run async command to the interchange
         """
-        logger.debug("entering command_server section")
 
         reply: Any  # the type of reply depends on the command_req received (aka this needs dependent types...)
 
         if self.command_channel in self.socks and self.socks[self.command_channel] == zmq.POLLIN:
+            logger.debug("entering command_server section")
 
             command_req = self.command_channel.recv_pyobj()
             logger.debug("Received command request: {}".format(command_req))
@@ -342,8 +341,15 @@ class Interchange:
         if self.task_incoming in self.socks and self.socks[self.task_incoming] == zmq.POLLIN:
             logger.debug("start task_incoming section")
             msg = self.task_incoming.recv_pyobj()
+
+            # Process priority, higher number = lower priority
+            resource_spec = msg.get('resource_spec', {})
+            priority = resource_spec.get('priority', float('inf'))
+            queue_entry = (-priority, -self.task_counter, msg)
+
             logger.debug("putting message onto pending_task_queue")
-            self.pending_task_queue.put(msg)
+
+            self.pending_task_queue.add(queue_entry)
             self.task_counter += 1
             logger.debug(f"Fetched {self.task_counter} tasks so far")
 
@@ -378,23 +384,24 @@ class Interchange:
             return
 
         if msg['type'] == 'registration':
-            …
+            ix_minor_py = self.current_platform['python_v'].rsplit(".", 1)[0]
+            ix_parsl_v = self.current_platform['parsl_v']
+            mgr_minor_py = msg['python_v'].rsplit(".", 1)[0]
+            mgr_parsl_v = msg['parsl_v']
+
+            m = ManagerRecord(
+                block_id=None,
+                start_time=msg['start_time'],
+                tasks=[],
+                worker_count=0,
+                max_capacity=0,
+                active=True,
+                draining=False,
+                last_heartbeat=time.time(),
+                idle_since=time.time(),
+                parsl_version=mgr_parsl_v,
+                python_version=msg['python_v'],
+            )
 
             # m is a ManagerRecord, but msg is a dict[Any,Any] and so can
             # contain arbitrary fields beyond those in ManagerRecord (and
@@ -405,23 +412,43 @@ class Interchange:
             logger.info(f"Registration info for manager {manager_id!r}: {msg}")
             self._send_monitoring_info(monitoring_radio, m)
 
-            if (…
-                    msg['parsl_v'] != self.current_platform['parsl_v']):
-                logger.error(f"Manager {manager_id!r} has incompatible version info with the interchange")
-                logger.debug("Setting kill event")
+            if (mgr_minor_py, mgr_parsl_v) != (ix_minor_py, ix_parsl_v):
                 kill_event.set()
-                e = VersionMismatch(…
-                …
+                e = VersionMismatch(
+                    f"py.v={ix_minor_py} parsl.v={ix_parsl_v}",
+                    f"py.v={mgr_minor_py} parsl.v={mgr_parsl_v}",
+                )
+                result_package = {
+                    'type': 'result',
+                    'task_id': -1,
+                    'exception': serialize_object(e),
+                }
                 pkl_package = pickle.dumps(result_package)
                 self.results_outgoing.send(pkl_package)
-                logger.error(…
+                logger.error(
+                    "Manager has incompatible version info with the interchange;"
+                    " sending failure reports and shutting down:"
+                    f"\n  Interchange: {e.interchange_version}"
+                    f"\n  Manager: {e.manager_version}"
+                )
 
             else:
-                …
+                # We really should update the associated data structure; but not
+                # at this time. *kicks can down the road*
+                assert m['block_id'] is not None, "Verified externally currently"
+
+                # set up entry only if we accept the registration
+                self._ready_managers[manager_id] = m
+                self.connected_block_history.append(m['block_id'])
+
+                interesting_managers.add(manager_id)
+
+                logger.info(
+                    f"Registered manager {manager_id!r} (py{mgr_minor_py},"
+                    f" {mgr_parsl_v}) and added to ready queue"
+                )
+                logger.debug("Manager %r -> %s", manager_id, m)
+
         elif msg['type'] == 'heartbeat':
             manager = self._ready_managers.get(manager_id)
             if manager:
@@ -461,10 +488,10 @@ class Interchange:
             len(self._ready_managers)
         )
 
-        if interesting_managers and …
+        if interesting_managers and self.pending_task_queue:
             shuffled_managers = self.manager_selector.sort_managers(self._ready_managers, interesting_managers)
 
-            while shuffled_managers and …
+            while shuffled_managers and self.pending_task_queue:  # cf. the if statement above...
                 manager_id = shuffled_managers.pop()
                 m = self._ready_managers[manager_id]
                 tasks_inflight = len(m['tasks'])
@@ -491,10 +518,7 @@ class Interchange:
                 self._send_monitoring_info(monitoring_radio, m)
             else:
                 interesting_managers.remove(manager_id)
-                # logger.debug("Nothing to send to manager {}".format(manager_id))
         logger.debug("leaving _ready_managers section, with %s managers still interesting", len(interesting_managers))
-        else:
-            logger.debug("either no interesting managers or no tasks, so skipping manager pass")
 
     def process_results_incoming(self, interesting_managers: Set[bytes], monitoring_radio: Optional[MonitoringRadioSender]) -> None:
         # Receive any results and forward to client
@@ -605,8 +629,6 @@ def start_file_logger(filename: str, level: int = logging.DEBUG, format_string:
 
     )
 
-    global logger
-    logger = logging.getLogger(LOGGER_NAME)
     logger.setLevel(level)
     handler = logging.FileHandler(filename)
     handler.setLevel(level)
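The queue swap above is the heart of this release's priority-queue feature: tasks enter the SortedList as (-priority, -task_counter, msg) and are popped from the end, so the smallest priority value is dispatched first and ties fall back to submission (FIFO) order. A self-contained sketch of that ordering:

from sortedcontainers import SortedList

# Key on the first two tuple fields only, as the interchange does, so the
# payload in the third slot never has to be comparable.
pending = SortedList(key=lambda tup: (tup[0], tup[1]))

counter = 0
for priority, name in [(10, "a"), (1, "b"), (5, "c"), (1, "d")]:
    pending.add((-priority, -counter, name))
    counter += 1

order = []
while pending:
    *_, task = pending.pop()   # largest key = smallest priority value, earliest arrival
    order.append(task)

assert order == ["b", "d", "c", "a"]   # priority 1 first, FIFO within equal priorities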
parsl/executors/workqueue/executor.py
CHANGED
@@ -40,16 +40,18 @@ from parsl.utils import setproctitle
 
 from .errors import WorkQueueFailure, WorkQueueTaskFailure
 
+IMPORT_EXCEPTION = None
 try:
-    import work_queue as wq
-    from work_queue import (
+    from ndcctools import work_queue as wq
+    from ndcctools.work_queue import (
         WORK_QUEUE_ALLOCATION_MODE_MAX_THROUGHPUT,
         WORK_QUEUE_DEFAULT_PORT,
         WorkQueue,
     )
-except ImportError:
+except ImportError as e:
     _work_queue_enabled = False
     WORK_QUEUE_DEFAULT_PORT = 0
+    IMPORT_EXCEPTION = e
 else:
     _work_queue_enabled = True
 
@@ -257,7 +259,7 @@ class WorkQueueExecutor(BlockProviderExecutor, putils.RepresentationMixin):
         BlockProviderExecutor.__init__(self, provider=provider,
                                        block_error_handler=True)
         if not _work_queue_enabled:
-            raise OptionalModuleMissing(['work_queue'], "WorkQueueExecutor requires the work_queue module.")
+            raise OptionalModuleMissing(['work_queue'], f"WorkQueueExecutor requires the work_queue module. More info: {IMPORT_EXCEPTION}")
 
         self.scaling_cores_per_worker = scaling_cores_per_worker
         self.label = label
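Stashing the ImportError in IMPORT_EXCEPTION is a small but useful pattern for optional dependencies: the reason the import failed is captured at module load and surfaced later, when the executor is actually constructed. A standalone sketch (require_work_queue is a hypothetical stand-in for the constructor check above, and RuntimeError stands in for OptionalModuleMissing):

IMPORT_EXCEPTION = None
try:
    from ndcctools import work_queue as wq  # optional dependency
except ImportError as e:
    _work_queue_enabled = False
    IMPORT_EXCEPTION = e   # remember *why* the import failed
else:
    _work_queue_enabled = True

def require_work_queue() -> None:
    if not _work_queue_enabled:
        raise RuntimeError(
            f"WorkQueueExecutor requires the work_queue module. More info: {IMPORT_EXCEPTION}")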
parsl/monitoring/radios/base.py
CHANGED
parsl/providers/pbspro/pbspro.py
CHANGED
@@ -5,6 +5,7 @@ import time
 
 from parsl.jobs.states import JobState, JobStatus
 from parsl.launchers import SingleNodeLauncher
+from parsl.providers.errors import SubmitException
 from parsl.providers.pbspro.template import template_string
 from parsl.providers.torque.torque import TorqueProvider, translate_table
 
@@ -97,6 +98,14 @@ class PBSProProvider(TorqueProvider):
 
         retcode, stdout, stderr = self.execute_wait("qstat -f -F json {0}".format(job_id_list))
 
+        # If qstat failed do not update job state
+        if retcode != 0:
+            logger.warning("qstat failed with retcode:%s STDOUT:%s STDERR:%s",
+                           retcode,
+                           stdout.strip(),
+                           stderr.strip())
+            return
+
         job_statuses = json.loads(stdout)
 
         if 'Jobs' in job_statuses:
@@ -198,10 +207,19 @@ class PBSProProvider(TorqueProvider):
             'job_stderr_path': job_stderr_path,
         }
         else:
-            message = "…
-            …
+            message = f"Submit command '{launch_cmd}' failed"
+            logger.error(
+                f"{message}\n"
+                f"  Return code: {retcode}\n"
+                f"  STDOUT: {stdout.strip()}\n"
+                f"  STDERR: {stderr.strip()}"
+            )
+            raise SubmitException(
+                job_name=job_name,
+                message=message,
+                stdout=stdout,
+                stderr=stderr,
+            )
 
         return job_id
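Both PBS Pro changes apply the same defensive rule: never act on the output of a scheduler command that returned non-zero. A sketch of the new qstat guard (execute_wait here is a hypothetical callable standing in for the provider method of the same name):

import json
import logging

logger = logging.getLogger(__name__)

def poll_job_status(execute_wait, job_id_list: str):
    retcode, stdout, stderr = execute_wait(f"qstat -f -F json {job_id_list}")
    # If qstat failed, do not update job state
    if retcode != 0:
        logger.warning("qstat failed with retcode:%s STDOUT:%s STDERR:%s",
                       retcode, stdout.strip(), stderr.strip())
        return None   # keep the previously-known job states untouched
    return json.loads(stdout)   # only parse output from a successful call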
parsl/providers/slurm/slurm.py
CHANGED
@@ -2,6 +2,7 @@ import logging
 import math
 import os
 import re
+import sys
 import time
 from typing import Any, Dict, Optional
 
@@ -50,6 +51,28 @@ squeue_translate_table = {
 }
 
 
+if sys.version_info < (3, 12):
+    from itertools import islice
+    from typing import Iterable
+
+    def batched(
+        iterable: Iterable[tuple[object, Any]], n: int, *, strict: bool = False
+    ):
+        """Batched
+        Turns a list into a batch of size n. This code is from
+        https://docs.python.org/3.12/library/itertools.html#itertools.batched
+        and in versions 3.12+ this can be replaced with
+        itertools.batched
+        """
+        if n < 1:
+            raise ValueError("n must be at least one")
+        iterator = iter(iterable)
+        while batch := tuple(islice(iterator, n)):
+            yield batch
+else:
+    from itertools import batched
+
+
 class SlurmProvider(ClusterProvider, RepresentationMixin):
     """Slurm Execution Provider
 
@@ -99,6 +122,12 @@ class SlurmProvider(ClusterProvider, RepresentationMixin):
         symbolic group for the job ID.
     worker_init : str
         Command to be run before starting a worker, such as 'module load Anaconda; source activate env'.
+    cmd_timeout : int (Default = 10)
+        Number of seconds to wait for slurm commands to finish. For schedulers with many jobs this
+        may need to be increased to wait longer for scheduler information.
+    status_batch_size: int (Default = 50)
+        Number of jobs to batch together in calls to the scheduler status. For schedulers
+        with many jobs this may need to be decreased to get jobs in smaller batches.
     exclusive : bool (Default = True)
         Requests nodes which are not shared with other running jobs.
     launcher : Launcher
@@ -127,6 +156,7 @@ class SlurmProvider(ClusterProvider, RepresentationMixin):
                  regex_job_id: str = r"Submitted batch job (?P<id>\S*)",
                  worker_init: str = '',
                  cmd_timeout: int = 10,
+                 status_batch_size: int = 50,
                  exclusive: bool = True,
                  launcher: Launcher = SingleNodeLauncher()):
         label = 'slurm'
@@ -148,6 +178,8 @@ class SlurmProvider(ClusterProvider, RepresentationMixin):
         self.qos = qos
         self.constraint = constraint
         self.clusters = clusters
+        # Used to batch requests to sacct/squeue for long jobs lists
+        self.status_batch_size = status_batch_size
         self.scheduler_options = scheduler_options + '\n'
         if exclusive:
             self.scheduler_options += "#SBATCH --exclusive\n"
@@ -199,22 +231,23 @@ class SlurmProvider(ClusterProvider, RepresentationMixin):
         Returns:
             [status...] : Status list of all jobs
         '''
-        …
-        if not job_id_list:
-            logger.debug('No active jobs, skipping status update')
+        active_jobs = {jid: job for jid, job in self.resources.items() if not job["status"].terminal}
+        if len(active_jobs) == 0:
+            logger.debug("No active jobs, skipping status update")
             return
 
-        …
+        stdout = ""
+        for job_batch in batched(active_jobs.items(), self.status_batch_size):
+            job_id_list = ",".join(jid for jid, job in job_batch if not job["status"].terminal)
+            cmd = self._cmd.format(job_id_list)
+            logger.debug("Executing %s", cmd)
+            retcode, batch_stdout, batch_stderr = self.execute_wait(cmd)
+            logger.debug(f"sacct/squeue returned {retcode} {batch_stdout} {batch_stderr}")
+            stdout += batch_stdout
+            # Execute_wait failed. Do not update
+            if retcode != 0:
+                logger.warning("sacct/squeue failed with non-zero exit code {}".format(retcode))
+                return
 
         jobs_missing = set(self.resources.keys())
         for line in stdout.split('\n'):
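The batched() backport lets the provider chop a long list of active jobs into fixed-size chunks, so each sacct/squeue invocation carries at most status_batch_size job ids. A runnable sketch of the chunking (the 50-per-call figure mirrors the new status_batch_size default):

import sys

if sys.version_info < (3, 12):
    from itertools import islice

    def batched(iterable, n):
        # backport of itertools.batched, available natively on Python 3.12+
        if n < 1:
            raise ValueError("n must be at least one")
        iterator = iter(iterable)
        while batch := tuple(islice(iterator, n)):
            yield batch
else:
    from itertools import batched

job_ids = [str(i) for i in range(120)]
chunks = [",".join(batch) for batch in batched(job_ids, 50)]
assert len(chunks) == 3            # 50 + 50 + 20 job ids
assert chunks[0].count(",") == 49  # each chunk becomes one scheduler query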
parsl/tests/conftest.py
CHANGED
@@ -167,6 +167,14 @@ def pytest_configure(config):
         'markers',
         'issue_3620: Marks tests that do not work correctly on GlobusComputeExecutor (ref: issue 3620)'
     )
+    config.addinivalue_line(
+        'markers',
+        'workqueue: Marks local tests that require a working Work Queue installation'
+    )
+    config.addinivalue_line(
+        'markers',
+        'taskvine: Marks local tests that require a working Task Vine installation'
+    )
 
 
 @pytest.fixture(autouse=True, scope='session')
@@ -400,13 +408,13 @@ def try_assert():
         check_period_ms: int = 20,
     ):
         tb = create_traceback(start=1)
-        timeout_s = abs(timeout_ms) / 1000.0
         check_period_s = abs(check_period_ms) / 1000.0
         if attempts > 0:
             for _attempt_no in range(attempts):
+                time.sleep(random.random() * check_period_s)  # jitter
+                check_period_s *= 2
                 if test_func():
                     return
-                time.sleep(check_period_s)
             else:
                 att_fail = (
                     f"\n  (Still failing after attempt limit [{attempts}], testing"
@@ -415,12 +423,15 @@ def try_assert():
             exc = AssertionError(f"{str(fail_msg)}{att_fail}".strip())
             raise exc.with_traceback(tb)
 
-        elif …
+        elif timeout_ms > 0:
+            timeout_s = abs(timeout_ms) / 1000.0
             end = time.monotonic() + timeout_s
             while time.monotonic() < end:
+                wait_for = random.random() * check_period_s  # jitter
+                time.sleep(min(wait_for, end - time.monotonic()))
+                check_period_s *= 2
                 if test_func():
                     return
-                time.sleep(check_period_s)
             att_fail = (
                 f"\n  (Still failing after timeout [{timeout_ms}ms], with attempts "
                 f"every {check_period_ms}ms)"
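The try_assert rework replaces fixed-interval polling with jittered exponential backoff: each wait is a random fraction of a period that doubles on every miss, which desynchronizes concurrent pollers and makes long waits cheap. A standalone sketch of the timeout branch (poll_until is illustrative, not parsl API):

import random
import time

def poll_until(test_func, timeout_s: float, initial_period_s: float = 0.02) -> bool:
    period = initial_period_s
    end = time.monotonic() + timeout_s
    while time.monotonic() < end:
        wait_for = random.random() * period              # jitter in [0, period)
        time.sleep(min(wait_for, max(0.0, end - time.monotonic())))
        period *= 2                                      # exponential backoff
        if test_func():
            return True
    return False

assert poll_until(lambda: True, timeout_s=0.5)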
parsl/tests/test_error_handling/test_resource_spec.py
CHANGED
@@ -23,7 +23,7 @@ def test_resource(n=2):
             break
 
     # Specify incorrect number of resources
-    spec = {'cores': …
+    spec = {'cores': 1, 'memory': 1}
     fut = double(n, parsl_resource_specification=spec)
     try:
         fut.result()
@@ -35,7 +35,7 @@ def test_resource(n=2):
 
     # Specify resources with wrong types
     # 'cpus' is incorrect, should be 'cores'
-    spec = {'cpus': …
+    spec = {'cpus': 1, 'memory': 1, 'disk': 1}
     fut = double(n, parsl_resource_specification=spec)
    try:
         fut.result()
parsl/tests/test_htex/test_priority_queue.py
ADDED
@@ -0,0 +1,70 @@
+import pytest
+
+import parsl
+from parsl.app.app import python_app
+from parsl.config import Config
+from parsl.executors import HighThroughputExecutor
+from parsl.executors.high_throughput.manager_selector import RandomManagerSelector
+from parsl.providers import LocalProvider
+from parsl.usage_tracking.levels import LEVEL_1
+
+
+@python_app
+def fake_task(parsl_resource_specification=None):
+    import time
+    return time.time()
+
+
+@pytest.mark.local
+def test_priority_queue():
+    provider = LocalProvider(
+        init_blocks=0,
+        max_blocks=0,
+        min_blocks=0,
+    )
+
+    htex = HighThroughputExecutor(
+        label="htex_local",
+        max_workers_per_node=1,
+        manager_selector=RandomManagerSelector(),
+        provider=provider,
+    )
+
+    config = Config(
+        executors=[htex],
+        strategy="htex_auto_scale",
+        usage_tracking=LEVEL_1,
+    )
+
+    with parsl.load(config):
+        futures = {}
+
+        # Submit tasks with mixed priorities
+        # Priorities: [10, 10, 5, 5, 1, 1] to test fallback behavior
+        for i, priority in enumerate([10, 10, 5, 5, 1, 1]):
+            spec = {'priority': priority}
+            futures[(priority, i)] = fake_task(parsl_resource_specification=spec)
+
+        provider.max_blocks = 1
+
+        # Wait for completion
+        results = {
+            key: future.result() for key, future in futures.items()
+        }
+
+        # Sort by finish time
+        sorted_by_completion = sorted(results.items(), key=lambda item: item[1])
+        execution_order = [key for key, _ in sorted_by_completion]
+
+        # check priority queue functionality
+        priorities_only = [p for (p, i) in execution_order]
+        assert priorities_only == sorted(priorities_only), "Priority execution order failed"
+
+        # check FIFO fallback
+        from collections import defaultdict
+        seen = defaultdict(list)
+        for (priority, idx) in execution_order:
+            seen[priority].append(idx)
+
+        for priority, indices in seen.items():
+            assert indices == sorted(indices), f"FIFO fallback violated for priority {priority}"
parsl/tests/test_htex/test_resource_spec_validation.py
CHANGED
@@ -36,3 +36,10 @@ def test_resource_spec_validation_bad_keys():
 
     with pytest.raises(InvalidResourceSpecification):
         htex.validate_resource_spec({"num_nodes": 2})
+
+
+@pytest.mark.local
+def test_resource_spec_validation_one_key():
+    htex = HighThroughputExecutor()
+    ret_val = htex.validate_resource_spec({"priority": 2})
+    assert ret_val is None
parsl/tests/test_htex/test_zmq_binding.py
CHANGED
@@ -12,12 +12,15 @@ from parsl.executors.high_throughput.interchange import Interchange
 from parsl.executors.high_throughput.manager_selector import RandomManagerSelector
 
 
-def make_interchange(*, interchange_address: Optional[str], cert_dir: Optional[str]) -> Interchange:
+def make_interchange(*,
+                     interchange_address: Optional[str],
+                     cert_dir: Optional[str],
+                     worker_ports: Optional[tuple[int, int]] = None) -> Interchange:
     return Interchange(interchange_address=interchange_address,
                        cert_dir=cert_dir,
                        client_address="127.0.0.1",
                        client_ports=(50055, 50056, 50057),
-                       worker_ports=None,
+                       worker_ports=worker_ports,
                        worker_port_range=(54000, 55000),
                        hub_address=None,
                        hub_zmq_port=None,
@@ -105,3 +108,10 @@ def test_limited_interface_binding(cert_dir: Optional[str]):
     assert len(matched_conns) == 1
     # laddr.ip can return ::ffff:127.0.0.1 when using IPv6
     assert address in matched_conns[0].laddr.ip
+
+
+@pytest.mark.local
+@pytest.mark.parametrize("encrypted", (True, False), indirect=True)
+def test_fixed_ports(cert_dir: Optional[str]):
+    ix = make_interchange(interchange_address=None, cert_dir=cert_dir, worker_ports=(51117, 51118))
+    assert ix.interchange_address == "*"
parsl/tests/test_monitoring/test_basic.py
CHANGED
@@ -6,7 +6,6 @@ import pytest
 import parsl
 from parsl import HighThroughputExecutor
 from parsl.config import Config
-from parsl.executors.taskvine import TaskVineExecutor, TaskVineManagerConfig
 from parsl.monitoring import MonitoringHub
 
 
@@ -64,17 +63,17 @@ def workqueue_config():
 
 
 def taskvine_config():
+    from parsl.executors.taskvine import TaskVineExecutor, TaskVineManagerConfig
     c = Config(executors=[TaskVineExecutor(manager_config=TaskVineManagerConfig(port=9000),
                                            worker_launch_method='provider')],
+               strategy_period=0.5,
 
                monitoring=MonitoringHub(hub_address="localhost",
                                         resource_monitoring_interval=1))
     return c
 
 
-
-@pytest.mark.parametrize("fresh_config", [htex_config, htex_filesystem_config, htex_udp_config, workqueue_config, taskvine_config])
-def test_row_counts(tmpd_cwd, fresh_config):
+def row_counts_parametrized(tmpd_cwd, fresh_config):
     # this is imported here rather than at module level because
     # it isn't available in a plain parsl install, so this module
     # would otherwise fail to import and break even a basic test
@@ -131,3 +130,23 @@ def test_row_counts(tmpd_cwd, fresh_config):
     result = connection.execute(text("SELECT COUNT(*) FROM resource"))
     (c, ) = result.first()
     assert c >= 1
+
+
+@pytest.mark.local
+@pytest.mark.parametrize("fresh_config", [htex_config, htex_filesystem_config, htex_udp_config])
+def test_row_counts_base(tmpd_cwd, fresh_config):
+    row_counts_parametrized(tmpd_cwd, fresh_config)
+
+
+@pytest.mark.workqueue
+@pytest.mark.local
+@pytest.mark.parametrize("fresh_config", [workqueue_config])
+def test_row_counts_wq(tmpd_cwd, fresh_config):
+    row_counts_parametrized(tmpd_cwd, fresh_config)
+
+
+@pytest.mark.taskvine
+@pytest.mark.local
+@pytest.mark.parametrize("fresh_config", [taskvine_config])
+def test_row_counts_tv(tmpd_cwd, fresh_config):
+    row_counts_parametrized(tmpd_cwd, fresh_config)
parsl/tests/test_radical/test_mpi_funcs.py
CHANGED
@@ -1,3 +1,5 @@
+import os
+
 import pytest
 
 import parsl
@@ -18,6 +20,10 @@ apps = []
 
 @pytest.mark.local
 @pytest.mark.radical
+@pytest.mark.skipif(
+    os.environ.get("RADICAL_CI") != "1",
+    reason="Only runs in Radical CI workflow"
+)
 def test_radical_mpi(n=7):
     # rank size should be > 1 for the
     # radical runtime system to run this function in MPI env
parsl/tests/unit/test_address.py
CHANGED
@@ -13,6 +13,7 @@ from parsl.addresses import tcp_url
     ("::ffff:127.0.0.1", None, "tcp://::ffff:127.0.0.1"),
     ("::ffff:127.0.0.1", None, "tcp://::ffff:127.0.0.1"),
     ("*", None, "tcp://*"),
+    ("*", 55001, "tcp://*:55001")
 ])
 def test_tcp_url(address, port, expected):
     """Confirm valid address generation"""
parsl/usage_tracking/usage.py
CHANGED
parsl/version.py
CHANGED
{parsl-2025.5.19.data → parsl-2025.6.2.data}/scripts/interchange.py
CHANGED
This bundled script is a copy of parsl/executors/high_throughput/interchange.py; its diff is identical to the interchange.py diff shown above.
{parsl-2025.5.19.dist-info → parsl-2025.6.2.dist-info}/METADATA
CHANGED
@@ -1,9 +1,9 @@
 Metadata-Version: 2.1
 Name: parsl
-Version: 2025.5.19
+Version: 2025.6.2
 Summary: Simple data dependent workflows in Python
 Home-page: https://github.com/Parsl/parsl
-Download-URL: https://github.com/Parsl/parsl/archive/2025.05.19.tar.gz
+Download-URL: https://github.com/Parsl/parsl/archive/2025.06.02.tar.gz
 Author: The Parsl Team
 Author-email: parsl@googlegroups.com
 License: Apache 2.0
@@ -24,6 +24,7 @@ Requires-Dist: globus-sdk
 Requires-Dist: dill
 Requires-Dist: tblib
 Requires-Dist: requests
+Requires-Dist: sortedcontainers
 Requires-Dist: psutil>=5.5.1
 Requires-Dist: setproctitle
 Requires-Dist: filelock<4,>=3.13
{parsl-2025.5.19.dist-info → parsl-2025.6.2.dist-info}/RECORD
CHANGED
@@ -1,5 +1,5 @@
 parsl/__init__.py,sha256=65VfBnxw2k8V3sHsbhKoUCqG-ps2XP2l3x3ALMqQ13Y,1777
-parsl/addresses.py,sha256=…
+parsl/addresses.py,sha256=z5GnIWdbzz4klRiMZtX8XmRT7OP8dJYvAk8RIKD2kzI,5290
 parsl/config.py,sha256=p5HQoxLj5aMagUAYfngcXG2kw0s6SJoc6u7vH2sVhPU,9635
 parsl/curvezmq.py,sha256=6Zi7RqTP_eKWi3DFgapfK2t-Jw8vJS-ZtN1bsrByPeo,7073
 parsl/errors.py,sha256=SzINzQFZDBDbj9l-DPQznD0TbGkNhHIRAPkcBCogf_A,1019
@@ -8,7 +8,7 @@ parsl/multiprocessing.py,sha256=JNAfgdZvQSsxVyUp229OOUqWwf_ZUhpmw8X9CdF3i6k,3614
 parsl/process_loggers.py,sha256=uQ7Gd0W72Jz7rrcYlOMfLsAEhkRltxXJL2MgdduJjEw,1136
 parsl/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 parsl/utils.py,sha256=codTX6_KLhgeTwNkRzc1lo4bgc1M93eJ-lkqOO98fvk,14331
-parsl/version.py,sha256=…
+parsl/version.py,sha256=iOGq8naBMZwM3-DVQabgtw9WK1TvA8x_qTWhiLs8kd4,131
 parsl/app/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 parsl/app/app.py,sha256=0gbM4AH2OtFOLsv07I5nglpElcwMSOi-FzdZZfrk7So,8532
 parsl/app/bash.py,sha256=jm2AvePlCT9DZR7H_4ANDWxatp5dN_22FUlT_gWhZ-g,5528
@@ -73,8 +73,8 @@ parsl/executors/flux/executor.py,sha256=ii1i5V7uQnhf1BDq5nnMscmmpXJkCWtrFCuBbDaP
 parsl/executors/flux/flux_instance_manager.py,sha256=5T3Rp7ZM-mlT0Pf0Gxgs5_YmnaPrSF9ec7zvRfLfYJw,2129
 parsl/executors/high_throughput/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 parsl/executors/high_throughput/errors.py,sha256=k2XuvvFdUfNs2foHFnxmS-BToRMfdXpYEa4EF3ELKq4,1554
-parsl/executors/high_throughput/executor.py,sha256=…
-parsl/executors/high_throughput/interchange.py,sha256=…
+parsl/executors/high_throughput/executor.py,sha256=x5759lCTIev1yzsJ4OwMp_0me7Cs108HLqKbz1r_zkg,39812
+parsl/executors/high_throughput/interchange.py,sha256=lnPhV-5OR_rp4fa_Wj0tmG3avwUdPgJH4LTTBZQcrf8,28922
 parsl/executors/high_throughput/manager_record.py,sha256=ZMsqFxvreGLRXAw3N-JnODDa9Qfizw2tMmcBhm4lco4,490
 parsl/executors/high_throughput/manager_selector.py,sha256=UKcUE6v0tO7PDMTThpKSKxVpOpOUilxDL7UbNgpZCxo,2116
 parsl/executors/high_throughput/monitoring_info.py,sha256=HC0drp6nlXQpAop5PTUKNjdXMgtZVvrBL0JzZJebPP4,298
@@ -100,7 +100,7 @@ parsl/executors/taskvine/utils.py,sha256=iSrIogeiauL3UNy_9tiZp1cBSNn6fIJkMYQRVi1
 parsl/executors/workqueue/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 parsl/executors/workqueue/errors.py,sha256=XO2naYhAsHHyiOBH6hpObg3mPNDmvMoFqErsj0-v7jc,541
 parsl/executors/workqueue/exec_parsl_function.py,sha256=YXKVVIa4zXmOtz-0Ca4E_5nQfN_3S2bh2tB75uZZB4w,7774
-parsl/executors/workqueue/executor.py,sha256=…
+parsl/executors/workqueue/executor.py,sha256=kE0vNN4cSau5p3R6uRDI20THN8e4dBGPHiGKWx5xpo4,49827
 parsl/executors/workqueue/parsl_coprocess.py,sha256=cF1UmTgVLoey6QzBcbYgEiEsRidSaFfuO54f1HFw_EM,5737
 parsl/executors/workqueue/parsl_coprocess_stub.py,sha256=_bJmpPIgL42qM6bVzeEKt1Mn1trSP41rtJguXxPGfHI,735
 parsl/jobs/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -123,7 +123,7 @@ parsl/monitoring/types.py,sha256=oOCrzv-ab-_rv4pb8o58Sdb8G_RGp1aZriRbdf9zBEk,339
 parsl/monitoring/queries/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 parsl/monitoring/queries/pandas.py,sha256=0Z2r0rjTKCemf0eaDkF1irvVHn5g7KC5SYETvQPRxwU,2232
 parsl/monitoring/radios/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-parsl/monitoring/radios/base.py,sha256=…
+parsl/monitoring/radios/base.py,sha256=MBW3aAbxQY7TMuNGZEN15dWoy3mAELOqz-GMN1d7vF4,221
 parsl/monitoring/radios/filesystem.py,sha256=ioZ3jOKX5Qf0DYRtWmpCEorfuMVbS58OMS_QV7DOFOs,1765
 parsl/monitoring/radios/filesystem_router.py,sha256=kQkinktSpsVwfNESfUggSzBlRZ5JgwjM7IDN-jARAhM,2146
 parsl/monitoring/radios/htex.py,sha256=qBu4O5NYnSETHX0ptdwxSpqa2Pp3Z_V6a6lb3TbjKm4,1643
@@ -180,10 +180,10 @@ parsl/providers/lsf/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSu
 parsl/providers/lsf/lsf.py,sha256=dB3CwYY39rM7E2dg3eP2UkeNHvatBxfl6JJns6FvLEY,10661
 parsl/providers/lsf/template.py,sha256=leQ_TpXv7ePMzbHfLaWvqMR0VORxlp-hjX5JxtkcwwU,269
 parsl/providers/pbspro/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-parsl/providers/pbspro/pbspro.py,sha256=…
+parsl/providers/pbspro/pbspro.py,sha256=vzopQ80iXcl4Wx0_Bs5OtadYGeoGdbDgK7YHfjUjG_Y,8826
 parsl/providers/pbspro/template.py,sha256=y-Dher--t5Eury-c7cAuSZs9FEUXWiruFUI07v81558,315
 parsl/providers/slurm/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-parsl/providers/slurm/slurm.py,sha256=…
+parsl/providers/slurm/slurm.py,sha256=J58iSRqcVahhHmyIEvmTFlaL0EVaf7yxkD_fec_ja_o,17323
 parsl/providers/slurm/template.py,sha256=KpgBEFMc1ps-38jdrk13xUGx9TCivu-iF90jgQDdiEQ,315
 parsl/providers/torque/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 parsl/providers/torque/template.py,sha256=4qfc2gmlEhRCAD7erFDOs4prJQ43I8s4E8DSUSVQx3A,358
@@ -196,7 +196,7 @@ parsl/serialize/facade.py,sha256=3uOuVp0epfyLn7qDzuWqLfsy971YVGD3sqwqcAiRwh0,668
 parsl/serialize/proxystore.py,sha256=o-ha9QAvVhbN8y9S1itk3W0O75eyHYZw2AvB2xu5_Lg,1624
 parsl/tests/__init__.py,sha256=VTtJzOzz_x6fWNh8IOnsgFqVbdiJShi2AZH21mcmID4,204
 parsl/tests/callables_helper.py,sha256=ceP1YYsNtrZgKT6MAIvpgdccEjQ_CpFEOnZBGHKGOx0,30
-parsl/tests/conftest.py,sha256=…
+parsl/tests/conftest.py,sha256=ssY-ZP12a1TXj99ifRc0E83MQjri2bgwvMIe-YF5yd0,15485
 parsl/tests/test_aalst_patterns.py,sha256=lNIxb7nIgh1yX7hR2fr_ck_mxYJxx8ASKK9zHUVqPno,9614
 parsl/tests/test_callables.py,sha256=97vrIF1_hfDGd81FM1bhR6FemZMWFcALrH6pVHMTCt8,1974
 parsl/tests/test_curvezmq.py,sha256=yyhlS4vmaZdMitiySoy4l_ih9H1bsPiN-tMdwIh3H20,12431
@@ -309,7 +309,7 @@ parsl/tests/test_error_handling/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm
 parsl/tests/test_error_handling/test_fail.py,sha256=xx4TGWfL7le4cQ9nvnUkrlmKQJkskhD0l_3W1xwZSEI,282
 parsl/tests/test_error_handling/test_python_walltime.py,sha256=rdmGZHIkuann2Njt3i62odKJ0FaODGr7-L96rOXNVYg,950
 parsl/tests/test_error_handling/test_rand_fail.py,sha256=crFg4GmwdDpvx49_7w5Xt2P7H2R_V9f6i1Ar-QkASuU,3864
-parsl/tests/test_error_handling/test_resource_spec.py,sha256=…
+parsl/tests/test_error_handling/test_resource_spec.py,sha256=dyuzMkS3M_BmZUbu1mF7yojwkJehDbdFvphNlYwU9yM,1458
 parsl/tests/test_error_handling/test_retries.py,sha256=zJ9D2hrvXQURnK2OIf5LfQFcSDVZ8rhdpp6peGccY7s,2372
 parsl/tests/test_error_handling/test_retry_handler.py,sha256=8fMHffMBLhRyNreIqkrwamx9TYRZ498uVYNlkcbAoLU,1407
 parsl/tests/test_error_handling/test_retry_handler_failure.py,sha256=GaGtZZCB9Wb7ieShqTrxUFEUSKy07ZZWytCY4Qixk9Y,552
@@ -332,12 +332,13 @@ parsl/tests/test_htex/test_manager_selector_by_block.py,sha256=VQqSE6MDhGpDSjShG
 parsl/tests/test_htex/test_managers_command.py,sha256=SCwkfyGB-Udgu5L2yDMpR5bsaT-aNjNkiXxtuRb25DI,1622
 parsl/tests/test_htex/test_missing_worker.py,sha256=gyp5i7_t-JHyJGtz_eXZKKBY5w8oqLOIxO6cJgGJMtQ,745
 parsl/tests/test_htex/test_multiple_disconnected_blocks.py,sha256=2vXZoIx4NuAWYuiNoL5Gxr85w72qZ7Kdb3JGh0FufTg,1867
-parsl/tests/test_htex/test_resource_spec_validation.py,sha256=…
+parsl/tests/test_htex/test_priority_queue.py,sha256=vH58WwDZVpyIiMqhjwGkme7Cv5-jupTmM52EOcbdrEg,2106
+parsl/tests/test_htex/test_resource_spec_validation.py,sha256=ZXW02jDd1rNxjBLh1jHyiz31zNoB9JzDw94aWllXFd4,1102
 parsl/tests/test_htex/test_worker_failure.py,sha256=Uz-RHI-LK78FMjXUvrUFmo4iYfmpDVBUcBxxRb3UG9M,603
-parsl/tests/test_htex/test_zmq_binding.py,sha256=…
+parsl/tests/test_htex/test_zmq_binding.py,sha256=G7D2_p9vOekgpB50MBiPRwtIz98DEkUpMqA3rdwzYTQ,4397
 parsl/tests/test_monitoring/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 parsl/tests/test_monitoring/test_app_names.py,sha256=A-mOMCVhZDnUyJp32fsTUkHdcyval8o7WPEWacDkbD4,2208
-parsl/tests/test_monitoring/test_basic.py,sha256=…
+parsl/tests/test_monitoring/test_basic.py,sha256=eAGHX1s7RweqZLSRmh7sY_l_zLGHQaHOXVuIN7R5SrI,5117
 parsl/tests/test_monitoring/test_db_locks.py,sha256=3s3c1xhKo230ZZIJ3f1Ca4U7LcEdXnanOGVXQyNlk2U,2895
 parsl/tests/test_monitoring/test_exit_helper.py,sha256=ob8Qd1hlkq_mowygfPetTnYN9LfuqeXHRpPilSfDSog,1232
 parsl/tests/test_monitoring/test_fuzz_zmq.py,sha256=SQNNHhXxHB_LwW4Ujqkgut3lbG0XVW-hliPagQQpiTc,3449
@@ -393,7 +394,7 @@ parsl/tests/test_python_apps/test_simple.py,sha256=LYGjdHvRizTpYzZePPvwKSPwrr2MP
 parsl/tests/test_python_apps/test_timeout.py,sha256=uENfT-1DharQkqkeG7a89E-gU1gjE7ATJrBZGUKvZSA,998
 parsl/tests/test_python_apps/test_type5.py,sha256=kUyA1NuFu-DDXsJNNvJLZVyewZBt7QAOhcGm2DWFTQw,777
 parsl/tests/test_radical/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-parsl/tests/test_radical/test_mpi_funcs.py,sha256=…
+parsl/tests/test_radical/test_mpi_funcs.py,sha256=nKy5V2w48QYB_wJsp5E3dy0LBdz7a3qPBhBCsbNoa_Y,886
 parsl/tests/test_regression/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 parsl/tests/test_regression/test_1480.py,sha256=sJjcX8O0eL_OG7L3skjT3JaIUyFC-kk0ui3puaDatKA,546
 parsl/tests/test_regression/test_1606_wait_for_current_tasks.py,sha256=frqPtaiVysevj9nCWoQlAeh9K1jQO5zaahr9ev_Mx_0,1134
@@ -410,7 +411,7 @@ parsl/tests/test_scaling/test_block_error_handler.py,sha256=OS1IyiK8gjRFI1VzpmOv
 parsl/tests/test_scaling/test_regression_1621.py,sha256=e3-bkHR3d8LxA-uY0BugyWgYzksh00I_UbaA-jHOzKY,1872
 parsl/tests/test_scaling/test_regression_3568_scaledown_vs_MISSING.py,sha256=bjE_NIBoWK6heEz5LN0tzE1977vUA9kVemAYCqcIbzY,2942
 parsl/tests/test_scaling/test_regression_3696_oscillation.py,sha256=7Xc3vgocXXUbUegh9t5OyXlV91lRXDVMUlrOwErYOXA,3621
-parsl/tests/test_scaling/test_scale_down.py,sha256=…
+parsl/tests/test_scaling/test_scale_down.py,sha256=GmxzNtlG13SySVDGGlSqXEnaHxyCx6ZVn_Hi1GcBvj8,2765
 parsl/tests/test_scaling/test_scale_down_htex_auto_scale.py,sha256=EnVNllKO2AGKkGa6927cLrzvvG6mpNQeFDzVktv6x08,4521
 parsl/tests/test_scaling/test_scale_down_htex_unregistered.py,sha256=OrdnYmd58n7UfkANPJ7mzha4WSCPdbgJRX1O1Zdu0tI,1954
 parsl/tests/test_scaling/test_shutdown_scalein.py,sha256=sr40of5DwxeyQI97MDZxFqJILZSXZJb9Dv7qTf2gql8,2471
@@ -451,21 +452,21 @@ parsl/tests/test_utils/test_execute_wait.py,sha256=J796rGuv2qi2spChgAPFB1oPETdnv
 parsl/tests/test_utils/test_representation_mixin.py,sha256=kUZeIDwA2rlbJ3-beGzLLwf3dOplTMCrWJN87etHcyY,1633
 parsl/tests/test_utils/test_sanitize_dns.py,sha256=8P_v5a5JLGU76OYf0LtclAwqJxGU0fMh_OZMVkMke3I,2954
 parsl/tests/unit/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-parsl/tests/unit/test_address.py,sha256=…
+parsl/tests/unit/test_address.py,sha256=0JxaEyvEiLIr5aKvaNnSv0Z9ta3kNllsLS_aby23QPs,716
 parsl/tests/unit/test_file.py,sha256=vLycnYcv3bvSzL-FV8WdoibqTyb41BrH1LUYBavobsg,2850
 parsl/tests/unit/test_globus_compute_executor.py,sha256=9BWKZ4C03tQ5gZ3jxIsDt5j2yyYHa_VHqULJPeM7YPM,3238
 parsl/tests/unit/test_usage_tracking.py,sha256=xEfUlbBRpsFdUdOrCsk1Kz5AfmMxJT7f0_esZl8Ft-0,1884
 parsl/usage_tracking/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 parsl/usage_tracking/api.py,sha256=iaCY58Dc5J4UM7_dJzEEs871P1p1HdxBMtNGyVdzc9g,1821
 parsl/usage_tracking/levels.py,sha256=xbfzYEsd55KiZJ-mzNgPebvOH4rRHum04hROzEf41tU,291
-parsl/usage_tracking/usage.py,sha256=…
-parsl-2025.…
-parsl-2025.…
-parsl-2025.…
-parsl-2025.…
-parsl-2025.…
-parsl-2025.…
-parsl-2025.…
-parsl-2025.…
-parsl-2025.…
-parsl-2025.…
+parsl/usage_tracking/usage.py,sha256=hbMo5BYgIWqMcFWqN-HYP1TbwNrTonpv-usfwnCFJKY,9212
+parsl-2025.6.2.data/scripts/exec_parsl_function.py,sha256=YXKVVIa4zXmOtz-0Ca4E_5nQfN_3S2bh2tB75uZZB4w,7774
+parsl-2025.6.2.data/scripts/interchange.py,sha256=_FRB1LxkL9vnT3y24NTXHOzotMlDJEXwF5ZZCjGmcww,28909
+parsl-2025.6.2.data/scripts/parsl_coprocess.py,sha256=zrVjEqQvFOHxsLufPi00xzMONagjVwLZbavPM7bbjK4,5722
+parsl-2025.6.2.data/scripts/process_worker_pool.py,sha256=__gFeFQJpV5moRofj3WKQCnKp6gmzieXjzkmzVuTmX4,41123
+parsl-2025.6.2.dist-info/LICENSE,sha256=tAkwu8-AdEyGxGoSvJ2gVmQdcicWw3j1ZZueVV74M-E,11357
+parsl-2025.6.2.dist-info/METADATA,sha256=6TKNQwxwu5QiBBEZaDWkTNGu0o2f2baDSnd_3BkDqsA,4054
+parsl-2025.6.2.dist-info/WHEEL,sha256=tZoeGjtWxWRfdplE7E3d45VPlLNQnvbKiYnx7gwAy8A,92
+parsl-2025.6.2.dist-info/entry_points.txt,sha256=XqnsWDYoEcLbsMcpnYGKLEnSBmaIe1YoM5YsBdJG2tI,176
+parsl-2025.6.2.dist-info/top_level.txt,sha256=PIheYoUFQtF2icLsgOykgU-Cjuwr2Oi6On2jo5RYgRM,6
+parsl-2025.6.2.dist-info/RECORD,,