parsl 2024.7.15__py3-none-any.whl → 2024.7.29__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- parsl/dataflow/dflow.py +3 -9
- parsl/executors/base.py +4 -4
- parsl/executors/flux/executor.py +7 -7
- parsl/executors/high_throughput/executor.py +42 -49
- parsl/executors/high_throughput/interchange.py +1 -0
- parsl/executors/high_throughput/manager_record.py +1 -0
- parsl/executors/high_throughput/process_worker_pool.py +12 -9
- parsl/executors/status_handling.py +58 -33
- parsl/executors/taskvine/executor.py +0 -18
- parsl/executors/workqueue/executor.py +0 -18
- parsl/monitoring/monitoring.py +4 -4
- parsl/monitoring/radios.py +7 -7
- parsl/monitoring/remote.py +12 -12
- parsl/tests/test_monitoring/test_fuzz_zmq.py +2 -2
- parsl/version.py +1 -1
- {parsl-2024.7.15.data → parsl-2024.7.29.data}/scripts/interchange.py +1 -0
- {parsl-2024.7.15.data → parsl-2024.7.29.data}/scripts/process_worker_pool.py +12 -9
- {parsl-2024.7.15.dist-info → parsl-2024.7.29.dist-info}/METADATA +2 -2
- {parsl-2024.7.15.dist-info → parsl-2024.7.29.dist-info}/RECORD +25 -25
- {parsl-2024.7.15.data → parsl-2024.7.29.data}/scripts/exec_parsl_function.py +0 -0
- {parsl-2024.7.15.data → parsl-2024.7.29.data}/scripts/parsl_coprocess.py +0 -0
- {parsl-2024.7.15.dist-info → parsl-2024.7.29.dist-info}/LICENSE +0 -0
- {parsl-2024.7.15.dist-info → parsl-2024.7.29.dist-info}/WHEEL +0 -0
- {parsl-2024.7.15.dist-info → parsl-2024.7.29.dist-info}/entry_points.txt +0 -0
- {parsl-2024.7.15.dist-info → parsl-2024.7.29.dist-info}/top_level.txt +0 -0
parsl/dataflow/dflow.py
CHANGED
@@ -113,14 +113,10 @@ class DataFlowKernel:
         self.monitoring: Optional[MonitoringHub]
         self.monitoring = config.monitoring
 
-        # hub address and port for interchange to connect
-        self.hub_address = None  # type: Optional[str]
-        self.hub_zmq_port = None  # type: Optional[int]
         if self.monitoring:
             if self.monitoring.logdir is None:
                 self.monitoring.logdir = self.run_dir
-            self.hub_address = self.monitoring.hub_address
-            self.hub_zmq_port = self.monitoring.start(self.run_id, self.run_dir, self.config.run_dir)
+            self.monitoring.start(self.run_id, self.run_dir, self.config.run_dir)
 
         self.time_began = datetime.datetime.now()
         self.time_completed: Optional[datetime.datetime] = None
@@ -1181,9 +1177,9 @@ class DataFlowKernel:
         for executor in executors:
             executor.run_id = self.run_id
             executor.run_dir = self.run_dir
-            executor.hub_address = self.hub_address
-            executor.hub_zmq_port = self.hub_zmq_port
             if self.monitoring:
+                executor.hub_address = self.monitoring.hub_address
+                executor.hub_zmq_port = self.monitoring.hub_zmq_port
                 executor.monitoring_radio = self.monitoring.radio
             if hasattr(executor, 'provider'):
                 if hasattr(executor.provider, 'script_dir'):
@@ -1460,8 +1456,6 @@ class DataFlowKernel:
         Returns:
             - dict containing, hashed -> future mappings
         """
-        self.memo_lookup_table = None
-
         if checkpointDirs:
             return self._load_checkpoints(checkpointDirs)
         else:
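Net effect of the dflow.py changes: the DataFlowKernel no longer keeps its own copies of hub_address and hub_zmq_port; the MonitoringHub owns them, and executors are wired up from monitoring state only when monitoring is configured. Below is a minimal sketch of that ownership move, with stand-in Hub and Executor classes (not parsl's real classes):

    # Illustrative sketch, not parsl source: one owner (the monitoring hub)
    # holds the connection details; consumers read them at wiring time
    # instead of keeping their own copies.
    from typing import Optional


    class Hub:
        """Stands in for MonitoringHub: start() records where the hub listens."""
        def __init__(self) -> None:
            self.hub_address: Optional[str] = None
            self.hub_zmq_port: Optional[int] = None

        def start(self) -> None:
            # In parsl, start() launches router/db processes; here we just
            # record an endpoint, as the new code stores hub_zmq_port on self.
            self.hub_address = "127.0.0.1"
            self.hub_zmq_port = 55055


    class Executor:
        """Stands in for ParslExecutor configuration attributes."""
        hub_address: Optional[str] = None
        hub_zmq_port: Optional[int] = None


    def add_executor(hub: Optional[Hub], executor: Executor) -> None:
        # Mirrors the new add_executors body: connection details are copied
        # from the hub only when monitoring is configured.
        if hub:
            executor.hub_address = hub.hub_address
            executor.hub_zmq_port = hub.hub_zmq_port


    hub = Hub()
    hub.start()
    ex = Executor()
    add_executor(hub, ex)
    assert ex.hub_zmq_port == 55055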
parsl/executors/base.py
CHANGED
@@ -5,7 +5,7 @@ from typing import Any, Callable, Dict, Optional
 
 from typing_extensions import Literal, Self
 
-from parsl.monitoring.radios import MonitoringRadio
+from parsl.monitoring.radios import MonitoringRadioSender
 
 
 class ParslExecutor(metaclass=ABCMeta):
@@ -52,7 +52,7 @@ class ParslExecutor(metaclass=ABCMeta):
         *,
         hub_address: Optional[str] = None,
         hub_zmq_port: Optional[int] = None,
-        monitoring_radio: Optional[MonitoringRadio] = None,
+        monitoring_radio: Optional[MonitoringRadioSender] = None,
         run_dir: str = ".",
         run_id: Optional[str] = None,
     ):
@@ -147,11 +147,11 @@ class ParslExecutor(metaclass=ABCMeta):
         self._hub_zmq_port = value
 
     @property
-    def monitoring_radio(self) -> Optional[MonitoringRadio]:
+    def monitoring_radio(self) -> Optional[MonitoringRadioSender]:
         """Local radio for sending monitoring messages
         """
         return self._monitoring_radio
 
     @monitoring_radio.setter
-    def monitoring_radio(self, value: Optional[MonitoringRadio]) -> None:
+    def monitoring_radio(self, value: Optional[MonitoringRadioSender]) -> None:
         self._monitoring_radio = value
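The only change here is the type name: executors now accept and expose a MonitoringRadioSender. A small sketch of the typed property pair, assuming the abstract interface shown in the radios.py diff further down:

    # Sketch of the typed accessor pattern on ParslExecutor after the rename;
    # ExecutorLike is a stand-in so the example is self-contained.
    from abc import ABCMeta, abstractmethod
    from typing import Optional


    class MonitoringRadioSender(metaclass=ABCMeta):
        @abstractmethod
        def send(self, message: object) -> None: ...


    class ExecutorLike:
        def __init__(self) -> None:
            self._monitoring_radio: Optional[MonitoringRadioSender] = None

        @property
        def monitoring_radio(self) -> Optional[MonitoringRadioSender]:
            """Local radio for sending monitoring messages."""
            return self._monitoring_radio

        @monitoring_radio.setter
        def monitoring_radio(self, value: Optional[MonitoringRadioSender]) -> None:
            self._monitoring_radio = value


    ex = ExecutorLike()
    assert ex.monitoring_radio is None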
parsl/executors/flux/executor.py
CHANGED
@@ -200,7 +200,6 @@ class FluxExecutor(ParslExecutor, RepresentationMixin):
             raise EnvironmentError("Cannot find Flux installation in PATH")
         self.flux_path = os.path.abspath(flux_path)
         self._task_id_counter = itertools.count()
-        self._socket = zmq.Context().socket(zmq.REP)
         # Assumes a launch command cannot be None or empty
         self.launch_cmd = launch_cmd or self.DEFAULT_LAUNCH_CMD
         self._submission_queue: queue.Queue = queue.Queue()
@@ -213,7 +212,6 @@ class FluxExecutor(ParslExecutor, RepresentationMixin):
             args=(
                 self._submission_queue,
                 self._stop_event,
-                self._socket,
                 self.working_dir,
                 self.flux_executor_kwargs,
                 self.provider,
@@ -306,11 +304,13 @@ def _submit_wrapper(
 
     If an exception is thrown, error out all submitted tasks.
     """
-    try:
-        _submit_flux_jobs(submission_queue, stop_event, *args, **kwargs)
-    except Exception as exc:
-        _error_out_jobs(submission_queue, stop_event, exc)
-        raise
+    with zmq.Context() as ctx:
+        with ctx.socket(zmq.REP) as socket:
+            try:
+                _submit_flux_jobs(submission_queue, stop_event, socket, *args, **kwargs)
+            except Exception as exc:
+                _error_out_jobs(submission_queue, stop_event, exc)
+                raise
 
 
 def _error_out_jobs(
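The flux change moves ZMQ resource ownership off the executor instance and into the thread that actually uses the socket, with context managers ensuring both the socket and the context are closed when the wrapper exits. A minimal sketch of that scoping pattern (requires pyzmq; do_work is a placeholder, not a parsl function):

    import zmq


    def do_work(socket: zmq.Socket) -> None:
        pass  # placeholder for _submit_flux_jobs-style work


    def worker_loop() -> None:
        with zmq.Context() as ctx:               # terminates the context on exit
            with ctx.socket(zmq.REP) as socket:  # closes the socket on exit
                do_work(socket)


    worker_loop()

Creating the socket inside the worker also avoids sharing a ZMQ socket across threads, which pyzmq documents as unsafe.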
parsl/executors/high_throughput/executor.py
CHANGED
@@ -456,8 +456,6 @@ class HighThroughputExecutor(BlockProviderExecutor, RepresentationMixin, UsageInformation):
            "task_id" : <task_id>
            "exception" : serialized exception object, on failure
            }
-
-        The `None` message is a die request.
         """
         logger.debug("Result queue worker starting")
 
@@ -475,58 +473,53 @@ class HighThroughputExecutor(BlockProviderExecutor, RepresentationMixin, UsageInformation):
 
             else:
 
-                if msgs is None:
-                    logger.debug("Got None, exiting")
-                    return
+                for serialized_msg in msgs:
+                    try:
+                        msg = pickle.loads(serialized_msg)
+                    except pickle.UnpicklingError:
+                        raise BadMessage("Message received could not be unpickled")
 
-                else:
-                    for serialized_msg in msgs:
+                    if msg['type'] == 'heartbeat':
+                        continue
+                    elif msg['type'] == 'result':
                         try:
-                            msg = pickle.loads(serialized_msg)
-                        except pickle.UnpicklingError:
-                            raise BadMessage("Message received could not be unpickled")
+                            tid = msg['task_id']
+                        except Exception:
+                            raise BadMessage("Message received does not contain 'task_id' field")
+
+                        if tid == -1 and 'exception' in msg:
+                            logger.warning("Executor shutting down due to exception from interchange")
+                            exception = deserialize(msg['exception'])
+                            self.set_bad_state_and_fail_all(exception)
+                            break
+
+                        task_fut = self.tasks.pop(tid)
+
+                        if 'result' in msg:
+                            result = deserialize(msg['result'])
+                            task_fut.set_result(result)
 
-                        if msg['type'] == 'heartbeat':
-                            continue
-                        elif msg['type'] == 'result':
+                        elif 'exception' in msg:
                             try:
-                                tid = msg['task_id']
-                            except Exception:
-                                raise BadMessage("Message received does not contain 'task_id' field")
-
-                            if tid == -1 and 'exception' in msg:
-                                logger.warning("Executor shutting down due to exception from interchange")
-                                exception = deserialize(msg['exception'])
-                                self.set_bad_state_and_fail_all(exception)
-                                break
-
-                            task_fut = self.tasks.pop(tid)
-
-                            if 'result' in msg:
-                                result = deserialize(msg['result'])
-                                task_fut.set_result(result)
-
-                            elif 'exception' in msg:
-                                try:
-                                    s = deserialize(msg['exception'])
-                                    # s should be a RemoteExceptionWrapper... so we can reraise it
-                                    if isinstance(s, RemoteExceptionWrapper):
-                                        try:
-                                            s.reraise()
-                                        except Exception as e:
-                                            task_fut.set_exception(e)
-                                    elif isinstance(s, Exception):
-                                        task_fut.set_exception(s)
-                                    else:
-                                        raise ValueError("Unknown exception-like type received: {}".format(type(s)))
-                                except Exception as e:
-                                    # TODO could be a proper wrapped exception?
-                                    task_fut.set_exception(
-                                        DeserializationError("Received exception, but handling also threw an exception: {}".format(e)))
-                            else:
-                                raise BadMessage("Message received is neither result or exception")
+                                s = deserialize(msg['exception'])
+                                # s should be a RemoteExceptionWrapper... so we can reraise it
+                                if isinstance(s, RemoteExceptionWrapper):
+                                    try:
+                                        s.reraise()
+                                    except Exception as e:
+                                        task_fut.set_exception(e)
+                                elif isinstance(s, Exception):
+                                    task_fut.set_exception(s)
+                                else:
+                                    raise ValueError("Unknown exception-like type received: {}".format(type(s)))
+                            except Exception as e:
+                                # TODO could be a proper wrapped exception?
+                                task_fut.set_exception(
+                                    DeserializationError("Received exception, but handling also threw an exception: {}".format(e)))
                         else:
-                            raise BadMessage("Message received with unknown type {}".format(msg['type']))
+                            raise BadMessage("Message received is neither result or exception")
+                    else:
+                        raise BadMessage("Message received with unknown type {}".format(msg['type']))
 
         logger.info("Result queue worker finished")
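Two behaviours change in the result queue worker: the None die-request path is gone (matching the docstring removal in the first hunk), and a message whose 'type' field is neither 'heartbeat' nor 'result' now raises BadMessage instead of falling through silently. A compressed sketch of the new dispatch, with a local BadMessage standing in for parsl's own exception class:

    import pickle


    class BadMessage(Exception):
        pass


    def handle(serialized_msg: bytes) -> str:
        try:
            msg = pickle.loads(serialized_msg)
        except pickle.UnpicklingError:
            raise BadMessage("Message received could not be unpickled")

        if msg["type"] == "heartbeat":
            return "heartbeat"          # nothing else to do for heartbeats
        elif msg["type"] == "result":
            if "result" in msg or "exception" in msg:
                return "result"         # real code sets the task Future here
            raise BadMessage("Message received is neither result or exception")
        else:
            # new in this release: unknown types are an error, not a no-op
            raise BadMessage("Message received with unknown type {}".format(msg["type"]))


    assert handle(pickle.dumps({"type": "heartbeat"})) == "heartbeat"
    assert handle(pickle.dumps({"type": "result", "result": b"..."})) == "result"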
parsl/executors/high_throughput/process_worker_pool.py
CHANGED
@@ -184,6 +184,7 @@ class Manager:
 
         self.uid = uid
         self.block_id = block_id
+        self.start_time = time.time()
 
         self.enable_mpi_mode = enable_mpi_mode
         self.mpi_launcher = mpi_launcher
@@ -263,6 +264,7 @@ class Manager:
                    'worker_count': self.worker_count,
                    'uid': self.uid,
                    'block_id': self.block_id,
+                   'start_time': self.start_time,
                    'prefetch_capacity': self.prefetch_capacity,
                    'max_capacity': self.worker_count + self.prefetch_capacity,
                    'os': platform.system(),
@@ -732,17 +734,18 @@ def worker(
         os.sched_setaffinity(0, my_cores)  # type: ignore[attr-defined, unused-ignore]
         logger.info("Set worker CPU affinity to {}".format(my_cores))
 
-    # If CUDA devices, find total number of devices to allow for MPS
-    # See: https://developer.nvidia.com/system-management-interface
-    nvidia_smi_cmd = "nvidia-smi -L > /dev/null && nvidia-smi -L | wc -l"
-    nvidia_smi_ret = subprocess.run(nvidia_smi_cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
-    if nvidia_smi_ret.returncode == 0:
-        num_cuda_devices = int(nvidia_smi_ret.stdout.split()[0])
-    else:
-        num_cuda_devices = None
-
     # If desired, pin to accelerator
     if accelerator is not None:
+
+        # If CUDA devices, find total number of devices to allow for MPS
+        # See: https://developer.nvidia.com/system-management-interface
+        nvidia_smi_cmd = "nvidia-smi -L > /dev/null && nvidia-smi -L | wc -l"
+        nvidia_smi_ret = subprocess.run(nvidia_smi_cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+        if nvidia_smi_ret.returncode == 0:
+            num_cuda_devices = int(nvidia_smi_ret.stdout.split()[0])
+        else:
+            num_cuda_devices = None
+
         try:
             if num_cuda_devices is not None:
                 procs_per_cuda_device = pool_size // num_cuda_devices
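Managers now report a start_time in their registration message, and the nvidia-smi probe runs only when accelerator pinning was actually requested, so hosts without GPUs (or without nvidia-smi on PATH) no longer shell out on every worker start. A sketch of the guarded probe, using the same shell command as the diff; the function names here are illustrative:

    import subprocess
    from typing import Optional


    def count_cuda_devices() -> Optional[int]:
        # Same probe as the diff: list devices, count lines; returncode != 0
        # means nvidia-smi is absent or failed, so report "unknown".
        nvidia_smi_cmd = "nvidia-smi -L > /dev/null && nvidia-smi -L | wc -l"
        ret = subprocess.run(nvidia_smi_cmd, shell=True,
                             stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        if ret.returncode == 0:
            return int(ret.stdout.split()[0])
        return None


    def maybe_probe(accelerator: Optional[str]) -> Optional[int]:
        if accelerator is None:
            return None  # old code probed unconditionally; new code skips here
        return count_cuda_devices()


    print(maybe_probe(None))  # prints None without running any subprocess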
parsl/executors/status_handling.py
CHANGED
@@ -59,20 +59,28 @@ class BlockProviderExecutor(ParslExecutor):
         else:
             self.block_error_handler = block_error_handler
 
-        # errors can happen during the submit call to the provider; this is used
-        # to keep track of such errors so that they can be handled in one place
-        # together with errors reported by status()
-        self._simulated_status: Dict[str, JobStatus] = {}
         self._executor_bad_state = threading.Event()
         self._executor_exception: Optional[Exception] = None
 
         self._block_id_counter = AtomicIDCounter()
 
         self._tasks = {}  # type: Dict[object, Future]
+
+        self._last_poll_time = 0.0
+
+        # these four structures track, in loosely coordinated fashion, the
+        # existence of blocks and jobs and how to map between their
+        # identifiers.
         self.blocks_to_job_id = {}  # type: Dict[str, str]
         self.job_ids_to_block = {}  # type: Dict[str, str]
 
+        # errors can happen during the submit call to the provider; this is used
+        # to keep track of such errors so that they can be handled in one place
+        # together with errors reported by status()
+        self._simulated_status: Dict[str, JobStatus] = {}
+
+        # this stores an approximation (sometimes delayed) of the latest status
+        # of pending, active and recently terminated blocks
         self._status = {}  # type: Dict[str, JobStatus]
 
     def _make_status_dict(self, block_ids: List[str], status_list: List[JobStatus]) -> Dict[str, JobStatus]:
@@ -113,20 +121,6 @@ class BlockProviderExecutor(ParslExecutor):
         raise NotImplementedError("Classes inheriting from BlockProviderExecutor must implement "
                                   "outstanding()")
 
-    def status(self) -> Dict[str, JobStatus]:
-        """Return the status of all jobs/blocks currently known to this executor.
-
-        :return: a dictionary mapping block ids (in string) to job status
-        """
-        if self._provider:
-            block_ids, job_ids = self._get_block_and_job_ids()
-            status = self._make_status_dict(block_ids, self._provider.status(job_ids))
-        else:
-            status = {}
-        status.update(self._simulated_status)
-
-        return status
-
     def set_bad_state_and_fail_all(self, exception: Exception):
         """Allows external error handlers to mark this executor as irrecoverably bad and cause
         all tasks submitted to it now and in the future to fail. The executor is responsible
@@ -180,7 +174,17 @@ class BlockProviderExecutor(ParslExecutor):
         # Filters first iterable by bool values in second
         return list(compress(to_kill, killed))
 
-    def scale_out(self, blocks: int = 1) -> List[str]:
+    def scale_out_facade(self, n: int) -> List[str]:
+        block_ids = self._scale_out(n)
+        if block_ids is not None:
+            new_status = {}
+            for block_id in block_ids:
+                new_status[block_id] = JobStatus(JobState.PENDING)
+            self.send_monitoring_info(new_status)
+            self._status.update(new_status)
+        return block_ids
+
+    def _scale_out(self, blocks: int = 1) -> List[str]:
         """Scales out the number of blocks by "blocks"
         """
         if not self.provider:
@@ -199,15 +203,32 @@ class BlockProviderExecutor(ParslExecutor):
             self._simulated_status[block_id] = JobStatus(JobState.FAILED, "Failed to start block {}: {}".format(block_id, ex))
         return block_ids
 
-    @abstractmethod
     def scale_in(self, blocks: int) -> List[str]:
         """Scale in method.
 
         Cause the executor to reduce the number of blocks by count.
 
+        The default implementation will kill blocks without regard to their
+        status or whether they are executing tasks. Executors with more
+        nuanced scaling strategies might overload this method to work with
+        that strategy - see the HighThroughputExecutor for an example of that.
+
         :return: A list of block ids corresponding to the blocks that were removed.
         """
-        pass
+        # Obtain list of blocks to kill
+        to_kill = list(self.blocks_to_job_id.keys())[:blocks]
+        kill_ids = [self.blocks_to_job_id[block] for block in to_kill]
+
+        # Cancel the blocks provisioned
+        if self.provider:
+            logger.info(f"Scaling in jobs: {kill_ids}")
+            r = self.provider.cancel(kill_ids)
+            job_ids = self._filter_scale_in_ids(kill_ids, r)
+            block_ids_killed = [self.job_ids_to_block[jid] for jid in job_ids]
+            return block_ids_killed
+        else:
+            logger.error("No execution provider available to scale in")
+            return []
 
     def _launch_block(self, block_id: str) -> Any:
         launch_cmd = self._get_launch_command(block_id)
@@ -243,7 +264,7 @@ class BlockProviderExecutor(ParslExecutor):
         # Send monitoring info for HTEX when monitoring enabled
         if self.monitoring_radio:
             msg = self.create_monitoring_info(status)
-            logger.debug("Sending …
+            logger.debug("Sending block monitoring message: %r", msg)
             self.monitoring_radio.send((MessageType.BLOCK_INFO, msg))
 
     def create_monitoring_info(self, status: Dict[str, JobStatus]) -> Sequence[object]:
@@ -276,6 +297,20 @@ class BlockProviderExecutor(ParslExecutor):
         if delta_status:
             self.send_monitoring_info(delta_status)
 
+    def status(self) -> Dict[str, JobStatus]:
+        """Return the status of all jobs/blocks currently known to this executor.
+
+        :return: a dictionary mapping block ids (in string) to job status
+        """
+        if self._provider:
+            block_ids, job_ids = self._get_block_and_job_ids()
+            status = self._make_status_dict(block_ids, self._provider.status(job_ids))
+        else:
+            status = {}
+        status.update(self._simulated_status)
+
+        return status
+
     @property
     def status_facade(self) -> Dict[str, JobStatus]:
         """Return the status of all jobs/blocks of the executor of this poller.
@@ -302,13 +337,3 @@ class BlockProviderExecutor(ParslExecutor):
             del self._status[block_id]
         self.send_monitoring_info(new_status)
         return block_ids
-
-    def scale_out_facade(self, n: int) -> List[str]:
-        block_ids = self.scale_out(n)
-        if block_ids is not None:
-            new_status = {}
-            for block_id in block_ids:
-                new_status[block_id] = JobStatus(JobState.PENDING)
-            self.send_monitoring_info(new_status)
-            self._status.update(new_status)
-        return block_ids
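The scale-out path is split: a public scale_out_facade() wraps the renamed _scale_out() and becomes the single place where newly submitted blocks are recorded as PENDING and reported to monitoring, while scale_in() gains a concrete default implementation instead of being abstract. A simplified sketch of the facade side, with block ids and statuses reduced to strings:

    # Sketch of the facade split introduced here; names simplified, and the
    # provider/monitoring calls replaced by stand-ins.
    from typing import Dict, List


    class ScalingFacade:
        def __init__(self) -> None:
            self._status: Dict[str, str] = {}

        def _scale_out(self, n: int) -> List[str]:
            # stands in for the provider.submit() bookkeeping in _scale_out
            start = len(self._status)
            return [str(start + i) for i in range(n)]

        def send_monitoring_info(self, status: Dict[str, str]) -> None:
            pass  # stands in for monitoring_radio.send((BLOCK_INFO, msg))

        def scale_out_facade(self, n: int) -> List[str]:
            # one public entry point: submit, then record and report PENDING
            block_ids = self._scale_out(n)
            new_status = {block_id: "PENDING" for block_id in block_ids}
            self.send_monitoring_info(new_status)
            self._status.update(new_status)
            return block_ids


    sf = ScalingFacade()
    assert sf.scale_out_facade(2) == ["0", "1"]

Keeping the monitoring side effects in the facade means subclasses can override the raw scaling step without having to remember to emit block status themselves.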
parsl/executors/taskvine/executor.py
CHANGED
@@ -573,24 +573,6 @@ class TaskVineExecutor(BlockProviderExecutor, putils.RepresentationMixin):
     def workers_per_node(self) -> Union[int, float]:
         return 1
 
-    def scale_in(self, count: int) -> List[str]:
-        """Scale in method. Cancel a given number of blocks
-        """
-        # Obtain list of blocks to kill
-        to_kill = list(self.blocks_to_job_id.keys())[:count]
-        kill_ids = [self.blocks_to_job_id[block] for block in to_kill]
-
-        # Cancel the blocks provisioned
-        if self.provider:
-            logger.info(f"Scaling in jobs: {kill_ids}")
-            r = self.provider.cancel(kill_ids)
-            job_ids = self._filter_scale_in_ids(kill_ids, r)
-            block_ids_killed = [self.job_ids_to_block[jid] for jid in job_ids]
-            return block_ids_killed
-        else:
-            logger.error("No execution provider available to scale")
-            return []
-
     def shutdown(self, *args, **kwargs):
         """Shutdown the executor. Sets flag to cancel the submit process and
         collector thread, which shuts down the TaskVine system submission.
parsl/executors/workqueue/executor.py
CHANGED
@@ -689,24 +689,6 @@ class WorkQueueExecutor(BlockProviderExecutor, putils.RepresentationMixin):
     def workers_per_node(self) -> Union[int, float]:
         return self.scaling_cores_per_worker
 
-    def scale_in(self, count: int) -> List[str]:
-        """Scale in method.
-        """
-        # Obtain list of blocks to kill
-        to_kill = list(self.blocks_to_job_id.keys())[:count]
-        kill_ids = [self.blocks_to_job_id[block] for block in to_kill]
-
-        # Cancel the blocks provisioned
-        if self.provider:
-            logger.info(f"Scaling in jobs: {kill_ids}")
-            r = self.provider.cancel(kill_ids)
-            job_ids = self._filter_scale_in_ids(kill_ids, r)
-            block_ids_killed = [self.job_ids_to_block[jid] for jid in job_ids]
-            return block_ids_killed
-        else:
-            logger.error("No execution provider available to scale in")
-            return []
-
     def shutdown(self, *args, **kwargs):
         """Shutdown the executor. Sets flag to cancel the submit process and
         collector thread, which shuts down the Work Queue system submission.
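With the default scale_in() now living on BlockProviderExecutor, the TaskVine and Work Queue executors can simply delete their copy-pasted implementations, as the two deletions above show. A toy sketch of the dedup:

    # Illustrative only: once the base class carries the shared policy,
    # subclasses that had byte-identical copies need no override at all.
    from typing import Dict, List


    class Base:
        def __init__(self) -> None:
            self.blocks_to_job_id: Dict[str, str] = {"0": "job-a", "1": "job-b"}

        def scale_in(self, blocks: int) -> List[str]:
            # default policy: kill the first N known blocks
            return list(self.blocks_to_job_id.keys())[:blocks]


    class TaskVineLike(Base):
        pass  # no override needed any more


    class WorkQueueLike(Base):
        pass


    assert TaskVineLike().scale_in(1) == ["0"]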
parsl/monitoring/monitoring.py
CHANGED
@@ -13,7 +13,7 @@ import typeguard
 
 from parsl.log_utils import set_file_logger
 from parsl.monitoring.message_type import MessageType
-from parsl.monitoring.radios import MultiprocessingQueueRadio
+from parsl.monitoring.radios import MultiprocessingQueueRadioSender
 from parsl.monitoring.router import router_starter
 from parsl.monitoring.types import AddressedMonitoringMessage
 from parsl.multiprocessing import ForkProcess, SizedQueue
@@ -105,7 +105,7 @@ class MonitoringHub(RepresentationMixin):
         self.resource_monitoring_enabled = resource_monitoring_enabled
         self.resource_monitoring_interval = resource_monitoring_interval
 
-    def start(self, run_id: str, dfk_run_dir: str, config_run_dir: Union[str, os.PathLike]) -> int:
+    def start(self, run_id: str, dfk_run_dir: str, config_run_dir: Union[str, os.PathLike]) -> None:
 
         logger.debug("Starting MonitoringHub")
 
@@ -187,7 +187,7 @@ class MonitoringHub(RepresentationMixin):
         self.filesystem_proc.start()
         logger.info(f"Started filesystem radio receiver process {self.filesystem_proc.pid}")
 
-        self.radio = MultiprocessingQueueRadio(self.block_msgs)
+        self.radio = MultiprocessingQueueRadioSender(self.block_msgs)
 
         try:
             comm_q_result = comm_q.get(block=True, timeout=120)
@@ -207,7 +207,7 @@ class MonitoringHub(RepresentationMixin):
 
         logger.info("Monitoring Hub initialized")
 
-        return zmq_port
+        self.hub_zmq_port = zmq_port
 
     # TODO: tighten the Any message format
     def send(self, mtype: MessageType, message: Any) -> None:
parsl/monitoring/radios.py
CHANGED
@@ -15,14 +15,14 @@ _db_manager_excepts: Optional[Exception]
 logger = logging.getLogger(__name__)
 
 
-class MonitoringRadio(metaclass=ABCMeta):
+class MonitoringRadioSender(metaclass=ABCMeta):
     @abstractmethod
     def send(self, message: object) -> None:
         pass
 
 
-class FilesystemRadio(MonitoringRadio):
-    """A MonitoringRadio that sends messages over a shared filesystem.
+class FilesystemRadioSender(MonitoringRadioSender):
+    """A MonitoringRadioSender that sends messages over a shared filesystem.
 
     The messsage directory structure is based on maildir,
     https://en.wikipedia.org/wiki/Maildir
@@ -36,7 +36,7 @@ class FilesystemRadio(MonitoringRadio):
     This avoids a race condition of reading partially written messages.
 
     This radio is likely to give higher shared filesystem load compared to
-    the UDPRadio, but should be much more reliable.
+    the UDP radio, but should be much more reliable.
     """
 
     def __init__(self, *, monitoring_url: str, source_id: int, timeout: int = 10, run_dir: str):
@@ -66,7 +66,7 @@ class FilesystemRadio(MonitoringRadio):
         os.rename(tmp_filename, new_filename)
 
 
-class HTEXRadio(MonitoringRadio):
+class HTEXRadioSender(MonitoringRadioSender):
 
     def __init__(self, monitoring_url: str, source_id: int, timeout: int = 10):
         """
@@ -120,7 +120,7 @@ class HTEXRadio(MonitoringRadio):
         return
 
 
-class UDPRadio(MonitoringRadio):
+class UDPRadioSender(MonitoringRadioSender):
 
     def __init__(self, monitoring_url: str, source_id: int, timeout: int = 10):
         """
@@ -174,7 +174,7 @@ class UDPRadio(MonitoringRadio):
         return
 
 
-class MultiprocessingQueueRadio(MonitoringRadio):
+class MultiprocessingQueueRadioSender(MonitoringRadioSender):
     """A monitoring radio which connects over a multiprocessing Queue.
     This radio is intended to be used on the submit side, where components
     in the submit process, or processes launched by multiprocessing, will have
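Context for the FilesystemRadioSender docstring above: messages are written into a tmp/ directory and then renamed into new/, maildir-style, so a reader never observes a half-written file. A runnable sketch of that delivery step (the paths and message format here are illustrative, not parsl's exact layout):

    import os
    import pickle
    import tempfile


    def deliver(base_dir: str, unique_id: str, message: object) -> str:
        tmp_dir = os.path.join(base_dir, "tmp")
        new_dir = os.path.join(base_dir, "new")
        os.makedirs(tmp_dir, exist_ok=True)
        os.makedirs(new_dir, exist_ok=True)

        tmp_filename = os.path.join(tmp_dir, unique_id)
        new_filename = os.path.join(new_dir, unique_id)

        # write fully under tmp/, then move into new/ in one step
        with open(tmp_filename, "wb") as f:
            pickle.dump(message, f)
        os.rename(tmp_filename, new_filename)  # atomic on POSIX within one filesystem
        return new_filename


    with tempfile.TemporaryDirectory() as d:
        print(deliver(d, "msg-0001", {"hello": "world"}))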
parsl/monitoring/remote.py
CHANGED
@@ -8,10 +8,10 @@ from typing import Any, Callable, Dict, List, Sequence, Tuple
 
 from parsl.monitoring.message_type import MessageType
 from parsl.monitoring.radios import (
-    FilesystemRadio,
-    HTEXRadio,
-    MonitoringRadio,
-    UDPRadio,
+    FilesystemRadioSender,
+    HTEXRadioSender,
+    MonitoringRadioSender,
+    UDPRadioSender,
 )
 from parsl.multiprocessing import ForkProcess
 from parsl.process_loggers import wrap_with_logs
@@ -100,17 +100,17 @@ def monitor_wrapper(*,
     return (wrapped, args, new_kwargs)
 
 
-def get_radio(radio_mode: str, monitoring_hub_url: str, task_id: int, run_dir: str) -> MonitoringRadio:
-    radio: MonitoringRadio
+def get_radio(radio_mode: str, monitoring_hub_url: str, task_id: int, run_dir: str) -> MonitoringRadioSender:
+    radio: MonitoringRadioSender
     if radio_mode == "udp":
-        radio = UDPRadio(monitoring_hub_url,
-                         source_id=task_id)
+        radio = UDPRadioSender(monitoring_hub_url,
+                               source_id=task_id)
     elif radio_mode == "htex":
-        radio = HTEXRadio(monitoring_hub_url,
-                          source_id=task_id)
+        radio = HTEXRadioSender(monitoring_hub_url,
+                                source_id=task_id)
     elif radio_mode == "filesystem":
-        radio = FilesystemRadio(monitoring_url=monitoring_hub_url,
-                                source_id=task_id, run_dir=run_dir)
+        radio = FilesystemRadioSender(monitoring_url=monitoring_hub_url,
+                                      source_id=task_id, run_dir=run_dir)
     else:
         raise RuntimeError(f"Unknown radio mode: {radio_mode}")
     return radio
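get_radio() is a plain string-to-class factory; after the rename it returns a MonitoringRadioSender. A reduced sketch of the dispatch with stub classes (parsl's real constructors take monitoring_hub_url, source_id, and, for the filesystem radio, run_dir):

    from abc import ABCMeta, abstractmethod


    class MonitoringRadioSender(metaclass=ABCMeta):
        @abstractmethod
        def send(self, message: object) -> None: ...


    class UDPRadioSender(MonitoringRadioSender):
        def send(self, message: object) -> None: ...


    class HTEXRadioSender(MonitoringRadioSender):
        def send(self, message: object) -> None: ...


    class FilesystemRadioSender(MonitoringRadioSender):
        def send(self, message: object) -> None: ...


    def get_radio(radio_mode: str) -> MonitoringRadioSender:
        # mirror of the if/elif chain in parsl.monitoring.remote.get_radio
        radio: MonitoringRadioSender
        if radio_mode == "udp":
            radio = UDPRadioSender()
        elif radio_mode == "htex":
            radio = HTEXRadioSender()
        elif radio_mode == "filesystem":
            radio = FilesystemRadioSender()
        else:
            raise RuntimeError(f"Unknown radio mode: {radio_mode}")
        return radio


    assert isinstance(get_radio("htex"), HTEXRadioSender)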
parsl/tests/test_monitoring/test_fuzz_zmq.py
CHANGED
@@ -44,8 +44,8 @@ def test_row_counts():
     # the latter is what i'm most suspicious of in my present investigation
 
     # dig out the interchange port...
-    hub_address = parsl.dfk().hub_address
-    hub_zmq_port = parsl.dfk().hub_zmq_port
+    hub_address = parsl.dfk().monitoring.hub_address
+    hub_zmq_port = parsl.dfk().monitoring.hub_zmq_port
 
     # this will send a string to a new socket connection
     with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
parsl/version.py
CHANGED
-VERSION = '2024.07.15'
+VERSION = '2024.07.29'

{parsl-2024.7.15.data → parsl-2024.7.29.data}/scripts/process_worker_pool.py
CHANGED
Same changes as parsl/executors/high_throughput/process_worker_pool.py above.
{parsl-2024.7.15.dist-info → parsl-2024.7.29.dist-info}/METADATA
CHANGED
@@ -1,9 +1,9 @@
 Metadata-Version: 2.1
 Name: parsl
-Version: 2024.7.15
+Version: 2024.7.29
 Summary: Simple data dependent workflows in Python
 Home-page: https://github.com/Parsl/parsl
-Download-URL: https://github.com/Parsl/parsl/archive/2024.07.15.tar.gz
+Download-URL: https://github.com/Parsl/parsl/archive/2024.07.29.tar.gz
 Author: The Parsl Team
 Author-email: parsl@googlegroups.com
 License: Apache 2.0
{parsl-2024.7.15.dist-info → parsl-2024.7.29.dist-info}/RECORD
CHANGED
@@ -8,7 +8,7 @@ parsl/multiprocessing.py,sha256=MyaEcEq-Qf860u7V98u-PZrPNdtzOZL_NW6EhIJnmfQ,1937
 parsl/process_loggers.py,sha256=uQ7Gd0W72Jz7rrcYlOMfLsAEhkRltxXJL2MgdduJjEw,1136
 parsl/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 parsl/utils.py,sha256=91FjQiTUY383ueAjkBAgE21My9nba6SP2a2SrbB1r1Q,11250
-parsl/version.py,sha256=…
+parsl/version.py,sha256=1_lxZuThXb8Z-pTgQtbYaRM-j9czEj-Gcgyrj-lOHRA,131
 parsl/app/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 parsl/app/app.py,sha256=0gbM4AH2OtFOLsv07I5nglpElcwMSOi-FzdZZfrk7So,8532
 parsl/app/bash.py,sha256=jm2AvePlCT9DZR7H_4ANDWxatp5dN_22FUlT_gWhZ-g,5528
@@ -62,7 +62,7 @@ parsl/data_provider/staging.py,sha256=ZDZuuFg38pjUStegKPcvPsfGp3iMeReMzfU6DSwtJj
 parsl/data_provider/zip.py,sha256=S4kVuH9lxAegRURYbvIUR7EYYBOccyslaqyCrVWUBhw,4497
 parsl/dataflow/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 parsl/dataflow/dependency_resolvers.py,sha256=Om8Dgh7a0ZwgXAc6TlhxLSzvxXHDlNNV1aBNiD3JTNY,3325
-parsl/dataflow/dflow.py,sha256=…
+parsl/dataflow/dflow.py,sha256=rdDWhhsPhCkCqxMIhagBKOZMQKDXsDxp7hnTgY_LPqI,68256
 parsl/dataflow/errors.py,sha256=9SxVhIJY_53FQx8x4OU8UA8nd7lvUbDllH7KfMXpYaY,2177
 parsl/dataflow/futures.py,sha256=08LuP-HFiHBIZmeKCjlsazw_WpQ5fwevrU2_WbidkYw,6080
 parsl/dataflow/memoization.py,sha256=l9uw1Bu50GucBF70M5relpGKFkE4dIM9T3R1KrxW0v0,9583
@@ -70,25 +70,25 @@ parsl/dataflow/rundirs.py,sha256=7aUg1cb0LLTocQxOdBzwtn7a8bIgpdMD5rjZV55UwaQ,115
 parsl/dataflow/states.py,sha256=hV6mfv-y4A6xrujeQglcomnfEs7y3Xm2g6JFwC6dvgQ,2612
 parsl/dataflow/taskrecord.py,sha256=-FuujdZQ1y5GSc-PJ91QKGT-Kp0lrg70MFDoxpbWI1Q,3113
 parsl/executors/__init__.py,sha256=Cg8e-F2NUaBD8A9crDAXKCSdoBEwQVIdgm4FlXd-wvk,476
-parsl/executors/base.py,sha256=…
+parsl/executors/base.py,sha256=BECTvBfVRDATyhBmMdcH75xoFhx-LO3rQYawVhaUJ6M,5144
 parsl/executors/errors.py,sha256=xVswxgi7vmJcUMCeYDAPK8sQT2kHFFROVoOr0dnmcWE,2098
-parsl/executors/status_handling.py,sha256=…
+parsl/executors/status_handling.py,sha256=BPv51pJbscV-HdOWgRN5JRPPyOm1b4m3qBbz7pTQjpc,14662
 parsl/executors/threads.py,sha256=hJt1LzxphqX4fe_9R9Cf1MU0lepWTU_eJe8O665B0Xo,3352
 parsl/executors/flux/__init__.py,sha256=P9grTTeRPXfqXurFhlSS7XhmE6tTbnCnyQ1f9b-oYHE,136
 parsl/executors/flux/execute_parsl_task.py,sha256=gRN7F4HhdrKQ-bvn4wXrquBzFOp_9WF88hMIeUaRg5I,1553
-parsl/executors/flux/executor.py,sha256=…
+parsl/executors/flux/executor.py,sha256=8_xakLUu5zNJAHL0LbeTCFEWqWzRK1eE-3ep4GIIIrY,17017
 parsl/executors/flux/flux_instance_manager.py,sha256=5T3Rp7ZM-mlT0Pf0Gxgs5_YmnaPrSF9ec7zvRfLfYJw,2129
 parsl/executors/high_throughput/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 parsl/executors/high_throughput/errors.py,sha256=Sak8e8UpiEcXefUjMHbhyXc4Rn7kJtOoh7L8wreBQdk,1638
-parsl/executors/high_throughput/executor.py,sha256=…
-parsl/executors/high_throughput/interchange.py,sha256=…
-parsl/executors/high_throughput/manager_record.py,sha256=…
+parsl/executors/high_throughput/executor.py,sha256=fY-OuzStEgyM-ao11debyhbB3pRk4frEmXdcmBHWsvQ,37834
+parsl/executors/high_throughput/interchange.py,sha256=vmsUEAgqshi_K5n9oqb-bOGIt9BOFW32Ei_3Ur1C8EE,30663
+parsl/executors/high_throughput/manager_record.py,sha256=yn3L8TUJFkgm2lX1x0SeS9mkvJowC0s2VIMCFiU7ThM,455
 parsl/executors/high_throughput/monitoring_info.py,sha256=HC0drp6nlXQpAop5PTUKNjdXMgtZVvrBL0JzZJebPP4,298
 parsl/executors/high_throughput/mpi_executor.py,sha256=V07t1GOzFhcwdlZGuYUPqc1NarSr-vUbsNzbK4Cj0m8,3882
 parsl/executors/high_throughput/mpi_prefix_composer.py,sha256=hah_IznfFqk-rzuHWmg6aiF_saiDRrpW-aSo4kH9Nso,4854
 parsl/executors/high_throughput/mpi_resource_management.py,sha256=LFBbJ3BnzTcY_v-jNu30uoIB2Enk4cleN4ygY3dncjY,8194
 parsl/executors/high_throughput/probe.py,sha256=TNpGTXb4_DEeg_h-LHu4zEKi1-hffboxvKcZUl2OZGk,2751
-parsl/executors/high_throughput/process_worker_pool.py,sha256=…
+parsl/executors/high_throughput/process_worker_pool.py,sha256=3s-Ouo3ZEhod7hon8euyL37t1DbP5pSVjXyC23DSN_0,43075
 parsl/executors/high_throughput/zmq_pipes.py,sha256=tAjQB3aNVMuTXziN3dbJWre46YpXgliD55qMBbhYTLU,8581
 parsl/executors/radical/__init__.py,sha256=CKbtV2numw5QvgIBq1htMUrt9TqDCIC2zifyf2svTNU,186
 parsl/executors/radical/executor.py,sha256=426cMt6d8uJFZ_7Ub1kCslaND4OKtBX5WZdz-0RXjMk,22554
@@ -97,7 +97,7 @@ parsl/executors/radical/rpex_worker.py,sha256=qli6i6ejKubTSv3lAE3YiW8RlkHrfl4Jhr
 parsl/executors/taskvine/__init__.py,sha256=9rwp3M8B0YyEhZMLO0RHaNw7u1nc01WHbXLqnBTanu0,293
 parsl/executors/taskvine/errors.py,sha256=euIYkSslrNSI85kyi2s0xzOaO9ik4c1fYHstMIeiBJk,652
 parsl/executors/taskvine/exec_parsl_function.py,sha256=ftGdJU78lKPPkphSHlEi4rj164mhuMHJjghVqfgeXKk,7085
-parsl/executors/taskvine/executor.py,sha256=…
+parsl/executors/taskvine/executor.py,sha256=XsUzFWgFWbxf06jDLMAKiXUF-F1_VLheZ5jhAwSphZk,30977
 parsl/executors/taskvine/factory.py,sha256=rWpEoFphLzqO3HEYyDEbQa14iyvgkdZg7hLZuaY39gQ,2638
 parsl/executors/taskvine/factory_config.py,sha256=AbE2fN2snrF5ITYrrS4DnGn2XkJHUFr_17DYHDHIwq0,3693
 parsl/executors/taskvine/manager.py,sha256=fwRSgYWpbsnr5jXlzvX0sQjOqryqn_77K_svJJ1HJ2U,25631
@@ -106,7 +106,7 @@ parsl/executors/taskvine/utils.py,sha256=iSrIogeiauL3UNy_9tiZp1cBSNn6fIJkMYQRVi1
 parsl/executors/workqueue/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 parsl/executors/workqueue/errors.py,sha256=XO2naYhAsHHyiOBH6hpObg3mPNDmvMoFqErsj0-v7jc,541
 parsl/executors/workqueue/exec_parsl_function.py,sha256=RUkJ4JSJAjr7YyRZ58zhMdg8cR5dVV9odUl3AuzNf3k,7802
-parsl/executors/workqueue/executor.py,sha256=…
+parsl/executors/workqueue/executor.py,sha256=YaY_U5DxXU2NbswmlrcJ2BtXvSbV0ElM9ZlQzP_F_BU,49803
 parsl/executors/workqueue/parsl_coprocess.py,sha256=cF1UmTgVLoey6QzBcbYgEiEsRidSaFfuO54f1HFw_EM,5737
 parsl/executors/workqueue/parsl_coprocess_stub.py,sha256=_bJmpPIgL42qM6bVzeEKt1Mn1trSP41rtJguXxPGfHI,735
 parsl/jobs/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -122,9 +122,9 @@ parsl/launchers/launchers.py,sha256=VB--fiVv_IQne3DydTMSdGUY0o0g69puAs-Hd3mJ2vo,
 parsl/monitoring/__init__.py,sha256=0ywNz6i0lM1xo_7_BIxhETDGeVd2C_0wwD7qgeaMR4c,83
 parsl/monitoring/db_manager.py,sha256=qPdW_MINycSn6MxxFk2s_R-t8g1cbJhxncVR5mDgeGs,37011
 parsl/monitoring/message_type.py,sha256=Khn88afNxcOIciKiCK4GLnn90I5BlRTiOL3zK-P07yQ,401
-parsl/monitoring/monitoring.py,sha256=…
-parsl/monitoring/radios.py,sha256=…
-parsl/monitoring/remote.py,sha256=…
+parsl/monitoring/monitoring.py,sha256=ggffHhtgh96lSmryvjjjaHg7oV54Ci-kz4wWOPJviKU,13548
+parsl/monitoring/radios.py,sha256=VaVZEPDkLsJ3Jp2CFo5kQ94mxiqpN9xltKl8lnKksiQ,5908
+parsl/monitoring/remote.py,sha256=qH1N3My8F473CHKxFrA2CsaL0Uege26tQi9-KrWQrr4,13771
 parsl/monitoring/router.py,sha256=l1LBT1hxCWQ2mxCR-PtxwYJ905In61E4pPJB2K2J7kM,9554
 parsl/monitoring/types.py,sha256=_WGizCTgQVOkJ2dvNfsvHpYBj21Ky3bJsmyIskIx10I,631
 parsl/monitoring/queries/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -355,7 +355,7 @@ parsl/tests/test_monitoring/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMp
 parsl/tests/test_monitoring/test_app_names.py,sha256=ayyxySGWpKSe9dDw2UeJo1dicxjpALRuLsJfprZV4Eg,2174
 parsl/tests/test_monitoring/test_basic.py,sha256=lGyHEJt_rokawv_XeAx-bxV84IlZUFR4KI0PQAiLsFg,3714
 parsl/tests/test_monitoring/test_db_locks.py,sha256=3s3c1xhKo230ZZIJ3f1Ca4U7LcEdXnanOGVXQyNlk2U,2895
-parsl/tests/test_monitoring/test_fuzz_zmq.py,sha256=…
+parsl/tests/test_monitoring/test_fuzz_zmq.py,sha256=--3-pQUvXXbkr8v_BEJoPvVvNly1oXvrD2nJh6yl_0M,3436
 parsl/tests/test_monitoring/test_htex_init_blocks_vs_monitoring.py,sha256=_WjymTgxWvZZwQpJQ3L2gmEt5VUkTss0hOT153AssdQ,2746
 parsl/tests/test_monitoring/test_incomplete_futures.py,sha256=ZnO1sFSwlWUBHX64C_zwfTVRVC_UFNlU4h0POgx6NEo,2005
 parsl/tests/test_monitoring/test_memoization_representation.py,sha256=dknv2nO7pNZ1jGxWGsC_AW3rs90gjMIeC5d7pIJ75Xc,2645
@@ -467,13 +467,13 @@ parsl/usage_tracking/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hS
 parsl/usage_tracking/api.py,sha256=iaCY58Dc5J4UM7_dJzEEs871P1p1HdxBMtNGyVdzc9g,1821
 parsl/usage_tracking/levels.py,sha256=xbfzYEsd55KiZJ-mzNgPebvOH4rRHum04hROzEf41tU,291
 parsl/usage_tracking/usage.py,sha256=qNEJ7nPimqd3Y7OWFLdYmNwJ6XDKlyfV_fTzasxsQw8,8690
-parsl-2024.7.15.data/scripts/exec_parsl_function.py,sha256=…
-parsl-2024.7.15.data/scripts/interchange.py,sha256=…
-parsl-2024.7.15.data/scripts/parsl_coprocess.py,sha256=…
-parsl-2024.7.15.data/scripts/process_worker_pool.py,sha256=…
-parsl-2024.7.15.dist-info/LICENSE,sha256=…
-parsl-2024.7.15.dist-info/METADATA,sha256=…
-parsl-2024.7.15.dist-info/WHEEL,sha256=…
-parsl-2024.7.15.dist-info/entry_points.txt,sha256=…
-parsl-2024.7.15.dist-info/top_level.txt,sha256=…
-parsl-2024.7.15.dist-info/RECORD,,
+parsl-2024.7.29.data/scripts/exec_parsl_function.py,sha256=RUkJ4JSJAjr7YyRZ58zhMdg8cR5dVV9odUl3AuzNf3k,7802
+parsl-2024.7.29.data/scripts/interchange.py,sha256=q3V1mqr0BC_CzsNfebTKFD5tyE0birXUvZh-bk05vLQ,30650
+parsl-2024.7.29.data/scripts/parsl_coprocess.py,sha256=zrVjEqQvFOHxsLufPi00xzMONagjVwLZbavPM7bbjK4,5722
+parsl-2024.7.29.data/scripts/process_worker_pool.py,sha256=78QKnV5KbY_vcteC6k60gpDE4wEk6hsciet_qzs9QoU,43061
+parsl-2024.7.29.dist-info/LICENSE,sha256=tAkwu8-AdEyGxGoSvJ2gVmQdcicWw3j1ZZueVV74M-E,11357
+parsl-2024.7.29.dist-info/METADATA,sha256=DN5bT4CBKxmmsnscGT_y61gVnmzLEGRZDg95HFbxD54,4124
+parsl-2024.7.29.dist-info/WHEEL,sha256=GJ7t_kWBFywbagK5eo9IoUwLW6oyOeTKmQ-9iHFVNxQ,92
+parsl-2024.7.29.dist-info/entry_points.txt,sha256=XqnsWDYoEcLbsMcpnYGKLEnSBmaIe1YoM5YsBdJG2tI,176
+parsl-2024.7.29.dist-info/top_level.txt,sha256=PIheYoUFQtF2icLsgOykgU-Cjuwr2Oi6On2jo5RYgRM,6
+parsl-2024.7.29.dist-info/RECORD,,
{parsl-2024.7.15.data → parsl-2024.7.29.data}/scripts/exec_parsl_function.py
File without changes
{parsl-2024.7.15.data → parsl-2024.7.29.data}/scripts/parsl_coprocess.py
File without changes
{parsl-2024.7.15.dist-info → parsl-2024.7.29.dist-info}/LICENSE
File without changes
{parsl-2024.7.15.dist-info → parsl-2024.7.29.dist-info}/WHEEL
File without changes
{parsl-2024.7.15.dist-info → parsl-2024.7.29.dist-info}/entry_points.txt
File without changes
{parsl-2024.7.15.dist-info → parsl-2024.7.29.dist-info}/top_level.txt
File without changes