parsl 2024.3.25__py3-none-any.whl → 2024.4.8__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- parsl/dataflow/dflow.py +16 -34
- parsl/executors/base.py +1 -1
- parsl/executors/high_throughput/executor.py +8 -20
- parsl/executors/high_throughput/process_worker_pool.py +5 -2
- parsl/executors/status_handling.py +7 -14
- parsl/executors/taskvine/executor.py +17 -13
- parsl/executors/workqueue/executor.py +17 -14
- parsl/jobs/job_status_poller.py +26 -11
- parsl/jobs/strategy.py +36 -19
- parsl/monitoring/monitoring.py +1 -20
- parsl/monitoring/remote.py +2 -1
- parsl/tests/site_tests/test_provider.py +1 -1
- parsl/tests/test_htex/test_disconnected_blocks.py +0 -1
- parsl/tests/test_htex/test_drain.py +1 -0
- parsl/tests/test_monitoring/test_htex_init_blocks_vs_monitoring.py +9 -6
- parsl/tests/test_python_apps/test_context_manager.py +3 -3
- parsl/tests/test_scaling/test_shutdown_scalein.py +78 -0
- parsl/version.py +1 -1
- {parsl-2024.3.25.data → parsl-2024.4.8.data}/scripts/process_worker_pool.py +5 -2
- {parsl-2024.3.25.dist-info → parsl-2024.4.8.dist-info}/METADATA +2 -2
- {parsl-2024.3.25.dist-info → parsl-2024.4.8.dist-info}/RECORD +31 -31
- parsl/tests/test_data/__init__.py +0 -0
- /parsl/tests/{test_data → test_staging}/test_file.py +0 -0
- /parsl/tests/{test_data → test_staging}/test_file_apps.py +0 -0
- /parsl/tests/{test_data → test_staging}/test_file_staging.py +0 -0
- /parsl/tests/{test_data → test_staging}/test_output_chain_filenames.py +0 -0
- {parsl-2024.3.25.data → parsl-2024.4.8.data}/scripts/exec_parsl_function.py +0 -0
- {parsl-2024.3.25.data → parsl-2024.4.8.data}/scripts/parsl_coprocess.py +0 -0
- {parsl-2024.3.25.dist-info → parsl-2024.4.8.dist-info}/LICENSE +0 -0
- {parsl-2024.3.25.dist-info → parsl-2024.4.8.dist-info}/WHEEL +0 -0
- {parsl-2024.3.25.dist-info → parsl-2024.4.8.dist-info}/entry_points.txt +0 -0
- {parsl-2024.3.25.dist-info → parsl-2024.4.8.dist-info}/top_level.txt +0 -0
parsl/dataflow/dflow.py
CHANGED
@@ -34,12 +34,12 @@ from parsl.dataflow.states import States, FINAL_STATES, FINAL_FAILURE_STATES
 from parsl.dataflow.taskrecord import TaskRecord
 from parsl.errors import ConfigurationError, InternalConsistencyError, NoDataFlowKernelError
 from parsl.jobs.job_status_poller import JobStatusPoller
-from parsl.jobs.states import JobStatus, JobState
 from parsl.usage_tracking.usage import UsageTracker
 from parsl.executors.base import ParslExecutor
 from parsl.executors.status_handling import BlockProviderExecutor
 from parsl.executors.threads import ThreadPoolExecutor
 from parsl.monitoring import MonitoringHub
+from parsl.monitoring.remote import monitor_wrapper
 from parsl.process_loggers import wrap_with_logs
 from parsl.providers.base import ExecutionProvider
 from parsl.utils import get_version, get_std_fname_mode, get_all_checkpoints, Timer
@@ -207,7 +207,7 @@ class DataFlowKernel:
         atexit.register(self.atexit_cleanup)

     def __enter__(self):
-
+        return self

     def __exit__(self, exc_type, exc_value, traceback):
         logger.debug("Exiting the context manager, calling cleanup for DFK")
@@ -714,14 +714,18 @@ class DataFlowKernel:

         if self.monitoring is not None and self.monitoring.resource_monitoring_enabled:
             wrapper_logging_level = logging.DEBUG if self.monitoring.monitoring_debug else logging.INFO
-            (function, args, kwargs) =
-
-
-
-
-
-
-
+            (function, args, kwargs) = monitor_wrapper(f=function,
+                                                       args=args,
+                                                       kwargs=kwargs,
+                                                       x_try_id=try_id,
+                                                       x_task_id=task_id,
+                                                       monitoring_hub_url=self.monitoring.monitoring_hub_url,
+                                                       run_id=self.run_id,
+                                                       logging_level=wrapper_logging_level,
+                                                       sleep_dur=self.monitoring.resource_monitoring_interval,
+                                                       radio_mode=executor.radio_mode,
+                                                       monitor_resources=executor.monitor_resources(),
+                                                       run_dir=self.run_dir)

         with self.submitter_lock:
             exec_fu = executor.submit(function, task_record['resource_specification'], *args, **kwargs)
@@ -1141,14 +1145,7 @@ class DataFlowKernel:
                     self._create_remote_dirs_over_channel(executor.provider, executor.provider.channel)

             self.executors[executor.label] = executor
-
-            if self.monitoring and block_ids:
-                new_status = {}
-                for bid in block_ids:
-                    new_status[bid] = JobStatus(JobState.PENDING)
-                msg = executor.create_monitoring_info(new_status)
-                logger.debug("Sending monitoring message {} to hub from DFK".format(msg))
-                self.monitoring.send(MessageType.BLOCK_INFO, msg)
+            executor.start()
         block_executors = [e for e in executors if isinstance(e, BlockProviderExecutor)]
         self.job_status_poller.add_executors(block_executors)

@@ -1221,24 +1218,9 @@ class DataFlowKernel:
         self.job_status_poller.close()
         logger.info("Terminated job status poller")

-        logger.info("
+        logger.info("Shutting down executors")

         for executor in self.executors.values():
-            if isinstance(executor, BlockProviderExecutor):
-                if not executor.bad_state_is_set:
-                    logger.info(f"Scaling in executor {executor.label}")
-                    if executor.provider:
-                        job_ids = executor.provider.resources.keys()
-                        block_ids = executor.scale_in(len(job_ids))
-                        if self.monitoring and block_ids:
-                            new_status = {}
-                            for bid in block_ids:
-                                new_status[bid] = JobStatus(JobState.CANCELLED)
-                            msg = executor.create_monitoring_info(new_status)
-                            logger.debug("Sending message {} to hub from DFK".format(msg))
-                            self.monitoring.send(MessageType.BLOCK_INFO, msg)
-                else:  # and bad_state_is_set
-                    logger.warning(f"Not shutting down executor {executor.label} because it is in bad state")
             logger.info(f"Shutting down executor {executor.label}")
             executor.shutdown()
             logger.info(f"Shut down executor {executor.label}")
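Because `DataFlowKernel.__enter__` now returns `self`, a loaded DFK can be bound directly in a `with` statement and is cleaned up on exit. A minimal usage sketch; the `double` app below is illustrative and not part of this diff:

    import parsl
    from parsl.configs.local_threads import config

    @parsl.python_app
    def double(x):
        return x * 2

    # parsl.load() returns the DataFlowKernel; __enter__ returning self means
    # the same object is bound to `dfk`, and __exit__ runs cleanup on the way out.
    with parsl.load(config) as dfk:
        assert double(3).result() == 6
        print(dfk.run_id)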
parsl/executors/base.py
CHANGED
@@ -53,7 +53,7 @@ class ParslExecutor(metaclass=ABCMeta):
         return False

     @abstractmethod
-    def start(self) ->
+    def start(self) -> None:
         """Start the executor.

         Any spin-up operations (for example: starting thread pools) should be performed here.
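The abstract `start` method is now annotated as returning `None`; executors no longer hand back a list of block IDs from initial scale-out, since that work moves to the scaling strategy. A hedged sketch of how a subclass lines up with the new signature (the class name and body are made up for illustration, and a real executor must implement the remaining abstract members of `ParslExecutor` as well):

    from parsl.executors.base import ParslExecutor

    class SleepyExecutor(ParslExecutor):
        # Illustrative only: other abstract methods/properties omitted.

        def start(self) -> None:
            # Spin-up work happens here; nothing is returned any more.
            self._started = True

        def submit(self, func, resource_specification, *args, **kwargs):
            raise NotImplementedError("sketch only")

        def shutdown(self) -> None:
            self._started = False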
parsl/executors/high_throughput/executor.py
CHANGED
@@ -1,4 +1,5 @@
 import typing
+from collections import defaultdict
 from concurrent.futures import Future
 import typeguard
 import logging
@@ -400,16 +401,6 @@ class HighThroughputExecutor(BlockProviderExecutor, RepresentationMixin):

         logger.debug("Starting HighThroughputExecutor with provider:\n%s", self.provider)

-        # TODO: why is this a provider property?
-        block_ids = []
-        if hasattr(self.provider, 'init_blocks'):
-            try:
-                block_ids = self.scale_out(blocks=self.provider.init_blocks)
-            except Exception as e:
-                logger.error("Scaling out failed: {}".format(e))
-                raise e
-        return block_ids
-
     def start(self):
         """Create the Interchange process and connect to it.
         """
@@ -439,8 +430,7 @@ class HighThroughputExecutor(BlockProviderExecutor, RepresentationMixin):

         logger.debug("Created management thread: {}".format(self._queue_management_thread))

-
-        return block_ids
+        self.initialize_scaling()

     @wrap_with_logs
     def _queue_management_worker(self):
@@ -698,7 +688,7 @@ class HighThroughputExecutor(BlockProviderExecutor, RepresentationMixin):
             d['status'] = s.status_name
             d['timestamp'] = datetime.datetime.now()
             d['executor_label'] = self.label
-            d['job_id'] = self.
+            d['job_id'] = self.blocks_to_job_id.get(bid, None)
             d['block_id'] = bid
             msg.append(d)
         return msg
@@ -741,13 +731,11 @@ class HighThroughputExecutor(BlockProviderExecutor, RepresentationMixin):
             idle: float  # shortest idle time of any manager in this block

         managers = self.connected_managers()
-        block_info: Dict[str, BlockInfo] =
+        block_info: Dict[str, BlockInfo] = defaultdict(lambda: BlockInfo(tasks=0, idle=float('inf')))
         for manager in managers:
             if not manager['active']:
                 continue
             b_id = manager['block_id']
-            if b_id not in block_info:
-                block_info[b_id] = BlockInfo(tasks=0, idle=float('inf'))
             block_info[b_id].tasks += manager['tasks']
             block_info[b_id].idle = min(block_info[b_id].idle, manager['idle_duration'])

@@ -779,14 +767,14 @@ class HighThroughputExecutor(BlockProviderExecutor, RepresentationMixin):

         # Now kill via provider
         # Potential issue with multiple threads trying to remove the same blocks
-        to_kill = [self.
+        to_kill = [self.blocks_to_job_id[bid] for bid in block_ids_to_kill if bid in self.blocks_to_job_id]

         r = self.provider.cancel(to_kill)
         job_ids = self._filter_scale_in_ids(to_kill, r)

-        # to_kill block_ids are fetched from self.
-        # If a block_id is in self.
-        block_ids_killed = [self.
+        # to_kill block_ids are fetched from self.blocks_to_job_id
+        # If a block_id is in self.blocks_to_job_id, it must exist in self.job_ids_to_block
+        block_ids_killed = [self.job_ids_to_block[jid] for jid in job_ids]

         return block_ids_killed

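The per-block bookkeeping in scale-in now relies on `defaultdict`, so the first manager seen for a block gets a zeroed `BlockInfo` without an explicit membership check. A self-contained sketch of the same pattern, with made-up manager records standing in for `connected_managers()` output:

    from collections import defaultdict
    from dataclasses import dataclass

    @dataclass
    class BlockInfo:
        tasks: int   # sum of tasks in this block
        idle: float  # shortest idle time of any manager in this block

    # Missing keys are created on first access with tasks=0, idle=inf.
    block_info = defaultdict(lambda: BlockInfo(tasks=0, idle=float('inf')))

    managers = [
        {'block_id': '0', 'active': True, 'tasks': 2, 'idle_duration': 5.0},
        {'block_id': '0', 'active': True, 'tasks': 0, 'idle_duration': 1.5},
        {'block_id': '1', 'active': False, 'tasks': 9, 'idle_duration': 0.0},
    ]
    for manager in managers:
        if not manager['active']:
            continue
        b_id = manager['block_id']
        block_info[b_id].tasks += manager['tasks']
        block_info[b_id].idle = min(block_info[b_id].idle, manager['idle_duration'])

    assert block_info['0'].tasks == 2 and block_info['0'].idle == 1.5
    assert '1' not in block_info   # inactive managers never touch the mapping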
parsl/executors/high_throughput/process_worker_pool.py
CHANGED
@@ -335,14 +335,17 @@ class Manager:
                 self.heartbeat_to_incoming()
                 last_beat = time.time()

-            if
+            if time.time() > self.drain_time:
                 logger.info("Requesting drain")
                 self.drain_to_incoming()
-                self.drain_time = None
                 # This will start the pool draining...
                 # Drained exit behaviour does not happen here. It will be
                 # driven by the interchange sending a DRAINED_CODE message.

+                # now set drain time to the far future so we don't send a drain
+                # message every iteration.
+                self.drain_time = float('inf')
+
             poll_duration_s = max(0, next_interesting_event_time - time.time())
             socks = dict(poller.poll(timeout=poll_duration_s * 1000))

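Setting `drain_time` to `float('inf')` once the drain request has been sent keeps the `time.time() > self.drain_time` comparison valid on every later loop iteration (no `None` check needed) while guaranteeing the request fires only once. The same one-shot deadline pattern in isolation, with a made-up loop:

    import time

    drain_time = time.time() + 0.05   # deadline for the one-shot action

    for _ in range(3):
        time.sleep(0.1)
        if time.time() > drain_time:
            print("requesting drain")
            # Push the deadline to the far future: the comparison stays well
            # defined, but this branch can never be taken again.
            drain_time = float('inf')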
parsl/executors/status_handling.py
CHANGED
@@ -68,8 +68,8 @@ class BlockProviderExecutor(ParslExecutor):
         self._block_id_counter = AtomicIDCounter()

         self._tasks = {}  # type: Dict[object, Future]
-        self.
-        self.
+        self.blocks_to_job_id = {}  # type: Dict[str, str]
+        self.job_ids_to_block = {}  # type: Dict[str, str]

     def _make_status_dict(self, block_ids: List[str], status_list: List[JobStatus]) -> Dict[str, JobStatus]:
         """Given a list of block ids and a list of corresponding status strings,
@@ -102,12 +102,6 @@ class BlockProviderExecutor(ParslExecutor):
         else:
             return self._provider.status_polling_interval

-    def _fail_job_async(self, block_id: str, message: str):
-        """Marks a job that has failed to start but would not otherwise be included in status()
-        as failed and report it in status()
-        """
-        self._simulated_status[block_id] = JobStatus(JobState.FAILED, message)
-
     @abstractproperty
     def outstanding(self) -> int:
         """This should return the number of tasks that the executor has been given to run (waiting to run, and running now)"""
@@ -194,12 +188,11 @@ class BlockProviderExecutor(ParslExecutor):
             logger.info(f"Allocated block ID {block_id}")
             try:
                 job_id = self._launch_block(block_id)
-                self.
-                self.
+                self.blocks_to_job_id[block_id] = job_id
+                self.job_ids_to_block[job_id] = block_id
                 block_ids.append(block_id)
             except Exception as ex:
-                self.
-                    "Failed to start block {}: {}".format(block_id, ex))
+                self._simulated_status[block_id] = JobStatus(JobState.FAILED, "Failed to start block {}: {}".format(block_id, ex))
         return block_ids

     @abstractmethod
@@ -232,10 +225,10 @@ class BlockProviderExecutor(ParslExecutor):
         # Not using self.blocks.keys() and self.blocks.values() simultaneously
         # The dictionary may be changed during invoking this function
         # As scale_in and scale_out are invoked in multiple threads
-        block_ids = list(self.
+        block_ids = list(self.blocks_to_job_id.keys())
         job_ids = []  # types: List[Any]
         for bid in block_ids:
-            job_ids.append(self.
+            job_ids.append(self.blocks_to_job_id[bid])
         return block_ids, job_ids

     @abstractproperty
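The renamed dictionaries are the two directions of one block/job mapping: `blocks_to_job_id` translates Parsl block IDs into provider job IDs (what `provider.cancel` needs), and `job_ids_to_block` translates cancelled job IDs back into block IDs for reporting. A tiny illustration with hypothetical IDs, not real provider output:

    blocks_to_job_id = {"0": "slurm-1001", "1": "slurm-1002"}
    job_ids_to_block = {job: block for block, job in blocks_to_job_id.items()}

    # scale_in style round trip: blocks -> jobs for the provider, then the
    # provider's cancelled job ids -> blocks killed.
    to_kill = [blocks_to_job_id[bid] for bid in ["0", "1"]]
    cancelled_jobs = to_kill                      # pretend the provider cancelled them all
    blocks_killed = [job_ids_to_block[jid] for jid in cancelled_jobs]
    assert blocks_killed == ["0", "1"]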
parsl/executors/taskvine/executor.py
CHANGED
@@ -186,7 +186,13 @@ class TaskVineExecutor(BlockProviderExecutor, putils.RepresentationMixin):
         # Attribute indicating whether this executor was started to shut it down properly.
         # This safeguards cases where an object of this executor is created but
         # the executor never starts, so it shouldn't be shutdowned.
-        self.
+        self._is_started = False
+
+        # Attribute indicating whether this executor was shutdown before.
+        # This safeguards cases where this object is automatically shut down (e.g.,
+        # via atexit) and the user also explicitly calls shut down. While this is
+        # permitted, the effect of an executor shutdown should happen only once.
+        self._is_shutdown = False

     def atexit_cleanup(self):
         # Calls this executor's shutdown method upon Python exiting the process.
@@ -252,7 +258,7 @@ class TaskVineExecutor(BlockProviderExecutor, putils.RepresentationMixin):
         """

         # Mark this executor object as started
-        self.
+        self._is_started = True

         # Synchronize connection and communication settings between the manager and factory
         self.__synchronize_manager_factory_comm_settings()
@@ -580,13 +586,6 @@ class TaskVineExecutor(BlockProviderExecutor, putils.RepresentationMixin):
         self._worker_command = self._construct_worker_command()
         self._patch_providers()

-        if hasattr(self.provider, 'init_blocks'):
-            try:
-                self.scale_out(blocks=self.provider.init_blocks)
-            except Exception as e:
-                logger.error("Initial block scaling out failed: {}".format(e))
-                raise e
-
     @property
     def outstanding(self) -> int:
         """Count the number of outstanding tasks."""
@@ -601,8 +600,8 @@ class TaskVineExecutor(BlockProviderExecutor, putils.RepresentationMixin):
         """Scale in method. Cancel a given number of blocks
         """
         # Obtain list of blocks to kill
-        to_kill = list(self.
-        kill_ids = [self.
+        to_kill = list(self.blocks_to_job_id.keys())[:count]
+        kill_ids = [self.blocks_to_job_id[block] for block in to_kill]

         # Cancel the blocks provisioned
         if self.provider:
@@ -614,15 +613,19 @@ class TaskVineExecutor(BlockProviderExecutor, putils.RepresentationMixin):
         """Shutdown the executor. Sets flag to cancel the submit process and
         collector thread, which shuts down the TaskVine system submission.
         """
-        if not self.
+        if not self._is_started:
             # Don't shutdown if the executor never starts.
             return

+        if self._is_shutdown:
+            # Don't shutdown this executor again.
+            return
+
         logger.debug("TaskVine shutdown started")
         self._should_stop.set()

         # Remove the workers that are still going
-        kill_ids = [self.
+        kill_ids = [self.blocks_to_job_id[block] for block in self.blocks_to_job_id.keys()]
         if self.provider:
             logger.debug("Cancelling blocks")
             self.provider.cancel(kill_ids)
@@ -636,6 +639,7 @@ class TaskVineExecutor(BlockProviderExecutor, putils.RepresentationMixin):
         logger.debug("Joining on factory process")
         self._factory_process.join()

+        self._is_shutdown = True
         logger.debug("TaskVine shutdown completed")

     @wrap_with_logs
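`_is_started` and `_is_shutdown` together make shutdown a safe no-op both when the executor never started and when shutdown is reached twice, once explicitly and once via the atexit hook. The guard pattern on its own, in a made-up class:

    import atexit

    class Service:
        def __init__(self):
            self._is_started = False
            self._is_shutdown = False
            atexit.register(self.shutdown)   # shutdown may run again at interpreter exit

        def start(self):
            self._is_started = True

        def shutdown(self):
            if not self._is_started:
                return                # never started: nothing to tear down
            if self._is_shutdown:
                return                # already shut down once: do nothing
            self._is_shutdown = True
            print("tearing down once")

    s = Service()
    s.start()
    s.shutdown()   # tears down
    s.shutdown()   # no-op; the later atexit call is a no-op too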
parsl/executors/workqueue/executor.py
CHANGED
@@ -255,7 +255,6 @@ class WorkQueueExecutor(BlockProviderExecutor, putils.RepresentationMixin):
         self.label = label
         self.task_queue = multiprocessing.Queue()  # type: multiprocessing.Queue
         self.collector_queue = multiprocessing.Queue()  # type: multiprocessing.Queue
-        self.blocks = {}  # type: Dict[str, str]
         self.address = address
         self.port = port
         self.executor_task_counter = -1
@@ -305,7 +304,13 @@ class WorkQueueExecutor(BlockProviderExecutor, putils.RepresentationMixin):
         # Attribute indicating whether this executor was started to shut it down properly.
         # This safeguards cases where an object of this executor is created but
         # the executor never starts, so it shouldn't be shutdowned.
-        self.
+        self.is_started = False
+
+        # Attribute indicating whether this executor was shutdown before.
+        # This safeguards cases where this object is automatically shut down (e.g.,
+        # via atexit) and the user also explicitly calls shut down. While this is
+        # permitted, the effect of an executor shutdown should happen only once.
+        self.is_shutdown = False

     def atexit_cleanup(self):
         # Calls this executor's shutdown method upon Python exiting the process.
@@ -321,7 +326,7 @@ class WorkQueueExecutor(BlockProviderExecutor, putils.RepresentationMixin):
         retrieve Parsl tasks within the Work Queue system.
         """
         # Mark this executor object as started
-        self.
+        self.is_started = True
         self.tasks_lock = threading.Lock()

         # Create directories for data and results
@@ -669,13 +674,6 @@ class WorkQueueExecutor(BlockProviderExecutor, putils.RepresentationMixin):
         self.worker_command = self._construct_worker_command()
         self._patch_providers()

-        if hasattr(self.provider, 'init_blocks'):
-            try:
-                self.scale_out(blocks=self.provider.init_blocks)
-            except Exception as e:
-                logger.error("Initial block scaling out failed: {}".format(e))
-                raise e
-
     @property
     def outstanding(self) -> int:
         """Count the number of outstanding tasks. This is inefficiently
@@ -697,8 +695,8 @@ class WorkQueueExecutor(BlockProviderExecutor, putils.RepresentationMixin):
         """Scale in method.
         """
         # Obtain list of blocks to kill
-        to_kill = list(self.
-        kill_ids = [self.
+        to_kill = list(self.blocks_to_job_id.keys())[:count]
+        kill_ids = [self.blocks_to_job_id[block] for block in to_kill]

         # Cancel the blocks provisioned
         if self.provider:
@@ -710,15 +708,19 @@ class WorkQueueExecutor(BlockProviderExecutor, putils.RepresentationMixin):
         """Shutdown the executor. Sets flag to cancel the submit process and
         collector thread, which shuts down the Work Queue system submission.
         """
-        if not self.
+        if not self.is_started:
             # Don't shutdown if the executor never starts.
             return

+        if self.is_shutdown:
+            # Don't shutdown this executor again.
+            return
+
         logger.debug("Work Queue shutdown started")
         self.should_stop.value = True

         # Remove the workers that are still going
-        kill_ids = [self.
+        kill_ids = [self.blocks_to_job_id[block] for block in self.blocks_to_job_id.keys()]
         if self.provider:
             logger.debug("Cancelling blocks")
             self.provider.cancel(kill_ids)
@@ -728,6 +730,7 @@ class WorkQueueExecutor(BlockProviderExecutor, putils.RepresentationMixin):
         logger.debug("Joining on collector thread")
         self.collector_thread.join()

+        self.is_shutdown = True
         logger.debug("Work Queue shutdown completed")

     @wrap_with_logs
parsl/jobs/job_status_poller.py
CHANGED
@@ -16,20 +16,19 @@ from parsl.utils import Timer
 logger = logging.getLogger(__name__)


-class
+class PolledExecutorFacade:
     def __init__(self, executor: BlockProviderExecutor, dfk: Optional["parsl.dataflow.dflow.DataFlowKernel"] = None):
         self._executor = executor
-        self._dfk = dfk
         self._interval = executor.status_polling_interval
         self._last_poll_time = 0.0
         self._status = {}  # type: Dict[str, JobStatus]

         # Create a ZMQ channel to send poll status to monitoring
         self.monitoring_enabled = False
-        if
+        if dfk and dfk.monitoring is not None:
             self.monitoring_enabled = True
-            hub_address =
-            hub_port =
+            hub_address = dfk.hub_address
+            hub_port = dfk.hub_zmq_port
             context = zmq.Context()
             self.hub_channel = context.socket(zmq.DEALER)
             self.hub_channel.set_hwm(0)
@@ -109,7 +108,7 @@ class JobStatusPoller(Timer):
     def __init__(self, *, strategy: Optional[str], max_idletime: float,
                  strategy_period: Union[float, int],
                  dfk: Optional["parsl.dataflow.dflow.DataFlowKernel"] = None) -> None:
-        self.
+        self._executor_facades = []  # type: List[PolledExecutorFacade]
         self.dfk = dfk
         self._strategy = Strategy(strategy=strategy,
                                   max_idletime=max_idletime)
@@ -117,21 +116,37 @@ class JobStatusPoller(Timer):

     def poll(self) -> None:
         self._update_state()
-        self._run_error_handlers(self.
-        self._strategy.strategize(self.
+        self._run_error_handlers(self._executor_facades)
+        self._strategy.strategize(self._executor_facades)

-    def _run_error_handlers(self, status: List[
+    def _run_error_handlers(self, status: List[PolledExecutorFacade]) -> None:
         for es in status:
             es.executor.handle_errors(es.status)

     def _update_state(self) -> None:
         now = time.time()
-        for item in self.
+        for item in self._executor_facades:
             item.poll(now)

     def add_executors(self, executors: Sequence[BlockProviderExecutor]) -> None:
         for executor in executors:
             if executor.status_polling_interval > 0:
                 logger.debug("Adding executor {}".format(executor.label))
-                self.
+                self._executor_facades.append(PolledExecutorFacade(executor, self.dfk))
         self._strategy.add_executors(executors)
+
+    def close(self):
+        super().close()
+        for ef in self._executor_facades:
+            if not ef.executor.bad_state_is_set:
+                logger.info(f"Scaling in executor {ef.executor.label}")
+
+                # this code needs to be at least as many blocks as need
+                # cancelling, but it is safe to be more, as the scaling
+                # code will cope with being asked to cancel more blocks
+                # than exist.
+                block_count = len(ef.status)
+                ef.scale_in(block_count)
+
+            else:  # and bad_state_is_set
+                logger.warning(f"Not scaling in executor {ef.executor.label} because it is in bad state")
parsl/jobs/strategy.py
CHANGED
@@ -26,6 +26,10 @@ class ExecutorState(TypedDict):
     If the executor is not idle, then None.
     """

+    first: bool
+    """True if this executor has not yet had a strategy poll.
+    """
+

 class Strategy:
     """Scaling strategy.
@@ -129,8 +133,8 @@ class Strategy:
         self.executors = {}
         self.max_idletime = max_idletime

-        self.strategies = {None: self.
-                           'none': self.
+        self.strategies = {None: self._strategy_init_only,
+                           'none': self._strategy_init_only,
                            'simple': self._strategy_simple,
                            'htex_auto_scale': self._strategy_htex_auto_scale}

@@ -144,17 +148,24 @@ class Strategy:

     def add_executors(self, executors: Sequence[ParslExecutor]) -> None:
         for executor in executors:
-            self.executors[executor.label] = {'idle_since': None}
+            self.executors[executor.label] = {'idle_since': None, 'first': True}

-    def
-    """
+    def _strategy_init_only(self, executor_facades: List[jsp.PolledExecutorFacade]) -> None:
+        """Scale up to init_blocks at the start, then nothing more.
         """
-
+        for ef in executor_facades:
+            executor = ef.executor
+            if self.executors[executor.label]['first']:
+                logger.debug(f"strategy_init_only: scaling out {executor.provider.init_blocks} initial blocks for {executor.label}")
+                ef.scale_out(executor.provider.init_blocks)
+                self.executors[executor.label]['first'] = False
+            else:
+                logger.debug("strategy_init_only: doing nothing")

-    def _strategy_simple(self,
-        self._general_strategy(
+    def _strategy_simple(self, executor_facades: List[jsp.PolledExecutorFacade]) -> None:
+        self._general_strategy(executor_facades, strategy_type='simple')

-    def _strategy_htex_auto_scale(self,
+    def _strategy_htex_auto_scale(self, executor_facades: List[jsp.PolledExecutorFacade]) -> None:
         """HTEX specific auto scaling strategy

         This strategy works only for HTEX. This strategy will scale out by
@@ -169,24 +180,30 @@ class Strategy:
         expected to scale in effectively only when # of workers, or tasks executing
         per block is close to 1.
         """
-        self._general_strategy(
+        self._general_strategy(executor_facades, strategy_type='htex')

     @wrap_with_logs
-    def _general_strategy(self,
-        logger.debug(f"general strategy starting with strategy_type {strategy_type} for {len(
+    def _general_strategy(self, executor_facades, *, strategy_type):
+        logger.debug(f"general strategy starting with strategy_type {strategy_type} for {len(executor_facades)} executors")

-        for
-            executor =
+        for ef in executor_facades:
+            executor = ef.executor
             label = executor.label
             if not isinstance(executor, BlockProviderExecutor):
                 logger.debug(f"Not strategizing for executor {label} because scaling not enabled")
                 continue
             logger.debug(f"Strategizing for executor {label}")

+            if self.executors[label]['first']:
+                executor = ef.executor
+                logger.debug(f"Scaling out {executor.provider.init_blocks} initial blocks for {label}")
+                ef.scale_out(executor.provider.init_blocks)
+                self.executors[label]['first'] = False
+
             # Tasks that are either pending completion
             active_tasks = executor.outstanding

-            status =
+            status = ef.status

             # FIXME we need to handle case where provider does not define these
             # FIXME probably more of this logic should be moved to the provider
@@ -242,7 +259,7 @@ class Strategy:
                         # We have resources idle for the max duration,
                         # we have to scale_in now.
                         logger.debug(f"Idle time has reached {self.max_idletime}s for executor {label}; scaling in")
-
+                        ef.scale_in(active_blocks - min_blocks)

                     else:
                         logger.debug(
@@ -265,7 +282,7 @@ class Strategy:
                 excess_blocks = math.ceil(float(excess_slots) / (tasks_per_node * nodes_per_block))
                 excess_blocks = min(excess_blocks, max_blocks - active_blocks)
                 logger.debug(f"Requesting {excess_blocks} more blocks")
-
+                ef.scale_out(excess_blocks)

             elif active_slots == 0 and active_tasks > 0:
                 logger.debug("Strategy case 4a: No active slots but some active tasks - could scale out by a single block")
@@ -274,7 +291,7 @@ class Strategy:
                 if active_blocks < max_blocks:
                     logger.debug("Requesting single block")

-
+                    ef.scale_out(1)
                 else:
                     logger.debug("Not requesting single block, because at maxblocks already")

@@ -290,7 +307,7 @@ class Strategy:
                     excess_blocks = math.ceil(float(excess_slots) / (tasks_per_node * nodes_per_block))
                     excess_blocks = min(excess_blocks, active_blocks - min_blocks)
                     logger.debug(f"Requesting scaling in by {excess_blocks} blocks with idle time {self.max_idletime}s")
-
+                    ef.scale_in(excess_blocks, max_idletime=self.max_idletime)
                 else:
                     logger.error("This strategy does not support scaling in except for HighThroughputExecutor - taking no action")
             else:
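The new `first` flag moves initial `init_blocks` scale-out from each executor's `start()` into the strategy: on the first poll of an executor the strategy scales out `init_blocks`, then clears the flag so later polls fall through to the normal logic (or, for the init-only strategy, do nothing). The bookkeeping in miniature, with made-up names:

    executors = {"htex_local": {"idle_since": None, "first": True}}
    launched = []

    def poll_init_only(label, init_blocks):
        state = executors[label]
        if state["first"]:
            launched.append(init_blocks)   # one-time initial scale out
            state["first"] = False

    poll_init_only("htex_local", 2)
    poll_init_only("htex_local", 2)
    assert launched == [2]   # only the first poll scaled out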
parsl/monitoring/monitoring.py
CHANGED
@@ -8,8 +8,6 @@ import zmq

 import queue

-import parsl.monitoring.remote
-
 from parsl.multiprocessing import ForkProcess, SizedQueue
 from multiprocessing import Process
 from multiprocessing.queues import Queue
@@ -23,7 +21,7 @@ from parsl.serialize import deserialize
 from parsl.monitoring.router import router_starter
 from parsl.monitoring.message_type import MessageType
 from parsl.monitoring.types import AddressedMonitoringMessage
-from typing import cast, Any,
+from typing import cast, Any, Optional, Tuple, Union, TYPE_CHECKING

 _db_manager_excepts: Optional[Exception]

@@ -269,23 +267,6 @@ class MonitoringHub(RepresentationMixin):
             self.filesystem_proc.terminate()
             self.filesystem_proc.join()

-    @staticmethod
-    def monitor_wrapper(f: Any,
-                        args: Sequence,
-                        kwargs: Dict,
-                        try_id: int,
-                        task_id: int,
-                        monitoring_hub_url: str,
-                        run_id: str,
-                        logging_level: int,
-                        sleep_dur: float,
-                        radio_mode: str,
-                        monitor_resources: bool,
-                        run_dir: str) -> Tuple[Callable, Sequence, Dict]:
-        return parsl.monitoring.remote.monitor_wrapper(f, args, kwargs, try_id, task_id, monitoring_hub_url,
-                                                       run_id, logging_level, sleep_dur, radio_mode,
-                                                       monitor_resources, run_dir)
-

 @wrap_with_logs
 def filesystem_receiver(logdir: str, q: "queue.Queue[AddressedMonitoringMessage]", run_dir: str) -> None:
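With the forwarding `MonitoringHub.monitor_wrapper` staticmethod gone, callers use `parsl.monitoring.remote.monitor_wrapper` directly, and (per the next section) that function now takes keyword-only arguments. A sketch of the keyword-only calling convention, using a stand-in function rather than the real parsl signature:

    def wrap(*, f, args, kwargs, run_id):
        # The leading * makes every parameter keyword-only: wrap(print, ...)
        # would raise TypeError, so call sites must name each argument.
        def wrapped():
            return f(*args, **kwargs)
        return wrapped, args, kwargs

    wrapped, a, k = wrap(f=print, args=("hello",), kwargs={}, run_id="r1")
    wrapped()   # prints "hello"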
parsl/monitoring/remote.py
CHANGED
@@ -15,7 +15,8 @@ from typing import Any, Callable, Dict, List, Sequence, Tuple
 logger = logging.getLogger(__name__)


-def monitor_wrapper(
+def monitor_wrapper(*,
+                    f: Any,  # per app
                     args: Sequence,  # per invocation
                     kwargs: Dict,  # per invocation
                     x_try_id: int,  # per invocation

parsl/tests/site_tests/test_provider.py
CHANGED
@@ -58,7 +58,7 @@ def test_provider():
     logger.info("Job in terminal state")

     _, current_jobs = executor._get_block_and_job_ids()
-    # PR 1952 stoped removing scale_in blocks from self.
+    # PR 1952 stoped removing scale_in blocks from self.blocks_to_job_id
     # A new PR will handle removing blocks from self.block
     # this includes failed/completed/canceled blocks
     assert len(current_jobs) == 1, "Expected current_jobs == 1"

parsl/tests/test_monitoring/test_htex_init_blocks_vs_monitoring.py
CHANGED
@@ -59,24 +59,27 @@ def test_row_counts(tmpd_cwd, strategy):
     from sqlalchemy import text

     db_url = f"sqlite:///{tmpd_cwd}/monitoring.db"
-    parsl.load(fresh_config(tmpd_cwd, strategy, db_url))
+    with parsl.load(fresh_config(tmpd_cwd, strategy, db_url)):
+        dfk = parsl.dfk()
+        run_id = dfk.run_id

-
+        this_app().result()

-    parsl.dfk().cleanup()
     parsl.clear()

     engine = sqlalchemy.create_engine(db_url)
     with engine.begin() as connection:

-
+        binds = {"run_id": run_id}
+
+        result = connection.execute(text("SELECT COUNT(DISTINCT block_id) FROM block WHERE run_id = :run_id"), binds)
         (c, ) = result.first()
         assert c == 1, "We should see a single block in this database"

-        result = connection.execute(text("SELECT COUNT(*) FROM block WHERE block_id = 0 AND status = 'PENDING'"))
+        result = connection.execute(text("SELECT COUNT(*) FROM block WHERE block_id = 0 AND status = 'PENDING' AND run_id = :run_id"), binds)
         (c, ) = result.first()
         assert c == 1, "There should be a single pending status"

-        result = connection.execute(text("SELECT COUNT(*) FROM block WHERE block_id = 0 AND status = 'CANCELLED'"))
+        result = connection.execute(text("SELECT COUNT(*) FROM block WHERE block_id = 0 AND status = 'CANCELLED' AND run_id = :run_id"), binds)
         (c, ) = result.first()
         assert c == 1, "There should be a single cancelled status"

parsl/tests/test_python_apps/test_context_manager.py
CHANGED
@@ -2,6 +2,7 @@ import parsl
 from parsl.tests.configs.local_threads import fresh_config
 import pytest
 from parsl.errors import NoDataFlowKernelError
+from parsl.dataflow.dflow import DataFlowKernel


 @parsl.python_app
@@ -25,9 +26,8 @@ def local_teardown():
 @pytest.mark.local
 def test_within_context_manger():
     config = fresh_config()
-    with parsl.load(config=config):
-
-        assert py_future.result() == 4
+    with parsl.load(config=config) as dfk:
+        assert isinstance(dfk, DataFlowKernel)

     bash_future = foo(1)
     assert bash_future.result() == 0

parsl/tests/test_scaling/test_shutdown_scalein.py
ADDED
@@ -0,0 +1,78 @@
+import threading
+
+import pytest
+
+import parsl
+from parsl.channels import LocalChannel
+from parsl.config import Config
+from parsl.executors import HighThroughputExecutor
+from parsl.launchers import SimpleLauncher
+from parsl.providers import LocalProvider
+
+import random
+
+# we need some blocks, but it doesn't matter too much how many, as long
+# as they can all start up and get registered within the try_assert
+# timeout later on.
+BLOCK_COUNT = 3
+
+
+class AccumulatingLocalProvider(LocalProvider):
+    def __init__(self, *args, **kwargs):
+        # Use a list for submitted job IDs because if there are multiple
+        # submissions returning the same job ID, this test should count
+        # those...
+        self.submit_job_ids = []
+
+        # ... but there's no requirement, I think, that cancel must be called
+        # only once per job id. What matters here is that each job ID is
+        # cancelled at least once.
+        self.cancel_job_ids = set()
+
+        super().__init__(*args, **kwargs)
+
+    def submit(self, *args, **kwargs):
+        job_id = super().submit(*args, **kwargs)
+        self.submit_job_ids.append(job_id)
+        return job_id
+
+    def cancel(self, job_ids):
+        self.cancel_job_ids.update(job_ids)
+        return super().cancel(job_ids)
+
+
+@pytest.mark.local
+def test_shutdown_scalein_blocks(tmpd_cwd, try_assert):
+    """
+    This test scales up several blocks, and then checks that they are all
+    scaled in at DFK shutdown.
+    """
+    accumulating_provider = AccumulatingLocalProvider(
+        channel=LocalChannel(),
+        init_blocks=BLOCK_COUNT,
+        min_blocks=0,
+        max_blocks=0,
+        launcher=SimpleLauncher(),
+    )
+
+    htex = HighThroughputExecutor(
+        label="htex_local",
+        cores_per_worker=1,
+        provider=accumulating_provider
+    )
+
+    config = Config(
+        executors=[htex],
+        strategy='none',
+        strategy_period=0.1,
+        run_dir=str(tmpd_cwd)
+    )
+
+    with parsl.load(config):
+        # this will wait for everything to be scaled out fully
+        try_assert(lambda: len(htex.connected_managers()) == BLOCK_COUNT)
+
+    parsl.clear()
+
+    assert len(accumulating_provider.submit_job_ids) == BLOCK_COUNT, f"Exactly {BLOCK_COUNT} blocks should have been launched"
+    assert len(accumulating_provider.cancel_job_ids) == BLOCK_COUNT, f"Exactly {BLOCK_COUNT} blocks should have been scaled in"
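The reworked monitoring test scopes every block query to one workflow run by passing `run_id` as a bound parameter instead of interpolating it into the SQL string. The same idea against a throwaway in-memory database (the schema here is illustrative, not the real monitoring schema):

    import sqlalchemy
    from sqlalchemy import text

    engine = sqlalchemy.create_engine("sqlite://")
    with engine.begin() as connection:
        connection.execute(text("CREATE TABLE block (run_id TEXT, block_id INTEGER, status TEXT)"))
        connection.execute(text("INSERT INTO block VALUES ('r1', 0, 'PENDING'), ('r2', 0, 'PENDING')"))

        binds = {"run_id": "r1"}   # bound parameter, no string formatting
        result = connection.execute(
            text("SELECT COUNT(*) FROM block WHERE status = 'PENDING' AND run_id = :run_id"), binds)
        (count,) = result.first()
        assert count == 1, "only the r1 row should be counted"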
parsl/version.py
CHANGED
{parsl-2024.3.25.data → parsl-2024.4.8.data}/scripts/process_worker_pool.py
CHANGED
@@ -335,14 +335,17 @@ class Manager:
                 self.heartbeat_to_incoming()
                 last_beat = time.time()

-            if
+            if time.time() > self.drain_time:
                 logger.info("Requesting drain")
                 self.drain_to_incoming()
-                self.drain_time = None
                 # This will start the pool draining...
                 # Drained exit behaviour does not happen here. It will be
                 # driven by the interchange sending a DRAINED_CODE message.

+                # now set drain time to the far future so we don't send a drain
+                # message every iteration.
+                self.drain_time = float('inf')
+
             poll_duration_s = max(0, next_interesting_event_time - time.time())
             socks = dict(poller.poll(timeout=poll_duration_s * 1000))

{parsl-2024.3.25.dist-info → parsl-2024.4.8.dist-info}/METADATA
CHANGED
@@ -1,9 +1,9 @@
 Metadata-Version: 2.1
 Name: parsl
-Version: 2024.
+Version: 2024.4.8
 Summary: Simple data dependent workflows in Python
 Home-page: https://github.com/Parsl/parsl
-Download-URL: https://github.com/Parsl/parsl/archive/2024.
+Download-URL: https://github.com/Parsl/parsl/archive/2024.04.08.tar.gz
 Author: The Parsl Team
 Author-email: parsl@googlegroups.com
 License: Apache 2.0

{parsl-2024.3.25.dist-info → parsl-2024.4.8.dist-info}/RECORD
CHANGED
@@ -8,7 +8,7 @@ parsl/multiprocessing.py,sha256=hakfdg-sgxEjwloZeDrt6EhzwdzecvjJhkPHHxh8lII,1938
 parsl/process_loggers.py,sha256=1G3Rfrh5wuZNo2X03grG4kTYPGOxz7hHCyG6L_A3b0A,1137
 parsl/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 parsl/utils.py,sha256=A3WDMGaNB4ajVx_jCuc-74W6PFy4zswJy-pLE7u8Dz0,10979
-parsl/version.py,sha256=
+parsl/version.py,sha256=ahe277o-2uHeGXWS6lLdiHiTpTRqrLfBk6sTJtXZeNQ,131
 parsl/app/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 parsl/app/app.py,sha256=wAHchJetgnicT1pn0NJKDeDX0lV3vDFlG8cQd_Ciax4,8522
 parsl/app/bash.py,sha256=bx9x1XFwkOTpZZD3CPwnVL9SyNRDjbUGtOnuGLvxN_8,5396
@@ -60,7 +60,7 @@ parsl/data_provider/http.py,sha256=nDHTW7XmJqAukWJjPRQjyhUXt8r6GsQ36mX9mv_wOig,2
 parsl/data_provider/rsync.py,sha256=2-ZxqrT-hBj39x082NusJaBqsGW4Jd2qCW6JkVPpEl0,4254
 parsl/data_provider/staging.py,sha256=l-mAXFburs3BWPjkSmiQKuAgJpsxCG62yATPDbrafYI,4523
 parsl/dataflow/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-parsl/dataflow/dflow.py,sha256=
+parsl/dataflow/dflow.py,sha256=FRsenqapR1uRR_6YU8bjqsENyTDMF7I9PftzLopOVzY,63254
 parsl/dataflow/errors.py,sha256=w2vOt_ymzG2dOqJUO4IDcmTlrCIHlMZL8nBVyVq0O_8,2176
 parsl/dataflow/futures.py,sha256=aVfEUTzp4-EdunDAtNcqVQf8l_A7ArDi2c82KZMwxfY,5256
 parsl/dataflow/memoization.py,sha256=AsJO6c6cRp2ac6H8uGn2USlEi78_nX3QWvpxYt4XdYE,9583
@@ -68,9 +68,9 @@ parsl/dataflow/rundirs.py,sha256=XKmBZpBEIsGACBhYOkbbs2e5edC0pQegJcSlk4FWeag,115
 parsl/dataflow/states.py,sha256=hV6mfv-y4A6xrujeQglcomnfEs7y3Xm2g6JFwC6dvgQ,2612
 parsl/dataflow/taskrecord.py,sha256=bzIBmlDTsRrELtB9PUQwxTWcwrCd8aMsUAzvijle1eo,3114
 parsl/executors/__init__.py,sha256=J50N97Nm9YRjz6K0oNXDxUYIsDjL43_tp3LVb2w7n-M,381
-parsl/executors/base.py,sha256=
+parsl/executors/base.py,sha256=AFX7AlMbOoXaImrttO74vhNWhbJwu41JFS5EaWPl8fg,4559
 parsl/executors/errors.py,sha256=xVswxgi7vmJcUMCeYDAPK8sQT2kHFFROVoOr0dnmcWE,2098
-parsl/executors/status_handling.py,sha256=
+parsl/executors/status_handling.py,sha256=8G0QBkcB271bR_UMTUh9lHE3EguMKQNdcQ8BrSR8Cwg,10322
 parsl/executors/threads.py,sha256=bMU3JFghm17Lpcua13pr3NgQhkUDDc2mqvF2yJBrVNQ,3353
 parsl/executors/flux/__init__.py,sha256=P9grTTeRPXfqXurFhlSS7XhmE6tTbnCnyQ1f9b-oYHE,136
 parsl/executors/flux/execute_parsl_task.py,sha256=yUG_WjZLcX8LrgPl26mpEBWZhQMlVNbRLGu08yIjdf4,1553
@@ -78,14 +78,14 @@ parsl/executors/flux/executor.py,sha256=0omXRPvykdW5VZb8mwgBJjxVk4H6G8xoL5D_R9pu
 parsl/executors/flux/flux_instance_manager.py,sha256=tTEOATClm9SwdgLeBRWPC6D55iNDuh0YxqJOw3c3eQ4,2036
 parsl/executors/high_throughput/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 parsl/executors/high_throughput/errors.py,sha256=vl69wLuVOplbKxHI9WphEGBExHWkTn5n8T9QhBXuNH0,380
-parsl/executors/high_throughput/executor.py,sha256=
+parsl/executors/high_throughput/executor.py,sha256=W9515Ggt1iLAz1Xs0HYB54cxbfsHj2beW2Pdt2alKws,37225
 parsl/executors/high_throughput/interchange.py,sha256=Rt6HyFvQYFuqUJ1ytXmUFTDIK9wOBm4l96IHoL6OFRc,31491
 parsl/executors/high_throughput/manager_record.py,sha256=w5EwzVqPtsLOyOW8jP44U3uaogt8H--tkwp7FNyKN_o,385
 parsl/executors/high_throughput/monitoring_info.py,sha256=3gQpwQjjNDEBz0cQqJZB6hRiwLiWwXs83zkQDmbOwxY,297
 parsl/executors/high_throughput/mpi_prefix_composer.py,sha256=GPSejuNL407gvzw9f7dTWArTLn6heTi-erJjgcM-_8Y,4273
 parsl/executors/high_throughput/mpi_resource_management.py,sha256=geLYmp2teKYgTnzATAR_JPtjAa0ysu6pHpXs90vwkds,7975
 parsl/executors/high_throughput/probe.py,sha256=lvnuf-vBv57tHvFh-J51F9sDYBES7jCgs6KYgWvmKRs,2749
-parsl/executors/high_throughput/process_worker_pool.py,sha256=
+parsl/executors/high_throughput/process_worker_pool.py,sha256=PjZ2rFieJUF_sVf2GCsRnsf3X3exYGX1qftA2Zs3kHc,41221
 parsl/executors/high_throughput/zmq_pipes.py,sha256=TEIr1PcBDVbchBukzPaEsku2lbIIFCYYjeUq5zw_VBA,6514
 parsl/executors/radical/__init__.py,sha256=CKbtV2numw5QvgIBq1htMUrt9TqDCIC2zifyf2svTNU,186
 parsl/executors/radical/executor.py,sha256=5r9WZkOr0clg79zm35E7nC7zNv0DpbyM8iTC2B6d4N0,21024
@@ -95,7 +95,7 @@ parsl/executors/radical/rpex_worker.py,sha256=1M1df-hzFdmZMWbRZlUzIX7uAWMKJ_SkxL
 parsl/executors/taskvine/__init__.py,sha256=sWIJdvSLgQKul9dlSjIkNat7yBDgU3SrBF3X2yhT86E,293
 parsl/executors/taskvine/errors.py,sha256=MNS_NjpvHjwevQXOjqjSEBFroqEWi-LT1ZEVZ2C5Dx0,652
 parsl/executors/taskvine/exec_parsl_function.py,sha256=oUAKbPWwpbzWwQ47bZQlVDxS8txhnhPsonMf3AOEMGQ,7085
-parsl/executors/taskvine/executor.py,sha256=
+parsl/executors/taskvine/executor.py,sha256=gpVJg0MXl4D6YzEklhrhRXnbEQxRJaPBvGNGrro0qKI,32531
 parsl/executors/taskvine/factory.py,sha256=sHhfGv7xRFrWkQclzRXuFEAHuSXhsZu2lR5LJ81aucA,2638
 parsl/executors/taskvine/factory_config.py,sha256=AbE2fN2snrF5ITYrrS4DnGn2XkJHUFr_17DYHDHIwq0,3693
 parsl/executors/taskvine/manager.py,sha256=VxVN2L5zFVPNfSAJrGgq87MRJKpcxf-BHdO5QWxB4TU,25822
@@ -104,15 +104,15 @@ parsl/executors/taskvine/utils.py,sha256=iSrIogeiauL3UNy_9tiZp1cBSNn6fIJkMYQRVi1
 parsl/executors/workqueue/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 parsl/executors/workqueue/errors.py,sha256=ghB93Ptb_QbOAvgLe7siV_snRRkU_T-cFHv3AR6Ziwo,541
 parsl/executors/workqueue/exec_parsl_function.py,sha256=NtWNeBvRqksej38eRPw8zPBJ1CeW6vgaitve0tfz_qc,7801
-parsl/executors/workqueue/executor.py,sha256=
+parsl/executors/workqueue/executor.py,sha256=GG5F0PCu6gLvJjWqa7nxqHxIyRa_Iqt25NESDd7MExA,50038
 parsl/executors/workqueue/parsl_coprocess.py,sha256=kEFGC-A97c_gweUPvrc9EEGume7vUpkJLJlyAb87xtQ,5737
 parsl/executors/workqueue/parsl_coprocess_stub.py,sha256=_bJmpPIgL42qM6bVzeEKt1Mn1trSP41rtJguXxPGfHI,735
 parsl/jobs/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 parsl/jobs/error_handlers.py,sha256=WcWZUA7KyE1ocX5zrMf_EwqOob8Jb7uHMjD3nlb_BUo,2319
 parsl/jobs/errors.py,sha256=cpSQXCrlKtuHsQf7usjF-lX8XsDkFnE5kWpmFjiN6OU,178
-parsl/jobs/job_status_poller.py,sha256=
+parsl/jobs/job_status_poller.py,sha256=4aWn_IO1A7NHxzbFT2Mm9Cvk0V1f2mzNxCEXJBQDtS8,6143
 parsl/jobs/states.py,sha256=rPBoAEEudKngWFijlwvXXhAagDs_9DCXvQP9rwzVgCM,4855
-parsl/jobs/strategy.py,sha256
+parsl/jobs/strategy.py,sha256=-rumnV3-p_SSGR9532nh-KZK5P2yxEZs2FhCqxvIIPo,13860
 parsl/launchers/__init__.py,sha256=k8zAB3IBP-brfqXUptKwGkvsIRaXjAJZNBJa2XVtY1A,546
 parsl/launchers/base.py,sha256=CblcvPTJiu-MNLWaRtFe29SZQ0BpTOlaY8CGcHdlHIE,538
 parsl/launchers/errors.py,sha256=v5i460H_rovzukSccQetxQBVtd92jLQz-NbuDe2TdGI,467
@@ -120,9 +120,9 @@ parsl/launchers/launchers.py,sha256=VB--fiVv_IQne3DydTMSdGUY0o0g69puAs-Hd3mJ2vo,
 parsl/monitoring/__init__.py,sha256=0ywNz6i0lM1xo_7_BIxhETDGeVd2C_0wwD7qgeaMR4c,83
 parsl/monitoring/db_manager.py,sha256=hdmmXSTXp8Wwhr7vLpQalD_ahRl3SNxKYVsplnThRk8,37021
 parsl/monitoring/message_type.py,sha256=Khn88afNxcOIciKiCK4GLnn90I5BlRTiOL3zK-P07yQ,401
-parsl/monitoring/monitoring.py,sha256=
+parsl/monitoring/monitoring.py,sha256=5R3-T4vtxedwQnde5aK6MVssKvjf_VU17S0gcft6oAc,13422
 parsl/monitoring/radios.py,sha256=T2_6QuUjC-dd_7qMnIk6WHQead1iWz7m_P6ZC4QAqdA,5265
-parsl/monitoring/remote.py,sha256=
+parsl/monitoring/remote.py,sha256=0wqANMcuvq3dpja3agdbOzD72n5oUYp7PcNKyLCC35E,13923
 parsl/monitoring/router.py,sha256=Y_PJjffS23HwfTJClhg5W4gUXnkAI_3crjjZMoyzxVA,9592
 parsl/monitoring/types.py,sha256=SO6Fjjbb83sv_MtbutoxGssiWh6oXKkEEsD4EvwOnZ4,629
 parsl/monitoring/queries/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -279,7 +279,7 @@ parsl/tests/scaling_tests/wqex_condor.py,sha256=hMo1hK8aj-L36vj0PoByQvL9YQTVrenL
 parsl/tests/scaling_tests/wqex_local.py,sha256=C-eYESKhi4V4XZuHLO0lgP5rovEj8LNYGJOHLpUDdOM,545
 parsl/tests/site_tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 parsl/tests/site_tests/site_config_selector.py,sha256=hk8SO0QMLFk9Ef2QxSa4JTnXJxpUks1mLOeDhaquyqI,1980
-parsl/tests/site_tests/test_provider.py,sha256=
+parsl/tests/site_tests/test_provider.py,sha256=mRhfOnmsNi5wh3v8XFY4OIyMZgf2321zyt7KLiaI_YQ,2694
 parsl/tests/site_tests/test_site.py,sha256=gk23zU9BAZgctkSRAxUTolZKo6abVi45vrEavwpRCcs,1954
 parsl/tests/sites/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 parsl/tests/sites/test_affinity.py,sha256=4bym6aNynLCpPAjlDqFYugQbVHsvJlzBu20TCWYaX1M,1582
@@ -313,11 +313,6 @@ parsl/tests/test_checkpointing/test_regression_232.py,sha256=AsI6AJ0DcFaefAbEY9q
 parsl/tests/test_checkpointing/test_regression_233.py,sha256=jii7BKuygK6KMIGtg4IeBjix7Z28cYhv57rE9ixoXMU,1774
 parsl/tests/test_checkpointing/test_regression_239.py,sha256=P5kmf1LOo_qHtArkBLMhdvNbSPtURDU5u2tI8SXZTb0,2441
 parsl/tests/test_checkpointing/test_task_exit.py,sha256=3-ldQhX7YVEAowWK2TiZ6nrQQ7ktfWr-qaCShtjJZK8,1721
-parsl/tests/test_data/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-parsl/tests/test_data/test_file.py,sha256=Dqen1RJ-uLfzX8xIyJN2Qw3oVy4cFkQQVh6KC72xFDA,950
-parsl/tests/test_data/test_file_apps.py,sha256=zTwLAf4R-lFLoqeyz9ZfFeVTs9PL9dmpKjeZEVG7C2s,1540
-parsl/tests/test_data/test_file_staging.py,sha256=PTBZhTQJsNtUi38uUZOdIb8yw18-qxMoY9GFodzPYuE,674
-parsl/tests/test_data/test_output_chain_filenames.py,sha256=9Mxfl9oU_x1ZSP8JSxT_t4WFCfDTprLjSeFNMm4vVxA,894
 parsl/tests/test_docs/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 parsl/tests/test_docs/test_from_slides.py,sha256=0qJHAsSN3eqn4LAFTyCAq1rIUOotBzyQg7d_rJfBoes,653
 parsl/tests/test_docs/test_kwargs.py,sha256=-rMtAtarg2FOdxMuDLsZY5Crn_jmSwtelMwRNEtTlVk,925
@@ -340,8 +335,8 @@ parsl/tests/test_htex/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3h
 parsl/tests/test_htex/test_basic.py,sha256=GIOF3cr6A87QDVMxeN0LrvJnXO2Nap0c-TH462OaBK4,464
 parsl/tests/test_htex/test_connected_blocks.py,sha256=0628oJ_0_aVsda5xuFwG3_3q8ZiEAM-sfIOINkUHQrk,1639
 parsl/tests/test_htex/test_cpu_affinity_explicit.py,sha256=tv12ojw4DdymlVBjVNnrFX7_mhwix2jnBLkRbKOQRao,1422
-parsl/tests/test_htex/test_disconnected_blocks.py,sha256=
-parsl/tests/test_htex/test_drain.py,sha256=
+parsl/tests/test_htex/test_disconnected_blocks.py,sha256=iga7wmhGACwUN6gkEFPw1dLodj6SzNZpevgSHNYSyjI,1856
+parsl/tests/test_htex/test_drain.py,sha256=BvPQIo0xx-z191eVR2rG51x22yzqD-6dLSH7bCAUhOg,2288
 parsl/tests/test_htex/test_htex.py,sha256=4dXtcthZQvgEDtMc00g6Pw7FnqNWB_0j8fuJqHKO-IE,3896
 parsl/tests/test_htex/test_manager_failure.py,sha256=gemQopZoDEoZLOvep5JZkY6tQlZoko8Z0Kmpj1-Gbws,1161
 parsl/tests/test_htex/test_missing_worker.py,sha256=oiDN3ylsf-72jmX-Y5OWA2kQWpbVbvmoSLnu2vnyZeY,976
@@ -353,7 +348,7 @@ parsl/tests/test_monitoring/test_app_names.py,sha256=4Ziggxv0JLP0UGAd5jjXdivUdZQ
 parsl/tests/test_monitoring/test_basic.py,sha256=uXWx2O2Y2gfSO4e8zTjyj5bucKHG9OVzMxQNnq9abeY,2776
 parsl/tests/test_monitoring/test_db_locks.py,sha256=PGoRmvqA6AYPXTPHOZPLH38Z4D6EEgSb6ZgNfZtwIGk,2910
 parsl/tests/test_monitoring/test_fuzz_zmq.py,sha256=CpTRF2z2xpshlHHTNiNIIJMOx8bxSmSyAwbMYcOkgBk,3121
-parsl/tests/test_monitoring/test_htex_init_blocks_vs_monitoring.py,sha256=
+parsl/tests/test_monitoring/test_htex_init_blocks_vs_monitoring.py,sha256=Lfa6ENZWrExRsZcISMdF_G4VjswzSb0wlRSQFoZXkyQ,2765
 parsl/tests/test_monitoring/test_incomplete_futures.py,sha256=9lJhkWlVB8gCCTkFjObzoh1uCL1pRmU6gFgEzLCztnY,2021
 parsl/tests/test_monitoring/test_memoization_representation.py,sha256=tErT7zseSMaQ5eNmK3hH90J6OZKuAaFQG50OXK6Jy9s,2660
 parsl/tests/test_monitoring/test_viz_colouring.py,sha256=k8SiELxPtnGYZ4r02VQt46RC61fGDVC4nmY768snX1U,591
@@ -374,7 +369,7 @@ parsl/tests/test_providers/test_submiterror_deprecation.py,sha256=ZutVj_0VJ7M-5U
 parsl/tests/test_python_apps/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 parsl/tests/test_python_apps/test_arg_input_types.py,sha256=JXpfHiu8lr9BN6u1OzqFvGwBhxzsGTPMewHx6Wdo-HI,670
 parsl/tests/test_python_apps/test_basic.py,sha256=lFqh4ugePbp_FRiHGUXxzV34iS7l8C5UkxTHuLcpnYs,855
-parsl/tests/test_python_apps/test_context_manager.py,sha256=
+parsl/tests/test_python_apps/test_context_manager.py,sha256=iMBjOFmqEO2rDGPJwDySa1kWz-dRnZZNEWDPipS_Xi4,877
 parsl/tests/test_python_apps/test_dep_standard_futures.py,sha256=BloeaYBci0jS5al2d8Eqe3OfZ1tvolA5ZflOBQPR9Wo,859
 parsl/tests/test_python_apps/test_dependencies.py,sha256=IRiTI_lPoWBSFSFnaBlE6Bv08PKEaf-qj5dfqO2RjT0,272
 parsl/tests/test_python_apps/test_depfail_propagation.py,sha256=3q3HlVWrOixFtXWBvR_ypKtbdAHAJcKndXQ5drwrBQU,1488
@@ -417,6 +412,7 @@ parsl/tests/test_scaling/test_block_error_handler.py,sha256=VFKs_jq7yd7bpdfYva3S
 parsl/tests/test_scaling/test_regression_1621.py,sha256=iRu3GFsg2l9J61AVZKWLc6zJcvI2JYD0WvtTYDSv22I,1770
 parsl/tests/test_scaling/test_scale_down.py,sha256=T8NVmoIebdpSjrNJCdgDHumpz9eKLkJrpeW7Kwi8cBg,2821
 parsl/tests/test_scaling/test_scale_down_htex_auto_scale.py,sha256=1vP2a8qygnxuUji7B3kJOUgwjmmIC1fDPhDdqzs5YFA,4597
+parsl/tests/test_scaling/test_shutdown_scalein.py,sha256=8QYnU67Ezx7Il9edR-Wrwzxp3xE3E3ocXfrs4P1eCFQ,2417
 parsl/tests/test_serialization/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 parsl/tests/test_serialization/test_2555_caching_deserializer.py,sha256=J8__b4djA5tErd8FUSXGkGcdXlW2KHbBWRbCTAnV08Q,767
 parsl/tests/test_serialization/test_basic.py,sha256=51KshqIk2RNr7S2iSkl5tZo40CJBb0h6uby8YPgOGlg,543
@@ -432,6 +428,10 @@ parsl/tests/test_staging/test_1316.py,sha256=pj1QbmOJSRES1R4Ov380MmVe6xXvPUXh4FB
 parsl/tests/test_staging/test_docs_1.py,sha256=SIGIYo9w2vwkQ-i9Io38sYYj8ns7uFrD1uziR_0Ae2w,628
 parsl/tests/test_staging/test_docs_2.py,sha256=zy6P6aanR27_U6ASDrB0YyG8udyRvA8r2HRDX5RcslU,463
 parsl/tests/test_staging/test_elaborate_noop_file.py,sha256=d694K2jKhyBM0bIY9j3w_huVjTU2CVFPgIRfYFpIQQM,2466
+parsl/tests/test_staging/test_file.py,sha256=Dqen1RJ-uLfzX8xIyJN2Qw3oVy4cFkQQVh6KC72xFDA,950
+parsl/tests/test_staging/test_file_apps.py,sha256=zTwLAf4R-lFLoqeyz9ZfFeVTs9PL9dmpKjeZEVG7C2s,1540
+parsl/tests/test_staging/test_file_staging.py,sha256=PTBZhTQJsNtUi38uUZOdIb8yw18-qxMoY9GFodzPYuE,674
+parsl/tests/test_staging/test_output_chain_filenames.py,sha256=9Mxfl9oU_x1ZSP8JSxT_t4WFCfDTprLjSeFNMm4vVxA,894
 parsl/tests/test_staging/test_staging_ftp.py,sha256=EkRoTcQ00FZGh8lDVYBdKb-pQ-ybW2Sx5vqGltoMGJ4,778
 parsl/tests/test_staging/test_staging_ftp_in_task.py,sha256=kR2XrGvbvVFDpHg53NnjO04kqEksTJjQAMQwYqBdb2M,884
 parsl/tests/test_staging/test_staging_globus.py,sha256=ds8nDH5dNbI10FV_GxMHyVaY6GPnuPPzkX9IiqROLF0,2339
@@ -443,12 +443,12 @@ parsl/tests/test_utils/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3
 parsl/tests/test_utils/test_representation_mixin.py,sha256=kUZeIDwA2rlbJ3-beGzLLwf3dOplTMCrWJN87etHcyY,1633
 parsl/usage_tracking/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 parsl/usage_tracking/usage.py,sha256=pSADeogWqvkYI_n2pojv4IWDEFAQ3KwXNx6LDTohMHQ,6684
-parsl-2024.
-parsl-2024.
-parsl-2024.
-parsl-2024.
-parsl-2024.
-parsl-2024.
-parsl-2024.
-parsl-2024.
-parsl-2024.
+parsl-2024.4.8.data/scripts/exec_parsl_function.py,sha256=NtWNeBvRqksej38eRPw8zPBJ1CeW6vgaitve0tfz_qc,7801
+parsl-2024.4.8.data/scripts/parsl_coprocess.py,sha256=Y7Tc-h9WGui-YDe3w_h91w2Sm1JNL1gJ9kAV4PE_gw8,5722
+parsl-2024.4.8.data/scripts/process_worker_pool.py,sha256=V3K4admJ7QvwR9sN0GH-c6uOTgNU8zVb76q872WtYCo,41207
+parsl-2024.4.8.dist-info/LICENSE,sha256=tAkwu8-AdEyGxGoSvJ2gVmQdcicWw3j1ZZueVV74M-E,11357
+parsl-2024.4.8.dist-info/METADATA,sha256=iCcfSeM8LvJdXuanu5jEEGo_H-wXXz56VWD0iCzUYfg,3973
+parsl-2024.4.8.dist-info/WHEEL,sha256=GJ7t_kWBFywbagK5eo9IoUwLW6oyOeTKmQ-9iHFVNxQ,92
+parsl-2024.4.8.dist-info/entry_points.txt,sha256=XqnsWDYoEcLbsMcpnYGKLEnSBmaIe1YoM5YsBdJG2tI,176
+parsl-2024.4.8.dist-info/top_level.txt,sha256=PIheYoUFQtF2icLsgOykgU-Cjuwr2Oi6On2jo5RYgRM,6
+parsl-2024.4.8.dist-info/RECORD,,

File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes