parsl 2024.3.25__py3-none-any.whl → 2024.4.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (31)
  1. parsl/dataflow/dflow.py +25 -33
  2. parsl/executors/base.py +1 -1
  3. parsl/executors/high_throughput/executor.py +8 -20
  4. parsl/executors/high_throughput/process_worker_pool.py +5 -2
  5. parsl/executors/status_handling.py +6 -6
  6. parsl/executors/taskvine/executor.py +17 -13
  7. parsl/executors/workqueue/executor.py +17 -14
  8. parsl/jobs/job_status_poller.py +8 -7
  9. parsl/jobs/strategy.py +31 -18
  10. parsl/monitoring/monitoring.py +1 -20
  11. parsl/tests/site_tests/test_provider.py +1 -1
  12. parsl/tests/test_htex/test_disconnected_blocks.py +0 -1
  13. parsl/tests/test_htex/test_drain.py +1 -0
  14. parsl/tests/test_monitoring/test_htex_init_blocks_vs_monitoring.py +9 -6
  15. parsl/tests/test_python_apps/test_context_manager.py +3 -3
  16. parsl/tests/test_scaling/test_shutdown_scalein.py +78 -0
  17. parsl/version.py +1 -1
  18. {parsl-2024.3.25.data → parsl-2024.4.1.data}/scripts/process_worker_pool.py +5 -2
  19. {parsl-2024.3.25.dist-info → parsl-2024.4.1.dist-info}/METADATA +2 -2
  20. {parsl-2024.3.25.dist-info → parsl-2024.4.1.dist-info}/RECORD +30 -30
  21. parsl/tests/test_data/__init__.py +0 -0
  22. /parsl/tests/{test_data → test_staging}/test_file.py +0 -0
  23. /parsl/tests/{test_data → test_staging}/test_file_apps.py +0 -0
  24. /parsl/tests/{test_data → test_staging}/test_file_staging.py +0 -0
  25. /parsl/tests/{test_data → test_staging}/test_output_chain_filenames.py +0 -0
  26. {parsl-2024.3.25.data → parsl-2024.4.1.data}/scripts/exec_parsl_function.py +0 -0
  27. {parsl-2024.3.25.data → parsl-2024.4.1.data}/scripts/parsl_coprocess.py +0 -0
  28. {parsl-2024.3.25.dist-info → parsl-2024.4.1.dist-info}/LICENSE +0 -0
  29. {parsl-2024.3.25.dist-info → parsl-2024.4.1.dist-info}/WHEEL +0 -0
  30. {parsl-2024.3.25.dist-info → parsl-2024.4.1.dist-info}/entry_points.txt +0 -0
  31. {parsl-2024.3.25.dist-info → parsl-2024.4.1.dist-info}/top_level.txt +0 -0
parsl/dataflow/dflow.py CHANGED
@@ -34,12 +34,12 @@ from parsl.dataflow.states import States, FINAL_STATES, FINAL_FAILURE_STATES
  from parsl.dataflow.taskrecord import TaskRecord
  from parsl.errors import ConfigurationError, InternalConsistencyError, NoDataFlowKernelError
  from parsl.jobs.job_status_poller import JobStatusPoller
- from parsl.jobs.states import JobStatus, JobState
  from parsl.usage_tracking.usage import UsageTracker
  from parsl.executors.base import ParslExecutor
  from parsl.executors.status_handling import BlockProviderExecutor
  from parsl.executors.threads import ThreadPoolExecutor
  from parsl.monitoring import MonitoringHub
+ from parsl.monitoring.remote import monitor_wrapper
  from parsl.process_loggers import wrap_with_logs
  from parsl.providers.base import ExecutionProvider
  from parsl.utils import get_version, get_std_fname_mode, get_all_checkpoints, Timer
@@ -207,7 +207,7 @@ class DataFlowKernel:
  atexit.register(self.atexit_cleanup)

  def __enter__(self):
- pass
+ return self

  def __exit__(self, exc_type, exc_value, traceback):
  logger.debug("Exiting the context manager, calling cleanup for DFK")
@@ -714,14 +714,14 @@ class DataFlowKernel:

  if self.monitoring is not None and self.monitoring.resource_monitoring_enabled:
  wrapper_logging_level = logging.DEBUG if self.monitoring.monitoring_debug else logging.INFO
- (function, args, kwargs) = self.monitoring.monitor_wrapper(function, args, kwargs, try_id, task_id,
- self.monitoring.monitoring_hub_url,
- self.run_id,
- wrapper_logging_level,
- self.monitoring.resource_monitoring_interval,
- executor.radio_mode,
- executor.monitor_resources(),
- self.run_dir)
+ (function, args, kwargs) = monitor_wrapper(function, args, kwargs, try_id, task_id,
+ self.monitoring.monitoring_hub_url,
+ self.run_id,
+ wrapper_logging_level,
+ self.monitoring.resource_monitoring_interval,
+ executor.radio_mode,
+ executor.monitor_resources(),
+ self.run_dir)

  with self.submitter_lock:
  exec_fu = executor.submit(function, task_record['resource_specification'], *args, **kwargs)
@@ -1141,14 +1141,7 @@ class DataFlowKernel:
  self._create_remote_dirs_over_channel(executor.provider, executor.provider.channel)

  self.executors[executor.label] = executor
- block_ids = executor.start()
- if self.monitoring and block_ids:
- new_status = {}
- for bid in block_ids:
- new_status[bid] = JobStatus(JobState.PENDING)
- msg = executor.create_monitoring_info(new_status)
- logger.debug("Sending monitoring message {} to hub from DFK".format(msg))
- self.monitoring.send(MessageType.BLOCK_INFO, msg)
+ executor.start()
  block_executors = [e for e in executors if isinstance(e, BlockProviderExecutor)]
  self.job_status_poller.add_executors(block_executors)

@@ -1223,22 +1216,21 @@ class DataFlowKernel:

  logger.info("Scaling in and shutting down executors")

+ for ef in self.job_status_poller._executor_facades:
+ if not ef.executor.bad_state_is_set:
+ logger.info(f"Scaling in executor {ef.executor.label}")
+
+ # this code needs to be at least as many blocks as need
+ # cancelling, but it is safe to be more, as the scaling
+ # code will cope with being asked to cancel more blocks
+ # than exist.
+ block_count = len(ef.status)
+ ef.scale_in(block_count)
+
+ else: # and bad_state_is_set
+ logger.warning(f"Not scaling in executor {ef.executor.label} because it is in bad state")
+
  for executor in self.executors.values():
- if isinstance(executor, BlockProviderExecutor):
- if not executor.bad_state_is_set:
- logger.info(f"Scaling in executor {executor.label}")
- if executor.provider:
- job_ids = executor.provider.resources.keys()
- block_ids = executor.scale_in(len(job_ids))
- if self.monitoring and block_ids:
- new_status = {}
- for bid in block_ids:
- new_status[bid] = JobStatus(JobState.CANCELLED)
- msg = executor.create_monitoring_info(new_status)
- logger.debug("Sending message {} to hub from DFK".format(msg))
- self.monitoring.send(MessageType.BLOCK_INFO, msg)
- else: # and bad_state_is_set
- logger.warning(f"Not shutting down executor {executor.label} because it is in bad state")
  logger.info(f"Shutting down executor {executor.label}")
  executor.shutdown()
  logger.info(f"Shut down executor {executor.label}")
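Note: with __enter__ now returning the DataFlowKernel rather than None, the DFK produced by parsl.load(...) can be bound directly in a with statement; cleanup still runs in __exit__. A minimal sketch, assuming config is any valid parsl.config.Config (this mirrors the updated test_context_manager.py further down in this diff):

    import parsl
    from parsl.dataflow.dflow import DataFlowKernel

    with parsl.load(config) as dfk:            # dfk is the DataFlowKernel itself
        assert isinstance(dfk, DataFlowKernel)
        # apps submitted here run against this DFK; cleanup happens when the block exits
    parsl.clear()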
parsl/executors/base.py CHANGED
@@ -53,7 +53,7 @@ class ParslExecutor(metaclass=ABCMeta):
  return False

  @abstractmethod
- def start(self) -> Optional[List[str]]:
+ def start(self) -> None:
  """Start the executor.

  Any spin-up operations (for example: starting thread pools) should be performed here.
parsl/executors/high_throughput/executor.py CHANGED
@@ -1,4 +1,5 @@
  import typing
+ from collections import defaultdict
  from concurrent.futures import Future
  import typeguard
  import logging
@@ -400,16 +401,6 @@ class HighThroughputExecutor(BlockProviderExecutor, RepresentationMixin):

  logger.debug("Starting HighThroughputExecutor with provider:\n%s", self.provider)

- # TODO: why is this a provider property?
- block_ids = []
- if hasattr(self.provider, 'init_blocks'):
- try:
- block_ids = self.scale_out(blocks=self.provider.init_blocks)
- except Exception as e:
- logger.error("Scaling out failed: {}".format(e))
- raise e
- return block_ids
-
  def start(self):
  """Create the Interchange process and connect to it.
  """
@@ -439,8 +430,7 @@ class HighThroughputExecutor(BlockProviderExecutor, RepresentationMixin):

  logger.debug("Created management thread: {}".format(self._queue_management_thread))

- block_ids = self.initialize_scaling()
- return block_ids
+ self.initialize_scaling()

  @wrap_with_logs
  def _queue_management_worker(self):
@@ -698,7 +688,7 @@ class HighThroughputExecutor(BlockProviderExecutor, RepresentationMixin):
  d['status'] = s.status_name
  d['timestamp'] = datetime.datetime.now()
  d['executor_label'] = self.label
- d['job_id'] = self.blocks.get(bid, None)
+ d['job_id'] = self.blocks_to_job_id.get(bid, None)
  d['block_id'] = bid
  msg.append(d)
  return msg
@@ -741,13 +731,11 @@ class HighThroughputExecutor(BlockProviderExecutor, RepresentationMixin):
  idle: float # shortest idle time of any manager in this block

  managers = self.connected_managers()
- block_info: Dict[str, BlockInfo] = {}
+ block_info: Dict[str, BlockInfo] = defaultdict(lambda: BlockInfo(tasks=0, idle=float('inf')))
  for manager in managers:
  if not manager['active']:
  continue
  b_id = manager['block_id']
- if b_id not in block_info:
- block_info[b_id] = BlockInfo(tasks=0, idle=float('inf'))
  block_info[b_id].tasks += manager['tasks']
  block_info[b_id].idle = min(block_info[b_id].idle, manager['idle_duration'])

@@ -779,14 +767,14 @@ class HighThroughputExecutor(BlockProviderExecutor, RepresentationMixin):

  # Now kill via provider
  # Potential issue with multiple threads trying to remove the same blocks
- to_kill = [self.blocks[bid] for bid in block_ids_to_kill if bid in self.blocks]
+ to_kill = [self.blocks_to_job_id[bid] for bid in block_ids_to_kill if bid in self.blocks_to_job_id]

  r = self.provider.cancel(to_kill)
  job_ids = self._filter_scale_in_ids(to_kill, r)

- # to_kill block_ids are fetched from self.blocks
- # If a block_id is in self.block, it must exist in self.block_mapping
- block_ids_killed = [self.block_mapping[jid] for jid in job_ids]
+ # to_kill block_ids are fetched from self.blocks_to_job_id
+ # If a block_id is in self.blocks_to_job_id, it must exist in self.job_ids_to_block
+ block_ids_killed = [self.job_ids_to_block[jid] for jid in job_ids]

  return block_ids_killed

parsl/executors/high_throughput/process_worker_pool.py CHANGED
@@ -335,14 +335,17 @@ class Manager:
  self.heartbeat_to_incoming()
  last_beat = time.time()

- if self.drain_time and time.time() > self.drain_time:
+ if time.time() > self.drain_time:
  logger.info("Requesting drain")
  self.drain_to_incoming()
- self.drain_time = None
  # This will start the pool draining...
  # Drained exit behaviour does not happen here. It will be
  # driven by the interchange sending a DRAINED_CODE message.

+ # now set drain time to the far future so we don't send a drain
+ # message every iteration.
+ self.drain_time = float('inf')
+
  poll_duration_s = max(0, next_interesting_event_time - time.time())
  socks = dict(poller.poll(timeout=poll_duration_s * 1000))

parsl/executors/status_handling.py CHANGED
@@ -68,8 +68,8 @@ class BlockProviderExecutor(ParslExecutor):
  self._block_id_counter = AtomicIDCounter()

  self._tasks = {} # type: Dict[object, Future]
- self.blocks = {} # type: Dict[str, str]
- self.block_mapping = {} # type: Dict[str, str]
+ self.blocks_to_job_id = {} # type: Dict[str, str]
+ self.job_ids_to_block = {} # type: Dict[str, str]

  def _make_status_dict(self, block_ids: List[str], status_list: List[JobStatus]) -> Dict[str, JobStatus]:
  """Given a list of block ids and a list of corresponding status strings,
@@ -194,8 +194,8 @@ class BlockProviderExecutor(ParslExecutor):
  logger.info(f"Allocated block ID {block_id}")
  try:
  job_id = self._launch_block(block_id)
- self.blocks[block_id] = job_id
- self.block_mapping[job_id] = block_id
+ self.blocks_to_job_id[block_id] = job_id
+ self.job_ids_to_block[job_id] = block_id
  block_ids.append(block_id)
  except Exception as ex:
  self._fail_job_async(block_id,
@@ -232,10 +232,10 @@ class BlockProviderExecutor(ParslExecutor):
  # Not using self.blocks.keys() and self.blocks.values() simultaneously
  # The dictionary may be changed during invoking this function
  # As scale_in and scale_out are invoked in multiple threads
- block_ids = list(self.blocks.keys())
+ block_ids = list(self.blocks_to_job_id.keys())
  job_ids = [] # types: List[Any]
  for bid in block_ids:
- job_ids.append(self.blocks[bid])
+ job_ids.append(self.blocks_to_job_id[bid])
  return block_ids, job_ids

  @abstractproperty
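Note: the per-executor block bookkeeping dicts are renamed here, so any code that previously reached into executor.blocks or executor.block_mapping would need updating. A rough sketch of the new naming (illustrative only, not taken from the diff):

    # block id -> provider job id
    job_id = executor.blocks_to_job_id[block_id]
    # provider job id -> block id
    block_id = executor.job_ids_to_block[job_id]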
parsl/executors/taskvine/executor.py CHANGED
@@ -186,7 +186,13 @@ class TaskVineExecutor(BlockProviderExecutor, putils.RepresentationMixin):
  # Attribute indicating whether this executor was started to shut it down properly.
  # This safeguards cases where an object of this executor is created but
  # the executor never starts, so it shouldn't be shutdowned.
- self._started = False
+ self._is_started = False
+
+ # Attribute indicating whether this executor was shutdown before.
+ # This safeguards cases where this object is automatically shut down (e.g.,
+ # via atexit) and the user also explicitly calls shut down. While this is
+ # permitted, the effect of an executor shutdown should happen only once.
+ self._is_shutdown = False

  def atexit_cleanup(self):
  # Calls this executor's shutdown method upon Python exiting the process.
@@ -252,7 +258,7 @@ class TaskVineExecutor(BlockProviderExecutor, putils.RepresentationMixin):
  """

  # Mark this executor object as started
- self._started = True
+ self._is_started = True

  # Synchronize connection and communication settings between the manager and factory
  self.__synchronize_manager_factory_comm_settings()
@@ -580,13 +586,6 @@ class TaskVineExecutor(BlockProviderExecutor, putils.RepresentationMixin):
  self._worker_command = self._construct_worker_command()
  self._patch_providers()

- if hasattr(self.provider, 'init_blocks'):
- try:
- self.scale_out(blocks=self.provider.init_blocks)
- except Exception as e:
- logger.error("Initial block scaling out failed: {}".format(e))
- raise e
-
  @property
  def outstanding(self) -> int:
  """Count the number of outstanding tasks."""
@@ -601,8 +600,8 @@ class TaskVineExecutor(BlockProviderExecutor, putils.RepresentationMixin):
  """Scale in method. Cancel a given number of blocks
  """
  # Obtain list of blocks to kill
- to_kill = list(self.blocks.keys())[:count]
- kill_ids = [self.blocks[block] for block in to_kill]
+ to_kill = list(self.blocks_to_job_id.keys())[:count]
+ kill_ids = [self.blocks_to_job_id[block] for block in to_kill]

  # Cancel the blocks provisioned
  if self.provider:
@@ -614,15 +613,19 @@ class TaskVineExecutor(BlockProviderExecutor, putils.RepresentationMixin):
  """Shutdown the executor. Sets flag to cancel the submit process and
  collector thread, which shuts down the TaskVine system submission.
  """
- if not self._started:
+ if not self._is_started:
  # Don't shutdown if the executor never starts.
  return

+ if self._is_shutdown:
+ # Don't shutdown this executor again.
+ return
+
  logger.debug("TaskVine shutdown started")
  self._should_stop.set()

  # Remove the workers that are still going
- kill_ids = [self.blocks[block] for block in self.blocks.keys()]
+ kill_ids = [self.blocks_to_job_id[block] for block in self.blocks_to_job_id.keys()]
  if self.provider:
  logger.debug("Cancelling blocks")
  self.provider.cancel(kill_ids)
@@ -636,6 +639,7 @@ class TaskVineExecutor(BlockProviderExecutor, putils.RepresentationMixin):
  logger.debug("Joining on factory process")
  self._factory_process.join()

+ self._is_shutdown = True
  logger.debug("TaskVine shutdown completed")

  @wrap_with_logs
parsl/executors/workqueue/executor.py CHANGED
@@ -255,7 +255,6 @@ class WorkQueueExecutor(BlockProviderExecutor, putils.RepresentationMixin):
  self.label = label
  self.task_queue = multiprocessing.Queue() # type: multiprocessing.Queue
  self.collector_queue = multiprocessing.Queue() # type: multiprocessing.Queue
- self.blocks = {} # type: Dict[str, str]
  self.address = address
  self.port = port
  self.executor_task_counter = -1
@@ -305,7 +304,13 @@ class WorkQueueExecutor(BlockProviderExecutor, putils.RepresentationMixin):
  # Attribute indicating whether this executor was started to shut it down properly.
  # This safeguards cases where an object of this executor is created but
  # the executor never starts, so it shouldn't be shutdowned.
- self.started = False
+ self.is_started = False
+
+ # Attribute indicating whether this executor was shutdown before.
+ # This safeguards cases where this object is automatically shut down (e.g.,
+ # via atexit) and the user also explicitly calls shut down. While this is
+ # permitted, the effect of an executor shutdown should happen only once.
+ self.is_shutdown = False

  def atexit_cleanup(self):
  # Calls this executor's shutdown method upon Python exiting the process.
@@ -321,7 +326,7 @@ class WorkQueueExecutor(BlockProviderExecutor, putils.RepresentationMixin):
  retrieve Parsl tasks within the Work Queue system.
  """
  # Mark this executor object as started
- self.started = True
+ self.is_started = True
  self.tasks_lock = threading.Lock()

  # Create directories for data and results
@@ -669,13 +674,6 @@ class WorkQueueExecutor(BlockProviderExecutor, putils.RepresentationMixin):
  self.worker_command = self._construct_worker_command()
  self._patch_providers()

- if hasattr(self.provider, 'init_blocks'):
- try:
- self.scale_out(blocks=self.provider.init_blocks)
- except Exception as e:
- logger.error("Initial block scaling out failed: {}".format(e))
- raise e
-
  @property
  def outstanding(self) -> int:
  """Count the number of outstanding tasks. This is inefficiently
@@ -697,8 +695,8 @@ class WorkQueueExecutor(BlockProviderExecutor, putils.RepresentationMixin):
  """Scale in method.
  """
  # Obtain list of blocks to kill
- to_kill = list(self.blocks.keys())[:count]
- kill_ids = [self.blocks[block] for block in to_kill]
+ to_kill = list(self.blocks_to_job_id.keys())[:count]
+ kill_ids = [self.blocks_to_job_id[block] for block in to_kill]

  # Cancel the blocks provisioned
  if self.provider:
@@ -710,15 +708,19 @@ class WorkQueueExecutor(BlockProviderExecutor, putils.RepresentationMixin):
  """Shutdown the executor. Sets flag to cancel the submit process and
  collector thread, which shuts down the Work Queue system submission.
  """
- if not self.started:
+ if not self.is_started:
  # Don't shutdown if the executor never starts.
  return

+ if self.is_shutdown:
+ # Don't shutdown this executor again.
+ return
+
  logger.debug("Work Queue shutdown started")
  self.should_stop.value = True

  # Remove the workers that are still going
- kill_ids = [self.blocks[block] for block in self.blocks.keys()]
+ kill_ids = [self.blocks_to_job_id[block] for block in self.blocks_to_job_id.keys()]
  if self.provider:
  logger.debug("Cancelling blocks")
  self.provider.cancel(kill_ids)
@@ -728,6 +730,7 @@ class WorkQueueExecutor(BlockProviderExecutor, putils.RepresentationMixin):
  logger.debug("Joining on collector thread")
  self.collector_thread.join()

+ self.is_shutdown = True
  logger.debug("Work Queue shutdown completed")

  @wrap_with_logs
parsl/jobs/job_status_poller.py CHANGED
@@ -16,13 +16,14 @@ from parsl.utils import Timer
  logger = logging.getLogger(__name__)


- class PollItem:
+ class PolledExecutorFacade:
  def __init__(self, executor: BlockProviderExecutor, dfk: Optional["parsl.dataflow.dflow.DataFlowKernel"] = None):
  self._executor = executor
  self._dfk = dfk
  self._interval = executor.status_polling_interval
  self._last_poll_time = 0.0
  self._status = {} # type: Dict[str, JobStatus]
+ self.first = True

  # Create a ZMQ channel to send poll status to monitoring
  self.monitoring_enabled = False
@@ -109,7 +110,7 @@ class JobStatusPoller(Timer):
  def __init__(self, *, strategy: Optional[str], max_idletime: float,
  strategy_period: Union[float, int],
  dfk: Optional["parsl.dataflow.dflow.DataFlowKernel"] = None) -> None:
- self._poll_items = [] # type: List[PollItem]
+ self._executor_facades = [] # type: List[PolledExecutorFacade]
  self.dfk = dfk
  self._strategy = Strategy(strategy=strategy,
  max_idletime=max_idletime)
@@ -117,21 +118,21 @@ class JobStatusPoller(Timer):

  def poll(self) -> None:
  self._update_state()
- self._run_error_handlers(self._poll_items)
- self._strategy.strategize(self._poll_items)
+ self._run_error_handlers(self._executor_facades)
+ self._strategy.strategize(self._executor_facades)

- def _run_error_handlers(self, status: List[PollItem]) -> None:
+ def _run_error_handlers(self, status: List[PolledExecutorFacade]) -> None:
  for es in status:
  es.executor.handle_errors(es.status)

  def _update_state(self) -> None:
  now = time.time()
- for item in self._poll_items:
+ for item in self._executor_facades:
  item.poll(now)

  def add_executors(self, executors: Sequence[BlockProviderExecutor]) -> None:
  for executor in executors:
  if executor.status_polling_interval > 0:
  logger.debug("Adding executor {}".format(executor.label))
- self._poll_items.append(PollItem(executor, self.dfk))
+ self._executor_facades.append(PolledExecutorFacade(executor, self.dfk))
  self._strategy.add_executors(executors)
parsl/jobs/strategy.py CHANGED
@@ -129,8 +129,8 @@ class Strategy:
  self.executors = {}
  self.max_idletime = max_idletime

- self.strategies = {None: self._strategy_noop,
- 'none': self._strategy_noop,
+ self.strategies = {None: self._strategy_init_only,
+ 'none': self._strategy_init_only,
  'simple': self._strategy_simple,
  'htex_auto_scale': self._strategy_htex_auto_scale}

@@ -146,15 +146,22 @@ class Strategy:
  for executor in executors:
  self.executors[executor.label] = {'idle_since': None}

- def _strategy_noop(self, status: List[jsp.PollItem]) -> None:
- """Do nothing.
+ def _strategy_init_only(self, executor_facades: List[jsp.PolledExecutorFacade]) -> None:
+ """Scale up to init_blocks at the start, then nothing more.
  """
- logger.debug("strategy_noop: doing nothing")
+ for ef in executor_facades:
+ if ef.first:
+ executor = ef.executor
+ logger.debug(f"strategy_init_only: scaling out {executor.provider.init_blocks} initial blocks for {executor.label}")
+ ef.scale_out(executor.provider.init_blocks)
+ ef.first = False
+ else:
+ logger.debug("strategy_init_only: doing nothing")

- def _strategy_simple(self, status_list: List[jsp.PollItem]) -> None:
- self._general_strategy(status_list, strategy_type='simple')
+ def _strategy_simple(self, executor_facades: List[jsp.PolledExecutorFacade]) -> None:
+ self._general_strategy(executor_facades, strategy_type='simple')

- def _strategy_htex_auto_scale(self, status_list: List[jsp.PollItem]) -> None:
+ def _strategy_htex_auto_scale(self, executor_facades: List[jsp.PolledExecutorFacade]) -> None:
  """HTEX specific auto scaling strategy

  This strategy works only for HTEX. This strategy will scale out by
@@ -169,24 +176,30 @@ class Strategy:
  expected to scale in effectively only when # of workers, or tasks executing
  per block is close to 1.
  """
- self._general_strategy(status_list, strategy_type='htex')
+ self._general_strategy(executor_facades, strategy_type='htex')

  @wrap_with_logs
- def _general_strategy(self, status_list, *, strategy_type):
- logger.debug(f"general strategy starting with strategy_type {strategy_type} for {len(status_list)} executors")
+ def _general_strategy(self, executor_facades, *, strategy_type):
+ logger.debug(f"general strategy starting with strategy_type {strategy_type} for {len(executor_facades)} executors")

- for exec_status in status_list:
- executor = exec_status.executor
+ for ef in executor_facades:
+ executor = ef.executor
  label = executor.label
  if not isinstance(executor, BlockProviderExecutor):
  logger.debug(f"Not strategizing for executor {label} because scaling not enabled")
  continue
  logger.debug(f"Strategizing for executor {label}")

+ if ef.first:
+ executor = ef.executor
+ logger.debug(f"Scaling out {executor.provider.init_blocks} initial blocks for {label}")
+ ef.scale_out(executor.provider.init_blocks)
+ ef.first = False
+
  # Tasks that are either pending completion
  active_tasks = executor.outstanding

- status = exec_status.status
+ status = ef.status

  # FIXME we need to handle case where provider does not define these
  # FIXME probably more of this logic should be moved to the provider
@@ -242,7 +255,7 @@ class Strategy:
  # We have resources idle for the max duration,
  # we have to scale_in now.
  logger.debug(f"Idle time has reached {self.max_idletime}s for executor {label}; scaling in")
- exec_status.scale_in(active_blocks - min_blocks)
+ ef.scale_in(active_blocks - min_blocks)

  else:
  logger.debug(
@@ -265,7 +278,7 @@ class Strategy:
  excess_blocks = math.ceil(float(excess_slots) / (tasks_per_node * nodes_per_block))
  excess_blocks = min(excess_blocks, max_blocks - active_blocks)
  logger.debug(f"Requesting {excess_blocks} more blocks")
- exec_status.scale_out(excess_blocks)
+ ef.scale_out(excess_blocks)

  elif active_slots == 0 and active_tasks > 0:
  logger.debug("Strategy case 4a: No active slots but some active tasks - could scale out by a single block")
@@ -274,7 +287,7 @@ class Strategy:
  if active_blocks < max_blocks:
  logger.debug("Requesting single block")

- exec_status.scale_out(1)
+ ef.scale_out(1)
  else:
  logger.debug("Not requesting single block, because at maxblocks already")

@@ -290,7 +303,7 @@ class Strategy:
  excess_blocks = math.ceil(float(excess_slots) / (tasks_per_node * nodes_per_block))
  excess_blocks = min(excess_blocks, active_blocks - min_blocks)
  logger.debug(f"Requesting scaling in by {excess_blocks} blocks with idle time {self.max_idletime}s")
- exec_status.scale_in(excess_blocks, max_idletime=self.max_idletime)
+ ef.scale_in(excess_blocks, max_idletime=self.max_idletime)
  else:
  logger.error("This strategy does not support scaling in except for HighThroughputExecutor - taking no action")
  else:
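Note: init_blocks scale-out now happens on the first pass of the strategy poller (including for strategy 'none', via _strategy_init_only) rather than inside each executor's start(). A minimal sketch of a configuration exercising this, assuming htex is an already-constructed HighThroughputExecutor (the values mirror the updated tests in this diff, not recommendations):

    from parsl.config import Config

    config = Config(
        executors=[htex],
        strategy='none',        # init_blocks are still scaled out, once, by the poller
        strategy_period=0.1,    # how often the poller runs, hence how soon that first scale-out happens
    )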
parsl/monitoring/monitoring.py CHANGED
@@ -8,8 +8,6 @@ import zmq

  import queue

- import parsl.monitoring.remote
-
  from parsl.multiprocessing import ForkProcess, SizedQueue
  from multiprocessing import Process
  from multiprocessing.queues import Queue
@@ -23,7 +21,7 @@ from parsl.serialize import deserialize
  from parsl.monitoring.router import router_starter
  from parsl.monitoring.message_type import MessageType
  from parsl.monitoring.types import AddressedMonitoringMessage
- from typing import cast, Any, Callable, Dict, Optional, Sequence, Tuple, Union, TYPE_CHECKING
+ from typing import cast, Any, Optional, Tuple, Union, TYPE_CHECKING

  _db_manager_excepts: Optional[Exception]

@@ -269,23 +267,6 @@ class MonitoringHub(RepresentationMixin):
  self.filesystem_proc.terminate()
  self.filesystem_proc.join()

- @staticmethod
- def monitor_wrapper(f: Any,
- args: Sequence,
- kwargs: Dict,
- try_id: int,
- task_id: int,
- monitoring_hub_url: str,
- run_id: str,
- logging_level: int,
- sleep_dur: float,
- radio_mode: str,
- monitor_resources: bool,
- run_dir: str) -> Tuple[Callable, Sequence, Dict]:
- return parsl.monitoring.remote.monitor_wrapper(f, args, kwargs, try_id, task_id, monitoring_hub_url,
- run_id, logging_level, sleep_dur, radio_mode,
- monitor_resources, run_dir)
-

  @wrap_with_logs
  def filesystem_receiver(logdir: str, q: "queue.Queue[AddressedMonitoringMessage]", run_dir: str) -> None:
parsl/tests/site_tests/test_provider.py CHANGED
@@ -58,7 +58,7 @@ def test_provider():
  logger.info("Job in terminal state")

  _, current_jobs = executor._get_block_and_job_ids()
- # PR 1952 stoped removing scale_in blocks from self.blocks
+ # PR 1952 stoped removing scale_in blocks from self.blocks_to_job_id
  # A new PR will handle removing blocks from self.block
  # this includes failed/completed/canceled blocks
  assert len(current_jobs) == 1, "Expected current_jobs == 1"
parsl/tests/test_htex/test_disconnected_blocks.py CHANGED
@@ -26,7 +26,6 @@ def local_config():
  ),
  )
  ],
- run_dir="/tmp/test_htex",
  max_idletime=0.5,
  strategy='htex_auto_scale',
  )
parsl/tests/test_htex/test_drain.py CHANGED
@@ -35,6 +35,7 @@ def local_config():
  )
  ],
  strategy='none',
+ strategy_period=0.1
  )


parsl/tests/test_monitoring/test_htex_init_blocks_vs_monitoring.py CHANGED
@@ -59,24 +59,27 @@ def test_row_counts(tmpd_cwd, strategy):
  from sqlalchemy import text

  db_url = f"sqlite:///{tmpd_cwd}/monitoring.db"
- parsl.load(fresh_config(tmpd_cwd, strategy, db_url))
+ with parsl.load(fresh_config(tmpd_cwd, strategy, db_url)):
+ dfk = parsl.dfk()
+ run_id = dfk.run_id

- this_app().result()
+ this_app().result()

- parsl.dfk().cleanup()
  parsl.clear()

  engine = sqlalchemy.create_engine(db_url)
  with engine.begin() as connection:

- result = connection.execute(text("SELECT COUNT(DISTINCT block_id) FROM block"))
+ binds = {"run_id": run_id}
+
+ result = connection.execute(text("SELECT COUNT(DISTINCT block_id) FROM block WHERE run_id = :run_id"), binds)
  (c, ) = result.first()
  assert c == 1, "We should see a single block in this database"

- result = connection.execute(text("SELECT COUNT(*) FROM block WHERE block_id = 0 AND status = 'PENDING'"))
+ result = connection.execute(text("SELECT COUNT(*) FROM block WHERE block_id = 0 AND status = 'PENDING' AND run_id = :run_id"), binds)
  (c, ) = result.first()
  assert c == 1, "There should be a single pending status"

- result = connection.execute(text("SELECT COUNT(*) FROM block WHERE block_id = 0 AND status = 'CANCELLED'"))
+ result = connection.execute(text("SELECT COUNT(*) FROM block WHERE block_id = 0 AND status = 'CANCELLED' AND run_id = :run_id"), binds)
  (c, ) = result.first()
  assert c == 1, "There should be a single cancelled status"
parsl/tests/test_python_apps/test_context_manager.py CHANGED
@@ -2,6 +2,7 @@ import parsl
  from parsl.tests.configs.local_threads import fresh_config
  import pytest
  from parsl.errors import NoDataFlowKernelError
+ from parsl.dataflow.dflow import DataFlowKernel


  @parsl.python_app
@@ -25,9 +26,8 @@ def local_teardown():
  @pytest.mark.local
  def test_within_context_manger():
  config = fresh_config()
- with parsl.load(config=config):
- py_future = square(2)
- assert py_future.result() == 4
+ with parsl.load(config=config) as dfk:
+ assert isinstance(dfk, DataFlowKernel)

  bash_future = foo(1)
  assert bash_future.result() == 0
parsl/tests/test_scaling/test_shutdown_scalein.py CHANGED
@@ -0,0 +1,78 @@
+ import threading
+
+ import pytest
+
+ import parsl
+ from parsl.channels import LocalChannel
+ from parsl.config import Config
+ from parsl.executors import HighThroughputExecutor
+ from parsl.launchers import SimpleLauncher
+ from parsl.providers import LocalProvider
+
+ import random
+
+ # we need some blocks, but it doesn't matter too much how many, as long
+ # as they can all start up and get registered within the try_assert
+ # timeout later on.
+ BLOCK_COUNT = 3
+
+
+ class AccumulatingLocalProvider(LocalProvider):
+ def __init__(self, *args, **kwargs):
+ # Use a list for submitted job IDs because if there are multiple
+ # submissions returning the same job ID, this test should count
+ # those...
+ self.submit_job_ids = []
+
+ # ... but there's no requirement, I think, that cancel must be called
+ # only once per job id. What matters here is that each job ID is
+ # cancelled at least once.
+ self.cancel_job_ids = set()
+
+ super().__init__(*args, **kwargs)
+
+ def submit(self, *args, **kwargs):
+ job_id = super().submit(*args, **kwargs)
+ self.submit_job_ids.append(job_id)
+ return job_id
+
+ def cancel(self, job_ids):
+ self.cancel_job_ids.update(job_ids)
+ return super().cancel(job_ids)
+
+
+ @pytest.mark.local
+ def test_shutdown_scalein_blocks(tmpd_cwd, try_assert):
+ """
+ This test scales up several blocks, and then checks that they are all
+ scaled in at DFK shutdown.
+ """
+ accumulating_provider = AccumulatingLocalProvider(
+ channel=LocalChannel(),
+ init_blocks=BLOCK_COUNT,
+ min_blocks=0,
+ max_blocks=0,
+ launcher=SimpleLauncher(),
+ )
+
+ htex = HighThroughputExecutor(
+ label="htex_local",
+ cores_per_worker=1,
+ provider=accumulating_provider
+ )
+
+ config = Config(
+ executors=[htex],
+ strategy='none',
+ strategy_period=0.1,
+ run_dir=str(tmpd_cwd)
+ )
+
+ with parsl.load(config):
+ # this will wait for everything to be scaled out fully
+ try_assert(lambda: len(htex.connected_managers()) == BLOCK_COUNT)
+
+ parsl.clear()
+
+ assert len(accumulating_provider.submit_job_ids) == BLOCK_COUNT, f"Exactly {BLOCK_COUNT} blocks should have been launched"
+ assert len(accumulating_provider.cancel_job_ids) == BLOCK_COUNT, f"Exactly {BLOCK_COUNT} blocks should have been scaled in"
parsl/version.py CHANGED
@@ -3,4 +3,4 @@
  Year.Month.Day[alpha/beta/..]
  Alphas will be numbered like this -> 2024.12.10a0
  """
- VERSION = '2024.03.25'
+ VERSION = '2024.04.01'
{parsl-2024.3.25.data → parsl-2024.4.1.data}/scripts/process_worker_pool.py CHANGED
@@ -335,14 +335,17 @@ class Manager:
  self.heartbeat_to_incoming()
  last_beat = time.time()

- if self.drain_time and time.time() > self.drain_time:
+ if time.time() > self.drain_time:
  logger.info("Requesting drain")
  self.drain_to_incoming()
- self.drain_time = None
  # This will start the pool draining...
  # Drained exit behaviour does not happen here. It will be
  # driven by the interchange sending a DRAINED_CODE message.

+ # now set drain time to the far future so we don't send a drain
+ # message every iteration.
+ self.drain_time = float('inf')
+
  poll_duration_s = max(0, next_interesting_event_time - time.time())
  socks = dict(poller.poll(timeout=poll_duration_s * 1000))

{parsl-2024.3.25.dist-info → parsl-2024.4.1.dist-info}/METADATA CHANGED
@@ -1,9 +1,9 @@
  Metadata-Version: 2.1
  Name: parsl
- Version: 2024.3.25
+ Version: 2024.4.1
  Summary: Simple data dependent workflows in Python
  Home-page: https://github.com/Parsl/parsl
- Download-URL: https://github.com/Parsl/parsl/archive/2024.03.25.tar.gz
+ Download-URL: https://github.com/Parsl/parsl/archive/2024.04.01.tar.gz
  Author: The Parsl Team
  Author-email: parsl@googlegroups.com
  License: Apache 2.0
{parsl-2024.3.25.dist-info → parsl-2024.4.1.dist-info}/RECORD CHANGED
@@ -8,7 +8,7 @@ parsl/multiprocessing.py,sha256=hakfdg-sgxEjwloZeDrt6EhzwdzecvjJhkPHHxh8lII,1938
  parsl/process_loggers.py,sha256=1G3Rfrh5wuZNo2X03grG4kTYPGOxz7hHCyG6L_A3b0A,1137
  parsl/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  parsl/utils.py,sha256=A3WDMGaNB4ajVx_jCuc-74W6PFy4zswJy-pLE7u8Dz0,10979
- parsl/version.py,sha256=xw1wZ0QmQ9UaGCS5xiCsTOYpLaH5Ht4qp-xYq_4FbaE,131
+ parsl/version.py,sha256=2xAmun0db7lx3hTmt30wYML-vGWWFXBqeVr7_i8j3Ac,131
  parsl/app/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  parsl/app/app.py,sha256=wAHchJetgnicT1pn0NJKDeDX0lV3vDFlG8cQd_Ciax4,8522
  parsl/app/bash.py,sha256=bx9x1XFwkOTpZZD3CPwnVL9SyNRDjbUGtOnuGLvxN_8,5396
@@ -60,7 +60,7 @@ parsl/data_provider/http.py,sha256=nDHTW7XmJqAukWJjPRQjyhUXt8r6GsQ36mX9mv_wOig,2
  parsl/data_provider/rsync.py,sha256=2-ZxqrT-hBj39x082NusJaBqsGW4Jd2qCW6JkVPpEl0,4254
  parsl/data_provider/staging.py,sha256=l-mAXFburs3BWPjkSmiQKuAgJpsxCG62yATPDbrafYI,4523
  parsl/dataflow/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- parsl/dataflow/dflow.py,sha256=38bZ0cFnCKqlbyf-XYxyA8PscsypQNOzd_8bIUWT0wM,64461
+ parsl/dataflow/dflow.py,sha256=N72pg1yxgICjOAQVsTN_n3PwTaE2MYfPcc8MUTc8oMA,63593
  parsl/dataflow/errors.py,sha256=w2vOt_ymzG2dOqJUO4IDcmTlrCIHlMZL8nBVyVq0O_8,2176
  parsl/dataflow/futures.py,sha256=aVfEUTzp4-EdunDAtNcqVQf8l_A7ArDi2c82KZMwxfY,5256
  parsl/dataflow/memoization.py,sha256=AsJO6c6cRp2ac6H8uGn2USlEi78_nX3QWvpxYt4XdYE,9583
@@ -68,9 +68,9 @@ parsl/dataflow/rundirs.py,sha256=XKmBZpBEIsGACBhYOkbbs2e5edC0pQegJcSlk4FWeag,115
  parsl/dataflow/states.py,sha256=hV6mfv-y4A6xrujeQglcomnfEs7y3Xm2g6JFwC6dvgQ,2612
  parsl/dataflow/taskrecord.py,sha256=bzIBmlDTsRrELtB9PUQwxTWcwrCd8aMsUAzvijle1eo,3114
  parsl/executors/__init__.py,sha256=J50N97Nm9YRjz6K0oNXDxUYIsDjL43_tp3LVb2w7n-M,381
- parsl/executors/base.py,sha256=CNWddQ7eP_Kqd2THv4bj5Dg1Jgb3dbI3z3aznsRP6dc,4574
+ parsl/executors/base.py,sha256=AFX7AlMbOoXaImrttO74vhNWhbJwu41JFS5EaWPl8fg,4559
  parsl/executors/errors.py,sha256=xVswxgi7vmJcUMCeYDAPK8sQT2kHFFROVoOr0dnmcWE,2098
- parsl/executors/status_handling.py,sha256=DP7Wu2BjoMPcJ8bdYmwALHvC5Fd1y8-g9sLj_cKoGOQ,10574
+ parsl/executors/status_handling.py,sha256=RRPDFOCxrQT8_5XTpF5-keml4pttxX1rxztZGaJ1Wzw,10620
  parsl/executors/threads.py,sha256=bMU3JFghm17Lpcua13pr3NgQhkUDDc2mqvF2yJBrVNQ,3353
  parsl/executors/flux/__init__.py,sha256=P9grTTeRPXfqXurFhlSS7XhmE6tTbnCnyQ1f9b-oYHE,136
  parsl/executors/flux/execute_parsl_task.py,sha256=yUG_WjZLcX8LrgPl26mpEBWZhQMlVNbRLGu08yIjdf4,1553
@@ -78,14 +78,14 @@ parsl/executors/flux/executor.py,sha256=0omXRPvykdW5VZb8mwgBJjxVk4H6G8xoL5D_R9pu
  parsl/executors/flux/flux_instance_manager.py,sha256=tTEOATClm9SwdgLeBRWPC6D55iNDuh0YxqJOw3c3eQ4,2036
  parsl/executors/high_throughput/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  parsl/executors/high_throughput/errors.py,sha256=vl69wLuVOplbKxHI9WphEGBExHWkTn5n8T9QhBXuNH0,380
- parsl/executors/high_throughput/executor.py,sha256=mSxDizbw79zmnW4yMDuRflZfGwZKciWmZ8XW9rAy4gI,37591
+ parsl/executors/high_throughput/executor.py,sha256=W9515Ggt1iLAz1Xs0HYB54cxbfsHj2beW2Pdt2alKws,37225
  parsl/executors/high_throughput/interchange.py,sha256=Rt6HyFvQYFuqUJ1ytXmUFTDIK9wOBm4l96IHoL6OFRc,31491
  parsl/executors/high_throughput/manager_record.py,sha256=w5EwzVqPtsLOyOW8jP44U3uaogt8H--tkwp7FNyKN_o,385
  parsl/executors/high_throughput/monitoring_info.py,sha256=3gQpwQjjNDEBz0cQqJZB6hRiwLiWwXs83zkQDmbOwxY,297
  parsl/executors/high_throughput/mpi_prefix_composer.py,sha256=GPSejuNL407gvzw9f7dTWArTLn6heTi-erJjgcM-_8Y,4273
  parsl/executors/high_throughput/mpi_resource_management.py,sha256=geLYmp2teKYgTnzATAR_JPtjAa0ysu6pHpXs90vwkds,7975
  parsl/executors/high_throughput/probe.py,sha256=lvnuf-vBv57tHvFh-J51F9sDYBES7jCgs6KYgWvmKRs,2749
- parsl/executors/high_throughput/process_worker_pool.py,sha256=hUKno44b3hzPQHKnF91K_BQxusDoapT8K8y-2E0DlDo,41109
+ parsl/executors/high_throughput/process_worker_pool.py,sha256=PjZ2rFieJUF_sVf2GCsRnsf3X3exYGX1qftA2Zs3kHc,41221
  parsl/executors/high_throughput/zmq_pipes.py,sha256=TEIr1PcBDVbchBukzPaEsku2lbIIFCYYjeUq5zw_VBA,6514
  parsl/executors/radical/__init__.py,sha256=CKbtV2numw5QvgIBq1htMUrt9TqDCIC2zifyf2svTNU,186
  parsl/executors/radical/executor.py,sha256=5r9WZkOr0clg79zm35E7nC7zNv0DpbyM8iTC2B6d4N0,21024
@@ -95,7 +95,7 @@ parsl/executors/radical/rpex_worker.py,sha256=1M1df-hzFdmZMWbRZlUzIX7uAWMKJ_SkxL
  parsl/executors/taskvine/__init__.py,sha256=sWIJdvSLgQKul9dlSjIkNat7yBDgU3SrBF3X2yhT86E,293
  parsl/executors/taskvine/errors.py,sha256=MNS_NjpvHjwevQXOjqjSEBFroqEWi-LT1ZEVZ2C5Dx0,652
  parsl/executors/taskvine/exec_parsl_function.py,sha256=oUAKbPWwpbzWwQ47bZQlVDxS8txhnhPsonMf3AOEMGQ,7085
- parsl/executors/taskvine/executor.py,sha256=YAPnZZV31R_H1A4mILNIiDQVVvzO6G1wUo0HIjmcw7g,32264
+ parsl/executors/taskvine/executor.py,sha256=gpVJg0MXl4D6YzEklhrhRXnbEQxRJaPBvGNGrro0qKI,32531
  parsl/executors/taskvine/factory.py,sha256=sHhfGv7xRFrWkQclzRXuFEAHuSXhsZu2lR5LJ81aucA,2638
  parsl/executors/taskvine/factory_config.py,sha256=AbE2fN2snrF5ITYrrS4DnGn2XkJHUFr_17DYHDHIwq0,3693
  parsl/executors/taskvine/manager.py,sha256=VxVN2L5zFVPNfSAJrGgq87MRJKpcxf-BHdO5QWxB4TU,25822
@@ -104,15 +104,15 @@ parsl/executors/taskvine/utils.py,sha256=iSrIogeiauL3UNy_9tiZp1cBSNn6fIJkMYQRVi1
  parsl/executors/workqueue/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  parsl/executors/workqueue/errors.py,sha256=ghB93Ptb_QbOAvgLe7siV_snRRkU_T-cFHv3AR6Ziwo,541
  parsl/executors/workqueue/exec_parsl_function.py,sha256=NtWNeBvRqksej38eRPw8zPBJ1CeW6vgaitve0tfz_qc,7801
- parsl/executors/workqueue/executor.py,sha256=QnHNdj7aVVYOzK-jmo0YqKMqW2__XmmruHHilqGUVy0,49823
+ parsl/executors/workqueue/executor.py,sha256=GG5F0PCu6gLvJjWqa7nxqHxIyRa_Iqt25NESDd7MExA,50038
  parsl/executors/workqueue/parsl_coprocess.py,sha256=kEFGC-A97c_gweUPvrc9EEGume7vUpkJLJlyAb87xtQ,5737
  parsl/executors/workqueue/parsl_coprocess_stub.py,sha256=_bJmpPIgL42qM6bVzeEKt1Mn1trSP41rtJguXxPGfHI,735
  parsl/jobs/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  parsl/jobs/error_handlers.py,sha256=WcWZUA7KyE1ocX5zrMf_EwqOob8Jb7uHMjD3nlb_BUo,2319
  parsl/jobs/errors.py,sha256=cpSQXCrlKtuHsQf7usjF-lX8XsDkFnE5kWpmFjiN6OU,178
- parsl/jobs/job_status_poller.py,sha256=hLFyT9tnvYrIev72bg0jZUhabYR9GJ0o15M5l9TR3Qo,5422
+ parsl/jobs/job_status_poller.py,sha256=owi712XlFEBLNEb-uSXq0hoCWm1Qgbq2cFMScc98w9I,5526
  parsl/jobs/states.py,sha256=rPBoAEEudKngWFijlwvXXhAagDs_9DCXvQP9rwzVgCM,4855
- parsl/jobs/strategy.py,sha256=a-W3vxEHHCfe521LMfSoZLpJjdTtwCfTgdn1ChxzUuI,12959
+ parsl/jobs/strategy.py,sha256=Jgz5H0sFLhAhFxqp6UCTihpDgG-HGp7NGz0ynXOmDSo,13656
  parsl/launchers/__init__.py,sha256=k8zAB3IBP-brfqXUptKwGkvsIRaXjAJZNBJa2XVtY1A,546
  parsl/launchers/base.py,sha256=CblcvPTJiu-MNLWaRtFe29SZQ0BpTOlaY8CGcHdlHIE,538
  parsl/launchers/errors.py,sha256=v5i460H_rovzukSccQetxQBVtd92jLQz-NbuDe2TdGI,467
@@ -120,7 +120,7 @@ parsl/launchers/launchers.py,sha256=VB--fiVv_IQne3DydTMSdGUY0o0g69puAs-Hd3mJ2vo,
  parsl/monitoring/__init__.py,sha256=0ywNz6i0lM1xo_7_BIxhETDGeVd2C_0wwD7qgeaMR4c,83
  parsl/monitoring/db_manager.py,sha256=hdmmXSTXp8Wwhr7vLpQalD_ahRl3SNxKYVsplnThRk8,37021
  parsl/monitoring/message_type.py,sha256=Khn88afNxcOIciKiCK4GLnn90I5BlRTiOL3zK-P07yQ,401
- parsl/monitoring/monitoring.py,sha256=ZkwbIKGG7Zx8Nxj8TEaTT_FrmjwJvmv1jNlEmaUZYAM,14313
+ parsl/monitoring/monitoring.py,sha256=5R3-T4vtxedwQnde5aK6MVssKvjf_VU17S0gcft6oAc,13422
  parsl/monitoring/radios.py,sha256=T2_6QuUjC-dd_7qMnIk6WHQead1iWz7m_P6ZC4QAqdA,5265
  parsl/monitoring/remote.py,sha256=OcIgudujtPO_DsY-YV36x92skeiNdGt-6aEOqaCU8T0,13900
  parsl/monitoring/router.py,sha256=Y_PJjffS23HwfTJClhg5W4gUXnkAI_3crjjZMoyzxVA,9592
@@ -279,7 +279,7 @@ parsl/tests/scaling_tests/wqex_condor.py,sha256=hMo1hK8aj-L36vj0PoByQvL9YQTVrenL
  parsl/tests/scaling_tests/wqex_local.py,sha256=C-eYESKhi4V4XZuHLO0lgP5rovEj8LNYGJOHLpUDdOM,545
  parsl/tests/site_tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  parsl/tests/site_tests/site_config_selector.py,sha256=hk8SO0QMLFk9Ef2QxSa4JTnXJxpUks1mLOeDhaquyqI,1980
- parsl/tests/site_tests/test_provider.py,sha256=5rtzDNj7LWCuWdic6ifEQIfqErYOhL-MPRuaehaMXEQ,2684
+ parsl/tests/site_tests/test_provider.py,sha256=mRhfOnmsNi5wh3v8XFY4OIyMZgf2321zyt7KLiaI_YQ,2694
  parsl/tests/site_tests/test_site.py,sha256=gk23zU9BAZgctkSRAxUTolZKo6abVi45vrEavwpRCcs,1954
  parsl/tests/sites/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  parsl/tests/sites/test_affinity.py,sha256=4bym6aNynLCpPAjlDqFYugQbVHsvJlzBu20TCWYaX1M,1582
@@ -313,11 +313,6 @@ parsl/tests/test_checkpointing/test_regression_232.py,sha256=AsI6AJ0DcFaefAbEY9q
  parsl/tests/test_checkpointing/test_regression_233.py,sha256=jii7BKuygK6KMIGtg4IeBjix7Z28cYhv57rE9ixoXMU,1774
  parsl/tests/test_checkpointing/test_regression_239.py,sha256=P5kmf1LOo_qHtArkBLMhdvNbSPtURDU5u2tI8SXZTb0,2441
  parsl/tests/test_checkpointing/test_task_exit.py,sha256=3-ldQhX7YVEAowWK2TiZ6nrQQ7ktfWr-qaCShtjJZK8,1721
- parsl/tests/test_data/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- parsl/tests/test_data/test_file.py,sha256=Dqen1RJ-uLfzX8xIyJN2Qw3oVy4cFkQQVh6KC72xFDA,950
- parsl/tests/test_data/test_file_apps.py,sha256=zTwLAf4R-lFLoqeyz9ZfFeVTs9PL9dmpKjeZEVG7C2s,1540
- parsl/tests/test_data/test_file_staging.py,sha256=PTBZhTQJsNtUi38uUZOdIb8yw18-qxMoY9GFodzPYuE,674
- parsl/tests/test_data/test_output_chain_filenames.py,sha256=9Mxfl9oU_x1ZSP8JSxT_t4WFCfDTprLjSeFNMm4vVxA,894
  parsl/tests/test_docs/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  parsl/tests/test_docs/test_from_slides.py,sha256=0qJHAsSN3eqn4LAFTyCAq1rIUOotBzyQg7d_rJfBoes,653
  parsl/tests/test_docs/test_kwargs.py,sha256=-rMtAtarg2FOdxMuDLsZY5Crn_jmSwtelMwRNEtTlVk,925
@@ -340,8 +335,8 @@ parsl/tests/test_htex/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3h
  parsl/tests/test_htex/test_basic.py,sha256=GIOF3cr6A87QDVMxeN0LrvJnXO2Nap0c-TH462OaBK4,464
  parsl/tests/test_htex/test_connected_blocks.py,sha256=0628oJ_0_aVsda5xuFwG3_3q8ZiEAM-sfIOINkUHQrk,1639
  parsl/tests/test_htex/test_cpu_affinity_explicit.py,sha256=tv12ojw4DdymlVBjVNnrFX7_mhwix2jnBLkRbKOQRao,1422
- parsl/tests/test_htex/test_disconnected_blocks.py,sha256=HQhtX757t4CdWbtu-VT3MttLHQowGyHPOp9vX0TN_a4,1890
- parsl/tests/test_htex/test_drain.py,sha256=bnbQfoEQi9EIlDJsDMp7adR45gsAScTNPoOZ2vc12HY,2260
+ parsl/tests/test_htex/test_disconnected_blocks.py,sha256=iga7wmhGACwUN6gkEFPw1dLodj6SzNZpevgSHNYSyjI,1856
+ parsl/tests/test_htex/test_drain.py,sha256=BvPQIo0xx-z191eVR2rG51x22yzqD-6dLSH7bCAUhOg,2288
  parsl/tests/test_htex/test_htex.py,sha256=4dXtcthZQvgEDtMc00g6Pw7FnqNWB_0j8fuJqHKO-IE,3896
  parsl/tests/test_htex/test_manager_failure.py,sha256=gemQopZoDEoZLOvep5JZkY6tQlZoko8Z0Kmpj1-Gbws,1161
  parsl/tests/test_htex/test_missing_worker.py,sha256=oiDN3ylsf-72jmX-Y5OWA2kQWpbVbvmoSLnu2vnyZeY,976
@@ -353,7 +348,7 @@ parsl/tests/test_monitoring/test_app_names.py,sha256=4Ziggxv0JLP0UGAd5jjXdivUdZQ
  parsl/tests/test_monitoring/test_basic.py,sha256=uXWx2O2Y2gfSO4e8zTjyj5bucKHG9OVzMxQNnq9abeY,2776
  parsl/tests/test_monitoring/test_db_locks.py,sha256=PGoRmvqA6AYPXTPHOZPLH38Z4D6EEgSb6ZgNfZtwIGk,2910
  parsl/tests/test_monitoring/test_fuzz_zmq.py,sha256=CpTRF2z2xpshlHHTNiNIIJMOx8bxSmSyAwbMYcOkgBk,3121
- parsl/tests/test_monitoring/test_htex_init_blocks_vs_monitoring.py,sha256=aFxQWL0p3kmVWHgLjYTx0MdY3JMkVPQSae3pmRn5T3s,2605
+ parsl/tests/test_monitoring/test_htex_init_blocks_vs_monitoring.py,sha256=Lfa6ENZWrExRsZcISMdF_G4VjswzSb0wlRSQFoZXkyQ,2765
  parsl/tests/test_monitoring/test_incomplete_futures.py,sha256=9lJhkWlVB8gCCTkFjObzoh1uCL1pRmU6gFgEzLCztnY,2021
  parsl/tests/test_monitoring/test_memoization_representation.py,sha256=tErT7zseSMaQ5eNmK3hH90J6OZKuAaFQG50OXK6Jy9s,2660
  parsl/tests/test_monitoring/test_viz_colouring.py,sha256=k8SiELxPtnGYZ4r02VQt46RC61fGDVC4nmY768snX1U,591
@@ -374,7 +369,7 @@ parsl/tests/test_providers/test_submiterror_deprecation.py,sha256=ZutVj_0VJ7M-5U
  parsl/tests/test_python_apps/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  parsl/tests/test_python_apps/test_arg_input_types.py,sha256=JXpfHiu8lr9BN6u1OzqFvGwBhxzsGTPMewHx6Wdo-HI,670
  parsl/tests/test_python_apps/test_basic.py,sha256=lFqh4ugePbp_FRiHGUXxzV34iS7l8C5UkxTHuLcpnYs,855
- parsl/tests/test_python_apps/test_context_manager.py,sha256=ajG8Vy0gxXzMO_rAm7yO_WO6XJCjEqUgmdRHdPBLCY0,844
+ parsl/tests/test_python_apps/test_context_manager.py,sha256=iMBjOFmqEO2rDGPJwDySa1kWz-dRnZZNEWDPipS_Xi4,877
  parsl/tests/test_python_apps/test_dep_standard_futures.py,sha256=BloeaYBci0jS5al2d8Eqe3OfZ1tvolA5ZflOBQPR9Wo,859
  parsl/tests/test_python_apps/test_dependencies.py,sha256=IRiTI_lPoWBSFSFnaBlE6Bv08PKEaf-qj5dfqO2RjT0,272
  parsl/tests/test_python_apps/test_depfail_propagation.py,sha256=3q3HlVWrOixFtXWBvR_ypKtbdAHAJcKndXQ5drwrBQU,1488
@@ -417,6 +412,7 @@ parsl/tests/test_scaling/test_block_error_handler.py,sha256=VFKs_jq7yd7bpdfYva3S
  parsl/tests/test_scaling/test_regression_1621.py,sha256=iRu3GFsg2l9J61AVZKWLc6zJcvI2JYD0WvtTYDSv22I,1770
  parsl/tests/test_scaling/test_scale_down.py,sha256=T8NVmoIebdpSjrNJCdgDHumpz9eKLkJrpeW7Kwi8cBg,2821
  parsl/tests/test_scaling/test_scale_down_htex_auto_scale.py,sha256=1vP2a8qygnxuUji7B3kJOUgwjmmIC1fDPhDdqzs5YFA,4597
+ parsl/tests/test_scaling/test_shutdown_scalein.py,sha256=8QYnU67Ezx7Il9edR-Wrwzxp3xE3E3ocXfrs4P1eCFQ,2417
  parsl/tests/test_serialization/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  parsl/tests/test_serialization/test_2555_caching_deserializer.py,sha256=J8__b4djA5tErd8FUSXGkGcdXlW2KHbBWRbCTAnV08Q,767
  parsl/tests/test_serialization/test_basic.py,sha256=51KshqIk2RNr7S2iSkl5tZo40CJBb0h6uby8YPgOGlg,543
@@ -432,6 +428,10 @@ parsl/tests/test_staging/test_1316.py,sha256=pj1QbmOJSRES1R4Ov380MmVe6xXvPUXh4FB
  parsl/tests/test_staging/test_docs_1.py,sha256=SIGIYo9w2vwkQ-i9Io38sYYj8ns7uFrD1uziR_0Ae2w,628
  parsl/tests/test_staging/test_docs_2.py,sha256=zy6P6aanR27_U6ASDrB0YyG8udyRvA8r2HRDX5RcslU,463
  parsl/tests/test_staging/test_elaborate_noop_file.py,sha256=d694K2jKhyBM0bIY9j3w_huVjTU2CVFPgIRfYFpIQQM,2466
+ parsl/tests/test_staging/test_file.py,sha256=Dqen1RJ-uLfzX8xIyJN2Qw3oVy4cFkQQVh6KC72xFDA,950
+ parsl/tests/test_staging/test_file_apps.py,sha256=zTwLAf4R-lFLoqeyz9ZfFeVTs9PL9dmpKjeZEVG7C2s,1540
+ parsl/tests/test_staging/test_file_staging.py,sha256=PTBZhTQJsNtUi38uUZOdIb8yw18-qxMoY9GFodzPYuE,674
+ parsl/tests/test_staging/test_output_chain_filenames.py,sha256=9Mxfl9oU_x1ZSP8JSxT_t4WFCfDTprLjSeFNMm4vVxA,894
  parsl/tests/test_staging/test_staging_ftp.py,sha256=EkRoTcQ00FZGh8lDVYBdKb-pQ-ybW2Sx5vqGltoMGJ4,778
  parsl/tests/test_staging/test_staging_ftp_in_task.py,sha256=kR2XrGvbvVFDpHg53NnjO04kqEksTJjQAMQwYqBdb2M,884
  parsl/tests/test_staging/test_staging_globus.py,sha256=ds8nDH5dNbI10FV_GxMHyVaY6GPnuPPzkX9IiqROLF0,2339
@@ -443,12 +443,12 @@ parsl/tests/test_utils/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3
  parsl/tests/test_utils/test_representation_mixin.py,sha256=kUZeIDwA2rlbJ3-beGzLLwf3dOplTMCrWJN87etHcyY,1633
  parsl/usage_tracking/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  parsl/usage_tracking/usage.py,sha256=pSADeogWqvkYI_n2pojv4IWDEFAQ3KwXNx6LDTohMHQ,6684
- parsl-2024.3.25.data/scripts/exec_parsl_function.py,sha256=NtWNeBvRqksej38eRPw8zPBJ1CeW6vgaitve0tfz_qc,7801
- parsl-2024.3.25.data/scripts/parsl_coprocess.py,sha256=Y7Tc-h9WGui-YDe3w_h91w2Sm1JNL1gJ9kAV4PE_gw8,5722
- parsl-2024.3.25.data/scripts/process_worker_pool.py,sha256=iVrw160CpTAVuX9PH-ezU4ebm9C1_U6IMrkcdyTQJ58,41095
- parsl-2024.3.25.dist-info/LICENSE,sha256=tAkwu8-AdEyGxGoSvJ2gVmQdcicWw3j1ZZueVV74M-E,11357
- parsl-2024.3.25.dist-info/METADATA,sha256=9FTVzNNJqpfn1NqveTatneCqEQhMu3Oo9kA-M7fx40k,3974
- parsl-2024.3.25.dist-info/WHEEL,sha256=GJ7t_kWBFywbagK5eo9IoUwLW6oyOeTKmQ-9iHFVNxQ,92
- parsl-2024.3.25.dist-info/entry_points.txt,sha256=XqnsWDYoEcLbsMcpnYGKLEnSBmaIe1YoM5YsBdJG2tI,176
- parsl-2024.3.25.dist-info/top_level.txt,sha256=PIheYoUFQtF2icLsgOykgU-Cjuwr2Oi6On2jo5RYgRM,6
- parsl-2024.3.25.dist-info/RECORD,,
+ parsl-2024.4.1.data/scripts/exec_parsl_function.py,sha256=NtWNeBvRqksej38eRPw8zPBJ1CeW6vgaitve0tfz_qc,7801
+ parsl-2024.4.1.data/scripts/parsl_coprocess.py,sha256=Y7Tc-h9WGui-YDe3w_h91w2Sm1JNL1gJ9kAV4PE_gw8,5722
+ parsl-2024.4.1.data/scripts/process_worker_pool.py,sha256=V3K4admJ7QvwR9sN0GH-c6uOTgNU8zVb76q872WtYCo,41207
+ parsl-2024.4.1.dist-info/LICENSE,sha256=tAkwu8-AdEyGxGoSvJ2gVmQdcicWw3j1ZZueVV74M-E,11357
+ parsl-2024.4.1.dist-info/METADATA,sha256=kM9BKd2VQ5ij6tV9au1Z1GvCH9MZcB7UWNo_YJVbqTA,3973
+ parsl-2024.4.1.dist-info/WHEEL,sha256=GJ7t_kWBFywbagK5eo9IoUwLW6oyOeTKmQ-9iHFVNxQ,92
+ parsl-2024.4.1.dist-info/entry_points.txt,sha256=XqnsWDYoEcLbsMcpnYGKLEnSBmaIe1YoM5YsBdJG2tI,176
+ parsl-2024.4.1.dist-info/top_level.txt,sha256=PIheYoUFQtF2icLsgOykgU-Cjuwr2Oi6On2jo5RYgRM,6
+ parsl-2024.4.1.dist-info/RECORD,,