parsl 2025.9.22__py3-none-any.whl → 2025.10.6__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Files changed (32)
  1. parsl/benchmark/perf.py +22 -9
  2. parsl/dataflow/dflow.py +6 -4
  3. parsl/dataflow/memoization.py +9 -19
  4. parsl/executors/high_throughput/executor.py +76 -20
  5. parsl/executors/high_throughput/interchange.py +4 -3
  6. parsl/executors/high_throughput/mpi_executor.py +1 -2
  7. parsl/executors/high_throughput/mpi_resource_management.py +3 -4
  8. parsl/executors/high_throughput/process_worker_pool.py +25 -4
  9. parsl/executors/high_throughput/zmq_pipes.py +7 -24
  10. parsl/executors/taskvine/executor.py +5 -1
  11. parsl/tests/configs/taskvine_ex.py +1 -1
  12. parsl/tests/test_checkpointing/test_periodic.py +15 -9
  13. parsl/tests/test_checkpointing/test_regression_233.py +0 -1
  14. parsl/tests/test_htex/test_htex.py +36 -1
  15. parsl/tests/test_htex/test_interchange_exit_bad_registration.py +0 -1
  16. parsl/tests/test_mpi_apps/test_mpi_scheduler.py +12 -12
  17. parsl/tests/test_python_apps/test_garbage_collect.py +1 -6
  18. parsl/tests/test_python_apps/test_memoize_2.py +11 -1
  19. parsl/tests/test_regression/test_3874.py +47 -0
  20. parsl/version.py +1 -1
  21. {parsl-2025.9.22.data → parsl-2025.10.6.data}/scripts/interchange.py +4 -3
  22. {parsl-2025.9.22.data → parsl-2025.10.6.data}/scripts/process_worker_pool.py +25 -4
  23. {parsl-2025.9.22.dist-info → parsl-2025.10.6.dist-info}/METADATA +3 -4
  24. {parsl-2025.9.22.dist-info → parsl-2025.10.6.dist-info}/RECORD +30 -31
  25. parsl/tests/configs/local_threads_checkpoint_periodic.py +0 -11
  26. parsl/tests/configs/local_threads_no_cache.py +0 -11
  27. {parsl-2025.9.22.data → parsl-2025.10.6.data}/scripts/exec_parsl_function.py +0 -0
  28. {parsl-2025.9.22.data → parsl-2025.10.6.data}/scripts/parsl_coprocess.py +0 -0
  29. {parsl-2025.9.22.dist-info → parsl-2025.10.6.dist-info}/LICENSE +0 -0
  30. {parsl-2025.9.22.dist-info → parsl-2025.10.6.dist-info}/WHEEL +0 -0
  31. {parsl-2025.9.22.dist-info → parsl-2025.10.6.dist-info}/entry_points.txt +0 -0
  32. {parsl-2025.9.22.dist-info → parsl-2025.10.6.dist-info}/top_level.txt +0 -0
parsl/benchmark/perf.py CHANGED
@@ -2,32 +2,45 @@ import argparse
 import concurrent.futures
 import importlib
 import time
+from typing import Any, Dict
 
 import parsl
+from parsl.dataflow.dflow import DataFlowKernel
 
 min_iterations = 2
 
 
 # TODO: factor with conftest.py where this is copy/pasted from?
-def load_dfk_from_config(filename):
+def load_dfk_from_config(filename: str) -> DataFlowKernel:
     spec = importlib.util.spec_from_file_location('', filename)
+
+    if spec is None:
+        raise RuntimeError("Could not import configuration")
+
+    module = importlib.util.module_from_spec(spec)
+
+    if spec.loader is None:
+        raise RuntimeError("Could not load configuration")
+
+    spec.loader.exec_module(module)
+
     module = importlib.util.module_from_spec(spec)
     spec.loader.exec_module(module)
 
     if hasattr(module, 'config'):
-        parsl.load(module.config)
+        return parsl.load(module.config)
     elif hasattr(module, 'fresh_config'):
-        parsl.load(module.fresh_config())
+        return parsl.load(module.fresh_config())
     else:
         raise RuntimeError("Config module does not define config or fresh_config")
 
 
 @parsl.python_app
-def app(extra_payload, parsl_resource_specification={}):
+def app(extra_payload: Any, parsl_resource_specification: Dict = {}) -> int:
     return 7
 
 
-def performance(*, resources: dict, target_t: float, args_extra_size: int):
+def performance(*, resources: dict, target_t: float, args_extra_size: int) -> None:
     n = 10
 
     delta_t: float
@@ -82,6 +95,7 @@ Example usage: python -m parsl.benchmark.perf --config parsl/tests/configs/workq
     parser.add_argument("--resources", metavar="EXPR", help="parsl_resource_specification dictionary")
     parser.add_argument("--time", metavar="SECONDS", help="target number of seconds for an iteration", default=120, type=float)
     parser.add_argument("--argsize", metavar="BYTES", help="extra bytes to add into app invocation arguments", default=0, type=int)
+    parser.add_argument("--version", action="version", version=f"parsl-perf from Parsl {parsl.__version__}")
 
     args = parser.parse_args()
 
@@ -90,10 +104,9 @@ Example usage: python -m parsl.benchmark.perf --config parsl/tests/configs/workq
     else:
         resources = {}
 
-    load_dfk_from_config(args.config)
-    performance(resources=resources, target_t=args.time, args_extra_size=args.argsize)
-    print("Cleaning up DFK")
-    parsl.dfk().cleanup()
+    with load_dfk_from_config(args.config):
+        performance(resources=resources, target_t=args.time, args_extra_size=args.argsize)
+        print("Tests complete - leaving DFK block")
     print("The end")
 
 
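The switch from an explicit `parsl.dfk().cleanup()` call to a `with` block relies on `parsl.load()` returning a `DataFlowKernel` that works as a context manager, cleaning itself up on exit. A minimal sketch of that pattern (the ThreadPoolExecutor config here is illustrative, not taken from this diff):

    import parsl
    from parsl.config import Config
    from parsl.executors.threads import ThreadPoolExecutor


    @parsl.python_app
    def double(x):
        return 2 * x


    # parsl.load() returns the DataFlowKernel; using it as a context manager
    # runs cleanup on exit, replacing the explicit dfk().cleanup() call.
    with parsl.load(Config(executors=[ThreadPoolExecutor()])):
        assert double(21).result() == 42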
parsl/dataflow/dflow.py CHANGED
@@ -175,7 +175,7 @@ class DataFlowKernel:
         else:
             checkpoint_files = []
 
-        self.memoizer = Memoizer(self, memoize=config.app_cache, checkpoint_files=checkpoint_files)
+        self.memoizer = Memoizer(memoize=config.app_cache, checkpoint_files=checkpoint_files)
         self.checkpointed_tasks = 0
         self._checkpoint_timer = None
         self.checkpoint_mode = config.checkpoint_mode
@@ -375,6 +375,7 @@ class DataFlowKernel:
             logger.info("Task {} failed due to dependency failure so skipping retries".format(task_id))
             task_record['time_returned'] = datetime.datetime.now()
             self._send_task_log_info(task_record)
+            self.memoizer.update_memo(task_record)
             with task_record['app_fu']._update_lock:
                 task_record['app_fu'].set_exception(e)
 
@@ -400,6 +401,7 @@ class DataFlowKernel:
             self.update_task_state(task_record, States.failed)
             task_record['time_returned'] = datetime.datetime.now()
             self._send_task_log_info(task_record)
+            self.memoizer.update_memo(task_record)
             with task_record['app_fu']._update_lock:
                 task_record['app_fu'].set_exception(e)
 
@@ -446,6 +448,7 @@ class DataFlowKernel:
             self.update_task_state(task_record, States.failed)
             task_record['time_returned'] = datetime.datetime.now()
             self._send_task_log_info(task_record)
+            self.memoizer.update_memo(task_record)
             with task_record['app_fu']._update_lock:
                 task_record['app_fu'].set_exception(
                     TypeError(f"join_app body must return a Future or list of Futures, got {joinable} of type {type(joinable)}"))
@@ -521,6 +524,7 @@ class DataFlowKernel:
 
             self.update_task_state(task_record, States.failed)
             task_record['time_returned'] = datetime.datetime.now()
+            self.memoizer.update_memo(task_record)
            with task_record['app_fu']._update_lock:
                 task_record['app_fu'].set_exception(e)
 
@@ -561,8 +565,6 @@ class DataFlowKernel:
         if not task_record['app_fu'] == future:
             logger.error("Internal consistency error: callback future is not the app_fu in task structure, for task {}".format(task_id))
 
-        self.memoizer.update_memo(task_record, future)
-
         # Cover all checkpointing cases here:
         # Do we need to checkpoint now, or queue for later,
         # or do nothing?
@@ -591,6 +593,7 @@ class DataFlowKernel:
             logger.info(f"Task {task_record['id']} completed ({old_state.name} -> {new_state.name})")
             task_record['time_returned'] = datetime.datetime.now()
 
+            self.memoizer.update_memo(task_record)
             with task_record['app_fu']._update_lock:
                 task_record['app_fu'].set_result(result)
 
@@ -1202,7 +1205,6 @@ class DataFlowKernel:
 
         self.log_task_states()
 
-        # Checkpointing takes priority over the rest of the tasks
         # checkpoint if any valid checkpoint method is specified
         if self.checkpoint_mode is not None:
             self.checkpoint()
parsl/dataflow/memoization.py CHANGED
@@ -4,20 +4,16 @@ import hashlib
 import logging
 import os
 import pickle
+import types
+from concurrent.futures import Future
 from functools import lru_cache, singledispatch
-from typing import TYPE_CHECKING, Any, Dict, List, Optional, Sequence
+from typing import Any, Dict, List, Optional, Sequence
 
 import typeguard
 
 from parsl.dataflow.errors import BadCheckpoint
 from parsl.dataflow.taskrecord import TaskRecord
 
-if TYPE_CHECKING:
-    from parsl import DataFlowKernel  # import loop at runtime - needed for typechecking - TODO turn into "if typing:"
-
-import types
-from concurrent.futures import Future
-
 logger = logging.getLogger(__name__)
 
 
@@ -150,17 +146,13 @@ class Memoizer:
 
     """
 
-    def __init__(self, dfk: DataFlowKernel, *, memoize: bool = True, checkpoint_files: Sequence[str]):
+    def __init__(self, *, memoize: bool = True, checkpoint_files: Sequence[str]):
         """Initialize the memoizer.
 
-        Args:
-            - dfk (DFK obj): The DFK object
-
         KWargs:
             - memoize (Bool): enable memoization or not.
            - checkpoint (Dict): A checkpoint loaded as a dict.
         """
-        self.dfk = dfk
         self.memoize = memoize
 
         checkpoint = self.load_checkpoints(checkpoint_files)
@@ -242,16 +234,14 @@ class Memoizer:
         assert isinstance(result, Future) or result is None
         return result
 
-    def update_memo(self, task: TaskRecord, r: Future[Any]) -> None:
+    def update_memo(self, task: TaskRecord) -> None:
         """Updates the memoization lookup table with the result from a task.
+        This doesn't move any values around but associates the memoization
+        hashsum with the completed (by success or failure) AppFuture.
 
         Args:
-            - task (dict) : A task dict from dfk.tasks
-            - r (Result future): Result future
+            - task (TaskRecord) : A task record from dfk.tasks
         """
-        # TODO: could use typeguard
-        assert isinstance(r, Future)
-
         task_id = task['id']
 
         if not self.memoize or not task['memoize'] or 'hashsum' not in task:
@@ -265,7 +255,7 @@ class Memoizer:
             logger.info(f"Replacing app cache entry {task['hashsum']} with result from task {task_id}")
         else:
             logger.debug(f"Storing app cache entry {task['hashsum']} with result from task {task_id}")
-        self.memo_lookup_table[task['hashsum']] = r
+        self.memo_lookup_table[task['hashsum']] = task['app_fu']
 
     def _load_checkpoints(self, checkpointDirs: Sequence[str]) -> Dict[str, Future[Any]]:
         """Load a checkpoint file into a lookup table.
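The net effect of the Memoizer changes above, sketched with a toy lookup table (an illustration of the new `update_memo` contract, not the Parsl implementation): the memoization hashsum is now associated with the task's own `AppFuture`, which may hold either a result or an exception, instead of a separately passed result future.

    from concurrent.futures import Future

    memo_lookup_table = {}

    def update_memo(task: dict) -> None:
        # New contract: the future comes from the task record itself,
        # so failed tasks are recorded the same way as successful ones.
        memo_lookup_table[task['hashsum']] = task['app_fu']

    app_fu = Future()
    app_fu.set_exception(ValueError("failures are memoized too"))
    update_memo({'hashsum': 'abc123', 'app_fu': app_fu})
    assert memo_lookup_table['abc123'] is app_fu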
@@ -160,6 +160,12 @@ GENERAL_HTEX_PARAM_DOCS = """provider : :class:`~parsl.providers.base.ExecutionP
160
160
  """ # Documentation for params used by both HTEx and MPIEx
161
161
 
162
162
 
163
+ class HTEXFuture(Future):
164
+ def __init__(self, task_id) -> None:
165
+ super().__init__()
166
+ self.parsl_executor_task_id = task_id
167
+
168
+
163
169
  class HighThroughputExecutor(BlockProviderExecutor, RepresentationMixin, UsageInformation):
164
170
  __doc__ = f"""Executor designed for cluster-scale
165
171
 
@@ -237,7 +243,7 @@ class HighThroughputExecutor(BlockProviderExecutor, RepresentationMixin, UsageIn
237
243
  @typeguard.typechecked
238
244
  def __init__(self,
239
245
  label: str = 'HighThroughputExecutor',
240
- provider: ExecutionProvider = LocalProvider(),
246
+ provider: Optional[ExecutionProvider] = None,
241
247
  launch_cmd: Optional[str] = None,
242
248
  interchange_launch_cmd: Optional[Sequence[str]] = None,
243
249
  address: Optional[str] = None,
@@ -267,7 +273,9 @@ class HighThroughputExecutor(BlockProviderExecutor, RepresentationMixin, UsageIn
267
273
 
268
274
  logger.debug("Initializing HighThroughputExecutor")
269
275
 
270
- BlockProviderExecutor.__init__(self, provider=provider, block_error_handler=block_error_handler)
276
+ BlockProviderExecutor.__init__(self,
277
+ provider=provider if provider else LocalProvider(),
278
+ block_error_handler=block_error_handler)
271
279
  self.label = label
272
280
  self.worker_debug = worker_debug
273
281
  self.storage_access = storage_access
@@ -501,10 +509,7 @@ class HighThroughputExecutor(BlockProviderExecutor, RepresentationMixin, UsageIn
501
509
  else:
502
510
 
503
511
  for serialized_msg in msgs:
504
- try:
505
- msg = pickle.loads(serialized_msg)
506
- except pickle.UnpicklingError:
507
- raise BadMessage("Message received could not be unpickled")
512
+ msg = pickle.loads(serialized_msg)
508
513
 
509
514
  if msg['type'] == 'result':
510
515
  try:
@@ -671,7 +676,7 @@ class HighThroughputExecutor(BlockProviderExecutor, RepresentationMixin, UsageIn
671
676
  logger.debug("Sending hold to manager: {}".format(manager['manager']))
672
677
  self._hold_manager(manager['manager'])
673
678
 
674
- def submit(self, func, resource_specification, *args, **kwargs):
679
+ def submit(self, func: Callable, resource_specification: dict, *args, **kwargs) -> HTEXFuture:
675
680
  """Submits work to the outgoing_q.
676
681
 
677
682
  The outgoing_q is an external process listens on this
@@ -692,32 +697,83 @@ class HighThroughputExecutor(BlockProviderExecutor, RepresentationMixin, UsageIn
692
697
 
693
698
  self.validate_resource_spec(resource_specification)
694
699
 
695
- if self.bad_state_is_set:
696
- raise self.executor_exception
697
-
698
- self._task_counter += 1
699
- task_id = self._task_counter
700
-
701
700
  # handle people sending blobs gracefully
702
701
  if logger.getEffectiveLevel() <= logging.DEBUG:
703
702
  args_to_print = tuple([ar if len(ar := repr(arg)) < 100 else (ar[:100] + '...') for arg in args])
704
703
  logger.debug("Pushing function {} to queue with args {}".format(func, args_to_print))
705
704
 
706
- fut = Future()
707
- fut.parsl_executor_task_id = task_id
708
- self.tasks[task_id] = fut
709
-
710
705
  try:
711
706
  fn_buf = pack_apply_message(func, args, kwargs, buffer_threshold=1 << 20)
712
707
  except TypeError:
713
708
  raise SerializationError(func.__name__)
714
709
 
715
- msg = {"task_id": task_id, "resource_spec": resource_specification, "buffer": fn_buf}
710
+ context = {}
711
+ if resource_specification:
712
+ context["resource_spec"] = resource_specification
713
+
714
+ return self.submit_payload(context, fn_buf)
715
+
716
+ def submit_payload(self, context: dict, buffer: bytes) -> HTEXFuture:
717
+ """
718
+ Submit specially crafted payloads.
719
+
720
+ For use-cases where the ``HighThroughputExecutor`` consumer needs the payload
721
+ handled by the worker in a special way. For example, if the function is
722
+ serialized differently than Parsl's default approach, or if the task must
723
+ be setup more precisely than Parsl's default ``execute_task`` allows.
724
+
725
+ An example interaction:
726
+
727
+ .. code-block: python
728
+
729
+ >>> htex: HighThroughputExecutor # setup prior to this example
730
+ >>> ctxt = {
731
+ ... "task_executor": {
732
+ ... "f": "full.import.path.of.custom_execute_task",
733
+ ... "a": ("additional", "arguments"),
734
+ ... "k": {"some": "keyword", "args": "here"}
735
+ ... }
736
+ ... }
737
+ >>> fn_buf = custom_serialize(task_func, *task_args, **task_kwargs)
738
+ >>> fut = htex.submit_payload(ctxt, fn_buf)
739
+
740
+ The custom ``custom_execute_task`` would be dynamically imported, and
741
+ invoked as:
742
+
743
+ .. code-block: python
744
+
745
+ args = ("additional", "arguments")
746
+ kwargs = {"some": "keyword", "args": "here"}
747
+ result = custom_execute_task(fn_buf, *args, **kwargs)
748
+
749
+ Parameters
750
+ ----------
751
+ context:
752
+ A task-specific context associated with the function buffer. Parsl
753
+ currently implements the keys ``task_executor`` and ``resource_spec``
754
+
755
+ buffer:
756
+ A serialized function, that will be deserialized and executed by
757
+ ``execute_task`` (or custom function, if ``task_executor`` is specified)
758
+
759
+ Returns
760
+ -------
761
+ An HTEXFuture (a normal Future, with the attribute ``.parsl_executor_task_id``
762
+ set). The future will be set to done when the associated function buffer has
763
+ been invoked and completed.
764
+ """
765
+ if self.bad_state_is_set:
766
+ raise self.executor_exception
767
+
768
+ self._task_counter += 1
769
+ task_id = self._task_counter
770
+
771
+ fut = HTEXFuture(task_id)
772
+ self.tasks[task_id] = fut
716
773
 
717
- # Post task to the outgoing queue
774
+ msg = {"task_id": task_id, "context": context, "buffer": buffer}
718
775
  self.outgoing_q.put(msg)
719
776
 
720
- # Return the future
721
777
  return fut
722
778
 
723
779
  @property
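For the common case, the rewritten `submit` shows that a plain Parsl task is now a thin wrapper over `submit_payload`. A sketch of the message that ends up on the outgoing queue, using names taken from the diff above (`plus` and the priority value are illustrative):

    from parsl.serialize.facade import pack_apply_message

    def plus(a, b):
        return a + b

    # submit() packs the callable and its arguments into a buffer...
    fn_buf = pack_apply_message(plus, (1, 2), {}, buffer_threshold=1 << 20)

    # ...wraps any resource specification in the new per-task context...
    context = {"resource_spec": {"priority": 10}}

    # ...and submit_payload() enqueues a message of this shape:
    msg = {"task_id": 1, "context": context, "buffer": fn_buf}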
parsl/executors/high_throughput/interchange.py CHANGED
@@ -332,7 +332,7 @@ class Interchange:
             msg = self.task_incoming.recv_pyobj()
 
             # Process priority, higher number = lower priority
-            resource_spec = msg.get('resource_spec', {})
+            resource_spec = msg['context'].get('resource_spec', {})
             priority = resource_spec.get('priority', float('inf'))
             queue_entry = (-priority, -self.task_counter, msg)
 
@@ -360,9 +360,10 @@ class Interchange:
             mtype = meta['type']
         except Exception as e:
             logger.warning(
-                f'Failed to read manager message ([{type(e).__name__}] {e})'
+                'Failed to read manager message; ignoring message'
+                f' (Exception: [{type(e).__name__}] {e})'
             )
-            logger.debug('Message:\n %r\n', msg_parts, exc_info=e)
+            logger.debug('Raw message bytes:\n %r\n', msg_parts, exc_info=e)
             return
 
         logger.debug(
parsl/executors/high_throughput/mpi_executor.py CHANGED
@@ -16,7 +16,6 @@ from parsl.executors.status_handling import BlockProviderExecutor
 from parsl.jobs.states import JobStatus
 from parsl.launchers import SimpleLauncher
 from parsl.monitoring.radios.base import RadioConfig
-from parsl.providers import LocalProvider
 from parsl.providers.base import ExecutionProvider
 
 
@@ -47,7 +46,7 @@ class MPIExecutor(HighThroughputExecutor):
     @typeguard.typechecked
     def __init__(self,
                  label: str = 'MPIExecutor',
-                 provider: ExecutionProvider = LocalProvider(),
+                 provider: Optional[ExecutionProvider] = None,
                  launch_cmd: Optional[str] = None,
                  interchange_launch_cmd: Optional[str] = None,
                  address: Optional[str] = None,
parsl/executors/high_throughput/mpi_resource_management.py CHANGED
@@ -166,11 +166,10 @@ class MPITaskScheduler(TaskScheduler):
 
     def put_task(self, task_package: dict):
         """Schedule task if resources are available otherwise backlog the task"""
-        resource_spec = task_package.get("resource_spec", {})
+        resource_spec = task_package.get("context", {}).get("resource_spec", {})
 
-        nodes_needed = resource_spec.get("num_nodes")
-        tid = task_package["task_id"]
-        if nodes_needed:
+        if nodes_needed := resource_spec.get("num_nodes"):
+            tid = task_package["task_id"]
             try:
                 allocated_nodes = self._get_nodes(nodes_needed)
             except MPINodesUnavailable:
parsl/executors/high_throughput/process_worker_pool.py CHANGED
@@ -1,6 +1,7 @@
 #!/usr/bin/env python3
 
 import argparse
+import importlib
 import logging
 import math
 import multiprocessing
@@ -17,7 +18,7 @@ from importlib.metadata import distributions
 from multiprocessing.context import SpawnProcess
 from multiprocessing.managers import DictProxy
 from multiprocessing.sharedctypes import Synchronized
-from typing import Dict, List, Optional, Sequence
+from typing import Callable, Dict, List, Optional, Sequence
 
 import psutil
 import zmq
@@ -373,6 +374,8 @@ class Manager:
             if socks.get(ix_sock) == zmq.POLLIN:
                 pkl_msg = ix_sock.recv()
                 tasks = pickle.loads(pkl_msg)
+                del pkl_msg
+
                 last_interchange_contact = time.time()
 
                 if tasks == HEARTBEAT_CODE:
@@ -454,6 +457,7 @@ class Manager:
                                       'exception': serialize(RemoteExceptionWrapper(*sys.exc_info()))}
                     pkl_package = pickle.dumps(result_package)
                     self.pending_result_queue.put(pkl_package)
+                    del pkl_package
                 except KeyError:
                     logger.info("Worker {} was not busy when it died".format(worker_id))
 
@@ -770,17 +774,33 @@ def worker(
             ready_worker_count.value -= 1
             worker_enqueued = False
 
-        _init_mpi_env(mpi_launcher=mpi_launcher, resource_spec=req["resource_spec"])
+        ctxt = req["context"]
+        res_spec = ctxt.get("resource_spec", {})
+
+        _init_mpi_env(mpi_launcher=mpi_launcher, resource_spec=res_spec)
+
+        exec_func: Callable = execute_task
+        exec_args = ()
+        exec_kwargs = {}
 
         try:
-            result = execute_task(req['buffer'])
+            if task_executor := ctxt.get("task_executor", None):
+                mod_name, _, fn_name = task_executor["f"].rpartition(".")
+                exec_mod = importlib.import_module(mod_name)
+                exec_func = getattr(exec_mod, fn_name)
+
+                exec_args = task_executor.get("a", ())
+                exec_kwargs = task_executor.get("k", {})
+
+            result = exec_func(req['buffer'], *exec_args, **exec_kwargs)
             serialized_result = serialize(result, buffer_threshold=1000000)
         except Exception as e:
             logger.info('Caught an exception: {}'.format(e))
             result_package = {'type': 'result', 'task_id': tid, 'exception': serialize(RemoteExceptionWrapper(*sys.exc_info()))}
         else:
             result_package = {'type': 'result', 'task_id': tid, 'result': serialized_result}
-            # logger.debug("Result: {}".format(result))
+            del serialized_result
+        del req
 
         logger.info("Completed executor task {}".format(tid))
         try:
@@ -792,6 +812,7 @@ def worker(
         })
 
         result_queue.put(pkl_package)
+        del pkl_package, result_package
        tasks_in_progress.pop(worker_id)
         logger.info("All processing finished for executor task {}".format(tid))
 
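The worker's new `task_executor` dispatch above is a standard dotted-path dynamic import: split the path on the last dot, import the module, and fetch the attribute. A standalone sketch of that mechanism, using `json.dumps` purely as a stand-in target:

    import importlib

    def resolve(dotted_path: str):
        # "pkg.module.attr" -> import pkg.module, then fetch attr from it
        mod_name, _, fn_name = dotted_path.rpartition(".")
        module = importlib.import_module(mod_name)
        return getattr(module, fn_name)

    func = resolve("json.dumps")
    assert func({"ok": True}) == '{"ok": true}'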
parsl/executors/high_throughput/zmq_pipes.py CHANGED
@@ -136,30 +136,18 @@ class TasksOutgoing:
         self.port = self.zmq_socket.bind_to_random_port(tcp_url(ip_address),
                                                         min_port=port_range[0],
                                                         max_port=port_range[1])
-        self.poller = zmq.Poller()
-        self.poller.register(self.zmq_socket, zmq.POLLOUT)
 
     def put(self, message):
         """ This function needs to be fast at the same time aware of the possibility of
         ZMQ pipes overflowing.
 
-        The timeout increases slowly if contention is detected on ZMQ pipes.
         We could set copy=False and get slightly better latency but this results
         in ZMQ sockets reaching a broken state once there are ~10k tasks in flight.
         This issue can be magnified if each the serialized buffer itself is larger.
         """
-        timeout_ms = 1
-        while True:
-            socks = dict(self.poller.poll(timeout=timeout_ms))
-            if self.zmq_socket in socks and socks[self.zmq_socket] == zmq.POLLOUT:
-                # The copy option adds latency but reduces the risk of ZMQ overflow
-                logger.debug("Sending TasksOutgoing message")
-                self.zmq_socket.send_pyobj(message, copy=True)
-                logger.debug("Sent TasksOutgoing message")
-                return
-            else:
-                timeout_ms *= 2
-                logger.debug("Not sending due to non-ready zmq pipe, timeout: {} ms".format(timeout_ms))
+        logger.debug("Sending TasksOutgoing message")
+        self.zmq_socket.send_pyobj(message)
+        logger.debug("Sent TasksOutgoing message")
 
     def close(self):
         self.zmq_socket.close()
@@ -192,20 +180,15 @@ class ResultsIncoming:
         self.port = self.results_receiver.bind_to_random_port(tcp_url(ip_address),
                                                               min_port=port_range[0],
                                                               max_port=port_range[1])
-        self.poller = zmq.Poller()
-        self.poller.register(self.results_receiver, zmq.POLLIN)
 
     def get(self, timeout_ms=None):
         """Get a message from the queue, returning None if timeout expires
         without a message. timeout is measured in milliseconds.
         """
-        socks = dict(self.poller.poll(timeout=timeout_ms))
-        if self.results_receiver in socks and socks[self.results_receiver] == zmq.POLLIN:
-            m = self.results_receiver.recv_multipart()
-            logger.debug("Received ResultsIncoming message")
-            return m
-        else:
-            return None
+        if zmq.POLLIN == self.results_receiver.poll(timeout_ms, zmq.POLLIN):
+            logger.debug("Receiving ResultsIncoming multipart message")
+            return self.results_receiver.recv_multipart()
+        return None
 
     def close(self):
         self.results_receiver.close()
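The simplified `get` above drops the `zmq.Poller` bookkeeping in favour of calling `poll` on the socket itself, which returns the ready-event mask (0 on timeout). A self-contained sketch of that receive pattern over a local PAIR socket (the PAIR setup is illustrative; the real classes wrap the executor-interchange sockets):

    import zmq

    ctx = zmq.Context()
    rx = ctx.socket(zmq.PAIR)
    port = rx.bind_to_random_port("tcp://127.0.0.1")
    tx = ctx.socket(zmq.PAIR)
    tx.connect(f"tcp://127.0.0.1:{port}")

    tx.send_multipart([b"hello"])

    # Socket.poll returns the event mask, so a single comparison replaces
    # the register/poll/dict dance of a zmq.Poller.
    if rx.poll(1000, zmq.POLLIN) == zmq.POLLIN:
        print(rx.recv_multipart())  # [b'hello']

    ctx.destroy()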
parsl/executors/taskvine/executor.py CHANGED
@@ -107,13 +107,17 @@ class TaskVineExecutor(BlockProviderExecutor, putils.RepresentationMixin):
                  function_exec_mode: Union[Literal['regular'], Literal['serverless']] = 'regular',
                  manager_config: TaskVineManagerConfig = TaskVineManagerConfig(),
                  factory_config: TaskVineFactoryConfig = TaskVineFactoryConfig(),
-                 provider: Optional[ExecutionProvider] = LocalProvider(init_blocks=1),
+                 provider: Optional[ExecutionProvider] = None,
                  storage_access: Optional[List[Staging]] = None,
                  remote_monitoring_radio: Optional[RadioConfig] = None):
 
         # Set worker launch option for this executor
         if worker_launch_method == 'factory' or worker_launch_method == 'manual':
             provider = None
+        elif worker_launch_method == 'provider' and provider is None:
+            # provider method chosen, but no explicit provider supplied to __init__
+            # so default to LocalProvider
+            provider = LocalProvider(init_blocks=1)
 
         # Initialize the parent class with the execution provider and block error handling enabled.
         # If provider is None, then no worker is launched via the provider method.
parsl/tests/configs/taskvine_ex.py CHANGED
@@ -7,4 +7,4 @@ from parsl.executors.taskvine import TaskVineExecutor, TaskVineManagerConfig
 
 def fresh_config():
     return Config(executors=[TaskVineExecutor(manager_config=TaskVineManagerConfig(port=9000),
-                                              worker_launch_method='factory')])
+                                              worker_launch_method='provider')])
parsl/tests/test_checkpointing/test_periodic.py CHANGED
@@ -2,11 +2,17 @@ import pytest
 
 import parsl
 from parsl.app.app import python_app
-from parsl.tests.configs.local_threads_checkpoint_periodic import fresh_config
+from parsl.config import Config
+from parsl.executors.threads import ThreadPoolExecutor
 
 
-def local_setup():
-    parsl.load(fresh_config())
+def fresh_config():
+    tpe = ThreadPoolExecutor(label='local_threads_checkpoint_periodic', max_threads=1)
+    return Config(
+        executors=[tpe],
+        checkpoint_mode='periodic',
+        checkpoint_period='00:00:02'
+    )
 
 
 @python_app(cache=True)
@@ -25,12 +31,12 @@ def tstamp_to_seconds(line):
 def test_periodic():
     """Test checkpointing with task_periodic behavior
     """
-    h, m, s = map(int, parsl.dfk().config.checkpoint_period.split(":"))
-    assert h == 0, "Verify test setup"
-    assert m == 0, "Verify test setup"
-    assert s > 0, "Verify test setup"
-    sleep_for = s + 1
-    with parsl.dfk():
+    with parsl.load(fresh_config()):
+        h, m, s = map(int, parsl.dfk().config.checkpoint_period.split(":"))
+        assert h == 0, "Verify test setup"
+        assert m == 0, "Verify test setup"
+        assert s > 0, "Verify test setup"
+        sleep_for = s + 1
         futs = [slow_double(sleep_for) for _ in range(4)]
         [f.result() for f in futs]
         run_dir = parsl.dfk().run_dir
parsl/tests/test_checkpointing/test_regression_233.py CHANGED
@@ -5,7 +5,6 @@ from parsl.dataflow.dflow import DataFlowKernel
 
 
 def run_checkpointed(checkpoints):
-    # set_stream_logger()
     from parsl.tests.configs.local_threads_checkpoint_task_exit import config
     config.checkpoint_files = checkpoints
     dfk = DataFlowKernel(config=config)
parsl/tests/test_htex/test_htex.py CHANGED
@@ -7,6 +7,7 @@ from unittest import mock
 import pytest
 
 from parsl import HighThroughputExecutor, curvezmq
+from parsl.serialize.facade import pack_apply_message, unpack_apply_message
 
 _MOCK_BASE = "parsl.executors.high_throughput.executor"
 
@@ -19,11 +20,16 @@ def encrypted(request: pytest.FixtureRequest):
 
 
 @pytest.fixture
-def htex(encrypted: bool):
+def htex(encrypted: bool, tmpd_cwd):
     htex = HighThroughputExecutor(encrypted=encrypted)
+    htex.max_workers_per_node = 1
+    htex.run_dir = tmpd_cwd
+    htex.provider.script_dir = tmpd_cwd
 
     yield htex
 
+    if hasattr(htex, "outgoing_q"):
+        htex.scale_in(blocks=1000)
     htex.shutdown()
 
 
@@ -146,3 +152,32 @@ def test_htex_interchange_launch_cmd(cmd: Optional[Sequence[str]]):
     else:
         htex = HighThroughputExecutor()
         assert htex.interchange_launch_cmd == ["interchange.py"]
+
+
+def dyn_exec(buf, *vec_y):
+    f, a, _ = unpack_apply_message(buf)
+    custom_args = [a, vec_y]
+    return f(*custom_args)
+
+
+@pytest.mark.local
+def test_worker_dynamic_import(htex: HighThroughputExecutor):
+    def _dot_prod(vec_x, vec_y):
+        return sum(x * y for x, y in zip(vec_x, vec_y))
+
+    htex.start()
+    htex.scale_out_facade(1)
+
+    num_array = tuple(range(10))
+
+    fn_buf = pack_apply_message(_dot_prod, num_array, {})
+    ctxt = {
+        "task_executor": {
+            "f": f"{dyn_exec.__module__}.{dyn_exec.__name__}",
+            "a": num_array,  # prove "custom" dyn_exec
+        }
+    }
+    val = htex.submit_payload(ctxt, fn_buf).result()
+    exp_val = _dot_prod(num_array, num_array)
+
+    assert val == exp_val
parsl/tests/test_htex/test_interchange_exit_bad_registration.py CHANGED
@@ -1,4 +1,3 @@
-import json
 import logging
 import os
 import pickle
parsl/tests/test_mpi_apps/test_mpi_scheduler.py CHANGED
@@ -43,8 +43,8 @@ def test_MPISched_put_task():
     assert len(scheduler.available_nodes) == 8
     assert scheduler._free_node_counter.value == 8
 
-    res_spec = {"num_nodes": 2, "ranks_per_node": 2}
-    task_package = {"task_id": 1, "buffer": mock_task_buffer, "resource_spec": res_spec}
+    ctxt = {"resource_spec": {"num_nodes": 2, "ranks_per_node": 2}}
+    task_package = {"task_id": 1, "buffer": mock_task_buffer, "context": ctxt}
     scheduler.put_task(task_package)
 
     assert scheduler._free_node_counter.value == 6
@@ -82,8 +82,8 @@ def test_MPISched_roundtrip():
     for trip in range(1, 9):
         assert scheduler._free_node_counter.value == 8
 
-        res_spec = {"num_nodes": trip, "ranks_per_node": 2}
-        task_package = {"task_id": trip, "buffer": mock_task_buffer, "resource_spec": res_spec}
+        ctxt = {"resource_spec": {"num_nodes": trip, "ranks_per_node": 2}}
+        task_package = {"task_id": trip, "buffer": mock_task_buffer, "context": ctxt}
         scheduler.put_task(task_package)
 
         assert scheduler._free_node_counter.value == 8 - trip
@@ -107,15 +107,15 @@ def test_MPISched_contention():
 
     assert scheduler._free_node_counter.value == 8
 
-    rspec_1 = {"num_nodes": 8, "ranks_per_node": 2}
-    task_package = {"task_id": 1, "buffer": mock_task_buffer, "resource_spec": rspec_1}
+    ctxt_1 = {"resource_spec": {"num_nodes": 8, "ranks_per_node": 2}}
+    task_package = {"task_id": 1, "buffer": mock_task_buffer, "context": ctxt_1}
     scheduler.put_task(task_package)
 
     assert scheduler._free_node_counter.value == 0
     assert scheduler._backlog_queue.empty()
 
-    rspec_2 = {"num_nodes": 8, "ranks_per_node": 2}
-    task_package = {"task_id": 2, "buffer": mock_task_buffer, "resource_spec": rspec_2}
+    ctxt_2 = {"resource_spec": {"num_nodes": 8, "ranks_per_node": 2}}
+    task_package = {"task_id": 2, "buffer": mock_task_buffer, "context": ctxt_2}
     scheduler.put_task(task_package)
 
     # Second task should now be in the backlog_queue
@@ -124,7 +124,7 @@ def test_MPISched_contention():
     # Confirm that the first task is available and has all 8 nodes provisioned
     task_on_worker_side = task_q.get()
     assert task_on_worker_side['task_id'] == 1
-    assert len(rspec_1["MPI_NODELIST"].split(",")) == 8
+    assert len(ctxt_1["resource_spec"]["MPI_NODELIST"].split(",")) == 8
     assert task_q.empty()  # Confirm that task 2 is not yet scheduled
 
     # Simulate worker returning result and the scheduler picking up result
@@ -139,7 +139,7 @@ def test_MPISched_contention():
     # Pop in a mock result
     task_on_worker_side = task_q.get()
     assert task_on_worker_side['task_id'] == 2
-    assert len(rspec_2["MPI_NODELIST"].split(",")) == 8
+    assert len(ctxt_2["resource_spec"]["MPI_NODELIST"].split(",")) == 8
 
 
 @pytest.mark.local
@@ -157,7 +157,7 @@ def test_hashable_backlog_queue():
     assert scheduler._free_node_counter.value == 8
 
     for i in range(3):
-        res_spec = {"num_nodes": 8, "ranks_per_node": 2}
-        task_package = {"task_id": i, "buffer": mock_task_buffer, "resource_spec": res_spec}
+        ctxt = {"resource_spec": {"num_nodes": 8, "ranks_per_node": 2}}
+        task_package = {"task_id": i, "buffer": mock_task_buffer, "context": ctxt}
         scheduler.put_task(task_package)
     assert scheduler._backlog_queue.qsize() == 2, "Expected 2 backlogged tasks"
parsl/tests/test_python_apps/test_garbage_collect.py CHANGED
@@ -27,10 +27,5 @@ def test_garbage_collect():
 
     evt.set()
     assert x.result() == 10 * 4
-    if parsl.dfk().checkpoint_mode is not None:
-        # We explicit call checkpoint if checkpoint_mode is enabled covering
-        # cases like manual/periodic where checkpointing may be deferred.
-        parsl.dfk().checkpoint()
-
-    time.sleep(0.01)  # Give enough time for task wipes to work
+    time.sleep(0.01)  # Give enough time for task wipes to work - see issue #1279
     assert x.tid not in parsl.dfk().tasks, "Task record should be wiped after task completion"
parsl/tests/test_python_apps/test_memoize_2.py CHANGED
@@ -4,7 +4,17 @@ import pytest
 
 import parsl
 from parsl.app.app import python_app
-from parsl.tests.configs.local_threads_no_cache import fresh_config as local_config
+from parsl.config import Config
+from parsl.executors.threads import ThreadPoolExecutor
+
+
+def local_config():
+    return Config(
+        executors=[
+            ThreadPoolExecutor(max_threads=4),
+        ],
+        app_cache=False
+    )
 
 
 @python_app
parsl/tests/test_regression/test_3874.py ADDED
@@ -0,0 +1,47 @@
+import shutil
+
+import pytest
+
+import parsl
+from parsl.app.app import python_app
+from parsl.config import Config
+from parsl.executors import HighThroughputExecutor
+
+
+@python_app
+def noop():
+    pass
+
+
+@pytest.mark.local
+def test_regression_3874(tmpd_cwd_session):
+    # HTEX run 1
+
+    rundir_1 = str(tmpd_cwd_session / "1")
+
+    config = Config(executors=[HighThroughputExecutor()], strategy_period=0.5)
+    config.run_dir = rundir_1
+
+    with parsl.load(config):
+        noop().result()
+
+    # It is necessary to delete this rundir to exercise the bug. Otherwise,
+    # the next run will be able to continue looking at this directory - the
+    # bug manifests when it cannot.
+
+    shutil.rmtree(rundir_1)
+
+    # HTEX run 2
+    # In the case of issue 3874, this run hangs (rather than failing) as the
+    # JobStatusPoller fails to collect status of all of its managed tasks
+    # every iteration, without converging towards failure.
+
+    rundir_2 = str(tmpd_cwd_session / "2")
+
+    config = Config(executors=[HighThroughputExecutor()], strategy_period=0.5)
+    config.run_dir = rundir_2
+
+    with parsl.load(config):
+        noop().result()
+
+    shutil.rmtree(rundir_2)
parsl/version.py CHANGED
@@ -3,4 +3,4 @@
 Year.Month.Day[alpha/beta/..]
 Alphas will be numbered like this -> 2024.12.10a0
 """
-VERSION = '2025.09.22'
+VERSION = '2025.10.06'
{parsl-2025.9.22.data → parsl-2025.10.6.data}/scripts/interchange.py CHANGED
@@ -332,7 +332,7 @@ class Interchange:
             msg = self.task_incoming.recv_pyobj()
 
             # Process priority, higher number = lower priority
-            resource_spec = msg.get('resource_spec', {})
+            resource_spec = msg['context'].get('resource_spec', {})
             priority = resource_spec.get('priority', float('inf'))
             queue_entry = (-priority, -self.task_counter, msg)
 
@@ -360,9 +360,10 @@ class Interchange:
             mtype = meta['type']
         except Exception as e:
             logger.warning(
-                f'Failed to read manager message ([{type(e).__name__}] {e})'
+                'Failed to read manager message; ignoring message'
+                f' (Exception: [{type(e).__name__}] {e})'
             )
-            logger.debug('Message:\n %r\n', msg_parts, exc_info=e)
+            logger.debug('Raw message bytes:\n %r\n', msg_parts, exc_info=e)
             return
 
         logger.debug(
{parsl-2025.9.22.data → parsl-2025.10.6.data}/scripts/process_worker_pool.py CHANGED
@@ -1,6 +1,7 @@
 #!python
 
 import argparse
+import importlib
 import logging
 import math
 import multiprocessing
@@ -17,7 +18,7 @@ from importlib.metadata import distributions
 from multiprocessing.context import SpawnProcess
 from multiprocessing.managers import DictProxy
 from multiprocessing.sharedctypes import Synchronized
-from typing import Dict, List, Optional, Sequence
+from typing import Callable, Dict, List, Optional, Sequence
 
 import psutil
 import zmq
@@ -373,6 +374,8 @@ class Manager:
             if socks.get(ix_sock) == zmq.POLLIN:
                 pkl_msg = ix_sock.recv()
                 tasks = pickle.loads(pkl_msg)
+                del pkl_msg
+
                 last_interchange_contact = time.time()
 
                 if tasks == HEARTBEAT_CODE:
@@ -454,6 +457,7 @@ class Manager:
                                       'exception': serialize(RemoteExceptionWrapper(*sys.exc_info()))}
                     pkl_package = pickle.dumps(result_package)
                     self.pending_result_queue.put(pkl_package)
+                    del pkl_package
                 except KeyError:
                     logger.info("Worker {} was not busy when it died".format(worker_id))
 
@@ -770,17 +774,33 @@ def worker(
             ready_worker_count.value -= 1
             worker_enqueued = False
 
-        _init_mpi_env(mpi_launcher=mpi_launcher, resource_spec=req["resource_spec"])
+        ctxt = req["context"]
+        res_spec = ctxt.get("resource_spec", {})
+
+        _init_mpi_env(mpi_launcher=mpi_launcher, resource_spec=res_spec)
+
+        exec_func: Callable = execute_task
+        exec_args = ()
+        exec_kwargs = {}
 
         try:
-            result = execute_task(req['buffer'])
+            if task_executor := ctxt.get("task_executor", None):
+                mod_name, _, fn_name = task_executor["f"].rpartition(".")
+                exec_mod = importlib.import_module(mod_name)
+                exec_func = getattr(exec_mod, fn_name)
+
+                exec_args = task_executor.get("a", ())
+                exec_kwargs = task_executor.get("k", {})
+
+            result = exec_func(req['buffer'], *exec_args, **exec_kwargs)
             serialized_result = serialize(result, buffer_threshold=1000000)
         except Exception as e:
             logger.info('Caught an exception: {}'.format(e))
             result_package = {'type': 'result', 'task_id': tid, 'exception': serialize(RemoteExceptionWrapper(*sys.exc_info()))}
         else:
             result_package = {'type': 'result', 'task_id': tid, 'result': serialized_result}
-            # logger.debug("Result: {}".format(result))
+            del serialized_result
+        del req
 
         logger.info("Completed executor task {}".format(tid))
         try:
@@ -792,6 +812,7 @@ def worker(
         })
 
         result_queue.put(pkl_package)
+        del pkl_package, result_package
        tasks_in_progress.pop(worker_id)
         logger.info("All processing finished for executor task {}".format(tid))
 
{parsl-2025.9.22.dist-info → parsl-2025.10.6.dist-info}/METADATA CHANGED
@@ -1,9 +1,9 @@
 Metadata-Version: 2.1
 Name: parsl
-Version: 2025.9.22
+Version: 2025.10.6
 Summary: Simple data dependent workflows in Python
 Home-page: https://github.com/Parsl/parsl
-Download-URL: https://github.com/Parsl/parsl/archive/2025.09.22.tar.gz
+Download-URL: https://github.com/Parsl/parsl/archive/2025.10.06.tar.gz
 Author: The Parsl Team
 Author-email: parsl@googlegroups.com
 License: Apache 2.0
@@ -11,11 +11,10 @@ Keywords: Workflows,Scientific computing
 Classifier: Development Status :: 5 - Production/Stable
 Classifier: Intended Audience :: Developers
 Classifier: License :: OSI Approved :: Apache Software License
-Classifier: Programming Language :: Python :: 3.9
 Classifier: Programming Language :: Python :: 3.10
 Classifier: Programming Language :: Python :: 3.11
 Classifier: Programming Language :: Python :: 3.12
-Requires-Python: >=3.9.0
+Requires-Python: >=3.10.0
 License-File: LICENSE
 Requires-Dist: pyzmq>=17.1.2
 Requires-Dist: typeguard!=3.*,<5,>=2.10
{parsl-2025.9.22.dist-info → parsl-2025.10.6.dist-info}/RECORD CHANGED
@@ -8,7 +8,7 @@ parsl/multiprocessing.py,sha256=xqieTLko3DrHykCqqSHQszMwd8ORYllrgz6Qc_PsHCE,2112
 parsl/process_loggers.py,sha256=uQ7Gd0W72Jz7rrcYlOMfLsAEhkRltxXJL2MgdduJjEw,1136
 parsl/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 parsl/utils.py,sha256=smVYTusMoYUTD5N9OxTW5bh6o2iioh0NnfjrBAj8zYk,14452
-parsl/version.py,sha256=1FmTAxb_oLz19QvMrCDIghwk5d3IAI0plHgiPluzCgI,131
+parsl/version.py,sha256=f99TcEFVCtxnPE3Zu7czlSIUkdUi5SaPzVKQ0u864mA,131
 parsl/app/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 parsl/app/app.py,sha256=0gbM4AH2OtFOLsv07I5nglpElcwMSOi-FzdZZfrk7So,8532
 parsl/app/bash.py,sha256=jm2AvePlCT9DZR7H_4ANDWxatp5dN_22FUlT_gWhZ-g,5528
@@ -16,7 +16,7 @@ parsl/app/errors.py,sha256=SQQ1fNp8834DZnoRnlsoZn1WMAFM3fnh2CNHRPmFcKc,3854
 parsl/app/futures.py,sha256=2tMUeKIuDzwuhLIWlsEiZuDrhkxxsUed4QUbQuQg20Y,2826
 parsl/app/python.py,sha256=0hrz2BppVOwwNfh5hnoP70Yv56gSRkIoT-fP9XNb4v4,2331
 parsl/benchmark/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-parsl/benchmark/perf.py,sha256=kKXefDozWXSJKSNA7qdfUgEoacA2-R9kSZcI2YvZ5uE,3096
+parsl/benchmark/perf.py,sha256=DRTNSA_-Qq01GfMl6MEWtwENEu1aReHC23SX6f0HuXk,3615
 parsl/concurrent/__init__.py,sha256=TvIVceJYaJAsxedNBF3Vdo9lEQNHH_j3uxJv0zUjP7w,3288
 parsl/configs/ASPIRE1.py,sha256=nQm6BvCPE07YXEsC94wMrHeVAyYcyfvPgWyHIysjAoA,1690
 parsl/configs/Azure.py,sha256=CJms3xWmdb-S3CksbHrPF2TfMxJC5I0faqUKCOzVg0k,1268
@@ -55,10 +55,10 @@ parsl/data_provider/staging.py,sha256=ZDZuuFg38pjUStegKPcvPsfGp3iMeReMzfU6DSwtJj
 parsl/data_provider/zip.py,sha256=S4kVuH9lxAegRURYbvIUR7EYYBOccyslaqyCrVWUBhw,4497
 parsl/dataflow/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 parsl/dataflow/dependency_resolvers.py,sha256=Om8Dgh7a0ZwgXAc6TlhxLSzvxXHDlNNV1aBNiD3JTNY,3325
-parsl/dataflow/dflow.py,sha256=jn6gzrvz1XHpxX6SZYsh8Ics9ZUG6n8Mhd-bB2gOqow,62940
+parsl/dataflow/dflow.py,sha256=shxgZ5ulMYPnvyKx4nOrdhCZYLX3JgXPo1OC3168OZw,63087
 parsl/dataflow/errors.py,sha256=daVfr2BWs1zRsGD6JtosEMttWHvK1df1Npiu_MUvFKg,3998
 parsl/dataflow/futures.py,sha256=08LuP-HFiHBIZmeKCjlsazw_WpQ5fwevrU2_WbidkYw,6080
-parsl/dataflow/memoization.py,sha256=dJRISYd3pXDH8NQzU0HW4jDH4rCfBLSs48d2SrbX1uA,12206
+parsl/dataflow/memoization.py,sha256=AyO1khMwlbuGJQQk-l_wJRj0QeOHTOnmlvzXgQdNNQk,11977
 parsl/dataflow/rundirs.py,sha256=JZdzybVGubY35jL2YiKcDo65ZmRl1WyOApc8ajYxztc,1087
 parsl/dataflow/states.py,sha256=hV6mfv-y4A6xrujeQglcomnfEs7y3Xm2g6JFwC6dvgQ,2612
 parsl/dataflow/taskrecord.py,sha256=qIW7T6hn9dYTuNPdUura3HQwwUpUJACwPP5REm5COf4,3042
@@ -75,17 +75,17 @@ parsl/executors/flux/executor.py,sha256=UhW8R_QVYIuafrX4xOIokzl1z7p4KjFBUzser3bA
 parsl/executors/flux/flux_instance_manager.py,sha256=5T3Rp7ZM-mlT0Pf0Gxgs5_YmnaPrSF9ec7zvRfLfYJw,2129
 parsl/executors/high_throughput/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 parsl/executors/high_throughput/errors.py,sha256=k2XuvvFdUfNs2foHFnxmS-BToRMfdXpYEa4EF3ELKq4,1554
-parsl/executors/high_throughput/executor.py,sha256=fAjGPJM4_EQbIosIvNpXzKWTLqGVDNWB635l-6hkBow,40240
-parsl/executors/high_throughput/interchange.py,sha256=PcalTRzNRVW5B5B6CVR4IoGqqrdhOSzzIcHoQawRH3A,26089
+parsl/executors/high_throughput/executor.py,sha256=xtII7lb1skv2sUHmt9K6k4bE2LRvGmku-4h6cUTUM8k,42373
+parsl/executors/high_throughput/interchange.py,sha256=ODXsNNTkaEtwCGwqa5aclXyLw7x_dEtO9mANsOUTeNE,26158
 parsl/executors/high_throughput/manager_record.py,sha256=ZMsqFxvreGLRXAw3N-JnODDa9Qfizw2tMmcBhm4lco4,490
 parsl/executors/high_throughput/manager_selector.py,sha256=UKcUE6v0tO7PDMTThpKSKxVpOpOUilxDL7UbNgpZCxo,2116
 parsl/executors/high_throughput/monitoring_info.py,sha256=HC0drp6nlXQpAop5PTUKNjdXMgtZVvrBL0JzZJebPP4,298
-parsl/executors/high_throughput/mpi_executor.py,sha256=P8n81Y9t5cw-YuNFgkrGtc4oG75ntBJDonUIfhkp_5I,5223
+parsl/executors/high_throughput/mpi_executor.py,sha256=Tumz8VD7ujxCFjuEsJq47Ez49QqO3OMZi-So3JK3vos,5180
 parsl/executors/high_throughput/mpi_prefix_composer.py,sha256=DmpKugANNa1bdYlqQBLHkrFc15fJpefPPhW9hkAlh1s,4308
-parsl/executors/high_throughput/mpi_resource_management.py,sha256=SeFtvvS-8asTGaukM5YW85m_8FdIGod0I5Vi0fNcXZg,7796
+parsl/executors/high_throughput/mpi_resource_management.py,sha256=KEG_yulFxEl61hZapdBsf3AejetivIBmXMkq1IkGcH0,7799
 parsl/executors/high_throughput/probe.py,sha256=QlBFwSSxMmtH-Aa2JEvCzQLddsbWZluMUxq5ypLR51E,3831
-parsl/executors/high_throughput/process_worker_pool.py,sha256=KLNE4kswtV0OzoHWCgg00Bs8zPtWixJGBqAbDPPfrWY,40688
-parsl/executors/high_throughput/zmq_pipes.py,sha256=fANpmyvBetp0_b-qsI59yqBW8ank-PDNqThuQ3JeVl4,8183
+parsl/executors/high_throughput/process_worker_pool.py,sha256=NGYBWCssX83JH_tdtSPoFgazcxy7esi36yDyOO622WY,41375
+parsl/executors/high_throughput/zmq_pipes.py,sha256=Lr3A9Y5uyyf4VdD4L0Yontg-mNlO566k9-Ag7tXKp18,7330
 parsl/executors/radical/__init__.py,sha256=CKbtV2numw5QvgIBq1htMUrt9TqDCIC2zifyf2svTNU,186
 parsl/executors/radical/executor.py,sha256=eb7zgakpFBvGVlQ2NgxQlfjaql_mQQrYqqJJqbFxEno,22643
 parsl/executors/radical/rpex_resources.py,sha256=Q7-0u3K447LBCe2y7mVcdw6jqWI7SdPXxCKhkr6FoRQ,5139
@@ -93,7 +93,7 @@ parsl/executors/radical/rpex_worker.py,sha256=vl807EucEH4YgKgQ-OAP1cZPDqRwKjte3W
 parsl/executors/taskvine/__init__.py,sha256=9rwp3M8B0YyEhZMLO0RHaNw7u1nc01WHbXLqnBTanu0,293
 parsl/executors/taskvine/errors.py,sha256=euIYkSslrNSI85kyi2s0xzOaO9ik4c1fYHstMIeiBJk,652
 parsl/executors/taskvine/exec_parsl_function.py,sha256=ftGdJU78lKPPkphSHlEi4rj164mhuMHJjghVqfgeXKk,7085
-parsl/executors/taskvine/executor.py,sha256=VBQ5wNA2t4n2tWyvEAo-T_eu84F6Q2CHsIZhorS93D4,31314
+parsl/executors/taskvine/executor.py,sha256=L2k8uxyagbCiYjoDkPAgeoRlsxQNXAlOWnTDChpWp28,31538
 parsl/executors/taskvine/factory.py,sha256=GU5JryEAKJuYKwrSc162BN-lhcKhapvBZHT820pxwic,2772
 parsl/executors/taskvine/factory_config.py,sha256=ZQC5vyDe8cM0nuv7fbBCV2xnWGAZ87iLlT2UqmFFI1U,3695
 parsl/executors/taskvine/manager.py,sha256=2GKpw9bnA2NQmti1AACZ6gHRVa8MN88NBk6j5VyTUIY,25935
@@ -226,19 +226,17 @@ parsl/tests/configs/local_radical_mpi.py,sha256=5OabeXXJPE0fyiA1AlGcQYoPRjQRk-HN
 parsl/tests/configs/local_threads.py,sha256=oEnQSlom_JMLFX9_Ln49JAfOP3nSMbw8gTaDJo_NYfo,202
 parsl/tests/configs/local_threads_checkpoint.py,sha256=Ex7CI1Eo6wVRsem9uXTtbVJrkKc_vOYlVvCNa2RLpIo,286
 parsl/tests/configs/local_threads_checkpoint_dfk_exit.py,sha256=ECL1n0uBsXDuW3sLCmjiwe8s3Xd7EFIj5wt446w6bh4,254
-parsl/tests/configs/local_threads_checkpoint_periodic.py,sha256=F2MVlwJZk-hkCgCrsAm_rKsv4mtLgsf5cyPsRoHm0ig,319
 parsl/tests/configs/local_threads_checkpoint_task_exit.py,sha256=zHKN68T-xhAVQwQp3fSWPIEcWOx-F7NBGZTOhF07iL8,256
 parsl/tests/configs/local_threads_ftp_in_task.py,sha256=c9odRbxgj1bM_ttpkWTh2Ch_MV7f5cmn-68BOjLeJ70,444
 parsl/tests/configs/local_threads_globus.py,sha256=NhY27cD4vcqLh762Ye0BINZnt63EmTyHXg7FQMffOBw,1097
 parsl/tests/configs/local_threads_http_in_task.py,sha256=csDY-C50tXKO2ntbbPBvppCRlXBcB7UCQOHN_FyfFYc,447
-parsl/tests/configs/local_threads_no_cache.py,sha256=2LM8rYhl62LIFUMjAs2_VI_R25YW5AI3RfVK_e5bdN8,236
 parsl/tests/configs/midway.py,sha256=ZLdAUDR5paPA8gheRNLI0q9Vj5HcnCYuIttu-C-TlJs,1335
 parsl/tests/configs/nscc_singapore.py,sha256=ECENZcBuCjkY6OWZstEMhfMrmjRmjCc7ELdfGEp7ly4,1481
 parsl/tests/configs/osg_htex.py,sha256=x-C_r7Kpwvqroc4Ay1Yaya9K6_j7IU1ywqPegBU7HKI,1371
 parsl/tests/configs/petrelkube.py,sha256=uUxrZrD_cF-_t6ytlRA_MUtw8RQbpW0CmNRbw3mWs1o,1699
 parsl/tests/configs/slurm_local.py,sha256=8-Zlxg4F2lXQq-_usjawkf-wen9Tu5FOFOE9N6qqh6M,737
 parsl/tests/configs/summit.py,sha256=0LbuTVmc8nl2eGiqAayhV0RCx0pg5kUpYhz9LvTFhDo,1378
-parsl/tests/configs/taskvine_ex.py,sha256=Nsovxtb59q6ta2opGrl7ufWcavYQtzSPrscLmaLYkUU,472
+parsl/tests/configs/taskvine_ex.py,sha256=karvmnW1KY-IOq6QvdBiaC5jjmzWlz_eoJDHDUlh-5o,473
 parsl/tests/configs/user_opts.py,sha256=JcEQr1emjyTdmVDddcSGbx9df__0C2m7X3vGNbdKnpo,5858
 parsl/tests/configs/workqueue_ex.py,sha256=c-vKc1MHmU9IyIyZGuxIPKfg93lKBeNnEoWBKjoIRcg,389
 parsl/tests/integration/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -280,11 +278,11 @@ parsl/tests/test_bash_apps/test_pipeline.py,sha256=1kQDD8-Dh5H9SKFcKHzN_mSrdxAV_
 parsl/tests/test_bash_apps/test_std_uri.py,sha256=CvAt8BUhNl2pA5chq9YyhkD6eo2IUH6PjWfe3SQ-YRU,3752
 parsl/tests/test_bash_apps/test_stdout.py,sha256=lNBzCJGst0IhKaSl8CM8-mTJ5eaK7hTlZ8gY-M2TDBU,3244
 parsl/tests/test_checkpointing/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-parsl/tests/test_checkpointing/test_periodic.py,sha256=nfMgrG7sZ8rkMu6iOHS6lp_iTU4IsOyQLQ2Gur_FMmE,1509
+parsl/tests/test_checkpointing/test_periodic.py,sha256=92mHbznqkA1R3RDU7q_am1QH5jyuK--Ff6lyLdy1jeU,1732
 parsl/tests/test_checkpointing/test_python_checkpoint_1.py,sha256=bi7c6fy6P7jmrMQkQP5me-LTfwVwJGq1O9BjnmdDIKc,715
 parsl/tests/test_checkpointing/test_python_checkpoint_2.py,sha256=Q_cXeAVz_dJuDDeiemUIGd-wmb7aCY3ggpqYjRRhHRc,1089
 parsl/tests/test_checkpointing/test_regression_232.py,sha256=AsI6AJ0DcFaefAbEY9qWa41ER0VX-4yLuIdlgvBw360,2637
-parsl/tests/test_checkpointing/test_regression_233.py,sha256=jii7BKuygK6KMIGtg4IeBjix7Z28cYhv57rE9ixoXMU,1774
+parsl/tests/test_checkpointing/test_regression_233.py,sha256=i3x55DzMnuEOSojsF3wUCnAqlevPddL_7jV453MnqYU,1748
 parsl/tests/test_checkpointing/test_regression_239.py,sha256=xycW1_IwVC55L25oMES_OzJU58TN5BoMvRUZ_xB69jU,2441
 parsl/tests/test_checkpointing/test_task_exit.py,sha256=KLR2BFm3jyh4i1UMC1jrohTIVPHVXmDS4DWYsjkJV5k,1705
 parsl/tests/test_docs/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -313,8 +311,8 @@ parsl/tests/test_htex/test_cpu_affinity_explicit.py,sha256=DVHrRCskDbJIrfB5YSi3Z
 parsl/tests/test_htex/test_disconnected_blocks.py,sha256=3V1Ol9gMS6knjLTgIjB5GrunRSp4ANsJ_2vAvpyMR6c,1858
 parsl/tests/test_htex/test_disconnected_blocks_failing_provider.py,sha256=eOdipRpKMOkWAXB3UtY1UjqTiwfNs_csNLve8vllG_M,2040
 parsl/tests/test_htex/test_drain.py,sha256=gYA7qzbv5ozox3clVdW0rlxAzwa_f_P0kqsAez3tIfk,2370
-parsl/tests/test_htex/test_htex.py,sha256=J1uEGezic8ziPPZsQwfK9iNiTJ53NqXMhIg9CUunjZw,4901
-parsl/tests/test_htex/test_interchange_exit_bad_registration.py,sha256=VWe-kj7kyvQcdUiAh3b2cZn8KWwHWIpel7bVa4XwlP0,4544
+parsl/tests/test_htex/test_htex.py,sha256=PQ5F9xh4fG46lezRvnp7FfaW-4zoyTVIjY3ShCammDY,5897
+parsl/tests/test_htex/test_interchange_exit_bad_registration.py,sha256=n6M-GSI4lPDPlkl6QLDxQnT2ZHIf6cu0C1EMIBGNNbs,4532
 parsl/tests/test_htex/test_manager_failure.py,sha256=N-obuSZ8f7XA_XcddoN2LWKSVtpKUZvTHb7BFelS3iQ,1143
 parsl/tests/test_htex/test_manager_selector_by_block.py,sha256=VQqSE6MDhGpDSjShGUTbj7l9Ahuj2tC9qD--o4puF44,1310
 parsl/tests/test_htex/test_managers_command.py,sha256=SCwkfyGB-Udgu5L2yDMpR5bsaT-aNjNkiXxtuRb25DI,1622
@@ -343,7 +341,7 @@ parsl/tests/test_mpi_apps/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJW
 parsl/tests/test_mpi_apps/test_bad_mpi_config.py,sha256=QKvEUSrHIBrvqu2fRj1MAqxsYxDfcrdQ7dzWdOZejuU,1320
 parsl/tests/test_mpi_apps/test_mpi_mode_enabled.py,sha256=_fpiaDq9yEUuBxTiuxLFsBt5r1oX9S-3S-YL5yRB13E,5423
 parsl/tests/test_mpi_apps/test_mpi_prefix.py,sha256=yJslZvYK3JeL9UgxMwF9DDPR9QD4zJLGVjubD0F-utc,1950
-parsl/tests/test_mpi_apps/test_mpi_scheduler.py,sha256=3LEPPYzZEPCYFiqv1YJIRJwiVmZHIplu8P-czUJ6N5U,5550
+parsl/tests/test_mpi_apps/test_mpi_scheduler.py,sha256=ePjidjiMYhDoy-iUsqU13Qb5QPBFzLayKrMyNRkoR6I,5619
 parsl/tests/test_mpi_apps/test_mpiex.py,sha256=mlFdHK3A1B6NsEhxTQQX8lhs9qVza36FMG99vNrBRW4,2021
 parsl/tests/test_mpi_apps/test_resource_spec.py,sha256=5k6HM2jtb6sa7jetpI-Tl1nPQiN33VLaM7YT10c307E,3756
 parsl/tests/test_providers/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -365,14 +363,14 @@ parsl/tests/test_python_apps/test_fail.py,sha256=gMuZwxZNaUCaonlUX-7SOBvXg8kidkB
 parsl/tests/test_python_apps/test_fibonacci_iterative.py,sha256=ly2s5HuB9R53Z2FM_zy0WWdOk01iVhgcwSpQyK6ErIY,573
 parsl/tests/test_python_apps/test_fibonacci_recursive.py,sha256=q7LMFcu_pJSNPdz8iY0UiRoIweEWIBGwMjQffHWAuDc,592
 parsl/tests/test_python_apps/test_futures.py,sha256=EWnzmPn5sVCgeMxc0Uz2ieaaVYr98tFZ7g8YJFqYuC8,2355
-parsl/tests/test_python_apps/test_garbage_collect.py,sha256=RPntrLuzPkeNbhS7mmqEnHbyOcuV1YVppgZ8BaX-h84,1076
+parsl/tests/test_python_apps/test_garbage_collect.py,sha256=jBKSvHr5SvuIRPEtUf2ED-oXntW4Nkri5lyxXVd2xU4,861
 parsl/tests/test_python_apps/test_import_fail.py,sha256=Vd8IMa_UsbHYkr3IGnS-rgGb6zKxB1tOTqMZY5lc_xY,691
 parsl/tests/test_python_apps/test_inputs_default.py,sha256=J2GR1NgdvEucNSJkfO6GC5OoMiuvSzO0tASCowT8HM0,436
 parsl/tests/test_python_apps/test_join.py,sha256=OWd6_A0Cf-1Xpjr0OT3HaJ1IMYcJ0LFL1VnmL0cZkL8,2988
 parsl/tests/test_python_apps/test_lifted.py,sha256=Na6qC_dZSeYJcZdkGn-dCjgYkQV267HmGFfaqFcRVcQ,3408
 parsl/tests/test_python_apps/test_mapred.py,sha256=C7nTl0NsP_2TCtcmZXWFMpvAG4pwGswrIJKr-5sRUNY,786
 parsl/tests/test_python_apps/test_memoize_1.py,sha256=E_VQAaykFKT_G7yRUWOhXxfOICj07qLq2R7onZ4oY9g,449
-parsl/tests/test_python_apps/test_memoize_2.py,sha256=uG9zG9j3ap1FqeJ8aB0Gj_dX191pN3dxWXeQ-asxPgU,553
+parsl/tests/test_python_apps/test_memoize_2.py,sha256=tTJOZRSC6HwLctvPm_x6acNA5QGrpH-fJnpYLGbjYOc,705
 parsl/tests/test_python_apps/test_memoize_4.py,sha256=CdK_vHW5s-phi5KPqcAQm_BRh8xek91GVGeQRjfJ4Bk,569
 parsl/tests/test_python_apps/test_memoize_bad_id_for_memo.py,sha256=5v25zdU6koXexRTkccj_3sSSdXqHdsU8ZdNrnZ3ONZU,1436
 parsl/tests/test_python_apps/test_memoize_exception.py,sha256=GdvB5XFnW5pbkFMETzxWC3nIKo13Pm0benq9u2UnM1E,1232
@@ -394,6 +392,7 @@ parsl/tests/test_regression/test_1653.py,sha256=eMXPfTALwQ7k2pGpar-kYed8yKtOj2U2
 parsl/tests/test_regression/test_221.py,sha256=jOS0EVu_2sbh10eg5hnivPvhNt0my_50vQ7jQYS1Bfg,520
 parsl/tests/test_regression/test_226.py,sha256=tVqGAU99RRQqz9KuMgeLVoddot2pRqG2y4daW44RrlE,1110
 parsl/tests/test_regression/test_2652.py,sha256=R_ZoX7Vgz4H2ionhjm_KWFW-vWt_MlgWV_zdTsT68M0,848
+parsl/tests/test_regression/test_3874.py,sha256=8MCxwyh9CXuB7tDgaHg6goclq6Hhrh_ryaQuKLLiLgU,1171
 parsl/tests/test_regression/test_69a.py,sha256=sRkMT95b7WvFAK1hUy7eNwKnzFNqaX9qESdNmoh0rAo,1902
 parsl/tests/test_regression/test_97_parallelism_0.py,sha256=Fe58KFhQpZuU982IP9ZSpKBb_Jpftv9pJjH73f8_ec8,1592
 parsl/tests/test_regression/test_98.py,sha256=E7dituuonKN5uWocZkJYZlaE5x5rDM4MZlv2PloAKzY,452
@@ -451,13 +450,13 @@ parsl/usage_tracking/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hS
 parsl/usage_tracking/api.py,sha256=iaCY58Dc5J4UM7_dJzEEs871P1p1HdxBMtNGyVdzc9g,1821
 parsl/usage_tracking/levels.py,sha256=xbfzYEsd55KiZJ-mzNgPebvOH4rRHum04hROzEf41tU,291
 parsl/usage_tracking/usage.py,sha256=hbMo5BYgIWqMcFWqN-HYP1TbwNrTonpv-usfwnCFJKY,9212
-parsl-2025.9.22.data/scripts/exec_parsl_function.py,sha256=YXKVVIa4zXmOtz-0Ca4E_5nQfN_3S2bh2tB75uZZB4w,7774
-parsl-2025.9.22.data/scripts/interchange.py,sha256=5hLSdQNG65v0iSx2FSeS6uyp7i3Ez2qWIzNSVblSixI,26076
-parsl-2025.9.22.data/scripts/parsl_coprocess.py,sha256=zrVjEqQvFOHxsLufPi00xzMONagjVwLZbavPM7bbjK4,5722
-parsl-2025.9.22.data/scripts/process_worker_pool.py,sha256=WGR9yr8EfBptQ6X-CvAPQEo1rlljD6peCc-za36-1xM,40674
-parsl-2025.9.22.dist-info/LICENSE,sha256=tAkwu8-AdEyGxGoSvJ2gVmQdcicWw3j1ZZueVV74M-E,11357
-parsl-2025.9.22.dist-info/METADATA,sha256=IpHlJRMpOyOsgV7Ig9GQssk2IETO6cSZWaqEifE29c0,4055
-parsl-2025.9.22.dist-info/WHEEL,sha256=tZoeGjtWxWRfdplE7E3d45VPlLNQnvbKiYnx7gwAy8A,92
-parsl-2025.9.22.dist-info/entry_points.txt,sha256=XqnsWDYoEcLbsMcpnYGKLEnSBmaIe1YoM5YsBdJG2tI,176
-parsl-2025.9.22.dist-info/top_level.txt,sha256=PIheYoUFQtF2icLsgOykgU-Cjuwr2Oi6On2jo5RYgRM,6
-parsl-2025.9.22.dist-info/RECORD,,
+parsl-2025.10.6.data/scripts/exec_parsl_function.py,sha256=YXKVVIa4zXmOtz-0Ca4E_5nQfN_3S2bh2tB75uZZB4w,7774
+parsl-2025.10.6.data/scripts/interchange.py,sha256=Kn0yJnpcRsc37gfhD6mGkoX9wD7vP_QgWst7qwUjj5o,26145
+parsl-2025.10.6.data/scripts/parsl_coprocess.py,sha256=zrVjEqQvFOHxsLufPi00xzMONagjVwLZbavPM7bbjK4,5722
+parsl-2025.10.6.data/scripts/process_worker_pool.py,sha256=tD01F96f3RCGz9kUe8fMq8g8DqeJe425n6V5tQ5fDPE,41361
+parsl-2025.10.6.dist-info/LICENSE,sha256=tAkwu8-AdEyGxGoSvJ2gVmQdcicWw3j1ZZueVV74M-E,11357
+parsl-2025.10.6.dist-info/METADATA,sha256=4MOap6ipA5ijRtv_KdjigUGPoPSc18UrwWkxf0HA29M,4006
+parsl-2025.10.6.dist-info/WHEEL,sha256=tZoeGjtWxWRfdplE7E3d45VPlLNQnvbKiYnx7gwAy8A,92
+parsl-2025.10.6.dist-info/entry_points.txt,sha256=XqnsWDYoEcLbsMcpnYGKLEnSBmaIe1YoM5YsBdJG2tI,176
+parsl-2025.10.6.dist-info/top_level.txt,sha256=PIheYoUFQtF2icLsgOykgU-Cjuwr2Oi6On2jo5RYgRM,6
+parsl-2025.10.6.dist-info/RECORD,,
parsl/tests/configs/local_threads_checkpoint_periodic.py DELETED
@@ -1,11 +0,0 @@
-from parsl.config import Config
-from parsl.executors.threads import ThreadPoolExecutor
-
-
-def fresh_config():
-    tpe = ThreadPoolExecutor(label='local_threads_checkpoint_periodic', max_threads=1)
-    return Config(
-        executors=[tpe],
-        checkpoint_mode='periodic',
-        checkpoint_period='00:00:02'
-    )
parsl/tests/configs/local_threads_no_cache.py DELETED
@@ -1,11 +0,0 @@
-from parsl.config import Config
-from parsl.executors.threads import ThreadPoolExecutor
-
-
-def fresh_config():
-    return Config(
-        executors=[
-            ThreadPoolExecutor(max_threads=4),
-        ],
-        app_cache=False
-    )