parsl 2025.1.6__py3-none-any.whl → 2025.1.20__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (38) hide show
  1. parsl/app/errors.py +3 -6
  2. parsl/app/futures.py +6 -8
  3. parsl/configs/gc_multisite.py +27 -0
  4. parsl/configs/gc_tutorial.py +18 -0
  5. parsl/dataflow/dflow.py +17 -22
  6. parsl/dataflow/errors.py +1 -4
  7. parsl/dataflow/rundirs.py +4 -5
  8. parsl/dataflow/taskrecord.py +1 -3
  9. parsl/executors/__init__.py +3 -1
  10. parsl/executors/globus_compute.py +125 -0
  11. parsl/executors/high_throughput/errors.py +2 -5
  12. parsl/executors/high_throughput/executor.py +2 -4
  13. parsl/executors/high_throughput/zmq_pipes.py +0 -1
  14. parsl/executors/taskvine/executor.py +0 -6
  15. parsl/executors/taskvine/factory_config.py +1 -1
  16. parsl/executors/taskvine/manager_config.py +2 -2
  17. parsl/executors/workqueue/executor.py +0 -6
  18. parsl/tests/configs/globus_compute.py +20 -0
  19. parsl/tests/conftest.py +4 -0
  20. parsl/tests/test_error_handling/test_resource_spec.py +3 -0
  21. parsl/tests/test_htex/test_interchange_exit_bad_registration.py +120 -0
  22. parsl/tests/test_htex/test_resource_spec_validation.py +0 -7
  23. parsl/tests/test_python_apps/test_dep_standard_futures.py +3 -0
  24. parsl/tests/test_python_apps/test_fail.py +16 -32
  25. parsl/tests/test_python_apps/test_join.py +6 -0
  26. parsl/tests/unit/test_globus_compute_executor.py +104 -0
  27. parsl/usage_tracking/usage.py +13 -8
  28. parsl/version.py +1 -1
  29. {parsl-2025.1.6.dist-info → parsl-2025.1.20.dist-info}/METADATA +7 -4
  30. {parsl-2025.1.6.dist-info → parsl-2025.1.20.dist-info}/RECORD +38 -32
  31. {parsl-2025.1.6.data → parsl-2025.1.20.data}/scripts/exec_parsl_function.py +0 -0
  32. {parsl-2025.1.6.data → parsl-2025.1.20.data}/scripts/interchange.py +0 -0
  33. {parsl-2025.1.6.data → parsl-2025.1.20.data}/scripts/parsl_coprocess.py +0 -0
  34. {parsl-2025.1.6.data → parsl-2025.1.20.data}/scripts/process_worker_pool.py +0 -0
  35. {parsl-2025.1.6.dist-info → parsl-2025.1.20.dist-info}/LICENSE +0 -0
  36. {parsl-2025.1.6.dist-info → parsl-2025.1.20.dist-info}/WHEEL +0 -0
  37. {parsl-2025.1.6.dist-info → parsl-2025.1.20.dist-info}/entry_points.txt +0 -0
  38. {parsl-2025.1.6.dist-info → parsl-2025.1.20.dist-info}/top_level.txt +0 -0
parsl/app/errors.py CHANGED
@@ -70,8 +70,8 @@ class MissingOutputs(ParslError):
70
70
  self.reason = reason
71
71
  self.outputs = outputs
72
72
 
73
- def __repr__(self) -> str:
74
- return "Missing Outputs: {0}, Reason:{1}".format(self.outputs, self.reason)
73
+ def __str__(self) -> str:
74
+ return "Missing Outputs: {0}, Reason: {1}".format(self.outputs, self.reason)
75
75
 
76
76
 
77
77
  class BadStdStreamFile(ParslError):
@@ -85,11 +85,8 @@ class BadStdStreamFile(ParslError):
85
85
  super().__init__(reason)
86
86
  self._reason = reason
87
87
 
88
- def __repr__(self) -> str:
89
- return "Bad Stream File: {}".format(self._reason)
90
-
91
88
  def __str__(self) -> str:
92
- return self.__repr__()
89
+ return "Bad Stream File: {}".format(self._reason)
93
90
 
94
91
 
95
92
  class RemoteExceptionWrapper:
parsl/app/futures.py CHANGED
@@ -2,7 +2,6 @@
2
2
  """
3
3
  import logging
4
4
  from concurrent.futures import Future
5
- from typing import Optional
6
5
 
7
6
  import typeguard
8
7
 
@@ -39,24 +38,23 @@ class DataFuture(Future):
39
38
  self.set_result(self.file_obj)
40
39
 
41
40
  @typeguard.typechecked
42
- def __init__(self, fut: Future, file_obj: File, tid: Optional[int] = None) -> None:
41
+ def __init__(self, fut: Future, file_obj: File, tid: int) -> None:
43
42
  """Construct the DataFuture object.
44
43
 
45
44
  If the file_obj is a string convert to a File.
46
45
 
47
46
  Args:
48
- - fut (AppFuture) : AppFuture that this DataFuture will track
49
- - file_obj (string/File obj) : Something representing file(s)
47
+ - fut (Future) : Future that this DataFuture will track.
48
+ Completion of ``fut`` indicates that the data is
49
+ ready.
50
+ - file_obj (File) : File that this DataFuture represents the availability of
50
51
 
51
52
  Kwargs:
52
53
  - tid (task_id) : Task id that this DataFuture tracks
53
54
  """
54
55
  super().__init__()
55
56
  self._tid = tid
56
- if isinstance(file_obj, File):
57
- self.file_obj = file_obj
58
- else:
59
- raise ValueError("DataFuture must be initialized with a File, not {}".format(type(file_obj)))
57
+ self.file_obj = file_obj
60
58
  self.parent = fut
61
59
 
62
60
  self.parent.add_done_callback(self.parent_callback)
@@ -0,0 +1,27 @@
1
+ from globus_compute_sdk import Executor
2
+
3
+ from parsl.config import Config
4
+ from parsl.executors import GlobusComputeExecutor
5
+ from parsl.usage_tracking.levels import LEVEL_1
6
+
7
+ # Please start your own endpoint on perlmutter following instructions below to use this config:
8
+ # https://globus-compute.readthedocs.io/en/stable/endpoints/endpoint_examples.html#perlmutter-nersc
9
+ perlmutter_endpoint = 'YOUR_PERLMUTTER_ENDPOINT_UUID'
10
+
11
+ # Please start your own endpoint on expanse following instructions below to use this config:
12
+ # https://globus-compute.readthedocs.io/en/stable/endpoints/endpoint_examples.html#expanse-sdsc
13
+ expanse_endpoint = 'YOUR_EXPANSE_ENDPOINT_UUID'
14
+
15
+ config = Config(
16
+ executors=[
17
+ GlobusComputeExecutor(
18
+ executor=Executor(endpoint_id=perlmutter_endpoint),
19
+ label="Perlmutter",
20
+ ),
21
+ GlobusComputeExecutor(
22
+ executor=Executor(endpoint_id=expanse_endpoint),
23
+ label="Expanse",
24
+ ),
25
+ ],
26
+ usage_tracking=LEVEL_1,
27
+ )
@@ -0,0 +1,18 @@
1
+ from globus_compute_sdk import Executor
2
+
3
+ from parsl.config import Config
4
+ from parsl.executors import GlobusComputeExecutor
5
+ from parsl.usage_tracking.levels import LEVEL_1
6
+
7
+ # Public tutorial endpoint
8
+ tutorial_endpoint = '4b116d3c-1703-4f8f-9f6f-39921e5864df'
9
+
10
+ config = Config(
11
+ executors=[
12
+ GlobusComputeExecutor(
13
+ executor=Executor(endpoint_id=tutorial_endpoint),
14
+ label="Tutorial_Endpoint_py3.11",
15
+ )
16
+ ],
17
+ usage_tracking=LEVEL_1,
18
+ )
parsl/dataflow/dflow.py CHANGED
@@ -484,24 +484,18 @@ class DataFlowKernel:
484
484
 
485
485
  # now we know each joinable Future is done
486
486
  # so now look for any exceptions
487
- exceptions_tids: List[Tuple[BaseException, Optional[str]]]
487
+ exceptions_tids: List[Tuple[BaseException, str]]
488
488
  exceptions_tids = []
489
489
  if isinstance(joinable, Future):
490
490
  je = joinable.exception()
491
491
  if je is not None:
492
- if hasattr(joinable, 'task_record'):
493
- tid = joinable.task_record['id']
494
- else:
495
- tid = None
492
+ tid = self.render_future_description(joinable)
496
493
  exceptions_tids = [(je, tid)]
497
494
  elif isinstance(joinable, list):
498
495
  for future in joinable:
499
496
  je = future.exception()
500
497
  if je is not None:
501
- if hasattr(joinable, 'task_record'):
502
- tid = joinable.task_record['id']
503
- else:
504
- tid = None
498
+ tid = self.render_future_description(future)
505
499
  exceptions_tids.append((je, tid))
506
500
  else:
507
501
  raise TypeError(f"Unknown joinable type {type(joinable)}")
@@ -918,13 +912,7 @@ class DataFlowKernel:
918
912
  dep_failures = []
919
913
 
920
914
  def append_failure(e: Exception, dep: Future) -> None:
921
- # If this Future is associated with a task inside this DFK,
922
- # then refer to the task ID.
923
- # Otherwise make a repr of the Future object.
924
- if hasattr(dep, 'task_record') and dep.task_record['dfk'] == self:
925
- tid = "task " + repr(dep.task_record['id'])
926
- else:
927
- tid = repr(dep)
915
+ tid = self.render_future_description(dep)
928
916
  dep_failures.extend([(e, tid)])
929
917
 
930
918
  # Replace item in args
@@ -1076,10 +1064,7 @@ class DataFlowKernel:
1076
1064
 
1077
1065
  depend_descs = []
1078
1066
  for d in depends:
1079
- if isinstance(d, AppFuture) or isinstance(d, DataFuture):
1080
- depend_descs.append("task {}".format(d.tid))
1081
- else:
1082
- depend_descs.append(repr(d))
1067
+ depend_descs.append(self.render_future_description(d))
1083
1068
 
1084
1069
  if depend_descs != []:
1085
1070
  waiting_message = "waiting on {}".format(", ".join(depend_descs))
@@ -1215,10 +1200,8 @@ class DataFlowKernel:
1215
1200
  self._checkpoint_timer.close()
1216
1201
 
1217
1202
  # Send final stats
1218
- logger.info("Sending end message for usage tracking")
1219
1203
  self.usage_tracker.send_end_message()
1220
1204
  self.usage_tracker.close()
1221
- logger.info("Closed usage tracking")
1222
1205
 
1223
1206
  logger.info("Closing job status poller")
1224
1207
  self.job_status_poller.close()
@@ -1438,6 +1421,18 @@ class DataFlowKernel:
1438
1421
  '' if label is None else '_{}'.format(label),
1439
1422
  kw))
1440
1423
 
1424
+ def render_future_description(self, dep: Future) -> str:
1425
+ """Renders a description of the future in the context of the
1426
+ current DFK.
1427
+ """
1428
+ if isinstance(dep, AppFuture) and dep.task_record['dfk'] == self:
1429
+ tid = "task " + repr(dep.task_record['id'])
1430
+ elif isinstance(dep, DataFuture):
1431
+ tid = "DataFuture from task " + repr(dep.tid)
1432
+ else:
1433
+ tid = repr(dep)
1434
+ return tid
1435
+
1441
1436
 
1442
1437
  class DataFlowKernelLoader:
1443
1438
  """Manage which DataFlowKernel is active.
parsl/dataflow/errors.py CHANGED
@@ -25,11 +25,8 @@ class BadCheckpoint(DataFlowException):
25
25
  def __init__(self, reason: str) -> None:
26
26
  self.reason = reason
27
27
 
28
- def __repr__(self) -> str:
29
- return self.reason
30
-
31
28
  def __str__(self) -> str:
32
- return self.__repr__()
29
+ return self.reason
33
30
 
34
31
 
35
32
  class DependencyError(DataFlowException):
parsl/dataflow/rundirs.py CHANGED
@@ -6,17 +6,16 @@ logger = logging.getLogger(__name__)
6
6
 
7
7
 
8
8
  def make_rundir(path: str) -> str:
9
- """When a path has not been specified, make the run directory.
9
+ """Create a numbered run directory under the specified path.
10
10
 
11
- Creates a rundir with the following hierarchy:
12
- ./runinfo <- Home of all run directories
11
+ ./runinfo <- specified path
13
12
  |----000
14
13
  |----001 <- Directories for each run
15
14
  | ....
16
15
  |----NNN
17
16
 
18
- Kwargs:
19
- - path (str): String path to a specific run dir
17
+ Args:
18
+ - path (str): String path to root of all rundirs
20
19
  """
21
20
  try:
22
21
  if not os.path.exists(path):
@@ -43,12 +43,11 @@ class TaskRecord(TypedDict, total=False):
43
43
  executed on.
44
44
  """
45
45
 
46
- retries_left: int
47
46
  fail_count: int
48
47
  fail_cost: float
49
48
  fail_history: List[str]
50
49
 
51
- checkpoint: bool # this change is also in #1516
50
+ checkpoint: bool
52
51
  """Should this task be checkpointed?
53
52
  """
54
53
 
@@ -68,7 +67,6 @@ class TaskRecord(TypedDict, total=False):
68
67
 
69
68
  # these three could be more strongly typed perhaps but I'm not thinking about that now
70
69
  func: Callable
71
- fn_hash: str
72
70
  args: Sequence[Any]
73
71
  # in some places we uses a Tuple[Any, ...] and in some places a List[Any].
74
72
  # This is an attempt to correctly type both of those.
@@ -1,4 +1,5 @@
1
1
  from parsl.executors.flux.executor import FluxExecutor
2
+ from parsl.executors.globus_compute import GlobusComputeExecutor
2
3
  from parsl.executors.high_throughput.executor import HighThroughputExecutor
3
4
  from parsl.executors.high_throughput.mpi_executor import MPIExecutor
4
5
  from parsl.executors.threads import ThreadPoolExecutor
@@ -8,4 +9,5 @@ __all__ = ['ThreadPoolExecutor',
8
9
  'HighThroughputExecutor',
9
10
  'MPIExecutor',
10
11
  'WorkQueueExecutor',
11
- 'FluxExecutor']
12
+ 'FluxExecutor',
13
+ 'GlobusComputeExecutor']
@@ -0,0 +1,125 @@
1
+ from __future__ import annotations
2
+
3
+ import copy
4
+ from concurrent.futures import Future
5
+ from typing import Any, Callable, Dict
6
+
7
+ import typeguard
8
+
9
+ from parsl.errors import OptionalModuleMissing
10
+ from parsl.executors.base import ParslExecutor
11
+ from parsl.utils import RepresentationMixin
12
+
13
+ try:
14
+ from globus_compute_sdk import Executor
15
+ _globus_compute_enabled = True
16
+ except ImportError:
17
+ _globus_compute_enabled = False
18
+
19
+
20
+ class GlobusComputeExecutor(ParslExecutor, RepresentationMixin):
21
+ """ GlobusComputeExecutor enables remote execution on Globus Compute endpoints
22
+
23
+ GlobusComputeExecutor is a thin wrapper over globus_compute_sdk.Executor
24
+ Refer to `globus-compute user documentation <https://globus-compute.readthedocs.io/en/latest/executor.html>`_
25
+ and `reference documentation <https://globus-compute.readthedocs.io/en/latest/reference/executor.html>`_
26
+ for more details.
27
+
28
+ .. note::
29
+ As a remote execution system, Globus Compute relies on serialization to ship
30
+ tasks and results between the Parsl client side and the remote Globus Compute
31
+ Endpoint side. Serialization is unreliable across python versions, and
32
+ wrappers used by Parsl assume identical Parsl versions on both sides.
33
+ We recommend using matching Python, Parsl and Globus Compute versions on both
34
+ the client side and the endpoint side for stable behavior.
35
+
36
+ """
37
+
38
+ @typeguard.typechecked
39
+ def __init__(
40
+ self,
41
+ executor: Executor,
42
+ label: str = 'GlobusComputeExecutor',
43
+ ):
44
+ """
45
+ Parameters
46
+ ----------
47
+
48
+ executor: globus_compute_sdk.Executor
49
+ Pass a globus_compute_sdk Executor that will be used to execute
50
+ tasks on a globus_compute endpoint. Refer to `globus-compute docs
51
+ <https://globus-compute.readthedocs.io/en/latest/reference/executor.html#globus-compute-executor>`_
52
+
53
+ label:
54
+ a label to name the executor
55
+ """
56
+ if not _globus_compute_enabled:
57
+ raise OptionalModuleMissing(
58
+ ['globus-compute-sdk'],
59
+ "GlobusComputeExecutor requires globus-compute-sdk installed"
60
+ )
61
+
62
+ super().__init__()
63
+ self.executor: Executor = executor
64
+ self.resource_specification = self.executor.resource_specification
65
+ self.user_endpoint_config = self.executor.user_endpoint_config
66
+ self.label = label
67
+
68
+ def start(self) -> None:
69
+ """ Start the Globus Compute Executor """
70
+ pass
71
+
72
+ def submit(self, func: Callable, resource_specification: Dict[str, Any], *args: Any, **kwargs: Any) -> Future:
73
+ """ Submit func to globus-compute
74
+
75
+
76
+ Parameters
77
+ ----------
78
+
79
+ func: Callable
80
+ Python function to execute remotely
81
+
82
+ resource_specification: Dict[str, Any]
83
+ Resource specification can be used to specify MPI resources required by MPI applications on
84
+ Endpoints configured to use globus compute's MPIEngine. GCE also accepts *user_endpoint_config*
85
+ to configure endpoints when the endpoint is a `Multi-User Endpoint
86
+ <https://globus-compute.readthedocs.io/en/latest/endpoints/endpoints.html#templating-endpoint-configuration>`_
87
+
88
+ args:
89
+ Args to pass to the function
90
+
91
+ kwargs:
92
+ kwargs to pass to the function
93
+
94
+ Returns
95
+ -------
96
+
97
+ Future
98
+ """
99
+ res_spec = copy.deepcopy(resource_specification or self.resource_specification)
100
+ # Pop user_endpoint_config since it is illegal in resource_spec for globus_compute
101
+ if res_spec:
102
+ user_endpoint_config = res_spec.pop('user_endpoint_config', self.user_endpoint_config)
103
+ else:
104
+ user_endpoint_config = self.user_endpoint_config
105
+
106
+ try:
107
+ self.executor.resource_specification = res_spec
108
+ self.executor.user_endpoint_config = user_endpoint_config
109
+ return self.executor.submit(func, *args, **kwargs)
110
+ finally:
111
+ # Reset executor state to defaults set at configuration time
112
+ self.executor.resource_specification = self.resource_specification
113
+ self.executor.user_endpoint_config = self.user_endpoint_config
114
+
115
+ def shutdown(self):
116
+ """Clean-up the resources associated with the Executor.
117
+
118
+ GCE.shutdown will cancel all futures that have not yet registered with
119
+ Globus Compute and will not wait for the launched futures to complete.
120
+ This method explicitly shuts down the result_watcher thread to avoid
121
+ it waiting for outstanding futures at thread exit.
122
+ """
123
+ self.executor.shutdown(wait=False, cancel_futures=True)
124
+ result_watcher = self.executor._get_result_watcher()
125
+ result_watcher.shutdown(wait=False, cancel_futures=True)
@@ -27,7 +27,7 @@ class VersionMismatch(Exception):
27
27
  def __str__(self) -> str:
28
28
  return (
29
29
  f"Manager version info {self.manager_version} does not match interchange"
30
- f" version info {self.interchange_version}, causing a critical failure"
30
+ f" version info {self.interchange_version}"
31
31
  )
32
32
 
33
33
 
@@ -38,11 +38,8 @@ class WorkerLost(Exception):
38
38
  self.worker_id = worker_id
39
39
  self.hostname = hostname
40
40
 
41
- def __repr__(self):
42
- return "Task failure due to loss of worker {} on host {}".format(self.worker_id, self.hostname)
43
-
44
41
  def __str__(self):
45
- return self.__repr__()
42
+ return "Task failure due to loss of worker {} on host {}".format(self.worker_id, self.hostname)
46
43
 
47
44
 
48
45
  class CommandClientTimeoutError(Exception):
@@ -8,7 +8,7 @@ import warnings
8
8
  from collections import defaultdict
9
9
  from concurrent.futures import Future
10
10
  from dataclasses import dataclass
11
- from typing import Callable, Dict, List, Optional, Sequence, Tuple, Union
11
+ from typing import Callable, Dict, List, Optional, Sequence, Set, Tuple, Union
12
12
 
13
13
  import typeguard
14
14
 
@@ -357,10 +357,8 @@ class HighThroughputExecutor(BlockProviderExecutor, RepresentationMixin, UsageIn
357
357
  return self.logdir
358
358
 
359
359
  def validate_resource_spec(self, resource_specification: dict):
360
- """HTEX supports the following *Optional* resource specifications:
361
- priority: lower value is higher priority"""
362
360
  if resource_specification:
363
- acceptable_fields = {'priority'}
361
+ acceptable_fields: Set[str] = set() # add new resource spec field names here to make htex accept them
364
362
  keys = set(resource_specification.keys())
365
363
  invalid_keys = keys - acceptable_fields
366
364
  if invalid_keys:
@@ -213,7 +213,6 @@ class ResultsIncoming:
213
213
  """Get a message from the queue, returning None if timeout expires
214
214
  without a message. timeout is measured in milliseconds.
215
215
  """
216
- logger.debug("Waiting for ResultsIncoming message")
217
216
  socks = dict(self.poller.poll(timeout=timeout_ms))
218
217
  if self.results_receiver in socks and socks[self.results_receiver] == zmq.POLLIN:
219
218
  m = self.results_receiver.recv_multipart()
@@ -582,12 +582,6 @@ class TaskVineExecutor(BlockProviderExecutor, putils.RepresentationMixin):
582
582
  logger.debug("TaskVine shutdown started")
583
583
  self._should_stop.set()
584
584
 
585
- # Remove the workers that are still going
586
- kill_ids = [self.blocks_to_job_id[block] for block in self.blocks_to_job_id.keys()]
587
- if self.provider:
588
- logger.debug("Cancelling blocks")
589
- self.provider.cancel(kill_ids)
590
-
591
585
  # Join all processes before exiting
592
586
  logger.debug("Joining on submit process")
593
587
  self._submit_process.join()
@@ -32,7 +32,7 @@ class TaskVineFactoryConfig:
32
32
 
33
33
  worker_options: Optional[str]
34
34
  Additional options to pass to workers. Run
35
- `vine_worker --help` for more details.
35
+ ``vine_worker --help`` for more details.
36
36
  Default is None.
37
37
 
38
38
  worker_executable: str
@@ -24,7 +24,7 @@ class TaskVineManagerConfig:
24
24
 
25
25
  address: Optional[str]
26
26
  Address of the local machine.
27
- If None, socket.gethostname() will be used to determine the address.
27
+ If None, :py:func:`parsl.addresses.get_any_address` will be used to determine the address.
28
28
 
29
29
  project_name: Optional[str]
30
30
  If given, TaskVine will periodically report its status and performance
@@ -56,7 +56,7 @@ class TaskVineManagerConfig:
56
56
  environment name is given, TaskVine will package the conda
57
57
  environment in a tarball and send it along with tasks to be
58
58
  executed in a replicated conda environment.
59
- If a tarball of packages (*.tar.gz) is given, TaskVine
59
+ If a tarball of packages (``*.tar.gz``) is given, TaskVine
60
60
  skips the packaging step and sends the tarball along with
61
61
  tasks to be executed in a replicated conda environment.
62
62
 
@@ -697,12 +697,6 @@ class WorkQueueExecutor(BlockProviderExecutor, putils.RepresentationMixin):
697
697
  logger.debug("Work Queue shutdown started")
698
698
  self.should_stop.value = True
699
699
 
700
- # Remove the workers that are still going
701
- kill_ids = [self.blocks_to_job_id[block] for block in self.blocks_to_job_id.keys()]
702
- if self.provider:
703
- logger.debug("Cancelling blocks")
704
- self.provider.cancel(kill_ids)
705
-
706
700
  logger.debug("Joining on submit process")
707
701
  self.submit_process.join()
708
702
  self.submit_process.close()
@@ -0,0 +1,20 @@
1
+ import os
2
+
3
+ from globus_compute_sdk import Executor
4
+
5
+ from parsl.config import Config
6
+ from parsl.executors import GlobusComputeExecutor
7
+
8
+
9
+ def fresh_config():
10
+
11
+ endpoint_id = os.environ["GLOBUS_COMPUTE_ENDPOINT"]
12
+
13
+ return Config(
14
+ executors=[
15
+ GlobusComputeExecutor(
16
+ executor=Executor(endpoint_id=endpoint_id),
17
+ label="globus_compute",
18
+ )
19
+ ]
20
+ )
parsl/tests/conftest.py CHANGED
@@ -163,6 +163,10 @@ def pytest_configure(config):
163
163
  'markers',
164
164
  'shared_fs: Marks tests that require a shared_fs between the workers are the test client'
165
165
  )
166
+ config.addinivalue_line(
167
+ 'markers',
168
+ 'issue_3620: Marks tests that do not work correctly on GlobusComputeExecutor (ref: issue 3620)'
169
+ )
166
170
 
167
171
 
168
172
  @pytest.fixture(autouse=True, scope='session')
@@ -1,3 +1,5 @@
1
+ import pytest
2
+
1
3
  import parsl
2
4
  from parsl.app.app import python_app
3
5
  from parsl.executors import WorkQueueExecutor
@@ -11,6 +13,7 @@ def double(x, parsl_resource_specification={}):
11
13
  return x * 2
12
14
 
13
15
 
16
+ @pytest.mark.issue_3620
14
17
  def test_resource(n=2):
15
18
  executors = parsl.dfk().executors
16
19
  executor = None
@@ -0,0 +1,120 @@
1
+ import json
2
+ import logging
3
+ import os
4
+ import pickle
5
+ import platform
6
+ import subprocess
7
+ import time
8
+
9
+ import psutil
10
+ import pytest
11
+ import zmq
12
+
13
+ import parsl.executors.high_throughput.zmq_pipes as zmq_pipes
14
+ from parsl.executors.high_throughput.executor import DEFAULT_INTERCHANGE_LAUNCH_CMD
15
+ from parsl.executors.high_throughput.manager_selector import RandomManagerSelector
16
+ from parsl.version import VERSION as PARSL_VERSION
17
+
18
+ P_ms = 10
19
+
20
+
21
+ @pytest.mark.local
22
+ def test_exit_with_bad_registration(tmpd_cwd, try_assert):
23
+ """Test that the interchange exits when it receives a bad registration message.
24
+ This complements parsl/tests/test_scaling/test_worker_interchange_bad_messages_3262.py
25
+ which tests that the interchange is resistant to other forms of bad message.
26
+ """
27
+
28
+ outgoing_q = zmq_pipes.TasksOutgoing(
29
+ "127.0.0.1", (49152, 65535), None
30
+ )
31
+ incoming_q = zmq_pipes.ResultsIncoming(
32
+ "127.0.0.1", (49152, 65535), None
33
+ )
34
+ command_client = zmq_pipes.CommandClient(
35
+ "127.0.0.1", (49152, 65535), None
36
+ )
37
+
38
+ interchange_config = {"client_address": "127.0.0.1",
39
+ "client_ports": (outgoing_q.port,
40
+ incoming_q.port,
41
+ command_client.port),
42
+ "interchange_address": "127.0.0.1",
43
+ "worker_ports": None,
44
+ "worker_port_range": (50000, 60000),
45
+ "hub_address": None,
46
+ "hub_zmq_port": None,
47
+ "logdir": tmpd_cwd,
48
+ "heartbeat_threshold": 120,
49
+ "poll_period": P_ms,
50
+ "logging_level": logging.DEBUG,
51
+ "cert_dir": None,
52
+ "manager_selector": RandomManagerSelector(),
53
+ "run_id": "test"
54
+ }
55
+
56
+ config_pickle = pickle.dumps(interchange_config)
57
+
58
+ interchange_proc = subprocess.Popen(DEFAULT_INTERCHANGE_LAUNCH_CMD, stdin=subprocess.PIPE)
59
+ stdin = interchange_proc.stdin
60
+ assert stdin is not None, "Popen should have created an IO object (vs default None) because of PIPE mode"
61
+
62
+ stdin.write(config_pickle)
63
+ stdin.flush()
64
+ stdin.close()
65
+
66
+ # wait for interchange to be alive, by waiting for the command thread to become
67
+ # responsive. if the interchange process didn't start enough to get the command
68
+ # thread running, this will time out.
69
+
70
+ (task_port, result_port) = command_client.run("WORKER_PORTS", timeout_s=120)
71
+
72
+ # now we'll assume that if the interchange command thread is responding,
73
+ # then the worker polling code is also running and that the interchange has
74
+ # started successfully.
75
+
76
+ # send bad registration message as if from a new worker pool. The badness here
77
+ # is that the Python version does not match the real Python version - which
78
+ # unlike some other bad interchange messages, should cause the interchange
79
+ # to shut down.
80
+
81
+ msg = {'type': 'registration',
82
+ 'parsl_v': PARSL_VERSION,
83
+ 'python_v': "{}.{}.{}".format(1, 1, 1), # this is the bad bit
84
+ 'worker_count': 1,
85
+ 'uid': 'testuid',
86
+ 'block_id': 0,
87
+ 'start_time': time.time(),
88
+ 'prefetch_capacity': 0,
89
+ 'max_capacity': 1,
90
+ 'os': platform.system(),
91
+ 'hostname': platform.node(),
92
+ 'dir': os.getcwd(),
93
+ 'cpu_count': psutil.cpu_count(logical=False),
94
+ 'total_memory': psutil.virtual_memory().total,
95
+ }
96
+
97
+ # connect to worker port and send this message.
98
+
99
+ context = zmq.Context()
100
+ channel_timeout = 10000 # in milliseconds
101
+ task_channel = context.socket(zmq.DEALER)
102
+ task_channel.setsockopt(zmq.LINGER, 0)
103
+ task_channel.setsockopt(zmq.IDENTITY, b'testid')
104
+
105
+ task_channel.set_hwm(0)
106
+ task_channel.setsockopt(zmq.SNDTIMEO, channel_timeout)
107
+ task_channel.connect(f"tcp://127.0.0.1:{task_port}")
108
+
109
+ b_msg = json.dumps(msg).encode('utf-8')
110
+
111
+ task_channel.send(b_msg)
112
+
113
+ # check that the interchange exits within some reasonable time
114
+ try_assert(lambda: interchange_proc.poll() is not None, "Interchange did not exit after killing watched client process", timeout_ms=5000)
115
+
116
+ # See issue #3697 - ideally the interchange would exit cleanly, but it does not.
117
+ # assert interchange_proc.poll() == 0, "Interchange exited with an error code, not 0"
118
+
119
+ task_channel.close()
120
+ context.term()
@@ -30,13 +30,6 @@ def test_resource_spec_validation():
30
30
  assert ret_val is None
31
31
 
32
32
 
33
- @pytest.mark.local
34
- def test_resource_spec_validation_one_key():
35
- htex = HighThroughputExecutor()
36
- ret_val = htex.validate_resource_spec({"priority": 2})
37
- assert ret_val is None
38
-
39
-
40
33
  @pytest.mark.local
41
34
  def test_resource_spec_validation_bad_keys():
42
35
  htex = HighThroughputExecutor()
@@ -43,3 +43,6 @@ def test_future_fail_dependency():
43
43
  # Future, plain_fut, somewhere in its str
44
44
 
45
45
  assert repr(plain_fut) in str(ex)
46
+ assert len(ex.dependent_exceptions_tids) == 1
47
+ assert isinstance(ex.dependent_exceptions_tids[0][0], ValueError)
48
+ assert ex.dependent_exceptions_tids[0][1].startswith("<Future ")
@@ -27,42 +27,26 @@ def test_no_deps():
27
27
  pass
28
28
 
29
29
 
30
- @pytest.mark.parametrize("fail_probs", ((1, 0), (0, 1)))
31
- def test_fail_sequence(fail_probs):
32
- """Test failure in a sequence of dependencies
30
+ def test_fail_sequence_first():
31
+ t1 = random_fail(fail_prob=1)
32
+ t2 = random_fail(fail_prob=0, inputs=[t1])
33
+ t_final = random_fail(fail_prob=0, inputs=[t2])
33
34
 
34
- App1 -> App2 ... -> AppN
35
- """
35
+ with pytest.raises(DependencyError):
36
+ t_final.result()
36
37
 
37
- t1_fail_prob, t2_fail_prob = fail_probs
38
- t1 = random_fail(fail_prob=t1_fail_prob)
39
- t2 = random_fail(fail_prob=t2_fail_prob, inputs=[t1])
38
+ assert len(t_final.exception().dependent_exceptions_tids) == 1
39
+ assert isinstance(t_final.exception().dependent_exceptions_tids[0][0], DependencyError)
40
+ assert t_final.exception().dependent_exceptions_tids[0][1].startswith("task ")
41
+
42
+
43
+ def test_fail_sequence_middle():
44
+ t1 = random_fail(fail_prob=0)
45
+ t2 = random_fail(fail_prob=1, inputs=[t1])
40
46
  t_final = random_fail(fail_prob=0, inputs=[t2])
41
47
 
42
48
  with pytest.raises(DependencyError):
43
49
  t_final.result()
44
50
 
45
-
46
- def test_deps(width=3):
47
- """Random failures in branches of Map -> Map -> reduce"""
48
- # App1 App2 ... AppN
49
- futs = [random_fail(fail_prob=0.4) for _ in range(width)]
50
-
51
- # App1 App2 ... AppN
52
- # | | |
53
- # V V V
54
- # App1 App2 ... AppN
55
-
56
- futs = [random_fail(fail_prob=0.8, inputs=[f]) for f in futs]
57
-
58
- # App1 App2 ... AppN
59
- # | | |
60
- # V V V
61
- # App1 App2 ... AppN
62
- # \ | /
63
- # \ | /
64
- # App_Final
65
- try:
66
- random_fail(fail_prob=0, inputs=futs).result()
67
- except DependencyError:
68
- pass
51
+ assert len(t_final.exception().dependent_exceptions_tids) == 1
52
+ assert isinstance(t_final.exception().dependent_exceptions_tids[0][0], ManufacturedTestFailure)
@@ -97,7 +97,10 @@ def test_error():
97
97
  f = outer_error()
98
98
  e = f.exception()
99
99
  assert isinstance(e, JoinError)
100
+
101
+ assert len(e.dependent_exceptions_tids) == 1
100
102
  assert isinstance(e.dependent_exceptions_tids[0][0], InnerError)
103
+ assert e.dependent_exceptions_tids[0][1].startswith("task ")
101
104
 
102
105
 
103
106
  def test_two_errors():
@@ -109,10 +112,12 @@ def test_two_errors():
109
112
  de0 = e.dependent_exceptions_tids[0][0]
110
113
  assert isinstance(de0, InnerError)
111
114
  assert de0.args[0] == "Error A"
115
+ assert e.dependent_exceptions_tids[0][1].startswith("task ")
112
116
 
113
117
  de1 = e.dependent_exceptions_tids[1][0]
114
118
  assert isinstance(de1, InnerError)
115
119
  assert de1.args[0] == "Error B"
120
+ assert e.dependent_exceptions_tids[1][1].startswith("task ")
116
121
 
117
122
 
118
123
  def test_one_error_one_result():
@@ -125,6 +130,7 @@ def test_one_error_one_result():
125
130
  de0 = e.dependent_exceptions_tids[0][0]
126
131
  assert isinstance(de0, InnerError)
127
132
  assert de0.args[0] == "Error A"
133
+ assert e.dependent_exceptions_tids[0][1].startswith("task ")
128
134
 
129
135
 
130
136
  @join_app
@@ -0,0 +1,104 @@
1
+ import random
2
+ from unittest import mock
3
+
4
+ import pytest
5
+ from globus_compute_sdk import Executor
6
+
7
+ from parsl.executors import GlobusComputeExecutor
8
+
9
+
10
+ @pytest.fixture
11
+ def mock_ex():
12
+ # Not Parsl's job to test GC's Executor
13
+ yield mock.Mock(spec=Executor)
14
+
15
+
16
+ @pytest.mark.local
17
+ def test_gc_executor_mock_spec(mock_ex):
18
+ # a test of tests -- make sure we're using spec= in the mock
19
+ with pytest.raises(AttributeError):
20
+ mock_ex.aasdf()
21
+
22
+
23
+ @pytest.mark.local
24
+ def test_gc_executor_label_default(mock_ex):
25
+ gce = GlobusComputeExecutor(mock_ex)
26
+ assert gce.label == type(gce).__name__, "Expect reasonable default label"
27
+
28
+
29
+ @pytest.mark.local
30
+ def test_gc_executor_label(mock_ex, randomstring):
31
+ exp_label = randomstring()
32
+ gce = GlobusComputeExecutor(mock_ex, label=exp_label)
33
+ assert gce.label == exp_label
34
+
35
+
36
+ @pytest.mark.local
37
+ def test_gc_executor_resets_spec_after_submit(mock_ex, randomstring):
38
+ submit_res = {randomstring(): "some submit res"}
39
+ res = {"some": randomstring(), "spec": randomstring()}
40
+ mock_ex.resource_specification = res
41
+ mock_ex.user_endpoint_config = None
42
+ gce = GlobusComputeExecutor(mock_ex)
43
+
44
+ fn = mock.Mock()
45
+ orig_res = mock_ex.resource_specification
46
+ orig_uep = mock_ex.user_endpoint_config
47
+
48
+ def mock_submit(*a, **k):
49
+ assert mock_ex.resource_specification == submit_res, "Expect set for submission"
50
+ assert mock_ex.user_endpoint_config is None
51
+ mock_ex.submit.side_effect = mock_submit
52
+
53
+ gce.submit(fn, resource_specification=submit_res)
54
+
55
+ assert mock_ex.resource_specification == orig_res
56
+ assert mock_ex.user_endpoint_config is orig_uep
57
+
58
+
59
+ @pytest.mark.local
60
+ def test_gc_executor_resets_uep_after_submit(mock_ex, randomstring):
61
+ uep_conf = randomstring()
62
+ res = {"some": randomstring()}
63
+ gce = GlobusComputeExecutor(mock_ex)
64
+
65
+ fn = mock.Mock()
66
+ orig_res = mock_ex.resource_specification
67
+ orig_uep = mock_ex.user_endpoint_config
68
+
69
+ def mock_submit(*a, **k):
70
+
71
+ assert mock_ex.resource_specification == res, "Expect set for submission"
72
+ assert mock_ex.user_endpoint_config == uep_conf, "Expect set for submission"
73
+ mock_ex.submit.side_effect = mock_submit
74
+
75
+ gce.submit(fn, resource_specification={"user_endpoint_config": uep_conf, **res})
76
+
77
+ assert mock_ex.resource_specification == orig_res
78
+ assert mock_ex.user_endpoint_config is orig_uep
79
+
80
+
81
+ @pytest.mark.local
82
+ def test_gc_executor_happy_path(mock_ex, randomstring):
83
+ mock_fn = mock.Mock()
84
+ args = tuple(randomstring() for _ in range(random.randint(0, 3)))
85
+ kwargs = {randomstring(): randomstring() for _ in range(random.randint(0, 3))}
86
+
87
+ gce = GlobusComputeExecutor(mock_ex)
88
+ gce.submit(mock_fn, {}, *args, **kwargs)
89
+
90
+ assert mock_ex.submit.called, "Expect proxying of args to underlying executor"
91
+ found_a, found_k = mock_ex.submit.call_args
92
+ assert found_a[0] is mock_fn
93
+ assert found_a[1:] == args
94
+ assert found_k == kwargs
95
+
96
+
97
+ @pytest.mark.local
98
+ def test_gc_executor_shuts_down_asynchronously(mock_ex):
99
+ gce = GlobusComputeExecutor(mock_ex)
100
+ gce.shutdown()
101
+ assert mock_ex.shutdown.called
102
+ a, k = mock_ex.shutdown.call_args
103
+ assert k["wait"] is False
104
+ assert k["cancel_futures"] is True
@@ -213,12 +213,14 @@ class UsageTracker:
213
213
 
214
214
  def send_start_message(self) -> None:
215
215
  if self.tracking_level:
216
+ logger.info("Sending start message for usage tracking")
216
217
  self.start_time = time.time()
217
218
  message = self.construct_start_message()
218
219
  self.send_UDP_message(message)
219
220
 
220
221
  def send_end_message(self) -> None:
221
222
  if self.tracking_level == 3:
223
+ logger.info("Sending end message for usage tracking")
222
224
  message = self.construct_end_message()
223
225
  self.send_UDP_message(message)
224
226
 
@@ -229,11 +231,14 @@ class UsageTracker:
229
231
  definitely either: going to behave broadly the same as to SIGKILL,
230
232
  or won't respond to SIGTERM.
231
233
  """
232
- for proc in self.procs:
233
- logger.debug("Joining usage tracking process %s", proc)
234
- proc.join(timeout=timeout)
235
- if proc.is_alive():
236
- logger.warning("Usage tracking process did not end itself; sending SIGKILL")
237
- proc.kill()
238
-
239
- proc.close()
234
+ if self.tracking_level:
235
+ logger.info("Closing usage tracking")
236
+
237
+ for proc in self.procs:
238
+ logger.debug("Joining usage tracking process %s", proc)
239
+ proc.join(timeout=timeout)
240
+ if proc.is_alive():
241
+ logger.warning("Usage tracking process did not end itself; sending SIGKILL")
242
+ proc.kill()
243
+
244
+ proc.close()
parsl/version.py CHANGED
@@ -3,4 +3,4 @@
3
3
  Year.Month.Day[alpha/beta/..]
4
4
  Alphas will be numbered like this -> 2024.12.10a0
5
5
  """
6
- VERSION = '2025.01.06'
6
+ VERSION = '2025.01.20'
@@ -1,9 +1,9 @@
1
1
  Metadata-Version: 2.1
2
2
  Name: parsl
3
- Version: 2025.1.6
3
+ Version: 2025.1.20
4
4
  Summary: Simple data dependent workflows in Python
5
5
  Home-page: https://github.com/Parsl/parsl
6
- Download-URL: https://github.com/Parsl/parsl/archive/2025.01.06.tar.gz
6
+ Download-URL: https://github.com/Parsl/parsl/archive/2025.01.20.tar.gz
7
7
  Author: The Parsl Team
8
8
  Author-email: parsl@googlegroups.com
9
9
  License: Apache 2.0
@@ -33,7 +33,7 @@ Requires-Dist: pydot>=1.4.2; extra == "all"
33
33
  Requires-Dist: networkx<3.3,>=3.2; extra == "all"
34
34
  Requires-Dist: Flask>=1.0.2; extra == "all"
35
35
  Requires-Dist: flask-sqlalchemy; extra == "all"
36
- Requires-Dist: pandas<2.2; extra == "all"
36
+ Requires-Dist: pandas<3,>=2.2; extra == "all"
37
37
  Requires-Dist: plotly; extra == "all"
38
38
  Requires-Dist: python-daemon; extra == "all"
39
39
  Requires-Dist: boto3; extra == "all"
@@ -54,6 +54,7 @@ Requires-Dist: jsonschema; extra == "all"
54
54
  Requires-Dist: proxystore; extra == "all"
55
55
  Requires-Dist: radical.pilot==1.90; extra == "all"
56
56
  Requires-Dist: radical.utils==1.90; extra == "all"
57
+ Requires-Dist: globus-compute-sdk>=2.34.0; extra == "all"
57
58
  Provides-Extra: aws
58
59
  Requires-Dist: boto3; extra == "aws"
59
60
  Provides-Extra: azure
@@ -68,6 +69,8 @@ Provides-Extra: flux
68
69
  Requires-Dist: pyyaml; extra == "flux"
69
70
  Requires-Dist: cffi; extra == "flux"
70
71
  Requires-Dist: jsonschema; extra == "flux"
72
+ Provides-Extra: globus_compute
73
+ Requires-Dist: globus-compute-sdk>=2.34.0; extra == "globus-compute"
71
74
  Provides-Extra: google_cloud
72
75
  Requires-Dist: google-auth; extra == "google-cloud"
73
76
  Requires-Dist: google-api-python-client; extra == "google-cloud"
@@ -87,7 +90,7 @@ Requires-Dist: pydot>=1.4.2; extra == "visualization"
87
90
  Requires-Dist: networkx<3.3,>=3.2; extra == "visualization"
88
91
  Requires-Dist: Flask>=1.0.2; extra == "visualization"
89
92
  Requires-Dist: flask-sqlalchemy; extra == "visualization"
90
- Requires-Dist: pandas<2.2; extra == "visualization"
93
+ Requires-Dist: pandas<3,>=2.2; extra == "visualization"
91
94
  Requires-Dist: plotly; extra == "visualization"
92
95
  Requires-Dist: python-daemon; extra == "visualization"
93
96
  Provides-Extra: workqueue
@@ -8,12 +8,12 @@ parsl/multiprocessing.py,sha256=MyaEcEq-Qf860u7V98u-PZrPNdtzOZL_NW6EhIJnmfQ,1937
8
8
  parsl/process_loggers.py,sha256=uQ7Gd0W72Jz7rrcYlOMfLsAEhkRltxXJL2MgdduJjEw,1136
9
9
  parsl/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
10
10
  parsl/utils.py,sha256=5FvHIMao3Ik0Rm2p2ieL1KQcQcYXc5K83Jrx5csi-B4,14301
11
- parsl/version.py,sha256=lIhZrp6NnBCcbBMgntHRdPaNhpDBWe88rZRBa8C0pSw,131
11
+ parsl/version.py,sha256=B68kDKT369JEJ5yXX5Ue1dD455x8WPmdaddBARy1bBc,131
12
12
  parsl/app/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
13
13
  parsl/app/app.py,sha256=0gbM4AH2OtFOLsv07I5nglpElcwMSOi-FzdZZfrk7So,8532
14
14
  parsl/app/bash.py,sha256=jm2AvePlCT9DZR7H_4ANDWxatp5dN_22FUlT_gWhZ-g,5528
15
- parsl/app/errors.py,sha256=nJmOEPglAISfD3R1UsTZH-avqiSOJgx_DkpdL9B591w,3917
16
- parsl/app/futures.py,sha256=XU1NwkoNVsxy3KF5y0Ihsla5hPbhhuSikZInfS7h7Uo,2910
15
+ parsl/app/errors.py,sha256=SQQ1fNp8834DZnoRnlsoZn1WMAFM3fnh2CNHRPmFcKc,3854
16
+ parsl/app/futures.py,sha256=2tMUeKIuDzwuhLIWlsEiZuDrhkxxsUed4QUbQuQg20Y,2826
17
17
  parsl/app/python.py,sha256=0hrz2BppVOwwNfh5hnoP70Yv56gSRkIoT-fP9XNb4v4,2331
18
18
  parsl/benchmark/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
19
19
  parsl/benchmark/perf.py,sha256=kKXefDozWXSJKSNA7qdfUgEoacA2-R9kSZcI2YvZ5uE,3096
@@ -26,6 +26,8 @@ parsl/configs/cc_in2p3.py,sha256=8KQ18URTKX5o16k66_h-TRCRQeh7Vrsi-s2d_AuOBHs,705
26
26
  parsl/configs/ec2.py,sha256=5xtlZI4Fc558sYXdM4nQQvQDBNPdzhRRCO14F-8H7Y4,944
27
27
  parsl/configs/expanse.py,sha256=ADUY3GZWSfVKmqFWbgdfC85kRxNPChqOGwly0XdcKSw,1033
28
28
  parsl/configs/frontera.py,sha256=HkZ3sFvFqKrk8kdxMonbUiWjGaZxz3vgvhtgg6_0vpY,1415
29
+ parsl/configs/gc_multisite.py,sha256=831wrArZ-cUHB7eucpGDtgqvfgXV4lTjKYy7sN8rtDY,1001
30
+ parsl/configs/gc_tutorial.py,sha256=2BnWVmblSG_dxeO2aBez3IeaBk5Phk-X0ZtJXWxZpLI,478
29
31
  parsl/configs/htex_local.py,sha256=ktc1RlF1ig0QHDJHUl8ZROutSle-5f1yYYod8RFZcaY,462
30
32
  parsl/configs/illinoiscluster.py,sha256=ZR22A8uwFb8tzSzmU1D0kR0qcr5Thr0j-7Nb5hiCgQ8,1170
31
33
  parsl/configs/improv.py,sha256=le9fDip-Mr-HqKObiyHXbdR-Ne7cy15Ao5ONoUzCSaE,1252
@@ -51,17 +53,18 @@ parsl/data_provider/staging.py,sha256=ZDZuuFg38pjUStegKPcvPsfGp3iMeReMzfU6DSwtJj
51
53
  parsl/data_provider/zip.py,sha256=S4kVuH9lxAegRURYbvIUR7EYYBOccyslaqyCrVWUBhw,4497
52
54
  parsl/dataflow/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
53
55
  parsl/dataflow/dependency_resolvers.py,sha256=Om8Dgh7a0ZwgXAc6TlhxLSzvxXHDlNNV1aBNiD3JTNY,3325
54
- parsl/dataflow/dflow.py,sha256=HClWgi9BpEcFhe3IBEkTNaIV8JtYTBc1oSmjtOkIdh8,65253
55
- parsl/dataflow/errors.py,sha256=9SxVhIJY_53FQx8x4OU8UA8nd7lvUbDllH7KfMXpYaY,2177
56
+ parsl/dataflow/dflow.py,sha256=tbAFEcLgKQp4IUwMhH1fYVxYU_e3_jw8ENElEA9kdeE,64928
57
+ parsl/dataflow/errors.py,sha256=vzgEEFqIfIQ3_QGvqDCcsACZZlUKNAKaMchM30TGHyY,2114
56
58
  parsl/dataflow/futures.py,sha256=08LuP-HFiHBIZmeKCjlsazw_WpQ5fwevrU2_WbidkYw,6080
57
59
  parsl/dataflow/memoization.py,sha256=l9uw1Bu50GucBF70M5relpGKFkE4dIM9T3R1KrxW0v0,9583
58
- parsl/dataflow/rundirs.py,sha256=7aUg1cb0LLTocQxOdBzwtn7a8bIgpdMD5rjZV55UwaQ,1154
60
+ parsl/dataflow/rundirs.py,sha256=JZdzybVGubY35jL2YiKcDo65ZmRl1WyOApc8ajYxztc,1087
59
61
  parsl/dataflow/states.py,sha256=hV6mfv-y4A6xrujeQglcomnfEs7y3Xm2g6JFwC6dvgQ,2612
60
- parsl/dataflow/taskrecord.py,sha256=-FuujdZQ1y5GSc-PJ91QKGT-Kp0lrg70MFDoxpbWI1Q,3113
61
- parsl/executors/__init__.py,sha256=Cg8e-F2NUaBD8A9crDAXKCSdoBEwQVIdgm4FlXd-wvk,476
62
+ parsl/dataflow/taskrecord.py,sha256=qIW7T6hn9dYTuNPdUura3HQwwUpUJACwPP5REm5COf4,3042
63
+ parsl/executors/__init__.py,sha256=PEuXYrnVqwlaz_nt82s9D_YNaVsX7ET29DeIZRUR8hw,577
62
64
  parsl/executors/base.py,sha256=jYEa5nS1_Vn8k3A92TCSYG0n2zeHnN6KiuVWsILl4sE,5205
63
65
  parsl/executors/errors.py,sha256=ZxL3nK5samPos8Xixo_jpRtPIiRJfZ5D397_qaXj2g0,2515
64
66
  parsl/executors/execute_task.py,sha256=PtqHxk778UQaNah1AN-TJV5emZbOcU5TGtWDxFn3_F4,1079
67
+ parsl/executors/globus_compute.py,sha256=giyCyq5KWK_o4nhQJBX2Xm9FatdpdKrgloZuHhN0Zwg,4840
65
68
  parsl/executors/status_handling.py,sha256=nxbkiGr6f3xDc0nsUeSrMMxlj7UD32K7nOLCLzfthDs,15416
66
69
  parsl/executors/threads.py,sha256=_LA5NA3GSvtjDend-1HVpjoDoNHHW13rAD0CET99fjQ,3463
67
70
  parsl/executors/flux/__init__.py,sha256=P9grTTeRPXfqXurFhlSS7XhmE6tTbnCnyQ1f9b-oYHE,136
@@ -69,8 +72,8 @@ parsl/executors/flux/execute_parsl_task.py,sha256=zHP5M7ILGiwnoalZ8WsfVVdZM7uP4i
69
72
  parsl/executors/flux/executor.py,sha256=8_xakLUu5zNJAHL0LbeTCFEWqWzRK1eE-3ep4GIIIrY,17017
70
73
  parsl/executors/flux/flux_instance_manager.py,sha256=5T3Rp7ZM-mlT0Pf0Gxgs5_YmnaPrSF9ec7zvRfLfYJw,2129
71
74
  parsl/executors/high_throughput/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
72
- parsl/executors/high_throughput/errors.py,sha256=Sak8e8UpiEcXefUjMHbhyXc4Rn7kJtOoh7L8wreBQdk,1638
73
- parsl/executors/high_throughput/executor.py,sha256=kueKbVVblvXcbXsHEnmQXMkvdfuF6IpwV7-clp2Tkzk,38393
75
+ parsl/executors/high_throughput/errors.py,sha256=k2XuvvFdUfNs2foHFnxmS-BToRMfdXpYEa4EF3ELKq4,1554
76
+ parsl/executors/high_throughput/executor.py,sha256=xzNW4X9Zn10o0FsY8rnIuHCuaBu0vaKEXqMpe-2jlaA,38341
74
77
  parsl/executors/high_throughput/interchange.py,sha256=i0TYgAQzcnzRnSdKal9M9d6TcWRoIWK77kuxMsiYxXE,30142
75
78
  parsl/executors/high_throughput/manager_record.py,sha256=yn3L8TUJFkgm2lX1x0SeS9mkvJowC0s2VIMCFiU7ThM,455
76
79
  parsl/executors/high_throughput/manager_selector.py,sha256=UKcUE6v0tO7PDMTThpKSKxVpOpOUilxDL7UbNgpZCxo,2116
@@ -80,7 +83,7 @@ parsl/executors/high_throughput/mpi_prefix_composer.py,sha256=DmpKugANNa1bdYlqQB
80
83
  parsl/executors/high_throughput/mpi_resource_management.py,sha256=hqotZLn3Q_iPRfMVmvvpKiGdguw55iYq1L_Gp9x6y4Y,7790
81
84
  parsl/executors/high_throughput/probe.py,sha256=QOEaliO3x5cB6ltMOZMsZQ-ath9AAuFqXcBzRgWOM60,2754
82
85
  parsl/executors/high_throughput/process_worker_pool.py,sha256=3zXe3_X5GvbTOlfeJJD_E0ssfJqkAfkqXHfeU7mymdI,41865
83
- parsl/executors/high_throughput/zmq_pipes.py,sha256=XtNTxbnt9UHfhiduj9f3UkviYRebUy4ierhEGvowB8k,9040
86
+ parsl/executors/high_throughput/zmq_pipes.py,sha256=NUK25IEh0UkxzdqQQyM8tMtuZmjSiTeWu1DzkkAIOhA,8980
84
87
  parsl/executors/radical/__init__.py,sha256=CKbtV2numw5QvgIBq1htMUrt9TqDCIC2zifyf2svTNU,186
85
88
  parsl/executors/radical/executor.py,sha256=en2TKzZnJYU_juojkM_aZUdWhbAgutAYn_EL6HGpfSY,22835
86
89
  parsl/executors/radical/rpex_resources.py,sha256=Q7-0u3K447LBCe2y7mVcdw6jqWI7SdPXxCKhkr6FoRQ,5139
@@ -88,16 +91,16 @@ parsl/executors/radical/rpex_worker.py,sha256=z6r82ZujKb6sdKIdHsQ_5EBMDIQieeGcrl
88
91
  parsl/executors/taskvine/__init__.py,sha256=9rwp3M8B0YyEhZMLO0RHaNw7u1nc01WHbXLqnBTanu0,293
89
92
  parsl/executors/taskvine/errors.py,sha256=euIYkSslrNSI85kyi2s0xzOaO9ik4c1fYHstMIeiBJk,652
90
93
  parsl/executors/taskvine/exec_parsl_function.py,sha256=ftGdJU78lKPPkphSHlEi4rj164mhuMHJjghVqfgeXKk,7085
91
- parsl/executors/taskvine/executor.py,sha256=y1x44p_GRlaOqLr0J92ungU3CuDeull6MW-lEedzu2M,31164
94
+ parsl/executors/taskvine/executor.py,sha256=4c0mt83G-F4ZFMxhdJByvYjG05QdLrLYYHsmpPXY6YE,30906
92
95
  parsl/executors/taskvine/factory.py,sha256=rWpEoFphLzqO3HEYyDEbQa14iyvgkdZg7hLZuaY39gQ,2638
93
- parsl/executors/taskvine/factory_config.py,sha256=AbE2fN2snrF5ITYrrS4DnGn2XkJHUFr_17DYHDHIwq0,3693
96
+ parsl/executors/taskvine/factory_config.py,sha256=ZQC5vyDe8cM0nuv7fbBCV2xnWGAZ87iLlT2UqmFFI1U,3695
94
97
  parsl/executors/taskvine/manager.py,sha256=SUi5mqqMm_rnkBLrZtTQe7RiHqWDn1oOejQscYzfwAU,25797
95
- parsl/executors/taskvine/manager_config.py,sha256=Lf3dxcDR5Jo97Odv4JFXfuRLclVX-xQP_QXQnS5OVtk,7643
98
+ parsl/executors/taskvine/manager_config.py,sha256=96G1LMBvgg74sHX4UcOzkCXhEdtVXry4ZzMDEYLWBTQ,7669
96
99
  parsl/executors/taskvine/utils.py,sha256=iSrIogeiauL3UNy_9tiZp1cBSNn6fIJkMYQRVi1n_r8,4156
97
100
  parsl/executors/workqueue/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
98
101
  parsl/executors/workqueue/errors.py,sha256=XO2naYhAsHHyiOBH6hpObg3mPNDmvMoFqErsj0-v7jc,541
99
102
  parsl/executors/workqueue/exec_parsl_function.py,sha256=YXKVVIa4zXmOtz-0Ca4E_5nQfN_3S2bh2tB75uZZB4w,7774
100
- parsl/executors/workqueue/executor.py,sha256=_Jv35gRAzUjC-pyDrSs6sEOFc7MxOFJ5cvWXt9WGRwU,49969
103
+ parsl/executors/workqueue/executor.py,sha256=EYKnqRcMF1OYPCUSMiPDgi7p-Tlpm3YLKxg_lpoRDDU,49711
101
104
  parsl/executors/workqueue/parsl_coprocess.py,sha256=cF1UmTgVLoey6QzBcbYgEiEsRidSaFfuO54f1HFw_EM,5737
102
105
  parsl/executors/workqueue/parsl_coprocess_stub.py,sha256=_bJmpPIgL42qM6bVzeEKt1Mn1trSP41rtJguXxPGfHI,735
103
106
  parsl/jobs/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -191,7 +194,7 @@ parsl/serialize/facade.py,sha256=3uOuVp0epfyLn7qDzuWqLfsy971YVGD3sqwqcAiRwh0,668
191
194
  parsl/serialize/proxystore.py,sha256=o-ha9QAvVhbN8y9S1itk3W0O75eyHYZw2AvB2xu5_Lg,1624
192
195
  parsl/tests/__init__.py,sha256=VTtJzOzz_x6fWNh8IOnsgFqVbdiJShi2AZH21mcmID4,204
193
196
  parsl/tests/callables_helper.py,sha256=ceP1YYsNtrZgKT6MAIvpgdccEjQ_CpFEOnZBGHKGOx0,30
194
- parsl/tests/conftest.py,sha256=njhszRuR15nZDufKF2S90lgkL8bSnQY4vH7dckx9q24,14851
197
+ parsl/tests/conftest.py,sha256=i8vJ2OSyhU1g8MoA1g6XPPEJ4VaavGN88dTcBROrhYE,15009
195
198
  parsl/tests/test_aalst_patterns.py,sha256=lNIxb7nIgh1yX7hR2fr_ck_mxYJxx8ASKK9zHUVqPno,9614
196
199
  parsl/tests/test_callables.py,sha256=97vrIF1_hfDGd81FM1bhR6FemZMWFcALrH6pVHMTCt8,1974
197
200
  parsl/tests/test_curvezmq.py,sha256=yyhlS4vmaZdMitiySoy4l_ih9H1bsPiN-tMdwIh3H20,12431
@@ -210,6 +213,7 @@ parsl/tests/configs/ec2_single_node.py,sha256=rK9AfMf4C84CXMhS5nhgHA_dNG2An7Yiq2
210
213
  parsl/tests/configs/ec2_spot.py,sha256=NKDCKgKxYNOHGVLBl2DFfiUwkR6xQnyhNb_E04TBs28,1253
211
214
  parsl/tests/configs/flux_local.py,sha256=xliKQfB5FFpfNHWYEHoA8FKOTVHFCXVhWNuKQ5VJNTk,182
212
215
  parsl/tests/configs/frontera.py,sha256=WEXJjFmHMhFT8X8JLUIUK35Qm2FRDolhOCAaE0zlL0U,1483
216
+ parsl/tests/configs/globus_compute.py,sha256=5lRtOVmXSUJ1vLgCk3eoR96GVX-vitP9IsU8_e3netE,418
213
217
  parsl/tests/configs/htex_local.py,sha256=6TKsAbEyFknFWT1cpMyd34DmPoo0OrsyPTayzSf2OFQ,675
214
218
  parsl/tests/configs/htex_local_alternate.py,sha256=CgIpjBpenLn-xOG0Ix5xrWgqO8wllOFnNXomJEWSkDY,2453
215
219
  parsl/tests/configs/htex_local_intask_staging.py,sha256=JTcrc8qNseF1sdBHxX_BfiGc-ZCPv7HXHlzZ94Jh-YE,806
@@ -304,7 +308,7 @@ parsl/tests/test_error_handling/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm
304
308
  parsl/tests/test_error_handling/test_fail.py,sha256=xx4TGWfL7le4cQ9nvnUkrlmKQJkskhD0l_3W1xwZSEI,282
305
309
  parsl/tests/test_error_handling/test_python_walltime.py,sha256=rdmGZHIkuann2Njt3i62odKJ0FaODGr7-L96rOXNVYg,950
306
310
  parsl/tests/test_error_handling/test_rand_fail.py,sha256=crFg4GmwdDpvx49_7w5Xt2P7H2R_V9f6i1Ar-QkASuU,3864
307
- parsl/tests/test_error_handling/test_resource_spec.py,sha256=gUS_lN7CcvOh_GeMY8DtZTh6LhizPfrVrYAJpt9XSYM,1428
311
+ parsl/tests/test_error_handling/test_resource_spec.py,sha256=uOYGHf2EC3cn4-G5ZYWK8WxXl_QONQfn9E0Tk7RJrGA,1467
308
312
  parsl/tests/test_error_handling/test_retries.py,sha256=zJ9D2hrvXQURnK2OIf5LfQFcSDVZ8rhdpp6peGccY7s,2372
309
313
  parsl/tests/test_error_handling/test_retry_handler.py,sha256=8fMHffMBLhRyNreIqkrwamx9TYRZ498uVYNlkcbAoLU,1407
310
314
  parsl/tests/test_error_handling/test_retry_handler_failure.py,sha256=GaGtZZCB9Wb7ieShqTrxUFEUSKy07ZZWytCY4Qixk9Y,552
@@ -321,12 +325,13 @@ parsl/tests/test_htex/test_disconnected_blocks.py,sha256=3V1Ol9gMS6knjLTgIjB5Gru
321
325
  parsl/tests/test_htex/test_disconnected_blocks_failing_provider.py,sha256=eOdipRpKMOkWAXB3UtY1UjqTiwfNs_csNLve8vllG_M,2040
322
326
  parsl/tests/test_htex/test_drain.py,sha256=gYA7qzbv5ozox3clVdW0rlxAzwa_f_P0kqsAez3tIfk,2370
323
327
  parsl/tests/test_htex/test_htex.py,sha256=J1uEGezic8ziPPZsQwfK9iNiTJ53NqXMhIg9CUunjZw,4901
328
+ parsl/tests/test_htex/test_interchange_exit_bad_registration.py,sha256=3yqX_VVQz9fCEiV6Wd1BlcFYcARDDpNaR6ws7LWn-oc,4549
324
329
  parsl/tests/test_htex/test_manager_failure.py,sha256=N-obuSZ8f7XA_XcddoN2LWKSVtpKUZvTHb7BFelS3iQ,1143
325
330
  parsl/tests/test_htex/test_manager_selector_by_block.py,sha256=VQqSE6MDhGpDSjShGUTbj7l9Ahuj2tC9qD--o4puF44,1310
326
331
  parsl/tests/test_htex/test_managers_command.py,sha256=Y-eUjtBzwW9erCYdph9bOesbkUvX8QUPqXt27DCgVS8,951
327
332
  parsl/tests/test_htex/test_missing_worker.py,sha256=gyp5i7_t-JHyJGtz_eXZKKBY5w8oqLOIxO6cJgGJMtQ,745
328
333
  parsl/tests/test_htex/test_multiple_disconnected_blocks.py,sha256=2vXZoIx4NuAWYuiNoL5Gxr85w72qZ7Kdb3JGh0FufTg,1867
329
- parsl/tests/test_htex/test_resource_spec_validation.py,sha256=VzOk4rjMNiDcEVLb-3YdlYZND7HRoGACJkTwq8NUTnc,1102
334
+ parsl/tests/test_htex/test_resource_spec_validation.py,sha256=JqboQRRFV0tEfWrGOdYT9pHazsUjyZLbF7qqnLFS_-A,914
330
335
  parsl/tests/test_htex/test_worker_failure.py,sha256=Uz-RHI-LK78FMjXUvrUFmo4iYfmpDVBUcBxxRb3UG9M,603
331
336
  parsl/tests/test_htex/test_zmq_binding.py,sha256=WNFsCKKfid2uEfem0WLgl1wnBncIabpAv6kmg3imBxk,4001
332
337
  parsl/tests/test_monitoring/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -357,18 +362,18 @@ parsl/tests/test_python_apps/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NM
357
362
  parsl/tests/test_python_apps/test_arg_input_types.py,sha256=JXpfHiu8lr9BN6u1OzqFvGwBhxzsGTPMewHx6Wdo-HI,670
358
363
  parsl/tests/test_python_apps/test_basic.py,sha256=lFqh4ugePbp_FRiHGUXxzV34iS7l8C5UkxTHuLcpnYs,855
359
364
  parsl/tests/test_python_apps/test_context_manager.py,sha256=8kUgcxN-6cz2u-lUoDhMAgu_ObUwEZvE3Eyxra6pFCo,3869
360
- parsl/tests/test_python_apps/test_dep_standard_futures.py,sha256=a3decndowPh8ma641BbxFAyMUZFGMT00TYpa7Y-7dV8,860
365
+ parsl/tests/test_python_apps/test_dep_standard_futures.py,sha256=kMOMZLaxJMmpABCUVniDIOIfkEqflZyhKjS_wkDti7A,1049
361
366
  parsl/tests/test_python_apps/test_dependencies.py,sha256=IRiTI_lPoWBSFSFnaBlE6Bv08PKEaf-qj5dfqO2RjT0,272
362
367
  parsl/tests/test_python_apps/test_dependencies_deep.py,sha256=Cuow2LLGY7zffPFj89AOIwKlXxHtsin3v_UIhfdwV_w,1542
363
368
  parsl/tests/test_python_apps/test_depfail_propagation.py,sha256=3q3HlVWrOixFtXWBvR_ypKtbdAHAJcKndXQ5drwrBQU,1488
364
- parsl/tests/test_python_apps/test_fail.py,sha256=0Gld8LS6NB0Io1bU82vVR73twkuL5nW0ifKbIUcsJcw,1671
369
+ parsl/tests/test_python_apps/test_fail.py,sha256=CgZq_ByzX6YLhBg71nWGXwaOaesYXwE6TWwslwrVuq4,1466
365
370
  parsl/tests/test_python_apps/test_fibonacci_iterative.py,sha256=ly2s5HuB9R53Z2FM_zy0WWdOk01iVhgcwSpQyK6ErIY,573
366
371
  parsl/tests/test_python_apps/test_fibonacci_recursive.py,sha256=q7LMFcu_pJSNPdz8iY0UiRoIweEWIBGwMjQffHWAuDc,592
367
372
  parsl/tests/test_python_apps/test_futures.py,sha256=EWnzmPn5sVCgeMxc0Uz2ieaaVYr98tFZ7g8YJFqYuC8,2355
368
373
  parsl/tests/test_python_apps/test_garbage_collect.py,sha256=RPntrLuzPkeNbhS7mmqEnHbyOcuV1YVppgZ8BaX-h84,1076
369
374
  parsl/tests/test_python_apps/test_import_fail.py,sha256=Vd8IMa_UsbHYkr3IGnS-rgGb6zKxB1tOTqMZY5lc_xY,691
370
375
  parsl/tests/test_python_apps/test_inputs_default.py,sha256=J2GR1NgdvEucNSJkfO6GC5OoMiuvSzO0tASCowT8HM0,436
371
- parsl/tests/test_python_apps/test_join.py,sha256=qnwdPYC_uIS5hQ2jmU2nIP_3P_TaMY8Av1ut10EZA_M,2678
376
+ parsl/tests/test_python_apps/test_join.py,sha256=OWd6_A0Cf-1Xpjr0OT3HaJ1IMYcJ0LFL1VnmL0cZkL8,2988
372
377
  parsl/tests/test_python_apps/test_lifted.py,sha256=Na6qC_dZSeYJcZdkGn-dCjgYkQV267HmGFfaqFcRVcQ,3408
373
378
  parsl/tests/test_python_apps/test_mapred.py,sha256=C7nTl0NsP_2TCtcmZXWFMpvAG4pwGswrIJKr-5sRUNY,786
374
379
  parsl/tests/test_python_apps/test_memoize_1.py,sha256=PXazRnekBe_KScUdbh8P3I7Vu_1Tc-nGssWBpfcic7M,514
@@ -445,18 +450,19 @@ parsl/tests/test_utils/test_sanitize_dns.py,sha256=8P_v5a5JLGU76OYf0LtclAwqJxGU0
445
450
  parsl/tests/unit/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
446
451
  parsl/tests/unit/test_address.py,sha256=LL9qhp00JFG5lDN7-lY1YtuhTDlKHXHHp3a9TX06c84,682
447
452
  parsl/tests/unit/test_file.py,sha256=vLycnYcv3bvSzL-FV8WdoibqTyb41BrH1LUYBavobsg,2850
453
+ parsl/tests/unit/test_globus_compute_executor.py,sha256=9BWKZ4C03tQ5gZ3jxIsDt5j2yyYHa_VHqULJPeM7YPM,3238
448
454
  parsl/tests/unit/test_usage_tracking.py,sha256=xEfUlbBRpsFdUdOrCsk1Kz5AfmMxJT7f0_esZl8Ft-0,1884
449
455
  parsl/usage_tracking/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
450
456
  parsl/usage_tracking/api.py,sha256=iaCY58Dc5J4UM7_dJzEEs871P1p1HdxBMtNGyVdzc9g,1821
451
457
  parsl/usage_tracking/levels.py,sha256=xbfzYEsd55KiZJ-mzNgPebvOH4rRHum04hROzEf41tU,291
452
- parsl/usage_tracking/usage.py,sha256=tcoZ2OUjsQVakG8Uu9_HFuEdzpSHyt4JarSRcLGnSMw,8918
453
- parsl-2025.1.6.data/scripts/exec_parsl_function.py,sha256=YXKVVIa4zXmOtz-0Ca4E_5nQfN_3S2bh2tB75uZZB4w,7774
454
- parsl-2025.1.6.data/scripts/interchange.py,sha256=rUhF_Bwk5NOqLhh-HgP-ei_gclKnPIJJ7uS32p0j-XI,30129
455
- parsl-2025.1.6.data/scripts/parsl_coprocess.py,sha256=zrVjEqQvFOHxsLufPi00xzMONagjVwLZbavPM7bbjK4,5722
456
- parsl-2025.1.6.data/scripts/process_worker_pool.py,sha256=82FoJTye2SysJzPg-N8BpenuHGU7hOI8-Bedq8HV9C0,41851
457
- parsl-2025.1.6.dist-info/LICENSE,sha256=tAkwu8-AdEyGxGoSvJ2gVmQdcicWw3j1ZZueVV74M-E,11357
458
- parsl-2025.1.6.dist-info/METADATA,sha256=vd794sFRn6CYChMUdYFIu7M5BZTvn18yg0Ymji3XGjI,3860
459
- parsl-2025.1.6.dist-info/WHEEL,sha256=tZoeGjtWxWRfdplE7E3d45VPlLNQnvbKiYnx7gwAy8A,92
460
- parsl-2025.1.6.dist-info/entry_points.txt,sha256=XqnsWDYoEcLbsMcpnYGKLEnSBmaIe1YoM5YsBdJG2tI,176
461
- parsl-2025.1.6.dist-info/top_level.txt,sha256=PIheYoUFQtF2icLsgOykgU-Cjuwr2Oi6On2jo5RYgRM,6
462
- parsl-2025.1.6.dist-info/RECORD,,
458
+ parsl/usage_tracking/usage.py,sha256=f9k6QcpbQxkGyP5WTC9PVyv0CA05s9NDpRe5wwRdBTM,9163
459
+ parsl-2025.1.20.data/scripts/exec_parsl_function.py,sha256=YXKVVIa4zXmOtz-0Ca4E_5nQfN_3S2bh2tB75uZZB4w,7774
460
+ parsl-2025.1.20.data/scripts/interchange.py,sha256=rUhF_Bwk5NOqLhh-HgP-ei_gclKnPIJJ7uS32p0j-XI,30129
461
+ parsl-2025.1.20.data/scripts/parsl_coprocess.py,sha256=zrVjEqQvFOHxsLufPi00xzMONagjVwLZbavPM7bbjK4,5722
462
+ parsl-2025.1.20.data/scripts/process_worker_pool.py,sha256=82FoJTye2SysJzPg-N8BpenuHGU7hOI8-Bedq8HV9C0,41851
463
+ parsl-2025.1.20.dist-info/LICENSE,sha256=tAkwu8-AdEyGxGoSvJ2gVmQdcicWw3j1ZZueVV74M-E,11357
464
+ parsl-2025.1.20.dist-info/METADATA,sha256=7DsaLRVRoBAcLHQbTggqANCocVk-5c4G6T2b-1Hd3go,4027
465
+ parsl-2025.1.20.dist-info/WHEEL,sha256=tZoeGjtWxWRfdplE7E3d45VPlLNQnvbKiYnx7gwAy8A,92
466
+ parsl-2025.1.20.dist-info/entry_points.txt,sha256=XqnsWDYoEcLbsMcpnYGKLEnSBmaIe1YoM5YsBdJG2tI,176
467
+ parsl-2025.1.20.dist-info/top_level.txt,sha256=PIheYoUFQtF2icLsgOykgU-Cjuwr2Oi6On2jo5RYgRM,6
468
+ parsl-2025.1.20.dist-info/RECORD,,