parsl 2023.12.11__py3-none-any.whl → 2023.12.25__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
parsl/dataflow/dflow.py CHANGED
@@ -854,10 +854,13 @@ class DataFlowKernel:
854
854
  try:
855
855
  new_args.extend([dep.result()])
856
856
  except Exception as e:
857
- if hasattr(dep, 'task_record'):
858
- tid = dep.task_record['id']
857
+ # If this Future is associated with a task inside this DFK,
858
+ # then refer to the task ID.
859
+ # Otherwise make a repr of the Future object.
860
+ if hasattr(dep, 'task_record') and dep.task_record['dfk'] == self:
861
+ tid = "task " + repr(dep.task_record['id'])
859
862
  else:
860
- tid = None
863
+ tid = repr(dep)
861
864
  dep_failures.extend([(e, tid)])
862
865
  else:
863
866
  new_args.extend([dep])
parsl/dataflow/errors.py CHANGED
@@ -36,7 +36,9 @@ class DependencyError(DataFlowException):
36
36
  in a dependency.
37
37
 
38
38
  Args:
39
- - dependent_exceptions_tids: List of dependency task IDs which failed
39
+ - dependent_exceptions_tids: List of exceptions and identifiers for
40
+ dependencies which failed. The identifier might be a task ID or
41
+ the repr of a non-DFK Future.
40
42
  - task_id: Task ID of the task that failed because of the dependency error
41
43
  """
42
44
 
@@ -45,8 +47,8 @@ class DependencyError(DataFlowException):
45
47
  self.task_id = task_id
46
48
 
47
49
  def __str__(self) -> str:
48
- dep_tids = [tid for (exception, tid) in self.dependent_exceptions_tids]
49
- return "Dependency failure for task {} with failed dependencies from tasks {}".format(self.task_id, dep_tids)
50
+ deps = ", ".join(tid for _exc, tid in self.dependent_exceptions_tids)
51
+ return f"Dependency failure for task {self.task_id} with failed dependencies from {deps}"
50
52
 
51
53
 
52
54
  class JoinError(DataFlowException):
@@ -10,7 +10,7 @@ import pickle
10
10
  import time
11
11
  import queue
12
12
  import uuid
13
- from typing import Sequence, Optional, Union
13
+ from typing import Sequence, Optional
14
14
 
15
15
  import zmq
16
16
  import math
@@ -48,22 +48,22 @@ class Manager:
48
48
  | | IPC-Queues
49
49
 
50
50
  """
51
- def __init__(self,
52
- addresses="127.0.0.1",
53
- address_probe_timeout=30,
54
- task_port="50097",
55
- result_port="50098",
56
- cores_per_worker=1,
57
- mem_per_worker=None,
58
- max_workers=float('inf'),
59
- prefetch_capacity=0,
60
- uid=None,
61
- block_id=None,
62
- heartbeat_threshold=120,
63
- heartbeat_period=30,
64
- poll_period=10,
65
- cpu_affinity=False,
66
- available_accelerators: Sequence[str] = ()):
51
+ def __init__(self, *,
52
+ addresses,
53
+ address_probe_timeout,
54
+ task_port,
55
+ result_port,
56
+ cores_per_worker,
57
+ mem_per_worker,
58
+ max_workers,
59
+ prefetch_capacity,
60
+ uid,
61
+ block_id,
62
+ heartbeat_threshold,
63
+ heartbeat_period,
64
+ poll_period,
65
+ cpu_affinity,
66
+ available_accelerators: Sequence[str]):
67
67
  """
68
68
  Parameters
69
69
  ----------
@@ -72,7 +72,7 @@ class Manager:
72
72
 
73
73
  address_probe_timeout : int
74
74
  Timeout in seconds for the address probe to detect viable addresses
75
- to the interchange. Default : 30s
75
+ to the interchange.
76
76
 
77
77
  uid : str
78
78
  string unique identifier
@@ -82,43 +82,41 @@ class Manager:
82
82
 
83
83
  cores_per_worker : float
84
84
  cores to be assigned to each worker. Oversubscription is possible
85
- by setting cores_per_worker < 1.0. Default=1
85
+ by setting cores_per_worker < 1.0.
86
86
 
87
87
  mem_per_worker : float
88
88
  GB of memory required per worker. If this option is specified, the node manager
89
89
  will check the available memory at startup and limit the number of workers such that
90
90
  the there's sufficient memory for each worker. If set to None, memory on node is not
91
91
  considered in the determination of workers to be launched on node by the manager.
92
- Default: None
93
92
 
94
93
  max_workers : int
95
94
  caps the maximum number of workers that can be launched.
96
- default: infinity
97
95
 
98
96
  prefetch_capacity : int
99
97
  Number of tasks that could be prefetched over available worker capacity.
100
98
  When there are a few tasks (<100) or when tasks are long running, this option should
101
- be set to 0 for better load balancing. Default is 0.
99
+ be set to 0 for better load balancing.
102
100
 
103
101
  heartbeat_threshold : int
104
102
  Seconds since the last message from the interchange after which the
105
- interchange is assumed to be un-available, and the manager initiates shutdown. Default:120s
103
+ interchange is assumed to be un-available, and the manager initiates shutdown.
106
104
 
107
105
  Number of seconds since the last message from the interchange after which the worker
108
- assumes that the interchange is lost and the manager shuts down. Default:120
106
+ assumes that the interchange is lost and the manager shuts down.
109
107
 
110
108
  heartbeat_period : int
111
109
  Number of seconds after which a heartbeat message is sent to the interchange, and workers
112
110
  are checked for liveness.
113
111
 
114
112
  poll_period : int
115
- Timeout period used by the manager in milliseconds. Default: 10ms
113
+ Timeout period used by the manager in milliseconds.
116
114
 
117
115
  cpu_affinity : str
118
116
  Whether or how each worker should force its affinity to different CPUs
119
117
 
120
118
  available_accelerators: list of str
121
- List of accelerators available to the workers. Default: Empty list
119
+ List of accelerators available to the workers.
122
120
 
123
121
  """
124
122
 
@@ -540,7 +538,7 @@ def worker(
540
538
  monitoring_queue: queue.Queue,
541
539
  ready_worker_count: Synchronized,
542
540
  tasks_in_progress: DictProxy,
543
- cpu_affinity: Union[str, bool],
541
+ cpu_affinity: str,
544
542
  accelerator: Optional[str],
545
543
  block_id: str,
546
544
  task_queue_timeout: int,
@@ -732,6 +730,7 @@ if __name__ == "__main__":
732
730
  parser.add_argument("-r", "--result_port", required=True,
733
731
  help="REQUIRED: Result port for posting results to the interchange")
734
732
  parser.add_argument("--cpu-affinity", type=str, choices=["none", "block", "alternating", "block-reverse"],
733
+ required=True,
735
734
  help="Whether/how workers should control CPU affinity.")
736
735
  parser.add_argument("--available-accelerators", type=str, nargs="*",
737
736
  help="Names of available accelerators")
@@ -132,6 +132,8 @@ class SlurmProvider(ClusterProvider, RepresentationMixin):
132
132
  self.exclusive = exclusive
133
133
  self.move_files = move_files
134
134
  self.account = account
135
+ self.qos = qos
136
+ self.constraint = constraint
135
137
  self.scheduler_options = scheduler_options + '\n'
136
138
  if exclusive:
137
139
  self.scheduler_options += "#SBATCH --exclusive\n"
@@ -33,4 +33,12 @@ def test_future_fail_dependency():
33
33
 
34
34
  plain_fut.set_exception(ValueError("Plain failure"))
35
35
 
36
- assert isinstance(parsl_fut.exception(), DependencyError)
36
+ ex = parsl_fut.exception()
37
+
38
+ # check that what we got is a dependency error...
39
+ assert isinstance(ex, DependencyError)
40
+
41
+ # and that the dependency error string mentions the dependency
42
+ # Future, plain_fut, somewhere in its str
43
+
44
+ assert repr(plain_fut) in str(ex)
@@ -21,6 +21,10 @@ def test_depfail_once():
21
21
  assert not isinstance(f1.exception(), DependencyError)
22
22
  assert isinstance(f2.exception(), DependencyError)
23
23
 
24
+ # check that the task ID of the failing task is mentioned
25
+ # in the DependencyError message
26
+ assert ("task " + str(f1.task_record['id'])) in str(f2.exception())
27
+
24
28
 
25
29
  def test_depfail_chain():
26
30
  """Test that dependency failures chain"""
parsl/version.py CHANGED
@@ -3,4 +3,4 @@
3
3
  Year.Month.Day[alpha/beta/..]
4
4
  Alphas will be numbered like this -> 2024.12.10a0
5
5
  """
6
- VERSION = '2023.12.11'
6
+ VERSION = '2023.12.25'
@@ -10,7 +10,7 @@ import pickle
10
10
  import time
11
11
  import queue
12
12
  import uuid
13
- from typing import Sequence, Optional, Union
13
+ from typing import Sequence, Optional
14
14
 
15
15
  import zmq
16
16
  import math
@@ -48,22 +48,22 @@ class Manager:
48
48
  | | IPC-Queues
49
49
 
50
50
  """
51
- def __init__(self,
52
- addresses="127.0.0.1",
53
- address_probe_timeout=30,
54
- task_port="50097",
55
- result_port="50098",
56
- cores_per_worker=1,
57
- mem_per_worker=None,
58
- max_workers=float('inf'),
59
- prefetch_capacity=0,
60
- uid=None,
61
- block_id=None,
62
- heartbeat_threshold=120,
63
- heartbeat_period=30,
64
- poll_period=10,
65
- cpu_affinity=False,
66
- available_accelerators: Sequence[str] = ()):
51
+ def __init__(self, *,
52
+ addresses,
53
+ address_probe_timeout,
54
+ task_port,
55
+ result_port,
56
+ cores_per_worker,
57
+ mem_per_worker,
58
+ max_workers,
59
+ prefetch_capacity,
60
+ uid,
61
+ block_id,
62
+ heartbeat_threshold,
63
+ heartbeat_period,
64
+ poll_period,
65
+ cpu_affinity,
66
+ available_accelerators: Sequence[str]):
67
67
  """
68
68
  Parameters
69
69
  ----------
@@ -72,7 +72,7 @@ class Manager:
72
72
 
73
73
  address_probe_timeout : int
74
74
  Timeout in seconds for the address probe to detect viable addresses
75
- to the interchange. Default : 30s
75
+ to the interchange.
76
76
 
77
77
  uid : str
78
78
  string unique identifier
@@ -82,43 +82,41 @@ class Manager:
82
82
 
83
83
  cores_per_worker : float
84
84
  cores to be assigned to each worker. Oversubscription is possible
85
- by setting cores_per_worker < 1.0. Default=1
85
+ by setting cores_per_worker < 1.0.
86
86
 
87
87
  mem_per_worker : float
88
88
  GB of memory required per worker. If this option is specified, the node manager
89
89
  will check the available memory at startup and limit the number of workers such that
90
90
  the there's sufficient memory for each worker. If set to None, memory on node is not
91
91
  considered in the determination of workers to be launched on node by the manager.
92
- Default: None
93
92
 
94
93
  max_workers : int
95
94
  caps the maximum number of workers that can be launched.
96
- default: infinity
97
95
 
98
96
  prefetch_capacity : int
99
97
  Number of tasks that could be prefetched over available worker capacity.
100
98
  When there are a few tasks (<100) or when tasks are long running, this option should
101
- be set to 0 for better load balancing. Default is 0.
99
+ be set to 0 for better load balancing.
102
100
 
103
101
  heartbeat_threshold : int
104
102
  Seconds since the last message from the interchange after which the
105
- interchange is assumed to be un-available, and the manager initiates shutdown. Default:120s
103
+ interchange is assumed to be un-available, and the manager initiates shutdown.
106
104
 
107
105
  Number of seconds since the last message from the interchange after which the worker
108
- assumes that the interchange is lost and the manager shuts down. Default:120
106
+ assumes that the interchange is lost and the manager shuts down.
109
107
 
110
108
  heartbeat_period : int
111
109
  Number of seconds after which a heartbeat message is sent to the interchange, and workers
112
110
  are checked for liveness.
113
111
 
114
112
  poll_period : int
115
- Timeout period used by the manager in milliseconds. Default: 10ms
113
+ Timeout period used by the manager in milliseconds.
116
114
 
117
115
  cpu_affinity : str
118
116
  Whether or how each worker should force its affinity to different CPUs
119
117
 
120
118
  available_accelerators: list of str
121
- List of accelerators available to the workers. Default: Empty list
119
+ List of accelerators available to the workers.
122
120
 
123
121
  """
124
122
 
@@ -540,7 +538,7 @@ def worker(
540
538
  monitoring_queue: queue.Queue,
541
539
  ready_worker_count: Synchronized,
542
540
  tasks_in_progress: DictProxy,
543
- cpu_affinity: Union[str, bool],
541
+ cpu_affinity: str,
544
542
  accelerator: Optional[str],
545
543
  block_id: str,
546
544
  task_queue_timeout: int,
@@ -732,6 +730,7 @@ if __name__ == "__main__":
732
730
  parser.add_argument("-r", "--result_port", required=True,
733
731
  help="REQUIRED: Result port for posting results to the interchange")
734
732
  parser.add_argument("--cpu-affinity", type=str, choices=["none", "block", "alternating", "block-reverse"],
733
+ required=True,
735
734
  help="Whether/how workers should control CPU affinity.")
736
735
  parser.add_argument("--available-accelerators", type=str, nargs="*",
737
736
  help="Names of available accelerators")
@@ -1,9 +1,9 @@
1
1
  Metadata-Version: 2.1
2
2
  Name: parsl
3
- Version: 2023.12.11
3
+ Version: 2023.12.25
4
4
  Summary: Simple data dependent workflows in Python
5
5
  Home-page: https://github.com/Parsl/parsl
6
- Download-URL: https://github.com/Parsl/parsl/archive/2023.12.11.tar.gz
6
+ Download-URL: https://github.com/Parsl/parsl/archive/2023.12.25.tar.gz
7
7
  Author: The Parsl Team
8
8
  Author-email: parsl@googlegroups.com
9
9
  License: Apache 2.0
@@ -7,7 +7,7 @@ parsl/multiprocessing.py,sha256=w3t1pFkHo4oZpznc2KF6Ff-Jj8MvXqvjm-hoiRqZDDQ,1984
7
7
  parsl/process_loggers.py,sha256=1G3Rfrh5wuZNo2X03grG4kTYPGOxz7hHCyG6L_A3b0A,1137
8
8
  parsl/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
9
9
  parsl/utils.py,sha256=_flbNpTu6IXHbzIyE5JkUbOBIK4poc1R1bjBtwJUVdo,11622
10
- parsl/version.py,sha256=dq6nySNPdvDlzzMFyTCed-Ct6nwInHRR7BShO48ZzA0,131
10
+ parsl/version.py,sha256=72YSwmW-MS_RN7eW_oxX5vVjFjXLm9v5J1JePWhtnv4,131
11
11
  parsl/app/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
12
12
  parsl/app/app.py,sha256=wAHchJetgnicT1pn0NJKDeDX0lV3vDFlG8cQd_Ciax4,8522
13
13
  parsl/app/bash.py,sha256=bx9x1XFwkOTpZZD3CPwnVL9SyNRDjbUGtOnuGLvxN_8,5396
@@ -62,8 +62,8 @@ parsl/data_provider/http.py,sha256=nDHTW7XmJqAukWJjPRQjyhUXt8r6GsQ36mX9mv_wOig,2
62
62
  parsl/data_provider/rsync.py,sha256=2-ZxqrT-hBj39x082NusJaBqsGW4Jd2qCW6JkVPpEl0,4254
63
63
  parsl/data_provider/staging.py,sha256=l-mAXFburs3BWPjkSmiQKuAgJpsxCG62yATPDbrafYI,4523
64
64
  parsl/dataflow/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
65
- parsl/dataflow/dflow.py,sha256=875obETSEog7WyovHDxR9k07zr_nifhbn67H5zmHxf8,63658
66
- parsl/dataflow/errors.py,sha256=JfjoqqqSg1WvTash5uwUuJt1ZDvxw-aAMenO-N3zoas,2084
65
+ parsl/dataflow/dflow.py,sha256=uuXY9pURFDBpL0w52J0DWGCOtorTQ5wFy5V0WwHS9L8,63909
66
+ parsl/dataflow/errors.py,sha256=w2vOt_ymzG2dOqJUO4IDcmTlrCIHlMZL8nBVyVq0O_8,2176
67
67
  parsl/dataflow/futures.py,sha256=aVfEUTzp4-EdunDAtNcqVQf8l_A7ArDi2c82KZMwxfY,5256
68
68
  parsl/dataflow/memoization.py,sha256=AsJO6c6cRp2ac6H8uGn2USlEi78_nX3QWvpxYt4XdYE,9583
69
69
  parsl/dataflow/rundirs.py,sha256=XKmBZpBEIsGACBhYOkbbs2e5edC0pQegJcSlk4FWeag,1154
@@ -85,7 +85,7 @@ parsl/executors/high_throughput/interchange.py,sha256=tX_EvQf7WkSKMJG-TNmA-WADjh
85
85
  parsl/executors/high_throughput/manager_record.py,sha256=T8-JVMfDJU6SJfzJRooD0mO8AHGMXlcn3PBOM0m_vng,366
86
86
  parsl/executors/high_throughput/monitoring_info.py,sha256=3gQpwQjjNDEBz0cQqJZB6hRiwLiWwXs83zkQDmbOwxY,297
87
87
  parsl/executors/high_throughput/probe.py,sha256=lvnuf-vBv57tHvFh-J51F9sDYBES7jCgs6KYgWvmKRs,2749
88
- parsl/executors/high_throughput/process_worker_pool.py,sha256=1DZKDkGaO1R2tognVbI6Q35xWN7SbeyfTeG8x1pbM0I,34287
88
+ parsl/executors/high_throughput/process_worker_pool.py,sha256=l0l5F3mpJ60idMCN-d1AbdaogmOtO5eO3uGWogspNXg,34070
89
89
  parsl/executors/high_throughput/zmq_pipes.py,sha256=88VJz9QejOCQ_yyhaO5C1uQuDYZTovYEcnKn15WxHSU,6103
90
90
  parsl/executors/radical/__init__.py,sha256=CKbtV2numw5QvgIBq1htMUrt9TqDCIC2zifyf2svTNU,186
91
91
  parsl/executors/radical/executor.py,sha256=ZYycq58jXlBlhmIO1355JCK1xIJHkspiy62NN1XiMYQ,20729
@@ -182,7 +182,7 @@ parsl/providers/pbspro/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3
182
182
  parsl/providers/pbspro/pbspro.py,sha256=zXsb45LhgCkLEwwKXjdjsm2jv884j1fXHJ2hky9auD0,7789
183
183
  parsl/providers/pbspro/template.py,sha256=ozMbrx0HNsLnSoWbkZhy-55yJoTX5gpdRrDuVn6TFWA,369
184
184
  parsl/providers/slurm/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
185
- parsl/providers/slurm/slurm.py,sha256=lx8wzZSC4bMDgG_ppSXwrnYK93TStdLBYyt1FUvbuhQ,12270
185
+ parsl/providers/slurm/slurm.py,sha256=qHTNI5crS90PzdcoDu_lzDrGrYNss8yY_1XuWU5S2bc,12330
186
186
  parsl/providers/slurm/template.py,sha256=cc-3l5wnThEWfqzpniPgi3FP6934Ni05UZ9r0A1RA8s,369
187
187
  parsl/providers/torque/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
188
188
  parsl/providers/torque/template.py,sha256=4qfc2gmlEhRCAD7erFDOs4prJQ43I8s4E8DSUSVQx3A,358
@@ -351,9 +351,9 @@ parsl/tests/test_providers/test_local_provider.py,sha256=G6Fuko22SvAtD7xhfQv8k_8
351
351
  parsl/tests/test_python_apps/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
352
352
  parsl/tests/test_python_apps/test_arg_input_types.py,sha256=JXpfHiu8lr9BN6u1OzqFvGwBhxzsGTPMewHx6Wdo-HI,670
353
353
  parsl/tests/test_python_apps/test_basic.py,sha256=lFqh4ugePbp_FRiHGUXxzV34iS7l8C5UkxTHuLcpnYs,855
354
- parsl/tests/test_python_apps/test_dep_standard_futures.py,sha256=L3mUPmcrpC6QhZgHQbmnZIPE5JRmAA0JSMEmRQo_oOA,639
354
+ parsl/tests/test_python_apps/test_dep_standard_futures.py,sha256=BloeaYBci0jS5al2d8Eqe3OfZ1tvolA5ZflOBQPR9Wo,859
355
355
  parsl/tests/test_python_apps/test_dependencies.py,sha256=IRiTI_lPoWBSFSFnaBlE6Bv08PKEaf-qj5dfqO2RjT0,272
356
- parsl/tests/test_python_apps/test_depfail_propagation.py,sha256=Jukzl6P5Be4I7VpZw2SoD2cwY_Yd0oU_THKl8wswx7k,1316
356
+ parsl/tests/test_python_apps/test_depfail_propagation.py,sha256=3q3HlVWrOixFtXWBvR_ypKtbdAHAJcKndXQ5drwrBQU,1488
357
357
  parsl/tests/test_python_apps/test_fail.py,sha256=0Gld8LS6NB0Io1bU82vVR73twkuL5nW0ifKbIUcsJcw,1671
358
358
  parsl/tests/test_python_apps/test_fibonacci_iterative.py,sha256=ly2s5HuB9R53Z2FM_zy0WWdOk01iVhgcwSpQyK6ErIY,573
359
359
  parsl/tests/test_python_apps/test_fibonacci_recursive.py,sha256=q7LMFcu_pJSNPdz8iY0UiRoIweEWIBGwMjQffHWAuDc,592
@@ -413,12 +413,12 @@ parsl/tests/test_threads/test_configs.py,sha256=QA9YjIMAtZ2jmkfOWqBzEfzQQcFVCDiz
413
413
  parsl/tests/test_threads/test_lazy_errors.py,sha256=nGhYfCMHFZYSy6YJ4gnAmiLl9SfYs0WVnuvj8DXQ9bw,560
414
414
  parsl/usage_tracking/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
415
415
  parsl/usage_tracking/usage.py,sha256=TEuAIm_U_G2ojZxvd0bbVa6gZlU61_mVRa2yJC9mGiI,7555
416
- parsl-2023.12.11.data/scripts/exec_parsl_function.py,sha256=NtWNeBvRqksej38eRPw8zPBJ1CeW6vgaitve0tfz_qc,7801
417
- parsl-2023.12.11.data/scripts/parsl_coprocess.py,sha256=kzX_1RI3V2KMKs6L-il4I1qkLNVodDKFXN_1FHB9fmM,6031
418
- parsl-2023.12.11.data/scripts/process_worker_pool.py,sha256=1GAfG53wE2uxq-3TiX2ur4A6JUcZbT-ifyug-FTKbv8,34273
419
- parsl-2023.12.11.dist-info/LICENSE,sha256=tAkwu8-AdEyGxGoSvJ2gVmQdcicWw3j1ZZueVV74M-E,11357
420
- parsl-2023.12.11.dist-info/METADATA,sha256=-fGjlELlK1VkSk3_6O8KlISe9ASmoUwZdWENYIhdGYw,3818
421
- parsl-2023.12.11.dist-info/WHEEL,sha256=oiQVh_5PnQM0E3gPdiz09WCNmwiHDMaGer_elqB3coM,92
422
- parsl-2023.12.11.dist-info/entry_points.txt,sha256=XqnsWDYoEcLbsMcpnYGKLEnSBmaIe1YoM5YsBdJG2tI,176
423
- parsl-2023.12.11.dist-info/top_level.txt,sha256=PIheYoUFQtF2icLsgOykgU-Cjuwr2Oi6On2jo5RYgRM,6
424
- parsl-2023.12.11.dist-info/RECORD,,
416
+ parsl-2023.12.25.data/scripts/exec_parsl_function.py,sha256=NtWNeBvRqksej38eRPw8zPBJ1CeW6vgaitve0tfz_qc,7801
417
+ parsl-2023.12.25.data/scripts/parsl_coprocess.py,sha256=kzX_1RI3V2KMKs6L-il4I1qkLNVodDKFXN_1FHB9fmM,6031
418
+ parsl-2023.12.25.data/scripts/process_worker_pool.py,sha256=ytz3F8ZYeBr8tFqSRv2O9eZGdsID7oZRulBmmQmZaV8,34056
419
+ parsl-2023.12.25.dist-info/LICENSE,sha256=tAkwu8-AdEyGxGoSvJ2gVmQdcicWw3j1ZZueVV74M-E,11357
420
+ parsl-2023.12.25.dist-info/METADATA,sha256=vri9mnQiuUfyLPY1aPnU1F6V12aehbagmSqm6wEdi1s,3818
421
+ parsl-2023.12.25.dist-info/WHEEL,sha256=oiQVh_5PnQM0E3gPdiz09WCNmwiHDMaGer_elqB3coM,92
422
+ parsl-2023.12.25.dist-info/entry_points.txt,sha256=XqnsWDYoEcLbsMcpnYGKLEnSBmaIe1YoM5YsBdJG2tI,176
423
+ parsl-2023.12.25.dist-info/top_level.txt,sha256=PIheYoUFQtF2icLsgOykgU-Cjuwr2Oi6On2jo5RYgRM,6
424
+ parsl-2023.12.25.dist-info/RECORD,,