parsl 2024.6.3__py3-none-any.whl → 2024.6.10__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- parsl/config.py +27 -4
- parsl/dataflow/dflow.py +36 -10
- parsl/executors/high_throughput/executor.py +2 -1
- parsl/executors/high_throughput/interchange.py +21 -20
- parsl/providers/kubernetes/kube.py +19 -6
- parsl/providers/slurm/slurm.py +31 -22
- parsl/tests/configs/flux_local.py +11 -0
- parsl/tests/conftest.py +4 -0
- parsl/tests/test_bash_apps/test_stdout.py +20 -2
- parsl/tests/test_htex/test_zmq_binding.py +22 -6
- parsl/tests/test_python_apps/test_context_manager.py +96 -1
- parsl/tests/test_python_apps/test_dependencies_deep.py +59 -0
- parsl/tests/test_radical/test_mpi_funcs.py +0 -1
- parsl/tests/unit/test_usage_tracking.py +45 -0
- parsl/usage_tracking/levels.py +6 -0
- parsl/usage_tracking/usage.py +54 -23
- parsl/version.py +1 -1
- {parsl-2024.6.3.dist-info → parsl-2024.6.10.dist-info}/METADATA +2 -2
- {parsl-2024.6.3.dist-info → parsl-2024.6.10.dist-info}/RECORD +26 -22
- {parsl-2024.6.3.data → parsl-2024.6.10.data}/scripts/exec_parsl_function.py +0 -0
- {parsl-2024.6.3.data → parsl-2024.6.10.data}/scripts/parsl_coprocess.py +0 -0
- {parsl-2024.6.3.data → parsl-2024.6.10.data}/scripts/process_worker_pool.py +0 -0
- {parsl-2024.6.3.dist-info → parsl-2024.6.10.dist-info}/LICENSE +0 -0
- {parsl-2024.6.3.dist-info → parsl-2024.6.10.dist-info}/WHEEL +0 -0
- {parsl-2024.6.3.dist-info → parsl-2024.6.10.dist-info}/entry_points.txt +0 -0
- {parsl-2024.6.3.dist-info → parsl-2024.6.10.dist-info}/top_level.txt +0 -0
parsl/config.py
CHANGED

@@ -11,6 +11,8 @@ from parsl.executors.base import ParslExecutor
 from parsl.executors.threads import ThreadPoolExecutor
 from parsl.monitoring import MonitoringHub
 from parsl.usage_tracking.api import UsageInformation
+from parsl.usage_tracking.levels import DISABLED as USAGE_TRACKING_DISABLED
+from parsl.usage_tracking.levels import LEVEL_3 as USAGE_TRACKING_LEVEL_3
 from parsl.utils import RepresentationMixin

 logger = logging.getLogger(__name__)

@@ -38,6 +40,15 @@ class Config(RepresentationMixin, UsageInformation):
         ``checkpoint_mode='periodic'``.
     dependency_resolver: plugin point for custom dependency resolvers. Default: only resolve Futures,
         using the `SHALLOW_DEPENDENCY_RESOLVER`.
+    exit_mode: str, optional
+        When Parsl is used as a context manager (using ``with parsl.load`` syntax) then this parameter
+        controls what will happen to running tasks and exceptions at exit. The options are:
+
+        * ``cleanup``: cleanup the DFK on exit without waiting for any tasks
+        * ``skip``: skip all shutdown behaviour when exiting the context manager
+        * ``wait``: wait for all tasks to complete when exiting normally, but exit immediately when exiting due to an exception.
+
+        Default is ``cleanup``.
     garbage_collect : bool. optional.
         Delete task records from DFK when tasks have completed. Default: True
     internal_tasks_max_threads : int, optional

@@ -66,9 +77,12 @@ class Config(RepresentationMixin, UsageInformation):
         How often the scaling strategy should be executed. Default is 5 seconds.
     max_idletime : float, optional
         The maximum idle time allowed for an executor before strategy could shut down unused blocks. Default is 120.0 seconds.
-    usage_tracking :
-        Set this field to…
-        …
+    usage_tracking : int, optional
+        Set this field to 1, 2, or 3 to opt-in to Parsl's usage tracking system.
+        The value represents the level of usage tracking detail to be collected.
+        Setting this field to 0 will disable usage tracking. Default (this field is not set): usage tracking is not enabled.
+        Parsl only collects minimal, non personally-identifiable,
+        information used for reporting to our funding agencies.
     initialize_logging : bool, optional
         Make DFK optionally not initialize any logging. Log messages
         will still be passed into the python logging system under the

@@ -92,6 +106,7 @@ class Config(RepresentationMixin, UsageInformation):
                           Literal['manual']] = None,
                  checkpoint_period: Optional[str] = None,
                  dependency_resolver: Optional[DependencyResolver] = None,
+                 exit_mode: Literal['cleanup', 'skip', 'wait'] = 'cleanup',
                  garbage_collect: bool = True,
                  internal_tasks_max_threads: int = 10,
                  retries: int = 0,

@@ -102,7 +117,7 @@ class Config(RepresentationMixin, UsageInformation):
                  strategy_period: Union[float, int] = 5,
                  max_idletime: float = 120.0,
                  monitoring: Optional[MonitoringHub] = None,
-                 usage_tracking:…
+                 usage_tracking: int = 0,
                  initialize_logging: bool = True) -> None:

         executors = tuple(executors or [])

@@ -128,6 +143,7 @@ class Config(RepresentationMixin, UsageInformation):
             checkpoint_period = "00:30:00"
         self.checkpoint_period = checkpoint_period
         self.dependency_resolver = dependency_resolver
+        self.exit_mode = exit_mode
         self.garbage_collect = garbage_collect
         self.internal_tasks_max_threads = internal_tasks_max_threads
         self.retries = retries

@@ -136,6 +152,7 @@ class Config(RepresentationMixin, UsageInformation):
         self.strategy = strategy
         self.strategy_period = strategy_period
         self.max_idletime = max_idletime
+        self.validate_usage_tracking(usage_tracking)
         self.usage_tracking = usage_tracking
         self.initialize_logging = initialize_logging
         self.monitoring = monitoring

@@ -156,6 +173,12 @@ class Config(RepresentationMixin, UsageInformation):
             raise ConfigurationError('Executors must have unique labels ({})'.format(
                 ', '.join(['label={}'.format(repr(d)) for d in duplicates])))

+    def validate_usage_tracking(self, level: int) -> None:
+        if not USAGE_TRACKING_DISABLED <= level <= USAGE_TRACKING_LEVEL_3:
+            raise ConfigurationError(
+                f"Usage Tracking values must be 0, 1, 2, or 3 and not {level}"
+            )
+
     def get_usage_information(self):
         return {"executors_len": len(self.executors),
                 "dependency_resolver": self.dependency_resolver is not None}
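Taken together, the config.py changes add two user-facing options. A minimal usage sketch (illustrative only; `exit_mode` and the 0–3 `usage_tracking` range come from the diff above):

    import parsl
    from parsl.config import Config

    # usage_tracking values outside 0-3 now raise ConfigurationError at
    # construction time; exit_mode controls context-manager shutdown.
    config = Config(exit_mode="wait", usage_tracking=3)

    with parsl.load(config):
        pass  # in "wait" mode, outstanding tasks are awaited on normal exit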
parsl/dataflow/dflow.py
CHANGED

@@ -1,6 +1,7 @@
 from __future__ import annotations

 import atexit
+import concurrent.futures as cf
 import datetime
 import inspect
 import logging

@@ -209,6 +210,8 @@ class DataFlowKernel:
         self.tasks: Dict[int, TaskRecord] = {}
         self.submitter_lock = threading.Lock()

+        self.dependency_launch_pool = cf.ThreadPoolExecutor(max_workers=1, thread_name_prefix="Dependency-Launch")
+
         self.dependency_resolver = self.config.dependency_resolver if self.config.dependency_resolver is not None \
             else SHALLOW_DEPENDENCY_RESOLVER

@@ -217,9 +220,24 @@ class DataFlowKernel:
     def __enter__(self):
         return self

-    def __exit__(self, exc_type, exc_value, traceback):
-        …
-        …
+    def __exit__(self, exc_type, exc_value, traceback) -> None:
+        mode = self.config.exit_mode
+        logger.debug("Exiting context manager, with exit mode '%s'", mode)
+        if mode == "cleanup":
+            logger.info("Calling cleanup for DFK")
+            self.cleanup()
+        elif mode == "skip":
+            logger.info("Skipping all cleanup handling")
+        elif mode == "wait":
+            if exc_type is None:
+                logger.info("Waiting for all tasks to complete")
+                self.wait_for_current_tasks()
+                self.cleanup()
+            else:
+                logger.info("There was an exception - cleaning up without waiting for task completion")
+                self.cleanup()
+        else:
+            raise InternalConsistencyError(f"Exit case for {mode} should be unreachable, validated by typeguard on Config()")

     def _send_task_log_info(self, task_record: TaskRecord) -> None:
         if self.monitoring:

@@ -611,9 +629,9 @@ class DataFlowKernel:
         return kwargs.get('_parsl_staging_inhibit', False)

     def launch_if_ready(self, task_record: TaskRecord) -> None:
-        """…
-        …
-        …
+        """Schedules a task record for re-inspection to see if it is ready
+        for launch and for launch if it is ready. The call will return
+        immediately.

         This should be called by any piece of the DataFlowKernel that
         thinks a task may have become ready to run.

@@ -622,13 +640,17 @@ class DataFlowKernel:
         ready to run - launch_if_ready will not incorrectly launch that
         task.

-        It is also not an error to call launch_if_ready on a task that has
-        already been launched - launch_if_ready will not re-launch that
-        task.
-
         launch_if_ready is thread safe, so may be called from any thread
         or callback.
         """
+        self.dependency_launch_pool.submit(self._launch_if_ready_async, task_record)
+
+    @wrap_with_logs
+    def _launch_if_ready_async(self, task_record: TaskRecord) -> None:
+        """
+        _launch_if_ready will launch the specified task, if it is ready
+        to run (for example, without dependencies, and in pending state).
+        """
         exec_fu = None

         task_id = task_record['id']

@@ -1271,6 +1293,10 @@
             self.monitoring.close()
             logger.info("Terminated monitoring")

+        logger.info("Terminating dependency launch pool")
+        self.dependency_launch_pool.shutdown()
+        logger.info("Terminated dependency launch pool")
+
         logger.info("Unregistering atexit hook")
         atexit.unregister(self.atexit_cleanup)
         logger.info("Unregistered atexit hook")
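The new `dependency_launch_pool` is the core of this change: `launch_if_ready` now returns immediately and hands the real work to a single-worker thread pool, so deeply chained dependencies no longer recurse on the Python stack (exercised by the new test_dependencies_deep.py later in this diff). A standalone sketch of the pattern, with illustrative names:

    import concurrent.futures as cf

    pool = cf.ThreadPoolExecutor(max_workers=1, thread_name_prefix="Dependency-Launch")

    def launch_if_ready(task):
        # returns immediately; the launch itself runs on the pool's one thread
        pool.submit(_launch_if_ready_async, task)

    def _launch_if_ready_async(task):
        print(f"launching {task}")  # stand-in for the real launch logic

    launch_if_ready("task-0")
    pool.shutdown()  # mirrored by the new shutdown call in DataFlowKernel cleanup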
parsl/executors/high_throughput/executor.py
CHANGED

@@ -527,7 +527,8 @@ class HighThroughputExecutor(BlockProviderExecutor, RepresentationMixin, UsageIn…
         get the worker task and result ports that the interchange has bound to.
         """
         self.interchange_proc = ForkProcess(target=interchange.starter,
-                                            kwargs={"…
+                                            kwargs={"client_address": "127.0.0.1",
+                                                    "client_ports": (self.outgoing_q.port,
                                                                     self.incoming_q.port,
                                                                     self.command_client.port),
                                                    "interchange_address": self.address,
parsl/executors/high_throughput/interchange.py
CHANGED

@@ -65,18 +65,19 @@ class Interchange:
     3. Detect workers that have failed using heartbeats
     """
     def __init__(self,
-                 …
-                 …
-                 …
-                 …
-                 …
-                 …
-                 …
-                 …
-                 …
-                 …
-                 …
-                 …
+                 *,
+                 client_address: str,
+                 interchange_address: Optional[str],
+                 client_ports: Tuple[int, int, int],
+                 worker_ports: Optional[Tuple[int, int]],
+                 worker_port_range: Tuple[int, int],
+                 hub_address: Optional[str],
+                 hub_zmq_port: Optional[int],
+                 heartbeat_threshold: int,
+                 logdir: str,
+                 logging_level: int,
+                 poll_period: int,
+                 cert_dir: Optional[str],
                  ) -> None:
         """
         Parameters

@@ -92,34 +93,34 @@ class Interchange:
         The ports at which the client can be reached

     worker_ports : tuple(int, int)
-        The specific two ports at which workers will connect to the Interchange.
+        The specific two ports at which workers will connect to the Interchange.

     worker_port_range : tuple(int, int)
         The interchange picks ports at random from the range which will be used by workers.
-        This is overridden when the worker_ports option is set.
+        This is overridden when the worker_ports option is set.

     hub_address : str
         The IP address at which the interchange can send info about managers to when monitoring is enabled.
-        …
+        When None, monitoring is disabled.

     hub_zmq_port : str
         The port at which the interchange can send info about managers to when monitoring is enabled.
-        …
+        When None, monitoring is disabled.

     heartbeat_threshold : int
         Number of seconds since the last heartbeat after which worker is considered lost.

     logdir : str
-        Parsl log directory paths. Logs and temp files go here.
+        Parsl log directory paths. Logs and temp files go here.

     logging_level : int
-        Logging level as defined in the logging module.
+        Logging level as defined in the logging module.

     poll_period : int
-        The main thread polling period, in milliseconds.
+        The main thread polling period, in milliseconds.

     cert_dir : str | None
-        Path to the certificate directory.
+        Path to the certificate directory.
     """
     self.cert_dir = cert_dir
     self.logdir = logdir
parsl/providers/kubernetes/kube.py
CHANGED

@@ -83,6 +83,10 @@ class KubernetesProvider(ExecutionProvider, RepresentationMixin):
     persistent_volumes: list[(str, str)]
         List of tuples describing persistent volumes to be mounted in the pod.
         The tuples consist of (PVC Name, Mount Directory).
+    service_account_name: str
+        Name of the service account to run the pod as.
+    annotations: Dict[str, str]
+        Annotations to set on the pod.
     """
     @typeguard.typechecked
     def __init__(self,

@@ -103,7 +107,9 @@ class KubernetesProvider(ExecutionProvider, RepresentationMixin):
                  group_id: Optional[str] = None,
                  run_as_non_root: bool = False,
                  secret: Optional[str] = None,
-                 persistent_volumes: List[Tuple[str, str]] = []…
+                 persistent_volumes: List[Tuple[str, str]] = [],
+                 service_account_name: Optional[str] = None,
+                 annotations: Optional[Dict[str, str]] = None) -> None:
         if not _kubernetes_enabled:
             raise OptionalModuleMissing(['kubernetes'],
                                         "Kubernetes provider requires kubernetes module and config.")

@@ -146,6 +152,8 @@ class KubernetesProvider(ExecutionProvider, RepresentationMixin):
         self.group_id = group_id
         self.run_as_non_root = run_as_non_root
         self.persistent_volumes = persistent_volumes
+        self.service_account_name = service_account_name
+        self.annotations = annotations

         self.kube_client = client.CoreV1Api()

@@ -184,7 +192,9 @@ class KubernetesProvider(ExecutionProvider, RepresentationMixin):
                                 pod_name=pod_name,
                                 job_name=job_name,
                                 cmd_string=formatted_cmd,
-                                volumes=self.persistent_volumes…
+                                volumes=self.persistent_volumes,
+                                service_account_name=self.service_account_name,
+                                annotations=self.annotations)
         self.resources[pod_name] = {'status': JobStatus(JobState.RUNNING)}

         return pod_name

@@ -253,7 +263,9 @@ class KubernetesProvider(ExecutionProvider, RepresentationMixin):
                     job_name,
                     port=80,
                     cmd_string=None,
-                    volumes=[]…
+                    volumes=[],
+                    service_account_name=None,
+                    annotations=None):
         """ Create a kubernetes pod for the job.
         Args:
              - image (string) : Docker image to launch

@@ -311,11 +323,12 @@ class KubernetesProvider(ExecutionProvider, RepresentationMixin):
                                 claim_name=volume[0])))

         metadata = client.V1ObjectMeta(name=pod_name,
-                                       labels={"app": job_name}…
+                                       labels={"app": job_name},
+                                       annotations=annotations)
         spec = client.V1PodSpec(containers=[container],
                                 image_pull_secrets=[secret],
-                                volumes=volume_defs
-                                )
+                                volumes=volume_defs,
+                                service_account_name=service_account_name)

         pod = client.V1Pod(spec=spec, metadata=metadata)
         api_response = self.kube_client.create_namespaced_pod(namespace=self.namespace,
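A hedged sketch of the two new provider options (the image, account name, and annotation values are placeholders; constructing the provider requires the kubernetes package and cluster credentials):

    from parsl.providers import KubernetesProvider

    provider = KubernetesProvider(
        image="python:3.11",
        service_account_name="parsl-runner",  # pod runs under this service account
        annotations={"team": "workflows"},    # copied onto the pod's V1ObjectMeta
    )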
parsl/providers/slurm/slurm.py
CHANGED

@@ -19,25 +19,29 @@ from parsl.utils import RepresentationMixin, wtime_to_minutes

 logger = logging.getLogger(__name__)

+# From https://slurm.schedmd.com/sacct.html#SECTION_JOB-STATE-CODES
 translate_table = {
-    '…
-    '…
-    '…
-    '…
-    '…
-    '…
-    '…
-    '…
-    '…
-    '…
-    '…
+    'PENDING': JobState.PENDING,
+    'RUNNING': JobState.RUNNING,
+    'CANCELLED': JobState.CANCELLED,
+    'COMPLETED': JobState.COMPLETED,
+    'FAILED': JobState.FAILED,
+    'NODE_FAIL': JobState.FAILED,
+    'BOOT_FAIL': JobState.FAILED,
+    'DEADLINE': JobState.TIMEOUT,
+    'TIMEOUT': JobState.TIMEOUT,
+    'REVOKED': JobState.FAILED,
+    'OUT_OF_MEMORY': JobState.FAILED,
+    'SUSPENDED': JobState.HELD,
+    'PREEMPTED': JobState.TIMEOUT,
+    'REQUEUED': JobState.PENDING
 }


 class SlurmProvider(ClusterProvider, RepresentationMixin):
     """Slurm Execution Provider

-    This provider uses sbatch to submit,…
+    This provider uses sbatch to submit, sacct for status and scancel to cancel
     jobs. The sbatch script to be used is created from a template file in this
     same module.

@@ -168,14 +172,16 @@ class SlurmProvider(ClusterProvider, RepresentationMixin):
             logger.debug('No active jobs, skipping status update')
             return

-        …
+        # Using state%20 to get enough characters to not truncate output
+        # of the state. Without output can look like "<job_id> CANCELLED+"
+        cmd = "sacct -X --noheader --format=jobid,state%20 --job '{0}'".format(job_id_list)
         logger.debug("Executing %s", cmd)
         retcode, stdout, stderr = self.execute_wait(cmd)
-        logger.debug("…
+        logger.debug("sacct returned %s %s", stdout, stderr)

         # Execute_wait failed. Do no update
         if retcode != 0:
-            logger.warning("…
+            logger.warning("sacct failed with non-zero exit code {}".format(retcode))
             return

         jobs_missing = set(self.resources.keys())

@@ -183,7 +189,10 @@ class SlurmProvider(ClusterProvider, RepresentationMixin):
             if not line:
                 # Blank line
                 continue
-            …
+            # Sacct includes extra information in some outputs
+            # For example "<job_id> CANCELLED by <user_id>"
+            # This splits and ignores anything past the first two unpacked values
+            job_id, slurm_state, *ignore = line.split()
             if slurm_state not in translate_table:
                 logger.warning(f"Slurm status {slurm_state} is not recognized")
             status = translate_table.get(slurm_state, JobState.UNKNOWN)

@@ -193,13 +202,13 @@ class SlurmProvider(ClusterProvider, RepresentationMixin):
                                 stderr_path=self.resources[job_id]['job_stderr_path'])
             jobs_missing.remove(job_id)

-        # …
-        # …
+        # sacct can get job info after jobs have completed so this path shouldn't be hit
+        # log a warning if there are missing jobs for some reason
         for missing_job in jobs_missing:
-            logger.…
-            self.resources[missing_job]['status'] = JobStatus(…
-            …
-            …
+            logger.warning("Updating missing job {} to completed status".format(missing_job))
+            self.resources[missing_job]['status'] = JobStatus(
+                JobState.COMPLETED, stdout_path=self.resources[missing_job]['job_stdout_path'],
+                stderr_path=self.resources[missing_job]['job_stderr_path'])

     def submit(self, command: str, tasks_per_node: int, job_name="parsl.slurm") -> str:
         """Submit the command as a slurm job.
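The star-unpacking line above is what makes the parser tolerant of sacct's variable-width output; a small self-contained example:

    # sacct may append detail after the state column, e.g. "CANCELLED by <uid>"
    line = "12345 CANCELLED by 1000"
    job_id, slurm_state, *ignore = line.split()
    assert (job_id, slurm_state) == ("12345", "CANCELLED")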
parsl/tests/conftest.py
CHANGED

@@ -151,6 +151,10 @@ def pytest_configure(config):
         'markers',
         'multiple_cores_required: Marks tests that require multiple cores, such as htex affinity'
     )
+    config.addinivalue_line(
+        'markers',
+        'unix_filesystem_permissions_required: Marks tests that require unix-level filesystem permission enforcement'
+    )
     config.addinivalue_line(
         'markers',
         'issue3328: Marks tests broken by issue #3328'
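Registering the marker means environments that cannot enforce unix filesystem permissions (for example, test containers running as root) can deselect the marked tests with standard pytest selection, e.g. `pytest -m "not unix_filesystem_permissions_required"`.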
parsl/tests/test_bash_apps/test_stdout.py
CHANGED

@@ -16,7 +16,6 @@ def echo_to_streams(msg, stderr=None, stdout=None):
 whitelist = os.path.join(os.path.dirname(os.path.dirname(__file__)), 'configs', '*threads*')

 speclist = (
-    '/bad/dir/t.out',
     ['t3.out', 'w'],
     ('t4.out', None),
     (42, 'w'),

@@ -26,7 +25,6 @@ speclist = (
 )

 testids = [
-    'nonexistent_dir',
     'list_not_tuple',
     'null_mode',
     'not_a_string',

@@ -55,6 +53,26 @@ def test_bad_stdout_specs(spec):


 @pytest.mark.issue3328
+@pytest.mark.unix_filesystem_permissions_required
+def test_bad_stdout_file():
+    """Testing bad stderr file"""
+
+    o = "/bad/dir/t2.out"
+
+    fn = echo_to_streams("Hello world", stdout=o, stderr='t.err')
+
+    try:
+        fn.result()
+    except perror.BadStdStreamFile:
+        pass
+    else:
+        assert False, "Did not raise expected exception BadStdStreamFile"
+
+    return
+
+
+@pytest.mark.issue3328
+@pytest.mark.unix_filesystem_permissions_required
 def test_bad_stderr_file():
     """Testing bad stderr file"""

parsl/tests/test_htex/test_zmq_binding.py
CHANGED

@@ -1,3 +1,4 @@
+import logging
 import pathlib
 from typing import Optional
 from unittest import mock

@@ -10,6 +11,21 @@ from parsl import curvezmq
 from parsl.executors.high_throughput.interchange import Interchange


+def make_interchange(*, interchange_address: Optional[str], cert_dir: Optional[str]) -> Interchange:
+    return Interchange(interchange_address=interchange_address,
+                       cert_dir=cert_dir,
+                       client_address="127.0.0.1",
+                       client_ports=(50055, 50056, 50057),
+                       worker_ports=None,
+                       worker_port_range=(54000, 55000),
+                       hub_address=None,
+                       hub_zmq_port=None,
+                       heartbeat_threshold=60,
+                       logdir=".",
+                       logging_level=logging.INFO,
+                       poll_period=10)
+
+
 @pytest.fixture
 def encrypted(request: pytest.FixtureRequest):
     if hasattr(request, "param"):

@@ -31,7 +47,7 @@ def test_interchange_curvezmq_sockets(
     mock_socket: mock.MagicMock, cert_dir: Optional[str], encrypted: bool
 ):
     address = "127.0.0.1"
-    ix = …
+    ix = make_interchange(interchange_address=address, cert_dir=cert_dir)
     assert isinstance(ix.zmq_context, curvezmq.ServerContext)
     assert ix.zmq_context.encrypted is encrypted
     assert mock_socket.call_count == 5

@@ -40,7 +56,7 @@ def test_interchange_curvezmq_sockets(
 @pytest.mark.local
 @pytest.mark.parametrize("encrypted", (True, False), indirect=True)
 def test_interchange_binding_no_address(cert_dir: Optional[str]):
-    ix = …
+    ix = make_interchange(interchange_address=None, cert_dir=cert_dir)
     assert ix.interchange_address == "*"


@@ -49,7 +65,7 @@ def test_interchange_binding_no_address(cert_dir: Optional[str]):
 def test_interchange_binding_with_address(cert_dir: Optional[str]):
     # Using loopback address
     address = "127.0.0.1"
-    ix = …
+    ix = make_interchange(interchange_address=address, cert_dir=cert_dir)
     assert ix.interchange_address == address


@@ -60,7 +76,7 @@ def test_interchange_binding_with_non_ipv4_address(cert_dir: Optional[str]):
     # Confirm that a ipv4 address is required
     address = "localhost"
     with pytest.raises(zmq.error.ZMQError):
-        …
+        make_interchange(interchange_address=address, cert_dir=cert_dir)


 @pytest.mark.local
@@ -69,7 +85,7 @@ def test_interchange_binding_bad_address(cert_dir: Optional[str]):
     """Confirm that we raise a ZMQError when a bad address is supplied"""
     address = "550.0.0.0"
     with pytest.raises(zmq.error.ZMQError):
-        …
+        make_interchange(interchange_address=address, cert_dir=cert_dir)


@@ -77,7 +93,7 @@ def test_interchange_binding_bad_address(cert_dir: Optional[str]):
 def test_limited_interface_binding(cert_dir: Optional[str]):
     """When address is specified the worker_port would be bound to it rather than to 0.0.0.0"""
     address = "127.0.0.1"
-    ix = …
+    ix = make_interchange(interchange_address=address, cert_dir=cert_dir)
     ix.worker_result_port
     proc = psutil.Process()
     conns = proc.connections(kind="tcp")
parsl/tests/test_python_apps/test_context_manager.py
CHANGED

@@ -1,7 +1,11 @@
+from concurrent.futures import Future
+from threading import Event
+
 import pytest

 import parsl
-from parsl.…
+from parsl.config import Config
+from parsl.dataflow.dflow import DataFlowKernel, DataFlowKernelLoader
 from parsl.errors import NoDataFlowKernelError
 from parsl.tests.configs.local_threads import fresh_config

@@ -16,6 +20,16 @@ def foo(x, stdout='foo.stdout'):
     return f"echo {x + 1}"


+@parsl.python_app
+def wait_for_event(ev: Event):
+    ev.wait()
+
+
+@parsl.python_app
+def raise_app():
+    raise RuntimeError("raise_app deliberate failure")
+
+
 @pytest.mark.local
 def test_within_context_manger(tmpd_cwd):
     config = fresh_config()

@@ -31,3 +45,84 @@ def test_within_context_manger(tmpd_cwd):
     with pytest.raises(NoDataFlowKernelError) as excinfo:
         square(2).result()
     assert str(excinfo.value) == "Must first load config"
+
+
+@pytest.mark.local
+def test_exit_skip():
+    config = fresh_config()
+    config.exit_mode = "skip"
+
+    with parsl.load(config) as dfk:
+        ev = Event()
+        fut = wait_for_event(ev)
+        # deliberately don't wait for this to finish, so that the context
+        # manager can exit
+
+    assert parsl.dfk() is dfk, "global dfk should be left in place by skip mode"
+
+    assert not fut.done(), "wait_for_event should not be done yet"
+    ev.set()
+
+    # now we can wait for that result...
+    fut.result()
+    assert fut.done(), "wait_for_event should complete outside of context manager in 'skip' mode"
+
+    # now cleanup the DFK that the above `with` block
+    # deliberately avoided doing...
+    dfk.cleanup()
+
+
+# 'wait' mode has two cases to test:
+# 1. that we wait when there is no exception
+# 2. that we do not wait when there is an exception
+@pytest.mark.local
+def test_exit_wait_no_exception():
+    config = fresh_config()
+    config.exit_mode = "wait"
+
+    with parsl.load(config) as dfk:
+        fut = square(1)
+        # deliberately don't wait for this to finish, so that the context
+        # manager can exit
+
+    assert fut.done(), "This future should be marked as done before the context manager exits"
+
+    assert dfk.cleanup_called, "The DFK should have been cleaned up by the context manager"
+    assert DataFlowKernelLoader._dfk is None, "The global DFK should have been removed"
+
+
+@pytest.mark.local
+def test_exit_wait_exception():
+    config = fresh_config()
+    config.exit_mode = "wait"
+
+    with pytest.raises(RuntimeError):
+        with parsl.load(config) as dfk:
+            # we'll never fire this future
+            fut_never = Future()
+
+            fut_raise = raise_app()
+
+            fut_depend = square(fut_never)
+
+            # this should cause an exception, which should cause the context
+            # manager to exit, without waiting for fut_depend to finish.
+            fut_raise.result()
+
+    assert dfk.cleanup_called, "The DFK should have been cleaned up by the context manager"
+    assert DataFlowKernelLoader._dfk is None, "The global DFK should have been removed"
+    assert fut_raise.exception() is not None, "fut_raise should contain an exception"
+    assert not fut_depend.done(), "fut_depend should have been left un-done (due to dependency failure)"
+
+
+@pytest.mark.local
+def test_exit_wrong_mode():
+
+    with pytest.raises(Exception) as ex:
+        Config(exit_mode="wrongmode")
+
+    # with typeguard 4.x this is TypeCheckError,
+    # with typeguard 2.x this is TypeError
+    # we can't instantiate TypeCheckError if we're in typeguard 2.x environment
+    # because it does not exist... so check name using strings.
+    assert ex.type.__name__ == "TypeCheckError" or ex.type.__name__ == "TypeError"
parsl/tests/test_python_apps/test_dependencies_deep.py
ADDED

@@ -0,0 +1,59 @@
+import inspect
+from concurrent.futures import Future
+from typing import Any, Callable, Dict
+
+import pytest
+
+import parsl
+from parsl.executors.base import ParslExecutor
+
+# N is the number of tasks to chain
+# With mid-2024 Parsl, N>140 causes Parsl to hang
+N = 100
+
+# MAX_STACK is the maximum Python stack depth allowed for either
+# task submission to an executor or execution of a task.
+# With mid-2024 Parsl, 2-3 stack entries will be used per
+# recursively launched parsl task. So this should be smaller than
+# 2*N, but big enough to allow regular pytest+parsl stuff to
+# happen.
+MAX_STACK = 50
+
+
+def local_config():
+    return parsl.Config(executors=[ImmediateExecutor()])
+
+
+class ImmediateExecutor(ParslExecutor):
+    def start(self):
+        pass
+
+    def shutdown(self):
+        pass
+
+    def submit(self, func: Callable, resource_specification: Dict[str, Any], *args: Any, **kwargs: Any) -> Future:
+        stack_depth = len(inspect.stack())
+        assert stack_depth < MAX_STACK, "tasks should not be launched deep in the Python stack"
+        fut: Future[None] = Future()
+        res = func(*args, **kwargs)
+        fut.set_result(res)
+        return fut
+
+
+@parsl.python_app
+def chain(upstream):
+    stack_depth = len(inspect.stack())
+    assert stack_depth < MAX_STACK, "chained dependencies should not be launched deep in the Python stack"
+
+
+@pytest.mark.local
+def test_deep_dependency_stack_depth():
+
+    fut = Future()
+    here = fut
+
+    for _ in range(N):
+        here = chain(here)
+
+    fut.set_result(None)
+    here.result()
parsl/tests/unit/test_usage_tracking.py
ADDED

@@ -0,0 +1,45 @@
+"""Test usage_tracking values."""
+
+import pytest
+
+import parsl
+from parsl.config import Config
+from parsl.errors import ConfigurationError
+
+
+@pytest.mark.local
+def test_config_load():
+    """Test loading a config with usage tracking."""
+    with parsl.load(Config(usage_tracking=3)):
+        pass
+    parsl.clear()
+
+
+@pytest.mark.local
+@pytest.mark.parametrize("level", (0, 1, 2, 3, False, True))
+def test_valid(level):
+    """Test valid usage_tracking values."""
+    Config(usage_tracking=level)
+    assert Config(usage_tracking=level).usage_tracking == level
+
+
+@pytest.mark.local
+@pytest.mark.parametrize("level", (12, 1000, -1))
+def test_invalid_values(level):
+    """Test invalid usage_tracking values."""
+    with pytest.raises(ConfigurationError):
+        Config(usage_tracking=level)
+
+
+@pytest.mark.local
+@pytest.mark.parametrize("level", ("abcd", None, bytes(1), 1.0, 1j, object()))
+def test_invalid_types(level):
+    """Test invalid usage_tracking types."""
+    with pytest.raises(Exception) as ex:
+        Config(usage_tracking=level)
+
+    # with typeguard 4.x this is TypeCheckError,
+    # with typeguard 2.x this is TypeError
+    # we can't instantiate TypeCheckError if we're in typeguard 2.x environment
+    # because it does not exist... so check name using strings.
+    assert ex.type.__name__ in ["TypeCheckError", "TypeError"]
parsl/usage_tracking/levels.py
ADDED

@@ -0,0 +1,6 @@
+"""Module for defining the usage tracking levels."""
+
+DISABLED = 0  # Tracking is disabled
+LEVEL_1 = 1  # Share info about Parsl version, Python version, platform
+LEVEL_2 = 2  # Share info about config + level 1
+LEVEL_3 = 3  # Share info about app count, app fails, execution time + level 2
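Since these constants are plain ints, they can be passed wherever a `usage_tracking` level is expected; a small sketch:

    from parsl.config import Config
    from parsl.usage_tracking.levels import LEVEL_2

    config = Config(usage_tracking=LEVEL_2)  # equivalent to usage_tracking=2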
parsl/usage_tracking/usage.py
CHANGED

@@ -7,8 +7,11 @@ import time
 import uuid

 from parsl.dataflow.states import States
+from parsl.errors import ConfigurationError
 from parsl.multiprocessing import ForkProcess
 from parsl.usage_tracking.api import get_parsl_usage
+from parsl.usage_tracking.levels import DISABLED as USAGE_TRACKING_DISABLED
+from parsl.usage_tracking.levels import LEVEL_3 as USAGE_TRACKING_LEVEL_3
 from parsl.utils import setproctitle
 from parsl.version import VERSION as PARSL_VERSION

@@ -110,17 +113,32 @@ class UsageTracker:
         self.python_version = "{}.{}.{}".format(sys.version_info.major,
                                                 sys.version_info.minor,
                                                 sys.version_info.micro)
-        self.…
-        …
-        …
-        …
-        …
-        …
-        …
-        …
-        …
-        …
+        self.tracking_level = self.check_tracking_level()
+        self.start_time = None
+        logger.debug("Tracking level: {}".format(self.tracking_level))
+
+    def check_tracking_level(self) -> int:
+        """Check if tracking is enabled and return level.
+
+        Checks usage_tracking in Config
+         - Possible values: [True, False, 0, 1, 2, 3]
+
+        True/False values are treated as Level 1/Level 0 respectively.
+
+        Returns: int
+         - 0 : Tracking is disabled
+         - 1 : Tracking is enabled with level 1
+               Share info about Parsl version, Python version, platform
+         - 2 : Tracking is enabled with level 2
+               Share info about config + level 1
+         - 3 : Tracking is enabled with level 3
+               Share info about app count, app fails, execution time + level 2
         """
+        if not USAGE_TRACKING_DISABLED <= self.config.usage_tracking <= USAGE_TRACKING_LEVEL_3:
+            raise ConfigurationError(
+                f"Usage Tracking values must be 0, 1, 2, or 3 and not {self.config.usage_tracking}"
+            )
+
         return self.config.usage_tracking

@@ -133,18 +151,28 @@ class UsageTracker:
                    'parsl_v': self.parsl_version,
                    'python_v': self.python_version,
                    'platform.system': platform.system(),
-                   '…
-                   …
+                   'tracking_level': int(self.tracking_level)}
+
+        if self.tracking_level >= 2:
+            message['components'] = get_parsl_usage(self.dfk._config)
+
+        if self.tracking_level == 3:
+            self.start_time = int(time.time())
+            message['start'] = self.start_time
+
         logger.debug(f"Usage tracking start message: {message}")

         return self.encode_message(message)

     def construct_end_message(self) -> bytes:
         """Collect the final run information at the time of DFK cleanup.
+        This is only called if tracking level is 3.

         Returns:
              - Message dict dumped as json string, ready for UDP
         """
+        end_time = int(time.time())
+
         app_count = self.dfk.task_count

         app_fails = self.dfk.task_state_counts[States.failed] + self.dfk.task_state_counts[States.dep_fail]

@@ -157,7 +185,8 @@ class UsageTracker:
                          'app_fails': app_fails}

         message = {'correlator': self.correlator_uuid,
-                   'end': …
+                   'end': end_time,
+                   'execution_time': end_time - self.start_time,
                    'components': [dfk_component] + get_parsl_usage(self.dfk._config)}
         logger.debug(f"Usage tracking end message (unencoded): {message}")

@@ -168,20 +197,22 @@ class UsageTracker:

     def send_UDP_message(self, message: bytes) -> None:
         """Send UDP message."""
-        …
-        …
-        …
-        …
-        …
-        logger.debug("Usage tracking failed: {}".format(e))
+        try:
+            proc = udp_messenger(self.domain_name, self.UDP_PORT, self.sock_timeout, message)
+            self.procs.append(proc)
+        except Exception as e:
+            logger.debug("Usage tracking failed: {}".format(e))

     def send_start_message(self) -> None:
-        …
-        …
+        if self.tracking_level:
+            self.start_time = time.time()
+            message = self.construct_start_message()
+            self.send_UDP_message(message)

     def send_end_message(self) -> None:
-        …
-        …
+        if self.tracking_level == 3:
+            message = self.construct_end_message()
+            self.send_UDP_message(message)

     def close(self, timeout: float = 10.0) -> None:
         """First give each process one timeout period to finish what it is
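The net effect of the level gating can be summarized in a sketch (field names are those visible in the diff; the start message also carries earlier fields truncated in this view, such as the correlator seen in the end message):

    def start_message_fields(tracking_level: int) -> list:
        fields = ["parsl_v", "python_v", "platform.system", "tracking_level"]
        if tracking_level >= 2:
            fields.append("components")  # get_parsl_usage(config) output
        if tracking_level == 3:
            fields.append("start")       # timestamp used for execution_time at end
        return fields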
{parsl-2024.6.3.dist-info → parsl-2024.6.10.dist-info}/METADATA
CHANGED

@@ -1,9 +1,9 @@
 Metadata-Version: 2.1
 Name: parsl
-Version: 2024.6.3
+Version: 2024.6.10
 Summary: Simple data dependent workflows in Python
 Home-page: https://github.com/Parsl/parsl
-Download-URL: https://github.com/Parsl/parsl/archive/2024.06.…
+Download-URL: https://github.com/Parsl/parsl/archive/2024.06.10.tar.gz
 Author: The Parsl Team
 Author-email: parsl@googlegroups.com
 License: Apache 2.0
{parsl-2024.6.3.dist-info → parsl-2024.6.10.dist-info}/RECORD
CHANGED

@@ -1,6 +1,6 @@
 parsl/__init__.py,sha256=65VfBnxw2k8V3sHsbhKoUCqG-ps2XP2l3x3ALMqQ13Y,1777
 parsl/addresses.py,sha256=WJI5hG8DwHRMu95nCFW_GdoQTQjMXtuvJour11J1D5I,4823
-parsl/config.py,sha256=…
+parsl/config.py,sha256=WX6lnZ7dyK7D8COX8kgwTb-p5-D7LarwKqrQL5_SVX4,9340
 parsl/curvezmq.py,sha256=FtZEYP1IWDry39cH-tOKUm9TnaR1U7krOmvVYpATcOk,6939
 parsl/errors.py,sha256=SzINzQFZDBDbj9l-DPQznD0TbGkNhHIRAPkcBCogf_A,1019
 parsl/log_utils.py,sha256=u14Fkl5eDfS4HMpl0JjseNNPdbvPaugWPRQj1_af_Zo,3273

@@ -8,7 +8,7 @@ parsl/multiprocessing.py,sha256=MyaEcEq-Qf860u7V98u-PZrPNdtzOZL_NW6EhIJnmfQ,1937
 parsl/process_loggers.py,sha256=uQ7Gd0W72Jz7rrcYlOMfLsAEhkRltxXJL2MgdduJjEw,1136
 parsl/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 parsl/utils.py,sha256=91FjQiTUY383ueAjkBAgE21My9nba6SP2a2SrbB1r1Q,11250
-parsl/version.py,sha256=…
+parsl/version.py,sha256=6Fjs2de_W12bDpbGxnHHYeGEOd0mla6OUrqgAATthTg,131
 parsl/app/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 parsl/app/app.py,sha256=4LO0MsFfcR4TNx9p87OkhuUImKfb-Okt5fco36LYibM,8502
 parsl/app/bash.py,sha256=iTpWH1K5E0e60nH23bwl97zNgg5BssFIqfp-182wkjA,5656

@@ -62,7 +62,7 @@ parsl/data_provider/staging.py,sha256=ZDZuuFg38pjUStegKPcvPsfGp3iMeReMzfU6DSwtJj
 parsl/data_provider/zip.py,sha256=S4kVuH9lxAegRURYbvIUR7EYYBOccyslaqyCrVWUBhw,4497
 parsl/dataflow/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 parsl/dataflow/dependency_resolvers.py,sha256=Om8Dgh7a0ZwgXAc6TlhxLSzvxXHDlNNV1aBNiD3JTNY,3325
-parsl/dataflow/dflow.py,sha256=…
+parsl/dataflow/dflow.py,sha256=j2FApaGbY45fL4fyqQEv2pyZ4m1CnHGrAAmPZxqs2kk,67555
 parsl/dataflow/errors.py,sha256=9SxVhIJY_53FQx8x4OU8UA8nd7lvUbDllH7KfMXpYaY,2177
 parsl/dataflow/futures.py,sha256=08LuP-HFiHBIZmeKCjlsazw_WpQ5fwevrU2_WbidkYw,6080
 parsl/dataflow/memoization.py,sha256=l9uw1Bu50GucBF70M5relpGKFkE4dIM9T3R1KrxW0v0,9583

@@ -80,8 +80,8 @@ parsl/executors/flux/executor.py,sha256=gPq49CQwtSZYZggLZ0dCXdpUlllKHJbvR8WRKeGh
 parsl/executors/flux/flux_instance_manager.py,sha256=2KVcphlybF-ALYD_3_YjMUi0f5LkjdoJOT_783CW4H0,2036
 parsl/executors/high_throughput/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 parsl/executors/high_throughput/errors.py,sha256=77ZGrw9suLh9tSWjyhCaIvnC9nRAOmrXsZmvHM6nT68,626
-parsl/executors/high_throughput/executor.py,sha256=…
-parsl/executors/high_throughput/interchange.py,sha256=…
+parsl/executors/high_throughput/executor.py,sha256=rdsTocFEHWz6zDygHtHdr--udfoUt3HRS0tLdKsUXWE,37202
+parsl/executors/high_throughput/interchange.py,sha256=tCLNdSqzrz_jmhfif697K20CQpxiHl6kIJ40XFU1SBA,31681
 parsl/executors/high_throughput/manager_record.py,sha256=9XppKjDW0DJ7SMkPNxsiDs-HvXGPLrTg6Ceyh4b6gNs,433
 parsl/executors/high_throughput/monitoring_info.py,sha256=HC0drp6nlXQpAop5PTUKNjdXMgtZVvrBL0JzZJebPP4,298
 parsl/executors/high_throughput/mpi_executor.py,sha256=B2CR1pHaGQzIwTrQ-_i08NZG-NwS6yr8y7nxPaa_rkA,3760

@@ -175,7 +175,7 @@ parsl/providers/grid_engine/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMp
 parsl/providers/grid_engine/grid_engine.py,sha256=jTQjKaJh4eEXGbhrrCcXFV4AVFo2t39iVpslDR8gF6o,8565
 parsl/providers/grid_engine/template.py,sha256=a7iViKr8LXcFTPmsf_qQeVK5o_RekOAIlUOF0X1q-2M,273
 parsl/providers/kubernetes/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-parsl/providers/kubernetes/kube.py,sha256…
+parsl/providers/kubernetes/kube.py,sha256=a6l4JPTwPWzytWmI3EjmtFThqCfS8pExV3D2BSAZtus,14507
 parsl/providers/kubernetes/template.py,sha256=VsRz6cmNaII-y4OdMT6sCwzQy95SJX6NMB0hmmFBhX4,50
 parsl/providers/local/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 parsl/providers/local/local.py,sha256=pTEcl9NnjRcL8FHcMeMEtJj1IXiAOxZ2Cih97Q5jDPY,11388

@@ -186,7 +186,7 @@ parsl/providers/pbspro/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3
 parsl/providers/pbspro/pbspro.py,sha256=jh9rzSOKRf0LKtqHSaolqVQtRa1jyjcZLsjk8Wp-llg,8794
 parsl/providers/pbspro/template.py,sha256=y-Dher--t5Eury-c7cAuSZs9FEUXWiruFUI07v81558,315
 parsl/providers/slurm/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-parsl/providers/slurm/slurm.py,sha256=…
+parsl/providers/slurm/slurm.py,sha256=4PwAXTqnOFKDfMxIWYf6XURl7tZL4tYRoccQ-ffasbM,14289
 parsl/providers/slurm/template.py,sha256=KpgBEFMc1ps-38jdrk13xUGx9TCivu-iF90jgQDdiEQ,315
 parsl/providers/torque/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 parsl/providers/torque/template.py,sha256=4qfc2gmlEhRCAD7erFDOs4prJQ43I8s4E8DSUSVQx3A,358

@@ -199,7 +199,7 @@ parsl/serialize/facade.py,sha256=SpKGSpI8PQb3hhxuKRJUYoQoq284t5np9ouTpogKmtU,679
 parsl/serialize/proxystore.py,sha256=o-ha9QAvVhbN8y9S1itk3W0O75eyHYZw2AvB2xu5_Lg,1624
 parsl/tests/__init__.py,sha256=VTtJzOzz_x6fWNh8IOnsgFqVbdiJShi2AZH21mcmID4,204
 parsl/tests/callables_helper.py,sha256=ceP1YYsNtrZgKT6MAIvpgdccEjQ_CpFEOnZBGHKGOx0,30
-parsl/tests/conftest.py,sha256=…
+parsl/tests/conftest.py,sha256=uD8LI4_U8EoRJ_i224InZC2zbPoOhOYCrO8oryp2z88,14805
 parsl/tests/test_aalst_patterns.py,sha256=lNIxb7nIgh1yX7hR2fr_ck_mxYJxx8ASKK9zHUVqPno,9614
 parsl/tests/test_callables.py,sha256=97vrIF1_hfDGd81FM1bhR6FemZMWFcALrH6pVHMTCt8,1974
 parsl/tests/test_curvezmq.py,sha256=yyhlS4vmaZdMitiySoy4l_ih9H1bsPiN-tMdwIh3H20,12431

@@ -217,6 +217,7 @@ parsl/tests/configs/comet.py,sha256=Azsm2tLBau5Ox3RcKqqD948Wb_dJvAq7MvBZAfXVXWw,
 parsl/tests/configs/cooley_htex.py,sha256=f7KtkzwUhNacJpIuTqNS0NKnOcEUWHzzhZM3iVTd-s0,1360
 parsl/tests/configs/ec2_single_node.py,sha256=rK9AfMf4C84CXMhS5nhgHA_dNG2An7Yiq2yzR4h6MEE,1423
 parsl/tests/configs/ec2_spot.py,sha256=NKDCKgKxYNOHGVLBl2DFfiUwkR6xQnyhNb_E04TBs28,1253
+parsl/tests/configs/flux_local.py,sha256=xliKQfB5FFpfNHWYEHoA8FKOTVHFCXVhWNuKQ5VJNTk,182
 parsl/tests/configs/frontera.py,sha256=VXaRcvsi9ZjqJHi71BbKXSJBuQXdhCzPxXKW7H3LRBI,1567
 parsl/tests/configs/htex_ad_hoc_cluster.py,sha256=Nr5ZVs4kVvX2UbRk8j9VW6xYGf9SR43SvodkU8RVWEQ,944
 parsl/tests/configs/htex_local.py,sha256=o7Lxz1nErHpLNcH7vEEy9KyCNiEf6r3gpCrBmdQbh94,719

@@ -305,7 +306,7 @@ parsl/tests/test_bash_apps/test_memoize_ignore_args_regr.py,sha256=8geUkrr09Oc4Z
 parsl/tests/test_bash_apps/test_multiline.py,sha256=stpMEv2eopGG-ietxjUtD5gYMOVpwPdLauDizjUfTdA,1082
 parsl/tests/test_bash_apps/test_pipeline.py,sha256=1kQDD8-Dh5H9SKFcKHzN_mSrdxAV_VYzk8ZnDyna3l8,2444
 parsl/tests/test_bash_apps/test_std_uri.py,sha256=CvAt8BUhNl2pA5chq9YyhkD6eo2IUH6PjWfe3SQ-YRU,3752
-parsl/tests/test_bash_apps/test_stdout.py,sha256=…
+parsl/tests/test_bash_apps/test_stdout.py,sha256=hrzHXLt308qH2Gg_r0-qy5nFBNXI56vCZQBXIIocCPY,3198
 parsl/tests/test_channels/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 parsl/tests/test_channels/test_large_output.py,sha256=PGeNSW_sN5mR7KF1hVL2CPfktydYxo4oNz1wVQ-ENN0,595
 parsl/tests/test_checkpointing/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0

@@ -348,7 +349,7 @@ parsl/tests/test_htex/test_managers_command.py,sha256=Y-eUjtBzwW9erCYdph9bOesbkU
 parsl/tests/test_htex/test_missing_worker.py,sha256=gyp5i7_t-JHyJGtz_eXZKKBY5w8oqLOIxO6cJgGJMtQ,745
 parsl/tests/test_htex/test_multiple_disconnected_blocks.py,sha256=Axn8us43dA722O4PWdqxCJM5f_vinZqjFT1WAEvC_ZM,1995
 parsl/tests/test_htex/test_worker_failure.py,sha256=Uz-RHI-LK78FMjXUvrUFmo4iYfmpDVBUcBxxRb3UG9M,603
-parsl/tests/test_htex/test_zmq_binding.py,sha256=…
+parsl/tests/test_htex/test_zmq_binding.py,sha256=Bealo3kRwiu76uwj68bvXH4JNVYUsFtarNKt80f3a04,3757
 parsl/tests/test_monitoring/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 parsl/tests/test_monitoring/test_app_names.py,sha256=ayyxySGWpKSe9dDw2UeJo1dicxjpALRuLsJfprZV4Eg,2174
 parsl/tests/test_monitoring/test_basic.py,sha256=lGyHEJt_rokawv_XeAx-bxV84IlZUFR4KI0PQAiLsFg,3714

@@ -377,9 +378,10 @@ parsl/tests/test_providers/test_submiterror_deprecation.py,sha256=m1L8dV_xrbjQsN
 parsl/tests/test_python_apps/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 parsl/tests/test_python_apps/test_arg_input_types.py,sha256=JXpfHiu8lr9BN6u1OzqFvGwBhxzsGTPMewHx6Wdo-HI,670
 parsl/tests/test_python_apps/test_basic.py,sha256=lFqh4ugePbp_FRiHGUXxzV34iS7l8C5UkxTHuLcpnYs,855
-parsl/tests/test_python_apps/test_context_manager.py,sha256=…
+parsl/tests/test_python_apps/test_context_manager.py,sha256=8kUgcxN-6cz2u-lUoDhMAgu_ObUwEZvE3Eyxra6pFCo,3869
 parsl/tests/test_python_apps/test_dep_standard_futures.py,sha256=a3decndowPh8ma641BbxFAyMUZFGMT00TYpa7Y-7dV8,860
 parsl/tests/test_python_apps/test_dependencies.py,sha256=IRiTI_lPoWBSFSFnaBlE6Bv08PKEaf-qj5dfqO2RjT0,272
+parsl/tests/test_python_apps/test_dependencies_deep.py,sha256=Cuow2LLGY7zffPFj89AOIwKlXxHtsin3v_UIhfdwV_w,1542
 parsl/tests/test_python_apps/test_depfail_propagation.py,sha256=3q3HlVWrOixFtXWBvR_ypKtbdAHAJcKndXQ5drwrBQU,1488
 parsl/tests/test_python_apps/test_fail.py,sha256=0Gld8LS6NB0Io1bU82vVR73twkuL5nW0ifKbIUcsJcw,1671
 parsl/tests/test_python_apps/test_fibonacci_iterative.py,sha256=ly2s5HuB9R53Z2FM_zy0WWdOk01iVhgcwSpQyK6ErIY,573

@@ -404,7 +406,7 @@ parsl/tests/test_python_apps/test_simple.py,sha256=LYGjdHvRizTpYzZePPvwKSPwrr2MP
 parsl/tests/test_python_apps/test_timeout.py,sha256=uENfT-1DharQkqkeG7a89E-gU1gjE7ATJrBZGUKvZSA,998
 parsl/tests/test_python_apps/test_type5.py,sha256=kUyA1NuFu-DDXsJNNvJLZVyewZBt7QAOhcGm2DWFTQw,777
 parsl/tests/test_radical/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-parsl/tests/test_radical/test_mpi_funcs.py,sha256…
+parsl/tests/test_radical/test_mpi_funcs.py,sha256=PSnLE2IQTIXIysRvmFh2xZ-4wnBL9GeiwXQjW-0z6dk,765
 parsl/tests/test_regression/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 parsl/tests/test_regression/test_1480.py,sha256=sJjcX8O0eL_OG7L3skjT3JaIUyFC-kk0ui3puaDatKA,546
 parsl/tests/test_regression/test_1606_wait_for_current_tasks.py,sha256=frqPtaiVysevj9nCWoQlAeh9K1jQO5zaahr9ev_Mx_0,1134

@@ -457,15 +459,17 @@ parsl/tests/test_utils/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3
 parsl/tests/test_utils/test_representation_mixin.py,sha256=kUZeIDwA2rlbJ3-beGzLLwf3dOplTMCrWJN87etHcyY,1633
 parsl/tests/unit/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 parsl/tests/unit/test_file.py,sha256=vLycnYcv3bvSzL-FV8WdoibqTyb41BrH1LUYBavobsg,2850
+parsl/tests/unit/test_usage_tracking.py,sha256=eoEAHHINQ15LlQDulH_yUYZLBHnrm_rs-IK2vws3Nes,1345
 parsl/usage_tracking/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 parsl/usage_tracking/api.py,sha256=iaCY58Dc5J4UM7_dJzEEs871P1p1HdxBMtNGyVdzc9g,1821
-parsl/usage_tracking/…
-parsl…
-parsl-2024.6.…
-parsl-2024.6.…
-parsl-2024.6.…
-parsl-2024.6.…
-parsl-2024.6.…
-parsl-2024.6.…
-parsl-2024.6.…
-parsl-2024.6.…
+parsl/usage_tracking/levels.py,sha256=xbfzYEsd55KiZJ-mzNgPebvOH4rRHum04hROzEf41tU,291
+parsl/usage_tracking/usage.py,sha256=qNEJ7nPimqd3Y7OWFLdYmNwJ6XDKlyfV_fTzasxsQw8,8690
+parsl-2024.6.10.data/scripts/exec_parsl_function.py,sha256=RUkJ4JSJAjr7YyRZ58zhMdg8cR5dVV9odUl3AuzNf3k,7802
+parsl-2024.6.10.data/scripts/parsl_coprocess.py,sha256=zrVjEqQvFOHxsLufPi00xzMONagjVwLZbavPM7bbjK4,5722
+parsl-2024.6.10.data/scripts/process_worker_pool.py,sha256=weug6_LAMbqEKQhiI6ZMg8r3e-XBDw1-L5_COEt7caM,41879
+parsl-2024.6.10.dist-info/LICENSE,sha256=tAkwu8-AdEyGxGoSvJ2gVmQdcicWw3j1ZZueVV74M-E,11357
+parsl-2024.6.10.dist-info/METADATA,sha256=zXwr4W5jcm17zCmQuBJZX1PLlX0_HzJsFhkqF_7KUB0,4124
+parsl-2024.6.10.dist-info/WHEEL,sha256=GJ7t_kWBFywbagK5eo9IoUwLW6oyOeTKmQ-9iHFVNxQ,92
+parsl-2024.6.10.dist-info/entry_points.txt,sha256=XqnsWDYoEcLbsMcpnYGKLEnSBmaIe1YoM5YsBdJG2tI,176
+parsl-2024.6.10.dist-info/top_level.txt,sha256=PIheYoUFQtF2icLsgOykgU-Cjuwr2Oi6On2jo5RYgRM,6
+parsl-2024.6.10.dist-info/RECORD,,