parsl 2023.10.23__py3-none-any.whl → 2023.11.20__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- parsl/__init__.py +1 -0
- parsl/app/app.py +29 -21
- parsl/channels/base.py +12 -24
- parsl/config.py +19 -12
- parsl/configs/ad_hoc.py +2 -2
- parsl/dataflow/dflow.py +10 -4
- parsl/executors/base.py +1 -3
- parsl/executors/high_throughput/executor.py +3 -3
- parsl/executors/high_throughput/interchange.py +59 -53
- parsl/executors/high_throughput/process_worker_pool.py +2 -2
- parsl/executors/high_throughput/zmq_pipes.py +1 -1
- parsl/executors/radical/__init__.py +4 -0
- parsl/executors/radical/executor.py +550 -0
- parsl/executors/radical/rpex_master.py +42 -0
- parsl/executors/radical/rpex_resources.py +165 -0
- parsl/executors/radical/rpex_worker.py +61 -0
- parsl/executors/status_handling.py +1 -2
- parsl/executors/taskvine/exec_parsl_function.py +3 -4
- parsl/executors/taskvine/executor.py +18 -4
- parsl/executors/taskvine/factory.py +1 -1
- parsl/executors/taskvine/manager.py +12 -16
- parsl/executors/taskvine/utils.py +5 -5
- parsl/executors/threads.py +1 -2
- parsl/executors/workqueue/exec_parsl_function.py +2 -1
- parsl/executors/workqueue/executor.py +34 -24
- parsl/jobs/job_status_poller.py +2 -3
- parsl/monitoring/monitoring.py +6 -6
- parsl/monitoring/remote.py +1 -1
- parsl/monitoring/visualization/plots/default/workflow_plots.py +4 -4
- parsl/monitoring/visualization/plots/default/workflow_resource_plots.py +2 -2
- parsl/providers/slurm/slurm.py +1 -1
- parsl/tests/configs/ad_hoc_cluster_htex.py +3 -3
- parsl/tests/configs/htex_ad_hoc_cluster.py +1 -1
- parsl/tests/configs/local_radical.py +20 -0
- parsl/tests/configs/local_radical_mpi.py +20 -0
- parsl/tests/configs/local_threads_monitoring.py +1 -1
- parsl/tests/conftest.py +6 -2
- parsl/tests/scaling_tests/vineex_condor.py +1 -1
- parsl/tests/scaling_tests/vineex_local.py +1 -1
- parsl/tests/scaling_tests/wqex_condor.py +1 -1
- parsl/tests/scaling_tests/wqex_local.py +1 -1
- parsl/tests/test_docs/test_kwargs.py +37 -0
- parsl/tests/test_python_apps/test_garbage_collect.py +1 -1
- parsl/tests/test_python_apps/test_lifted.py +3 -2
- parsl/tests/test_radical/__init__.py +0 -0
- parsl/tests/test_radical/test_mpi_funcs.py +27 -0
- parsl/tests/test_regression/test_1606_wait_for_current_tasks.py +1 -1
- parsl/utils.py +4 -4
- parsl/version.py +1 -1
- {parsl-2023.10.23.data → parsl-2023.11.20.data}/scripts/exec_parsl_function.py +2 -1
- {parsl-2023.10.23.data → parsl-2023.11.20.data}/scripts/process_worker_pool.py +2 -2
- {parsl-2023.10.23.dist-info → parsl-2023.11.20.dist-info}/METADATA +5 -2
- {parsl-2023.10.23.dist-info → parsl-2023.11.20.dist-info}/RECORD +58 -48
- {parsl-2023.10.23.dist-info → parsl-2023.11.20.dist-info}/WHEEL +1 -1
- {parsl-2023.10.23.data → parsl-2023.11.20.data}/scripts/parsl_coprocess.py +0 -0
- {parsl-2023.10.23.dist-info → parsl-2023.11.20.dist-info}/LICENSE +0 -0
- {parsl-2023.10.23.dist-info → parsl-2023.11.20.dist-info}/entry_points.txt +0 -0
- {parsl-2023.10.23.dist-info → parsl-2023.11.20.dist-info}/top_level.txt +0 -0
parsl/monitoring/visualization/plots/default/workflow_plots.py
CHANGED
@@ -22,7 +22,7 @@ gantt_colors = {'unsched': 'rgb(240, 240, 240)',
                 'exec_done': 'rgb(0, 200, 0)',
                 'memo_done': 'rgb(64, 200, 64)',
                 'fail_retryable': 'rgb(200, 128,128)'
-
+                }


 def task_gantt_plot(df_task, df_status, time_completed=None):
@@ -50,7 +50,7 @@ def task_gantt_plot(df_task, df_status, time_completed=None):
                'Start': last_status['timestamp'],
                'Finish': status['timestamp'],
                'Resource': last_status['task_status_name']
-
+            }
             parsl_tasks.extend([last_status_bar])
             last_status = status

@@ -60,7 +60,7 @@ def task_gantt_plot(df_task, df_status, time_completed=None):
            'Start': last_status['timestamp'],
            'Finish': time_completed,
            'Resource': last_status['task_status_name']
-
+        }
         parsl_tasks.extend([last_status_bar])

     fig = ff.create_gantt(parsl_tasks,
@@ -205,7 +205,7 @@ dag_state_colors = {"unsched": (0, 'rgb(240, 240, 240)'),
                     "fail_retryable": (8, 'rgb(200, 128,128)'),
                     "joining": (9, 'rgb(128, 128, 255)'),
                     "running_ended": (10, 'rgb(64, 64, 255)')
-
+                    }


 def workflow_dag_plot(df_tasks, group_by_apps=True):
parsl/monitoring/visualization/plots/default/workflow_resource_plots.py
CHANGED
@@ -164,7 +164,7 @@ def worker_efficiency(task, node):
                          y=[total_workers] * (end - start + 1),
                          name='Total of workers in whole run',
                      )
-
+                  ],
                   layout=go.Layout(xaxis=dict(autorange=True,
                                               title='Time (seconds)'),
                                    yaxis=dict(title='Number of workers'),
@@ -230,7 +230,7 @@ def resource_efficiency(resource, node, label):
                          y=[total] * (end - start + 1),
                          name=name2,
                      )
-
+                  ],
                   layout=go.Layout(xaxis=dict(autorange=True,
                                               title='Time (seconds)'),
                                    yaxis=dict(title=yaxis),
parsl/providers/slurm/slurm.py
CHANGED
@@ -250,7 +250,7 @@ class SlurmProvider(ClusterProvider, RepresentationMixin):
                     self.resources[job_id] = {'job_id': job_id, 'status': JobStatus(JobState.PENDING)}
                     break
             else:
-                logger.error("Could not read job ID from
+                logger.error("Could not read job ID from submit command standard output.")
                 logger.error("Retcode:%s STDOUT:%s STDERR:%s", retcode, stdout.strip(), stderr.strip())
         else:
             logger.error("Submit command failed")
parsl/tests/configs/ad_hoc_cluster_htex.py
CHANGED
@@ -9,8 +9,8 @@ user_opts = {'adhoc':
              {'username': 'YOUR_USERNAME',
               'script_dir': 'YOUR_SCRIPT_DIR',
               'remote_hostnames': ['REMOTE_HOST_URL_1', 'REMOTE_HOST_URL_2']
-
-             } # type: Dict[str, Dict[str, Any]]
+              }
+             } # type: Dict[str, Dict[str, Any]]

 config = Config(
     executors=[
@@ -25,7 +25,7 @@ config = Config(
             channels=[SSHChannel(hostname=m,
                                  username=user_opts['adhoc']['username'],
                                  script_dir=user_opts['adhoc']['script_dir'],
-
+                                 ) for m in user_opts['adhoc']['remote_hostnames']]
             )
         )
     ],
parsl/tests/configs/htex_ad_hoc_cluster.py
CHANGED
@@ -20,7 +20,7 @@ config = Config(
             channels=[SSHChannel(hostname=m,
                                  username=user_opts['adhoc']['username'],
                                  script_dir=user_opts['adhoc']['script_dir'],
-
+                                 ) for m in user_opts['adhoc']['remote_hostnames']]
             )
         )
     ],
parsl/tests/configs/local_radical.py
ADDED
@@ -0,0 +1,20 @@
+import os
+
+from parsl.config import Config
+from parsl.executors.radical import RadicalPilotExecutor
+from parsl.executors.radical import ResourceConfig
+
+
+rpex_cfg = ResourceConfig()
+
+
+def fresh_config():
+
+    return Config(
+        executors=[
+            RadicalPilotExecutor(
+                label='RPEXBulk',
+                rpex_cfg=rpex_cfg,
+                bulk_mode=True,
+                resource='local.localhost',
+                runtime=30, cores=4)])
parsl/tests/configs/local_radical_mpi.py
ADDED
@@ -0,0 +1,20 @@
+import os
+from parsl.config import Config
+
+
+def fresh_config():
+    from parsl.executors.radical import ResourceConfig
+    from parsl.executors.radical import RadicalPilotExecutor
+
+    rpex_cfg = ResourceConfig()
+    rpex_cfg.worker_type = "MPI"
+    rpex_cfg.worker_cores_per_node = 7
+
+    return Config(
+        executors=[
+            RadicalPilotExecutor(
+                label='RPEXMPI',
+                rpex_cfg=rpex_cfg,
+                bulk_mode=True,
+                resource='local.localhost',
+                runtime=30, cores=8)])
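The two configs above expose fresh_config() factories for the new RadicalPilotExecutor. As a minimal usage sketch (not part of the diff, and assuming a working radical.pilot installation), such a config would typically be loaded like this:

    # Hypothetical sketch: load the new RADICAL-Pilot test config and run one app through it.
    import parsl
    from parsl import python_app
    from parsl.tests.configs.local_radical import fresh_config


    @python_app
    def double(x):
        return x * 2


    if __name__ == '__main__':
        parsl.load(fresh_config())    # starts the RadicalPilotExecutor defined above
        print(double(21).result())    # prints 42 once the task completes
        parsl.dfk().cleanup()         # shut the DataFlowKernel down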
parsl/tests/conftest.py
CHANGED
@@ -243,7 +243,7 @@ def setup_data(tmpd_cwd):


 @pytest.fixture(autouse=True, scope='function')
-def
+def assert_no_outstanding_tasks(pytestconfig):
     """If we're in a config-file based mode, wait for task completion between
     each test. This will detect early on (by hanging) if particular test
     tasks are not finishing, rather than silently falling off the end of
@@ -254,7 +254,11 @@ def wait_for_task_completion(pytestconfig):
     config = pytestconfig.getoption('config')[0]
     yield
     if config != 'local':
-
+        logger.info("Checking no outstanding tasks")
+        for task_record in parsl.dfk().tasks.values():
+            fut = task_record['app_fu']
+            assert fut.done(), f"Incomplete task found, task id {task_record['id']}"
+        logger.info("No outstanding tasks found")


 def pytest_make_collect_report(collector):
parsl/tests/test_docs/test_kwargs.py
ADDED
@@ -0,0 +1,37 @@
+"""Functions used to explain kwargs"""
+from pathlib import Path
+
+from parsl import python_app, File
+
+
+def test_inputs():
+    @python_app()
+    def map_app(x):
+        return x * 2
+
+    @python_app()
+    def reduce_app(inputs=()):
+        return sum(inputs)
+
+    map_futures = [map_app(x) for x in range(3)]
+    reduce_future = reduce_app(inputs=map_futures)
+
+    assert reduce_future.result() == 6
+
+
+def test_outputs(tmpdir):
+    @python_app()
+    def write_app(message, outputs=()):
+        """Write a single message to every file in outputs"""
+        for path in outputs:
+            with open(path, 'w') as fp:
+                print(message, file=fp)
+
+    to_write = [
+        File(Path(tmpdir) / 'output-0.txt'),
+        File(Path(tmpdir) / 'output-1.txt')
+    ]
+    write_app('Hello!', outputs=to_write).result()
+    for path in to_write:
+        with open(path) as fp:
+            assert fp.read() == 'Hello!\n'
parsl/tests/test_python_apps/test_lifted.py
CHANGED
@@ -89,11 +89,12 @@ def test_returns_a_class_instance():

 def test_returns_a_class_instance_no_underscores():
     # test that _underscore attribute references are not lifted
+    f = returns_a_class_instance()
     with pytest.raises(AttributeError):
-
+        f._nosuchattribute.result()
+    f.exception()  # wait for f to complete before the test ends


-@pytest.mark.skip("returning classes is not supported in WorkQueue or Task Vine - see issue #2908")
 def test_returns_a_class():

     # precondition that returns_a_class behaves
parsl/tests/test_radical/__init__.py
File without changes
parsl/tests/test_radical/test_mpi_funcs.py
ADDED
@@ -0,0 +1,27 @@
+import parsl
+import pytest
+
+from parsl.tests.configs.local_radical_mpi import fresh_config as local_config
+
+
+@parsl.python_app
+def test_mpi_func(msg, sleep, comm=None, parsl_resource_specification={}):
+    import time
+    msg = 'hello %d/%d: %s' % (comm.rank, comm.size, msg)
+    time.sleep(sleep)
+    print(msg)
+    return comm.size
+
+
+apps = []
+
+
+@pytest.mark.local
+def test_radical_mpi(n=7):
+    # rank size should be > 1 for the
+    # radical runtime system to run this function in MPI env
+    for i in range(2, n):
+        spec = {'ranks': i}
+        t = test_mpi_func(msg='mpi.func.%06d' % i, sleep=1, comm=None, parsl_resource_specification=spec)
+        apps.append(t)
+    assert [len(app.result()) for app in apps] == list(range(2, n))
parsl/utils.py
CHANGED
@@ -7,7 +7,7 @@ import threading
 import time
 from contextlib import contextmanager
 from types import TracebackType
-from typing import Any, Callable, List, Tuple, Union, Generator, IO, AnyStr, Dict, Optional
+from typing import Any, Callable, List, Sequence, Tuple, Union, Generator, IO, AnyStr, Dict, Optional

 import typeguard
 from typing_extensions import Type
@@ -47,7 +47,7 @@ def get_version() -> str:


 @typeguard.typechecked
-def get_all_checkpoints(rundir: str = "runinfo") ->
+def get_all_checkpoints(rundir: str = "runinfo") -> Sequence[str]:
     """Finds the checkpoints from all runs in the rundir.

     Kwargs:
@@ -76,7 +76,7 @@ def get_all_checkpoints(rundir: str = "runinfo") -> List[str]:


 @typeguard.typechecked
-def get_last_checkpoint(rundir: str = "runinfo") ->
+def get_last_checkpoint(rundir: str = "runinfo") -> Sequence[str]:
     """Finds the checkpoint from the last run, if one exists.

     Note that checkpoints are incremental, and this helper will not find
@@ -128,7 +128,7 @@ def get_std_fname_mode(

 @contextmanager
 def wait_for_file(path: str, seconds: int = 10) -> Generator[None, None, None]:
-    for
+    for _ in range(0, int(seconds * 100)):
         time.sleep(seconds / 100.)
         if os.path.exists(path):
             break
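Both checkpoint helpers above now advertise a Sequence[str] return type. As a small sketch (not part of the diff; Config's checkpoint_files/checkpoint_mode options and ThreadPoolExecutor are standard parsl APIs), their result is normally fed straight into a Config to resume from earlier runs:

    # Hypothetical sketch: reuse checkpoints from previous runs in a new configuration.
    from parsl.config import Config
    from parsl.executors.threads import ThreadPoolExecutor
    from parsl.utils import get_all_checkpoints

    config = Config(
        executors=[ThreadPoolExecutor()],
        checkpoint_files=get_all_checkpoints(),  # Sequence[str] of checkpoint dirs under runinfo/
        checkpoint_mode='task_exit',
    )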
parsl/version.py
CHANGED
{parsl-2023.10.23.data → parsl-2023.11.20.data}/scripts/exec_parsl_function.py
CHANGED
@@ -4,6 +4,7 @@ from parsl.utils import get_std_fname_mode
 import traceback
 import sys
 import pickle
+from parsl.serialize import serialize

 # This scripts executes a parsl function which is pickled in a file:
 #
@@ -32,7 +33,7 @@ def load_pickled_file(filename):

 def dump_result_to_file(result_file, result_package):
     with open(result_file, "wb") as f_out:
-
+        f_out.write(serialize(result_package))


 def remap_location(mapping, parsl_file):
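The change above writes the result package with parsl's serialization facade rather than raw pickle. A tiny round-trip sketch (not part of the diff) using the same parsl.serialize module:

    # Hypothetical sketch: data written with serialize() can be recovered with deserialize().
    from parsl.serialize import serialize, deserialize

    package = {'task_id': 0, 'result': 42}
    blob = serialize(package)
    assert deserialize(blob) == package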
{parsl-2023.10.23.data → parsl-2023.11.20.data}/scripts/process_worker_pool.py
CHANGED
@@ -234,7 +234,7 @@ class Manager:
                'dir': os.getcwd(),
                'cpu_count': psutil.cpu_count(logical=False),
                'total_memory': psutil.virtual_memory().total,
-
+               }
        b_msg = json.dumps(msg).encode('utf-8')
        return b_msg

@@ -608,7 +608,7 @@ def worker(worker_id, pool_id, pool_size, task_queue, result_queue, worker_queue
            logger.exception("Caught exception while trying to pickle the result package")
            pkl_package = pickle.dumps({'type': 'result', 'task_id': tid,
                                        'exception': serialize(RemoteExceptionWrapper(*sys.exc_info()))
-
+                                        })

            result_queue.put(pkl_package)
            tasks_in_progress.pop(worker_id)
{parsl-2023.10.23.dist-info → parsl-2023.11.20.dist-info}/METADATA
CHANGED
@@ -1,9 +1,9 @@
 Metadata-Version: 2.1
 Name: parsl
-Version: 2023.
+Version: 2023.11.20
 Summary: Simple data dependent workflows in Python
 Home-page: https://github.com/Parsl/parsl
-Download-URL: https://github.com/Parsl/parsl/archive/2023.
+Download-URL: https://github.com/Parsl/parsl/archive/2023.11.20.tar.gz
 Author: The Parsl Team
 Author-email: parsl@googlegroups.com
 License: Apache 2.0
@@ -53,6 +53,7 @@ Requires-Dist: pyyaml ; extra == 'all'
 Requires-Dist: cffi ; extra == 'all'
 Requires-Dist: jsonschema ; extra == 'all'
 Requires-Dist: proxystore ; extra == 'all'
+Requires-Dist: radical.pilot ; extra == 'all'
 Provides-Extra: aws
 Requires-Dist: boto3 ; extra == 'aws'
 Provides-Extra: azure
@@ -79,6 +80,8 @@ Provides-Extra: oauth_ssh
 Requires-Dist: oauth-ssh >=0.9 ; extra == 'oauth_ssh'
 Provides-Extra: proxystore
 Requires-Dist: proxystore ; extra == 'proxystore'
+Provides-Extra: radical-pilot
+Requires-Dist: radical.pilot ; extra == 'radical-pilot'
 Provides-Extra: visualization
 Requires-Dist: pydot ; extra == 'visualization'
 Requires-Dist: networkx <2.6,>=2.5 ; extra == 'visualization'