parsl 2023.7.3__py3-none-any.whl → 2023.7.17__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- parsl/data_provider/files.py +6 -5
- parsl/dataflow/dflow.py +7 -1
- parsl/dataflow/memoization.py +7 -7
- parsl/executors/high_throughput/executor.py +2 -3
- parsl/executors/high_throughput/process_worker_pool.py +2 -3
- parsl/launchers/errors.py +1 -1
- parsl/providers/cluster_provider.py +2 -1
- parsl/providers/local/local.py +1 -1
- parsl/serialize/base.py +3 -13
- parsl/serialize/concretes.py +22 -3
- parsl/serialize/facade.py +13 -23
- parsl/tests/conftest.py +94 -11
- parsl/tests/test_bash_apps/test_basic.py +32 -63
- parsl/tests/test_bash_apps/test_kwarg_storage.py +18 -89
- parsl/tests/test_bash_apps/test_memoize.py +17 -41
- parsl/tests/test_bash_apps/test_multiline.py +19 -45
- parsl/tests/test_bash_apps/test_pipeline.py +46 -82
- parsl/tests/test_bash_apps/test_stdout.py +15 -30
- parsl/tests/test_data/test_file_apps.py +13 -15
- parsl/tests/test_data/test_file_staging.py +2 -2
- parsl/tests/test_data/test_output_chain_filenames.py +17 -27
- parsl/tests/test_docs/test_workflow4.py +18 -28
- parsl/tests/test_error_handling/test_htex_worker_failure.py +5 -12
- parsl/tests/test_python_apps/test_fail.py +31 -69
- parsl/tests/test_python_apps/test_garbage_collect.py +15 -9
- parsl/tests/test_python_apps/test_join.py +19 -20
- parsl/tests/test_python_apps/test_mapred.py +13 -38
- parsl/tests/test_python_apps/test_memoize_bad_id_for_memo.py +6 -7
- parsl/tests/test_python_apps/test_outputs.py +11 -24
- parsl/tests/test_python_apps/test_overview.py +5 -42
- parsl/tests/test_python_apps/test_pipeline.py +16 -19
- parsl/tests/test_regression/test_1606_wait_for_current_tasks.py +35 -10
- parsl/tests/test_scaling/test_regression_1621.py +26 -20
- parsl/tests/test_scaling/test_scale_down.py +49 -32
- parsl/tests/test_serialization/test_2555_caching_deserializer.py +34 -0
- parsl/utils.py +8 -6
- parsl/version.py +1 -1
- {parsl-2023.7.3.data → parsl-2023.7.17.data}/scripts/process_worker_pool.py +2 -3
- {parsl-2023.7.3.dist-info → parsl-2023.7.17.dist-info}/METADATA +2 -2
- {parsl-2023.7.3.dist-info → parsl-2023.7.17.dist-info}/RECORD +46 -45
- {parsl-2023.7.3.data → parsl-2023.7.17.data}/scripts/exec_parsl_function.py +0 -0
- {parsl-2023.7.3.data → parsl-2023.7.17.data}/scripts/parsl_coprocess.py +0 -0
- {parsl-2023.7.3.dist-info → parsl-2023.7.17.dist-info}/LICENSE +0 -0
- {parsl-2023.7.3.dist-info → parsl-2023.7.17.dist-info}/WHEEL +0 -0
- {parsl-2023.7.3.dist-info → parsl-2023.7.17.dist-info}/entry_points.txt +0 -0
- {parsl-2023.7.3.dist-info → parsl-2023.7.17.dist-info}/top_level.txt +0 -0
parsl/tests/test_python_apps/test_pipeline.py
CHANGED
```diff
@@ -1,5 +1,7 @@
 import argparse
 
+import pytest
+
 import parsl
 from parsl.app.app import python_app
 from parsl.tests.configs.local_threads import config
@@ -11,28 +13,23 @@ def increment(x):
 
 
 @python_app
-def slow_increment(x, dur):
+def slow_increment(x):
     import time
-    time.sleep(dur)
+    time.sleep(0.001)
     return x + 1
 
 
-def test_increment(depth=5):
-    """Test simple pipeline A->B...->N
-    """
-    futs = {0: 0}
-    for i in range(1, depth):
-        futs[i] = increment(futs[i - 1])
-
-    print([futs[i].result() for i in futs if not isinstance(futs[i], int)])
-
+@pytest.mark.parametrize("depth", (2, 3))
+def test_increment(depth):
+    """Test simple pipeline A->B...->N"""
+    futs = [increment(0)]
+    futs.extend(increment(futs[i - 1]) for i in range(1, depth))
+    assert sum(f.result() for f in futs) == sum(range(1, depth + 1))
 
-def test_increment_slow(depth=2):
-    """Test simple pipeline A->B...->N with delay
-    """
-    futs = {0: 0}
-    for i in range(1, depth):
-        futs[i] = slow_increment(futs[i - 1], 0.5)
 
-
-
+@pytest.mark.parametrize("depth", (2, 3))
+def test_increment_slow(depth):
+    """Test simple pipeline A->B...->N with delay"""
+    futs = [slow_increment(0)]
+    futs.extend(slow_increment(futs[i - 1]) for i in range(1, depth))
+    assert sum(f.result() for f in futs) == sum(range(1, depth + 1))
```
parsl/tests/test_regression/test_1606_wait_for_current_tasks.py
CHANGED
```diff
@@ -1,17 +1,42 @@
+import threading
+import time
+
+import pytest
+
 import parsl
+from parsl.tests.configs.local_threads import fresh_config as local_config  # noqa
 
 
 @parsl.python_app
-def slow_app(delay):
-    import time
-    time.sleep(delay)
+def slow_app(evt: threading.Event):
+    evt.wait()
 
 
+@pytest.mark.local
 def test_wait_for_tasks():
-
-
-
-
-
-
-
+    """
+    gh#1606 reported that wait_for_current_tasks fails due to tasks being removed
+    from the DFK tasks dict as they complete; bug introduced in #1543.
+    """
+    def test_kernel(may_wait: threading.Event):
+        e1, e2 = threading.Event(), threading.Event()
+
+        # app_slow is in *middle* of internal DFK data structure
+        app_fast1, app_slow, app_fast2 = slow_app(e1), slow_app(e2), slow_app(e1)
+
+        may_wait.set()  # initiated wait in outer test
+        time.sleep(0.01)
+
+        e1.set()
+
+        while not all(f.done() for f in (app_fast1, app_fast2)):
+            time.sleep(0.01)
+
+        e2.set()
+        app_slow.result()
+
+    may_continue = threading.Event()
+    threading.Thread(target=test_kernel, daemon=True, args=(may_continue,)).start()
+
+    may_continue.wait()
+    parsl.dfk().wait_for_current_tasks()  # per sleeps, waits for all 3 tasks
```
parsl/tests/test_scaling/test_regression_1621.py
CHANGED
```diff
@@ -1,33 +1,27 @@
-
-
+import threading
+
+import pytest
 
-import logging
 import parsl
 from parsl.channels import LocalChannel
 from parsl.config import Config
 from parsl.executors import HighThroughputExecutor
 from parsl.launchers import SimpleLauncher
 from parsl.providers import LocalProvider
-import pytest
-
-
-logger = logging.getLogger(__name__)
 
 
 @parsl.python_app
 def app():
     import time
-    time.sleep(
+    time.sleep(1)
 
 
 class OneShotLocalProvider(LocalProvider):
     def __init__(self, *args, **kwargs):
-        logger.info("OneShotLocalProvider __init__ with MRO: {}".format(type(self).mro()))
         self.recorded_submits = 0
         super().__init__(*args, **kwargs)
 
     def submit(self, *args, **kwargs):
-        logger.info("OneShotLocalProvider submit")
         self.recorded_submits += 1
         return super().submit(*args, **kwargs)
 
@@ -35,32 +29,44 @@ class OneShotLocalProvider(LocalProvider):
 
 
 @pytest.mark.local
-def test_one_block():
-
+def test_one_block(tmpd_cwd):
+    """
+    this test is intended to ensure that only one block is launched when only
+    one app is invoked. this is a regression test.
+    """
     oneshot_provider = OneShotLocalProvider(
-
-
-
-
-
-
+        channel=LocalChannel(),
+        init_blocks=0,
+        min_blocks=0,
+        max_blocks=10,
+        launcher=SimpleLauncher(),
+    )
 
     config = Config(
         executors=[
             HighThroughputExecutor(
                 label="htex_local",
+                address="127.0.0.1",
                 worker_debug=True,
                 cores_per_worker=1,
                 provider=oneshot_provider,
+                worker_logdir_root=str(tmpd_cwd)
             )
         ],
         strategy='simple',
     )
 
     parsl.load(config)
+    dfk = parsl.dfk()
+
+    def poller():
+        import time
+        while True:
+            dfk.job_status_poller.poll()
+            time.sleep(0.1)
 
-
-
+    threading.Thread(target=poller, daemon=True).start()
+    app().result()
     parsl.dfk().cleanup()
     parsl.clear()
 
```
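The rewritten test drives scaling decisions itself by calling the DFK's job status poller from a daemon thread instead of sleeping through the strategy timer. The generic background-poller pattern it relies on, in isolation (a sketch; `poll_fn` stands in for `dfk.job_status_poller.poll`):

```python
import threading
import time


def start_poller(poll_fn, period: float = 0.1) -> None:
    """Invoke poll_fn repeatedly from a daemon thread until the process exits."""
    def loop() -> None:
        while True:
            poll_fn()
            time.sleep(period)

    threading.Thread(target=loop, daemon=True).start()
```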
parsl/tests/test_scaling/test_scale_down.py
CHANGED
```diff
@@ -1,80 +1,97 @@
 import logging
-import parsl
-import pytest
 import time
-from parsl import python_app
 
+import pytest
+
+import parsl
+
+from parsl import File, python_app
 from parsl.providers import LocalProvider
 from parsl.channels import LocalChannel
-# from parsl.launchers import SimpleLauncher
 from parsl.launchers import SingleNodeLauncher
-
 from parsl.config import Config
 from parsl.executors import HighThroughputExecutor
 
 logger = logging.getLogger(__name__)
 
+_max_blocks = 5
+_min_blocks = 2
+
 
 def local_config():
     return Config(
         executors=[
             HighThroughputExecutor(
-                heartbeat_period=
-                heartbeat_threshold=
-                poll_period=
+                heartbeat_period=1,
+                heartbeat_threshold=2,
+                poll_period=100,
                 label="htex_local",
+                address="127.0.0.1",
                 max_workers=1,
                 provider=LocalProvider(
                     channel=LocalChannel(),
                     init_blocks=0,
-                    max_blocks=
-                    min_blocks=
+                    max_blocks=_max_blocks,
+                    min_blocks=_min_blocks,
                     launcher=SingleNodeLauncher(),
                 ),
             )
         ],
-        max_idletime=5,
+        max_idletime=0.5,
         strategy='htex_auto_scale',
    )
 
 
 @python_app
-def
+def waiting_app(ident: int, inputs=()):
+    import pathlib
     import time
-
+
+    # Approximate an Event by writing to files; the test logic will poll this file
+    with open(inputs[0], "a") as f:
+        f.write(f"Ready: {ident}\n")
+
+    # Similarly, use Event approximation (file check!) by polling.
+    may_finish_file = pathlib.Path(inputs[1])
+    while not may_finish_file.exists():
+        time.sleep(0.01)
 
 
 # see issue #1885 for details of failures of this test.
 # at the time of issue #1885 this test was failing frequently
 # in CI.
 @pytest.mark.local
-def test_scale_out():
-    logger.info("start")
+def test_scale_out(tmpd_cwd, try_assert):
     dfk = parsl.dfk()
 
-
-
+    num_managers = len(dfk.executors['htex_local'].connected_managers())
+
+    assert num_managers == 0, "Expected 0 managers at start"
     assert dfk.executors['htex_local'].outstanding == 0, "Expected 0 tasks at start"
 
-
-
+    ntasks = 10
+    ready_path = tmpd_cwd / "workers_ready"
+    finish_path = tmpd_cwd / "workers_may_continue"
+    ready_path.touch()
+    inputs = [File(str(ready_path)), File(str(finish_path))]
 
-
-    time.sleep(15)
+    futs = [waiting_app(i, inputs=inputs) for i in range(ntasks)]
 
-
-
+    while ready_path.read_text().count("\n") < _max_blocks:
+        time.sleep(0.5)
 
-
-    [x.result() for x in fus]
+    assert len(dfk.executors['htex_local'].connected_managers()) == _max_blocks
 
-
-
+    finish_path.touch()  # Approximation of Event, via files
+    [x.result() for x in futs]
 
-
-    time.sleep(25)
+    assert dfk.executors['htex_local'].outstanding == 0
 
-
-
+    def assert_kernel():
+        return len(dfk.executors['htex_local'].connected_managers()) == _min_blocks
 
-
+    try_assert(
+        assert_kernel,
+        fail_msg=f"Expected {_min_blocks} managers when no tasks (min_blocks)",
+        timeout_ms=15000,
+    )
```
parsl/tests/test_serialization/test_2555_caching_deserializer.py
ADDED
```diff
@@ -0,0 +1,34 @@
+import parsl
+import pytest
+
+from parsl.tests.configs.htex_local import fresh_config as local_config
+
+
+@parsl.python_app
+def return_range(x):
+    return list(range(x))
+
+
+@pytest.mark.local
+def test_range_identities():
+    x = 3
+
+    fut1 = return_range(x)
+    res1 = fut1.result()
+
+    fut2 = return_range(x)
+    res2 = fut2.result()
+
+    # Check that the returned futures are different, by both usual
+    # Python equalities.
+    # This is not strictly part of the regression test for #2555
+    # but will detect related unexpected Future caching.
+
+    assert fut1 != fut2
+    assert id(fut1) != id(fut2)
+
+    # check that the two invocations returned the same value...
+    assert res1 == res2
+
+    # ... but in two different objects.
+    assert id(res1) != id(res2)
```
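Why the identity checks matter (an illustration, not part of the test): a caching deserializer could hand every caller the same mutable object, so two equal-but-shared results would corrupt each other on mutation. The property the new test pins down:

```python
# Two independently deserialized results must be equal but distinct.
res1 = list(range(3))
res2 = list(range(3))

assert res1 == res2           # same value
assert res1 is not res2       # different objects

res2.append(99)
assert res1 == [0, 1, 2]      # safe only because nothing is shared
```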
parsl/utils.py
CHANGED
```diff
@@ -5,12 +5,11 @@ import shlex
 import subprocess
 import threading
 import time
-from types import TracebackType
-
-import typeguard
 from contextlib import contextmanager
+from types import TracebackType
 from typing import Any, Callable, List, Tuple, Union, Generator, IO, AnyStr, Dict, Optional
 
+import typeguard
 from typing_extensions import Type
 
 import parsl
@@ -110,9 +109,12 @@ def get_last_checkpoint(rundir: str = "runinfo") -> List[str]:
 
 
 @typeguard.typechecked
-def get_std_fname_mode(fdname: str, stdfspec: Union[str, Tuple[str, str]]) -> Tuple[str, str]:
+def get_std_fname_mode(
+        fdname: str,
+        stdfspec: Union[os.PathLike, str, Tuple[str, str], Tuple[os.PathLike, str]]
+) -> Tuple[str, str]:
     import parsl.app.errors as pe
-    if isinstance(stdfspec, str):
+    if isinstance(stdfspec, (str, os.PathLike)):
         fname = stdfspec
         mode = 'a+'
     elif isinstance(stdfspec, tuple):
@@ -121,7 +123,7 @@ def get_std_fname_mode(fdname: str, stdfspec: Union[str, Tuple[str, str]]) -> Tuple[str, str]:
             f"{len(stdfspec)}")
         raise pe.BadStdStreamFile(msg, TypeError('Bad Tuple Length'))
     fname, mode = stdfspec
-    return fname, mode
+    return str(fname), mode
 
 
 @contextmanager
```
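The effect of the `get_std_fname_mode` change is that `stdout`/`stderr` specifications may now be `os.PathLike` as well as `str`, and the returned filename is always normalized to `str`. A quick sketch of the accepted forms:

```python
import pathlib

from parsl.utils import get_std_fname_mode

# A plain string or a Path both default to append mode.
fname, mode = get_std_fname_mode("stdout", pathlib.Path("run") / "task.out")
assert (fname, mode) == (str(pathlib.Path("run") / "task.out"), "a+")

# The (name, mode) tuple form still works; Path names are stringified.
fname, mode = get_std_fname_mode("stderr", (pathlib.Path("task.err"), "w"))
assert (fname, mode) == ("task.err", "w")
```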
parsl/executors/high_throughput/process_worker_pool.py
CHANGED
```diff
@@ -370,7 +370,7 @@ class Manager:
         logger.critical("Exiting")
 
     @wrap_with_logs
-    def worker_watchdog(self, kill_event):
+    def worker_watchdog(self, kill_event: threading.Event):
         """Keeps workers alive.
 
         Parameters:
@@ -381,7 +381,7 @@ class Manager:
 
         logger.debug("Starting worker watchdog")
 
-        while not kill_event.is_set():
+        while not kill_event.wait(self.heartbeat_period):
             for worker_id, p in self.procs.items():
                 if not p.is_alive():
                     logger.error("Worker {} has died".format(worker_id))
@@ -409,7 +409,6 @@ class Manager:
                     name="HTEX-Worker-{}".format(worker_id))
                 self.procs[worker_id] = p
                 logger.info("Worker {} has been restarted".format(worker_id))
-            time.sleep(self.heartbeat_period)
 
         logger.critical("Exiting")
 
```
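The watchdog change replaces a fixed `time.sleep` at the bottom of the loop with `Event.wait(timeout)`, so the thread still wakes every heartbeat period but exits immediately once `kill_event` is set. The pattern in isolation (a sketch, not parsl code):

```python
import threading


def watchdog_loop(kill_event: threading.Event, period: float) -> None:
    # wait() blocks for up to `period` seconds but returns True the moment
    # the event is set, so shutdown is prompt and no extra sleep is needed.
    while not kill_event.wait(period):
        pass  # periodic health checks would run here


evt = threading.Event()
t = threading.Thread(target=watchdog_loop, args=(evt, 5.0))
t.start()
evt.set()   # the loop exits well before a full 5-second period elapses
t.join()
```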
{parsl-2023.7.3.dist-info → parsl-2023.7.17.dist-info}/METADATA
CHANGED
```diff
@@ -1,9 +1,9 @@
 Metadata-Version: 2.1
 Name: parsl
-Version: 2023.7.3
+Version: 2023.7.17
 Summary: Simple data dependent workflows in Python
 Home-page: https://github.com/Parsl/parsl
-Download-URL: https://github.com/Parsl/parsl/archive/2023.07.03.tar.gz
+Download-URL: https://github.com/Parsl/parsl/archive/2023.07.17.tar.gz
 Author: The Parsl Team
 Author-email: parsl@googlegroups.com
 License: Apache 2.0
```