parsl 2024.4.15__py3-none-any.whl → 2024.4.29__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- parsl/addresses.py +2 -2
- parsl/app/bash.py +10 -2
- parsl/app/errors.py +3 -5
- parsl/config.py +10 -1
- parsl/data_provider/zip.py +32 -0
- parsl/dataflow/dflow.py +102 -62
- parsl/dataflow/futures.py +26 -5
- parsl/executors/base.py +16 -0
- parsl/executors/high_throughput/executor.py +7 -1
- parsl/executors/taskvine/executor.py +6 -0
- parsl/executors/workqueue/executor.py +6 -0
- parsl/monitoring/monitoring.py +15 -0
- parsl/providers/kubernetes/kube.py +20 -1
- parsl/tests/configs/local_threads_checkpoint_periodic.py +8 -10
- parsl/tests/conftest.py +12 -1
- parsl/tests/test_bash_apps/test_basic.py +2 -0
- parsl/tests/test_bash_apps/test_std_uri.py +128 -0
- parsl/tests/test_checkpointing/test_periodic.py +20 -33
- parsl/tests/test_checkpointing/test_task_exit.py +1 -1
- parsl/tests/test_htex/test_basic.py +2 -2
- parsl/tests/test_htex/test_missing_worker.py +0 -4
- parsl/tests/test_htex/test_zmq_binding.py +1 -0
- parsl/tests/test_monitoring/test_stdouterr.py +137 -0
- parsl/tests/test_mpi_apps/test_resource_spec.py +2 -8
- parsl/tests/test_python_apps/test_context_manager.py +3 -3
- parsl/tests/test_scaling/test_regression_1621.py +11 -11
- parsl/tests/test_staging/test_staging_stdout.py +61 -0
- parsl/tests/test_staging/test_zip_in.py +42 -0
- parsl/tests/test_staging/test_zip_to_zip.py +44 -0
- parsl/tests/unit/__init__.py +0 -0
- parsl/tests/unit/test_file.py +99 -0
- parsl/usage_tracking/api.py +66 -0
- parsl/usage_tracking/usage.py +39 -26
- parsl/utils.py +11 -2
- parsl/version.py +1 -1
- {parsl-2024.4.15.dist-info → parsl-2024.4.29.dist-info}/METADATA +4 -4
- {parsl-2024.4.15.dist-info → parsl-2024.4.29.dist-info}/RECORD +44 -36
- {parsl-2024.4.15.data → parsl-2024.4.29.data}/scripts/exec_parsl_function.py +0 -0
- {parsl-2024.4.15.data → parsl-2024.4.29.data}/scripts/parsl_coprocess.py +0 -0
- {parsl-2024.4.15.data → parsl-2024.4.29.data}/scripts/process_worker_pool.py +0 -0
- {parsl-2024.4.15.dist-info → parsl-2024.4.29.dist-info}/LICENSE +0 -0
- {parsl-2024.4.15.dist-info → parsl-2024.4.29.dist-info}/WHEEL +0 -0
- {parsl-2024.4.15.dist-info → parsl-2024.4.29.dist-info}/entry_points.txt +0 -0
- {parsl-2024.4.15.dist-info → parsl-2024.4.29.dist-info}/top_level.txt +0 -0
parsl/tests/conftest.py
CHANGED
@@ -3,8 +3,10 @@ import itertools
 import logging
 import os
 import pathlib
+import random
 import re
 import shutil
+import string
 import time
 import types
 import signal
@@ -139,7 +141,7 @@ def pytest_configure(config):
     )
     config.addinivalue_line(
        'markers',
-        'staging_required: Marks tests that require a staging provider, when there is no sharedFS
+        'staging_required: Marks tests that require a staging provider, when there is no sharedFS'
    )
    config.addinivalue_line(
        'markers',
@@ -245,6 +247,7 @@ def load_dfk_local_module(request, pytestconfig, tmpd_cwd_session):
 
        if callable(local_teardown):
            local_teardown()
+            assert DataFlowKernelLoader._dfk is None, "Expected teardown to clear DFK"
 
        if local_config:
            if parsl.dfk() != dfk:
@@ -421,3 +424,11 @@ def try_assert():
            raise AssertionError("Bad assert call: no attempts or timeout period")
 
    yield _impl
+
+
+@pytest.fixture
+def randomstring():
+    def func(length=5, alphabet=string.ascii_letters):
+        return "".join(random.choice(alphabet) for _ in range(length))
+
+    return func
parsl/tests/test_bash_apps/test_basic.py
CHANGED
@@ -50,6 +50,8 @@ def test_auto_log_filename_format(caplog):
        foo_future.result())
 
    log_fpath = foo_future.stdout
+    assert isinstance(log_fpath, str)
+
    log_pattern = fr".*/task_\d+_foo_{app_label}"
    assert re.match(log_pattern, log_fpath), 'Output file "{0}" does not match pattern "{1}"'.format(
        log_fpath, log_pattern)
parsl/tests/test_bash_apps/test_std_uri.py
ADDED
@@ -0,0 +1,128 @@
+import logging
+import parsl
+import pytest
+import zipfile
+
+from functools import partial
+from parsl.app.futures import DataFuture
+from parsl.data_provider.files import File
+from parsl.executors import ThreadPoolExecutor
+
+
+@parsl.bash_app
+def app_stdout(stdout=parsl.AUTO_LOGNAME):
+    return "echo hello"
+
+
+def const_str(cpath, task_record, err_or_out):
+    return cpath
+
+
+def const_with_cpath(autopath_specifier, content_path, caplog):
+    with parsl.load(parsl.Config(std_autopath=partial(const_str, autopath_specifier))):
+        fut = app_stdout()
+
+        # we don't have to wait for a result to check this attributes
+        assert fut.stdout is autopath_specifier
+
+        # there is no DataFuture to wait for in the str case: the model is that
+        # the stdout will be immediately available on task completion.
+        fut.result()
+
+    with open(content_path, "r") as file:
+        assert file.readlines() == ["hello\n"]
+
+    for record in caplog.records:
+        assert record.levelno < logging.ERROR
+
+    parsl.clear()
+
+
+@pytest.mark.local
+def test_std_autopath_const_str(caplog, tmpd_cwd):
+    """Tests str and tuple mode autopaths with constant autopath, which should
+    all be passed through unmodified.
+    """
+    cpath = str(tmpd_cwd / "CONST")
+    const_with_cpath(cpath, cpath, caplog)
+
+
+@pytest.mark.local
+def test_std_autopath_const_pathlike(caplog, tmpd_cwd):
+    cpath = tmpd_cwd / "CONST"
+    const_with_cpath(cpath, cpath, caplog)
+
+
+@pytest.mark.local
+def test_std_autopath_const_tuples(caplog, tmpd_cwd):
+    file = tmpd_cwd / "CONST"
+    cpath = (file, "w")
+    const_with_cpath(cpath, file, caplog)
+
+
+class URIFailError(Exception):
+    pass
+
+
+def fail_uri(task_record, err_or_out):
+    raise URIFailError("Deliberate failure in std stream filename generation")
+
+
+@pytest.mark.local
+def test_std_autopath_fail(caplog):
+    with parsl.load(parsl.Config(std_autopath=fail_uri)):
+        with pytest.raises(URIFailError):
+            app_stdout()
+
+    parsl.clear()
+
+
+@parsl.bash_app
+def app_both(stdout=parsl.AUTO_LOGNAME, stderr=parsl.AUTO_LOGNAME):
+    return "echo hello; echo goodbye >&2"
+
+
+def zip_uri(base, task_record, err_or_out):
+    """Should generate Files in base.zip like app_both.0.out or app_both.123.err"""
+    zip_path = base / "base.zip"
+    file = f"{task_record['func_name']}.{task_record['id']}.{task_record['try_id']}.{err_or_out}"
+    return File(f"zip:{zip_path}/{file}")
+
+
+@pytest.mark.local
+def test_std_autopath_zip(caplog, tmpd_cwd):
+    with parsl.load(parsl.Config(run_dir=str(tmpd_cwd),
+                                 executors=[ThreadPoolExecutor(working_dir=str(tmpd_cwd))],
+                                 std_autopath=partial(zip_uri, tmpd_cwd))):
+        futs = []
+
+        for _ in range(10):
+            fut = app_both()
+
+            # assertions that should hold after submission
+            assert isinstance(fut.stdout, DataFuture)
+            assert fut.stdout.file_obj.url.startswith("zip")
+
+            futs.append(fut)
+
+        # Barrier for all the stageouts to complete so that we can
+        # poke at the zip file.
+        [(fut.stdout.result(), fut.stderr.result()) for fut in futs]
+
+        with zipfile.ZipFile(tmpd_cwd / "base.zip") as z:
+            for fut in futs:
+
+                assert fut.done(), "AppFuture should be done if stageout is done"
+
+                stdout_relative_path = f"app_both.{fut.tid}.0.stdout"
+                with z.open(stdout_relative_path) as f:
+                    assert f.readlines() == [b'hello\n']
+
+                stderr_relative_path = f"app_both.{fut.tid}.0.stderr"
+                with z.open(stderr_relative_path) as f:
+                    assert f.readlines()[-1] == b'goodbye\n'
+
+    for record in caplog.records:
+        assert record.levelno < logging.ERROR
+
+    parsl.clear()
parsl/tests/test_checkpointing/test_periodic.py
CHANGED
@@ -1,16 +1,12 @@
-import argparse
-import time
-
 import pytest
 
 import parsl
 from parsl.app.app import python_app
-from parsl.tests.configs.local_threads_checkpoint_periodic import
+from parsl.tests.configs.local_threads_checkpoint_periodic import fresh_config
 
 
 def local_setup():
-
-    dfk = parsl.load(config)
+    parsl.load(fresh_config())
 
 
 def local_teardown():
@@ -27,40 +23,31 @@ def slow_double(x, sleep_dur=1):
 
 
 def tstamp_to_seconds(line):
-    print("Parsing line: ", line)
    f = line.partition(" ")[0]
    return float(f)
 
 
 @pytest.mark.local
-def test_periodic(
+def test_periodic():
    """Test checkpointing with task_periodic behavior
    """
-
-
-
-
-
-
-
-
-    for i in range(0, n):
-        d[i].result()
-    print("Done sleeping")
-
-    time.sleep(16)
-    dfk.cleanup()
+    h, m, s = map(int, parsl.dfk().config.checkpoint_period.split(":"))
+    assert h == 0, "Verify test setup"
+    assert m == 0, "Verify test setup"
+    assert s > 0, "Verify test setup"
+    sleep_for = s + 1
+    with parsl.dfk():
+        futs = [slow_double(sleep_for) for _ in range(4)]
+        [f.result() for f in futs]
 
    # Here we will check if the loglines came back with 5 seconds deltas
-
-
-    with open("{}/parsl.log".format(dfk.run_dir), 'r') as f:
+    with open("{}/parsl.log".format(parsl.dfk().run_dir)) as f:
        log_lines = f.readlines()
-
-
-
-
-
-
-
-
+    expected_msg = " Done checkpointing"
+    expected_msg2 = " No tasks checkpointed in this pass"
+
+    lines = [line for line in log_lines if expected_msg in line or expected_msg2 in line]
+    assert len(lines) >= 3, "Insufficient checkpoint lines in logfile"
+    deltas = [tstamp_to_seconds(line) for line in lines]
+    assert deltas[1] - deltas[0] < 5.5, "Delta between checkpoints exceeded period"
+    assert deltas[2] - deltas[1] < 5.5, "Delta between checkpoints exceeded period"
parsl/tests/test_htex/test_missing_worker.py
CHANGED
@@ -37,7 +37,3 @@ def test_that_it_fails():
        raise Exception("The app somehow ran without a valid worker")
 
    assert parsl.dfk().config.executors[0]._executor_bad_state.is_set()
-
-    # htex needs shutting down explicitly because dfk.cleanup() will not
-    # do that, as it is in bad state
-    parsl.dfk().config.executors[0].shutdown()
parsl/tests/test_htex/test_zmq_binding.py
CHANGED
@@ -53,6 +53,7 @@ def test_interchange_binding_with_address(cert_dir: Optional[str]):
    assert ix.interchange_address == address
 
 
+@pytest.mark.skip("This behaviour is possibly unexpected. See issue #3037")
 @pytest.mark.local
 @pytest.mark.parametrize("encrypted", (True, False), indirect=True)
 def test_interchange_binding_with_non_ipv4_address(cert_dir: Optional[str]):
parsl/tests/test_monitoring/test_stdouterr.py
ADDED
@@ -0,0 +1,137 @@
+"""Tests monitoring records app name under various decoration patterns.
+"""
+
+import logging
+import os
+import parsl
+import pytest
+import re
+import time
+
+from typing import Union
+
+from parsl.config import Config
+from parsl.data_provider.files import File
+from parsl.data_provider.data_manager import default_staging
+from parsl.data_provider.staging import Staging
+from parsl.executors import HighThroughputExecutor
+from parsl.monitoring import MonitoringHub
+from parsl.providers import LocalProvider
+
+
+def fresh_config(run_dir):
+    return Config(
+        run_dir=str(run_dir),
+        executors=[
+            HighThroughputExecutor(
+                address="127.0.0.1",
+                label="htex_Local",
+                provider=LocalProvider(
+                    init_blocks=1,
+                    min_blocks=1,
+                    max_blocks=1,
+                )
+            )
+        ],
+        strategy='simple',
+        strategy_period=0.1,
+        monitoring=MonitoringHub(
+            hub_address="localhost",
+            hub_port=55055,
+        )
+    )
+
+
+@parsl.python_app
+def stdapp(stdout=None, stderr=None):
+    pass
+
+
+class ArbitraryPathLike(os.PathLike):
+    def __init__(self, path: Union[str, bytes]) -> None:
+        self.path = path
+
+    def __fspath__(self) -> Union[str, bytes]:
+        return self.path
+
+
+class ArbitraryStaging(Staging):
+    """This staging provider will not actually do any staging, but will
+    accept arbitrary: scheme URLs. That's enough for this monitoring test
+    which doesn't need any actual stage out action to happen.
+    """
+    def can_stage_out(self, file):
+        return file.scheme == "arbitrary"
+
+
+@pytest.mark.local
+@pytest.mark.parametrize('stdx,expected_stdx',
+                         [('hello.txt', 'hello.txt'),
+                          (None, ''),
+                          (('tuple.txt', 'w'), 'tuple.txt'),
+                          (ArbitraryPathLike('pl.txt'), 'pl.txt'),
+                          (ArbitraryPathLike(b'pl2.txt'), 'pl2.txt'),
+                          ((ArbitraryPathLike('pl3.txt'), 'w'), 'pl3.txt'),
+                          ((ArbitraryPathLike(b'pl4.txt'), 'w'), 'pl4.txt'),
+                          (parsl.AUTO_LOGNAME,
+                           lambda p:
+                           isinstance(p, str) and
+                           os.path.isabs(p) and
+                           re.match("^.*/task_0000_stdapp\\.std...$", p)),
+                          (File("arbitrary:abc123"), "arbitrary:abc123"),
+                          (File("file:///tmp/pl5"), "file:///tmp/pl5"),
+                          ])
+@pytest.mark.parametrize('stream', ['stdout', 'stderr'])
+def test_stdstream_to_monitoring(stdx, expected_stdx, stream, tmpd_cwd, caplog):
+    """This tests that various forms of stdout/err specification are
+    represented in monitoring correctly. The stderr and stdout codepaths
+    are generally duplicated, rather than factorised, and so this test
+    runs the same tests on both stdout and stderr.
+    """
+
+    # this is imported here rather than at module level because
+    # it isn't available in a plain parsl install, so this module
+    # would otherwise fail to import and break even a basic test
+    # run.
+    import sqlalchemy
+
+    c = fresh_config(tmpd_cwd)
+    c.monitoring.logging_endpoint = f"sqlite:///{tmpd_cwd}/monitoring.db"
+    c.executors[0].storage_access = default_staging + [ArbitraryStaging()]
+
+    with parsl.load(c):
+        kwargs = {stream: stdx}
+        stdapp(**kwargs).result()
+
+    parsl.clear()
+
+    engine = sqlalchemy.create_engine(c.monitoring.logging_endpoint)
+    with engine.begin() as connection:
+
+        def count_rows(table: str):
+            result = connection.execute(f"SELECT COUNT(*) FROM {table}")
+            (c, ) = result.first()
+            return c
+
+        # one workflow...
+        assert count_rows("workflow") == 1
+
+        # ... with one task ...
+        assert count_rows("task") == 1
+
+        # ... that was tried once ...
+        assert count_rows("try") == 1
+
+        # ... and has the expected name.
+        result = connection.execute(f"SELECT task_{stream} FROM task")
+        (c, ) = result.first()
+
+        if isinstance(expected_stdx, str):
+            assert c == expected_stdx
+        elif callable(expected_stdx):
+            assert expected_stdx(c)
+        else:
+            raise RuntimeError("Bad expected_stdx value")
+
+    for record in caplog.records:
+        assert record.levelno < logging.ERROR
parsl/tests/test_mpi_apps/test_resource_spec.py
CHANGED
@@ -25,17 +25,11 @@ from parsl.executors.high_throughput.mpi_prefix_composer import (
 EXECUTOR_LABEL = "MPI_TEST"
 
 
-def
+def local_config():
    config = fresh_config()
    config.executors[0].label = EXECUTOR_LABEL
    config.executors[0].max_workers_per_node = 1
-
-
-
-def local_teardown():
-    logging.warning("Exiting")
-    parsl.dfk().cleanup()
-    parsl.clear()
+    return config
 
 
 @python_app
parsl/tests/test_python_apps/test_context_manager.py
CHANGED
@@ -24,15 +24,15 @@ def local_teardown():
 
 
 @pytest.mark.local
-def test_within_context_manger():
+def test_within_context_manger(tmpd_cwd):
    config = fresh_config()
    with parsl.load(config=config) as dfk:
        assert isinstance(dfk, DataFlowKernel)
 
-        bash_future = foo(1)
+        bash_future = foo(1, stdout=tmpd_cwd / 'foo.stdout')
        assert bash_future.result() == 0
 
-        with open('foo.stdout', 'r') as f:
+        with open(tmpd_cwd / 'foo.stdout', 'r') as f:
            assert f.read() == "2\n"
 
        with pytest.raises(NoDataFlowKernelError) as excinfo:
parsl/tests/test_scaling/test_regression_1621.py
CHANGED
@@ -9,6 +9,14 @@ from parsl.executors import HighThroughputExecutor
 from parsl.launchers import SimpleLauncher
 from parsl.providers import LocalProvider
 
+# Timing notes:
+# The configured strategy_period must be much smaller than the delay in
+# app() so that multiple iterations of the strategy have had a chance
+# to (mis)behave.
+# The status polling interval in OneShotLocalProvider must be much bigger
+# than the above times, so that the job status cached from the provider
+# will not be updated while the single invocation of app() runs.
+
 
 @parsl.python_app
 def app():
@@ -55,20 +63,12 @@ def test_one_block(tmpd_cwd):
            )
        ],
        strategy='simple',
+        strategy_period=0.1
    )
 
-    parsl.load(config)
-
-
-    def poller():
-        import time
-        while True:
-            dfk.job_status_poller.poll()
-            time.sleep(0.1)
+    with parsl.load(config):
+        app().result()
 
-    threading.Thread(target=poller, daemon=True).start()
-    app().result()
-    parsl.dfk().cleanup()
    parsl.clear()
 
    assert oneshot_provider.recorded_submits == 1
parsl/tests/test_staging/test_staging_stdout.py
ADDED
@@ -0,0 +1,61 @@
+import logging
+import os
+import parsl
+import pytest
+import zipfile
+
+from parsl.app.futures import DataFuture
+from parsl.tests.configs.htex_local import fresh_config as local_config
+from parsl.data_provider.files import File
+
+
+@parsl.bash_app
+def output_to_stds(*, stdout=parsl.AUTO_LOGNAME, stderr=parsl.AUTO_LOGNAME):
+    return "echo hello ; echo goodbye >&2"
+
+
+def test_stdout_staging_file(tmpd_cwd, caplog):
+    basename = str(tmpd_cwd) + "/stdout.txt"
+    stdout_file = File("file://" + basename)
+
+    app_future = output_to_stds(stdout=stdout_file)
+
+    assert isinstance(app_future.stdout, DataFuture)
+    app_future.stdout.result()
+
+    assert os.path.exists(basename)
+
+    for record in caplog.records:
+        assert record.levelno < logging.ERROR
+
+
+def test_stdout_stderr_staging_zip(tmpd_cwd, caplog):
+    zipfile_name = str(tmpd_cwd) + "/staging.zip"
+    stdout_relative_path = "somewhere/test-out.txt"
+    stdout_file = File("zip:" + zipfile_name + "/" + stdout_relative_path)
+
+    stderr_relative_path = "somewhere/test-error.txt"
+    stderr_file = File("zip:" + zipfile_name + "/" + stderr_relative_path)
+
+    app_future = output_to_stds(stdout=stdout_file, stderr=stderr_file)
+
+    assert isinstance(app_future.stdout, DataFuture)
+    app_future.stdout.result()
+
+    # check the file exists as soon as possible
+    assert os.path.exists(zipfile_name)
+    with zipfile.ZipFile(zipfile_name) as z:
+        with z.open(stdout_relative_path) as f:
+            assert f.readlines() == [b'hello\n']
+
+    assert isinstance(app_future.stderr, DataFuture)
+    app_future.stderr.result()
+    with zipfile.ZipFile(zipfile_name) as z:
+        with z.open(stderr_relative_path) as f:
+            # The last line of stderr should be goodbye, but Parsl will write
+            # other Parsl-specific into to stderr before that, so only assert
+            # the behaviour of the final line.
+            assert f.readlines()[-1] == b'goodbye\n'
+
+    for record in caplog.records:
+        assert record.levelno < logging.ERROR
parsl/tests/test_staging/test_zip_in.py
ADDED
@@ -0,0 +1,42 @@
+import parsl
+import pytest
+import random
+import zipfile
+
+from parsl.data_provider.files import File
+from parsl.data_provider.zip import ZipAuthorityError, ZipFileStaging
+
+from parsl.providers import LocalProvider
+from parsl.channels import LocalChannel
+from parsl.launchers import SimpleLauncher
+
+from parsl.config import Config
+from parsl.executors import HighThroughputExecutor
+
+from parsl.tests.configs.htex_local import fresh_config as local_config
+
+
+@parsl.python_app
+def count_lines(file):
+    with open(file, "r") as f:
+        return len(f.readlines())
+
+
+@pytest.mark.local
+def test_zip_in(tmpd_cwd):
+    # basic test of zip file stage-in
+    zip_path = tmpd_cwd / "container.zip"
+    file_base = "data.txt"
+    zip_file = File(f"zip:{zip_path / file_base}")
+
+    # create a zip file containing one file with some abitrary number of lines
+    n_lines = random.randint(0, 1000)
+
+    with zipfile.ZipFile(zip_path, mode='w') as z:
+        with z.open(file_base, mode='w') as f:
+            for _ in range(n_lines):
+                f.write(b'someline\n')
+
+    app_future = count_lines(zip_file)
+
+    assert app_future.result() == n_lines
parsl/tests/test_staging/test_zip_to_zip.py
ADDED
@@ -0,0 +1,44 @@
+import parsl
+import pytest
+import random
+import zipfile
+
+from parsl.data_provider.files import File
+from parsl.data_provider.zip import ZipAuthorityError, ZipFileStaging
+
+from parsl.providers import LocalProvider
+from parsl.channels import LocalChannel
+from parsl.launchers import SimpleLauncher
+
+from parsl.config import Config
+from parsl.executors import HighThroughputExecutor
+
+from parsl.tests.configs.htex_local import fresh_config as local_config
+
+
+@parsl.python_app
+def generate_lines(n: int, *, outputs):
+    with open(outputs[0], "w") as f:
+        for x in range(n):
+            # write numbered lines
+            f.write(str(x) + "\n")
+
+
+@parsl.python_app
+def count_lines(file):
+    with open(file, "r") as f:
+        return len(f.readlines())
+
+
+@pytest.mark.local
+def test_zip_pipeline(tmpd_cwd):
+    # basic test of zip file stage-in
+    zip_path = tmpd_cwd / "container.zip"
+    file_base = "data.txt"
+    zip_file = File(f"zip:{zip_path / file_base}")
+
+    n_lines = random.randint(0, 1000)
+    generate_fut = generate_lines(n_lines, outputs=[zip_file])
+    n_lines_out = count_lines(generate_fut.outputs[0]).result()
+
+    assert n_lines == n_lines_out