parsl 2023.7.3__py3-none-any.whl → 2023.7.17__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (46)
  1. parsl/data_provider/files.py +6 -5
  2. parsl/dataflow/dflow.py +7 -1
  3. parsl/dataflow/memoization.py +7 -7
  4. parsl/executors/high_throughput/executor.py +2 -3
  5. parsl/executors/high_throughput/process_worker_pool.py +2 -3
  6. parsl/launchers/errors.py +1 -1
  7. parsl/providers/cluster_provider.py +2 -1
  8. parsl/providers/local/local.py +1 -1
  9. parsl/serialize/base.py +3 -13
  10. parsl/serialize/concretes.py +22 -3
  11. parsl/serialize/facade.py +13 -23
  12. parsl/tests/conftest.py +94 -11
  13. parsl/tests/test_bash_apps/test_basic.py +32 -63
  14. parsl/tests/test_bash_apps/test_kwarg_storage.py +18 -89
  15. parsl/tests/test_bash_apps/test_memoize.py +17 -41
  16. parsl/tests/test_bash_apps/test_multiline.py +19 -45
  17. parsl/tests/test_bash_apps/test_pipeline.py +46 -82
  18. parsl/tests/test_bash_apps/test_stdout.py +15 -30
  19. parsl/tests/test_data/test_file_apps.py +13 -15
  20. parsl/tests/test_data/test_file_staging.py +2 -2
  21. parsl/tests/test_data/test_output_chain_filenames.py +17 -27
  22. parsl/tests/test_docs/test_workflow4.py +18 -28
  23. parsl/tests/test_error_handling/test_htex_worker_failure.py +5 -12
  24. parsl/tests/test_python_apps/test_fail.py +31 -69
  25. parsl/tests/test_python_apps/test_garbage_collect.py +15 -9
  26. parsl/tests/test_python_apps/test_join.py +19 -20
  27. parsl/tests/test_python_apps/test_mapred.py +13 -38
  28. parsl/tests/test_python_apps/test_memoize_bad_id_for_memo.py +6 -7
  29. parsl/tests/test_python_apps/test_outputs.py +11 -24
  30. parsl/tests/test_python_apps/test_overview.py +5 -42
  31. parsl/tests/test_python_apps/test_pipeline.py +16 -19
  32. parsl/tests/test_regression/test_1606_wait_for_current_tasks.py +35 -10
  33. parsl/tests/test_scaling/test_regression_1621.py +26 -20
  34. parsl/tests/test_scaling/test_scale_down.py +49 -32
  35. parsl/tests/test_serialization/test_2555_caching_deserializer.py +34 -0
  36. parsl/utils.py +8 -6
  37. parsl/version.py +1 -1
  38. {parsl-2023.7.3.data → parsl-2023.7.17.data}/scripts/process_worker_pool.py +2 -3
  39. {parsl-2023.7.3.dist-info → parsl-2023.7.17.dist-info}/METADATA +2 -2
  40. {parsl-2023.7.3.dist-info → parsl-2023.7.17.dist-info}/RECORD +46 -45
  41. {parsl-2023.7.3.data → parsl-2023.7.17.data}/scripts/exec_parsl_function.py +0 -0
  42. {parsl-2023.7.3.data → parsl-2023.7.17.data}/scripts/parsl_coprocess.py +0 -0
  43. {parsl-2023.7.3.dist-info → parsl-2023.7.17.dist-info}/LICENSE +0 -0
  44. {parsl-2023.7.3.dist-info → parsl-2023.7.17.dist-info}/WHEEL +0 -0
  45. {parsl-2023.7.3.dist-info → parsl-2023.7.17.dist-info}/entry_points.txt +0 -0
  46. {parsl-2023.7.3.dist-info → parsl-2023.7.17.dist-info}/top_level.txt +0 -0
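Most of the test rewrites below drop per-test os.path setup/teardown in favour of a tmpd_cwd fixture; the fixture itself lives in parsl/tests/conftest.py (which grows by 94 lines in this release) and is not shown in this diff. As a rough sketch of the behaviour the rewritten tests appear to rely on — a per-test temporary directory handed back as a pathlib.Path — something along these lines would do; the real fixture may differ:

import os
import pathlib

import pytest


@pytest.fixture
def tmpd_cwd(tmp_path: pathlib.Path):
    # Hypothetical approximation of parsl's tmpd_cwd fixture: run the test
    # from inside a fresh temporary directory and yield that directory as a
    # pathlib.Path, so tests can write e.g. tmpd_cwd / "std.out".
    saved = os.getcwd()
    os.chdir(tmp_path)
    try:
        yield tmp_path
    finally:
        os.chdir(saved)  # restore the original working directory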
parsl/tests/test_bash_apps/test_kwarg_storage.py
@@ -1,104 +1,33 @@
- import os
  import pytest
+
  from parsl.app.app import bash_app


  @bash_app
  def foo(z=2, stdout=None):
-     return """echo {val}
-     """.format(val=z)
+     return f"echo {z}"


  @pytest.mark.issue363
- def test_command_format_1():
+ def test_command_format_1(tmpd_cwd):
      """Testing command format for BashApps
      """

-     stdout = os.path.abspath('std.out.0')
-     if os.path.exists(stdout):
-         os.remove(stdout)
-
-     app_fu = foo(stdout=stdout)
-     print("app_fu : ", app_fu)
-     contents = None
-
-     assert app_fu.result() == 0, "BashApp exited with an error code : {0}".format(
-         app_fu.result())
-
-     with open(stdout, 'r') as stdout_f:
-         contents = stdout_f.read()
-         print("Contents : ", contents)
-
-     if os.path.exists('stdout_file'):
-         os.remove(stdout)
-
-     assert contents == '2\n', 'Output does not match expected string "2", Got: "{0}"'.format(
-         contents)
-
-     # ===========
-
-     stdout = os.path.abspath('std.out.1')
-     if os.path.exists(stdout):
-         os.remove(stdout)
-
-     app_fu = foo(z=3, stdout=stdout)
-     print("app_fu : ", app_fu)
-     contents = None
-
-     assert app_fu.result() == 0, "BashApp exited with an error code : {0}".format(
-         app_fu.result())
-
-     with open(stdout, 'r') as stdout_f:
-         contents = stdout_f.read()
-         print("Contents : ", contents)
-
-     if os.path.exists('stdout_file'):
-         os.remove(stdout)
-
-     assert contents == '3\n', 'Output does not match expected string "3", Got: "{0}"'.format(
-         contents)
-
-     # ===========
-     stdout = os.path.abspath('std.out.2')
-     if os.path.exists(stdout):
-         os.remove(stdout)
-
-     app_fu = foo(z=4, stdout=stdout)
-     print("app_fu : ", app_fu)
-     contents = None
-
-     assert app_fu.result() == 0, "BashApp exited with an error code : {0}".format(
-         app_fu.result())
-
-     with open(stdout, 'r') as stdout_f:
-         contents = stdout_f.read()
-         print("Contents : ", contents)
-
-     if os.path.exists('stdout_file'):
-         os.remove(stdout)
-
-     assert contents == '4\n', 'Output does not match expected string "4", Got: "{0}"'.format(
-         contents)
-
-     # ===========
-     stdout = os.path.abspath('std.out.3')
-     if os.path.exists(stdout):
-         os.remove(stdout)
-
-     app_fu = foo(stdout=stdout)
-     print("app_fu : ", app_fu)
-     contents = None
-
-     assert app_fu.result() == 0, "BashApp exited with an error code : {0}".format(
-         app_fu.result())
+     stdout = tmpd_cwd / "std.out"
+     for exp_value, z in (
+         ("3", 3),
+         ("4", 4),
+         ("5", 5),
+     ):
+         app_fu = foo(z=z, stdout=str(stdout))
+         assert app_fu.result() == 0, "BashApp had non-zero exit"

-     with open(stdout, 'r') as stdout_f:
-         contents = stdout_f.read()
-         print("Contents : ", contents)
+         so_content = stdout.read_text().strip()
+         assert so_content == exp_value
+         stdout.unlink()

-     if os.path.exists('stdout_file'):
-         os.remove(stdout)
+     app_fu = foo(stdout=str(stdout))
+     assert app_fu.result() == 0, "BashApp had non-zero exit"

-     assert contents == '2\n', 'Output does not match expected string "2", Got: "{0}"'.format(
-         contents)
-     return True
+     so_content = stdout.read_text().strip()
+     assert so_content == "2"
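Outside pytest, the rewritten app above can be exercised directly; the snippet below is an illustrative sketch only (it assumes the stock parsl.configs.local_threads config and is not part of this diff):

import parsl
from parsl.app.app import bash_app
from parsl.configs.local_threads import config


@bash_app
def foo(z=2, stdout=None):
    return f"echo {z}"


parsl.load(config)                    # start a thread-local DataFlowKernel
fut = foo(z=7, stdout="std.out")      # returns an AppFuture immediately
assert fut.result() == 0              # a bash_app future resolves to the exit code
print(open("std.out").read())         # prints "7"
parsl.dfk().cleanup()
parsl.clear()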
parsl/tests/test_bash_apps/test_memoize.py
@@ -1,15 +1,11 @@
- import argparse
- import os
  import pytest

- import parsl
  from parsl import File
  from parsl.app.app import bash_app
- from parsl.tests.configs.local_threads import config


  @bash_app(cache=True)
- def fail_on_presence(outputs=[]):
+ def fail_on_presence(outputs=()):
      return 'if [ -f {0} ] ; then exit 1 ; else touch {0}; fi'.format(outputs[0])


@@ -17,31 +13,20 @@ def fail_on_presence(outputs=[]):
  # won't work if there's a staging provider.
  # @pytest.mark.sharedFS_required
  @pytest.mark.issue363
- def test_bash_memoization(n=2):
+ def test_bash_memoization(tmpd_cwd, n=2):
      """Testing bash memoization
      """
-     temp_filename = "test.memoization.tmp"
-     temp_file = File(temp_filename)
+     mpath = tmpd_cwd / "test.memoization.tmp"
+     temp_file = File(str(mpath))
+     fail_on_presence(outputs=[temp_file]).result()

-     if os.path.exists(temp_filename):
-         os.remove(temp_filename)
-
-     temp_file = File(temp_filename)
-
-     print("Launching: ", n)
-     x = fail_on_presence(outputs=[temp_file])
-     x.result()
-
-     d = {}
-     for i in range(0, n):
-         d[i] = fail_on_presence(outputs=[temp_file])
-
-     for i in d:
-         assert d[i].exception() is None
+     futs = [fail_on_presence(outputs=[temp_file]) for _ in range(n)]
+     for f in futs:
+         assert f.exception() is None


  @bash_app(cache=True)
- def fail_on_presence_kw(outputs=[], foo={}):
+ def fail_on_presence_kw(outputs=(), foo=None):
      return 'if [ -f {0} ] ; then exit 1 ; else touch {0}; fi'.format(outputs[0])


@@ -49,24 +34,15 @@ def fail_on_presence_kw(outputs=[], foo={}):
  # won't work if there's a staging provider.
  # @pytest.mark.sharedFS_required
  @pytest.mark.issue363
- def test_bash_memoization_keywords(n=2):
+ def test_bash_memoization_keywords(tmpd_cwd, n=2):
      """Testing bash memoization
      """
-     temp_filename = "test.memoization.tmp"
-     temp_file = File("test.memoization.tmp")
-
-     if os.path.exists(temp_filename):
-         os.remove(temp_filename)
-
-     temp_file = File(temp_filename)
-
-     print("Launching: ", n)
-     x = fail_on_presence_kw(outputs=[temp_file], foo={"a": 1, "b": 2})
-     x.result()
+     mpath = tmpd_cwd / "test.memoization.tmp"
+     temp_file = File(str(mpath))

-     d = {}
-     for i in range(0, n):
-         d[i] = fail_on_presence_kw(outputs=[temp_file], foo={"b": 2, "a": 1})
+     foo = {"a": 1, "b": 2}
+     fail_on_presence_kw(outputs=[temp_file], foo=foo).result()

-     for i in d:
-         assert d[i].exception() is None
+     futs = [fail_on_presence_kw(outputs=[temp_file], foo=foo) for _ in range(n)]
+     for f in futs:
+         assert f.exception() is None
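The memoization tests above depend on bash_app(cache=True) replaying a cached result when an app is called again with arguments that hash to the same key. A standalone sketch of that behaviour (again assuming the local_threads config; the scratch file name is made up):

import parsl
from parsl import File
from parsl.app.app import bash_app
from parsl.configs.local_threads import config


@bash_app(cache=True)
def touch_once(outputs=()):
    # A genuine second execution would exit 1 because the file already exists,
    # so a clean repeat call demonstrates that the cached result was reused.
    return 'if [ -f {0} ] ; then exit 1 ; else touch {0}; fi'.format(outputs[0])


parsl.load(config)
out = File("memo_demo.tmp")                             # hypothetical scratch file
assert touch_once(outputs=[out]).result() == 0          # first call runs the command
assert touch_once(outputs=[out]).exception() is None    # second call hits the cache
parsl.dfk().cleanup()
parsl.clear()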
parsl/tests/test_bash_apps/test_multiline.py
@@ -1,21 +1,11 @@
- import argparse
- import os
  import pytest
- import shutil
- import time

- import parsl
  from parsl import File
  from parsl.app.app import bash_app
- from parsl.tests.configs.local_threads import config


  @bash_app
- def multiline(
-         inputs=[],
-         outputs=[],
-         stderr=os.path.abspath('std.err'),
-         stdout=os.path.abspath('std.out')):
+ def multiline(inputs=(), outputs=(), stderr=None, stdout=None):
      return """echo {inputs[0]} &> {outputs[0]}
      echo {inputs[1]} &> {outputs[1]}
      echo {inputs[2]} &> {outputs[2]}
@@ -25,39 +15,23 @@ def multiline(


  @pytest.mark.issue363
- def test_multiline():
-
-     outdir = os.path.abspath('outputs')
-
-     if not os.path.exists(outdir):
-         os.makedirs(outdir)
-     else:
-         shutil.rmtree(outdir)
-         os.makedirs(outdir)
-
+ def test_multiline(tmpd_cwd):
+     so, se = tmpd_cwd / "std.out", tmpd_cwd / "std.err"
      f = multiline(
-         inputs=["Hello", "This is", "Cat!"],
-         outputs=[
-             File('{0}/hello.txt'.format(outdir)),
-             File('{0}/this.txt'.format(outdir)),
-             File('{0}/cat.txt'.format(outdir))
-         ]
+         inputs=["Hello", "This is", "Cat!"],
+         outputs=[
+             File(str(tmpd_cwd / "hello.txt")),
+             File(str(tmpd_cwd / "this.txt")),
+             File(str(tmpd_cwd / "cat.txt")),
+         ],
+         stdout=str(so),
+         stderr=str(se),
      )
-     print(f.result())
-
-     time.sleep(0.1)
-     assert 'hello.txt' in os.listdir(outdir), "hello.txt is missing"
-     assert 'this.txt' in os.listdir(outdir), "this.txt is missing"
-     assert 'cat.txt' in os.listdir(outdir), "cat.txt is missing"
-
-     with open('std.out', 'r') as o:
-         out = o.read()
-         assert out != "Testing STDOUT", "Stdout is bad"
-
-     with open('std.err', 'r') as o:
-         err = o.read()
-         assert err != "Testing STDERR", "Stderr is bad"
-
-     os.remove('std.err')
-     os.remove('std.out')
-     return True
+     f.result()
+
+     flist = list(map(str, (f.name for f in tmpd_cwd.iterdir())))
+     assert 'hello.txt' in flist, "hello.txt is missing"
+     assert 'this.txt' in flist, "this.txt is missing"
+     assert 'cat.txt' in flist, "cat.txt is missing"
+     assert "Testing STDOUT" in so.read_text()
+     assert "Testing STDERR" in se.read_text()
parsl/tests/test_bash_apps/test_pipeline.py
@@ -1,17 +1,12 @@
- import argparse
- import os
  import pytest

- import parsl
  from parsl.app.app import bash_app
  from parsl.data_provider.files import File
  from parsl.app.futures import DataFuture

- from parsl.tests.configs.local_threads import config
-

  @bash_app
- def increment(inputs=[], outputs=[], stdout=None, stderr=None):
+ def increment(inputs=(), outputs=(), stdout=None, stderr=None):
      cmd_line = """
      if ! [ -f {inputs[0]} ] ; then exit 43 ; fi
      x=$(cat {inputs[0]})
@@ -21,7 +16,7 @@ def increment(inputs=[], outputs=[], stdout=None, stderr=None):


  @bash_app
- def slow_increment(dur, inputs=[], outputs=[], stdout=None, stderr=None):
+ def slow_increment(dur, inputs=(), outputs=(), stdout=None, stderr=None):
      cmd_line = """
      x=$(cat {inputs[0]})
      echo $(($x+1)) > {outputs[0]}
@@ -30,91 +25,60 @@ def slow_increment(dur, inputs=[], outputs=[], stdout=None, stderr=None):
      return cmd_line


- def cleanup_work(depth):
-     for i in range(0, depth):
-         fn = "test{0}.txt".format(i)
-         if os.path.exists(fn):
-             os.remove(fn)
-
-
  @pytest.mark.staging_required
- def test_increment(depth=5):
+ def test_increment(tmpd_cwd, depth=5):
      """Test simple pipeline A->B...->N
      """
+     fpath = tmpd_cwd / "test0.txt"
+     fpath.write_text("0\n")

-     cleanup_work(depth)
-
-     # Create the first file
-     open("test0.txt", 'w').write('0\n')
-
-     # Create the first entry in the dictionary holding the futures
-     prev = File("test0.txt")
-     futs = {}
+     prev = [File(str(fpath))]
+     futs = []
      for i in range(1, depth):
-         print("Launching {0} with {1}".format(i, prev))
-         assert isinstance(prev, DataFuture) or isinstance(prev, File)
-         output = File("test{0}.txt".format(i))
-         fu = increment(inputs=[prev],  # Depend on the future from previous call
-                        # Name the file to be created here
-                        outputs=[output],
-                        stdout="incr{0}.out".format(i),
-                        stderr="incr{0}.err".format(i))
-         [prev] = fu.outputs
-         futs[i] = prev
-         print(prev.filepath)
-         assert isinstance(prev, DataFuture)
-
-     for key in futs:
-         if key > 0:
-             fu = futs[key]
-             file = fu.result()
-             filename = file.filepath
-
-             # this test is a bit close to a test of the specific implementation
-             # of File
-             assert file.local_path is None, "File on local side has overridden local_path, file: {}".format(repr(file))
-             assert file.filepath == "test{0}.txt".format(key), "Submit side filepath has not been preserved over execution"
-
-             data = open(filename, 'r').read().strip()
-             assert data == str(
-                 key), "[TEST] incr failed for key: {0} got data: {1} from filename {2}".format(key, data, filename)
-
-     cleanup_work(depth)
+         assert isinstance(prev[0], (DataFuture, File))
+         output = File(str(tmpd_cwd / f"test{i}.txt"))
+         f = increment(
+             inputs=prev,
+             outputs=[output],
+             stdout=str(tmpd_cwd / f"incr{i}.out"),
+             stderr=str(tmpd_cwd / f"incr{i}.err"),
+         )
+         prev = f.outputs
+         futs.append((i, prev[0]))
+         assert isinstance(prev[0], DataFuture)
+
+     for key, f in futs:
+         file = f.result()
+         expected = str(tmpd_cwd / f"test{key}.txt")
+
+         assert file.local_path is None, "File on local side has overridden local_path, file: {}".format(repr(file))
+         assert file.filepath == expected, "Submit side filepath has not been preserved over execution"
+         data = open(file.filepath).read().strip()
+         assert data == str(key)


  @pytest.mark.staging_required
- def test_increment_slow(depth=5, dur=0.5):
+ def test_increment_slow(tmpd_cwd, depth=5, dur=0.01):
      """Test simple pipeline slow (sleep.5) A->B...->N
      """

-     cleanup_work(depth)
+     fpath = tmpd_cwd / "test0.txt"
+     fpath.write_text("0\n")

-     # Create the first file
-     open("test0.txt", 'w').write('0\n')
-
-     prev = File("test0.txt")
-     # Create the first entry in the dictionary holding the futures
-     futs = {}
-     print("************** Type: ", type(dur), dur)
+     prev = [File(str(fpath))]
+     futs = []
      for i in range(1, depth):
-         print("Launching {0} with {1}".format(i, prev))
-         output = File("test{0}.txt".format(i))
-         fu = slow_increment(dur,
-                             # Depend on the future from previous call
-                             inputs=[prev],
-                             # Name the file to be created here
-                             outputs=[output],
-                             stdout="incr{0}.out".format(i),
-                             stderr="incr{0}.err".format(i))
-         [prev] = fu.outputs
-         futs[i] = prev
-         print(prev.filepath)
-
-     for key in futs:
-         if key > 0:
-             fu = futs[key]
-             data = open(fu.result().filepath, 'r').read().strip()
-             assert data == str(
-                 key), "[TEST] incr failed for key: {0} got: {1}".format(key, data)
-
-     cleanup_work(depth)
+         output = File(str(tmpd_cwd / f"test{i}.txt"))
+         f = slow_increment(
+             dur,
+             inputs=prev,
+             outputs=[output],
+             stdout=str(tmpd_cwd / f"incr{i}.out"),
+             stderr=str(tmpd_cwd / f"incr{i}.err"),
+         )
+         prev = f.outputs
+         futs.append((i, prev[0]))
+
+     for key, f in futs:
+         data = open(f.result().filepath).read().strip()
+         assert data == str(key)
parsl/tests/test_bash_apps/test_stdout.py
@@ -1,16 +1,13 @@
- import argparse
  import os

  import pytest

- import parsl
  import parsl.app.errors as perror
  from parsl.app.app import bash_app
- from parsl.tests.configs.local_threads import config


  @bash_app
- def echo_to_streams(msg, stderr='std.err', stdout='std.out'):
+ def echo_to_streams(msg, stderr=None, stdout=None):
      return 'echo "{0}"; echo "{0}" >&2'.format(msg)


@@ -40,8 +37,7 @@ testids = [
  @pytest.mark.issue363
  @pytest.mark.parametrize('spec', speclist, ids=testids)
  def test_bad_stdout_specs(spec):
-     """Testing bad stdout spec cases
-     """
+     """Testing bad stdout spec cases"""

      fn = echo_to_streams("Hello world", stdout=spec, stderr='t.err')

@@ -52,18 +48,14 @@ def test_bad_stdout_specs(spec):
      else:
          assert False, "Did not raise expected exception"

-     return
-

  @pytest.mark.issue363
  def test_bad_stderr_file():
+     """Testing bad stderr file"""

-     """ Testing bad stderr file """
-
-     out = "t2.out"
      err = "/bad/dir/t2.err"

-     fn = echo_to_streams("Hello world", stdout=out, stderr=err)
+     fn = echo_to_streams("Hello world", stderr=err)

      try:
          fn.result()
@@ -76,13 +68,11 @@ def test_bad_stderr_file():


  @pytest.mark.issue363
- def test_stdout_truncate():
-
-     """ Testing truncation of prior content of stdout """
+ def test_stdout_truncate(tmpd_cwd):
+     """Testing truncation of prior content of stdout"""

-     out = ('t1.out', 'w')
-     err = 't1.err'
-     os.system('rm -f ' + out[0] + ' ' + err)
+     out = (str(tmpd_cwd / 't1.out'), 'w')
+     err = str(tmpd_cwd / 't1.err')

      echo_to_streams('hi', stdout=out, stderr=err).result()
      len1 = len(open(out[0]).readlines())
@@ -90,19 +80,16 @@ def test_stdout_truncate():
      echo_to_streams('hi', stdout=out, stderr=err).result()
      len2 = len(open(out[0]).readlines())

-     assert len1 == len2 == 1, "Line count of output files should both be 1, but: len1={} len2={}".format(len1, len2)
-
-     os.system('rm -f ' + out[0] + ' ' + err)
+     assert len1 == 1
+     assert len1 == len2


  @pytest.mark.issue363
- def test_stdout_append():
-
-     """ Testing appending to prior content of stdout (default open() mode) """
+ def test_stdout_append(tmpd_cwd):
+     """Testing appending to prior content of stdout (default open() mode)"""

-     out = 't1.out'
-     err = 't1.err'
-     os.system('rm -f ' + out + ' ' + err)
+     out = str(tmpd_cwd / 't1.out')
+     err = str(tmpd_cwd / 't1.err')

      echo_to_streams('hi', stdout=out, stderr=err).result()
      len1 = len(open(out).readlines())
@@ -110,6 +97,4 @@ def test_stdout_append():
      echo_to_streams('hi', stdout=out, stderr=err).result()
      len2 = len(open(out).readlines())

-     assert len1 == 1 and len2 == 2, "Line count of output files should be 1 and 2, but: len1={} len2={}".format(len1, len2)
-
-     os.system('rm -f ' + out + ' ' + err)
+     assert len1 == 1 and len2 == 2
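The truncate/append pair above exercises the two stdout specs these tests assert on: a bare path, whose output accumulates across runs, and a (path, mode) tuple, where 'w' truncates first. An illustrative standalone version (local_threads config assumed, app name invented):

import parsl
from parsl.app.app import bash_app
from parsl.configs.local_threads import config


@bash_app
def say_hi(stdout=None, stderr=None):
    return 'echo hi'


parsl.load(config)
say_hi(stdout="run.out").result()               # bare path: output appends
say_hi(stdout="run.out").result()
assert len(open("run.out").readlines()) == 2    # two runs, two lines

say_hi(stdout=("run.out", "w")).result()        # (path, mode) tuple: 'w' truncates
assert len(open("run.out").readlines()) == 1    # prior content gone
parsl.dfk().cleanup()
parsl.clear()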
parsl/tests/test_data/test_file_apps.py
@@ -12,13 +12,13 @@ def cat(inputs=(), outputs=(), stdout=None, stderr=None):

  @pytest.mark.staging_required
  def test_files(setup_data):
-     fs = sorted(str(setup_data / f) for f in setup_data.iterdir())
+     fs = sorted(setup_data / f for f in setup_data.iterdir())
      fs = list(map(File, fs))
      x = cat(
          inputs=fs,
-         outputs=[File(str(setup_data / "cat_out.txt"))],
-         stdout=str(setup_data / "f_app.out"),
-         stderr=str(setup_data / "f_app.err"),
+         outputs=[File(setup_data / "cat_out.txt")],
+         stdout=setup_data / "f_app.out",
+         stderr=setup_data / "f_app.err",
      )
      x.result()
      d_x = x.outputs[0]
@@ -28,29 +28,27 @@ def test_files(setup_data):

  @bash_app
  def increment(inputs=(), outputs=(), stdout=None, stderr=None):
-     # Place double braces to avoid python complaining about missing keys for {item = $1}
-     return """
-     x=$(cat {i})
-     echo $(($x+1)) > {o}
-     """.format(i=inputs[0], o=outputs[0])
+     return (
+         f"x=$(cat {inputs[0]})\n"
+         f"echo $(($x+1)) > {outputs[0]}"
+     )


  @pytest.mark.staging_required
  def test_increment(tmp_path, depth=5):
-     """Test simple pipeline A->B...->N
-     """
+     """Test simple pipeline A->B...->N"""
      # Test setup
      first_fpath = tmp_path / "test0.txt"
      first_fpath.write_text("0\n")

-     prev = [File(str(first_fpath))]
+     prev = [File(first_fpath)]
      futs = []
      for i in range(1, depth):
          f = increment(
              inputs=prev,
-             outputs=[File(str(tmp_path / f"test{i}.txt"))],
-             stdout=str(tmp_path / f"incr{i}.out"),
-             stderr=str(tmp_path / f"incr{i}.err"),
+             outputs=[File(tmp_path / f"test{i}.txt")],
+             stdout=tmp_path / f"incr{i}.out",
+             stderr=tmp_path / f"incr{i}.err",
          )
          prev = f.outputs
          futs.append((i, prev[0]))
parsl/tests/test_data/test_file_staging.py
@@ -7,7 +7,7 @@ from parsl.data_provider.files import File
  @bash_app
  def cat(inputs=(), outputs=(), stdout=None, stderr=None):
      infiles = " ".join(i.filepath for i in inputs)
-     return f"cat {infiles} &> {outputs[0]}\n"
+     return f"cat {infiles} &> {outputs[0]}"


  @pytest.mark.staging_required
@@ -17,7 +17,7 @@ def test_regression_200(tmp_path):
      fpath = tmp_path / "test.txt"

      fpath.write_text("Hello World")
-     f = cat(inputs=[File(str(fpath))], outputs=[File(str(opath))])
+     f = cat(inputs=[File(fpath)], outputs=[File(opath)])

      f.result()
      with open(f.outputs[0].filepath) as f: