parsl 2023.7.3__py3-none-any.whl → 2023.7.17__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (46)
  1. parsl/data_provider/files.py +6 -5
  2. parsl/dataflow/dflow.py +7 -1
  3. parsl/dataflow/memoization.py +7 -7
  4. parsl/executors/high_throughput/executor.py +2 -3
  5. parsl/executors/high_throughput/process_worker_pool.py +2 -3
  6. parsl/launchers/errors.py +1 -1
  7. parsl/providers/cluster_provider.py +2 -1
  8. parsl/providers/local/local.py +1 -1
  9. parsl/serialize/base.py +3 -13
  10. parsl/serialize/concretes.py +22 -3
  11. parsl/serialize/facade.py +13 -23
  12. parsl/tests/conftest.py +94 -11
  13. parsl/tests/test_bash_apps/test_basic.py +32 -63
  14. parsl/tests/test_bash_apps/test_kwarg_storage.py +18 -89
  15. parsl/tests/test_bash_apps/test_memoize.py +17 -41
  16. parsl/tests/test_bash_apps/test_multiline.py +19 -45
  17. parsl/tests/test_bash_apps/test_pipeline.py +46 -82
  18. parsl/tests/test_bash_apps/test_stdout.py +15 -30
  19. parsl/tests/test_data/test_file_apps.py +13 -15
  20. parsl/tests/test_data/test_file_staging.py +2 -2
  21. parsl/tests/test_data/test_output_chain_filenames.py +17 -27
  22. parsl/tests/test_docs/test_workflow4.py +18 -28
  23. parsl/tests/test_error_handling/test_htex_worker_failure.py +5 -12
  24. parsl/tests/test_python_apps/test_fail.py +31 -69
  25. parsl/tests/test_python_apps/test_garbage_collect.py +15 -9
  26. parsl/tests/test_python_apps/test_join.py +19 -20
  27. parsl/tests/test_python_apps/test_mapred.py +13 -38
  28. parsl/tests/test_python_apps/test_memoize_bad_id_for_memo.py +6 -7
  29. parsl/tests/test_python_apps/test_outputs.py +11 -24
  30. parsl/tests/test_python_apps/test_overview.py +5 -42
  31. parsl/tests/test_python_apps/test_pipeline.py +16 -19
  32. parsl/tests/test_regression/test_1606_wait_for_current_tasks.py +35 -10
  33. parsl/tests/test_scaling/test_regression_1621.py +26 -20
  34. parsl/tests/test_scaling/test_scale_down.py +49 -32
  35. parsl/tests/test_serialization/test_2555_caching_deserializer.py +34 -0
  36. parsl/utils.py +8 -6
  37. parsl/version.py +1 -1
  38. {parsl-2023.7.3.data → parsl-2023.7.17.data}/scripts/process_worker_pool.py +2 -3
  39. {parsl-2023.7.3.dist-info → parsl-2023.7.17.dist-info}/METADATA +2 -2
  40. {parsl-2023.7.3.dist-info → parsl-2023.7.17.dist-info}/RECORD +46 -45
  41. {parsl-2023.7.3.data → parsl-2023.7.17.data}/scripts/exec_parsl_function.py +0 -0
  42. {parsl-2023.7.3.data → parsl-2023.7.17.data}/scripts/parsl_coprocess.py +0 -0
  43. {parsl-2023.7.3.dist-info → parsl-2023.7.17.dist-info}/LICENSE +0 -0
  44. {parsl-2023.7.3.dist-info → parsl-2023.7.17.dist-info}/WHEEL +0 -0
  45. {parsl-2023.7.3.dist-info → parsl-2023.7.17.dist-info}/entry_points.txt +0 -0
  46. {parsl-2023.7.3.dist-info → parsl-2023.7.17.dist-info}/top_level.txt +0 -0
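
One change repeated across the test diffs below is the replacement of mutable default arguments (inputs=[], outputs=[]) with immutable tuples (inputs=(), outputs=()). For readers unfamiliar with the pitfall this avoids, a brief standalone illustration (the helper names here are hypothetical, not from the package):

def broken(item, acc=[]):
    # Default objects are created once: every call that omits `acc`
    # shares (and mutates) the same list.
    acc.append(item)
    return acc

def safe(item, acc=()):
    # An immutable default cannot accumulate state between calls.
    return list(acc) + [item]

assert broken(1) == [1]
assert broken(2) == [1, 2]  # surprise: state leaked from the first call
assert safe(1) == [1]
assert safe(2) == [2]       # calls stay independent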

parsl/tests/test_data/test_output_chain_filenames.py
@@ -1,46 +1,36 @@
-import argparse
-import os
-
-import pytest
-
-import parsl
-
 from concurrent.futures import Future
+
 from parsl import File
 from parsl.app.app import bash_app
 
 
 @bash_app
-def app1(inputs=[], outputs=[], stdout=None, stderr=None, mock=False):
-    cmd_line = f"""echo 'test' > {outputs[0]}"""
-    return cmd_line
+def app1(inputs=(), outputs=(), stdout=None, stderr=None, mock=False):
+    return f"echo 'test' > {outputs[0]}"
 
 
 @bash_app
-def app2(inputs=[], outputs=[], stdout=None, stderr=None, mock=False):
-
-    with open('somefile.txt', 'w') as f:
-        f.write("%s\n" % inputs[0])
-    cmd_line = f"""echo '{inputs[0]}' > {outputs[0]}"""
-    return cmd_line
+def app2(inputs=(), outputs=(), stdout=None, stderr=None, mock=False):
+    return f"echo '{inputs[0]}' > {outputs[0]}"
 
 
-def test_behavior():
-    app1_future = app1(inputs=[],
-                       outputs=[File("simple-out.txt")])
+def test_behavior(tmpd_cwd):
+    expected_path = str(tmpd_cwd / "simple-out.txt")
+    app1_future = app1(
+        inputs=[],
+        outputs=[File(expected_path)]
+    )
 
     o = app1_future.outputs[0]
     assert isinstance(o, Future)
 
-    app2_future = app2(inputs=[o],
-                       outputs=[File("simple-out2.txt")])
+    app2_future = app2(
+        inputs=[o],
+        outputs=[File(str(tmpd_cwd / "simple-out2.txt"))]
+    )
     app2_future.result()
 
-    expected_name = 'b'
-    with open('somefile.txt', 'r') as f:
-        name = f.read()
-
     with open(app2_future.outputs[0].filepath, 'r') as f:
-        expected_name = f.read()
+        name = f.read().strip()
 
-    assert name == expected_name, "Filename mangled due to DataFuture handling"
+    assert name == expected_path, "Filename mangled due to DataFuture handling"
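
The rewritten test above takes a tmpd_cwd fixture so output files land in a per-test temporary directory rather than the shared working directory; the fixture itself was added to parsl/tests/conftest.py (entry 12 in the file list) and its body is not shown in this diff. A minimal sketch of a fixture that would satisfy these tests, assuming it only needs to create and enter a fresh directory (the real implementation may differ):

import os
import pytest

@pytest.fixture
def tmpd_cwd(tmp_path):
    # Sketch: run the test with a unique temporary directory as its
    # working directory, restoring the previous one afterwards;
    # tmp_path is pytest's built-in per-test pathlib.Path fixture.
    prev = os.getcwd()
    os.chdir(tmp_path)
    try:
        yield tmp_path
    finally:
        os.chdir(prev)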

parsl/tests/test_docs/test_workflow4.py
@@ -1,53 +1,43 @@
-import os
-import parsl
+import pytest
 
 from parsl.app.app import bash_app, python_app
-from parsl.tests.configs.local_threads import config
 from parsl.data_provider.files import File
 
-import pytest
-
-# parsl.set_stream_logger()
-
 
 @bash_app
-def generate(outputs=[]):
-    return "echo $(( RANDOM % (10 - 5 + 1 ) + 5 )) &> {o}".format(o=outputs[0])
+def generate(outputs=()):
+    return "echo 1 &> {o}".format(o=outputs[0])
 
 
 @bash_app
-def concat(inputs=[], outputs=[], stdout="stdout.txt", stderr='stderr.txt'):
+def concat(inputs=(), outputs=(), stdout=None, stderr=None):
     return "cat {0} >> {1}".format(" ".join(map(lambda x: x.filepath, inputs)), outputs[0])
 
 
 @python_app
-def total(inputs=[]):
-    total = 0
-    with open(inputs[0].filepath, 'r') as f:
-        for line in f:
-            total += int(line)
-    return total
+def total(inputs=()):
+    with open(inputs[0].filepath, "r") as f:
+        return sum(int(line) for line in f)
 
 
 @pytest.mark.staging_required
-def test_parallel_dataflow():
+@pytest.mark.parametrize("width", (5, 10, 15))
+def test_parallel_dataflow(tmpd_cwd, width):
     """Test parallel dataflow from docs on Composing workflows
     """
 
-    if os.path.exists('all.txt'):
-        os.remove('all.txt')
-
     # create 5 files with random numbers
-    output_files = []
-    for i in range(5):
-        if os.path.exists('random-%s.txt' % i):
-            os.remove('random-%s.txt' % i)
-        output_files.append(generate(outputs=[File('random-%s.txt' % i)]))
+    output_files = [
+        generate(outputs=[File(str(tmpd_cwd / f"random-{i}.txt"))])
+        for i in range(width)
+    ]
 
     # concatenate the files into a single file
-    cc = concat(inputs=[i.outputs[0]
-                        for i in output_files], outputs=[File("all.txt")])
+    cc = concat(
+        inputs=[i.outputs[0] for i in output_files],
+        outputs=[File(str(tmpd_cwd / "all.txt"))]
+    )
 
     # calculate the average of the random numbers
     totals = total(inputs=[cc.outputs[0]])
-    print(totals.result())
+    assert totals.result() == len(output_files)

parsl/tests/test_error_handling/test_htex_worker_failure.py
@@ -1,28 +1,21 @@
 import pytest
 
-import parsl
 from parsl.app.app import python_app
-from parsl.tests.configs.htex_local import fresh_config
-
 from parsl.executors.high_throughput.errors import WorkerLost
 
 
-def local_setup():
+def local_config():
+    from parsl.tests.configs.htex_local import fresh_config
    config = fresh_config()
     config.executors[0].poll_period = 1
     config.executors[0].max_workers = 1
-    parsl.load(config)
-
-
-def local_teardown():
-    parsl.dfk().cleanup()
-    parsl.clear()
+    config.executors[0].heartbeat_period = 1
+    return config
 
 
 @python_app
 def kill_worker():
-    import sys
-    sys.exit(2)
+    raise SystemExit(2)
 
 
 @pytest.mark.local
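
This diff replaces the module-level local_setup/local_teardown hooks with a local_config function that simply returns a Config, moving load and cleanup into the shared test harness (part of the conftest.py changes in entry 12). A sketch of how a harness fixture could consume such a function, under the assumption that it wraps each @pytest.mark.local test in a load/cleanup cycle; the actual parsl conftest is more involved:

import parsl
import pytest

@pytest.fixture
def load_local_config(request):
    # Hypothetical sketch: find local_config() in the test's module,
    # load the resulting Config, and tear the DataFlowKernel down
    # again once the test finishes.
    config = request.module.local_config()
    parsl.load(config)
    try:
        yield
    finally:
        parsl.dfk().cleanup()
        parsl.clear()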

parsl/tests/test_python_apps/test_fail.py
@@ -1,89 +1,59 @@
-import argparse
+import pytest
 
-import parsl
 from parsl.app.app import python_app
-from parsl.tests.configs.local_threads import fresh_config as local_config
+from parsl.dataflow.errors import DependencyError
 
 
-@python_app
-def sleep_fail(sleep_dur, sleep_rand_max, fail_prob, inputs=[]):
-    import time
-    import random
+class ManufacturedTestFailure(Exception):
+    pass
 
-    s = sleep_dur + random.randint(-sleep_rand_max, sleep_rand_max)
 
-    time.sleep(s)
-    raise Exception("App failure")
+@python_app
+def random_fail(fail_prob: float, inputs=()):
+    import random
+    if random.random() < fail_prob:
+        raise ManufacturedTestFailure("App failure")
 
 
-def test_no_deps(numtasks=2):
+def test_no_deps():
     """Test basic error handling, with no dependent failures
     """
+    futs = [random_fail(1), random_fail(0), random_fail(0)]
 
-    fus = []
-    for i in range(0, numtasks):
-
-        fu = sleep_fail(0.1, 0, .8)
-        fus.extend([fu])
-
-    count = 0
-    for fu in fus:
+    for f in futs:
         try:
-            fu.result()
-        except Exception as e:
-            print("Caught exception : ", "*" * 20)
-            print(e)
-            print("*" * 20)
-            count += 1
+            f.result()
+        except ManufacturedTestFailure:
+            pass
 
-    print("Caught failures of {0}/{1}".format(count, len(fus)))
 
-
-def test_fail_sequence(numtasks=2):
+@pytest.mark.parametrize("fail_probs", ((1, 0), (0, 1)))
+def test_fail_sequence(fail_probs):
     """Test failure in a sequence of dependencies
 
     App1 -> App2 ... -> AppN
     """
 
-    sleep_dur = 0.1
-    fail_prob = 0.4
-
-    fus = {0: None}
-    for i in range(0, numtasks):
-        print("Chaining {0} to {1}".format(i + 1, fus[i]))
-        fus[i + 1] = sleep_fail(sleep_dur, 0, fail_prob, inputs=[fus[i]])
+    t1_fail_prob, t2_fail_prob = fail_probs
+    t1 = random_fail(fail_prob=t1_fail_prob)
+    t2 = random_fail(fail_prob=t2_fail_prob, inputs=[t1])
+    t_final = random_fail(fail_prob=0, inputs=[t2])
 
-    # time.sleep(numtasks*sleep_dur)
-    for k in sorted(fus.keys()):
-        try:
-            x = fus[i].result()
-            print("{0} : {1}".format(k, x))
-        except Exception as e:
-            print("{0} : {1}".format(k, e))
-
-    return
-
-
-def test_deps(numtasks=2):
-    """Random failures in branches of Map -> Map -> reduce
+    with pytest.raises(DependencyError):
+        t_final.result()
 
-    App1  App2 ... AppN
-    """
 
-    fus = []
-    for i in range(0, numtasks):
-        fu = sleep_fail(0.2, 0, .4)
-        fus.extend([fu])
+def test_deps(width=3):
+    """Random failures in branches of Map -> Map -> reduce"""
+    # App1  App2 ... AppN
+    futs = [random_fail(fail_prob=0.4) for _ in range(width)]
 
     # App1  App2 ... AppN
     # |     |        |
     # V     V        V
     # App1  App2 ... AppN
 
-    fus_2 = []
-    for fu in fus:
-        fu = sleep_fail(0, 0, .8, inputs=[fu])
-        fus_2.extend([fu])
+    futs = [random_fail(fail_prob=0.8, inputs=[f]) for f in futs]
 
     # App1  App2 ... AppN
     # |     |        |
@@ -92,15 +62,7 @@ def test_deps(numtasks=2):
     #   \   |   /
     #    \  |  /
     #   App_Final
-
-    fu_final = sleep_fail(1, 0, 0, inputs=fus_2)
-
     try:
-        print("Final status : ", fu_final.result())
-    except parsl.dataflow.errors.DependencyError as e:
-        print("Caught the right exception")
-        print("Exception : ", e)
-    except Exception as e:
-        assert False, "Expected DependencyError but got: %s" % e
-    else:
-        raise RuntimeError("Expected DependencyError, but got no exception")
+        random_fail(fail_prob=0, inputs=futs).result()
+    except DependencyError:
+        pass
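
The reworked tests assert DependencyError directly: when an input future fails, parsl marks the dependent app's future as failed without running it. A self-contained illustration of that propagation (not part of this diff), assuming the stock local-threads config shipped as parsl.configs.local_threads:

import parsl
from parsl.app.app import python_app
from parsl.configs.local_threads import config
from parsl.dataflow.errors import DependencyError

@python_app
def always_fail():
    raise RuntimeError("boom")

@python_app
def consumer(inputs=()):
    return "never runs"

parsl.load(config)
try:
    # consumer never executes; its future fails with DependencyError
    consumer(inputs=[always_fail()]).result()
except DependencyError:
    print("dependent task failed without executing")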

parsl/tests/test_python_apps/test_garbage_collect.py
@@ -1,30 +1,36 @@
-import parsl
+import threading
 import time
 
+import pytest
+
+import parsl
 from parsl.app.app import python_app
+from parsl.tests.configs.local_threads import fresh_config as local_config  # noqa
 
 
 @python_app
-def slow_double(x):
-    import time
-    time.sleep(0.1)
+def slow_double(x, may_continue: threading.Event):
+    may_continue.wait()
     return x * 2
 
 
+@pytest.mark.local
 def test_garbage_collect():
     """ Launches an app with a dependency and waits till it's done and asserts that
     the internal refs were wiped
     """
-    x = slow_double(slow_double(10))
+    evt = threading.Event()
+    x = slow_double(10, evt)
+    x = slow_double(x, evt)
 
-    if x.done() is False:
-        assert parsl.dfk().tasks[x.tid]['app_fu'] == x, "Tasks table should have app_fu ref before done"
+    assert parsl.dfk().tasks[x.tid]['app_fu'] == x, "Tasks table should have app_fu ref before done"
 
-    x.result()
+    evt.set()
+    assert x.result() == 10 * 4
     if parsl.dfk().checkpoint_mode is not None:
         # We explicit call checkpoint if checkpoint_mode is enabled covering
         # cases like manual/periodic where checkpointing may be deferred.
         parsl.dfk().checkpoint()
 
-    time.sleep(0.2)  # Give enough time for task wipes to work
+    time.sleep(0.01)  # Give enough time for task wipes to work
     assert x.tid not in parsl.dfk().tasks, "Task record should be wiped after task completion"

parsl/tests/test_python_apps/test_join.py
@@ -1,17 +1,14 @@
 import pytest
-import time
 
 from parsl import join_app, python_app
 from parsl.dataflow.errors import JoinError
 
-from parsl.tests.configs.local_threads import fresh_config as local_config
 
 
 RESULT_CONSTANT = 3
 
 
-@python_app(cache=True)
+@python_app
 def inner_app():
-    time.sleep(1)
     return RESULT_CONSTANT
 
 
@@ -34,24 +31,17 @@ def combine(*args):
 
 @join_app
 def outer_make_a_dag_combine(n):
-    futs = []
-    for _ in range(n):
-        futs.append(inner_app())
-    return combine(*futs)
+    return combine(*(inner_app() for _ in range(n)))
 
 
 @join_app
 def outer_make_a_dag_multi(n):
-    futs = []
-    for _ in range(n):
-        futs.append(inner_app())
-    return futs
+    return [inner_app() for _ in range(n)]
 
 
 def test_result_flow():
     f = outer_app()
-    res = f.result()
-    assert res == RESULT_CONSTANT
+    assert f.result() == RESULT_CONSTANT
 
 
 @join_app
@@ -67,20 +57,17 @@ def test_wrong_type():
 
 def test_dependency_on_joined():
     g = add_one(outer_app())
-    res = g.result()
-    assert res == RESULT_CONSTANT + 1
+    assert g.result() == RESULT_CONSTANT + 1
 
 
 def test_combine():
     f = outer_make_a_dag_combine(inner_app())
-    res = f.result()
-    assert res == [RESULT_CONSTANT] * RESULT_CONSTANT
+    assert f.result() == [RESULT_CONSTANT] * RESULT_CONSTANT
 
 
 def test_multiple_return():
     f = outer_make_a_dag_multi(inner_app())
-    res = f.result()
-    assert res == [RESULT_CONSTANT] * RESULT_CONSTANT
+    assert f.result() == [RESULT_CONSTANT] * RESULT_CONSTANT
 
 
 class InnerError(RuntimeError):
@@ -139,3 +126,15 @@ def test_one_error_one_result():
     de0 = e.dependent_exceptions_tids[0][0]
     assert isinstance(de0, InnerError)
     assert de0.args[0] == "Error A"
+
+
+@join_app
+def app_no_futures():
+    return []
+
+
+def test_no_futures():
+    # tests that a list of futures that contains no futures will
+    # complete - regression test for issue #2792
+    f = app_no_futures()
+    assert f.result() == []

parsl/tests/test_python_apps/test_mapred.py
@@ -1,19 +1,15 @@
-import argparse
+import pytest
 
-import parsl
 from parsl.app.app import python_app
-from parsl.tests.configs.local_threads import config
 
 
 @python_app
-def fan_out(x, dur):
-    import time
-    time.sleep(dur)
+def times_two(x):
     return x * 2
 
 
 @python_app
-def accumulate(inputs=[]):
+def accumulate(inputs=()):
     return sum(inputs)
 
 
@@ -22,38 +18,17 @@ def accumulate_t(*args):
     return sum(args)
 
 
-def test_mapred_type1(width=2):
-    """MapReduce test with the reduce stage taking futures in inputs=[]
-    """
-
-    futs = []
-    for i in range(1, width + 1):
-        fu = fan_out(i, 1)
-        futs.extend([fu])
-
-    print("Fan out : ", futs)
-
+@pytest.mark.parametrize("width", (2, 3, 5))
+def test_mapred_type1(width):
+    """MapReduce test with the reduce stage taking futures in inputs=[]"""
+    futs = [times_two(i) for i in range(width)]
     red = accumulate(inputs=futs)
-    # print([(i, i.done()) for i in futs])
-    r = sum([x * 2 for x in range(1, width + 1)])
-    assert r == red.result(), "[TEST] MapRed type1 expected %s, got %s" % (
-        r, red.result())
-
+    assert red.result() == 2 * sum(range(width))
 
-def test_mapred_type2(width=2):
-    """MapReduce test with the reduce stage taking futures on the args
-    """
-
-    futs = []
-    for i in range(1, width + 1):
-        fu = fan_out(i, 0.1)
-        futs.extend([fu])
-
-    print("Fan out : ", futs)
 
+@pytest.mark.parametrize("width", (2, 3, 5))
+def test_mapred_type2(width):
+    """MapReduce test with the reduce stage taking futures on the args"""
+    futs = [times_two(i) for i in range(width)]
     red = accumulate_t(*futs)
-
-    # print([(i, i.done()) for i in futs])
-    r = sum([x * 2 for x in range(1, width + 1)])
-    assert r == red.result(), "[TEST] MapRed type2 expected %s, got %s" % (
-        r, red.result())
+    assert red.result() == 2 * sum(range(width))

parsl/tests/test_python_apps/test_memoize_bad_id_for_memo.py
@@ -29,9 +29,8 @@ def noop_app(x, inputs=[], cache=True):
 
 
 @python_app
-def sleep(t):
-    import time
-    time.sleep(t)
+def some_func(_t):
+    pass
 
 
 def test_python_unmemoizable():
@@ -51,14 +50,14 @@ def test_python_failing_memoizer():
 
 
 def test_python_unmemoizable_after_dep():
-    sleep_fut = sleep(1)
-    fut = noop_app(Unmemoizable(), inputs=[sleep_fut])
+    memoizable_fut = some_func(1)
+    fut = noop_app(Unmemoizable(), inputs=[memoizable_fut])
     with pytest.raises(ValueError):
         fut.result()
 
 
 def test_python_failing_memoizer_afer_dep():
-    sleep_fut = sleep(1)
-    fut = noop_app(FailingMemoizable(), inputs=[sleep_fut])
+    memoizable_fut = some_func(1)
+    fut = noop_app(FailingMemoizable(), inputs=[memoizable_fut])
     with pytest.raises(ValueError):
         fut.result()

parsl/tests/test_python_apps/test_outputs.py
@@ -1,12 +1,9 @@
-import argparse
 import os
-import pytest
-import shutil
-
 from concurrent.futures import wait
 
+import pytest
+
 from parsl import File, python_app
-from parsl.tests.configs.local_threads import fresh_config as local_config
 
 
 @python_app
@@ -20,22 +17,12 @@ whitelist = os.path.join(os.path.dirname(os.path.dirname(__file__)), 'configs',
 
 
 @pytest.mark.issue363
-def test_launch_apps(n=2, outdir='outputs'):
-    if not os.path.exists(outdir):
-        os.makedirs(outdir)
-    else:
-        shutil.rmtree(outdir)
-        os.makedirs(outdir)
-    print('outdir is ', outdir)
-
-    all_futs = []
-    for i in range(n):
-        fus = double(i, outputs=[File('{0}/{1}.txt'.format(outdir, i))])
-        all_futs.append(fus)
-
-    wait(all_futs)
-
-    stdout_file_count = len(
-        [item for item in os.listdir(outdir) if item.endswith('.txt')])
-    assert stdout_file_count == n, "Only {}/{} files in '{}' ".format(
-        len(os.listdir('outputs/')), n, os.listdir(outdir))
+def test_launch_apps(tmpd_cwd, n=2):
+    outdir = tmpd_cwd / "outputs"
+    outdir.mkdir()
+
+    futs = [double(i, outputs=[File(str(outdir / f"{i}.txt"))]) for i in range(n)]
+    wait(futs)
+
+    stdout_file_count = len(list(outdir.glob("*.txt")))
+    assert stdout_file_count == n, sorted(outdir.glob("*.txt"))

parsl/tests/test_python_apps/test_overview.py
@@ -1,8 +1,4 @@
-import argparse
-
-import parsl
 from parsl.app.app import python_app
-from parsl.tests.configs.local_threads import config
 
 
 @python_app
@@ -11,50 +7,17 @@ def app_double(x):
 
 
 @python_app
-def app_sum(inputs=[]):
+def app_sum(inputs=()):
     return sum(inputs)
 
 
-@python_app
-def slow_app_double(x, sleep_dur=0.05):
-    import time
-    time.sleep(sleep_dur)
-    return x * 2
-
-
 def test_1(N=10):
     """Testing code snippet from the documentation
     """
 
-    # Create a list of integers
-    items = range(0, N)
-
-    # Map Phase : Apply an *app* function to each item in list
-    mapped_results = []
-    for i in items:
-        x = app_double(i)
-        mapped_results.append(x)
-
-    total = app_sum(inputs=mapped_results)
-
-    assert total.result() != sum(items), "Sum is wrong {0} != {1}".format(
-        total.result(), sum(items))
-
-
-def test_2(N=10):
-    """Testing code snippet from the documentation
-    """
-
-    # Create a list of integers
-    items = range(0, N)
-
-    # Map Phase : Apply an *app* function to each item in list
-    mapped_results = []
-    for i in items:
-        x = slow_app_double(i)
-        mapped_results.append(x)
+    # Create a list of integers, then apply *app* function to each
+    items = range(N)
+    mapped_results = list(map(app_double, items))
 
     total = app_sum(inputs=mapped_results)
-
-    assert total.result() != sum(items), "Sum is wrong {0} != {1}".format(
-        total.result(), sum(items))
+    assert total.result() == 2 * sum(items)