parsl 2023.7.3__py3-none-any.whl → 2023.7.17__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (46)
  1. parsl/data_provider/files.py +6 -5
  2. parsl/dataflow/dflow.py +7 -1
  3. parsl/dataflow/memoization.py +7 -7
  4. parsl/executors/high_throughput/executor.py +2 -3
  5. parsl/executors/high_throughput/process_worker_pool.py +2 -3
  6. parsl/launchers/errors.py +1 -1
  7. parsl/providers/cluster_provider.py +2 -1
  8. parsl/providers/local/local.py +1 -1
  9. parsl/serialize/base.py +3 -13
  10. parsl/serialize/concretes.py +22 -3
  11. parsl/serialize/facade.py +13 -23
  12. parsl/tests/conftest.py +94 -11
  13. parsl/tests/test_bash_apps/test_basic.py +32 -63
  14. parsl/tests/test_bash_apps/test_kwarg_storage.py +18 -89
  15. parsl/tests/test_bash_apps/test_memoize.py +17 -41
  16. parsl/tests/test_bash_apps/test_multiline.py +19 -45
  17. parsl/tests/test_bash_apps/test_pipeline.py +46 -82
  18. parsl/tests/test_bash_apps/test_stdout.py +15 -30
  19. parsl/tests/test_data/test_file_apps.py +13 -15
  20. parsl/tests/test_data/test_file_staging.py +2 -2
  21. parsl/tests/test_data/test_output_chain_filenames.py +17 -27
  22. parsl/tests/test_docs/test_workflow4.py +18 -28
  23. parsl/tests/test_error_handling/test_htex_worker_failure.py +5 -12
  24. parsl/tests/test_python_apps/test_fail.py +31 -69
  25. parsl/tests/test_python_apps/test_garbage_collect.py +15 -9
  26. parsl/tests/test_python_apps/test_join.py +19 -20
  27. parsl/tests/test_python_apps/test_mapred.py +13 -38
  28. parsl/tests/test_python_apps/test_memoize_bad_id_for_memo.py +6 -7
  29. parsl/tests/test_python_apps/test_outputs.py +11 -24
  30. parsl/tests/test_python_apps/test_overview.py +5 -42
  31. parsl/tests/test_python_apps/test_pipeline.py +16 -19
  32. parsl/tests/test_regression/test_1606_wait_for_current_tasks.py +35 -10
  33. parsl/tests/test_scaling/test_regression_1621.py +26 -20
  34. parsl/tests/test_scaling/test_scale_down.py +49 -32
  35. parsl/tests/test_serialization/test_2555_caching_deserializer.py +34 -0
  36. parsl/utils.py +8 -6
  37. parsl/version.py +1 -1
  38. {parsl-2023.7.3.data → parsl-2023.7.17.data}/scripts/process_worker_pool.py +2 -3
  39. {parsl-2023.7.3.dist-info → parsl-2023.7.17.dist-info}/METADATA +2 -2
  40. {parsl-2023.7.3.dist-info → parsl-2023.7.17.dist-info}/RECORD +46 -45
  41. {parsl-2023.7.3.data → parsl-2023.7.17.data}/scripts/exec_parsl_function.py +0 -0
  42. {parsl-2023.7.3.data → parsl-2023.7.17.data}/scripts/parsl_coprocess.py +0 -0
  43. {parsl-2023.7.3.dist-info → parsl-2023.7.17.dist-info}/LICENSE +0 -0
  44. {parsl-2023.7.3.dist-info → parsl-2023.7.17.dist-info}/WHEEL +0 -0
  45. {parsl-2023.7.3.dist-info → parsl-2023.7.17.dist-info}/entry_points.txt +0 -0
  46. {parsl-2023.7.3.dist-info → parsl-2023.7.17.dist-info}/top_level.txt +0 -0
parsl/data_provider/files.py CHANGED
@@ -5,11 +5,11 @@ to transfer the file as well as to give the appropriate filepath depending
 on where (client-side, remote-side, intermediary-side) the File.filepath is
 being called from.
 """
-
 import os
+
 import typeguard
 import logging
-from typing import Optional
+from typing import Optional, Union
 from urllib.parse import urlparse
 
 logger = logging.getLogger(__name__)
@@ -28,17 +28,18 @@ class File:
     """
 
     @typeguard.typechecked
-    def __init__(self, url: str):
+    def __init__(self, url: Union[os.PathLike, str]):
         """Construct a File object from a url string.
 
         Args:
-           - url (string) : url string of the file e.g.
+           - url (string or PathLike) : url of the file e.g.
              - 'input.txt'
+             - pathlib.Path('input.txt')
             - 'file:///scratch/proj101/input.txt'
             - 'globus://go#ep1/~/data/input.txt'
             - 'globus://ddb59aef-6d04-11e5-ba46-22000b92c6ec/home/johndoe/data/input.txt'
        """
-        self.url = url
+        self.url = str(url)
        parsed_url = urlparse(self.url)
        self.scheme = parsed_url.scheme if parsed_url.scheme else 'file'
        self.netloc = parsed_url.netloc
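
With this change, File accepts any os.PathLike as well as a plain string, coercing it to a string internally. A minimal usage sketch (nothing here beyond what the diff shows; requires only that parsl is installed):

    import pathlib
    from parsl.data_provider.files import File

    # Both construct equivalent File objects; the Path is coerced via str().
    f1 = File("input.txt")
    f2 = File(pathlib.Path("input.txt"))
    assert f1.scheme == f2.scheme == "file"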
parsl/dataflow/dflow.py CHANGED
@@ -380,6 +380,12 @@ class DataFlowKernel:
             task_record['join_lock'] = threading.Lock()
             self._send_task_log_info(task_record)
             joinable.add_done_callback(partial(self.handle_join_update, task_record))
+        elif joinable == []:  # got a list, but it had no entries, and specifically, no Futures.
+            self.update_task_state(task_record, States.joining)
+            task_record['joins'] = joinable
+            task_record['join_lock'] = threading.Lock()
+            self._send_task_log_info(task_record)
+            self.handle_join_update(task_record, None)
         elif isinstance(joinable, list) and [j for j in joinable if not isinstance(j, Future)] == []:
             self.update_task_state(task_record, States.joining)
             task_record['joins'] = joinable
@@ -403,7 +409,7 @@
         if task_record['status'] == States.pending:
             self.launch_if_ready(task_record)
 
-    def handle_join_update(self, task_record: TaskRecord, inner_app_future: AppFuture) -> None:
+    def handle_join_update(self, task_record: TaskRecord, inner_app_future: Optional[AppFuture]) -> None:
         with task_record['join_lock']:
             # inner_app_future has completed, which is one (potentially of many)
             # futures the outer task is joining on.
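
The new branch covers a join app that returns an empty list: with no inner Futures to wait on, the task moves to the joining state and the join is resolved immediately via handle_join_update(task_record, None). A sketch of the case it handles (hypothetical app; loading a parsl config is omitted):

    from parsl.app.app import join_app

    @join_app
    def gather_nothing():
        # Returns a list containing no Futures, which is now handled by the
        # explicit `joinable == []` branch rather than falling through.
        return []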
parsl/dataflow/memoization.py CHANGED
@@ -2,6 +2,7 @@ from __future__ import annotations
 import hashlib
 from functools import lru_cache, singledispatch
 import logging
+import pickle
 from parsl.dataflow.taskrecord import TaskRecord
 
 from typing import Dict, Any, List, Optional, TYPE_CHECKING
@@ -11,7 +12,6 @@ if TYPE_CHECKING:
 
 from concurrent.futures import Future
 
-from parsl.serialize import serialize
 import types
 
 logger = logging.getLogger(__name__)
@@ -54,8 +54,8 @@ def id_for_memo(obj: object, output_ref: bool = False) -> bytes:
 @id_for_memo.register(int)
 @id_for_memo.register(float)
 @id_for_memo.register(type(None))
-def id_for_memo_serialize(obj: object, output_ref: bool = False) -> bytes:
-    return serialize(obj)
+def id_for_memo_pickle(obj: object, output_ref: bool = False) -> bytes:
+    return pickle.dumps(obj)
 
 
 @id_for_memo.register(list)
@@ -68,7 +68,7 @@ def id_for_memo_list(denormalized_list: list, output_ref: bool = False) -> bytes
     for e in denormalized_list:
         normalized_list.append(id_for_memo(e, output_ref=output_ref))
 
-    return serialize(normalized_list)
+    return pickle.dumps(normalized_list)
 
 
 @id_for_memo.register(tuple)
@@ -81,7 +81,7 @@ def id_for_memo_tuple(denormalized_tuple: tuple, output_ref: bool = False) -> by
     for e in denormalized_tuple:
         normalized_list.append(id_for_memo(e, output_ref=output_ref))
 
-    return serialize(normalized_list)
+    return pickle.dumps(normalized_list)
 
 
 @id_for_memo.register(dict)
@@ -100,7 +100,7 @@ def id_for_memo_dict(denormalized_dict: dict, output_ref: bool = False) -> bytes
     for k in keys:
         normalized_list.append(id_for_memo(k))
         normalized_list.append(id_for_memo(denormalized_dict[k], output_ref=output_ref))
-    return serialize(normalized_list)
+    return pickle.dumps(normalized_list)
 
 
 # the LRU cache decorator must be applied closer to the id_for_memo_function call
@@ -112,7 +112,7 @@ def id_for_memo_function(f: types.FunctionType, output_ref: bool = False) -> byt
     This means that changing source code (other than the function name) will
     not cause a checkpoint invalidation.
     """
-    return serialize(["types.FunctionType", f.__name__, f.__module__])
+    return pickle.dumps(["types.FunctionType", f.__name__, f.__module__])
 
 
 class Memoizer:
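
Since the memo key for these basic types is now a plain pickle of the value, it round-trips with pickle.loads. A small illustrative check (memo keys are treated as opaque bytes in normal use):

    import pickle
    from parsl.dataflow.memoization import id_for_memo

    key = id_for_memo(42)       # dispatches to id_for_memo_pickle for int
    assert isinstance(key, bytes)
    assert pickle.loads(key) == 42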
parsl/executors/high_throughput/executor.py CHANGED
@@ -595,10 +595,9 @@ class HighThroughputExecutor(BlockProviderExecutor, RepresentationMixin):
         except TypeError:
             raise SerializationError(func.__name__)
 
-        msg = {"task_id": task_id,
-               "buffer": fn_buf}
+        msg = {"task_id": task_id, "buffer": fn_buf}
 
-        # Post task to the the outgoing queue
+        # Post task to the outgoing queue
         self.outgoing_q.put(msg)
 
         # Return the future
parsl/executors/high_throughput/process_worker_pool.py CHANGED
@@ -370,7 +370,7 @@ class Manager:
         logger.critical("Exiting")
 
     @wrap_with_logs
-    def worker_watchdog(self, kill_event):
+    def worker_watchdog(self, kill_event: threading.Event):
         """Keeps workers alive.
 
         Parameters:
@@ -381,7 +381,7 @@
 
         logger.debug("Starting worker watchdog")
 
-        while not kill_event.is_set():
+        while not kill_event.wait(self.heartbeat_period):
             for worker_id, p in self.procs.items():
                 if not p.is_alive():
                     logger.error("Worker {} has died".format(worker_id))
@@ -409,7 +409,6 @@
                                          name="HTEX-Worker-{}".format(worker_id))
                     self.procs[worker_id] = p
                     logger.info("Worker {} has been restarted".format(worker_id))
-            time.sleep(self.heartbeat_period)
 
         logger.critical("Exiting")
 
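The watchdog change swaps an `is_set()` poll plus a trailing `time.sleep` for `Event.wait(timeout)`: the loop still wakes once per heartbeat period, but it now also wakes immediately when the kill event is set. A generic sketch of the pattern, using only the stdlib:

    import threading

    def watchdog_loop(kill_event: threading.Event, period_s: float) -> None:
        # wait() returns False when the timeout elapses (keep looping) and
        # True as soon as the event is set, so shutdown is not delayed by a
        # full sleep interval.
        while not kill_event.wait(period_s):
            pass  # ... check worker liveness and restart dead workers ...
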
parsl/launchers/errors.py CHANGED
@@ -10,4 +10,4 @@ class BadLauncher(ExecutionProviderException, TypeError):
         self.launcher = launcher
 
     def __str__(self) -> str:
-        return f"Bad Launcher provided: {self.launcher}, expecting a parsl.launcher.launcher.Launcher or callable"
+        return f"Bad Launcher provided: {self.launcher}, expecting a parsl.launcher.launcher.Launcher"
parsl/providers/cluster_provider.py CHANGED
@@ -3,6 +3,7 @@ from abc import abstractmethod
 from string import Template
 
 from parsl.providers.errors import SchedulerMissingArgs, ScriptPathError
+from parsl.launchers.base import Launcher
 from parsl.launchers.errors import BadLauncher
 from parsl.providers.base import ExecutionProvider
 
@@ -66,7 +67,7 @@ class ClusterProvider(ExecutionProvider):
         self.launcher = launcher
         self.walltime = walltime
         self.cmd_timeout = cmd_timeout
-        if not callable(self.launcher):
+        if not isinstance(self.launcher, Launcher):
             raise BadLauncher(self.launcher)
 
         self.script_dir = None
parsl/providers/local/local.py CHANGED
@@ -214,7 +214,7 @@ class LocalProvider(ExecutionProvider, RepresentationMixin):
             logger.debug("Pushing start script")
             script_path = self.channel.push_file(script_path, self.channel.script_dir)
 
-        logger.debug("Launching in remote mode")
+        logger.debug("Launching")
         # We need to capture the exit code and the streams, so we put them in files. We also write
         # '-' to the exit code file to isolate potential problems with writing to files in the
         # script directory
parsl/serialize/base.py CHANGED
@@ -1,6 +1,5 @@
 from abc import abstractmethod
 import logging
-import functools
 
 from typing import Any
 
@@ -32,24 +31,15 @@ class SerializerBase:
 
     @property
     def identifier(self) -> bytes:
-        """ Get the identifier of the serialization method
+        """Get that identifier that will be used to indicate in byte streams
+        that this class should be used for deserialization.
 
         Returns
         -------
-        identifier : str
+        identifier : bytes
         """
         return self._identifier
 
-    def enable_caching(self, maxsize: int = 128) -> None:
-        """ Add functools.lru_cache onto the serialize, deserialize methods
-        """
-
-        # ignore types here because mypy at the moment is not fond of monkeypatching
-        self.serialize = functools.lru_cache(maxsize=maxsize)(self.serialize)  # type: ignore[method-assign]
-        self.deserialize = functools.lru_cache(maxsize=maxsize)(self.deserialize)  # type: ignore[method-assign]
-
-        return
-
     @abstractmethod
     def serialize(self, data: Any) -> bytes:
         pass
parsl/serialize/concretes.py CHANGED
@@ -1,4 +1,5 @@
 import dill
+import functools
 import pickle
 import logging
 
@@ -14,11 +15,10 @@ class PickleSerializer(SerializerBase):
     * functions defined in a interpreter/notebook
     * classes defined in local context and not importable using a fully qualified name
     * closures, generators and coroutines
-    * [sometimes] issues with wrapped/decorated functions
     """
 
     _identifier = b'01'
-    _for_code = True
+    _for_code = False
     _for_data = True
 
     def serialize(self, data: Any) -> bytes:
@@ -41,7 +41,7 @@ class DillSerializer(SerializerBase):
     """
 
     _identifier = b'02'
-    _for_code = True
+    _for_code = False
     _for_data = True
 
     def serialize(self, data: Any) -> bytes:
@@ -49,3 +49,22 @@ class DillSerializer(SerializerBase):
 
     def deserialize(self, body: bytes) -> Any:
         return dill.loads(body)
+
+
+class DillCallableSerializer(SerializerBase):
+    """This serializer is a variant of the DillSerializer that will
+    serialize and deserialize callables using an lru_cache, under the
+    assumption that callables are immutable and so can be cached.
+    """
+
+    _identifier = b'C2'
+    _for_code = True
+    _for_data = False
+
+    @functools.lru_cache
+    def serialize(self, data: Any) -> bytes:
+        return dill.dumps(data)
+
+    @functools.lru_cache
+    def deserialize(self, body: bytes) -> Any:
+        return dill.loads(body)
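
One subtlety in DillCallableSerializer: functools.lru_cache applied to an instance method keys its cache on the (self, argument) pair, so arguments must be hashable — which holds here, since callables and bytes are. A standalone sketch of the same caching idea:

    import functools

    class CachedCodec:
        @functools.lru_cache
        def encode(self, value: str) -> bytes:
            # Computed once per (self, value) pair, then served from the cache.
            return value.encode()

    codec = CachedCodec()
    assert codec.encode("x") is codec.encode("x")  # second call is a cache hit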
parsl/serialize/facade.py CHANGED
@@ -1,8 +1,8 @@
 from parsl.serialize.concretes import *  # noqa: F403,F401
-from parsl.serialize.base import METHODS_MAP_DATA, METHODS_MAP_CODE, SerializerBase
+from parsl.serialize.base import METHODS_MAP_DATA, METHODS_MAP_CODE
 import logging
 
-from typing import Any, Dict, List, Tuple, Union
+from typing import Any, List, Union
 
 logger = logging.getLogger(__name__)
 
@@ -14,16 +14,11 @@ methods_for_data = {}
 
 for key in METHODS_MAP_CODE:
     methods_for_code[key] = METHODS_MAP_CODE[key]()
-    methods_for_code[key].enable_caching(maxsize=128)
 
 for key in METHODS_MAP_DATA:
     methods_for_data[key] = METHODS_MAP_DATA[key]()
 
 
-def _list_methods() -> Tuple[Dict[bytes, SerializerBase], Dict[bytes, SerializerBase]]:
-    return methods_for_code, methods_for_data
-
-
 def pack_apply_message(func: Any, args: Any, kwargs: Any, buffer_threshold: int = int(128 * 1e6)) -> bytes:
     """Serialize and pack function and parameters
 
@@ -65,23 +60,18 @@ def serialize(obj: Any, buffer_threshold: int = int(1e6)) -> bytes:
     """
     result: Union[bytes, Exception]
     if callable(obj):
-        for method in methods_for_code.values():
-            try:
-                result = method._identifier + b'\n' + method.serialize(obj)
-            except Exception as e:
-                result = e
-                continue
-            else:
-                break
+        methods = methods_for_code
     else:
-        for method in methods_for_data.values():
-            try:
-                result = method._identifier + b'\n' + method.serialize(obj)
-            except Exception as e:
-                result = e
-                continue
-            else:
-                break
+        methods = methods_for_data
+
+    for method in methods.values():
+        try:
+            result = method._identifier + b'\n' + method.serialize(obj)
+        except Exception as e:
+            result = e
+            continue
+        else:
+            break
 
     if isinstance(result, BaseException):
         raise result
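
As the rewritten loop shows, a serialized buffer is framed as the serializer's identifier, then a newline, then that serializer's payload. A quick round-trip sketch (assuming parsl.serialize also exports deserialize, and using the b'01'/b'02' identifiers from concretes.py):

    from parsl.serialize import deserialize, serialize

    buf = serialize([1, 2, 3])
    header, _, payload = buf.partition(b'\n')
    assert header in (b'01', b'02')       # pickle or dill data serializer
    assert deserialize(buf) == [1, 2, 3]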
parsl/tests/conftest.py CHANGED
@@ -1,12 +1,19 @@
 import importlib.util
+import itertools
 import logging
 import os
-from glob import glob
-from itertools import chain
+import pathlib
+import time
+import types
 import signal
 import sys
+import tempfile
 import threading
 import traceback
+import typing as t
+from datetime import datetime
+from glob import glob
+from itertools import chain
 
 import pytest
 import _pytest.runner as runner
@@ -36,6 +43,20 @@ def pytest_sessionstart(session):
     signal.signal(signal.SIGUSR1, dumpstacks)
 
 
+@pytest.fixture(scope="session")
+def tmpd_cwd_session():
+    n = datetime.now().strftime('%Y%m%d.%H%I%S')
+    with tempfile.TemporaryDirectory(dir=os.getcwd(), prefix=f".pytest-{n}-") as tmpd:
+        yield pathlib.Path(tmpd)
+
+
+@pytest.fixture
+def tmpd_cwd(tmpd_cwd_session, request):
+    prefix = f"{request.node.name}-"
+    with tempfile.TemporaryDirectory(dir=tmpd_cwd_session, prefix=prefix) as tmpd:
+        yield pathlib.Path(tmpd)
+
+
 def pytest_addoption(parser):
     """Add parsl-specific command-line options to pytest.
     """
@@ -140,9 +161,9 @@ def load_dfk_local_module(request, pytestconfig):
     parsl.load. It should be a Callable that returns a parsl Config object.
 
     If local_setup and/or local_teardown are callables (such as functions) in
-    the test module, they they will be invoked before/after the tests. This
-    can be used to perform more interesting DFK initialisation not possible
-    with local_config.
+    the test module, they will be invoked before/after the tests. This can
+    be used to perform more interesting DFK initialisation not possible with
+    local_config.
     """
 
     config = pytestconfig.getoption('config')[0]
@@ -212,14 +233,12 @@ def apply_masks(request, pytestconfig):
 
 
 @pytest.fixture
-def setup_data(tmp_path):
-    data_dir = tmp_path / "data"
+def setup_data(tmpd_cwd):
+    data_dir = tmpd_cwd / "data"
     data_dir.mkdir()
 
-    with open(data_dir / "test1.txt", "w") as f:
-        f.write("1\n")
-    with open(data_dir / "test2.txt", "w") as f:
-        f.write("2\n")
+    (data_dir / "test1.txt").write_text("1\n")
+    (data_dir / "test2.txt").write_text("2\n")
     return data_dir
 
 
@@ -275,3 +294,67 @@ def pytest_ignore_collect(path):
         return True
     else:
         return False
+
+
+def create_traceback(start: int = 0) -> t.Optional[types.TracebackType]:
+    """
+    Dynamically create a traceback.
+
+    Builds a traceback from the top of the stack (the currently executing frame) on
+    down to the root frame.  Optionally, use start to build from an earlier stack
+    frame.
+
+    N.B. uses `sys._getframe`, which I only know to exist in CPython.
+    """
+    tb = None
+    for depth in itertools.count(start + 1, 1):
+        try:
+            frame = sys._getframe(depth)
+            tb = types.TracebackType(tb, frame, frame.f_lasti, frame.f_lineno)
+        except ValueError:
+            break
+    return tb
+
+
+@pytest.fixture
+def try_assert():
+    def _impl(
+        test_func: t.Callable[[], bool],
+        fail_msg: str = "",
+        timeout_ms: float = 5000,
+        attempts: int = 0,
+        check_period_ms: int = 20,
+    ):
+        tb = create_traceback(start=1)
+        timeout_s = abs(timeout_ms) / 1000.0
+        check_period_s = abs(check_period_ms) / 1000.0
+        if attempts > 0:
+            for _attempt_no in range(attempts):
+                if test_func():
+                    return
+                time.sleep(check_period_s)
+            else:
+                att_fail = (
+                    f"\n  (Still failing after attempt limit [{attempts}], testing"
+                    f" every {check_period_ms}ms)"
+                )
+                exc = AssertionError(f"{str(fail_msg)}{att_fail}".strip())
+                raise exc.with_traceback(tb)
+
+        elif timeout_s > 0:
+            end = time.monotonic() + timeout_s
+            while time.monotonic() < end:
+                if test_func():
+                    return
+                time.sleep(check_period_s)
+            att_fail = (
+                f"\n  (Still failing after timeout [{timeout_ms}ms], with attempts "
+                f"every {check_period_ms}ms)"
+            )
+            exc = AssertionError(f"{str(fail_msg)}{att_fail}".strip())
+            raise exc.with_traceback(tb)
+
+        else:
+            raise AssertionError("Bad assert call: no attempts or timeout period")
+
+    yield _impl
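
For reference, a hypothetical test using the new try_assert fixture (the test name and polled condition are illustrative only):

    def test_eventually_true(try_assert):
        state = {"calls": 0}

        def condition() -> bool:
            state["calls"] += 1
            return state["calls"] >= 3

        # Polls condition() every 20ms (the default check period) until it
        # returns True, or raises an AssertionError after the 1000ms timeout.
        try_assert(condition, "condition never became true", timeout_ms=1000)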
parsl/tests/test_bash_apps/test_basic.py CHANGED
@@ -1,56 +1,41 @@
-import argparse
 import os
-import pytest
-import shutil
-import time
 import random
 import re
 
+import pytest
+
 import parsl
 from parsl import File
 from parsl.app.app import bash_app
 
-from parsl.tests.configs.local_threads import config
-
 
 @bash_app
-def echo_to_file(inputs=[], outputs=[], stderr='std.err', stdout='std.out'):
+def echo_to_file(inputs=(), outputs=(), stderr=None, stdout=None):
     res = ""
-    for i in inputs:
-        for o in outputs:
+    for o in outputs:
+        for i in inputs:
             res += "echo {} >& {}".format(i, o)
     return res
 
 
 @bash_app
 def foo(x, y, z=10, stdout=None, label=None):
-    return """echo {0} {1} {z}
-    """.format(x, y, z=z)
+    return f"echo {x} {y} {z}"
 
 
 @pytest.mark.issue363
-def test_command_format_1():
-    """Testing command format for BashApps
-    """
-
-    outdir = os.path.abspath('outputs')
-    stdout = os.path.join(outdir, 'foo-std.out')
-    if os.path.exists(stdout):
-        os.remove(stdout)
+def test_command_format_1(tmpd_cwd):
+    """Testing command format for BashApps"""
 
-    foo_future = foo(1, 4, stdout=stdout)
-    print("[test_command_format_1] foo_future: ", foo_future)
-    contents = None
+    outdir = tmpd_cwd / "outputs"
+    outdir.mkdir()
+    stdout = outdir / "foo-std.out"
 
-    assert foo_future.result() == 0, "BashApp exited with an error code : {0}".format(
-        foo_future.result())
-
-    with open(stdout, 'r') as stdout_f:
-        contents = stdout_f.read()
+    foo_future = foo(1, 4, stdout=str(stdout))
+    assert foo_future.result() == 0, "BashApp had non-zero exit code"
 
-    assert contents == '1 4 10\n', 'Output does not match expected string "1 4 10", Got: "{0}"'.format(
-        contents)
-    return True
+    so_content = stdout.read_text().strip()
+    assert so_content == "1 4 10"
 
 
 @pytest.mark.issue363
@@ -61,8 +46,6 @@ def test_auto_log_filename_format():
     rand_int = random.randint(1000, 1000000000)
 
     foo_future = foo(1, rand_int, stdout=parsl.AUTO_LOGNAME, label=app_label)
-    print("[test_auto_log_filename_format] foo_future: ", foo_future)
-    contents = None
 
     assert foo_future.result() == 0, "BashApp exited with an error code : {0}".format(
         foo_future.result())
@@ -77,39 +60,25 @@ def test_auto_log_filename_format():
 
     assert contents == '1 {0} 10\n'.format(rand_int), \
         'Output does not match expected string "1 {0} 10", Got: "{1}"'.format(rand_int, contents)
-    return True
 
 
 @pytest.mark.issue363
-def test_parallel_for(n=3):
-    """Testing a simple parallel for loop
-    """
-    outdir = os.path.join(os.path.abspath('outputs'), 'test_parallel')
-    if not os.path.exists(outdir):
-        os.makedirs(outdir)
-    else:
-        shutil.rmtree(outdir)
-        os.makedirs(outdir)
-
-    d = {}
-
-    start = time.time()
-    for i in range(0, n):
-        d[i] = echo_to_file(
-            inputs=['Hello World {0}'.format(i)],
-            outputs=[File('{0}/out.{1}.txt'.format(outdir, i))],
-            stdout='{0}/std.{1}.out'.format(outdir, i),
-            stderr='{0}/std.{1}.err'.format(outdir, i),
+def test_parallel_for(tmpd_cwd, n=3):
+    """Testing a simple parallel for loop"""
+    outdir = tmpd_cwd / "outputs/test_parallel"
+    outdir.mkdir(parents=True)
+    futs = [
+        echo_to_file(
+            inputs=[f"Hello World {i}"],
+            outputs=[File(str(outdir / f"out.{i}.txt"))],
+            stdout=str(outdir / f"std.{i}.out"),
+            stderr=str(outdir / f"std.{i}.err"),
         )
+        for i in range(n)
+    ]
+
+    for f in futs:
+        f.result()
 
-    assert len(
-        d.keys()) == n, "Only {0}/{1} keys in dict".format(len(d.keys()), n)
-
-    [d[i].result() for i in d]
-    print("Duration : {0}s".format(time.time() - start))
-    stdout_file_count = len(
-        [item for item in os.listdir(outdir) if item.endswith('.out')])
-    assert stdout_file_count == n, "Only {0}/{1} files in '{2}' ".format(len(os.listdir('outputs/')),
-                                                                         n, outdir)
-    print("[TEST STATUS] test_parallel_for [SUCCESS]")
-    return d
+    stdout_file_count = len(list(outdir.glob("*.out")))
+    assert stdout_file_count == n, sorted(outdir.iterdir())