parsl 2024.10.28__py3-none-any.whl → 2024.11.11__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (74)
  1. parsl/channels/base.py +6 -46
  2. parsl/channels/errors.py +0 -67
  3. parsl/channels/local/local.py +5 -56
  4. parsl/dataflow/dflow.py +6 -61
  5. parsl/executors/high_throughput/executor.py +0 -1
  6. parsl/executors/high_throughput/mpi_resource_management.py +0 -12
  7. parsl/executors/taskvine/manager.py +6 -0
  8. parsl/executors/taskvine/manager_config.py +5 -0
  9. parsl/monitoring/monitoring.py +23 -26
  10. parsl/monitoring/radios.py +4 -17
  11. parsl/monitoring/remote.py +3 -5
  12. parsl/providers/__init__.py +0 -2
  13. parsl/providers/base.py +1 -1
  14. parsl/providers/cluster_provider.py +1 -4
  15. parsl/providers/condor/condor.py +1 -4
  16. parsl/providers/grid_engine/grid_engine.py +1 -4
  17. parsl/providers/lsf/lsf.py +1 -4
  18. parsl/providers/pbspro/pbspro.py +1 -4
  19. parsl/providers/slurm/slurm.py +1 -4
  20. parsl/providers/torque/torque.py +1 -4
  21. parsl/tests/configs/user_opts.py +0 -7
  22. parsl/tests/conftest.py +4 -4
  23. parsl/tests/site_tests/site_config_selector.py +1 -6
  24. parsl/tests/test_bash_apps/test_basic.py +3 -0
  25. parsl/tests/test_bash_apps/test_error_codes.py +4 -0
  26. parsl/tests/test_bash_apps/test_kwarg_storage.py +1 -0
  27. parsl/tests/test_bash_apps/test_memoize.py +2 -6
  28. parsl/tests/test_bash_apps/test_memoize_ignore_args.py +3 -0
  29. parsl/tests/test_bash_apps/test_memoize_ignore_args_regr.py +1 -0
  30. parsl/tests/test_bash_apps/test_multiline.py +1 -0
  31. parsl/tests/test_bash_apps/test_stdout.py +2 -0
  32. parsl/tests/test_channels/test_local_channel.py +0 -19
  33. parsl/tests/test_docs/test_from_slides.py +3 -0
  34. parsl/tests/test_docs/test_kwargs.py +3 -0
  35. parsl/tests/test_monitoring/test_basic.py +13 -1
  36. parsl/tests/test_providers/test_local_provider.py +0 -135
  37. parsl/tests/test_providers/test_pbspro_template.py +2 -1
  38. parsl/tests/test_providers/test_slurm_template.py +2 -1
  39. parsl/tests/test_python_apps/test_outputs.py +1 -0
  40. parsl/tests/test_regression/test_226.py +1 -0
  41. parsl/tests/test_staging/test_docs_1.py +1 -0
  42. parsl/tests/test_staging/test_output_chain_filenames.py +3 -0
  43. parsl/tests/test_staging/test_staging_ftp.py +1 -0
  44. parsl/tests/test_staging/test_staging_https.py +3 -0
  45. parsl/tests/test_staging/test_staging_stdout.py +2 -0
  46. parsl/version.py +1 -1
  47. {parsl-2024.10.28.dist-info → parsl-2024.11.11.dist-info}/METADATA +2 -8
  48. {parsl-2024.10.28.dist-info → parsl-2024.11.11.dist-info}/RECORD +56 -74
  49. {parsl-2024.10.28.dist-info → parsl-2024.11.11.dist-info}/WHEEL +1 -1
  50. parsl/channels/oauth_ssh/__init__.py +0 -0
  51. parsl/channels/oauth_ssh/oauth_ssh.py +0 -119
  52. parsl/channels/ssh/__init__.py +0 -0
  53. parsl/channels/ssh/ssh.py +0 -295
  54. parsl/channels/ssh_il/__init__.py +0 -0
  55. parsl/channels/ssh_il/ssh_il.py +0 -85
  56. parsl/providers/ad_hoc/__init__.py +0 -0
  57. parsl/providers/ad_hoc/ad_hoc.py +0 -252
  58. parsl/providers/cobalt/__init__.py +0 -0
  59. parsl/providers/cobalt/cobalt.py +0 -236
  60. parsl/providers/cobalt/template.py +0 -17
  61. parsl/tests/configs/cooley_htex.py +0 -37
  62. parsl/tests/configs/local_adhoc.py +0 -18
  63. parsl/tests/configs/theta.py +0 -37
  64. parsl/tests/manual_tests/test_fan_in_out_htex_remote.py +0 -88
  65. parsl/tests/sites/test_local_adhoc.py +0 -62
  66. parsl/tests/test_channels/test_dfk_close.py +0 -26
  67. parsl/tests/test_providers/test_cobalt_deprecation_warning.py +0 -18
  68. {parsl-2024.10.28.data → parsl-2024.11.11.data}/scripts/exec_parsl_function.py +0 -0
  69. {parsl-2024.10.28.data → parsl-2024.11.11.data}/scripts/interchange.py +0 -0
  70. {parsl-2024.10.28.data → parsl-2024.11.11.data}/scripts/parsl_coprocess.py +0 -0
  71. {parsl-2024.10.28.data → parsl-2024.11.11.data}/scripts/process_worker_pool.py +0 -0
  72. {parsl-2024.10.28.dist-info → parsl-2024.11.11.dist-info}/LICENSE +0 -0
  73. {parsl-2024.10.28.dist-info → parsl-2024.11.11.dist-info}/entry_points.txt +0 -0
  74. {parsl-2024.10.28.dist-info → parsl-2024.11.11.dist-info}/top_level.txt +0 -0
@@ -5,15 +5,9 @@ import socket
5
5
  import uuid
6
6
  from abc import ABCMeta, abstractmethod
7
7
  from multiprocessing.queues import Queue
8
- from typing import Optional
9
8
 
10
9
  import zmq
11
10
 
12
- from parsl.serialize import serialize
13
-
14
- _db_manager_excepts: Optional[Exception]
15
-
16
-
17
11
  logger = logging.getLogger(__name__)
18
12
 
19
13
 
@@ -41,9 +35,8 @@ class FilesystemRadioSender(MonitoringRadioSender):
41
35
  the UDP radio, but should be much more reliable.
42
36
  """
43
37
 
44
- def __init__(self, *, monitoring_url: str, source_id: int, timeout: int = 10, run_dir: str):
38
+ def __init__(self, *, monitoring_url: str, timeout: int = 10, run_dir: str):
45
39
  logger.info("filesystem based monitoring channel initializing")
46
- self.source_id = source_id
47
40
  self.base_path = f"{run_dir}/monitor-fs-radio/"
48
41
  self.tmp_path = f"{self.base_path}/tmp"
49
42
  self.new_path = f"{self.base_path}/new"
@@ -64,25 +57,22 @@ class FilesystemRadioSender(MonitoringRadioSender):
64
57
  # move it into new/, so that a partially written
65
58
  # file will never be observed in new/
66
59
  with open(tmp_filename, "wb") as f:
67
- f.write(serialize(buffer))
60
+ pickle.dump(buffer, f)
68
61
  os.rename(tmp_filename, new_filename)
69
62
 
70
63
 
71
64
  class HTEXRadioSender(MonitoringRadioSender):
72
65
 
73
- def __init__(self, monitoring_url: str, source_id: int, timeout: int = 10):
66
+ def __init__(self, monitoring_url: str, timeout: int = 10):
74
67
  """
75
68
  Parameters
76
69
  ----------
77
70
 
78
71
  monitoring_url : str
79
72
  URL of the form <scheme>://<IP>:<PORT>
80
- source_id : str
81
- String identifier of the source
82
73
  timeout : int
83
74
  timeout, default=10s
84
75
  """
85
- self.source_id = source_id
86
76
  logger.info("htex-based monitoring channel initialising")
87
77
 
88
78
  def send(self, message: object) -> None:
@@ -124,21 +114,18 @@ class HTEXRadioSender(MonitoringRadioSender):
124
114
 
125
115
  class UDPRadioSender(MonitoringRadioSender):
126
116
 
127
- def __init__(self, monitoring_url: str, source_id: int, timeout: int = 10):
117
+ def __init__(self, monitoring_url: str, timeout: int = 10):
128
118
  """
129
119
  Parameters
130
120
  ----------
131
121
 
132
122
  monitoring_url : str
133
123
  URL of the form <scheme>://<IP>:<PORT>
134
- source_id : str
135
- String identifier of the source
136
124
  timeout : int
137
125
  timeout, default=10s
138
126
  """
139
127
  self.monitoring_url = monitoring_url
140
128
  self.sock_timeout = timeout
141
- self.source_id = source_id
142
129
  try:
143
130
  self.scheme, self.ip, port = (x.strip('/') for x in monitoring_url.split(':'))
144
131
  self.port = int(port)
@@ -103,14 +103,12 @@ def monitor_wrapper(*,
103
103
  def get_radio(radio_mode: str, monitoring_hub_url: str, task_id: int, run_dir: str) -> MonitoringRadioSender:
104
104
  radio: MonitoringRadioSender
105
105
  if radio_mode == "udp":
106
- radio = UDPRadioSender(monitoring_hub_url,
107
- source_id=task_id)
106
+ radio = UDPRadioSender(monitoring_hub_url)
108
107
  elif radio_mode == "htex":
109
- radio = HTEXRadioSender(monitoring_hub_url,
110
- source_id=task_id)
108
+ radio = HTEXRadioSender(monitoring_hub_url)
111
109
  elif radio_mode == "filesystem":
112
110
  radio = FilesystemRadioSender(monitoring_url=monitoring_hub_url,
113
- source_id=task_id, run_dir=run_dir)
111
+ run_dir=run_dir)
114
112
  else:
115
113
  raise RuntimeError(f"Unknown radio mode: {radio_mode}")
116
114
  return radio
@@ -1,7 +1,6 @@
1
1
  # Cloud Providers
2
2
  from parsl.providers.aws.aws import AWSProvider
3
3
  from parsl.providers.azure.azure import AzureProvider
4
- from parsl.providers.cobalt.cobalt import CobaltProvider
5
4
  from parsl.providers.condor.condor import CondorProvider
6
5
  from parsl.providers.googlecloud.googlecloud import GoogleCloudProvider
7
6
  from parsl.providers.grid_engine.grid_engine import GridEngineProvider
@@ -15,7 +14,6 @@ from parsl.providers.slurm.slurm import SlurmProvider
15
14
  from parsl.providers.torque.torque import TorqueProvider
16
15
 
17
16
  __all__ = ['LocalProvider',
18
- 'CobaltProvider',
19
17
  'CondorProvider',
20
18
  'GridEngineProvider',
21
19
  'SlurmProvider',
parsl/providers/base.py CHANGED
@@ -11,7 +11,7 @@ class ExecutionProvider(metaclass=ABCMeta):
11
11
  """Execution providers are responsible for managing execution resources
12
12
  that have a Local Resource Manager (LRM). For instance, campus clusters
13
13
  and supercomputers generally have LRMs (schedulers) such as Slurm,
14
- Torque/PBS, Condor and Cobalt. Clouds, on the other hand, have API
14
+ Torque/PBS, and Condor. Clouds, on the other hand, have API
15
15
  interfaces that allow much more fine-grained composition of an execution
16
16
  environment. An execution provider abstracts these types of resources and
17
17
  provides a single uniform interface to them.
@@ -18,10 +18,7 @@ class ClusterProvider(ExecutionProvider):
18
18
  label : str
19
19
  Label for this provider.
20
20
  channel : Channel
21
- Channel for accessing this provider. Possible channels include
22
- :class:`~parsl.channels.LocalChannel` (the default),
23
- :class:`~parsl.channels.SSHChannel`, or
24
- :class:`~parsl.channels.SSHInteractiveLoginChannel`.
21
+ Channel for accessing this provider.
25
22
  walltime : str
26
23
  Walltime requested per block in HH:MM:SS.
27
24
  launcher : Launcher
@@ -37,10 +37,7 @@ class CondorProvider(RepresentationMixin, ClusterProvider):
37
37
  Parameters
38
38
  ----------
39
39
  channel : Channel
40
- Channel for accessing this provider. Possible channels include
41
- :class:`~parsl.channels.LocalChannel` (the default),
42
- :class:`~parsl.channels.SSHChannel`, or
43
- :class:`~parsl.channels.SSHInteractiveLoginChannel`.
40
+ Channel for accessing this provider.
44
41
  nodes_per_block : int
45
42
  Nodes to provision per block.
46
43
  cores_per_slot : int
@@ -37,10 +37,7 @@ class GridEngineProvider(ClusterProvider, RepresentationMixin):
37
37
  Parameters
38
38
  ----------
39
39
  channel : Channel
40
- Channel for accessing this provider. Possible channels include
41
- :class:`~parsl.channels.LocalChannel` (the default),
42
- :class:`~parsl.channels.SSHChannel`, or
43
- :class:`~parsl.channels.SSHInteractiveLoginChannel`.
40
+ Channel for accessing this provider.
44
41
  nodes_per_block : int
45
42
  Nodes to provision per block.
46
43
  min_blocks : int
@@ -33,10 +33,7 @@ class LSFProvider(ClusterProvider, RepresentationMixin):
33
33
  Parameters
34
34
  ----------
35
35
  channel : Channel
36
- Channel for accessing this provider. Possible channels include
37
- :class:`~parsl.channels.LocalChannel` (the default),
38
- :class:`~parsl.channels.SSHChannel`, or
39
- :class:`~parsl.channels.SSHInteractiveLoginChannel`.
36
+ Channel for accessing this provider.
40
37
  nodes_per_block : int
41
38
  Nodes to provision per block.
42
39
  When request_by_nodes is False, it is computed by cores_per_block / cores_per_node.
@@ -18,10 +18,7 @@ class PBSProProvider(TorqueProvider):
18
18
  Parameters
19
19
  ----------
20
20
  channel : Channel
21
- Channel for accessing this provider. Possible channels include
22
- :class:`~parsl.channels.LocalChannel` (the default),
23
- :class:`~parsl.channels.SSHChannel`, or
24
- :class:`~parsl.channels.SSHInteractiveLoginChannel`.
21
+ Channel for accessing this provider.
25
22
  account : str
26
23
  Account the job will be charged against.
27
24
  queue : str
@@ -71,10 +71,7 @@ class SlurmProvider(ClusterProvider, RepresentationMixin):
71
71
  constraint : str
72
72
  Slurm job constraint, often used to choose cpu or gpu type. If unspecified or ``None``, no constraint slurm directive will be added.
73
73
  channel : Channel
74
- Channel for accessing this provider. Possible channels include
75
- :class:`~parsl.channels.LocalChannel` (the default),
76
- :class:`~parsl.channels.SSHChannel`, or
77
- :class:`~parsl.channels.SSHInteractiveLoginChannel`.
74
+ Channel for accessing this provider.
78
75
  nodes_per_block : int
79
76
  Nodes to provision per block.
80
77
  cores_per_node : int
@@ -34,10 +34,7 @@ class TorqueProvider(ClusterProvider, RepresentationMixin):
34
34
  Parameters
35
35
  ----------
36
36
  channel : Channel
37
- Channel for accessing this provider. Possible channels include
38
- :class:`~parsl.channels.LocalChannel` (the default),
39
- :class:`~parsl.channels.SSHChannel`, or
40
- :class:`~parsl.channels.SSHInteractiveLoginChannel`.
37
+ Channel for accessing this provider.
41
38
  account : str
42
39
  Account the job will be charged against.
43
40
  queue : str
@@ -135,13 +135,6 @@ user_opts = {
135
135
  # # For example:
136
136
  # 'remote_writeable': 'globus://af7bda53-6d04-11e5-ba46-22000b92c6ec/home/bzc/'
137
137
  # },
138
- # 'adhoc': {
139
- # # This specifies configuration parameters when testing an ad-hoc SSH based cluster
140
- # 'username': 'fixme', # username on remote systems
141
- # 'remote_hostnames': ['hostname1', 'hostname2'], # addresses of remote systems
142
- # 'worker_init': 'init commands', # worker_init for remote systems
143
- # 'script_dir': "/path" # script directory on remote systems
144
- # }
145
138
  #
146
139
  } # type: Dict[str, Any]
147
140
 
parsl/tests/conftest.py CHANGED
@@ -143,10 +143,6 @@ def pytest_configure(config):
143
143
  'markers',
144
144
  'staging_required: Marks tests that require a staging provider, when there is no sharedFS'
145
145
  )
146
- config.addinivalue_line(
147
- 'markers',
148
- 'sshd_required: Marks tests that require a SSHD'
149
- )
150
146
  config.addinivalue_line(
151
147
  'markers',
152
148
  'multiple_cores_required: Marks tests that require multiple cores, such as htex affinity'
@@ -163,6 +159,10 @@ def pytest_configure(config):
163
159
  'markers',
164
160
  'executor_supports_std_stream_tuples: Marks tests that require tuple support for stdout/stderr'
165
161
  )
162
+ config.addinivalue_line(
163
+ 'markers',
164
+ 'shared_fs: Marks tests that require a shared_fs between the workers are the test client'
165
+ )
166
166
 
167
167
 
168
168
  @pytest.fixture(autouse=True, scope='session')
@@ -7,12 +7,7 @@ def fresh_config():
7
7
  hostname = os.getenv('PARSL_HOSTNAME', platform.uname().node)
8
8
  print("Loading config for {}".format(hostname))
9
9
 
10
- if 'thetalogin' in hostname:
11
- from parsl.tests.configs.theta import fresh_config
12
- config = fresh_config()
13
- print("Loading Theta config")
14
-
15
- elif 'frontera' in hostname:
10
+ if 'frontera' in hostname:
16
11
  print("Loading Frontera config")
17
12
  from parsl.tests.configs.frontera import fresh_config
18
13
  config = fresh_config()
@@ -24,6 +24,7 @@ def foo(x, y, z=10, stdout=None, label=None):
24
24
  return f"echo {x} {y} {z}"
25
25
 
26
26
 
27
+ @pytest.mark.shared_fs
27
28
  def test_command_format_1(tmpd_cwd):
28
29
  """Testing command format for BashApps"""
29
30
 
@@ -38,6 +39,7 @@ def test_command_format_1(tmpd_cwd):
38
39
  assert so_content == "1 4 10"
39
40
 
40
41
 
42
+ @pytest.mark.shared_fs
41
43
  def test_auto_log_filename_format(caplog):
42
44
  """Testing auto log filename format for BashApps
43
45
  """
@@ -66,6 +68,7 @@ def test_auto_log_filename_format(caplog):
66
68
  assert record.levelno < logging.ERROR
67
69
 
68
70
 
71
+ @pytest.mark.shared_fs
69
72
  def test_parallel_for(tmpd_cwd, n=3):
70
73
  """Testing a simple parallel for loop"""
71
74
  outdir = tmpd_cwd / "outputs/test_parallel"
@@ -58,6 +58,7 @@ test_matrix = {
58
58
  whitelist = os.path.join(os.path.dirname(os.path.dirname(__file__)), 'configs', '*threads*')
59
59
 
60
60
 
61
+ @pytest.mark.shared_fs
61
62
  def test_div_0(test_fn=div_0):
62
63
  err_code = test_matrix[test_fn]['exit_code']
63
64
  f = test_fn()
@@ -73,6 +74,7 @@ def test_div_0(test_fn=div_0):
73
74
  os.remove('std.out')
74
75
 
75
76
 
77
+ @pytest.mark.shared_fs
76
78
  def test_bash_misuse(test_fn=bash_misuse):
77
79
  err_code = test_matrix[test_fn]['exit_code']
78
80
  f = test_fn()
@@ -87,6 +89,7 @@ def test_bash_misuse(test_fn=bash_misuse):
87
89
  os.remove('std.out')
88
90
 
89
91
 
92
+ @pytest.mark.shared_fs
90
93
  def test_command_not_found(test_fn=command_not_found):
91
94
  err_code = test_matrix[test_fn]['exit_code']
92
95
  f = test_fn()
@@ -103,6 +106,7 @@ def test_command_not_found(test_fn=command_not_found):
103
106
  return True
104
107
 
105
108
 
109
+ @pytest.mark.shared_fs
106
110
  def test_not_executable(test_fn=not_executable):
107
111
  err_code = test_matrix[test_fn]['exit_code']
108
112
  f = test_fn()
@@ -8,6 +8,7 @@ def foo(z=2, stdout=None):
8
8
  return f"echo {z}"
9
9
 
10
10
 
11
+ @pytest.mark.shared_fs
11
12
  def test_command_format_1(tmpd_cwd):
12
13
  """Testing command format for BashApps
13
14
  """
@@ -9,9 +9,7 @@ def fail_on_presence(outputs=()):
9
9
  return 'if [ -f {0} ] ; then exit 1 ; else touch {0}; fi'.format(outputs[0])
10
10
 
11
11
 
12
- # This test is an oddity that requires a shared-FS and simply
13
- # won't work if there's a staging provider.
14
- # @pytest.mark.sharedFS_required
12
+ @pytest.mark.shared_fs
15
13
  def test_bash_memoization(tmpd_cwd, n=2):
16
14
  """Testing bash memoization
17
15
  """
@@ -29,9 +27,7 @@ def fail_on_presence_kw(outputs=(), foo=None):
29
27
  return 'if [ -f {0} ] ; then exit 1 ; else touch {0}; fi'.format(outputs[0])
30
28
 
31
29
 
32
- # This test is an oddity that requires a shared-FS and simply
33
- # won't work if there's a staging provider.
34
- # @pytest.mark.sharedFS_required
30
+ @pytest.mark.shared_fs
35
31
  def test_bash_memoization_keywords(tmpd_cwd, n=2):
36
32
  """Testing bash memoization
37
33
  """
@@ -1,5 +1,7 @@
1
1
  import os
2
2
 
3
+ import pytest
4
+
3
5
  import parsl
4
6
  from parsl.app.app import bash_app
5
7
 
@@ -21,6 +23,7 @@ def no_checkpoint_stdout_app_ignore_args(stdout=None):
21
23
  return "echo X"
22
24
 
23
25
 
26
+ @pytest.mark.shared_fs
24
27
  def test_memo_stdout(tmpd_cwd):
25
28
  path_x = tmpd_cwd / "test.memo.stdout.x"
26
29
 
@@ -29,6 +29,7 @@ def no_checkpoint_stdout_app(stdout=None):
29
29
  return "echo X"
30
30
 
31
31
 
32
+ @pytest.mark.shared_fs
32
33
  def test_memo_stdout(tmpd_cwd):
33
34
  assert const_list_x == const_list_x_arg
34
35
 
@@ -14,6 +14,7 @@ def multiline(inputs=(), outputs=(), stderr=None, stdout=None):
14
14
  """.format(inputs=inputs, outputs=outputs)
15
15
 
16
16
 
17
+ @pytest.mark.shared_fs
17
18
  def test_multiline(tmpd_cwd):
18
19
  so, se = tmpd_cwd / "std.out", tmpd_cwd / "std.err"
19
20
  f = multiline(
@@ -91,6 +91,7 @@ def test_bad_stderr_file():
91
91
 
92
92
 
93
93
  @pytest.mark.executor_supports_std_stream_tuples
94
+ @pytest.mark.shared_fs
94
95
  def test_stdout_truncate(tmpd_cwd, caplog):
95
96
  """Testing truncation of prior content of stdout"""
96
97
 
@@ -110,6 +111,7 @@ def test_stdout_truncate(tmpd_cwd, caplog):
110
111
  assert record.levelno < logging.ERROR
111
112
 
112
113
 
114
+ @pytest.mark.shared_fs
113
115
  def test_stdout_append(tmpd_cwd, caplog):
114
116
  """Testing appending to prior content of stdout (default open() mode)"""
115
117
 
@@ -17,22 +17,3 @@ def test_env():
17
17
 
18
18
  x = [s for s in stdout if s.startswith("HOME=")]
19
19
  assert x, "HOME not found"
20
-
21
-
22
- @pytest.mark.local
23
- def test_env_mod():
24
- ''' Testing for env update at execute time.
25
- '''
26
-
27
- lc = LocalChannel()
28
- rc, stdout, stderr = lc.execute_wait("env", 1, {'TEST_ENV': 'fooo'})
29
-
30
- stdout = stdout.split('\n')
31
- x = [s for s in stdout if s.startswith("PATH=")]
32
- assert x, "PATH not found"
33
-
34
- x = [s for s in stdout if s.startswith("HOME=")]
35
- assert x, "HOME not found"
36
-
37
- x = [s for s in stdout if s.startswith("TEST_ENV=fooo")]
38
- assert x, "User set env missing"
@@ -1,5 +1,7 @@
1
1
  import os
2
2
 
3
+ import pytest
4
+
3
5
  from parsl.app.app import bash_app, python_app
4
6
  from parsl.data_provider.files import File
5
7
 
@@ -15,6 +17,7 @@ def cat(inputs=[]):
15
17
  return f.readlines()
16
18
 
17
19
 
20
+ @pytest.mark.staging_required
18
21
  def test_slides():
19
22
  """Testing code snippet from slides """
20
23
 
@@ -1,6 +1,8 @@
1
1
  """Functions used to explain kwargs"""
2
2
  from pathlib import Path
3
3
 
4
+ import pytest
5
+
4
6
  from parsl import File, python_app
5
7
 
6
8
 
@@ -19,6 +21,7 @@ def test_inputs():
19
21
  assert reduce_future.result() == 6
20
22
 
21
23
 
24
+ @pytest.mark.shared_fs
22
25
  def test_outputs(tmpd_cwd):
23
26
  @python_app()
24
27
  def write_app(message, outputs=()):
@@ -42,6 +42,18 @@ def htex_udp_config():
42
42
  return c
43
43
 
44
44
 
45
+ def htex_filesystem_config():
46
+ """This config will force filesystem radio"""
47
+ from parsl.tests.configs.htex_local_alternate import fresh_config
48
+ c = fresh_config()
49
+ assert len(c.executors) == 1
50
+
51
+ assert c.executors[0].radio_mode == "htex", "precondition: htex has a radio mode attribute, configured for htex radio"
52
+ c.executors[0].radio_mode = "filesystem"
53
+
54
+ return c
55
+
56
+
45
57
  def workqueue_config():
46
58
  from parsl.tests.configs.workqueue_ex import fresh_config
47
59
  c = fresh_config()
@@ -61,7 +73,7 @@ def taskvine_config():
61
73
 
62
74
 
63
75
  @pytest.mark.local
64
- @pytest.mark.parametrize("fresh_config", [htex_config, htex_udp_config, workqueue_config, taskvine_config])
76
+ @pytest.mark.parametrize("fresh_config", [htex_config, htex_filesystem_config, htex_udp_config, workqueue_config, taskvine_config])
65
77
  def test_row_counts(tmpd_cwd, fresh_config):
66
78
  # this is imported here rather than at module level because
67
79
  # it isn't available in a plain parsl install, so this module
@@ -12,7 +12,6 @@ import time
12
12
  import pytest
13
13
 
14
14
  from parsl.channels import LocalChannel
15
- from parsl.channels.ssh.ssh import DeprecatedSSHChannel
16
15
  from parsl.jobs.states import JobState
17
16
  from parsl.launchers import SingleNodeLauncher
18
17
  from parsl.providers import LocalProvider
@@ -69,140 +68,6 @@ def test_local_channel():
69
68
  _run_tests(p)
70
69
 
71
70
 
72
- SSHD_CONFIG = """
73
- Port {port}
74
- ListenAddress 127.0.0.1
75
- HostKey {hostkey}
76
- AuthorizedKeysFile {connpubkey}
77
- AuthenticationMethods publickey
78
- StrictModes no
79
- Subsystem sftp {sftp_path}
80
- """
81
-
82
-
83
- # It would probably be better, when more formalized site testing comes into existence, to
84
- # use a site-testing provided server/configuration instead of the current scheme
85
- @pytest.mark.local
86
- @pytest.mark.sshd_required
87
- def test_ssh_channel():
88
- with tempfile.TemporaryDirectory() as config_dir:
89
- sshd_thread, priv_key, server_port = _start_sshd(config_dir)
90
- try:
91
- with tempfile.TemporaryDirectory() as remote_script_dir:
92
- # The SSH library fails to add the new host key to the file if the file does not
93
- # already exist, so create it here.
94
- pathlib.Path('{}/known.hosts'.format(config_dir)).touch(mode=0o600)
95
- script_dir = tempfile.mkdtemp()
96
- channel = DeprecatedSSHChannel('127.0.0.1', port=server_port,
97
- script_dir=remote_script_dir,
98
- host_keys_filename='{}/known.hosts'.format(config_dir),
99
- key_filename=priv_key)
100
- try:
101
- p = LocalProvider(channel=channel,
102
- launcher=SingleNodeLauncher(debug=False))
103
- p.script_dir = script_dir
104
- _run_tests(p)
105
- finally:
106
- channel.close()
107
- finally:
108
- _stop_sshd(sshd_thread)
109
-
110
-
111
- def _stop_sshd(sshd_thread):
112
- sshd_thread.stop()
113
- sshd_thread.join()
114
-
115
-
116
- class SSHDThread(threading.Thread):
117
- def __init__(self, config_file):
118
- threading.Thread.__init__(self, daemon=True)
119
- self.config_file = config_file
120
- self.stop_flag = False
121
- self.error = None
122
-
123
- def run(self):
124
- try:
125
- # sshd needs to be run with an absolute path, hence the call to which()
126
- sshpath = shutil.which('sshd')
127
- assert sshpath is not None, "can find sshd executable"
128
- p = subprocess.Popen([sshpath, '-D', '-f', self.config_file],
129
- stdout=subprocess.PIPE, stderr=subprocess.PIPE)
130
- while True:
131
- ec = p.poll()
132
- if self.stop_flag:
133
- p.terminate()
134
- break
135
- elif ec is None:
136
- time.sleep(0.1)
137
- elif ec == 0:
138
- self.error = Exception('sshd exited prematurely: {}{}'.format(p.stdout.read(),
139
- p.stderr.read()))
140
- break
141
- else:
142
- self.error = Exception('sshd failed: {}{}'.format(p.stdout.read(),
143
- p.stderr.read()))
144
- break
145
- except Exception as ex:
146
- logger.exception("SSHDThread exception from run loop")
147
- self.error = ex
148
-
149
- def stop(self):
150
- self.stop_flag = True
151
-
152
-
153
- def _start_sshd(config_dir: str):
154
- server_config, priv_key, port = _init_sshd(config_dir)
155
- sshd_thread = SSHDThread(server_config)
156
- sshd_thread.start()
157
- time.sleep(1.0)
158
- if not sshd_thread.is_alive():
159
- raise Exception('Failed to start sshd: {}'.format(sshd_thread.error))
160
- return sshd_thread, priv_key, port
161
-
162
-
163
- def _init_sshd(config_dir):
164
- hostkey = '{}/hostkey'.format(config_dir)
165
- connkey = '{}/connkey'.format(config_dir)
166
- os.system('ssh-keygen -b 2048 -t rsa -q -N "" -f {}'.format(hostkey))
167
- os.system('ssh-keygen -b 2048 -t rsa -q -N "" -f {}'.format(connkey))
168
- port = _find_free_port(22222)
169
- server_config_str = SSHD_CONFIG.format(port=port, hostkey=hostkey,
170
- connpubkey='{}.pub'.format(connkey),
171
- sftp_path=_get_system_sftp_path())
172
- server_config = '{}/sshd_config'.format(config_dir)
173
- with open(server_config, 'w') as f:
174
- f.write(server_config_str)
175
- return server_config, connkey, port
176
-
177
-
178
- def _get_system_sftp_path():
179
- try:
180
- with open('/etc/ssh/sshd_config') as f:
181
- line = f.readline()
182
- while line:
183
- tokens = line.split()
184
- if tokens[0] == 'Subsystem' and tokens[1] == 'sftp':
185
- return tokens[2]
186
- line = f.readline()
187
- except Exception:
188
- pass
189
- return '/usr/lib/openssh/sftp-server'
190
-
191
-
192
- def _find_free_port(start: int):
193
- port = start
194
- while port < 65535:
195
- s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
196
- try:
197
- s.bind(('127.0.0.1', port))
198
- s.close()
199
- return port
200
- except Exception:
201
- pass
202
- port += random.randint(1, 20)
203
- raise Exception('Could not find free port')
204
-
205
-
206
71
  def _run(p: LocalProvider, command: str, np: int = 1):
207
72
  id = p.submit(command, np)
208
73
  return _wait(p, id)
@@ -12,9 +12,10 @@ def test_submit_script_basic(tmp_path):
12
12
  """Test slurm resources table"""
13
13
 
14
14
  provider = PBSProProvider(
15
- queue="debug", channel=LocalChannel(script_dir=tmp_path)
15
+ queue="debug", channel=LocalChannel()
16
16
  )
17
17
  provider.script_dir = tmp_path
18
+ provider.channel.script_dir = tmp_path
18
19
  job_id = str(random.randint(55000, 59000))
19
20
  provider.execute_wait = mock.Mock(spec=PBSProProvider.execute_wait)
20
21
  provider.execute_wait.return_value = (0, job_id, "")
@@ -13,9 +13,10 @@ def test_submit_script_basic(tmp_path):
13
13
  """Test slurm resources table"""
14
14
 
15
15
  provider = SlurmProvider(
16
- partition="debug", channel=LocalChannel(script_dir=tmp_path)
16
+ partition="debug", channel=LocalChannel()
17
17
  )
18
18
  provider.script_dir = tmp_path
19
+ provider.channel.script_dir = tmp_path
19
20
  job_id = str(random.randint(55000, 59000))
20
21
  provider.execute_wait = mock.MagicMock(spec=SlurmProvider.execute_wait)
21
22
  provider.execute_wait.return_value = (0, f"Submitted batch job {job_id}", "")