parsl 2024.11.4__py3-none-any.whl → 2024.11.18__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (45)
  1. parsl/channels/base.py +6 -46
  2. parsl/channels/errors.py +0 -67
  3. parsl/channels/local/local.py +5 -56
  4. parsl/dataflow/dflow.py +1 -58
  5. parsl/executors/taskvine/manager.py +6 -0
  6. parsl/executors/taskvine/manager_config.py +5 -0
  7. parsl/monitoring/db_manager.py +6 -6
  8. parsl/monitoring/monitoring.py +27 -33
  9. parsl/monitoring/radios.py +1 -3
  10. parsl/monitoring/router.py +11 -11
  11. parsl/providers/cluster_provider.py +1 -4
  12. parsl/providers/condor/condor.py +1 -4
  13. parsl/providers/grid_engine/grid_engine.py +1 -4
  14. parsl/providers/lsf/lsf.py +1 -4
  15. parsl/providers/pbspro/pbspro.py +1 -4
  16. parsl/providers/slurm/slurm.py +26 -7
  17. parsl/providers/torque/torque.py +1 -4
  18. parsl/tests/configs/user_opts.py +0 -7
  19. parsl/tests/conftest.py +0 -4
  20. parsl/tests/test_channels/test_local_channel.py +0 -19
  21. parsl/tests/test_providers/test_local_provider.py +0 -135
  22. parsl/tests/test_providers/test_pbspro_template.py +2 -1
  23. parsl/tests/test_providers/test_slurm_template.py +2 -1
  24. parsl/version.py +1 -1
  25. {parsl-2024.11.4.dist-info → parsl-2024.11.18.dist-info}/METADATA +2 -8
  26. {parsl-2024.11.4.dist-info → parsl-2024.11.18.dist-info}/RECORD +34 -45
  27. {parsl-2024.11.4.dist-info → parsl-2024.11.18.dist-info}/WHEEL +1 -1
  28. parsl/channels/oauth_ssh/__init__.py +0 -0
  29. parsl/channels/oauth_ssh/oauth_ssh.py +0 -119
  30. parsl/channels/ssh/__init__.py +0 -0
  31. parsl/channels/ssh/ssh.py +0 -295
  32. parsl/channels/ssh_il/__init__.py +0 -0
  33. parsl/channels/ssh_il/ssh_il.py +0 -85
  34. parsl/providers/ad_hoc/__init__.py +0 -0
  35. parsl/providers/ad_hoc/ad_hoc.py +0 -252
  36. parsl/tests/configs/local_adhoc.py +0 -18
  37. parsl/tests/sites/test_local_adhoc.py +0 -62
  38. parsl/tests/test_channels/test_dfk_close.py +0 -26
  39. {parsl-2024.11.4.data → parsl-2024.11.18.data}/scripts/exec_parsl_function.py +0 -0
  40. {parsl-2024.11.4.data → parsl-2024.11.18.data}/scripts/interchange.py +0 -0
  41. {parsl-2024.11.4.data → parsl-2024.11.18.data}/scripts/parsl_coprocess.py +0 -0
  42. {parsl-2024.11.4.data → parsl-2024.11.18.data}/scripts/process_worker_pool.py +0 -0
  43. {parsl-2024.11.4.dist-info → parsl-2024.11.18.dist-info}/LICENSE +0 -0
  44. {parsl-2024.11.4.dist-info → parsl-2024.11.18.dist-info}/entry_points.txt +0 -0
  45. {parsl-2024.11.4.dist-info → parsl-2024.11.18.dist-info}/top_level.txt +0 -0
parsl/monitoring/router.py CHANGED
@@ -14,6 +14,7 @@ import typeguard
  import zmq

  from parsl.log_utils import set_file_logger
+ from parsl.monitoring.radios import MultiprocessingQueueRadioSender
  from parsl.monitoring.types import TaggedMonitoringMessage
  from parsl.process_loggers import wrap_with_logs
  from parsl.utils import setproctitle
@@ -30,7 +31,7 @@ class MonitoringRouter:
  zmq_port_range: Tuple[int, int] = (55050, 56000),

  monitoring_hub_address: str = "127.0.0.1",
- logdir: str = ".",
+ run_dir: str = ".",
  logging_level: int = logging.INFO,
  atexit_timeout: int = 3, # in seconds
  resource_msgs: mpq.Queue,
@@ -47,7 +48,7 @@ class MonitoringRouter:
  zmq_port_range : tuple(int, int)
  The MonitoringHub picks ports at random from the range which will be used by Hub.
  Default: (55050, 56000)
- logdir : str
+ run_dir : str
  Parsl log directory paths. Logs and temp files go here. Default: '.'
  logging_level : int
  Logging level as defined in the logging module. Default: logging.INFO
@@ -55,12 +56,11 @@
  The amount of time in seconds to terminate the hub without receiving any messages, after the last dfk workflow message is received.
  resource_msgs : multiprocessing.Queue
  A multiprocessing queue to receive messages to be routed onwards to the database process
-
  exit_event : Event
  An event that the main Parsl process will set to signal that the monitoring router should shut down.
  """
- os.makedirs(logdir, exist_ok=True)
- self.logger = set_file_logger("{}/monitoring_router.log".format(logdir),
+ os.makedirs(run_dir, exist_ok=True)
+ self.logger = set_file_logger(f"{run_dir}/monitoring_router.log",
  name="monitoring_router",
  level=logging_level)
  self.logger.debug("Monitoring router starting")
@@ -98,7 +98,7 @@ class MonitoringRouter:
  min_port=zmq_port_range[0],
  max_port=zmq_port_range[1])

- self.resource_msgs = resource_msgs
+ self.target_radio = MultiprocessingQueueRadioSender(resource_msgs)
  self.exit_event = exit_event

  @wrap_with_logs(target="monitoring_router")
@@ -125,7 +125,7 @@ class MonitoringRouter:
  data, addr = self.udp_sock.recvfrom(2048)
  resource_msg = pickle.loads(data)
  self.logger.debug("Got UDP Message from {}: {}".format(addr, resource_msg))
- self.resource_msgs.put(resource_msg)
+ self.target_radio.send(resource_msg)
  except socket.timeout:
  pass

@@ -136,7 +136,7 @@
  data, addr = self.udp_sock.recvfrom(2048)
  msg = pickle.loads(data)
  self.logger.debug("Got UDP Message from {}: {}".format(addr, msg))
- self.resource_msgs.put(msg)
+ self.target_radio.send(msg)
  last_msg_received_time = time.time()
  except socket.timeout:
  pass
@@ -160,7 +160,7 @@
  assert len(msg) >= 1, "ZMQ Receiver expects tuples of length at least 1, got {}".format(msg)
  assert len(msg) == 2, "ZMQ Receiver expects message tuples of exactly length 2, got {}".format(msg)

- self.resource_msgs.put(msg)
+ self.target_radio.send(msg)
  except zmq.Again:
  pass
  except Exception:
@@ -187,14 +187,14 @@ def router_starter(*,
  udp_port: Optional[int],
  zmq_port_range: Tuple[int, int],

- logdir: str,
+ run_dir: str,
  logging_level: int) -> None:
  setproctitle("parsl: monitoring router")
  try:
  router = MonitoringRouter(hub_address=hub_address,
  udp_port=udp_port,
  zmq_port_range=zmq_port_range,
- logdir=logdir,
+ run_dir=run_dir,
  logging_level=logging_level,
  resource_msgs=resource_msgs,
  exit_event=exit_event)
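The router hunks above replace direct queue writes (self.resource_msgs.put(...)) with a radio abstraction (self.target_radio.send(...)). The body of MultiprocessingQueueRadioSender is not part of this diff, so the snippet below is only a minimal sketch, assuming the sender is a thin wrapper that forwards each message onto the multiprocessing queue it is given.

    # Hypothetical sketch only -- the real class lives in parsl/monitoring/radios.py
    # and its implementation is not shown in this diff.
    class MultiprocessingQueueRadioSender:
        def __init__(self, queue):
            self.queue = queue              # e.g. the resource_msgs queue handed to the router

        def send(self, message):
            self.queue.put(message)         # same effect as the old resource_msgs.put(message)

    # Inside MonitoringRouter the change then amounts to:
    #   before: self.resource_msgs.put(msg)
    #   after:  self.target_radio.send(msg)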
parsl/providers/cluster_provider.py CHANGED
@@ -18,10 +18,7 @@ class ClusterProvider(ExecutionProvider):
  label : str
  Label for this provider.
  channel : Channel
- Channel for accessing this provider. Possible channels include
- :class:`~parsl.channels.LocalChannel` (the default),
- :class:`~parsl.channels.SSHChannel`, or
- :class:`~parsl.channels.SSHInteractiveLoginChannel`.
+ Channel for accessing this provider.
  walltime : str
  Walltime requested per block in HH:MM:SS.
  launcher : Launcher
parsl/providers/condor/condor.py CHANGED
@@ -37,10 +37,7 @@ class CondorProvider(RepresentationMixin, ClusterProvider):
  Parameters
  ----------
  channel : Channel
- Channel for accessing this provider. Possible channels include
- :class:`~parsl.channels.LocalChannel` (the default),
- :class:`~parsl.channels.SSHChannel`, or
- :class:`~parsl.channels.SSHInteractiveLoginChannel`.
+ Channel for accessing this provider.
  nodes_per_block : int
  Nodes to provision per block.
  cores_per_slot : int
parsl/providers/grid_engine/grid_engine.py CHANGED
@@ -37,10 +37,7 @@ class GridEngineProvider(ClusterProvider, RepresentationMixin):
  Parameters
  ----------
  channel : Channel
- Channel for accessing this provider. Possible channels include
- :class:`~parsl.channels.LocalChannel` (the default),
- :class:`~parsl.channels.SSHChannel`, or
- :class:`~parsl.channels.SSHInteractiveLoginChannel`.
+ Channel for accessing this provider.
  nodes_per_block : int
  Nodes to provision per block.
  min_blocks : int
parsl/providers/lsf/lsf.py CHANGED
@@ -33,10 +33,7 @@ class LSFProvider(ClusterProvider, RepresentationMixin):
  Parameters
  ----------
  channel : Channel
- Channel for accessing this provider. Possible channels include
- :class:`~parsl.channels.LocalChannel` (the default),
- :class:`~parsl.channels.SSHChannel`, or
- :class:`~parsl.channels.SSHInteractiveLoginChannel`.
+ Channel for accessing this provider.
  nodes_per_block : int
  Nodes to provision per block.
  When request_by_nodes is False, it is computed by cores_per_block / cores_per_node.
parsl/providers/pbspro/pbspro.py CHANGED
@@ -18,10 +18,7 @@ class PBSProProvider(TorqueProvider):
  Parameters
  ----------
  channel : Channel
- Channel for accessing this provider. Possible channels include
- :class:`~parsl.channels.LocalChannel` (the default),
- :class:`~parsl.channels.SSHChannel`, or
- :class:`~parsl.channels.SSHInteractiveLoginChannel`.
+ Channel for accessing this provider.
  account : str
  Account the job will be charged against.
  queue : str
parsl/providers/slurm/slurm.py CHANGED
@@ -70,11 +70,11 @@ class SlurmProvider(ClusterProvider, RepresentationMixin):
  Slurm queue to place job in. If unspecified or ``None``, no queue slurm directive will be specified.
  constraint : str
  Slurm job constraint, often used to choose cpu or gpu type. If unspecified or ``None``, no constraint slurm directive will be added.
+ clusters : str
+ Slurm cluster name, or comma seperated cluster list, used to choose between different clusters in a federated Slurm instance.
+ If unspecified or ``None``, no slurm directive for clusters will be added.
  channel : Channel
- Channel for accessing this provider. Possible channels include
- :class:`~parsl.channels.LocalChannel` (the default),
- :class:`~parsl.channels.SSHChannel`, or
- :class:`~parsl.channels.SSHInteractiveLoginChannel`.
+ Channel for accessing this provider.
  nodes_per_block : int
  Nodes to provision per block.
  cores_per_node : int
@@ -119,6 +119,7 @@ class SlurmProvider(ClusterProvider, RepresentationMixin):
  account: Optional[str] = None,
  qos: Optional[str] = None,
  constraint: Optional[str] = None,
+ clusters: Optional[str] = None,
  channel: Channel = LocalChannel(),
  nodes_per_block: int = 1,
  cores_per_node: Optional[int] = None,
@@ -155,6 +156,7 @@ class SlurmProvider(ClusterProvider, RepresentationMixin):
  self.account = account
  self.qos = qos
  self.constraint = constraint
+ self.clusters = clusters
  self.scheduler_options = scheduler_options + '\n'
  if exclusive:
  self.scheduler_options += "#SBATCH --exclusive\n"
@@ -166,6 +168,8 @@ class SlurmProvider(ClusterProvider, RepresentationMixin):
  self.scheduler_options += "#SBATCH --qos={}\n".format(qos)
  if constraint:
  self.scheduler_options += "#SBATCH --constraint={}\n".format(constraint)
+ if clusters:
+ self.scheduler_options += "#SBATCH --clusters={}\n".format(clusters)

  self.regex_job_id = regex_job_id
  self.worker_init = worker_init + '\n'
@@ -177,14 +181,22 @@ class SlurmProvider(ClusterProvider, RepresentationMixin):
  logger.debug(f"sacct returned retcode={retcode} stderr={stderr}")
  if retcode == 0:
  logger.debug("using sacct to get job status")
+ _cmd = "sacct"
+ # Add clusters option to sacct if provided
+ if self.clusters:
+ _cmd += f" --clusters={self.clusters}"
  # Using state%20 to get enough characters to not truncate output
  # of the state. Without output can look like "<job_id> CANCELLED+"
- self._cmd = "sacct -X --noheader --format=jobid,state%20 --job '{0}'"
+ self._cmd = _cmd + " -X --noheader --format=jobid,state%20 --job '{0}'"
  self._translate_table = sacct_translate_table
  else:
  logger.debug(f"sacct failed with retcode={retcode}")
  logger.debug("falling back to using squeue to get job status")
- self._cmd = "squeue --noheader --format='%i %t' --job '{0}'"
+ _cmd = "squeue"
+ # Add clusters option to squeue if provided
+ if self.clusters:
+ _cmd += f" --clusters={self.clusters}"
+ self._cmd = _cmd + " --noheader --format='%i %t' --job '{0}'"
  self._translate_table = squeue_translate_table

  def _status(self):
@@ -347,7 +359,14 @@ class SlurmProvider(ClusterProvider, RepresentationMixin):
  '''

  job_id_list = ' '.join(job_ids)
- retcode, stdout, stderr = self.execute_wait("scancel {0}".format(job_id_list))
+
+ # Make the command to cancel jobs
+ _cmd = "scancel"
+ if self.clusters:
+ _cmd += f" --clusters={self.clusters}"
+ _cmd += " {0}"
+
+ retcode, stdout, stderr = self.execute_wait(_cmd.format(job_id_list))
  rets = None
  if retcode == 0:
  for jid in job_ids:
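Taken together, the slurm.py hunks add a clusters option that is emitted as an "#SBATCH --clusters=..." directive and appended to the sacct, squeue, and scancel commands. The configuration below is an illustrative sketch of how the new option might be used; the cluster names and the surrounding executor/config wrapper are placeholders, not taken from this diff.

    # Illustrative configuration only; cluster names are placeholders.
    from parsl.config import Config
    from parsl.executors import HighThroughputExecutor
    from parsl.providers import SlurmProvider

    config = Config(
        executors=[
            HighThroughputExecutor(
                provider=SlurmProvider(
                    partition="debug",
                    clusters="clusterA,clusterB",   # new in 2024.11.18: emits "#SBATCH --clusters=..."
                                                    # and adds --clusters=... to sacct/squeue/scancel
                    nodes_per_block=1,
                    walltime="00:30:00",
                ),
            )
        ]
    )

With clusters left at its default of None, no clusters directive is added and the status and cancel commands are unchanged.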
parsl/providers/torque/torque.py CHANGED
@@ -34,10 +34,7 @@ class TorqueProvider(ClusterProvider, RepresentationMixin):
  Parameters
  ----------
  channel : Channel
- Channel for accessing this provider. Possible channels include
- :class:`~parsl.channels.LocalChannel` (the default),
- :class:`~parsl.channels.SSHChannel`, or
- :class:`~parsl.channels.SSHInteractiveLoginChannel`.
+ Channel for accessing this provider.
  account : str
  Account the job will be charged against.
  queue : str
parsl/tests/configs/user_opts.py CHANGED
@@ -135,13 +135,6 @@ user_opts = {
  # # For example:
  # 'remote_writeable': 'globus://af7bda53-6d04-11e5-ba46-22000b92c6ec/home/bzc/'
  # },
- # 'adhoc': {
- # # This specifies configuration parameters when testing an ad-hoc SSH based cluster
- # 'username': 'fixme', # username on remote systems
- # 'remote_hostnames': ['hostname1', 'hostname2'], # addresses of remote systems
- # 'worker_init': 'init commands', # worker_init for remote systems
- # 'script_dir': "/path" # script directory on remote systems
- # }
  #
  } # type: Dict[str, Any]

parsl/tests/conftest.py CHANGED
@@ -143,10 +143,6 @@ def pytest_configure(config):
  'markers',
  'staging_required: Marks tests that require a staging provider, when there is no sharedFS'
  )
- config.addinivalue_line(
- 'markers',
- 'sshd_required: Marks tests that require a SSHD'
- )
  config.addinivalue_line(
  'markers',
  'multiple_cores_required: Marks tests that require multiple cores, such as htex affinity'
parsl/tests/test_channels/test_local_channel.py CHANGED
@@ -17,22 +17,3 @@ def test_env():

  x = [s for s in stdout if s.startswith("HOME=")]
  assert x, "HOME not found"
-
-
- @pytest.mark.local
- def test_env_mod():
- ''' Testing for env update at execute time.
- '''
-
- lc = LocalChannel()
- rc, stdout, stderr = lc.execute_wait("env", 1, {'TEST_ENV': 'fooo'})
-
- stdout = stdout.split('\n')
- x = [s for s in stdout if s.startswith("PATH=")]
- assert x, "PATH not found"
-
- x = [s for s in stdout if s.startswith("HOME=")]
- assert x, "HOME not found"
-
- x = [s for s in stdout if s.startswith("TEST_ENV=fooo")]
- assert x, "User set env missing"
parsl/tests/test_providers/test_local_provider.py CHANGED
@@ -12,7 +12,6 @@ import time
  import pytest

  from parsl.channels import LocalChannel
- from parsl.channels.ssh.ssh import DeprecatedSSHChannel
  from parsl.jobs.states import JobState
  from parsl.launchers import SingleNodeLauncher
  from parsl.providers import LocalProvider
@@ -69,140 +68,6 @@ def test_local_channel():
  _run_tests(p)


- SSHD_CONFIG = """
- Port {port}
- ListenAddress 127.0.0.1
- HostKey {hostkey}
- AuthorizedKeysFile {connpubkey}
- AuthenticationMethods publickey
- StrictModes no
- Subsystem sftp {sftp_path}
- """
-
-
- # It would probably be better, when more formalized site testing comes into existence, to
- # use a site-testing provided server/configuration instead of the current scheme
- @pytest.mark.local
- @pytest.mark.sshd_required
- def test_ssh_channel():
- with tempfile.TemporaryDirectory() as config_dir:
- sshd_thread, priv_key, server_port = _start_sshd(config_dir)
- try:
- with tempfile.TemporaryDirectory() as remote_script_dir:
- # The SSH library fails to add the new host key to the file if the file does not
- # already exist, so create it here.
- pathlib.Path('{}/known.hosts'.format(config_dir)).touch(mode=0o600)
- script_dir = tempfile.mkdtemp()
- channel = DeprecatedSSHChannel('127.0.0.1', port=server_port,
- script_dir=remote_script_dir,
- host_keys_filename='{}/known.hosts'.format(config_dir),
- key_filename=priv_key)
- try:
- p = LocalProvider(channel=channel,
- launcher=SingleNodeLauncher(debug=False))
- p.script_dir = script_dir
- _run_tests(p)
- finally:
- channel.close()
- finally:
- _stop_sshd(sshd_thread)
-
-
- def _stop_sshd(sshd_thread):
- sshd_thread.stop()
- sshd_thread.join()
-
-
- class SSHDThread(threading.Thread):
- def __init__(self, config_file):
- threading.Thread.__init__(self, daemon=True)
- self.config_file = config_file
- self.stop_flag = False
- self.error = None
-
- def run(self):
- try:
- # sshd needs to be run with an absolute path, hence the call to which()
- sshpath = shutil.which('sshd')
- assert sshpath is not None, "can find sshd executable"
- p = subprocess.Popen([sshpath, '-D', '-f', self.config_file],
- stdout=subprocess.PIPE, stderr=subprocess.PIPE)
- while True:
- ec = p.poll()
- if self.stop_flag:
- p.terminate()
- break
- elif ec is None:
- time.sleep(0.1)
- elif ec == 0:
- self.error = Exception('sshd exited prematurely: {}{}'.format(p.stdout.read(),
- p.stderr.read()))
- break
- else:
- self.error = Exception('sshd failed: {}{}'.format(p.stdout.read(),
- p.stderr.read()))
- break
- except Exception as ex:
- logger.exception("SSHDThread exception from run loop")
- self.error = ex
-
- def stop(self):
- self.stop_flag = True
-
-
- def _start_sshd(config_dir: str):
- server_config, priv_key, port = _init_sshd(config_dir)
- sshd_thread = SSHDThread(server_config)
- sshd_thread.start()
- time.sleep(1.0)
- if not sshd_thread.is_alive():
- raise Exception('Failed to start sshd: {}'.format(sshd_thread.error))
- return sshd_thread, priv_key, port
-
-
- def _init_sshd(config_dir):
- hostkey = '{}/hostkey'.format(config_dir)
- connkey = '{}/connkey'.format(config_dir)
- os.system('ssh-keygen -b 2048 -t rsa -q -N "" -f {}'.format(hostkey))
- os.system('ssh-keygen -b 2048 -t rsa -q -N "" -f {}'.format(connkey))
- port = _find_free_port(22222)
- server_config_str = SSHD_CONFIG.format(port=port, hostkey=hostkey,
- connpubkey='{}.pub'.format(connkey),
- sftp_path=_get_system_sftp_path())
- server_config = '{}/sshd_config'.format(config_dir)
- with open(server_config, 'w') as f:
- f.write(server_config_str)
- return server_config, connkey, port
-
-
- def _get_system_sftp_path():
- try:
- with open('/etc/ssh/sshd_config') as f:
- line = f.readline()
- while line:
- tokens = line.split()
- if tokens[0] == 'Subsystem' and tokens[1] == 'sftp':
- return tokens[2]
- line = f.readline()
- except Exception:
- pass
- return '/usr/lib/openssh/sftp-server'
-
-
- def _find_free_port(start: int):
- port = start
- while port < 65535:
- s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
- try:
- s.bind(('127.0.0.1', port))
- s.close()
- return port
- except Exception:
- pass
- port += random.randint(1, 20)
- raise Exception('Could not find free port')
-
-
  def _run(p: LocalProvider, command: str, np: int = 1):
  id = p.submit(command, np)
  return _wait(p, id)
parsl/tests/test_providers/test_pbspro_template.py CHANGED
@@ -12,9 +12,10 @@ def test_submit_script_basic(tmp_path):
  """Test slurm resources table"""

  provider = PBSProProvider(
- queue="debug", channel=LocalChannel(script_dir=tmp_path)
+ queue="debug", channel=LocalChannel()
  )
  provider.script_dir = tmp_path
+ provider.channel.script_dir = tmp_path
  job_id = str(random.randint(55000, 59000))
  provider.execute_wait = mock.Mock(spec=PBSProProvider.execute_wait)
  provider.execute_wait.return_value = (0, job_id, "")
parsl/tests/test_providers/test_slurm_template.py CHANGED
@@ -13,9 +13,10 @@ def test_submit_script_basic(tmp_path):
  """Test slurm resources table"""

  provider = SlurmProvider(
- partition="debug", channel=LocalChannel(script_dir=tmp_path)
+ partition="debug", channel=LocalChannel()
  )
  provider.script_dir = tmp_path
+ provider.channel.script_dir = tmp_path
  job_id = str(random.randint(55000, 59000))
  provider.execute_wait = mock.MagicMock(spec=SlurmProvider.execute_wait)
  provider.execute_wait.return_value = (0, f"Submitted batch job {job_id}", "")
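Both template tests now construct LocalChannel() without a script_dir argument and assign the directory afterwards, once on the provider and once on its channel. A condensed sketch of the adjusted setup pattern (names as in the diff; the rest of the test body is omitted here):

    # Condensed sketch of the adjusted test setup pattern.
    provider = SlurmProvider(partition="debug", channel=LocalChannel())
    provider.script_dir = tmp_path           # script_dir is now set on the provider...
    provider.channel.script_dir = tmp_path   # ...and separately on its channel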
parsl/version.py CHANGED
@@ -3,4 +3,4 @@
  Year.Month.Day[alpha/beta/..]
  Alphas will be numbered like this -> 2024.12.10a0
  """
- VERSION = '2024.11.04'
+ VERSION = '2024.11.18'
{parsl-2024.11.4.dist-info → parsl-2024.11.18.dist-info}/METADATA CHANGED
@@ -1,9 +1,9 @@
  Metadata-Version: 2.1
  Name: parsl
- Version: 2024.11.4
+ Version: 2024.11.18
  Summary: Simple data dependent workflows in Python
  Home-page: https://github.com/Parsl/parsl
- Download-URL: https://github.com/Parsl/parsl/archive/2024.11.04.tar.gz
+ Download-URL: https://github.com/Parsl/parsl/archive/2024.11.18.tar.gz
  Author: The Parsl Team
  Author-email: parsl@googlegroups.com
  License: Apache 2.0
@@ -38,7 +38,6 @@ Requires-Dist: plotly; extra == "all"
  Requires-Dist: python-daemon; extra == "all"
  Requires-Dist: boto3; extra == "all"
  Requires-Dist: kubernetes; extra == "all"
- Requires-Dist: oauth-ssh>=0.9; extra == "all"
  Requires-Dist: ipython<=8.6.0; extra == "all"
  Requires-Dist: nbsphinx; extra == "all"
  Requires-Dist: sphinx<7.2,>=7.1; extra == "all"
@@ -55,7 +54,6 @@ Requires-Dist: jsonschema; extra == "all"
  Requires-Dist: proxystore; extra == "all"
  Requires-Dist: radical.pilot==1.60; extra == "all"
  Requires-Dist: radical.utils==1.60; extra == "all"
- Requires-Dist: paramiko; extra == "all"
  Provides-Extra: aws
  Requires-Dist: boto3; extra == "aws"
  Provides-Extra: azure
@@ -79,15 +77,11 @@ Provides-Extra: kubernetes
  Requires-Dist: kubernetes; extra == "kubernetes"
  Provides-Extra: monitoring
  Requires-Dist: sqlalchemy<2,>=1.4; extra == "monitoring"
- Provides-Extra: oauth_ssh
- Requires-Dist: oauth-ssh>=0.9; extra == "oauth-ssh"
  Provides-Extra: proxystore
  Requires-Dist: proxystore; extra == "proxystore"
  Provides-Extra: radical-pilot
  Requires-Dist: radical.pilot==1.60; extra == "radical-pilot"
  Requires-Dist: radical.utils==1.60; extra == "radical-pilot"
- Provides-Extra: ssh
- Requires-Dist: paramiko; extra == "ssh"
  Provides-Extra: visualization
  Requires-Dist: pydot; extra == "visualization"
  Requires-Dist: networkx<2.6,>=2.5; extra == "visualization"