parsl-2024.11.25-py3-none-any.whl → parsl-2024.12.9-py3-none-any.whl

This diff compares the contents of two publicly released versions of the package, as published to their public registry. It is provided for informational purposes only.
Files changed (80)
  1. parsl/configs/ASPIRE1.py +0 -1
  2. parsl/configs/cc_in2p3.py +0 -2
  3. parsl/configs/frontera.py +0 -2
  4. parsl/configs/htex_local.py +0 -2
  5. parsl/dataflow/dflow.py +0 -2
  6. parsl/executors/base.py +1 -1
  7. parsl/executors/high_throughput/executor.py +15 -2
  8. parsl/executors/high_throughput/interchange.py +2 -1
  9. parsl/executors/high_throughput/zmq_pipes.py +13 -4
  10. parsl/monitoring/monitoring.py +1 -1
  11. parsl/monitoring/radios/base.py +13 -0
  12. parsl/monitoring/radios/filesystem.py +52 -0
  13. parsl/monitoring/radios/htex.py +57 -0
  14. parsl/monitoring/radios/multiprocessing.py +17 -0
  15. parsl/monitoring/radios/udp.py +56 -0
  16. parsl/monitoring/radios/zmq.py +17 -0
  17. parsl/monitoring/remote.py +4 -6
  18. parsl/monitoring/router.py +1 -1
  19. parsl/providers/cluster_provider.py +2 -5
  20. parsl/providers/condor/condor.py +4 -13
  21. parsl/providers/grid_engine/grid_engine.py +3 -9
  22. parsl/providers/local/local.py +6 -23
  23. parsl/providers/lsf/lsf.py +3 -18
  24. parsl/providers/pbspro/pbspro.py +3 -10
  25. parsl/providers/slurm/slurm.py +4 -20
  26. parsl/providers/torque/torque.py +2 -10
  27. parsl/tests/configs/cc_in2p3.py +0 -2
  28. parsl/tests/configs/frontera.py +0 -2
  29. parsl/tests/configs/htex_local.py +0 -2
  30. parsl/tests/configs/htex_local_alternate.py +0 -3
  31. parsl/tests/configs/htex_local_intask_staging.py +0 -2
  32. parsl/tests/configs/htex_local_rsync_staging.py +0 -2
  33. parsl/tests/configs/local_threads_monitoring.py +0 -1
  34. parsl/tests/configs/slurm_local.py +0 -2
  35. parsl/tests/manual_tests/htex_local.py +0 -2
  36. parsl/tests/manual_tests/test_memory_limits.py +0 -2
  37. parsl/tests/manual_tests/test_udp_simple.py +0 -1
  38. parsl/tests/scaling_tests/htex_local.py +0 -2
  39. parsl/tests/sites/test_affinity.py +0 -2
  40. parsl/tests/sites/test_worker_info.py +0 -2
  41. parsl/tests/test_htex/test_drain.py +0 -2
  42. parsl/tests/test_htex/test_manager_selector_by_block.py +0 -2
  43. parsl/tests/test_monitoring/test_htex_init_blocks_vs_monitoring.py +0 -3
  44. parsl/tests/test_monitoring/test_stdouterr.py +0 -1
  45. parsl/tests/test_providers/test_local_provider.py +1 -2
  46. parsl/tests/test_providers/test_pbspro_template.py +1 -3
  47. parsl/tests/test_providers/test_slurm_template.py +1 -3
  48. parsl/tests/test_scaling/test_regression_1621.py +0 -2
  49. parsl/tests/test_scaling/test_regression_3568_scaledown_vs_MISSING.py +0 -1
  50. parsl/tests/test_scaling/test_scale_down.py +0 -2
  51. parsl/tests/test_scaling/test_scale_down_htex_auto_scale.py +0 -2
  52. parsl/tests/test_scaling/test_scale_down_htex_unregistered.py +0 -2
  53. parsl/tests/test_scaling/test_shutdown_scalein.py +0 -2
  54. parsl/tests/test_scaling/test_worker_interchange_bad_messages_3262.py +0 -2
  55. parsl/tests/test_staging/test_zip_in.py +0 -1
  56. parsl/tests/test_staging/test_zip_out.py +0 -1
  57. parsl/tests/test_staging/test_zip_to_zip.py +0 -1
  58. parsl/tests/test_utils/test_execute_wait.py +35 -0
  59. parsl/utils.py +35 -0
  60. parsl/version.py +1 -1
  61. {parsl-2024.11.25.data → parsl-2024.12.9.data}/scripts/interchange.py +2 -1
  62. {parsl-2024.11.25.dist-info → parsl-2024.12.9.dist-info}/METADATA +2 -2
  63. {parsl-2024.11.25.dist-info → parsl-2024.12.9.dist-info}/RECORD +71 -73
  64. parsl/channels/__init__.py +0 -4
  65. parsl/channels/base.py +0 -82
  66. parsl/channels/errors.py +0 -30
  67. parsl/channels/local/local.py +0 -102
  68. parsl/monitoring/radios.py +0 -191
  69. parsl/tests/integration/test_channels/__init__.py +0 -0
  70. parsl/tests/test_channels/__init__.py +0 -0
  71. parsl/tests/test_channels/test_large_output.py +0 -22
  72. parsl/tests/test_channels/test_local_channel.py +0 -19
  73. /parsl/{channels/local → monitoring/radios}/__init__.py +0 -0
  74. {parsl-2024.11.25.data → parsl-2024.12.9.data}/scripts/exec_parsl_function.py +0 -0
  75. {parsl-2024.11.25.data → parsl-2024.12.9.data}/scripts/parsl_coprocess.py +0 -0
  76. {parsl-2024.11.25.data → parsl-2024.12.9.data}/scripts/process_worker_pool.py +0 -0
  77. {parsl-2024.11.25.dist-info → parsl-2024.12.9.dist-info}/LICENSE +0 -0
  78. {parsl-2024.11.25.dist-info → parsl-2024.12.9.dist-info}/WHEEL +0 -0
  79. {parsl-2024.11.25.dist-info → parsl-2024.12.9.dist-info}/entry_points.txt +0 -0
  80. {parsl-2024.11.25.dist-info → parsl-2024.12.9.dist-info}/top_level.txt +0 -0
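
The headline change across these files is the removal of the `parsl.channels` package (items 64-67 above): every provider loses its `channel` argument, the channel-based `move_files` machinery goes away, and a module-level `execute_wait` helper is added to `parsl/utils.py` (items 58-59). The monolithic `parsl/monitoring/radios.py` (item 68) is likewise split into a `parsl/monitoring/radios/` package (items 11-16). A hedged migration sketch, mirroring the `htex_local` test config that appears in the hunks below (the `label` value is illustrative):

```python
# parsl <= 2024.11.25: providers accepted an explicit channel
#   from parsl.channels import LocalChannel
#   provider = LocalProvider(channel=LocalChannel(), ...)

# parsl 2024.12.9: the channel argument is gone; simply drop it
from parsl.config import Config
from parsl.executors import HighThroughputExecutor
from parsl.launchers import SimpleLauncher
from parsl.providers import LocalProvider

config = Config(
    executors=[
        HighThroughputExecutor(
            label="htex_local",  # illustrative label
            cores_per_worker=1,
            encrypted=True,
            provider=LocalProvider(
                init_blocks=1,
                max_blocks=1,
                launcher=SimpleLauncher(),
            ),
        )
    ],
)
```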
22. parsl/providers/local/local.py:

@@ -2,7 +2,6 @@ import logging
 import os
 import time
 
-from parsl.channels import LocalChannel
 from parsl.jobs.states import JobState, JobStatus
 from parsl.launchers import SingleNodeLauncher
 from parsl.providers.base import ExecutionProvider
@@ -11,7 +10,7 @@ from parsl.providers.errors import (
     ScriptPathError,
     SubmitException,
 )
-from parsl.utils import RepresentationMixin
+from parsl.utils import RepresentationMixin, execute_wait
 
 logger = logging.getLogger(__name__)
 
@@ -32,15 +31,11 @@ class LocalProvider(ExecutionProvider, RepresentationMixin):
         Ratio of provisioned task slots to active tasks. A parallelism value of 1 represents aggressive
         scaling where as many resources as possible are used; parallelism close to 0 represents
         the opposite situation in which as few resources as possible (i.e., min_blocks) are used.
-    move_files : Optional[Bool]
-        Should files be moved? By default, Parsl will try to figure this out itself (= None).
-        If True, then will always move. If False, will never move.
     worker_init : str
         Command to be run before starting a worker, such as 'module load Anaconda; source activate env'.
     """
 
     def __init__(self,
-                 channel=LocalChannel(),
                  nodes_per_block=1,
                  launcher=SingleNodeLauncher(),
                  init_blocks=1,
@@ -48,9 +43,7 @@ class LocalProvider(ExecutionProvider, RepresentationMixin):
                  max_blocks=1,
                  worker_init='',
                  cmd_timeout=30,
-                 parallelism=1,
-                 move_files=None):
-        self.channel = channel
+                 parallelism=1):
         self._label = 'local'
         self.nodes_per_block = nodes_per_block
         self.launcher = launcher
@@ -61,7 +54,6 @@ class LocalProvider(ExecutionProvider, RepresentationMixin):
         self.parallelism = parallelism
         self.script_dir = None
         self.cmd_timeout = cmd_timeout
-        self.move_files = move_files
 
         # Dictionary that keeps track of jobs, keyed on job_id
         self.resources = {}
@@ -83,7 +75,6 @@ class LocalProvider(ExecutionProvider, RepresentationMixin):
             if job_dict['status'] and job_dict['status'].terminal:
                 # We already checked this and it can't change after that
                 continue
-            # Script path should point to remote path if _should_move_files() is True
             script_path = job_dict['script_path']
 
             alive = self._is_alive(job_dict)
@@ -124,7 +115,7 @@ class LocalProvider(ExecutionProvider, RepresentationMixin):
         return [self.resources[jid]['status'] for jid in job_ids]
 
     def _is_alive(self, job_dict):
-        retcode, stdout, stderr = self.channel.execute_wait(
+        retcode, stdout, stderr = execute_wait(
             'ps -p {} > /dev/null 2> /dev/null; echo "STATUS:$?" '.format(
                 job_dict['remote_pid']), self.cmd_timeout)
         for line in stdout.split('\n'):
@@ -137,8 +128,6 @@ class LocalProvider(ExecutionProvider, RepresentationMixin):
 
     def _job_file_path(self, script_path: str, suffix: str) -> str:
         path = '{0}{1}'.format(script_path, suffix)
-        if self._should_move_files():
-            path = self.channel.pull_file(path, self.script_dir)
         return path
 
     def _read_job_file(self, script_path: str, suffix: str) -> str:
@@ -216,9 +205,6 @@ class LocalProvider(ExecutionProvider, RepresentationMixin):
 
         job_id = None
         remote_pid = None
-        if self._should_move_files():
-            logger.debug("Pushing start script")
-            script_path = self.channel.push_file(script_path, self.channel.script_dir)
 
         logger.debug("Launching")
         # We need to capture the exit code and the streams, so we put them in files. We also write
@@ -234,11 +220,11 @@ class LocalProvider(ExecutionProvider, RepresentationMixin):
         # cancel the task later.
         #
         # We need to do the >/dev/null 2>&1 so that bash closes stdout, otherwise
-        # channel.execute_wait hangs reading the process stdout until all the
+        # execute_wait hangs reading the process stdout until all the
         # background commands complete.
         cmd = '/bin/bash -c \'echo - >{0}.ec && {{ {{ bash {0} 1>{0}.out 2>{0}.err ; ' \
              'echo $? > {0}.ec ; }} >/dev/null 2>&1 & echo "PID:$!" ; }}\''.format(script_path)
-        retcode, stdout, stderr = self.channel.execute_wait(cmd, self.cmd_timeout)
+        retcode, stdout, stderr = execute_wait(cmd, self.cmd_timeout)
         if retcode != 0:
             raise SubmitException(job_name, "Launch command exited with code {0}".format(retcode),
                                   stdout, stderr)
@@ -254,9 +240,6 @@ class LocalProvider(ExecutionProvider, RepresentationMixin):
 
         return job_id
 
-    def _should_move_files(self):
-        return (self.move_files is None and not isinstance(self.channel, LocalChannel)) or (self.move_files)
-
     def cancel(self, job_ids):
         ''' Cancels the jobs specified by a list of job ids
 
@@ -272,7 +255,7 @@ class LocalProvider(ExecutionProvider, RepresentationMixin):
             job_dict['cancelled'] = True
             logger.debug("Terminating job/process ID: {0}".format(job))
             cmd = "kill -- -$(ps -o pgid= {} | grep -o '[0-9]*')".format(job_dict['remote_pid'])
-            retcode, stdout, stderr = self.channel.execute_wait(cmd, self.cmd_timeout)
+            retcode, stdout, stderr = execute_wait(cmd, self.cmd_timeout)
             if retcode != 0:
                 logger.warning("Failed to kill PID: {} and child processes on {}".format(job_dict['remote_pid'],
                                                                                          self.label))
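
`LocalProvider` now calls the new module-level `parsl.utils.execute_wait` (added in this release; see `parsl/utils.py +35` and its test in the file list) wherever it previously called `self.channel.execute_wait`. A minimal sketch of the helper as the hunks above use it, assuming the `(retcode, stdout, stderr)` return tuple and the positional timeout shown there:

```python
from parsl.utils import execute_wait

# Run a shell command and wait for it, as the provider does for launch,
# liveness checks, and cancellation. The second argument is a timeout in
# seconds (the provider passes self.cmd_timeout).
retcode, stdout, stderr = execute_wait('echo "PID:$$"', 30)

if retcode != 0:
    raise RuntimeError("command failed: {}".format(stderr))

# The provider parses the launcher's stdout for a "PID:<pid>" marker.
for line in stdout.split('\n'):
    if line.startswith("PID:"):
        print("background PID is", line.split("PID:", 1)[1])
```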
23. parsl/providers/lsf/lsf.py:

@@ -3,7 +3,6 @@ import math
 import os
 import time
 
-from parsl.channels import LocalChannel
 from parsl.jobs.states import JobState, JobStatus
 from parsl.launchers import SingleNodeLauncher
 from parsl.providers.cluster_provider import ClusterProvider
@@ -32,8 +31,6 @@ class LSFProvider(ClusterProvider, RepresentationMixin):
 
     Parameters
     ----------
-    channel : Channel
-        Channel for accessing this provider.
     nodes_per_block : int
         Nodes to provision per block.
         When request_by_nodes is False, it is computed by cores_per_block / cores_per_node.
@@ -68,7 +65,6 @@ class LSFProvider(ClusterProvider, RepresentationMixin):
         :class:`~parsl.launchers.SingleNodeLauncher` (the default),
         :class:`~parsl.launchers.SrunLauncher`, or
         :class:`~parsl.launchers.AprunLauncher`
-    move_files : Optional[Bool]: should files be moved? by default, Parsl will try to move files.
     bsub_redirection: Bool
         Should a redirection symbol "<" be included when submitting jobs, i.e., Bsub < job_script.
     request_by_nodes: Bool
@@ -78,7 +74,6 @@ class LSFProvider(ClusterProvider, RepresentationMixin):
     """
 
     def __init__(self,
-                 channel=LocalChannel(),
                  nodes_per_block=1,
                  cores_per_block=None,
                  cores_per_node=None,
@@ -92,13 +87,11 @@ class LSFProvider(ClusterProvider, RepresentationMixin):
                  project=None,
                  queue=None,
                  cmd_timeout=120,
-                 move_files=True,
                  bsub_redirection=False,
                  request_by_nodes=True,
                  launcher=SingleNodeLauncher()):
        label = 'LSF'
        super().__init__(label,
-                         channel,
                          nodes_per_block,
                          init_blocks,
                          min_blocks,
@@ -112,7 +105,6 @@ class LSFProvider(ClusterProvider, RepresentationMixin):
         self.queue = queue
         self.cores_per_block = cores_per_block
         self.cores_per_node = cores_per_node
-        self.move_files = move_files
         self.bsub_redirection = bsub_redirection
         self.request_by_nodes = request_by_nodes
 
@@ -214,7 +206,7 @@ class LSFProvider(ClusterProvider, RepresentationMixin):
         logger.debug("Requesting one block with {} nodes".format(self.nodes_per_block))
 
         job_config = {}
-        job_config["submit_script_dir"] = self.channel.script_dir
+        job_config["submit_script_dir"] = self.script_dir
         job_config["nodes"] = self.nodes_per_block
         job_config["tasks_per_node"] = tasks_per_node
         job_config["walltime"] = wtime_to_minutes(self.walltime)
@@ -230,17 +222,10 @@ class LSFProvider(ClusterProvider, RepresentationMixin):
         logger.debug("Writing submit script")
         self._write_submit_script(template_string, script_path, job_name, job_config)
 
-        if self.move_files:
-            logger.debug("moving files")
-            channel_script_path = self.channel.push_file(script_path, self.channel.script_dir)
-        else:
-            logger.debug("not moving files")
-            channel_script_path = script_path
-
         if self.bsub_redirection:
-            cmd = "bsub < {0}".format(channel_script_path)
+            cmd = "bsub < {0}".format(script_path)
         else:
-            cmd = "bsub {0}".format(channel_script_path)
+            cmd = "bsub {0}".format(script_path)
        retcode, stdout, stderr = super().execute_wait(cmd)
 
        job_id = None
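
With the file-moving path gone, `LSFProvider` hands `bsub` the locally written submit script directly; `bsub_redirection` still selects between `bsub script` and `bsub < script`, as the last hunk shows. A sketch of the updated constructor call (queue and project names are placeholders, not from this diff):

```python
from parsl.launchers import SingleNodeLauncher
from parsl.providers import LSFProvider

provider = LSFProvider(
    nodes_per_block=1,
    init_blocks=1,
    queue="normal",         # placeholder queue name
    project="my-project",   # placeholder project/allocation
    cmd_timeout=120,
    bsub_redirection=True,  # submit as "bsub < script" instead of "bsub script"
    request_by_nodes=True,
    launcher=SingleNodeLauncher(),
)
```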
24. parsl/providers/pbspro/pbspro.py:

@@ -3,7 +3,6 @@ import logging
 import os
 import time
 
-from parsl.channels import LocalChannel
 from parsl.jobs.states import JobState, JobStatus
 from parsl.launchers import SingleNodeLauncher
 from parsl.providers.pbspro.template import template_string
@@ -17,8 +16,6 @@ class PBSProProvider(TorqueProvider):
 
     Parameters
     ----------
-    channel : Channel
-        Channel for accessing this provider.
     account : str
         Account the job will be charged against.
     queue : str
@@ -51,7 +48,6 @@ class PBSProProvider(TorqueProvider):
         :class:`~parsl.launchers.SingleNodeLauncher`.
     """
     def __init__(self,
-                 channel=LocalChannel(),
                  account=None,
                  queue=None,
                  scheduler_options='',
@@ -66,8 +62,7 @@ class PBSProProvider(TorqueProvider):
                  launcher=SingleNodeLauncher(),
                  walltime="00:20:00",
                  cmd_timeout=120):
-        super().__init__(channel,
-                         account,
+        super().__init__(account,
                          queue,
                          scheduler_options,
                          worker_init,
@@ -159,7 +154,7 @@ class PBSProProvider(TorqueProvider):
         )
 
         job_config = {}
-        job_config["submit_script_dir"] = self.channel.script_dir
+        job_config["submit_script_dir"] = self.script_dir
         job_config["nodes_per_block"] = self.nodes_per_block
         job_config["ncpus"] = self.cpus_per_node
         job_config["walltime"] = self.walltime
@@ -183,15 +178,13 @@ class PBSProProvider(TorqueProvider):
         logger.debug("Writing submit script")
         self._write_submit_script(self.template_string, script_path, job_name, job_config)
 
-        channel_script_path = self.channel.push_file(script_path, self.channel.script_dir)
-
         submit_options = ''
         if self.queue is not None:
             submit_options = '{0} -q {1}'.format(submit_options, self.queue)
         if self.account is not None:
             submit_options = '{0} -A {1}'.format(submit_options, self.account)
 
-        launch_cmd = "qsub {0} {1}".format(submit_options, channel_script_path)
+        launch_cmd = "qsub {0} {1}".format(submit_options, script_path)
         retcode, stdout, stderr = self.execute_wait(launch_cmd)
 
         job_id = None
25. parsl/providers/slurm/slurm.py:

@@ -3,12 +3,10 @@ import math
 import os
 import re
 import time
-from typing import Optional
+from typing import Any, Dict, Optional
 
 import typeguard
 
-from parsl.channels import LocalChannel
-from parsl.channels.base import Channel
 from parsl.jobs.states import JobState, JobStatus
 from parsl.launchers import SingleNodeLauncher
 from parsl.launchers.base import Launcher
@@ -73,8 +71,6 @@ class SlurmProvider(ClusterProvider, RepresentationMixin):
     clusters : str
         Slurm cluster name, or comma seperated cluster list, used to choose between different clusters in a federated Slurm instance.
         If unspecified or ``None``, no slurm directive for clusters will be added.
-    channel : Channel
-        Channel for accessing this provider.
     nodes_per_block : int
         Nodes to provision per block.
     cores_per_node : int
@@ -110,7 +106,6 @@ class SlurmProvider(ClusterProvider, RepresentationMixin):
         :class:`~parsl.launchers.SingleNodeLauncher` (the default),
         :class:`~parsl.launchers.SrunLauncher`, or
         :class:`~parsl.launchers.AprunLauncher`
-    move_files : Optional[Bool]: should files be moved? by default, Parsl will try to move files.
     """
 
     @typeguard.typechecked
@@ -120,7 +115,6 @@ class SlurmProvider(ClusterProvider, RepresentationMixin):
                  qos: Optional[str] = None,
                  constraint: Optional[str] = None,
                  clusters: Optional[str] = None,
-                 channel: Channel = LocalChannel(),
                  nodes_per_block: int = 1,
                  cores_per_node: Optional[int] = None,
                  mem_per_node: Optional[int] = None,
@@ -134,11 +128,9 @@ class SlurmProvider(ClusterProvider, RepresentationMixin):
                  worker_init: str = '',
                  cmd_timeout: int = 10,
                  exclusive: bool = True,
-                 move_files: bool = True,
                  launcher: Launcher = SingleNodeLauncher()):
        label = 'slurm'
        super().__init__(label,
-                         channel,
                          nodes_per_block,
                          init_blocks,
                          min_blocks,
@@ -152,7 +144,6 @@ class SlurmProvider(ClusterProvider, RepresentationMixin):
         self.cores_per_node = cores_per_node
         self.mem_per_node = mem_per_node
         self.exclusive = exclusive
-        self.move_files = move_files
         self.account = account
         self.qos = qos
         self.constraint = constraint
@@ -289,8 +280,8 @@ class SlurmProvider(ClusterProvider, RepresentationMixin):
 
         logger.debug("Requesting one block with {} nodes".format(self.nodes_per_block))
 
-        job_config = {}
-        job_config["submit_script_dir"] = self.channel.script_dir
+        job_config: Dict[str, Any] = {}
+        job_config["submit_script_dir"] = self.script_dir
         job_config["nodes"] = self.nodes_per_block
         job_config["tasks_per_node"] = tasks_per_node
         job_config["walltime"] = wtime_to_minutes(self.walltime)
@@ -308,14 +299,7 @@ class SlurmProvider(ClusterProvider, RepresentationMixin):
         logger.debug("Writing submit script")
         self._write_submit_script(template_string, script_path, job_name, job_config)
 
-        if self.move_files:
-            logger.debug("moving files")
-            channel_script_path = self.channel.push_file(script_path, self.channel.script_dir)
-        else:
-            logger.debug("not moving files")
-            channel_script_path = script_path
-
-        retcode, stdout, stderr = self.execute_wait("sbatch {0}".format(channel_script_path))
+        retcode, stdout, stderr = self.execute_wait("sbatch {0}".format(script_path))
 
         if retcode == 0:
             for line in stdout.split('\n'):
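
The same change applied to `SlurmProvider`: `channel` and `move_files` disappear from the signature, and `sbatch` receives the submit-host script path directly. An updated configuration sketch, with scheduler-specific values as placeholders (`partition` is an existing `SlurmProvider` parameter not shown in these hunks):

```python
from parsl.launchers import SrunLauncher
from parsl.providers import SlurmProvider

provider = SlurmProvider(
    partition="compute",   # placeholder partition name
    account="my-account",  # placeholder allocation
    nodes_per_block=2,
    init_blocks=1,
    min_blocks=1,
    max_blocks=1,
    exclusive=True,
    cmd_timeout=60,        # extra headroom for slow scheduler responses
    launcher=SrunLauncher(),
)
```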
26. parsl/providers/torque/torque.py:

@@ -2,7 +2,6 @@ import logging
 import os
 import time
 
-from parsl.channels import LocalChannel
 from parsl.jobs.states import JobState, JobStatus
 from parsl.launchers import AprunLauncher
 from parsl.providers.cluster_provider import ClusterProvider
@@ -33,8 +32,6 @@ class TorqueProvider(ClusterProvider, RepresentationMixin):
 
     Parameters
     ----------
-    channel : Channel
-        Channel for accessing this provider.
     account : str
         Account the job will be charged against.
     queue : str
@@ -65,7 +62,6 @@ class TorqueProvider(ClusterProvider, RepresentationMixin):
 
     """
     def __init__(self,
-                 channel=LocalChannel(),
                  account=None,
                  queue=None,
                  scheduler_options='',
@@ -80,7 +76,6 @@ class TorqueProvider(ClusterProvider, RepresentationMixin):
                  cmd_timeout=120):
        label = 'torque'
        super().__init__(label,
-                         channel,
                          nodes_per_block,
                          init_blocks,
                          min_blocks,
@@ -170,8 +165,7 @@ class TorqueProvider(ClusterProvider, RepresentationMixin):
                                        tasks_per_node)
 
         job_config = {}
-        # TODO : script_path might need to change to accommodate script dir set via channels
-        job_config["submit_script_dir"] = self.channel.script_dir
+        job_config["submit_script_dir"] = self.script_dir
         job_config["nodes"] = self.nodes_per_block
         job_config["task_blocks"] = self.nodes_per_block * tasks_per_node
         job_config["nodes_per_block"] = self.nodes_per_block
@@ -189,15 +183,13 @@ class TorqueProvider(ClusterProvider, RepresentationMixin):
         logger.debug("Writing submit script")
         self._write_submit_script(self.template_string, script_path, job_name, job_config)
 
-        channel_script_path = self.channel.push_file(script_path, self.channel.script_dir)
-
         submit_options = ''
         if self.queue is not None:
             submit_options = '{0} -q {1}'.format(submit_options, self.queue)
         if self.account is not None:
             submit_options = '{0} -A {1}'.format(submit_options, self.account)
 
-        launch_cmd = "qsub {0} {1}".format(submit_options, channel_script_path)
+        launch_cmd = "qsub {0} {1}".format(submit_options, script_path)
         retcode, stdout, stderr = self.execute_wait(launch_cmd)
 
         job_id = None
27. parsl/tests/configs/cc_in2p3.py:

@@ -1,4 +1,3 @@
-from parsl.channels import LocalChannel
 from parsl.config import Config
 from parsl.executors import HighThroughputExecutor
 from parsl.providers import GridEngineProvider
@@ -14,7 +13,6 @@ def fresh_config():
             max_workers_per_node=1,
             encrypted=True,
             provider=GridEngineProvider(
-                channel=LocalChannel(),
                 nodes_per_block=2,
                 init_blocks=2,
                 max_blocks=2,
28. parsl/tests/configs/frontera.py:

@@ -1,4 +1,3 @@
-from parsl.channels import LocalChannel
 from parsl.config import Config
 from parsl.executors import HighThroughputExecutor
 from parsl.launchers import SrunLauncher
@@ -20,7 +19,6 @@ def fresh_config():
             encrypted=True,
             provider=SlurmProvider(
                 cmd_timeout=60,  # Add extra time for slow scheduler responses
-                channel=LocalChannel(),
                 nodes_per_block=2,
                 init_blocks=1,
                 min_blocks=1,
29. parsl/tests/configs/htex_local.py:

@@ -1,4 +1,3 @@
-from parsl.channels import LocalChannel
 from parsl.config import Config
 from parsl.executors import HighThroughputExecutor
 from parsl.launchers import SimpleLauncher
@@ -15,7 +14,6 @@ def fresh_config():
             cores_per_worker=1,
             encrypted=True,
             provider=LocalProvider(
-                channel=LocalChannel(),
                 init_blocks=1,
                 max_blocks=1,
                 launcher=SimpleLauncher(),
30. parsl/tests/configs/htex_local_alternate.py:

@@ -17,7 +17,6 @@ those timing parameters control.
 
 import os
 
-from parsl.channels import LocalChannel
 from parsl.config import Config
 from parsl.data_provider.file_noop import NoOpFileStaging
 from parsl.data_provider.ftp import FTPInTaskStaging
@@ -48,7 +47,6 @@ def fresh_config():
             poll_period=100,
             encrypted=True,
             provider=LocalProvider(
-                channel=LocalChannel(),
                 init_blocks=0,
                 min_blocks=0,
                 max_blocks=5,
@@ -62,7 +60,6 @@ def fresh_config():
         retries=2,
         monitoring=MonitoringHub(
             hub_address="localhost",
-            hub_port=55055,
             monitoring_debug=False,
             resource_monitoring_interval=1,
         ),
31. parsl/tests/configs/htex_local_intask_staging.py:

@@ -1,4 +1,3 @@
-from parsl.channels import LocalChannel
 from parsl.config import Config
 from parsl.data_provider.file_noop import NoOpFileStaging
 from parsl.data_provider.ftp import FTPInTaskStaging
@@ -15,7 +14,6 @@ config = Config(
             cores_per_worker=1,
             encrypted=True,
             provider=LocalProvider(
-                channel=LocalChannel(),
                 init_blocks=1,
                 max_blocks=1,
                 launcher=SimpleLauncher(),
32. parsl/tests/configs/htex_local_rsync_staging.py:

@@ -1,4 +1,3 @@
-from parsl.channels import LocalChannel
 from parsl.config import Config
 from parsl.data_provider.ftp import FTPInTaskStaging
 from parsl.data_provider.http import HTTPInTaskStaging
@@ -16,7 +15,6 @@ config = Config(
             working_dir="./rsync-workdir/",
             encrypted=True,
             provider=LocalProvider(
-                channel=LocalChannel(),
                 init_blocks=1,
                 max_blocks=1,
                 launcher=SimpleLauncher(),
33. parsl/tests/configs/local_threads_monitoring.py:

@@ -5,7 +5,6 @@ from parsl.monitoring import MonitoringHub
 config = Config(executors=[ThreadPoolExecutor(label='threads', max_threads=4)],
                 monitoring=MonitoringHub(
                     hub_address="localhost",
-                    hub_port=55055,
                     resource_monitoring_interval=3,
                 )
                 )
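
This and several later test configs stop pinning `hub_port=55055` on `MonitoringHub`, leaving port selection to the default behavior. The resulting config, sketched from the hunk above:

```python
from parsl.config import Config
from parsl.executors.threads import ThreadPoolExecutor
from parsl.monitoring import MonitoringHub

config = Config(
    executors=[ThreadPoolExecutor(label='threads', max_threads=4)],
    monitoring=MonitoringHub(
        hub_address="localhost",
        # hub_port is no longer pinned; the hub now picks a port itself
        resource_monitoring_interval=3,
    ),
)
```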
34. parsl/tests/configs/slurm_local.py:

@@ -1,4 +1,3 @@
-from parsl.channels import LocalChannel
 from parsl.config import Config
 from parsl.executors import HighThroughputExecutor
 from parsl.launchers import SrunLauncher
@@ -13,7 +12,6 @@ def fresh_config():
             encrypted=True,
             provider=SlurmProvider(
                 cmd_timeout=60,  # Add extra time for slow scheduler responses
-                channel=LocalChannel(),
                 nodes_per_block=1,
                 init_blocks=1,
                 min_blocks=1,
35. parsl/tests/manual_tests/htex_local.py:

@@ -1,4 +1,3 @@
-from parsl.channels import LocalChannel
 from parsl.config import Config
 from parsl.executors import HighThroughputExecutor
 
@@ -15,7 +14,6 @@ config = Config(
             cores_per_worker=1,
             encrypted=True,
             provider=LocalProvider(
-                channel=LocalChannel(),
                 init_blocks=1,
                 max_blocks=1,
                 # tasks_per_node=1,  # For HighThroughputExecutor, this option should in most cases be 1
36. parsl/tests/manual_tests/test_memory_limits.py:

@@ -5,7 +5,6 @@ import psutil
 
 import parsl
 from parsl.app.app import python_app  # , bash_app
-from parsl.channels import LocalChannel
 from parsl.config import Config
 from parsl.executors import HighThroughputExecutor
 from parsl.launchers import SingleNodeLauncher
@@ -30,7 +29,6 @@ def test_simple(mem_per_worker):
             suppress_failure=True,
             encrypted=True,
             provider=LocalProvider(
-                channel=LocalChannel(),
                 init_blocks=1,
                 max_blocks=1,
                 launcher=SingleNodeLauncher(),
37. parsl/tests/manual_tests/test_udp_simple.py:

@@ -15,7 +15,6 @@ def local_setup():
         ],
         monitoring=MonitoringHub(
             hub_address="127.0.0.1",
-            hub_port=55055,
             logging_level=logging.INFO,
             resource_monitoring_interval=10))
 
38. parsl/tests/scaling_tests/htex_local.py:

@@ -1,4 +1,3 @@
-from parsl.channels import LocalChannel
 from parsl.config import Config
 from parsl.executors import HighThroughputExecutor
 from parsl.providers import LocalProvider
@@ -12,7 +11,6 @@ config = Config(
             max_workers_per_node=8,
             encrypted=True,
             provider=LocalProvider(
-                channel=LocalChannel(),
                 init_blocks=1,
                 max_blocks=1,
             ),
39. parsl/tests/sites/test_affinity.py:

@@ -5,7 +5,6 @@ import os
 import pytest
 
 from parsl import python_app
-from parsl.channels import LocalChannel
 from parsl.config import Config
 from parsl.executors import HighThroughputExecutor
 from parsl.providers import LocalProvider
@@ -22,7 +21,6 @@ def local_config():
             available_accelerators=2,
             encrypted=True,
             provider=LocalProvider(
-                channel=LocalChannel(),
                 init_blocks=1,
                 max_blocks=1,
             ),
40. parsl/tests/sites/test_worker_info.py:

@@ -3,7 +3,6 @@
 import pytest
 
 from parsl import python_app
-from parsl.channels import LocalChannel
 from parsl.config import Config
 from parsl.executors import HighThroughputExecutor
 from parsl.providers import LocalProvider
@@ -18,7 +17,6 @@ def local_config():
             max_workers_per_node=4,
             encrypted=True,
             provider=LocalProvider(
-                channel=LocalChannel(),
                 init_blocks=1,
                 max_blocks=1,
             ),
41. parsl/tests/test_htex/test_drain.py:

@@ -3,7 +3,6 @@ import time
 import pytest
 
 import parsl
-from parsl.channels import LocalChannel
 from parsl.config import Config
 from parsl.executors import HighThroughputExecutor
 from parsl.launchers import SimpleLauncher
@@ -28,7 +27,6 @@ def local_config():
             cores_per_worker=1,
             encrypted=True,
             provider=LocalProvider(
-                channel=LocalChannel(),
                 init_blocks=1,
                 min_blocks=0,
                 max_blocks=0,
42. parsl/tests/test_htex/test_manager_selector_by_block.py:

@@ -4,7 +4,6 @@ import pytest
 
 import parsl
 from parsl.app.app import bash_app, python_app
-from parsl.channels import LocalChannel
 from parsl.config import Config
 from parsl.executors import HighThroughputExecutor
 from parsl.executors.high_throughput.manager_selector import (
@@ -31,7 +30,6 @@ def test_block_id_selection(try_assert):
             max_workers_per_node=1,
             manager_selector=BlockIdManagerSelector(),
             provider=LocalProvider(
-                channel=LocalChannel(),
                 init_blocks=BLOCK_COUNT,
                 max_blocks=BLOCK_COUNT,
                 min_blocks=BLOCK_COUNT,
43. parsl/tests/test_monitoring/test_htex_init_blocks_vs_monitoring.py:

@@ -5,7 +5,6 @@ import time
 import pytest
 
 import parsl
-from parsl.channels import LocalChannel
 from parsl.config import Config
 from parsl.executors import HighThroughputExecutor
 from parsl.launchers import SimpleLauncher
@@ -22,7 +21,6 @@ def fresh_config(run_dir, strategy, db_url):
             cores_per_worker=1,
             encrypted=True,
             provider=LocalProvider(
-                channel=LocalChannel(),
                 init_blocks=1,
                 # min and max are set to 0 to ensure that we don't get
                 # a block from ongoing strategy scaling, only from
@@ -37,7 +35,6 @@ def fresh_config(run_dir, strategy, db_url):
         strategy_period=0.1,
         monitoring=MonitoringHub(
             hub_address="localhost",
-            hub_port=55055,
             logging_endpoint=db_url
         )
     )
44. parsl/tests/test_monitoring/test_stdouterr.py:

@@ -37,7 +37,6 @@ def fresh_config(run_dir):
         strategy_period=0.1,
         monitoring=MonitoringHub(
             hub_address="localhost",
-            hub_port=55055,
         )
     )
 