parsl 2024.1.22__py3-none-any.whl → 2024.2.5__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (69)
  1. parsl/app/errors.py +1 -5
  2. parsl/curvezmq.py +205 -0
  3. parsl/dataflow/dflow.py +1 -1
  4. parsl/executors/high_throughput/executor.py +78 -49
  5. parsl/executors/high_throughput/interchange.py +14 -7
  6. parsl/executors/high_throughput/process_worker_pool.py +44 -9
  7. parsl/executors/high_throughput/zmq_pipes.py +21 -15
  8. parsl/executors/taskvine/manager.py +60 -43
  9. parsl/executors/taskvine/manager_config.py +14 -0
  10. parsl/monitoring/monitoring.py +22 -4
  11. parsl/monitoring/remote.py +1 -1
  12. parsl/providers/errors.py +4 -6
  13. parsl/providers/slurm/slurm.py +7 -6
  14. parsl/tests/configs/ad_hoc_cluster_htex.py +1 -0
  15. parsl/tests/configs/azure_single_node.py +1 -0
  16. parsl/tests/configs/bluewaters.py +1 -0
  17. parsl/tests/configs/bridges.py +1 -0
  18. parsl/tests/configs/cc_in2p3.py +1 -0
  19. parsl/tests/configs/comet.py +1 -0
  20. parsl/tests/configs/cooley_htex.py +1 -0
  21. parsl/tests/configs/ec2_single_node.py +1 -0
  22. parsl/tests/configs/ec2_spot.py +1 -0
  23. parsl/tests/configs/frontera.py +1 -0
  24. parsl/tests/configs/htex_ad_hoc_cluster.py +1 -0
  25. parsl/tests/configs/htex_local.py +1 -0
  26. parsl/tests/configs/htex_local_alternate.py +1 -0
  27. parsl/tests/configs/htex_local_intask_staging.py +1 -0
  28. parsl/tests/configs/htex_local_rsync_staging.py +1 -0
  29. parsl/tests/configs/local_adhoc.py +1 -0
  30. parsl/tests/configs/midway.py +1 -0
  31. parsl/tests/configs/nscc_singapore.py +1 -0
  32. parsl/tests/configs/osg_htex.py +1 -0
  33. parsl/tests/configs/petrelkube.py +1 -0
  34. parsl/tests/configs/summit.py +1 -0
  35. parsl/tests/configs/swan_htex.py +1 -0
  36. parsl/tests/configs/theta.py +1 -0
  37. parsl/tests/conftest.py +12 -2
  38. parsl/tests/manual_tests/htex_local.py +1 -0
  39. parsl/tests/manual_tests/test_ad_hoc_htex.py +1 -0
  40. parsl/tests/manual_tests/test_fan_in_out_htex_remote.py +1 -0
  41. parsl/tests/manual_tests/test_memory_limits.py +1 -0
  42. parsl/tests/scaling_tests/htex_local.py +1 -0
  43. parsl/tests/sites/test_affinity.py +1 -0
  44. parsl/tests/sites/test_concurrent.py +2 -1
  45. parsl/tests/sites/test_dynamic_executor.py +1 -0
  46. parsl/tests/sites/test_worker_info.py +1 -0
  47. parsl/tests/test_bash_apps/test_stdout.py +6 -1
  48. parsl/tests/test_curvezmq.py +455 -0
  49. parsl/tests/test_data/test_file_apps.py +5 -5
  50. parsl/tests/test_data/test_file_staging.py +3 -3
  51. parsl/tests/test_docs/test_kwargs.py +3 -3
  52. parsl/tests/test_htex/test_cpu_affinity_explicit.py +52 -0
  53. parsl/tests/test_htex/test_htex.py +46 -0
  54. parsl/tests/test_htex/test_htex_zmq_binding.py +53 -13
  55. parsl/tests/test_python_apps/test_futures.py +5 -5
  56. parsl/tests/test_regression/test_97_parallelism_0.py +1 -0
  57. parsl/tests/test_scaling/test_block_error_handler.py +6 -5
  58. parsl/tests/test_scaling/test_regression_1621.py +1 -0
  59. parsl/tests/test_scaling/test_scale_down.py +1 -0
  60. parsl/version.py +1 -1
  61. {parsl-2024.1.22.data → parsl-2024.2.5.data}/scripts/process_worker_pool.py +44 -9
  62. {parsl-2024.1.22.dist-info → parsl-2024.2.5.dist-info}/METADATA +5 -6
  63. {parsl-2024.1.22.dist-info → parsl-2024.2.5.dist-info}/RECORD +69 -65
  64. {parsl-2024.1.22.data → parsl-2024.2.5.data}/scripts/exec_parsl_function.py +0 -0
  65. {parsl-2024.1.22.data → parsl-2024.2.5.data}/scripts/parsl_coprocess.py +0 -0
  66. {parsl-2024.1.22.dist-info → parsl-2024.2.5.dist-info}/LICENSE +0 -0
  67. {parsl-2024.1.22.dist-info → parsl-2024.2.5.dist-info}/WHEEL +0 -0
  68. {parsl-2024.1.22.dist-info → parsl-2024.2.5.dist-info}/entry_points.txt +0 -0
  69. {parsl-2024.1.22.dist-info → parsl-2024.2.5.dist-info}/top_level.txt +0 -0
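
Note: the headline change in this release is opt-in CurveZMQ encryption for the HighThroughputExecutor: the new parsl/curvezmq.py module (+205 lines) wraps ZMQ contexts, and every bundled test configuration gains an explicit encrypted=True, as the config diffs near the end of this page show. A minimal sketch of what enabling it looks like in user code; only the encrypted flag is taken from this diff, the surrounding local setup is illustrative:

    from parsl.config import Config
    from parsl.executors import HighThroughputExecutor
    from parsl.providers import LocalProvider

    config = Config(
        executors=[
            HighThroughputExecutor(
                label="htex_encrypted",
                encrypted=True,  # new in 2024.2.5: CurveZMQ-secured executor sockets
                provider=LocalProvider(init_blocks=1),
            )
        ]
    )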
parsl/executors/high_throughput/zmq_pipes.py CHANGED
@@ -4,24 +4,28 @@ import zmq
 import logging
 import threading
 
+from parsl import curvezmq
+
 logger = logging.getLogger(__name__)
 
 
 class CommandClient:
     """ CommandClient
     """
-    def __init__(self, ip_address, port_range):
+    def __init__(self, zmq_context: curvezmq.ClientContext, ip_address, port_range):
         """
         Parameters
         ----------
 
+        zmq_context: curvezmq.ClientContext
+            CurveZMQ client context used to create secure sockets
         ip_address: str
             IP address of the client (where Parsl runs)
         port_range: tuple(int, int)
             Port range for the comms between client and interchange
 
         """
-        self.context = zmq.Context()
+        self.zmq_context = zmq_context
         self.ip_address = ip_address
         self.port_range = port_range
         self.port = None
@@ -33,7 +37,7 @@ class CommandClient:
 
         Upon recreating the socket, we bind to the same port.
         """
-        self.zmq_socket = self.context.socket(zmq.REQ)
+        self.zmq_socket = self.zmq_context.socket(zmq.REQ)
         self.zmq_socket.setsockopt(zmq.LINGER, 0)
         if self.port is None:
             self.port = self.zmq_socket.bind_to_random_port("tcp://{}".format(self.ip_address),
@@ -62,9 +66,7 @@ class CommandClient:
             except zmq.ZMQError:
                 logger.exception("Potential ZMQ REQ-REP deadlock caught")
                 logger.info("Trying to reestablish context")
-                self.zmq_socket.close()
-                self.context.destroy()
-                self.context = zmq.Context()
+                self.zmq_context.recreate()
                 self.create_socket_and_bind()
             else:
                 break
@@ -77,25 +79,27 @@ class CommandClient:
 
     def close(self):
         self.zmq_socket.close()
-        self.context.term()
+        self.zmq_context.term()
 
 
 class TasksOutgoing:
     """ Outgoing task queue from the executor to the Interchange
     """
-    def __init__(self, ip_address, port_range):
+    def __init__(self, zmq_context: curvezmq.ClientContext, ip_address, port_range):
         """
         Parameters
         ----------
 
+        zmq_context: curvezmq.ClientContext
+            CurveZMQ client context used to create secure sockets
         ip_address: str
             IP address of the client (where Parsl runs)
         port_range: tuple(int, int)
             Port range for the comms between client and interchange
 
         """
-        self.context = zmq.Context()
-        self.zmq_socket = self.context.socket(zmq.DEALER)
+        self.zmq_context = zmq_context
+        self.zmq_socket = self.zmq_context.socket(zmq.DEALER)
         self.zmq_socket.set_hwm(0)
         self.port = self.zmq_socket.bind_to_random_port("tcp://{}".format(ip_address),
                                                         min_port=port_range[0],
@@ -127,26 +131,28 @@ class TasksOutgoing:
 
     def close(self):
         self.zmq_socket.close()
-        self.context.term()
+        self.zmq_context.term()
 
 
 class ResultsIncoming:
     """ Incoming results queue from the Interchange to the executor
     """
 
-    def __init__(self, ip_address, port_range):
+    def __init__(self, zmq_context: curvezmq.ClientContext, ip_address, port_range):
         """
         Parameters
         ----------
 
+        zmq_context: curvezmq.ClientContext
+            CurveZMQ client context used to create secure sockets
         ip_address: str
             IP address of the client (where Parsl runs)
         port_range: tuple(int, int)
             Port range for the comms between client and interchange
 
         """
-        self.context = zmq.Context()
-        self.results_receiver = self.context.socket(zmq.DEALER)
+        self.zmq_context = zmq_context
+        self.results_receiver = self.zmq_context.socket(zmq.DEALER)
         self.results_receiver.set_hwm(0)
         self.port = self.results_receiver.bind_to_random_port("tcp://{}".format(ip_address),
                                                                min_port=port_range[0],
@@ -160,4 +166,4 @@ class ResultsIncoming:
 
     def close(self):
         self.results_receiver.close()
-        self.context.term()
+        self.zmq_context.term()
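
Note: the three pipe classes above no longer own a raw zmq.Context; they borrow a curvezmq.ClientContext and rely only on its socket(), recreate() and term() methods. A stand-in sketch of that contract, assuming nothing about the real class beyond what the diff shows (the actual ClientContext in parsl/curvezmq.py additionally applies CURVE key material, which is omitted here):

    import zmq

    class StandInClientContext:
        """Illustrates the interface the pipe classes require; NOT the
        real curvezmq.ClientContext, which also configures encryption."""

        def __init__(self) -> None:
            self._ctx = zmq.Context()

        def socket(self, sock_type: int) -> zmq.Socket:
            # The real implementation would set CURVE options here
            # before handing the socket back.
            return self._ctx.socket(sock_type)

        def recreate(self) -> None:
            # Replaces the old inline destroy-and-rebuild sequence
            # (self.context.destroy(); self.context = zmq.Context()).
            self._ctx.destroy()
            self._ctx = zmq.Context()

        def term(self) -> None:
            self._ctx.term()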
parsl/executors/taskvine/manager.py CHANGED
@@ -47,6 +47,10 @@ def _set_manager_attributes(m, config):
     if config.enable_peer_transfers:
         m.enable_peer_transfers()
 
+    # Set catalog report to parsl if project name exists
+    if m.name:
+        m.set_property("framework", "parsl")
+
 
 def _prepare_environment_serverless(manager_config, env_cache_dir, poncho_create_script):
     # Return path to a packaged poncho environment
@@ -203,7 +207,7 @@ def _taskvine_submit_wait(ready_task_queue=None,
             break
 
         # Submit tasks
-        while ready_task_queue.qsize() > 0 and not should_stop.is_set():
+        while ready_task_queue.qsize() > 0 or m.empty() and not should_stop.is_set():
             # Obtain task from ready_task_queue
             try:
                 task = ready_task_queue.get(timeout=1)
@@ -248,6 +252,22 @@ def _taskvine_submit_wait(ready_task_queue=None,
                                                poncho_env=poncho_env_path,
                                                init_command=manager_config.init_command,
                                                add_env=add_env)
+
+                # Configure the library if provided
+                if manager_config.library_config:
+                    lib_cores = manager_config.library_config.get('cores', None)
+                    lib_memory = manager_config.library_config.get('memory', None)
+                    lib_disk = manager_config.library_config.get('disk', None)
+                    lib_slots = manager_config.library_config.get('num_slots', None)
+                    if lib_cores:
+                        serverless_lib.set_cores(lib_cores)
+                    if lib_memory:
+                        serverless_lib.set_memory(lib_memory)
+                    if lib_disk:
+                        serverless_lib.set_disk(lib_disk)
+                    if lib_slots:
+                        serverless_lib.set_function_slots(lib_slots)
+
                 if poncho_env_path:
                     serverless_lib_env_file = m.declare_poncho(poncho_env_path, cache=True, peer_transfer=True)
                     serverless_lib.add_environment(serverless_lib_env_file)
@@ -377,48 +397,45 @@ def _taskvine_submit_wait(ready_task_queue=None,
 
         # If the queue is not empty wait on the TaskVine queue for a task
         task_found = True
-        if not m.empty():
-            while task_found and not should_stop.is_set():
-                # Obtain the task from the queue
-                t = m.wait(1)
-                if t is None:
-                    task_found = False
-                    continue
-                logger.debug('Found a task')
-                executor_task_id = vine_id_to_executor_task_id[str(t.id)][0]
-                vine_id_to_executor_task_id.pop(str(t.id))
-
-                # When a task is found
-                result_file = result_file_of_task_id.pop(executor_task_id)
-
-                logger.debug(f"completed executor task info: {executor_task_id}, {t.category}, {t.command}, {t.std_output}")
-
-                # A tasks completes 'succesfully' if it has result file.
-                # A check whether the Python object represented using this file can be
-                # deserialized happens later in the collector thread of the executor
-                # process.
-                logger.debug("Looking for result in {}".format(result_file))
-                if os.path.exists(result_file):
-                    logger.debug("Found result in {}".format(result_file))
-                    finished_task_queue.put_nowait(VineTaskToParsl(executor_id=executor_task_id,
-                                                                   result_received=True,
-                                                                   result_file=result_file,
-                                                                   reason=None,
-                                                                   status=t.exit_code))
-                # If a result file could not be generated, explain the
-                # failure according to taskvine error codes.
-                else:
-                    reason = _explain_taskvine_result(t)
-                    logger.debug("Did not find result in {}".format(result_file))
-                    logger.debug("Wrapper Script status: {}\nTaskVine Status: {}"
-                                 .format(t.exit_code, t.result))
-                    logger.debug("Task with executor id {} / vine id {} failed because:\n{}"
-                                 .format(executor_task_id, t.id, reason))
-                    finished_task_queue.put_nowait(VineTaskToParsl(executor_id=executor_task_id,
-                                                                   result_received=False,
-                                                                   result_file=None,
-                                                                   reason=reason,
-                                                                   status=t.exit_code))
+        while not m.empty() and task_found and not should_stop.is_set():
+            # Obtain the task from the queue
+            t = m.wait(1)
+            if t is None:
+                task_found = False
+                continue
+            logger.debug('Found a task')
+            executor_task_id = vine_id_to_executor_task_id[str(t.id)][0]
+            vine_id_to_executor_task_id.pop(str(t.id))
+
+            # When a task is found
+            result_file = result_file_of_task_id.pop(executor_task_id)
+
+            logger.debug(f"completed executor task info: {executor_task_id}, {t.category}, {t.command}, {t.std_output}")
+
+            # A tasks completes 'succesfully' if it has result file.
+            # A check whether the Python object represented using this file can be
+            # deserialized happens later in the collector thread of the executor
+            # process.
+            logger.debug("Looking for result in {}".format(result_file))
+            if os.path.exists(result_file):
+                logger.debug("Found result in {}".format(result_file))
+                finished_task_queue.put_nowait(VineTaskToParsl(executor_id=executor_task_id,
+                                                               result_received=True,
+                                                               result_file=result_file,
+                                                               reason=None,
+                                                               status=t.exit_code))
+            # If a result file could not be generated, explain the
+            # failure according to taskvine error codes.
+            else:
+                reason = _explain_taskvine_result(t)
+                logger.debug("Did not find result in {}".format(result_file))
+                logger.debug("Wrapper Script status: {}\nTaskVine Status: {}".format(t.exit_code, t.result))
+                logger.debug("Task with executor id {} / vine id {} failed because:\n{}".format(executor_task_id, t.id, reason))
+                finished_task_queue.put_nowait(VineTaskToParsl(executor_id=executor_task_id,
+                                                               result_received=False,
+                                                               result_file=None,
+                                                               reason=reason,
+                                                               status=t.exit_code))
 
         logger.debug("Exiting TaskVine Monitoring Process")
         return 0
parsl/executors/taskvine/manager_config.py CHANGED
@@ -84,6 +84,19 @@ class TaskVineManagerConfig:
         forever.
         Default is 1.
 
+    library_config: Optional[dict]
+        Only and must specify when functions are executed in the serverless mode.
+        Configure the number of function slots and amount of resources
+        a library task can run. A library task is a stateful object that executes
+        functions in the serverless way. Accept the following keywords:
+        'num_slots', 'cores', 'memory (MBs)', 'disk (MBs)'.
+        Default is {'num_slots': 1, 'cores': None, 'memory': None, 'disk': None},
+        which will take all resources of a worker node and run at most 1 function
+        invocation at any given time.
+        E.g., {'num_slots': 4, 'cores': 16, 'memory': 16000, 'disk': 16000} will
+        reserve those resources to the library task to run at most 4 function
+        invocations.
+
     shared_fs: bool
         Whether workers will use a shared filesystem or not. If so, TaskVine
         will not track and transfer files for execution, in exchange for
@@ -159,6 +172,7 @@ class TaskVineManagerConfig:
     app_pack: bool = False
     extra_pkgs: Optional[list] = None
     max_retries: int = 1
+    library_config: Optional[dict] = None
 
     # Performance-specific settings
     shared_fs: bool = False
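
Note: given the docstring above, wiring library_config into an executor would look roughly like this; the resource values are the docstring's own example, while the TaskVineExecutor wiring is an assumption about the usual entry point:

    from parsl.executors.taskvine import TaskVineExecutor, TaskVineManagerConfig

    # Reserve 16 cores / 16000 MB memory / 16000 MB disk for the library
    # task and allow up to 4 concurrent function invocations inside it.
    manager_config = TaskVineManagerConfig(
        library_config={'num_slots': 4, 'cores': 16, 'memory': 16000, 'disk': 16000},
    )
    executor = TaskVineExecutor(manager_config=manager_config)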
parsl/monitoring/monitoring.py CHANGED
@@ -1,3 +1,5 @@
+from __future__ import annotations
+
 import os
 import socket
 import time
@@ -11,7 +13,8 @@ import queue
 import parsl.monitoring.remote
 
 from parsl.multiprocessing import ForkProcess, SizedQueue
-from multiprocessing import Process, Queue
+from multiprocessing import Process
+from multiprocessing.queues import Queue
 from parsl.utils import RepresentationMixin
 from parsl.process_loggers import wrap_with_logs
 from parsl.utils import setproctitle
@@ -20,7 +23,7 @@ from parsl.serialize import deserialize
 
 from parsl.monitoring.message_type import MessageType
 from parsl.monitoring.types import AddressedMonitoringMessage, TaggedMonitoringMessage
-from typing import cast, Any, Callable, Dict, Optional, Sequence, Tuple, Union
+from typing import cast, Any, Callable, Dict, Optional, Sequence, Tuple, Union, TYPE_CHECKING
 
 _db_manager_excepts: Optional[Exception]
 
@@ -125,7 +128,10 @@ class MonitoringHub(RepresentationMixin):
             This will include environment information such as start time, hostname and block id,
             along with periodic resource usage of each task. Default: True
         resource_monitoring_interval : float
-            The time interval, in seconds, at which the monitoring records the resource usage of each task. Default: 30 seconds
+            The time interval, in seconds, at which the monitoring records the resource usage of each task.
+            If set to 0, only start and end information will be logged, and no periodic monitoring will
+            be made.
+            Default: 30 seconds
         """
 
         self.logger = logger
@@ -168,7 +174,19 @@ class MonitoringHub(RepresentationMixin):
         self.logger.debug("Initializing ZMQ Pipes to client")
         self.monitoring_hub_active = True
 
-        comm_q: Queue[Union[Tuple[int, int], str]]
+        # This annotation is incompatible with typeguard 4.x instrumentation
+        # of local variables: Queue is not subscriptable at runtime, as far
+        # as typeguard is concerned. The more general Queue annotation works,
+        # but does not restrict the contents of the Queue. Using TYPE_CHECKING
+        # here allows the stricter definition to be seen by mypy, and the
+        # simpler definition to be seen by typeguard. Hopefully at some point
+        # in the future, Queue will allow runtime subscripts.
+
+        if TYPE_CHECKING:
+            comm_q: Queue[Union[Tuple[int, int], str]]
+        else:
+            comm_q: Queue
+
        comm_q = SizedQueue(maxsize=10)
 
        self.exception_q: Queue[Tuple[str, str]]
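
Note: the TYPE_CHECKING split above is a general workaround for annotations that are valid to mypy but rejected by runtime checkers; reduced to its essentials (the names here are illustrative, not from parsl):

    from multiprocessing import get_context
    from multiprocessing.queues import Queue
    from typing import TYPE_CHECKING, Union

    if TYPE_CHECKING:
        # Seen only by static checkers: constrains the queue's contents.
        q: Queue[Union[int, str]]
    else:
        # Seen at runtime (and by typeguard's instrumentation), which
        # does not accept the subscripted form.
        q: Queue

    q = get_context().Queue()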
parsl/monitoring/remote.py CHANGED
@@ -43,7 +43,7 @@ def monitor_wrapper(f: Any,  # per app
                            radio_mode,
                            run_dir)
 
-    if monitor_resources:
+    if monitor_resources and sleep_dur > 0:
         # create the monitor process and start
         pp = ForkProcess(target=monitor,
                          args=(os.getpid(),
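
Note: together with the monitoring.py docstring change above, this guard means resource_monitoring_interval=0 now skips spawning the per-task monitor process entirely; only task start and end information is logged. A hedged configuration sketch (the hub address and executor choice are illustrative):

    from parsl.config import Config
    from parsl.executors import ThreadPoolExecutor
    from parsl.monitoring import MonitoringHub

    config = Config(
        executors=[ThreadPoolExecutor()],
        monitoring=MonitoringHub(
            hub_address="127.0.0.1",
            resource_monitoring_interval=0,  # log task start/end only; no periodic sampling
        ),
    )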
parsl/providers/errors.py CHANGED
@@ -51,20 +51,18 @@ class SubmitException(ExecutionProviderException):
     '''Raised by the submit() method of a provider if there is an error in launching a job.
     '''
 
-    def __init__(self, job_name, message, stdout=None, stderr=None):
+    def __init__(self, job_name, message, stdout=None, stderr=None, retcode=None):
         self.job_name = job_name
         self.message = message
         self.stdout = stdout
         self.stderr = stderr
+        self.retcode = retcode
 
     @property
     def task_name(self) -> str:
         warnings.warn("task_name is deprecated; use .job_name instead. This will be removed after 2024-06.", DeprecationWarning)
         return self.job_name
 
-    def __str__(self):
+    def __str__(self) -> str:
         # TODO: make this more user-friendly
-        return "Cannot launch job {0}: {1}; stdout={2}, stderr={3}".format(self.job_name,
-                                                                           self.message,
-                                                                           self.stdout,
-                                                                           self.stderr)
+        return f"Cannot launch job {self.job_name}: {self.message}; recode={self.retcode}, stdout={self.stdout}, stderr={self.stderr}"
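
Note: with the new retcode field, the exception's string form (as defined directly above, including the upstream "recode=" spelling) renders like this:

    from parsl.providers.errors import SubmitException

    exc = SubmitException("parsl.slurm.0", "Submit command failed",
                          stdout="", stderr="sbatch: error", retcode=1)
    print(exc)
    # Cannot launch job parsl.slurm.0: Submit command failed; recode=1, stdout=, stderr=sbatch: error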
parsl/providers/slurm/slurm.py CHANGED
@@ -13,6 +13,7 @@ from parsl.jobs.states import JobState, JobStatus
 from parsl.launchers import SingleNodeLauncher
 from parsl.launchers.base import Launcher
 from parsl.providers.cluster_provider import ClusterProvider
+from parsl.providers.errors import SubmitException
 from parsl.providers.slurm.template import template_string
 from parsl.utils import RepresentationMixin, wtime_to_minutes
 
@@ -194,7 +195,7 @@ class SlurmProvider(ClusterProvider, RepresentationMixin):
                 logger.debug("Updating missing job {} to completed status".format(missing_job))
                 self.resources[missing_job]['status'] = JobStatus(JobState.COMPLETED)
 
-    def submit(self, command, tasks_per_node, job_name="parsl.slurm"):
+    def submit(self, command: str, tasks_per_node: int, job_name="parsl.slurm") -> str:
         """Submit the command as a slurm job.
 
         Parameters
@@ -207,8 +208,8 @@ class SlurmProvider(ClusterProvider, RepresentationMixin):
             Name for the job
         Returns
         -------
-        None or str
-            If at capacity, returns None; otherwise, a string identifier for the job
+        job id : str
+            A string identifier for the job
         """
 
         scheduler_options = self.scheduler_options
@@ -254,21 +255,21 @@ class SlurmProvider(ClusterProvider, RepresentationMixin):
 
         retcode, stdout, stderr = self.execute_wait("sbatch {0}".format(channel_script_path))
 
-        job_id = None
         if retcode == 0:
             for line in stdout.split('\n'):
                 match = re.match(self.regex_job_id, line)
                 if match:
                     job_id = match.group("id")
                     self.resources[job_id] = {'job_id': job_id, 'status': JobStatus(JobState.PENDING)}
-                    break
+                    return job_id
             else:
                 logger.error("Could not read job ID from submit command standard output.")
                 logger.error("Retcode:%s STDOUT:%s STDERR:%s", retcode, stdout.strip(), stderr.strip())
+                raise SubmitException(job_name, "Could not read job ID from submit command standard output", stdout=stdout, stderr=stderr, retcode=retcode)
         else:
             logger.error("Submit command failed")
             logger.error("Retcode:%s STDOUT:%s STDERR:%s", retcode, stdout.strip(), stderr.strip())
-        return job_id
+            raise SubmitException(job_name, "Could not read job ID from submit command standard output", stdout=stdout, stderr=stderr, retcode=retcode)
 
     def cancel(self, job_ids):
         ''' Cancels the jobs specified by a list of job ids
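
Note: the practical consequence of the slurm.py change above is that submit() no longer returns None on failure, so callers that tested the return value must catch the exception instead. A sketch under that assumption (the partition name and command are illustrative):

    from parsl.providers import SlurmProvider
    from parsl.providers.errors import SubmitException

    provider = SlurmProvider('debug')

    try:
        job_id = provider.submit("sleep 60", tasks_per_node=1)
    except SubmitException as e:
        # New in 2024.2.5: failures raise instead of returning None;
        # e.retcode carries the sbatch exit status when available.
        print(f"submit failed (retcode={e.retcode}): {e}")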
parsl/tests/configs/ad_hoc_cluster_htex.py CHANGED
@@ -18,6 +18,7 @@ config = Config(
            label='remote_htex',
            max_workers=2,
            worker_logdir_root=user_opts['adhoc']['script_dir'],
+           encrypted=True,
            provider=AdHocProvider(
                # Command to be run before starting a worker, such as:
                # 'module load Anaconda; source activate parsl_env'.
parsl/tests/configs/azure_single_node.py CHANGED
@@ -40,6 +40,7 @@ config = Config(
            storage_access=[HTTPInTaskStaging(), FTPInTaskStaging(), RSyncStaging(getpass.getuser() + "@" + user_opts['public_ip'])],
            label='azure_single_node',
            address=user_opts['public_ip'],
+           encrypted=True,
            provider=provider
        )
    ]
parsl/tests/configs/bluewaters.py CHANGED
@@ -14,6 +14,7 @@ def fresh_config():
            cores_per_worker=1,
            worker_debug=False,
            max_workers=1,
+           encrypted=True,
            provider=TorqueProvider(
                queue='normal',
                launcher=AprunLauncher(overrides="-b -- bwpy-environ --"),
parsl/tests/configs/bridges.py CHANGED
@@ -14,6 +14,7 @@ def fresh_config():
            # which compute nodes can communicate
            # address=address_by_interface('bond0.144'),
            max_workers=1,
+           encrypted=True,
            provider=SlurmProvider(
                user_opts['bridges']['partition'],  # Partition / QOS
                nodes_per_block=2,
parsl/tests/configs/cc_in2p3.py CHANGED
@@ -12,6 +12,7 @@ def fresh_config():
        HighThroughputExecutor(
            label='cc_in2p3_htex',
            max_workers=1,
+           encrypted=True,
            provider=GridEngineProvider(
                channel=LocalChannel(),
                nodes_per_block=2,
parsl/tests/configs/comet.py CHANGED
@@ -11,6 +11,7 @@ def fresh_config():
        HighThroughputExecutor(
            label='Comet_HTEX_multinode',
            max_workers=1,
+           encrypted=True,
            provider=SlurmProvider(
                'debug',
                launcher=SrunLauncher(),
parsl/tests/configs/cooley_htex.py CHANGED
@@ -18,6 +18,7 @@ config = Config(
            label="cooley_htex",
            worker_debug=False,
            cores_per_worker=1,
+           encrypted=True,
            provider=CobaltProvider(
                queue='debug',
                account=user_opts['cooley']['account'],
parsl/tests/configs/ec2_single_node.py CHANGED
@@ -28,6 +28,7 @@ config = Config(
        HighThroughputExecutor(
            label='ec2_single_node',
            address=user_opts['public_ip'],
+           encrypted=True,
            provider=AWSProvider(
                user_opts['ec2']['image_id'],
                region=user_opts['ec2']['region'],
parsl/tests/configs/ec2_spot.py CHANGED
@@ -15,6 +15,7 @@ config = Config(
        HighThroughputExecutor(
            label='ec2_single_node',
            address=user_opts['public_ip'],
+           encrypted=True,
            provider=AWSProvider(
                user_opts['ec2']['image_id'],
                region=user_opts['ec2']['region'],
parsl/tests/configs/frontera.py CHANGED
@@ -16,6 +16,7 @@ def fresh_config():
        HighThroughputExecutor(
            label="frontera_htex",
            max_workers=1,
+           encrypted=True,
            provider=SlurmProvider(
                cmd_timeout=60,  # Add extra time for slow scheduler responses
                channel=LocalChannel(),
parsl/tests/configs/htex_ad_hoc_cluster.py CHANGED
@@ -13,6 +13,7 @@ config = Config(
            cores_per_worker=1,
            worker_debug=False,
            address=user_opts['public_ip'],
+           encrypted=True,
            provider=AdHocProvider(
                move_files=False,
                parallelism=1,
parsl/tests/configs/htex_local.py CHANGED
@@ -13,6 +13,7 @@ def fresh_config():
            label="htex_local",
            worker_debug=True,
            cores_per_worker=1,
+           encrypted=True,
            provider=LocalProvider(
                channel=LocalChannel(),
                init_blocks=1,
parsl/tests/configs/htex_local_alternate.py CHANGED
@@ -48,6 +48,7 @@ def fresh_config():
            heartbeat_period=2,
            heartbeat_threshold=5,
            poll_period=100,
+           encrypted=True,
            provider=LocalProvider(
                channel=LocalChannel(),
                init_blocks=0,
parsl/tests/configs/htex_local_intask_staging.py CHANGED
@@ -15,6 +15,7 @@ config = Config(
            label="htex_Local",
            worker_debug=True,
            cores_per_worker=1,
+           encrypted=True,
            provider=LocalProvider(
                channel=LocalChannel(),
                init_blocks=1,
parsl/tests/configs/htex_local_rsync_staging.py CHANGED
@@ -16,6 +16,7 @@ config = Config(
            worker_debug=True,
            cores_per_worker=1,
            working_dir="./rsync-workdir/",
+           encrypted=True,
            provider=LocalProvider(
                channel=LocalChannel(),
                init_blocks=1,
parsl/tests/configs/local_adhoc.py CHANGED
@@ -9,6 +9,7 @@ def fresh_config():
        executors=[
            HighThroughputExecutor(
                label='AdHoc',
+               encrypted=True,
                provider=AdHocProvider(
                    channels=[LocalChannel(), LocalChannel()]
                )
parsl/tests/configs/midway.py CHANGED
@@ -13,6 +13,7 @@ def fresh_config():
            label='Midway_HTEX_multinode',
            worker_debug=False,
            max_workers=1,
+           encrypted=True,
            provider=SlurmProvider(
                'broadwl',  # Partition name, e.g 'broadwl'
                launcher=SrunLauncher(),
parsl/tests/configs/nscc_singapore.py CHANGED
@@ -17,6 +17,7 @@ def fresh_config():
            worker_debug=False,
            max_workers=1,
            address=address_by_interface('ib0'),
+           encrypted=True,
            provider=PBSProProvider(
                launcher=MpiRunLauncher(),
                # string to prepend to #PBS blocks in the submit
parsl/tests/configs/osg_htex.py CHANGED
@@ -14,6 +14,7 @@ config = Config(
        HighThroughputExecutor(
            label='OSG_HTEX',
            max_workers=1,
+           encrypted=True,
            provider=CondorProvider(
                nodes_per_block=1,
                init_blocks=4,
parsl/tests/configs/petrelkube.py CHANGED
@@ -23,6 +23,7 @@ def fresh_config():
 
            # Address for the pod worker to connect back
            address=address_by_route(),
+           encrypted=True,
            provider=KubernetesProvider(
                namespace="dlhub-privileged",
 
parsl/tests/configs/summit.py CHANGED
@@ -21,6 +21,7 @@ def fresh_config():
            # address=address_by_interface('ib0'),  # This assumes Parsl is running on login node
            worker_port_range=(50000, 55000),
            max_workers=1,
+           encrypted=True,
            provider=LSFProvider(
                launcher=JsrunLauncher(),
                walltime="00:10:00",
parsl/tests/configs/swan_htex.py CHANGED
@@ -24,6 +24,7 @@ config = Config(
    executors=[
        HighThroughputExecutor(
            label='swan_htex',
+           encrypted=True,
            provider=TorqueProvider(
                channel=SSHChannel(
                    hostname='swan.cray.com',
parsl/tests/configs/theta.py CHANGED
@@ -12,6 +12,7 @@ def fresh_config():
        HighThroughputExecutor(
            label='theta_local_htex_multinode',
            max_workers=1,
+           encrypted=True,
            provider=CobaltProvider(
                queue=user_opts['theta']['queue'],
                account=user_opts['theta']['account'],