parsl-2024.2.19-py3-none-any.whl → parsl-2024.2.26-py3-none-any.whl
This diff compares the contents of two publicly released versions of the parsl package, as they appear in their public registry. It is provided for informational purposes only.
- parsl/channels/errors.py +1 -4
- parsl/configs/{comet.py → expanse.py} +5 -5
- parsl/dataflow/dflow.py +12 -12
- parsl/executors/high_throughput/executor.py +19 -2
- parsl/jobs/states.py +5 -5
- parsl/monitoring/monitoring.py +7 -4
- parsl/multiprocessing.py +3 -4
- parsl/providers/cobalt/cobalt.py +6 -0
- parsl/providers/pbspro/pbspro.py +18 -4
- parsl/providers/pbspro/template.py +2 -2
- parsl/providers/slurm/slurm.py +17 -4
- parsl/providers/slurm/template.py +2 -2
- parsl/tests/test_htex/test_htex.py +66 -3
- parsl/tests/test_mpi_apps/test_mpi_scheduler.py +1 -1
- parsl/tests/test_providers/test_cobalt_deprecation_warning.py +16 -0
- parsl/tests/test_providers/test_pbspro_template.py +28 -0
- parsl/tests/test_providers/test_slurm_template.py +29 -0
- parsl/tests/test_radical/test_mpi_funcs.py +1 -0
- parsl/tests/test_serialization/test_htex_code_cache.py +57 -0
- parsl/usage_tracking/usage.py +12 -9
- parsl/version.py +1 -1
- {parsl-2024.2.19.dist-info → parsl-2024.2.26.dist-info}/METADATA +2 -2
- {parsl-2024.2.19.dist-info → parsl-2024.2.26.dist-info}/RECORD +30 -28
- parsl/configs/cooley.py +0 -29
- parsl/configs/theta.py +0 -33
- {parsl-2024.2.19.data → parsl-2024.2.26.data}/scripts/exec_parsl_function.py +0 -0
- {parsl-2024.2.19.data → parsl-2024.2.26.data}/scripts/parsl_coprocess.py +0 -0
- {parsl-2024.2.19.data → parsl-2024.2.26.data}/scripts/process_worker_pool.py +0 -0
- {parsl-2024.2.19.dist-info → parsl-2024.2.26.dist-info}/LICENSE +0 -0
- {parsl-2024.2.19.dist-info → parsl-2024.2.26.dist-info}/WHEEL +0 -0
- {parsl-2024.2.19.dist-info → parsl-2024.2.26.dist-info}/entry_points.txt +0 -0
- {parsl-2024.2.19.dist-info → parsl-2024.2.26.dist-info}/top_level.txt +0 -0
parsl/channels/errors.py
CHANGED
@@ -14,11 +14,8 @@ class ChannelError(ParslError):
         self.e = e
         self.hostname = hostname

-    def __repr__(self) -> str:
-        return "Hostname:{0}, Reason:{1}".format(self.hostname, self.reason)
-
     def __str__(self) -> str:
-        return self.
+        return "Hostname:{0}, Reason:{1}".format(self.hostname, self.reason)


 class BadHostKeyException(ChannelError):
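In effect, str() on a channel error now produces the hostname/reason text directly, while repr() falls back to the default object form. A minimal sketch of the visible behaviour (the constructor argument order is an assumption based on the fields set in __init__):

from parsl.channels.errors import ChannelError

# Assumed signature: ChannelError(reason, e, hostname); __str__ now
# formats the message itself rather than delegating to the removed __repr__.
err = ChannelError("ssh handshake failed", Exception("boom"), "login1.example.edu")
print(str(err))  # Hostname:login1.example.edu, Reason:ssh handshake failed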
parsl/configs/{comet.py → expanse.py}
RENAMED
@@ -7,11 +7,11 @@ from parsl.executors import HighThroughputExecutor
 config = Config(
     executors=[
         HighThroughputExecutor(
-            label='
-
-            max_workers=2,
+            label='Expanse_CPU_Multinode',
+            max_workers=32,
             provider=SlurmProvider(
-                '
+                'compute',
+                account='YOUR_ALLOCATION_ON_EXPANSE',
                 launcher=SrunLauncher(),
                 # string to prepend to #SBATCH blocks in the submit
                 # script to the scheduler
@@ -19,7 +19,7 @@ config = Config(
                 # Command to be run before starting a worker, such as:
                 # 'module load Anaconda; source activate parsl_env'.
                 worker_init='',
-                walltime='00:
+                walltime='01:00:00',
                 init_blocks=1,
                 max_blocks=1,
                 nodes_per_block=2,
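The renamed module is used like any other bundled site config; a minimal sketch, assuming the allocation placeholder in the file has been edited first:

import parsl
from parsl.configs.expanse import config  # formerly parsl.configs.comet

parsl.load(config)

@parsl.python_app
def hello():
    return "hello from Expanse"

print(hello().result())
parsl.dfk().cleanup()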
parsl/dataflow/dflow.py
CHANGED
@@ -113,7 +113,7 @@ class DataFlowKernel:
             if self.monitoring.logdir is None:
                 self.monitoring.logdir = self.run_dir
             self.hub_address = self.monitoring.hub_address
-            self.hub_interchange_port = self.monitoring.start(self.run_id, self.run_dir)
+            self.hub_interchange_port = self.monitoring.start(self.run_id, self.run_dir, self.config.run_dir)

         self.time_began = datetime.datetime.now()
         self.time_completed: Optional[datetime.datetime] = None
@@ -678,10 +678,10 @@ class DataFlowKernel:
            task_record : The task record

        Returns:
-            Future that tracks the execution of the submitted
+            Future that tracks the execution of the submitted function
        """
        task_id = task_record['id']
-
+        function = task_record['func']
        args = task_record['args']
        kwargs = task_record['kwargs']

@@ -706,17 +706,17 @@ class DataFlowKernel:

        if self.monitoring is not None and self.monitoring.resource_monitoring_enabled:
            wrapper_logging_level = logging.DEBUG if self.monitoring.monitoring_debug else logging.INFO
-            (
-
-
-
-
-
-
+            (function, args, kwargs) = self.monitoring.monitor_wrapper(function, args, kwargs, try_id, task_id,
+                                                                       self.monitoring.monitoring_hub_url,
+                                                                       self.run_id,
+                                                                       wrapper_logging_level,
+                                                                       self.monitoring.resource_monitoring_interval,
+                                                                       executor.radio_mode,
+                                                                       executor.monitor_resources(),
+                                                                       self.run_dir)

        with self.submitter_lock:
-            exec_fu = executor.submit(
+            exec_fu = executor.submit(function, task_record['resource_specification'], *args, **kwargs)
            self.update_task_state(task_record, States.launched)

        self._send_task_log_info(task_record)
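The launch path now binds the task's callable to a local `function` and forwards the task's resource specification positionally to executor.submit. That specification originates at app invocation; a hedged sketch (the parsl_resource_specification keys shown are illustrative, in the style used by HTEX MPI support):

import parsl

@parsl.python_app
def sim():
    return 0

# The dict passed here lands in task_record['resource_specification'],
# which the DataFlowKernel forwards as the second positional argument of
# executor.submit(function, resource_specification, *args, **kwargs).
fut = sim(parsl_resource_specification={"num_nodes": 2, "ranks_per_node": 4})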
parsl/executors/high_throughput/executor.py
CHANGED
@@ -6,7 +6,7 @@ import threading
 import queue
 import datetime
 import pickle
-from multiprocessing import Queue
+from multiprocessing import Process, Queue
 from typing import Dict, Sequence
 from typing import List, Optional, Tuple, Union, Callable
 import math
@@ -290,6 +290,7 @@ class HighThroughputExecutor(BlockProviderExecutor, RepresentationMixin):
         self.hub_port = None  # set to the correct hub port in dfk
         self.worker_ports = worker_ports
         self.worker_port_range = worker_port_range
+        self.interchange_proc: Optional[Process] = None
         self.interchange_port_range = interchange_port_range
         self.heartbeat_threshold = heartbeat_threshold
         self.heartbeat_period = heartbeat_period
@@ -766,12 +767,28 @@ class HighThroughputExecutor(BlockProviderExecutor, RepresentationMixin):
         )
         return job_status

-    def shutdown(self):
+    def shutdown(self, timeout: float = 10.0):
         """Shutdown the executor, including the interchange. This does not
         shut down any workers directly - workers should be terminated by the
         scaling mechanism or by heartbeat timeout.
+
+        Parameters
+        ----------
+
+        timeout : float
+            Amount of time to wait for the Interchange process to terminate before
+            we forcefully kill it.
         """
+        if self.interchange_proc is None:
+            logger.info("HighThroughputExecutor has not started; skipping shutdown")
+            return

         logger.info("Attempting HighThroughputExecutor shutdown")
+
         self.interchange_proc.terminate()
+        self.interchange_proc.join(timeout=timeout)
+        if self.interchange_proc.is_alive():
+            logger.info("Unable to terminate Interchange process; sending SIGKILL")
+            self.interchange_proc.kill()
+
         logger.info("Finished HighThroughputExecutor shutdown attempt")
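A small driver showing the two new behaviours; a sketch, not part of the diff (the label is illustrative):

from parsl.executors import HighThroughputExecutor

htex = HighThroughputExecutor(label="htex_demo", encrypted=True)

# interchange_proc stays None until start() launches the interchange, so
# shutting down a never-started executor is now a logged no-op instead of
# an AttributeError.
htex.shutdown()

# After start(), shutdown() terminates the interchange, waits up to
# `timeout` seconds for it to exit, then escalates to SIGKILL.
htex.shutdown(timeout=5.0)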
parsl/jobs/states.py
CHANGED
@@ -47,7 +47,7 @@ class JobState(IntEnum):
     """

     def __str__(self) -> str:
-        return self.__class__.__name__
+        return f"{self.__class__.__name__}.{self.name}"


 TERMINAL_STATES = [JobState.CANCELLED, JobState.COMPLETED, JobState.FAILED,
@@ -84,16 +84,16 @@ class JobStatus:

     def __repr__(self) -> str:
         if self.message is not None:
-            extra = f"state={self.state} message={self.message}"
+            extra = f"state={self.state} message={self.message}"
         else:
-            extra = f"state={self.state}"
+            extra = f"state={self.state}"
         return f"<{type(self).__module__}.{type(self).__qualname__} object at {hex(id(self))}, {extra}>"

     def __str__(self) -> str:
         if self.message is not None:
-            return "{} ({
+            return f"{self.state} ({self.message})"
         else:
-            return "{
+            return f"{self.state}"

     @property
     def stdout(self) -> Optional[str]:
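The change is directly visible in string formatting; a quick sketch:

from parsl.jobs.states import JobState, JobStatus

print(str(JobState.RUNNING))
# before this release: "JobState"; now: "JobState.RUNNING"

print(str(JobStatus(JobState.FAILED, message="walltime exceeded")))
# -> "JobState.FAILED (walltime exceeded)"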
parsl/monitoring/monitoring.py
CHANGED
@@ -84,7 +84,7 @@ class MonitoringHub(RepresentationMixin):

                 workflow_name: Optional[str] = None,
                 workflow_version: Optional[str] = None,
-                logging_endpoint: str =
+                logging_endpoint: Optional[str] = None,
                 logdir: Optional[str] = None,
                 monitoring_debug: bool = False,
                 resource_monitoring_enabled: bool = True,
@@ -118,7 +118,7 @@ class MonitoringHub(RepresentationMixin):
        logging_endpoint : str
            The database connection url for monitoring to log the information.
            These URLs follow RFC-1738, and can include username, password, hostname, database name.
-            Default:
+            Default: sqlite, in the configured run_dir.
        logdir : str
            Parsl log directory paths. Logs and temp files go here. Default: '.'
        monitoring_debug : Bool
@@ -162,11 +162,14 @@ class MonitoringHub(RepresentationMixin):
         self.resource_monitoring_enabled = resource_monitoring_enabled
         self.resource_monitoring_interval = resource_monitoring_interval

-    def start(self, run_id: str,
+    def start(self, run_id: str, dfk_run_dir: str, config_run_dir: Union[str, os.PathLike]) -> int:

         if self.logdir is None:
             self.logdir = "."

+        if self.logging_endpoint is None:
+            self.logging_endpoint = f"sqlite:///{os.fspath(config_run_dir)}/monitoring.db"
+
         os.makedirs(self.logdir, exist_ok=True)

         # Initialize the ZMQ pipe to the Parsl Client
@@ -231,7 +234,7 @@ class MonitoringHub(RepresentationMixin):
         self.logger.info("Started the router process {} and DBM process {}".format(self.router_proc.pid, self.dbm_proc.pid))

         self.filesystem_proc = Process(target=filesystem_receiver,
-                                       args=(self.logdir, self.resource_msgs,
+                                       args=(self.logdir, self.resource_msgs, dfk_run_dir),
                                        name="Monitoring-Filesystem-Process",
                                        daemon=True
                                        )
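With logging_endpoint defaulting to None, MonitoringHub.start() now derives a sqlite URL from the configured run directory. A sketch of a config that relies on the new default (the address and label are illustrative):

from parsl.config import Config
from parsl.executors import HighThroughputExecutor
from parsl.monitoring.monitoring import MonitoringHub

config = Config(
    executors=[HighThroughputExecutor(label="htex_mon", encrypted=True)],
    # No logging_endpoint given: start() fills in
    # sqlite:///<config.run_dir>/monitoring.db
    monitoring=MonitoringHub(hub_address="127.0.0.1"),
    run_dir="runinfo",
)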
parsl/multiprocessing.py
CHANGED
@@ -5,17 +5,16 @@ import logging
 import multiprocessing
 import multiprocessing.queues
 import platform
+from multiprocessing.context import ForkProcess as ForkProcessType

-from typing import Callable
+from typing import Callable

 logger = logging.getLogger(__name__)

 ForkContext = multiprocessing.get_context("fork")
 SpawnContext = multiprocessing.get_context("spawn")

-
-# it clear that it returns a Process always to the type checker?
-ForkProcess: Type = ForkContext.Process
+ForkProcess: Callable[..., ForkProcessType] = ForkContext.Process


 class MacSafeQueue(multiprocessing.queues.Queue):
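The retyping swaps an unparameterized Type annotation for a callable returning the fork-context Process class, which type checkers can follow through to the constructed object. Trivial usage, unchanged at runtime (POSIX-only, since it uses the fork start method):

from parsl.multiprocessing import ForkProcess

# ForkProcess is still ForkContext.Process; the annotation
# Callable[..., ForkProcessType] just tells checkers what it returns.
p = ForkProcess(target=print, args=("hello from a forked child",), daemon=True)
p.start()
p.join()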
parsl/providers/cobalt/cobalt.py
CHANGED
@@ -1,6 +1,7 @@
 import logging
 import os
 import time
+import warnings

 from parsl.providers.errors import ScaleOutFailed
 from parsl.channels import LocalChannel
@@ -24,6 +25,8 @@ translate_table = {
 class CobaltProvider(ClusterProvider, RepresentationMixin):
     """ Cobalt Execution Provider

+    WARNING: CobaltProvider is deprecated and will be removed by 2024.04
+
     This provider uses cobalt to submit (qsub), obtain the status of (qstat), and cancel (qdel)
     jobs. Theo script to be used is created from a template file in this
     same module.
@@ -86,6 +89,9 @@ class CobaltProvider(ClusterProvider, RepresentationMixin):
         self.queue = queue
         self.scheduler_options = scheduler_options
         self.worker_init = worker_init
+        warnings.warn("CobaltProvider is deprecated; This will be removed after 2024-04",
+                      DeprecationWarning,
+                      stacklevel=2)

     def _status(self):
         """Returns the status list for a list of job_ids
parsl/providers/pbspro/pbspro.py
CHANGED
@@ -119,13 +119,17 @@ class PBSProProvider(TorqueProvider):

             job_state = job.get('job_state', JobState.UNKNOWN)
             state = translate_table.get(job_state, JobState.UNKNOWN)
-            self.resources[job_id]['status'] = JobStatus(state
+            self.resources[job_id]['status'] = JobStatus(state,
+                                                         stdout_path=self.resources[job_id]['job_stdout_path'],
+                                                         stderr_path=self.resources[job_id]['job_stderr_path'])
             jobs_missing.remove(job_id)

         # squeue does not report on jobs that are not running. So we are filling in the
         # blanks for missing jobs, we might lose some information about why the jobs failed.
         for missing_job in jobs_missing:
-            self.resources[missing_job]['status'] = JobStatus(JobState.COMPLETED
+            self.resources[missing_job]['status'] = JobStatus(JobState.COMPLETED,
+                                                              stdout_path=self.resources[missing_job]['job_stdout_path'],
+                                                              stderr_path=self.resources[missing_job]['job_stderr_path'])

     def submit(self, command, tasks_per_node, job_name="parsl"):
         """Submits the command job.
@@ -149,7 +153,11 @@ class PBSProProvider(TorqueProvider):

         job_name = "{0}.{1}".format(job_name, time.time())

-
+        assert self.script_dir, "Expected script_dir to be set"
+        script_path = os.path.join(self.script_dir, job_name)
+        script_path = os.path.abspath(script_path)
+        job_stdout_path = script_path + ".stdout"
+        job_stderr_path = script_path + ".stderr"

         logger.debug("Requesting {} nodes_per_block, {} tasks_per_node".format(
             self.nodes_per_block, tasks_per_node)
@@ -163,6 +171,8 @@ class PBSProProvider(TorqueProvider):
         job_config["scheduler_options"] = self.scheduler_options
         job_config["worker_init"] = self.worker_init
         job_config["user_script"] = command
+        job_config["job_stdout_path"] = job_stdout_path
+        job_config["job_stderr_path"] = job_stderr_path

         # Add a colon to select_options if one isn't included
         if self.select_options and not self.select_options.startswith(":"):
@@ -194,7 +204,11 @@ class PBSProProvider(TorqueProvider):
             for line in stdout.split('\n'):
                 if line.strip():
                     job_id = line.strip()
-                    self.resources[job_id] = {'job_id': job_id,
+                    self.resources[job_id] = {'job_id': job_id,
+                                              'status': JobStatus(JobState.PENDING),
+                                              'job_stdout_path': job_stdout_path,
+                                              'job_stderr_path': job_stderr_path,
+                                              }
         else:
             message = "Command '{}' failed with return code {}".format(launch_cmd, retcode)
             if (stdout is not None) and (stderr is not None):
parsl/providers/pbspro/template.py
CHANGED
@@ -5,8 +5,8 @@ template_string = '''#!/bin/bash
 #PBS -m n
 #PBS -l walltime=$walltime
 #PBS -l select=${nodes_per_block}:ncpus=${ncpus}${select_options}
-#PBS -o ${
-#PBS -e ${
+#PBS -o ${job_stdout_path}
+#PBS -e ${job_stderr_path}
 ${scheduler_options}

 ${worker_init}
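The provider renders this template with string.Template-style substitution; a sketch of how the two new placeholders expand (paths are illustrative, and safe_substitute leaves the placeholders not shown in this hunk untouched):

from string import Template

from parsl.providers.pbspro.template import template_string

rendered = Template(template_string).safe_substitute(
    job_stdout_path="/home/user/runinfo/submit_scripts/parsl.123.stdout",
    job_stderr_path="/home/user/runinfo/submit_scripts/parsl.123.stderr",
)
print(rendered)  # now contains concrete "#PBS -o ..." and "#PBS -e ..." lines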
parsl/providers/slurm/slurm.py
CHANGED
@@ -188,14 +188,18 @@ class SlurmProvider(ClusterProvider, RepresentationMixin):
                 logger.warning(f"Slurm status {slurm_state} is not recognized")
             status = translate_table.get(slurm_state, JobState.UNKNOWN)
             logger.debug("Updating job {} with slurm status {} to parsl state {!s}".format(job_id, slurm_state, status))
-            self.resources[job_id]['status'] = JobStatus(status
+            self.resources[job_id]['status'] = JobStatus(status,
+                                                         stdout_path=self.resources[job_id]['job_stdout_path'],
+                                                         stderr_path=self.resources[job_id]['job_stderr_path'])
             jobs_missing.remove(job_id)

         # squeue does not report on jobs that are not running. So we are filling in the
         # blanks for missing jobs, we might lose some information about why the jobs failed.
         for missing_job in jobs_missing:
             logger.debug("Updating missing job {} to completed status".format(missing_job))
-            self.resources[missing_job]['status'] = JobStatus(JobState.COMPLETED
+            self.resources[missing_job]['status'] = JobStatus(JobState.COMPLETED,
+                                                              stdout_path=self.resources[missing_job]['job_stdout_path'],
+                                                              stderr_path=self.resources[missing_job]['job_stderr_path'])

     def submit(self, command: str, tasks_per_node: int, job_name="parsl.slurm") -> str:
         """Submit the command as a slurm job.
@@ -226,8 +230,11 @@ class SlurmProvider(ClusterProvider, RepresentationMixin):

         job_name = "{0}.{1}".format(job_name, time.time())

-
+        assert self.script_dir, "Expected script_dir to be set"
+        script_path = os.path.join(self.script_dir, job_name)
         script_path = os.path.abspath(script_path)
+        job_stdout_path = script_path + ".stdout"
+        job_stderr_path = script_path + ".stderr"

         logger.debug("Requesting one block with {} nodes".format(self.nodes_per_block))

@@ -239,6 +246,8 @@ class SlurmProvider(ClusterProvider, RepresentationMixin):
         job_config["scheduler_options"] = scheduler_options
         job_config["worker_init"] = worker_init
         job_config["user_script"] = command
+        job_config["job_stdout_path"] = job_stdout_path
+        job_config["job_stderr_path"] = job_stderr_path

         # Wrap the command
         job_config["user_script"] = self.launcher(command,
@@ -262,7 +271,11 @@ class SlurmProvider(ClusterProvider, RepresentationMixin):
             match = re.match(self.regex_job_id, line)
             if match:
                 job_id = match.group("id")
-                self.resources[job_id] = {'job_id': job_id,
+                self.resources[job_id] = {'job_id': job_id,
+                                          'status': JobStatus(JobState.PENDING),
+                                          'job_stdout_path': job_stdout_path,
+                                          'job_stderr_path': job_stderr_path,
+                                          }
                 return job_id
         else:
             logger.error("Could not read job ID from submit command standard output.")
parsl/providers/slurm/template.py
CHANGED
@@ -1,8 +1,8 @@
 template_string = '''#!/bin/bash

 #SBATCH --job-name=${jobname}
-#SBATCH --output=${
-#SBATCH --error=${
+#SBATCH --output=${job_stdout_path}
+#SBATCH --error=${job_stderr_path}
 #SBATCH --nodes=${nodes}
 #SBATCH --time=${walltime}
 #SBATCH --ntasks-per-node=${tasks_per_node}
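Both batch providers now keep these paths in their resources table and attach them to every JobStatus they report, so batch-job output becomes discoverable from status objects. A sketch (paths illustrative):

from parsl.jobs.states import JobState, JobStatus

status = JobStatus(
    JobState.COMPLETED,
    stdout_path="/home/user/runinfo/submit_scripts/parsl.slurm.123.stdout",
    stderr_path="/home/user/runinfo/submit_scripts/parsl.slurm.123.stderr",
)
print(status.stdout_path)  # status.stdout reads the file contents when present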
parsl/tests/test_htex/test_htex.py
CHANGED
@@ -1,18 +1,40 @@
 import pathlib
+from unittest import mock

 import pytest

 from parsl import curvezmq
 from parsl import HighThroughputExecutor
+from parsl.multiprocessing import ForkProcess
+
+_MOCK_BASE = "parsl.executors.high_throughput.executor"
+
+
+@pytest.fixture
+def encrypted(request: pytest.FixtureRequest):
+    if hasattr(request, "param"):
+        return request.param
+    return True
+
+
+@pytest.fixture
+def htex(encrypted: bool):
+    htex = HighThroughputExecutor(encrypted=encrypted)
+
+    yield htex
+
+    htex.shutdown()


 @pytest.mark.local
-@pytest.mark.parametrize("encrypted", (True, False))
+@pytest.mark.parametrize("encrypted", (True, False), indirect=True)
 @pytest.mark.parametrize("cert_dir_provided", (True, False))
 def test_htex_start_encrypted(
-    encrypted: bool,
+    encrypted: bool,
+    cert_dir_provided: bool,
+    htex: HighThroughputExecutor,
+    tmpd_cwd: pathlib.Path,
 ):
-    htex = HighThroughputExecutor(encrypted=encrypted)
     htex.run_dir = str(tmpd_cwd)
     if cert_dir_provided:
         provided_base_dir = tmpd_cwd / "provided"
@@ -44,3 +66,44 @@ def test_htex_start_encrypted(
     assert htex.outgoing_q.zmq_context.cert_dir is None
     assert htex.incoming_q.zmq_context.cert_dir is None
     assert htex.command_client.zmq_context.cert_dir is None
+
+
+@pytest.mark.local
+@pytest.mark.parametrize("started", (True, False))
+@pytest.mark.parametrize("timeout_expires", (True, False))
+@mock.patch(f"{_MOCK_BASE}.logger")
+def test_htex_shutdown(
+    mock_logger: mock.MagicMock,
+    started: bool,
+    timeout_expires: bool,
+    htex: HighThroughputExecutor,
+):
+    mock_ix_proc = mock.Mock(spec=ForkProcess)
+
+    if started:
+        htex.interchange_proc = mock_ix_proc
+        mock_ix_proc.is_alive.return_value = True
+
+    if not timeout_expires:
+        # Simulate termination of the Interchange process
+        def kill_interchange(*args, **kwargs):
+            mock_ix_proc.is_alive.return_value = False
+
+        mock_ix_proc.terminate.side_effect = kill_interchange
+
+    htex.shutdown()
+
+    mock_logs = mock_logger.info.call_args_list
+    if started:
+        assert mock_ix_proc.terminate.called
+        assert mock_ix_proc.join.called
+        assert {"timeout": 10} == mock_ix_proc.join.call_args[1]
+        if timeout_expires:
+            assert "Unable to terminate Interchange" in mock_logs[1][0][0]
+            assert mock_ix_proc.kill.called
+        assert "Attempting" in mock_logs[0][0][0]
+        assert "Finished" in mock_logs[-1][0][0]
+    else:
+        assert not mock_ix_proc.terminate.called
+        assert not mock_ix_proc.join.called
+        assert "has not started" in mock_logs[0][0][0]
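A note on the fixture plumbing above: with indirect=True, pytest routes each parametrized value into the encrypted fixture as request.param rather than into the test function directly; the hasattr(request, "param") guard then lets unparametrized tests such as test_htex_shutdown fall back to an encrypted executor, and the htex fixture guarantees shutdown() runs even when a test fails.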
parsl/tests/test_providers/test_cobalt_deprecation_warning.py
ADDED
@@ -0,0 +1,16 @@
+import warnings
+import pytest
+from parsl.providers import CobaltProvider
+
+
+@pytest.mark.local
+def test_deprecation_warning():
+
+    with warnings.catch_warnings(record=True) as w:
+        warnings.simplefilter("always")
+
+        CobaltProvider()
+
+        assert len(w) == 1
+        assert issubclass(w[-1].category, DeprecationWarning)
+        assert "CobaltProvider" in str(w[-1].message)
parsl/tests/test_providers/test_pbspro_template.py
ADDED
@@ -0,0 +1,28 @@
+import random
+
+from unittest import mock
+import pytest
+
+from parsl.channels import LocalChannel
+from parsl.providers import PBSProProvider
+
+
+@pytest.mark.local
+def test_submit_script_basic(tmp_path):
+    """Test slurm resources table"""
+
+    provider = PBSProProvider(
+        queue="debug", channel=LocalChannel(script_dir=tmp_path)
+    )
+    provider.script_dir = tmp_path
+    job_id = str(random.randint(55000, 59000))
+    provider.execute_wait = mock.Mock(spec=PBSProProvider.execute_wait)
+    provider.execute_wait.return_value = (0, job_id, "")
+    result_job_id = provider.submit("test", tasks_per_node=1)
+    assert job_id == result_job_id
+    provider.execute_wait.assert_called()
+    assert job_id in provider.resources
+
+    job_info = provider.resources[job_id]
+    assert "job_stdout_path" in job_info
+    assert "job_stderr_path" in job_info
parsl/tests/test_providers/test_slurm_template.py
ADDED
@@ -0,0 +1,29 @@
+import logging
+import random
+
+from unittest import mock
+import pytest
+
+from parsl.channels import LocalChannel
+from parsl.providers import SlurmProvider
+
+
+@pytest.mark.local
+def test_submit_script_basic(tmp_path):
+    """Test slurm resources table"""
+
+    provider = SlurmProvider(
+        partition="debug", channel=LocalChannel(script_dir=tmp_path)
+    )
+    provider.script_dir = tmp_path
+    job_id = str(random.randint(55000, 59000))
+    provider.execute_wait = mock.MagicMock(spec=SlurmProvider.execute_wait)
+    provider.execute_wait.return_value = (0, f"Submitted batch job {job_id}", "")
+    result_job_id = provider.submit("test", tasks_per_node=1)
+    assert job_id == result_job_id
+    provider.execute_wait.assert_called()
+    assert job_id in provider.resources
+
+    job_info = provider.resources[job_id]
+    assert "job_stdout_path" in job_info
+    assert "job_stderr_path" in job_info
parsl/tests/test_serialization/test_htex_code_cache.py
ADDED
@@ -0,0 +1,57 @@
+import parsl
+import pytest
+
+from typing import Any
+
+from parsl.serialize.facade import methods_for_code
+
+from parsl.tests.configs.htex_local import fresh_config as local_config
+
+
+@parsl.python_app
+def f(x):
+    return x + 1
+
+
+@pytest.mark.local
+def test_caching() -> None:
+    # for future serializer devs: if this is failing because you added another
+    # code serializer, you'll also probably need to re-think what is being tested
+    # about serialization caching here.
+    assert len(methods_for_code) == 1
+
+    serializer = methods_for_code[b'C2']
+
+    # force type to Any here because a serializer method coming from
+    # methods_for_code doesn't statically have any cache management
+    # methods on itself such as cache_clear or cache_info.
+    serialize_method: Any = serializer.serialize
+
+    serialize_method.cache_clear()
+
+    assert serialize_method.cache_info().hits == 0
+    assert serialize_method.cache_info().misses == 0
+    assert serialize_method.cache_info().currsize == 0
+
+    assert f(7).result() == 8
+
+    # the code serializer cache should now contain only a (probably wrapped) f ...
+    assert serialize_method.cache_info().currsize == 1
+
+    # ... which was not already in the cache.
+    assert serialize_method.cache_info().misses == 1
+    assert serialize_method.cache_info().hits == 0
+
+    assert f(100).result() == 101
+
+    # this time round, we should have got a single cache hit...
+    assert serialize_method.cache_info().hits == 1
+    assert serialize_method.cache_info().misses == 1
+    assert serialize_method.cache_info().currsize == 1
+
+    assert f(200).result() == 201
+
+    # this time round, we should have got another single cache hit...
+    assert serialize_method.cache_info().hits == 2
+    assert serialize_method.cache_info().misses == 1
+    assert serialize_method.cache_info().currsize == 1
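The cache_clear/cache_info calls are the functools.lru_cache management API, which suggests the C2 code serializer wraps its serialize method in such a cache. The counting pattern in isolation, as a standalone sketch rather than parsl code:

import functools

@functools.lru_cache
def serialize(obj: bytes) -> bytes:
    return obj[::-1]  # stand-in for real serialization work

serialize(b"f")  # miss: result computed and cached
serialize(b"f")  # hit: served from the cache
info = serialize.cache_info()
print(info.hits, info.misses, info.currsize)  # 1 1 1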
parsl/usage_tracking/usage.py
CHANGED
@@ -138,9 +138,6 @@ class UsageTracker:
            - Message dict dumped as json string, ready for UDP
        """
        message = {'uuid': self.uuid,
-                   'test': False,  # this field previously indicated if parsl
-                                   # was being run in test mode, and is
-                                   # retained for protocol compatibility
                   'parsl_v': self.parsl_version,
                   'python_v': self.python_version,
                   'os': platform.system(),
@@ -165,9 +162,7 @@ class UsageTracker:
                   'end': time.time(),
                   't_apps': app_count,
                   'sites': site_count,
-                   '
-                   'failed': app_fails,
-                   'test': False,  # see comment in construct_start_message
+                   'failed': app_fails
                   }

        return json.dumps(message)
@@ -192,7 +187,15 @@ class UsageTracker:

        self.send_UDP_message(message)

-    def close(self) -> None:
-        """
+    def close(self, timeout: float = 10.0) -> None:
+        """First give each process one timeout period to finish what it is
+        doing, then kill it (SIGKILL). There's no softer SIGTERM step,
+        because that adds one join period of delay for what is almost
+        definitely either: going to behave broadly the same as to SIGKILL,
+        or won't respond to SIGTERM.
+        """
        for proc in self.procs:
-            proc.
+            proc.join(timeout=timeout)
+            if proc.is_alive():
+                logger.info("Usage tracking process did not end itself; sending SIGKILL")
+                proc.kill()
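close() applies the same join-then-kill pattern as the HTEX shutdown above, deliberately skipping an intermediate SIGTERM. The pattern in isolation (a generic sketch, not parsl code):

import time
from multiprocessing import Process

def tracker():
    time.sleep(60)  # stand-in for a UDP send that might hang

if __name__ == "__main__":
    p = Process(target=tracker)
    p.start()
    p.join(timeout=10.0)  # one timeout period to finish on its own
    if p.is_alive():
        p.kill()          # then straight to SIGKILL, no SIGTERM step
        p.join()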
parsl/version.py
CHANGED
{parsl-2024.2.19.dist-info → parsl-2024.2.26.dist-info}/METADATA
CHANGED
@@ -1,9 +1,9 @@
 Metadata-Version: 2.1
 Name: parsl
-Version: 2024.2.
+Version: 2024.2.26
 Summary: Simple data dependent workflows in Python
 Home-page: https://github.com/Parsl/parsl
-Download-URL: https://github.com/Parsl/parsl/archive/2024.02.
+Download-URL: https://github.com/Parsl/parsl/archive/2024.02.26.tar.gz
 Author: The Parsl Team
 Author-email: parsl@googlegroups.com
 License: Apache 2.0
{parsl-2024.2.19.dist-info → parsl-2024.2.26.dist-info}/RECORD
CHANGED
@@ -4,11 +4,11 @@ parsl/config.py,sha256=ysUWBfm9bygayHHdItaJbP4oozkHJJmVQVnWCt5igjE,6808
 parsl/curvezmq.py,sha256=FtZEYP1IWDry39cH-tOKUm9TnaR1U7krOmvVYpATcOk,6939
 parsl/errors.py,sha256=SzINzQFZDBDbj9l-DPQznD0TbGkNhHIRAPkcBCogf_A,1019
 parsl/log_utils.py,sha256=AGem-dhQs5TYUyJg6GKkRuHxAw8FHhYlWB_0s7_ROw4,3175
-parsl/multiprocessing.py,sha256=
+parsl/multiprocessing.py,sha256=hakfdg-sgxEjwloZeDrt6EhzwdzecvjJhkPHHxh8lII,1938
 parsl/process_loggers.py,sha256=1G3Rfrh5wuZNo2X03grG4kTYPGOxz7hHCyG6L_A3b0A,1137
 parsl/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 parsl/utils.py,sha256=PyJSz5BTlWTmVt01V2ueJPQYnrHMUwJlilSmW40oUJw,10956
-parsl/version.py,sha256
+parsl/version.py,sha256=jKXzmgCDPiDLSA9Hr-xq6py2ngnRNlIw26Ju3jmxO_U,131
 parsl/app/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 parsl/app/app.py,sha256=wAHchJetgnicT1pn0NJKDeDX0lV3vDFlG8cQd_Ciax4,8522
 parsl/app/bash.py,sha256=bx9x1XFwkOTpZZD3CPwnVL9SyNRDjbUGtOnuGLvxN_8,5396
@@ -19,7 +19,7 @@ parsl/benchmark/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 parsl/benchmark/perf.py,sha256=jWQo47D6Coq9XSwvisHBfwwoy4d9_S8RzTBfJ9AteHo,3095
 parsl/channels/__init__.py,sha256=x-GGbnZJXpQJ5eNQNb0UmCCqAnYiBqk0aJKDtqrkMBM,371
 parsl/channels/base.py,sha256=1Yj4hBCq5APBWNystXK3wF1Ho8OgkplWA07WQZNAhP8,4294
-parsl/channels/errors.py,sha256=
+parsl/channels/errors.py,sha256=L9DPiYIP5E90N1moG4aRS_mpdrukQZ222n2-xAZCFM0,3282
 parsl/channels/local/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 parsl/channels/local/local.py,sha256=rTRlYfhRHMFIeLsEIFGigpvgX3XnacNguvfp6ij_fAw,5087
 parsl/channels/oauth_ssh/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -36,9 +36,8 @@ parsl/configs/ad_hoc.py,sha256=6WzFfBLtoMe3fi2Zs4vVpd_J5hcabD3qLLaUyEDrj4Q,1266
 parsl/configs/bluewaters.py,sha256=iQEYxsm4V3nEY8o-d2npxi9UzZ7WohIFvuK8TXaPlSo,891
 parsl/configs/bridges.py,sha256=ee5H4-6mvdwxUY-GttiCWSsvoL3pNQqXN5UDrK5Qlx4,1437
 parsl/configs/cc_in2p3.py,sha256=AiYgv-Ox5RilqouIQ0yQUBZNwCaITQpCeLBrF2FV0ek,700
-parsl/configs/comet.py,sha256=AzR6y9bdHXtSD49K02U7Q-ycyUvy8SyEF1Veh06qOHs,945
-parsl/configs/cooley.py,sha256=Xt1wvhezZvErVWkkcb3IwCXJIRhty1gQ6szoVuPe9aY,969
 parsl/configs/ec2.py,sha256=-vzlv3YpFfX7kpCYWHJFvdDqFZfj0leR6sEaus3Ydoo,868
+parsl/configs/expanse.py,sha256=t-v_al6qnMF_VZliIYtV0rpoidXuqN162YfBu4jJumw,948
 parsl/configs/frontera.py,sha256=79oniQz83DPW67bOQ6-S5fjrjsHopwdIZaHhUSmp6-Y,1411
 parsl/configs/htex_local.py,sha256=njBRUU56Tt70vXtv3-ogjflsBAjdk5W-GpR0YuYxBEQ,467
 parsl/configs/illinoiscluster.py,sha256=L-dFOpL-2VKeBBMq82TiM27b0pNE9Y9xwS1PXcPI1YU,1093
@@ -49,7 +48,6 @@ parsl/configs/osg.py,sha256=1WNOt6hmG-DcQdH2dBZCpaN1yNZvcXX-TsU1FgAMj24,1134
 parsl/configs/polaris.py,sha256=SaMvllw_1rGwhJyuwJ15Sfpzh4attHfGDUmUQHzWCzM,1654
 parsl/configs/stampede2.py,sha256=CgweBZWTSY3P--Zk2iTarh6wmfO1cLz7JB-4wlu3Vdc,1321
 parsl/configs/summit.py,sha256=kssMiF_vYwpMxF4CYlr9jbL-Vz4zOekNCPryinCEef0,1079
-parsl/configs/theta.py,sha256=B1RtH5HNY8z-gYPssOtYhlsBwOYLLoekrAO032YtBLA,1250
 parsl/configs/toss3_llnl.py,sha256=9VAqKpDvwTSlvDXFw2gSI9ViVv0jJbr1cPBTnCGZlp4,998
 parsl/configs/vineex_local.py,sha256=0BkxSSsTMkvKpGmQplXW-59qsvHYjdhN5TGyIOfHm6s,695
 parsl/configs/wqex_local.py,sha256=QocsrCKR94agZndabH7vX3NTGLqx_y126Wgmo-17xNs,794
@@ -63,7 +61,7 @@ parsl/data_provider/http.py,sha256=nDHTW7XmJqAukWJjPRQjyhUXt8r6GsQ36mX9mv_wOig,2
 parsl/data_provider/rsync.py,sha256=2-ZxqrT-hBj39x082NusJaBqsGW4Jd2qCW6JkVPpEl0,4254
 parsl/data_provider/staging.py,sha256=l-mAXFburs3BWPjkSmiQKuAgJpsxCG62yATPDbrafYI,4523
 parsl/dataflow/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-parsl/dataflow/dflow.py,sha256=
+parsl/dataflow/dflow.py,sha256=thxwgWI5F2twW4P_bsy1-ZERAfJIbqb1uI7uGGbhmRU,63873
 parsl/dataflow/errors.py,sha256=w2vOt_ymzG2dOqJUO4IDcmTlrCIHlMZL8nBVyVq0O_8,2176
 parsl/dataflow/futures.py,sha256=aVfEUTzp4-EdunDAtNcqVQf8l_A7ArDi2c82KZMwxfY,5256
 parsl/dataflow/memoization.py,sha256=AsJO6c6cRp2ac6H8uGn2USlEi78_nX3QWvpxYt4XdYE,9583
@@ -81,7 +79,7 @@ parsl/executors/flux/executor.py,sha256=0omXRPvykdW5VZb8mwgBJjxVk4H6G8xoL5D_R9pu
 parsl/executors/flux/flux_instance_manager.py,sha256=tTEOATClm9SwdgLeBRWPC6D55iNDuh0YxqJOw3c3eQ4,2036
 parsl/executors/high_throughput/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 parsl/executors/high_throughput/errors.py,sha256=vl69wLuVOplbKxHI9WphEGBExHWkTn5n8T9QhBXuNH0,380
-parsl/executors/high_throughput/executor.py,sha256=
+parsl/executors/high_throughput/executor.py,sha256=TOLBLvBc6yYFGJ5sgxnQV8IISkkAejMoljq9vg5kM2Q,35839
 parsl/executors/high_throughput/interchange.py,sha256=TRxo0XWoFiDXEUIPTI_ILOClG3nKMJs7RtXz4NCLhhg,29852
 parsl/executors/high_throughput/manager_record.py,sha256=T8-JVMfDJU6SJfzJRooD0mO8AHGMXlcn3PBOM0m_vng,366
 parsl/executors/high_throughput/monitoring_info.py,sha256=3gQpwQjjNDEBz0cQqJZB6hRiwLiWwXs83zkQDmbOwxY,297
@@ -114,7 +112,7 @@ parsl/jobs/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 parsl/jobs/error_handlers.py,sha256=dvfZFqTwAcFXrIx3sjFTcjtwOB0-xGn6QnPwJEG-kAI,2311
 parsl/jobs/errors.py,sha256=cpSQXCrlKtuHsQf7usjF-lX8XsDkFnE5kWpmFjiN6OU,178
 parsl/jobs/job_status_poller.py,sha256=xQQauyNpmK23t6ViYm-AvvDLHsxVTmghjlACZvfL6LQ,4973
-parsl/jobs/states.py,sha256=
+parsl/jobs/states.py,sha256=rPBoAEEudKngWFijlwvXXhAagDs_9DCXvQP9rwzVgCM,4855
 parsl/jobs/strategy.py,sha256=9V07D8bydpyxvNNRH89JZa0Pt-bjjowrSmCc5mv6awY,12903
 parsl/launchers/__init__.py,sha256=k8zAB3IBP-brfqXUptKwGkvsIRaXjAJZNBJa2XVtY1A,546
 parsl/launchers/base.py,sha256=CblcvPTJiu-MNLWaRtFe29SZQ0BpTOlaY8CGcHdlHIE,538
@@ -123,7 +121,7 @@ parsl/launchers/launchers.py,sha256=t9nuX38GMlAUPHs0aksnMd_5jI59IcqwlUD667w1cu4,
 parsl/monitoring/__init__.py,sha256=0ywNz6i0lM1xo_7_BIxhETDGeVd2C_0wwD7qgeaMR4c,83
 parsl/monitoring/db_manager.py,sha256=impNMJ92oACMCxXS0qBizRviN7-29Kjvs5m8p3Em1ZQ,36308
 parsl/monitoring/message_type.py,sha256=Khn88afNxcOIciKiCK4GLnn90I5BlRTiOL3zK-P07yQ,401
-parsl/monitoring/monitoring.py,sha256=
+parsl/monitoring/monitoring.py,sha256=RlfbdH6kre3CXWi0kppUKjEVQrp1DQQxMfvZCwq7Vus,24855
 parsl/monitoring/radios.py,sha256=T2_6QuUjC-dd_7qMnIk6WHQead1iWz7m_P6ZC4QAqdA,5265
 parsl/monitoring/remote.py,sha256=88KYckdqCcGYWNxpkBETdNg4YVP8UkMLNcJDT4ffq_s,12552
 parsl/monitoring/types.py,sha256=SO6Fjjbb83sv_MtbutoxGssiWh6oXKkEEsD4EvwOnZ4,629
@@ -163,7 +161,7 @@ parsl/providers/azure/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3h
 parsl/providers/azure/azure.py,sha256=wHM0IDSdPDi240zHgB_BxShahe6wOi_MffVZ4u900ao,18396
 parsl/providers/azure/template.py,sha256=JJNW8zr30uYcfK-RqQX2FHZVWrxvYE8E6VbaYuAFEqw,347
 parsl/providers/cobalt/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-parsl/providers/cobalt/cobalt.py,sha256=
+parsl/providers/cobalt/cobalt.py,sha256=D8F1z0Ye3WJWl_R1Zl0HFyyc29JndSm6oNB2PZ-3MWM,8478
 parsl/providers/cobalt/template.py,sha256=a7fIkMLrYZ6uHgOPQgp8YPJA8f7SgxloFdEx51OJvuY,273
 parsl/providers/condor/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 parsl/providers/condor/condor.py,sha256=NmC6IcIlhPT2ojmNn6TeRro3E-rPdGdK5IAoLXR9g24,13175
@@ -182,11 +180,11 @@ parsl/providers/lsf/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSu
 parsl/providers/lsf/lsf.py,sha256=AECVpjl_CTreE-APFQSjMVVIb3HheG6zculJn-zYtdM,11502
 parsl/providers/lsf/template.py,sha256=leQ_TpXv7ePMzbHfLaWvqMR0VORxlp-hjX5JxtkcwwU,269
 parsl/providers/pbspro/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-parsl/providers/pbspro/pbspro.py,sha256=
-parsl/providers/pbspro/template.py,sha256=
+parsl/providers/pbspro/pbspro.py,sha256=Uo6XvXZ3HwotfvrzFtaE1Wf4pta1LoqM4GgkQUTXv4A,8822
+parsl/providers/pbspro/template.py,sha256=y-Dher--t5Eury-c7cAuSZs9FEUXWiruFUI07v81558,315
 parsl/providers/slurm/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-parsl/providers/slurm/slurm.py,sha256=
-parsl/providers/slurm/template.py,sha256=
+parsl/providers/slurm/slurm.py,sha256=ormNss2s8NVrrS-QZobuzrDlbEolDm6ZdyHD5vgrzsU,13720
+parsl/providers/slurm/template.py,sha256=KpgBEFMc1ps-38jdrk13xUGx9TCivu-iF90jgQDdiEQ,315
 parsl/providers/torque/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 parsl/providers/torque/template.py,sha256=4qfc2gmlEhRCAD7erFDOs4prJQ43I8s4E8DSUSVQx3A,358
 parsl/providers/torque/torque.py,sha256=h8CbLL594sDqcEHu-MnFIT0I_Sjx2IsOQJ5lMvjHHG4,9497
@@ -343,7 +341,7 @@ parsl/tests/test_htex/test_basic.py,sha256=VRP_-Ro2SYp8TqfjpG_zCBJOZWuVFFCr3E0WK
 parsl/tests/test_htex/test_connected_blocks.py,sha256=ayxoj-44jtYc-diedlUKG413JZAFnzFvZhM6G_IlxaY,1630
 parsl/tests/test_htex/test_cpu_affinity_explicit.py,sha256=9t6pI6VwFFHRv0cZDJB-ohIlYOezc9ugysouQbOzSqQ,1413
 parsl/tests/test_htex/test_disconnected_blocks.py,sha256=k3hcDTCbhv8_lD5jn687r8zT95ugpEvMCZ14fTmOefM,1881
-parsl/tests/test_htex/test_htex.py,sha256=
+parsl/tests/test_htex/test_htex.py,sha256=9CZRufq4YCox0c1d5PcGOb-X5YPrz-cTIP9hpgKXduk,3461
 parsl/tests/test_htex/test_manager_failure.py,sha256=5YsCS1z7wOfUcFCD7uzR7t_rD3x5toZnoaCKVrHaMb0,1152
 parsl/tests/test_htex/test_missing_worker.py,sha256=Tux0Xla719eup7RdWj8LmxNH-CTscMN0NM4CPuPP1ng,967
 parsl/tests/test_htex/test_multiple_disconnected_blocks.py,sha256=KxEWjfKXYPIllAGsErjA2J1-2rQSg4WV9xbl1QqkL7U,1984
@@ -361,11 +359,14 @@ parsl/tests/test_mpi_apps/test_bad_mpi_config.py,sha256=mB-ASx0S-wh1iP6MYZ-CdOwM
 parsl/tests/test_mpi_apps/test_mpi_mode_disabled.py,sha256=IuCYP-7_VytwShZ8nZ5mLrymgyEgdK_9wKDX2lqR9lE,1342
 parsl/tests/test_mpi_apps/test_mpi_mode_enabled.py,sha256=dST8nlagWZJ4zRuGV7klXEm6AHR7aw8FOJxWRED8QRo,5025
 parsl/tests/test_mpi_apps/test_mpi_prefix.py,sha256=OJ95-95t7DmDF86uYCtT2iRcY2gn9LFH45OWyjjL2h8,1950
-parsl/tests/test_mpi_apps/test_mpi_scheduler.py,sha256=
+parsl/tests/test_mpi_apps/test_mpi_scheduler.py,sha256=dZ8_mzLMx5Us7mra2nPfxaeWZnhZyqNJ8vPWkWHaEB8,6317
 parsl/tests/test_mpi_apps/test_resource_spec.py,sha256=fIR-sellr4r9yxmklHI3VDiIl-VKieyYVh403XF_nlQ,3701
 parsl/tests/test_providers/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+parsl/tests/test_providers/test_cobalt_deprecation_warning.py,sha256=Fy5XXDkVs3KIX3tHyRjyReXEr35X1LWyEXcVXmWccDs,389
 parsl/tests/test_providers/test_local_provider.py,sha256=G6Fuko22SvAtD7xhfQv8k_8HtJuFhZ8aHYcWQt073Pg,6968
+parsl/tests/test_providers/test_pbspro_template.py,sha256=qlVJFr4fLNXa1Fk42sEEwp2MYBJ5NodGLZD1rc03rq0,855
 parsl/tests/test_providers/test_slurm_instantiate.py,sha256=eW3pEZRIzZO1-eKFrBc7N5uoN5otwghgbqut74Kyqoc,500
+parsl/tests/test_providers/test_slurm_template.py,sha256=Au6DbIgCxWPTNCvipwWh_39zlUbWdOCZJhtYRmW4Yc0,901
 parsl/tests/test_providers/test_submiterror_deprecation.py,sha256=ZutVj_0VJ7M-5UZV0qisMwId7lT783LAxGEAsMjkeZU,501
 parsl/tests/test_python_apps/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 parsl/tests/test_python_apps/test_arg_input_types.py,sha256=JXpfHiu8lr9BN6u1OzqFvGwBhxzsGTPMewHx6Wdo-HI,670
@@ -395,7 +396,7 @@ parsl/tests/test_python_apps/test_simple.py,sha256=LYGjdHvRizTpYzZePPvwKSPwrr2MP
 parsl/tests/test_python_apps/test_timeout.py,sha256=uENfT-1DharQkqkeG7a89E-gU1gjE7ATJrBZGUKvZSA,998
 parsl/tests/test_python_apps/test_type5.py,sha256=kUyA1NuFu-DDXsJNNvJLZVyewZBt7QAOhcGm2DWFTQw,777
 parsl/tests/test_radical/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-parsl/tests/test_radical/test_mpi_funcs.py,sha256=
+parsl/tests/test_radical/test_mpi_funcs.py,sha256=vEG3tSZS2BNKF65acmoboLLmudIUHzz7UWQYsMUwAbw,838
 parsl/tests/test_regression/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 parsl/tests/test_regression/test_1480.py,sha256=HNhuw7OYkBGMhN--XgKIl2JPHUj_hXlgL74oS3FqWk4,545
 parsl/tests/test_regression/test_1606_wait_for_current_tasks.py,sha256=frqPtaiVysevj9nCWoQlAeh9K1jQO5zaahr9ev_Mx_0,1134
@@ -414,6 +415,7 @@ parsl/tests/test_scaling/test_scale_down.py,sha256=xg9ACrGCsNraarkslS88PTBjWSBqh
 parsl/tests/test_serialization/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 parsl/tests/test_serialization/test_2555_caching_deserializer.py,sha256=J8__b4djA5tErd8FUSXGkGcdXlW2KHbBWRbCTAnV08Q,767
 parsl/tests/test_serialization/test_basic.py,sha256=51KshqIk2RNr7S2iSkl5tZo40CJBb0h6uby8YPgOGlg,543
+parsl/tests/test_serialization/test_htex_code_cache.py,sha256=YW9ab4GCpeZWRdYsVEj4irTI3zYkERJXhuvFIroqYN4,1841
 parsl/tests/test_serialization/test_pack_resource_spec.py,sha256=eZ_gykB4uTDyEEF1HkExTUn98j9pTljxAnn6ucFhdvo,640
 parsl/tests/test_serialization/test_proxystore_configured.py,sha256=_JbMzeUgcR-1Ss2hGAb2v0LBA0fzKpNpfO-HaUCR7Yo,2293
 parsl/tests/test_serialization/test_proxystore_impl.py,sha256=Pn_4ulwCd7Tc6Qlmypq2ImT4DtErGDIfqHHmPTr7aOI,1226
@@ -433,13 +435,13 @@ parsl/tests/test_threads/test_lazy_errors.py,sha256=nGhYfCMHFZYSy6YJ4gnAmiLl9SfY
 parsl/tests/test_utils/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 parsl/tests/test_utils/test_representation_mixin.py,sha256=kUZeIDwA2rlbJ3-beGzLLwf3dOplTMCrWJN87etHcyY,1633
 parsl/usage_tracking/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-parsl/usage_tracking/usage.py,sha256=
-parsl-2024.2.
-parsl-2024.2.
-parsl-2024.2.
-parsl-2024.2.
-parsl-2024.2.
-parsl-2024.2.
-parsl-2024.2.
-parsl-2024.2.
-parsl-2024.2.
+parsl/usage_tracking/usage.py,sha256=WSfllOV4bDveCaHSG2i39-si1lNHAqi__iY_hbf5OPk,6817
+parsl-2024.2.26.data/scripts/exec_parsl_function.py,sha256=NtWNeBvRqksej38eRPw8zPBJ1CeW6vgaitve0tfz_qc,7801
+parsl-2024.2.26.data/scripts/parsl_coprocess.py,sha256=kzX_1RI3V2KMKs6L-il4I1qkLNVodDKFXN_1FHB9fmM,6031
+parsl-2024.2.26.data/scripts/process_worker_pool.py,sha256=cgnfXpm1s3aVX33hgVL_cz6DQLzDuifHG78KR68A38Q,38434
+parsl-2024.2.26.dist-info/LICENSE,sha256=tAkwu8-AdEyGxGoSvJ2gVmQdcicWw3j1ZZueVV74M-E,11357
+parsl-2024.2.26.dist-info/METADATA,sha256=QnkK0o9wcEMdov7ekRXMKRPwG-ipixQLQIwe_hKkmnY,3960
+parsl-2024.2.26.dist-info/WHEEL,sha256=oiQVh_5PnQM0E3gPdiz09WCNmwiHDMaGer_elqB3coM,92
+parsl-2024.2.26.dist-info/entry_points.txt,sha256=XqnsWDYoEcLbsMcpnYGKLEnSBmaIe1YoM5YsBdJG2tI,176
+parsl-2024.2.26.dist-info/top_level.txt,sha256=PIheYoUFQtF2icLsgOykgU-Cjuwr2Oi6On2jo5RYgRM,6
+parsl-2024.2.26.dist-info/RECORD,,
parsl/configs/cooley.py
DELETED
@@ -1,29 +0,0 @@
-from parsl.config import Config
-from parsl.executors import HighThroughputExecutor
-from parsl.launchers import MpiRunLauncher
-from parsl.providers import CobaltProvider
-
-
-config = Config(
-    executors=[
-        HighThroughputExecutor(
-            label="cooley_htex",
-            worker_debug=False,
-            cores_per_worker=1,
-            provider=CobaltProvider(
-                queue='debug',
-                account='YOUR_ACCOUNT',  # project name to submit the job
-                launcher=MpiRunLauncher(),
-                scheduler_options='',  # string to prepend to #COBALT blocks in the submit script to the scheduler
-                worker_init='',  # command to run before starting a worker, such as 'source activate env'
-                init_blocks=1,
-                max_blocks=1,
-                min_blocks=1,
-                nodes_per_block=4,
-                cmd_timeout=60,
-                walltime='00:10:00',
-            ),
-        )
-    ],
-
-)
parsl/configs/theta.py
DELETED
@@ -1,33 +0,0 @@
-from parsl.config import Config
-from parsl.providers import CobaltProvider
-from parsl.launchers import AprunLauncher
-from parsl.executors import HighThroughputExecutor
-from parsl.addresses import address_by_interface
-
-config = Config(
-    executors=[
-        HighThroughputExecutor(
-            label='theta_local_htex_multinode',
-            address=address_by_interface('vlan2360'),
-            max_workers=4,
-            cpu_affinity='block',  # Ensures that workers use cores on the same tile
-            provider=CobaltProvider(
-                queue='YOUR_QUEUE',
-                account='YOUR_ACCOUNT',
-                launcher=AprunLauncher(overrides="-d 64 --cc depth"),
-                walltime='00:30:00',
-                nodes_per_block=2,
-                init_blocks=1,
-                min_blocks=1,
-                max_blocks=1,
-                # string to prepend to #COBALT blocks in the submit
-                # script to the scheduler eg: '#COBALT -t 50'
-                scheduler_options='',
-                # Command to be run before starting a worker, such as:
-                # 'module load Anaconda; source activate parsl_env'.
-                worker_init='',
-                cmd_timeout=120,
-            ),
-        )
-    ],
-)
{parsl-2024.2.19.data → parsl-2024.2.26.data}/scripts/exec_parsl_function.py
File without changes
{parsl-2024.2.19.data → parsl-2024.2.26.data}/scripts/parsl_coprocess.py
File without changes
{parsl-2024.2.19.data → parsl-2024.2.26.data}/scripts/process_worker_pool.py
File without changes
{parsl-2024.2.19.dist-info → parsl-2024.2.26.dist-info}/LICENSE
File without changes
{parsl-2024.2.19.dist-info → parsl-2024.2.26.dist-info}/WHEEL
File without changes
{parsl-2024.2.19.dist-info → parsl-2024.2.26.dist-info}/entry_points.txt
File without changes
{parsl-2024.2.19.dist-info → parsl-2024.2.26.dist-info}/top_level.txt
File without changes