parsl 2024.10.28__py3-none-any.whl → 2024.11.4__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- parsl/dataflow/dflow.py +5 -5
- parsl/executors/high_throughput/executor.py +0 -1
- parsl/executors/high_throughput/mpi_resource_management.py +0 -12
- parsl/monitoring/monitoring.py +4 -6
- parsl/monitoring/radios.py +3 -14
- parsl/monitoring/remote.py +3 -5
- parsl/providers/__init__.py +0 -2
- parsl/providers/base.py +1 -1
- parsl/tests/conftest.py +4 -0
- parsl/tests/site_tests/site_config_selector.py +1 -6
- parsl/tests/test_bash_apps/test_basic.py +3 -0
- parsl/tests/test_bash_apps/test_error_codes.py +4 -0
- parsl/tests/test_bash_apps/test_kwarg_storage.py +1 -0
- parsl/tests/test_bash_apps/test_memoize.py +2 -6
- parsl/tests/test_bash_apps/test_memoize_ignore_args.py +3 -0
- parsl/tests/test_bash_apps/test_memoize_ignore_args_regr.py +1 -0
- parsl/tests/test_bash_apps/test_multiline.py +1 -0
- parsl/tests/test_bash_apps/test_stdout.py +2 -0
- parsl/tests/test_docs/test_from_slides.py +3 -0
- parsl/tests/test_docs/test_kwargs.py +3 -0
- parsl/tests/test_monitoring/test_basic.py +13 -1
- parsl/tests/test_python_apps/test_outputs.py +1 -0
- parsl/tests/test_regression/test_226.py +1 -0
- parsl/tests/test_staging/test_docs_1.py +1 -0
- parsl/tests/test_staging/test_output_chain_filenames.py +3 -0
- parsl/tests/test_staging/test_staging_ftp.py +1 -0
- parsl/tests/test_staging/test_staging_https.py +3 -0
- parsl/tests/test_staging/test_staging_stdout.py +2 -0
- parsl/version.py +1 -1
- {parsl-2024.10.28.dist-info → parsl-2024.11.4.dist-info}/METADATA +2 -2
- {parsl-2024.10.28.dist-info → parsl-2024.11.4.dist-info}/RECORD +39 -46
- parsl/providers/cobalt/__init__.py +0 -0
- parsl/providers/cobalt/cobalt.py +0 -236
- parsl/providers/cobalt/template.py +0 -17
- parsl/tests/configs/cooley_htex.py +0 -37
- parsl/tests/configs/theta.py +0 -37
- parsl/tests/manual_tests/test_fan_in_out_htex_remote.py +0 -88
- parsl/tests/test_providers/test_cobalt_deprecation_warning.py +0 -18
- {parsl-2024.10.28.data → parsl-2024.11.4.data}/scripts/exec_parsl_function.py +0 -0
- {parsl-2024.10.28.data → parsl-2024.11.4.data}/scripts/interchange.py +0 -0
- {parsl-2024.10.28.data → parsl-2024.11.4.data}/scripts/parsl_coprocess.py +0 -0
- {parsl-2024.10.28.data → parsl-2024.11.4.data}/scripts/process_worker_pool.py +0 -0
- {parsl-2024.10.28.dist-info → parsl-2024.11.4.dist-info}/LICENSE +0 -0
- {parsl-2024.10.28.dist-info → parsl-2024.11.4.dist-info}/WHEEL +0 -0
- {parsl-2024.10.28.dist-info → parsl-2024.11.4.dist-info}/entry_points.txt +0 -0
- {parsl-2024.10.28.dist-info → parsl-2024.11.4.dist-info}/top_level.txt +0 -0
parsl/dataflow/dflow.py
CHANGED
@@ -162,8 +162,8 @@ class DataFlowKernel:
                          }
 
         if self.monitoring:
-            self.monitoring.send(MessageType.WORKFLOW_INFO,
-                                 workflow_info)
+            self.monitoring.send((MessageType.WORKFLOW_INFO,
+                                  workflow_info))
 
         if config.checkpoint_files is not None:
             checkpoints = self.load_checkpoints(config.checkpoint_files)
@@ -238,7 +238,7 @@ class DataFlowKernel:
     def _send_task_log_info(self, task_record: TaskRecord) -> None:
         if self.monitoring:
             task_log_info = self._create_task_log_info(task_record)
-            self.monitoring.send(MessageType.TASK_INFO, task_log_info)
+            self.monitoring.send((MessageType.TASK_INFO, task_log_info))
 
     def _create_task_log_info(self, task_record: TaskRecord) -> Dict[str, Any]:
         """
@@ -1295,12 +1295,12 @@ class DataFlowKernel:
 
         if self.monitoring:
             logger.info("Sending final monitoring message")
-            self.monitoring.send(MessageType.WORKFLOW_INFO,
+            self.monitoring.send((MessageType.WORKFLOW_INFO,
                                  {'tasks_failed_count': self.task_state_counts[States.failed],
                                   'tasks_completed_count': self.task_state_counts[States.exec_done],
                                   "time_began": self.time_began,
                                   'time_completed': self.time_completed,
-                                  'run_id': self.run_id, 'rundir': self.run_dir})
+                                  'run_id': self.run_id, 'rundir': self.run_dir}))
 
         logger.info("Terminating monitoring")
         self.monitoring.close()
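These hunks move the DataFlowKernel onto the monitoring hub's new calling convention: `send` now takes a single pre-tagged tuple instead of separate message-type and payload arguments. A minimal sketch of a caller under the new API; `report_workflow_info` and its `workflow_info` dict are illustrative helpers, not part of the diff:

    from parsl.monitoring.message_type import MessageType

    def report_workflow_info(monitoring, workflow_info: dict) -> None:
        # Old form: monitoring.send(MessageType.WORKFLOW_INFO, workflow_info)
        # New form: one tagged (MessageType, payload) tuple
        monitoring.send((MessageType.WORKFLOW_INFO, workflow_info))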
parsl/executors/high_throughput/executor.py
CHANGED
@@ -63,7 +63,6 @@ DEFAULT_INTERCHANGE_LAUNCH_CMD = ["interchange.py"]
 
 GENERAL_HTEX_PARAM_DOCS = """provider : :class:`~parsl.providers.base.ExecutionProvider`
        Provider to access computation resources. Can be one of :class:`~parsl.providers.aws.aws.EC2Provider`,
-        :class:`~parsl.providers.cobalt.cobalt.Cobalt`,
         :class:`~parsl.providers.condor.condor.Condor`,
         :class:`~parsl.providers.googlecloud.googlecloud.GoogleCloud`,
         :class:`~parsl.providers.gridEngine.gridEngine.GridEngine`,
parsl/executors/high_throughput/mpi_resource_management.py
CHANGED
@@ -17,7 +17,6 @@ class Scheduler(Enum):
     Unknown = 0
     Slurm = 1
     PBS = 2
-    Cobalt = 3
 
 
 def get_slurm_hosts_list() -> List[str]:
@@ -37,13 +36,6 @@ def get_pbs_hosts_list() -> List[str]:
         return [line.strip() for line in f.readlines()]
 
 
-def get_cobalt_hosts_list() -> List[str]:
-    """Get list of COBALT hosts from envvar: COBALT_NODEFILE"""
-    nodefile_name = os.environ["COBALT_NODEFILE"]
-    with open(nodefile_name) as f:
-        return [line.strip() for line in f.readlines()]
-
-
 def get_nodes_in_batchjob(scheduler: Scheduler) -> List[str]:
     """Get nodelist from all supported schedulers"""
     nodelist = []
@@ -51,8 +43,6 @@ def get_nodes_in_batchjob(scheduler: Scheduler) -> List[str]:
         nodelist = get_slurm_hosts_list()
     elif scheduler == Scheduler.PBS:
         nodelist = get_pbs_hosts_list()
-    elif scheduler == Scheduler.Cobalt:
-        nodelist = get_cobalt_hosts_list()
     else:
         raise RuntimeError(f"mpi_mode does not support scheduler:{scheduler}")
     return nodelist
@@ -64,8 +54,6 @@ def identify_scheduler() -> Scheduler:
         return Scheduler.Slurm
     elif os.environ.get("PBS_NODEFILE"):
         return Scheduler.PBS
-    elif os.environ.get("COBALT_NODEFILE"):
-        return Scheduler.Cobalt
     else:
         return Scheduler.Unknown
 
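With Cobalt support removed, scheduler detection only recognises Slurm and PBS; a `COBALT_NODEFILE` environment variable now falls through to `Scheduler.Unknown`. A rough sketch of the new behaviour, assuming no Slurm-related variables are set in the environment and using hypothetical nodefile paths:

    import os

    from parsl.executors.high_throughput.mpi_resource_management import (
        Scheduler,
        identify_scheduler,
    )

    os.environ.pop("PBS_NODEFILE", None)
    os.environ["COBALT_NODEFILE"] = "/var/tmp/cobalt.nodes"  # no longer recognised
    assert identify_scheduler() == Scheduler.Unknown

    os.environ["PBS_NODEFILE"] = "/var/spool/pbs/aux/job.nodes"  # PBS still detected
    assert identify_scheduler() == Scheduler.PBS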
parsl/monitoring/monitoring.py
CHANGED
@@ -7,13 +7,12 @@ import queue
 import time
 from multiprocessing import Event, Process
 from multiprocessing.queues import Queue
-from typing import TYPE_CHECKING,
+from typing import TYPE_CHECKING, Literal, Optional, Tuple, Union, cast
 
 import typeguard
 
 from parsl.log_utils import set_file_logger
 from parsl.monitoring.errors import MonitoringHubStartError
-from parsl.monitoring.message_type import MessageType
 from parsl.monitoring.radios import MultiprocessingQueueRadioSender
 from parsl.monitoring.router import router_starter
 from parsl.monitoring.types import TaggedMonitoringMessage
@@ -202,10 +201,9 @@ class MonitoringHub(RepresentationMixin):
 
         self.hub_zmq_port = zmq_port
 
-
-
-
-            self.radio.send((mtype, message))
+    def send(self, message: TaggedMonitoringMessage) -> None:
+        logger.debug("Sending message type {}".format(message[0]))
+        self.radio.send(message)
 
     def close(self) -> None:
         logger.info("Terminating Monitoring Hub")
parsl/monitoring/radios.py
CHANGED
@@ -5,15 +5,11 @@ import socket
 import uuid
 from abc import ABCMeta, abstractmethod
 from multiprocessing.queues import Queue
-from typing import Optional
 
 import zmq
 
 from parsl.serialize import serialize
 
-_db_manager_excepts: Optional[Exception]
-
-
 logger = logging.getLogger(__name__)
 
 
@@ -41,9 +37,8 @@ class FilesystemRadioSender(MonitoringRadioSender):
     the UDP radio, but should be much more reliable.
     """
 
-    def __init__(self, *, monitoring_url: str,
+    def __init__(self, *, monitoring_url: str, timeout: int = 10, run_dir: str):
         logger.info("filesystem based monitoring channel initializing")
-        self.source_id = source_id
         self.base_path = f"{run_dir}/monitor-fs-radio/"
         self.tmp_path = f"{self.base_path}/tmp"
         self.new_path = f"{self.base_path}/new"
@@ -70,19 +65,16 @@ class FilesystemRadioSender(MonitoringRadioSender):
 
 class HTEXRadioSender(MonitoringRadioSender):
 
-    def __init__(self, monitoring_url: str,
+    def __init__(self, monitoring_url: str, timeout: int = 10):
         """
         Parameters
         ----------
 
         monitoring_url : str
             URL of the form <scheme>://<IP>:<PORT>
-        source_id : str
-            String identifier of the source
         timeout : int
             timeout, default=10s
         """
-        self.source_id = source_id
         logger.info("htex-based monitoring channel initialising")
 
     def send(self, message: object) -> None:
@@ -124,21 +116,18 @@ class HTEXRadioSender(MonitoringRadioSender):
 
 class UDPRadioSender(MonitoringRadioSender):
 
-    def __init__(self, monitoring_url: str,
+    def __init__(self, monitoring_url: str, timeout: int = 10):
         """
         Parameters
         ----------
 
         monitoring_url : str
             URL of the form <scheme>://<IP>:<PORT>
-        source_id : str
-            String identifier of the source
         timeout : int
             timeout, default=10s
         """
         self.monitoring_url = monitoring_url
         self.sock_timeout = timeout
-        self.source_id = source_id
         try:
             self.scheme, self.ip, port = (x.strip('/') for x in monitoring_url.split(':'))
             self.port = int(port)
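The sender constructors no longer take a `source_id`; callers now pass only the monitoring URL, an optional timeout, and (for the filesystem radio) a run directory. A minimal construction sketch against the new signatures, with placeholder URL and run directory:

    from parsl.monitoring.radios import FilesystemRadioSender, UDPRadioSender

    hub_url = "udp://127.0.0.1:55055"   # placeholder monitoring URL
    run_dir = "/tmp/runinfo/000"        # placeholder run directory

    udp_radio = UDPRadioSender(hub_url, timeout=10)
    fs_radio = FilesystemRadioSender(monitoring_url=hub_url, run_dir=run_dir)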
parsl/monitoring/remote.py
CHANGED
@@ -103,14 +103,12 @@ def monitor_wrapper(*,
 def get_radio(radio_mode: str, monitoring_hub_url: str, task_id: int, run_dir: str) -> MonitoringRadioSender:
     radio: MonitoringRadioSender
     if radio_mode == "udp":
-        radio = UDPRadioSender(monitoring_hub_url,
-                               source_id=task_id)
+        radio = UDPRadioSender(monitoring_hub_url)
     elif radio_mode == "htex":
-        radio = HTEXRadioSender(monitoring_hub_url,
-                                source_id=task_id)
+        radio = HTEXRadioSender(monitoring_hub_url)
     elif radio_mode == "filesystem":
         radio = FilesystemRadioSender(monitoring_url=monitoring_hub_url,
-
+                                      run_dir=run_dir)
     else:
         raise RuntimeError(f"Unknown radio mode: {radio_mode}")
     return radio
parsl/providers/__init__.py
CHANGED
@@ -1,7 +1,6 @@
 # Cloud Providers
 from parsl.providers.aws.aws import AWSProvider
 from parsl.providers.azure.azure import AzureProvider
-from parsl.providers.cobalt.cobalt import CobaltProvider
 from parsl.providers.condor.condor import CondorProvider
 from parsl.providers.googlecloud.googlecloud import GoogleCloudProvider
 from parsl.providers.grid_engine.grid_engine import GridEngineProvider
@@ -15,7 +14,6 @@ from parsl.providers.slurm.slurm import SlurmProvider
 from parsl.providers.torque.torque import TorqueProvider
 
 __all__ = ['LocalProvider',
-           'CobaltProvider',
            'CondorProvider',
            'GridEngineProvider',
            'SlurmProvider',
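`CobaltProvider` is gone from `parsl.providers`, so site configurations that still import it will now fail at import time. A hedged migration sketch; the wording of the error is illustrative, and the suggested replacements are just examples of providers that remain exported:

    try:
        from parsl.providers import CobaltProvider  # removed in parsl 2024.11.4
    except ImportError as exc:
        raise RuntimeError(
            "CobaltProvider has been removed from parsl; update this site "
            "configuration to a supported provider such as SlurmProvider "
            "or PBSProProvider."
        ) from exc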
parsl/providers/base.py
CHANGED
@@ -11,7 +11,7 @@ class ExecutionProvider(metaclass=ABCMeta):
     """Execution providers are responsible for managing execution resources
     that have a Local Resource Manager (LRM). For instance, campus clusters
     and supercomputers generally have LRMs (schedulers) such as Slurm,
-    Torque/PBS,
+    Torque/PBS, and Condor. Clouds, on the other hand, have API
     interfaces that allow much more fine-grained composition of an execution
     environment. An execution provider abstracts these types of resources and
     provides a single uniform interface to them.
parsl/tests/conftest.py
CHANGED
@@ -163,6 +163,10 @@ def pytest_configure(config):
         'markers',
         'executor_supports_std_stream_tuples: Marks tests that require tuple support for stdout/stderr'
     )
+    config.addinivalue_line(
+        'markers',
+        'shared_fs: Marks tests that require a shared_fs between the workers are the test client'
+    )
 
 
 @pytest.fixture(autouse=True, scope='session')
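The new `shared_fs` marker tags tests that assume the workers and the test client see the same filesystem, so environments without one can deselect them. A sketch of how a test opts in; the app, file name, and paths are illustrative, and `tmpd_cwd` is the fixture already used throughout these tests:

    import pytest

    from parsl import File, bash_app


    @bash_app
    def touch(outputs=()):
        return "touch {}".format(outputs[0])


    @pytest.mark.shared_fs  # the test client reads a file written by a worker
    def test_touch_visible(tmpd_cwd):
        out = File(str(tmpd_cwd / "marker.txt"))
        touch(outputs=[out]).result()
        assert (tmpd_cwd / "marker.txt").exists()

On a host without a shared filesystem these tests can be skipped with pytest -m "not shared_fs".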
parsl/tests/site_tests/site_config_selector.py
CHANGED
@@ -7,12 +7,7 @@ def fresh_config():
     hostname = os.getenv('PARSL_HOSTNAME', platform.uname().node)
     print("Loading config for {}".format(hostname))
 
-    if 'theta' in hostname:
-        from parsl.tests.configs.theta import fresh_config
-        config = fresh_config()
-        print("Loading Theta config")
-
-    elif 'frontera' in hostname:
+    if 'frontera' in hostname:
         print("Loading Frontera config")
         from parsl.tests.configs.frontera import fresh_config
         config = fresh_config()
parsl/tests/test_bash_apps/test_basic.py
CHANGED
@@ -24,6 +24,7 @@ def foo(x, y, z=10, stdout=None, label=None):
     return f"echo {x} {y} {z}"
 
 
+@pytest.mark.shared_fs
 def test_command_format_1(tmpd_cwd):
     """Testing command format for BashApps"""
 
@@ -38,6 +39,7 @@ def test_command_format_1(tmpd_cwd):
     assert so_content == "1 4 10"
 
 
+@pytest.mark.shared_fs
 def test_auto_log_filename_format(caplog):
     """Testing auto log filename format for BashApps
     """
@@ -66,6 +68,7 @@ def test_auto_log_filename_format(caplog):
         assert record.levelno < logging.ERROR
 
 
+@pytest.mark.shared_fs
 def test_parallel_for(tmpd_cwd, n=3):
     """Testing a simple parallel for loop"""
     outdir = tmpd_cwd / "outputs/test_parallel"
parsl/tests/test_bash_apps/test_error_codes.py
CHANGED
@@ -58,6 +58,7 @@ test_matrix = {
 whitelist = os.path.join(os.path.dirname(os.path.dirname(__file__)), 'configs', '*threads*')
 
 
+@pytest.mark.shared_fs
 def test_div_0(test_fn=div_0):
     err_code = test_matrix[test_fn]['exit_code']
     f = test_fn()
@@ -73,6 +74,7 @@ def test_div_0(test_fn=div_0):
     os.remove('std.out')
 
 
+@pytest.mark.shared_fs
 def test_bash_misuse(test_fn=bash_misuse):
     err_code = test_matrix[test_fn]['exit_code']
     f = test_fn()
@@ -87,6 +89,7 @@ def test_bash_misuse(test_fn=bash_misuse):
     os.remove('std.out')
 
 
+@pytest.mark.shared_fs
 def test_command_not_found(test_fn=command_not_found):
     err_code = test_matrix[test_fn]['exit_code']
     f = test_fn()
@@ -103,6 +106,7 @@ def test_command_not_found(test_fn=command_not_found):
     return True
 
 
+@pytest.mark.shared_fs
 def test_not_executable(test_fn=not_executable):
     err_code = test_matrix[test_fn]['exit_code']
     f = test_fn()
parsl/tests/test_bash_apps/test_memoize.py
CHANGED
@@ -9,9 +9,7 @@ def fail_on_presence(outputs=()):
     return 'if [ -f {0} ] ; then exit 1 ; else touch {0}; fi'.format(outputs[0])
 
 
-
-# won't work if there's a staging provider.
-# @pytest.mark.sharedFS_required
+@pytest.mark.shared_fs
 def test_bash_memoization(tmpd_cwd, n=2):
     """Testing bash memoization
     """
@@ -29,9 +27,7 @@ def fail_on_presence_kw(outputs=(), foo=None):
     return 'if [ -f {0} ] ; then exit 1 ; else touch {0}; fi'.format(outputs[0])
 
 
-
-# won't work if there's a staging provider.
-# @pytest.mark.sharedFS_required
+@pytest.mark.shared_fs
 def test_bash_memoization_keywords(tmpd_cwd, n=2):
     """Testing bash memoization
     """
parsl/tests/test_bash_apps/test_memoize_ignore_args.py
CHANGED
@@ -1,5 +1,7 @@
 import os
 
+import pytest
+
 import parsl
 from parsl.app.app import bash_app
 
@@ -21,6 +23,7 @@ def no_checkpoint_stdout_app_ignore_args(stdout=None):
     return "echo X"
 
 
+@pytest.mark.shared_fs
 def test_memo_stdout(tmpd_cwd):
     path_x = tmpd_cwd / "test.memo.stdout.x"
 
parsl/tests/test_bash_apps/test_stdout.py
CHANGED
@@ -91,6 +91,7 @@ def test_bad_stderr_file():
 
 
 @pytest.mark.executor_supports_std_stream_tuples
+@pytest.mark.shared_fs
 def test_stdout_truncate(tmpd_cwd, caplog):
     """Testing truncation of prior content of stdout"""
 
@@ -110,6 +111,7 @@ def test_stdout_truncate(tmpd_cwd, caplog):
         assert record.levelno < logging.ERROR
 
 
+@pytest.mark.shared_fs
 def test_stdout_append(tmpd_cwd, caplog):
     """Testing appending to prior content of stdout (default open() mode)"""
 
parsl/tests/test_docs/test_from_slides.py
CHANGED
@@ -1,5 +1,7 @@
 import os
 
+import pytest
+
 from parsl.app.app import bash_app, python_app
 from parsl.data_provider.files import File
 
@@ -15,6 +17,7 @@ def cat(inputs=[]):
         return f.readlines()
 
 
+@pytest.mark.staging_required
 def test_slides():
     """Testing code snippet from slides """
 
parsl/tests/test_docs/test_kwargs.py
CHANGED
@@ -1,6 +1,8 @@
 """Functions used to explain kwargs"""
 from pathlib import Path
 
+import pytest
+
 from parsl import File, python_app
 
 
@@ -19,6 +21,7 @@ def test_inputs():
     assert reduce_future.result() == 6
 
 
+@pytest.mark.shared_fs
 def test_outputs(tmpd_cwd):
     @python_app()
     def write_app(message, outputs=()):
parsl/tests/test_monitoring/test_basic.py
CHANGED
@@ -42,6 +42,18 @@ def htex_udp_config():
     return c
 
 
+def htex_filesystem_config():
+    """This config will force filesystem radio"""
+    from parsl.tests.configs.htex_local_alternate import fresh_config
+    c = fresh_config()
+    assert len(c.executors) == 1
+
+    assert c.executors[0].radio_mode == "htex", "precondition: htex has a radio mode attribute, configured for htex radio"
+    c.executors[0].radio_mode = "filesystem"
+
+    return c
+
+
 def workqueue_config():
     from parsl.tests.configs.workqueue_ex import fresh_config
     c = fresh_config()
@@ -61,7 +73,7 @@ def taskvine_config():
 
 
 @pytest.mark.local
-@pytest.mark.parametrize("fresh_config", [htex_config, htex_udp_config, workqueue_config, taskvine_config])
+@pytest.mark.parametrize("fresh_config", [htex_config, htex_filesystem_config, htex_udp_config, workqueue_config, taskvine_config])
 def test_row_counts(tmpd_cwd, fresh_config):
     # this is imported here rather than at module level because
     # it isn't available in a plain parsl install, so this module
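The added `htex_filesystem_config` fixture drives the same row-count checks over the filesystem radio by flipping the executor's `radio_mode` attribute before loading the config. Outside the test suite the switch looks much the same; a rough sketch, assuming a default `HighThroughputExecutor` whose `radio_mode` starts as "htex" and a locally reachable monitoring hub:

    from parsl.config import Config
    from parsl.executors import HighThroughputExecutor
    from parsl.monitoring import MonitoringHub

    htex = HighThroughputExecutor(label="htex_local")
    htex.radio_mode = "filesystem"  # send monitoring messages via the filesystem radio

    config = Config(
        executors=[htex],
        monitoring=MonitoringHub(hub_address="127.0.0.1"),
    )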
parsl/tests/test_staging/test_output_chain_filenames.py
CHANGED
@@ -1,5 +1,7 @@
 from concurrent.futures import Future
 
+import pytest
+
 from parsl import File
 from parsl.app.app import bash_app
 
@@ -14,6 +16,7 @@ def app2(inputs=(), outputs=(), stdout=None, stderr=None, mock=False):
     return f"echo '{inputs[0]}' > {outputs[0]}"
 
 
+@pytest.mark.shared_fs
 def test_behavior(tmpd_cwd):
     expected_path = str(tmpd_cwd / "simple-out.txt")
     app1_future = app1(
parsl/tests/test_staging/test_staging_https.py
CHANGED
@@ -48,6 +48,7 @@ def sort_strings_additional_executor(inputs=(), outputs=()):
 
 
 @pytest.mark.cleannet
+@pytest.mark.staging_required
 def test_staging_https_cleannet(tmpd_cwd):
     unsorted_file = File(_unsorted_url)
     sorted_file = File(tmpd_cwd / 'sorted.txt')
@@ -68,6 +69,7 @@ def test_staging_https_local(tmpd_cwd):
 
 
 @pytest.mark.cleannet
+@pytest.mark.staging_required
 def test_staging_https_kwargs(tmpd_cwd):
     unsorted_file = File(_unsorted_url)
     sorted_file = File(tmpd_cwd / 'sorted.txt')
@@ -78,6 +80,7 @@ def test_staging_https_kwargs(tmpd_cwd):
 
 
 @pytest.mark.cleannet
+@pytest.mark.staging_required
 def test_staging_https_args(tmpd_cwd):
     unsorted_file = File(_unsorted_url)
     sorted_file = File(tmpd_cwd / 'sorted.txt')
parsl/tests/test_staging/test_staging_stdout.py
CHANGED
@@ -15,6 +15,7 @@ def output_to_stds(*, stdout=parsl.AUTO_LOGNAME, stderr=parsl.AUTO_LOGNAME):
     return "echo hello ; echo goodbye >&2"
 
 
+@pytest.mark.staging_required
 def test_stdout_staging_file(tmpd_cwd, caplog):
     basename = str(tmpd_cwd) + "/stdout.txt"
     stdout_file = File("file://" + basename)
@@ -30,6 +31,7 @@ def test_stdout_staging_file(tmpd_cwd, caplog):
         assert record.levelno < logging.ERROR
 
 
+@pytest.mark.staging_required
 def test_stdout_stderr_staging_zip(tmpd_cwd, caplog):
     zipfile_name = str(tmpd_cwd) + "/staging.zip"
     stdout_relative_path = "somewhere/test-out.txt"
{parsl-2024.10.28.dist-info → parsl-2024.11.4.dist-info}/METADATA
CHANGED
@@ -1,9 +1,9 @@
 Metadata-Version: 2.1
 Name: parsl
-Version: 2024.10.28
+Version: 2024.11.4
 Summary: Simple data dependent workflows in Python
 Home-page: https://github.com/Parsl/parsl
-Download-URL: https://github.com/Parsl/parsl/archive/2024.10.28.tar.gz
+Download-URL: https://github.com/Parsl/parsl/archive/2024.11.04.tar.gz
 Author: The Parsl Team
 Author-email: parsl@googlegroups.com
 License: Apache 2.0