parsl 2024.11.11__py3-none-any.whl → 2024.11.25__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- parsl/addresses.py +19 -1
- parsl/curvezmq.py +4 -0
- parsl/dataflow/dflow.py +0 -2
- parsl/executors/execute_task.py +37 -0
- parsl/executors/flux/execute_parsl_task.py +1 -1
- parsl/executors/high_throughput/executor.py +14 -5
- parsl/executors/high_throughput/interchange.py +8 -7
- parsl/executors/high_throughput/mpi_executor.py +2 -0
- parsl/executors/high_throughput/mpi_resource_management.py +2 -3
- parsl/executors/high_throughput/probe.py +4 -4
- parsl/executors/high_throughput/process_worker_pool.py +15 -43
- parsl/executors/high_throughput/zmq_pipes.py +5 -4
- parsl/executors/radical/rpex_worker.py +2 -2
- parsl/executors/workqueue/exec_parsl_function.py +1 -1
- parsl/monitoring/db_manager.py +6 -6
- parsl/monitoring/monitoring.py +9 -14
- parsl/monitoring/router.py +11 -11
- parsl/providers/slurm/slurm.py +25 -3
- parsl/serialize/facade.py +3 -3
- parsl/tests/configs/htex_local.py +1 -0
- parsl/tests/test_execute_task.py +29 -0
- parsl/tests/test_htex/test_zmq_binding.py +3 -2
- parsl/tests/unit/test_address.py +20 -0
- parsl/version.py +1 -1
- {parsl-2024.11.11.data → parsl-2024.11.25.data}/scripts/exec_parsl_function.py +1 -1
- {parsl-2024.11.11.data → parsl-2024.11.25.data}/scripts/interchange.py +8 -7
- {parsl-2024.11.11.data → parsl-2024.11.25.data}/scripts/process_worker_pool.py +15 -43
- {parsl-2024.11.11.dist-info → parsl-2024.11.25.dist-info}/METADATA +2 -2
- {parsl-2024.11.11.dist-info → parsl-2024.11.25.dist-info}/RECORD +34 -31
- {parsl-2024.11.11.dist-info → parsl-2024.11.25.dist-info}/WHEEL +1 -1
- {parsl-2024.11.11.data → parsl-2024.11.25.data}/scripts/parsl_coprocess.py +0 -0
- {parsl-2024.11.11.dist-info → parsl-2024.11.25.dist-info}/LICENSE +0 -0
- {parsl-2024.11.11.dist-info → parsl-2024.11.25.dist-info}/entry_points.txt +0 -0
- {parsl-2024.11.11.dist-info → parsl-2024.11.25.dist-info}/top_level.txt +0 -0
parsl/addresses.py
CHANGED
@@ -6,6 +6,7 @@ The helper to use depends on the network environment around the submitter,
 so some experimentation will probably be needed to choose the correct one.
 """
 
+import ipaddress
 import logging
 import platform
 import socket
@@ -17,7 +18,7 @@ try:
 except ImportError:
     fcntl = None # type: ignore[assignment]
 import struct
-from typing import Callable, List, Set
+from typing import Callable, List, Set, Union
 
 import psutil
 import typeguard
@@ -156,3 +157,20 @@ def get_any_address() -> str:
     if addr == '':
         raise Exception('Cannot find address of the local machine.')
     return addr
+
+
+def tcp_url(address: str, port: Union[str, int, None] = None) -> str:
+    """Construct a tcp url safe for IPv4 and IPv6"""
+    if address == "*":
+        return "tcp://*"
+
+    ip_addr = ipaddress.ip_address(address)
+
+    port_suffix = f":{port}" if port else ""
+
+    if ip_addr.version == 6 and port_suffix:
+        url = f"tcp://[{address}]{port_suffix}"
+    else:
+        url = f"tcp://{address}{port_suffix}"
+
+    return url
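As a quick illustration of the new helper above (a minimal sketch, assuming parsl 2024.11.25 is installed; the addresses and port are placeholder values):

from parsl.addresses import tcp_url

print(tcp_url("127.0.0.1", 55001))  # IPv4 keeps the plain host:port form -> tcp://127.0.0.1:55001
print(tcp_url("::1", 55001))        # IPv6 hosts are bracketed -> tcp://[::1]:55001
print(tcp_url("::1"))               # no port -> tcp://::1
print(tcp_url("*"))                 # wildcard is passed through -> tcp://*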
parsl/curvezmq.py
CHANGED
@@ -160,6 +160,9 @@ class ServerContext(BaseContext):
         except zmq.ZMQError as e:
             raise ValueError("Invalid CurveZMQ key format") from e
         sock.setsockopt(zmq.CURVE_SERVER, True) # Must come before bind
+
+        # This flag enables IPV6 in addition to IPV4
+        sock.setsockopt(zmq.IPV6, True)
         return sock
 
     def term(self):
@@ -202,4 +205,5 @@ class ClientContext(BaseContext):
             sock.setsockopt(zmq.CURVE_SERVERKEY, server_public_key)
         except zmq.ZMQError as e:
             raise ValueError("Invalid CurveZMQ key format") from e
+        sock.setsockopt(zmq.IPV6, True)
         return sock
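Outside of parsl, the same dual-stack pattern can be reproduced with a few lines of plain pyzmq; this is only an illustrative sketch, not parsl code:

import zmq

ctx = zmq.Context()
sock = ctx.socket(zmq.REP)
sock.setsockopt(zmq.IPV6, True)             # must be set before bind to accept IPv6 peers as well as IPv4
port = sock.bind_to_random_port("tcp://*")  # listens on all interfaces, both address families
print(f"bound to port {port}")
sock.close()
ctx.term()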
parsl/dataflow/dflow.py
CHANGED
@@ -111,8 +111,6 @@ class DataFlowKernel:
         self.monitoring = config.monitoring
 
         if self.monitoring:
-            if self.monitoring.logdir is None:
-                self.monitoring.logdir = self.run_dir
             self.monitoring.start(self.run_dir, self.config.run_dir)
 
         self.time_began = datetime.datetime.now()
parsl/executors/execute_task.py
ADDED
@@ -0,0 +1,37 @@
+import os
+
+from parsl.serialize import unpack_res_spec_apply_message
+
+
+def execute_task(bufs: bytes):
+    """Deserialize the buffer and execute the task.
+    Returns the result or throws exception.
+    """
+    f, args, kwargs, resource_spec = unpack_res_spec_apply_message(bufs)
+
+    for varname in resource_spec:
+        envname = "PARSL_" + str(varname).upper()
+        os.environ[envname] = str(resource_spec[varname])
+
+    # We might need to look into callability of the function from itself
+    # since we change it's name in the new namespace
+    prefix = "parsl_"
+    fname = prefix + "f"
+    argname = prefix + "args"
+    kwargname = prefix + "kwargs"
+    resultname = prefix + "result"
+
+    code = "{0} = {1}(*{2}, **{3})".format(resultname, fname,
+                                           argname, kwargname)
+
+    user_ns = locals()
+    user_ns.update({
+        '__builtins__': __builtins__,
+        fname: f,
+        argname: args,
+        kwargname: kwargs,
+        resultname: resultname
+    })
+
+    exec(code, user_ns, user_ns)
+    return user_ns.get(resultname)
parsl/executors/flux/execute_parsl_task.py
CHANGED
@@ -4,8 +4,8 @@ import argparse
 import logging
 import os
 
+from parsl.executors.execute_task import execute_task
 from parsl.executors.flux import TaskResult
-from parsl.executors.high_throughput.process_worker_pool import execute_task
 from parsl.serialize import serialize
 
 
parsl/executors/high_throughput/executor.py
CHANGED
@@ -86,7 +86,7 @@ GENERAL_HTEX_PARAM_DOCS = """provider : :class:`~parsl.providers.base.ExecutionP
 
     address : string
         An address to connect to the main Parsl process which is reachable from the network in which
-        workers will be running. This field expects an IPv4 address
+        workers will be running. This field expects an IPv4 or IPv6 address.
        Most login nodes on clusters have several network interfaces available, only some of which
        can be reached from the compute nodes. This field can be used to limit the executor to listen
        only on a specific interface, and limiting connections to the internal network.
@@ -94,6 +94,11 @@ GENERAL_HTEX_PARAM_DOCS = """provider : :class:`~parsl.providers.base.ExecutionP
        Setting an address here overrides the default behavior.
        default=None
 
+    loopback_address: string
+        Specify address used for internal communication between executor and interchange.
+        Supports IPv4 and IPv6 addresses
+        default=127.0.0.1
+
    worker_ports : (int, int)
        Specify the ports to be used by workers to connect to Parsl. If this option is specified,
        worker_port_range will not be honored.
@@ -224,6 +229,7 @@ class HighThroughputExecutor(BlockProviderExecutor, RepresentationMixin, UsageIn
        Parsl will create names as integers starting with 0.
 
        default: empty list
+
    """
 
    @typeguard.typechecked
@@ -233,6 +239,7 @@ class HighThroughputExecutor(BlockProviderExecutor, RepresentationMixin, UsageIn
                 launch_cmd: Optional[str] = None,
                 interchange_launch_cmd: Optional[Sequence[str]] = None,
                 address: Optional[str] = None,
+                 loopback_address: str = "127.0.0.1",
                 worker_ports: Optional[Tuple[int, int]] = None,
                 worker_port_range: Optional[Tuple[int, int]] = (54000, 55000),
                 interchange_port_range: Optional[Tuple[int, int]] = (55000, 56000),
@@ -268,6 +275,8 @@ class HighThroughputExecutor(BlockProviderExecutor, RepresentationMixin, UsageIn
        self.address = address
        self.address_probe_timeout = address_probe_timeout
        self.manager_selector = manager_selector
+        self.loopback_address = loopback_address
+
        if self.address:
            self.all_addresses = address
        else:
@@ -408,13 +417,13 @@ class HighThroughputExecutor(BlockProviderExecutor, RepresentationMixin, UsageIn
        )
 
        self.outgoing_q = zmq_pipes.TasksOutgoing(
-
+            self.loopback_address, self.interchange_port_range, self.cert_dir
        )
        self.incoming_q = zmq_pipes.ResultsIncoming(
-
+            self.loopback_address, self.interchange_port_range, self.cert_dir
        )
        self.command_client = zmq_pipes.CommandClient(
-
+            self.loopback_address, self.interchange_port_range, self.cert_dir
        )
 
        self._result_queue_thread = None
@@ -515,7 +524,7 @@ class HighThroughputExecutor(BlockProviderExecutor, RepresentationMixin, UsageIn
        get the worker task and result ports that the interchange has bound to.
        """
 
-        interchange_config = {"client_address":
+        interchange_config = {"client_address": self.loopback_address,
                              "client_ports": (self.outgoing_q.port,
                                               self.incoming_q.port,
                                               self.command_client.port),
parsl/executors/high_throughput/interchange.py
CHANGED
@@ -14,6 +14,7 @@ from typing import Any, Dict, List, NoReturn, Optional, Sequence, Set, Tuple, ca
 import zmq
 
 from parsl import curvezmq
+from parsl.addresses import tcp_url
 from parsl.app.errors import RemoteExceptionWrapper
 from parsl.executors.high_throughput.errors import ManagerLost, VersionMismatch
 from parsl.executors.high_throughput.manager_record import ManagerRecord
@@ -115,13 +116,13 @@ class Interchange:
         self.zmq_context = curvezmq.ServerContext(self.cert_dir)
         self.task_incoming = self.zmq_context.socket(zmq.DEALER)
         self.task_incoming.set_hwm(0)
-        self.task_incoming.connect(
+        self.task_incoming.connect(tcp_url(client_address, client_ports[0]))
         self.results_outgoing = self.zmq_context.socket(zmq.DEALER)
         self.results_outgoing.set_hwm(0)
-        self.results_outgoing.connect(
+        self.results_outgoing.connect(tcp_url(client_address, client_ports[1]))
 
         self.command_channel = self.zmq_context.socket(zmq.REP)
-        self.command_channel.connect(
+        self.command_channel.connect(tcp_url(client_address, client_ports[2]))
         logger.info("Connected to client")
 
         self.run_id = run_id
@@ -144,14 +145,14 @@ class Interchange:
             self.worker_task_port = self.worker_ports[0]
             self.worker_result_port = self.worker_ports[1]
 
-            self.task_outgoing.bind(
-            self.results_incoming.bind(
+            self.task_outgoing.bind(tcp_url(self.interchange_address, self.worker_task_port))
+            self.results_incoming.bind(tcp_url(self.interchange_address, self.worker_result_port))
 
         else:
-            self.worker_task_port = self.task_outgoing.bind_to_random_port(
+            self.worker_task_port = self.task_outgoing.bind_to_random_port(tcp_url(self.interchange_address),
                                                                            min_port=worker_port_range[0],
                                                                            max_port=worker_port_range[1], max_tries=100)
-            self.worker_result_port = self.results_incoming.bind_to_random_port(
+            self.worker_result_port = self.results_incoming.bind_to_random_port(tcp_url(self.interchange_address),
                                                                                 min_port=worker_port_range[0],
                                                                                 max_port=worker_port_range[1], max_tries=100)
 
parsl/executors/high_throughput/mpi_executor.py
CHANGED
@@ -50,6 +50,7 @@ class MPIExecutor(HighThroughputExecutor):
                 launch_cmd: Optional[str] = None,
                 interchange_launch_cmd: Optional[str] = None,
                 address: Optional[str] = None,
+                 loopback_address: str = "127.0.0.1",
                 worker_ports: Optional[Tuple[int, int]] = None,
                 worker_port_range: Optional[Tuple[int, int]] = (54000, 55000),
                 interchange_port_range: Optional[Tuple[int, int]] = (55000, 56000),
@@ -78,6 +79,7 @@ class MPIExecutor(HighThroughputExecutor):
            launch_cmd=launch_cmd,
            interchange_launch_cmd=interchange_launch_cmd,
            address=address,
+            loopback_address=loopback_address,
            worker_ports=worker_ports,
            worker_port_range=worker_port_range,
            interchange_port_range=interchange_port_range,
parsl/executors/high_throughput/mpi_resource_management.py
CHANGED
@@ -160,9 +160,7 @@ class MPITaskScheduler(TaskScheduler):
         """Schedule task if resources are available otherwise backlog the task"""
         user_ns = locals()
         user_ns.update({"__builtins__": __builtins__})
-        _f, _args, _kwargs, resource_spec = unpack_res_spec_apply_message(
-            task_package["buffer"], user_ns, copy=False
-        )
+        _f, _args, _kwargs, resource_spec = unpack_res_spec_apply_message(task_package["buffer"])
 
         nodes_needed = resource_spec.get("num_nodes")
         if nodes_needed:
@@ -177,6 +175,7 @@ class MPITaskScheduler(TaskScheduler):
             self._map_tasks_to_nodes[task_package["task_id"]] = allocated_nodes
             buffer = pack_res_spec_apply_message(_f, _args, _kwargs, resource_spec)
             task_package["buffer"] = buffer
+            task_package["resource_spec"] = resource_spec
 
         self.pending_task_q.put(task_package)
 
parsl/executors/high_throughput/probe.py
CHANGED
@@ -6,7 +6,7 @@ import uuid
 import zmq
 from zmq.utils.monitor import recv_monitor_message
 
-from parsl.addresses import get_all_addresses
+from parsl.addresses import get_all_addresses, tcp_url
 
 logger = logging.getLogger(__name__)
 
@@ -32,7 +32,8 @@ def probe_addresses(addresses, task_port, timeout=120):
     for addr in addresses:
         socket = context.socket(zmq.DEALER)
         socket.setsockopt(zmq.LINGER, 0)
-
+        socket.setsockopt(zmq.IPV6, True)
+        url = tcp_url(addr, task_port)
         logger.debug("Trying to connect back on {}".format(url))
         socket.connect(url)
         addr_map[addr] = {'sock': socket,
@@ -71,8 +72,7 @@ class TestWorker:
 
         address = probe_addresses(addresses, port)
         print("Viable address :", address)
-        self.task_incoming.connect(
-        print("Here")
+        self.task_incoming.connect(tcp_url(address, port))
 
     def heartbeat(self):
         """ Send heartbeat to the incoming task queue
parsl/executors/high_throughput/process_worker_pool.py
CHANGED
@@ -22,7 +22,9 @@ import psutil
 import zmq
 
 from parsl import curvezmq
+from parsl.addresses import tcp_url
 from parsl.app.errors import RemoteExceptionWrapper
+from parsl.executors.execute_task import execute_task
 from parsl.executors.high_throughput.errors import WorkerLost
 from parsl.executors.high_throughput.mpi_prefix_composer import (
     VALID_LAUNCHERS,
@@ -35,7 +37,7 @@ from parsl.executors.high_throughput.mpi_resource_management import (
 from parsl.executors.high_throughput.probe import probe_addresses
 from parsl.multiprocessing import SpawnContext
 from parsl.process_loggers import wrap_with_logs
-from parsl.serialize import serialize
+from parsl.serialize import serialize
 from parsl.version import VERSION as PARSL_VERSION
 
 HEARTBEAT_CODE = (2 ** 32) - 1
@@ -158,8 +160,8 @@ class Manager:
                 raise Exception("No viable address found")
             else:
                 logger.info("Connection to Interchange successful on {}".format(ix_address))
-                task_q_url =
-                result_q_url =
+                task_q_url = tcp_url(ix_address, task_port)
+                result_q_url = tcp_url(ix_address, result_port)
                 logger.info("Task url : {}".format(task_q_url))
                 logger.info("Result url : {}".format(result_q_url))
         except Exception:
@@ -590,45 +592,13 @@ def update_resource_spec_env_vars(mpi_launcher: str, resource_spec: Dict, node_i
         os.environ[key] = prefix_table[key]
 
 
-def
-
-
-
-
-
-
-
-    f, args, kwargs, resource_spec = unpack_res_spec_apply_message(bufs, user_ns, copy=False)
-
-    for varname in resource_spec:
-        envname = "PARSL_" + str(varname).upper()
-        os.environ[envname] = str(resource_spec[varname])
-
-    if resource_spec.get("MPI_NODELIST"):
-        worker_id = os.environ['PARSL_WORKER_RANK']
-        nodes_for_task = resource_spec["MPI_NODELIST"].split(',')
-        logger.info(f"Launching task on provisioned nodes: {nodes_for_task}")
-        assert mpi_launcher
-        update_resource_spec_env_vars(mpi_launcher,
-                                      resource_spec=resource_spec,
-                                      node_info=nodes_for_task)
-    # We might need to look into callability of the function from itself
-    # since we change it's name in the new namespace
-    prefix = "parsl_"
-    fname = prefix + "f"
-    argname = prefix + "args"
-    kwargname = prefix + "kwargs"
-    resultname = prefix + "result"
-
-    user_ns.update({fname: f,
-                    argname: args,
-                    kwargname: kwargs,
-                    resultname: resultname})
-
-    code = "{0} = {1}(*{2}, **{3})".format(resultname, fname,
-                                           argname, kwargname)
-    exec(code, user_ns, user_ns)
-    return user_ns.get(resultname)
+def _init_mpi_env(mpi_launcher: str, resource_spec: Dict):
+    node_list = resource_spec.get("MPI_NODELIST")
+    if node_list is None:
+        return
+    nodes_for_task = node_list.split(',')
+    logger.info(f"Launching task on provisioned nodes: {nodes_for_task}")
+    update_resource_spec_env_vars(mpi_launcher=mpi_launcher, resource_spec=resource_spec, node_info=nodes_for_task)
 
 
 @wrap_with_logs(target="worker_log")
@@ -786,8 +756,10 @@ def worker(
             ready_worker_count.value -= 1
             worker_enqueued = False
 
+            _init_mpi_env(mpi_launcher=mpi_launcher, resource_spec=req["resource_spec"])
+
             try:
-                result = execute_task(req['buffer']
+                result = execute_task(req['buffer'])
                 serialized_result = serialize(result, buffer_threshold=1000000)
             except Exception as e:
                 logger.info('Caught an exception: {}'.format(e))
parsl/executors/high_throughput/zmq_pipes.py
CHANGED
@@ -8,6 +8,7 @@ from typing import Optional
 import zmq
 
 from parsl import curvezmq
+from parsl.addresses import tcp_url
 from parsl.errors import InternalConsistencyError
 from parsl.executors.high_throughput.errors import (
     CommandClientBadError,
@@ -52,11 +53,11 @@ class CommandClient:
         self.zmq_socket = self.zmq_context.socket(zmq.REQ)
         self.zmq_socket.setsockopt(zmq.LINGER, 0)
         if self.port is None:
-            self.port = self.zmq_socket.bind_to_random_port(
+            self.port = self.zmq_socket.bind_to_random_port(tcp_url(self.ip_address),
                                                             min_port=self.port_range[0],
                                                             max_port=self.port_range[1])
         else:
-            self.zmq_socket.bind(
+            self.zmq_socket.bind(tcp_url(self.ip_address, self.port))
 
     def run(self, message, max_retries=3, timeout_s=None):
         """ This function needs to be fast at the same time aware of the possibility of
@@ -146,7 +147,7 @@ class TasksOutgoing:
         self.zmq_context = curvezmq.ClientContext(cert_dir)
         self.zmq_socket = self.zmq_context.socket(zmq.DEALER)
         self.zmq_socket.set_hwm(0)
-        self.port = self.zmq_socket.bind_to_random_port(
+        self.port = self.zmq_socket.bind_to_random_port(tcp_url(ip_address),
                                                         min_port=port_range[0],
                                                         max_port=port_range[1])
         self.poller = zmq.Poller()
@@ -202,7 +203,7 @@ class ResultsIncoming:
         self.zmq_context = curvezmq.ClientContext(cert_dir)
         self.results_receiver = self.zmq_context.socket(zmq.DEALER)
         self.results_receiver.set_hwm(0)
-        self.port = self.results_receiver.bind_to_random_port(
+        self.port = self.results_receiver.bind_to_random_port(tcp_url(ip_address),
                                                               min_port=port_range[0],
                                                               max_port=port_range[1])
 
parsl/executors/radical/rpex_worker.py
CHANGED
@@ -4,7 +4,7 @@ import radical.pilot as rp
 
 import parsl.app.errors as pe
 from parsl.app.bash import remote_side_bash_executor
-from parsl.executors.
+from parsl.executors.execute_task import execute_task
 from parsl.serialize import serialize, unpack_res_spec_apply_message
 
 
@@ -33,7 +33,7 @@ class ParslWorker:
 
         try:
             buffer = rp.utils.deserialize_bson(task['description']['executable'])
-            func, args, kwargs, _resource_spec = unpack_res_spec_apply_message(buffer
+            func, args, kwargs, _resource_spec = unpack_res_spec_apply_message(buffer)
             ret = remote_side_bash_executor(func, *args, **kwargs)
             exc = (None, None)
             val = None
parsl/executors/workqueue/exec_parsl_function.py
CHANGED
@@ -94,7 +94,7 @@ def unpack_source_code_function(function_info, user_namespace):
 
 def unpack_byte_code_function(function_info, user_namespace):
     from parsl.serialize import unpack_apply_message
-    func, args, kwargs = unpack_apply_message(function_info["byte code"]
+    func, args, kwargs = unpack_apply_message(function_info["byte code"])
     return (func, 'parsl_function_name', args, kwargs)
 
 
parsl/monitoring/db_manager.py
CHANGED
@@ -279,7 +279,7 @@ class Database:
 class DatabaseManager:
     def __init__(self,
                  db_url: str = 'sqlite:///runinfo/monitoring.db',
-
+                 run_dir: str = '.',
                  logging_level: int = logging.INFO,
                  batching_interval: float = 1,
                  batching_threshold: float = 99999,
@@ -287,12 +287,12 @@ class DatabaseManager:
 
         self.workflow_end = False
         self.workflow_start_message: Optional[MonitoringMessage] = None
-        self.
-        os.makedirs(self.
+        self.run_dir = run_dir
+        os.makedirs(self.run_dir, exist_ok=True)
 
         logger.propagate = False
 
-        set_file_logger("{}/database_manager.log"
+        set_file_logger(f"{self.run_dir}/database_manager.log", level=logging_level,
                         format_string="%(asctime)s.%(msecs)03d %(name)s:%(lineno)d [%(levelname)s] [%(threadName)s %(thread)d] %(message)s",
                         name="database_manager")
 
@@ -681,7 +681,7 @@ class DatabaseManager:
 def dbm_starter(exception_q: mpq.Queue,
                 resource_msgs: mpq.Queue,
                 db_url: str,
-
+                run_dir: str,
                 logging_level: int) -> None:
     """Start the database manager process
 
@@ -692,7 +692,7 @@ def dbm_starter(exception_q: mpq.Queue,
 
     try:
        dbm = DatabaseManager(db_url=db_url,
-
+                              run_dir=run_dir,
                              logging_level=logging_level)
        logger.info("Starting dbm in dbm starter")
        dbm.start(resource_msgs)
parsl/monitoring/monitoring.py
CHANGED
@@ -44,7 +44,6 @@ class MonitoringHub(RepresentationMixin):
                 workflow_name: Optional[str] = None,
                 workflow_version: Optional[str] = None,
                 logging_endpoint: Optional[str] = None,
-                 logdir: Optional[str] = None,
                 monitoring_debug: bool = False,
                 resource_monitoring_enabled: bool = True,
                 resource_monitoring_interval: float = 30):  # in seconds
@@ -73,8 +72,6 @@ class MonitoringHub(RepresentationMixin):
        The database connection url for monitoring to log the information.
        These URLs follow RFC-1738, and can include username, password, hostname, database name.
        Default: sqlite, in the configured run_dir.
-    logdir : str
-        Parsl log directory paths. Logs and temp files go here. Default: '.'
    monitoring_debug : Bool
        Enable monitoring debug logging. Default: False
    resource_monitoring_enabled : boolean
@@ -96,7 +93,6 @@ class MonitoringHub(RepresentationMixin):
        self.hub_port_range = hub_port_range
 
        self.logging_endpoint = logging_endpoint
-        self.logdir = logdir
        self.monitoring_debug = monitoring_debug
 
        self.workflow_name = workflow_name
@@ -109,13 +105,10 @@ class MonitoringHub(RepresentationMixin):
 
        logger.debug("Starting MonitoringHub")
 
-        if self.logdir is None:
-            self.logdir = "."
-
        if self.logging_endpoint is None:
            self.logging_endpoint = f"sqlite:///{os.fspath(config_run_dir)}/monitoring.db"
 
-        os.makedirs(
+        os.makedirs(dfk_run_dir, exist_ok=True)
 
        self.monitoring_hub_active = True
 
@@ -151,7 +144,7 @@ class MonitoringHub(RepresentationMixin):
                                         "hub_address": self.hub_address,
                                         "udp_port": self.hub_port,
                                         "zmq_port_range": self.hub_port_range,
-                                         "
+                                         "run_dir": dfk_run_dir,
                                         "logging_level": logging.DEBUG if self.monitoring_debug else logging.INFO,
                                     },
                                     name="Monitoring-Router-Process",
@@ -161,7 +154,7 @@ class MonitoringHub(RepresentationMixin):
 
        self.dbm_proc = ForkProcess(target=dbm_starter,
                                    args=(self.exception_q, self.resource_msgs,),
-                                    kwargs={"
+                                    kwargs={"run_dir": dfk_run_dir,
                                            "logging_level": logging.DEBUG if self.monitoring_debug else logging.INFO,
                                            "db_url": self.logging_endpoint,
                                            },
@@ -172,7 +165,7 @@ class MonitoringHub(RepresentationMixin):
        logger.info("Started the router process %s and DBM process %s", self.router_proc.pid, self.dbm_proc.pid)
 
        self.filesystem_proc = ForkProcess(target=filesystem_receiver,
-                                           args=(self.
+                                           args=(self.resource_msgs, dfk_run_dir),
                                           name="Monitoring-Filesystem-Process",
                                           daemon=True
                                           )
@@ -258,8 +251,8 @@ class MonitoringHub(RepresentationMixin):
 
 
 @wrap_with_logs
-def filesystem_receiver(
-    logger = set_file_logger("{}/monitoring_filesystem_radio.log"
+def filesystem_receiver(q: Queue[TaggedMonitoringMessage], run_dir: str) -> None:
+    logger = set_file_logger(f"{run_dir}/monitoring_filesystem_radio.log",
                            name="monitoring_filesystem_radio",
                            level=logging.INFO)
 
@@ -270,6 +263,8 @@ def filesystem_receiver(logdir: str, q: Queue[TaggedMonitoringMessage], run_dir:
    new_dir = f"{base_path}/new/"
    logger.debug("Creating new and tmp paths under %s", base_path)
 
+    target_radio = MultiprocessingQueueRadioSender(q)
+
    os.makedirs(tmp_dir, exist_ok=True)
    os.makedirs(new_dir, exist_ok=True)
 
@@ -285,7 +280,7 @@ def filesystem_receiver(logdir: str, q: Queue[TaggedMonitoringMessage], run_dir:
                    message = pickle.load(f)
                logger.debug("Message received is: %s", message)
                assert isinstance(message, tuple)
-
+                target_radio.send(cast(TaggedMonitoringMessage, message))
                os.remove(full_path_filename)
            except Exception:
                logger.exception("Exception processing %s - probably will be retried next iteration", filename)
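Since MonitoringHub no longer takes a `logdir` argument, monitoring logs and the default database follow the run directory handed down by the DataFlowKernel; a minimal sketch, assuming the standard parsl configuration APIs and a placeholder run directory:

from parsl.config import Config
from parsl.executors import HighThroughputExecutor
from parsl.monitoring.monitoring import MonitoringHub

# Illustrative only: monitoring files (monitoring.db, router and database manager logs)
# now land under the configured run_dir rather than a separate logdir.
config = Config(
    executors=[HighThroughputExecutor(label="htex_local")],
    monitoring=MonitoringHub(hub_address="127.0.0.1"),
    run_dir="runinfo",  # placeholder run directory
)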
parsl/monitoring/router.py
CHANGED
@@ -14,6 +14,7 @@ import typeguard
 import zmq
 
 from parsl.log_utils import set_file_logger
+from parsl.monitoring.radios import MultiprocessingQueueRadioSender
 from parsl.monitoring.types import TaggedMonitoringMessage
 from parsl.process_loggers import wrap_with_logs
 from parsl.utils import setproctitle
@@ -30,7 +31,7 @@ class MonitoringRouter:
                 zmq_port_range: Tuple[int, int] = (55050, 56000),
 
                 monitoring_hub_address: str = "127.0.0.1",
-
+                 run_dir: str = ".",
                 logging_level: int = logging.INFO,
                 atexit_timeout: int = 3,  # in seconds
                 resource_msgs: mpq.Queue,
@@ -47,7 +48,7 @@ class MonitoringRouter:
        zmq_port_range : tuple(int, int)
            The MonitoringHub picks ports at random from the range which will be used by Hub.
            Default: (55050, 56000)
-
+        run_dir : str
            Parsl log directory paths. Logs and temp files go here. Default: '.'
        logging_level : int
            Logging level as defined in the logging module. Default: logging.INFO
@@ -55,12 +56,11 @@ class MonitoringRouter:
            The amount of time in seconds to terminate the hub without receiving any messages, after the last dfk workflow message is received.
        resource_msgs : multiprocessing.Queue
            A multiprocessing queue to receive messages to be routed onwards to the database process
-
        exit_event : Event
            An event that the main Parsl process will set to signal that the monitoring router should shut down.
        """
-        os.makedirs(
-        self.logger = set_file_logger("{}/monitoring_router.log"
+        os.makedirs(run_dir, exist_ok=True)
+        self.logger = set_file_logger(f"{run_dir}/monitoring_router.log",
                                      name="monitoring_router",
                                      level=logging_level)
        self.logger.debug("Monitoring router starting")
@@ -98,7 +98,7 @@ class MonitoringRouter:
                                                              min_port=zmq_port_range[0],
                                                              max_port=zmq_port_range[1])
 
-        self.
+        self.target_radio = MultiprocessingQueueRadioSender(resource_msgs)
        self.exit_event = exit_event
 
    @wrap_with_logs(target="monitoring_router")
@@ -125,7 +125,7 @@ class MonitoringRouter:
                data, addr = self.udp_sock.recvfrom(2048)
                resource_msg = pickle.loads(data)
                self.logger.debug("Got UDP Message from {}: {}".format(addr, resource_msg))
-                self.
+                self.target_radio.send(resource_msg)
            except socket.timeout:
                pass
 
@@ -136,7 +136,7 @@ class MonitoringRouter:
                data, addr = self.udp_sock.recvfrom(2048)
                msg = pickle.loads(data)
                self.logger.debug("Got UDP Message from {}: {}".format(addr, msg))
-                self.
+                self.target_radio.send(msg)
                last_msg_received_time = time.time()
            except socket.timeout:
                pass
@@ -160,7 +160,7 @@ class MonitoringRouter:
                    assert len(msg) >= 1, "ZMQ Receiver expects tuples of length at least 1, got {}".format(msg)
                    assert len(msg) == 2, "ZMQ Receiver expects message tuples of exactly length 2, got {}".format(msg)
 
-                    self.
+                    self.target_radio.send(msg)
                except zmq.Again:
                    pass
                except Exception:
@@ -187,14 +187,14 @@ def router_starter(*,
                   udp_port: Optional[int],
                   zmq_port_range: Tuple[int, int],
 
-
+                   run_dir: str,
                   logging_level: int) -> None:
    setproctitle("parsl: monitoring router")
    try:
        router = MonitoringRouter(hub_address=hub_address,
                                  udp_port=udp_port,
                                  zmq_port_range=zmq_port_range,
-
+                                  run_dir=run_dir,
                                  logging_level=logging_level,
                                  resource_msgs=resource_msgs,
                                  exit_event=exit_event)
parsl/providers/slurm/slurm.py
CHANGED
@@ -70,6 +70,9 @@ class SlurmProvider(ClusterProvider, RepresentationMixin):
         Slurm queue to place job in. If unspecified or ``None``, no queue slurm directive will be specified.
     constraint : str
         Slurm job constraint, often used to choose cpu or gpu type. If unspecified or ``None``, no constraint slurm directive will be added.
+    clusters : str
+        Slurm cluster name, or comma seperated cluster list, used to choose between different clusters in a federated Slurm instance.
+        If unspecified or ``None``, no slurm directive for clusters will be added.
     channel : Channel
         Channel for accessing this provider.
     nodes_per_block : int
@@ -116,6 +119,7 @@ class SlurmProvider(ClusterProvider, RepresentationMixin):
                 account: Optional[str] = None,
                 qos: Optional[str] = None,
                 constraint: Optional[str] = None,
+                 clusters: Optional[str] = None,
                 channel: Channel = LocalChannel(),
                 nodes_per_block: int = 1,
                 cores_per_node: Optional[int] = None,
@@ -152,6 +156,7 @@ class SlurmProvider(ClusterProvider, RepresentationMixin):
        self.account = account
        self.qos = qos
        self.constraint = constraint
+        self.clusters = clusters
        self.scheduler_options = scheduler_options + '\n'
        if exclusive:
            self.scheduler_options += "#SBATCH --exclusive\n"
@@ -163,6 +168,8 @@ class SlurmProvider(ClusterProvider, RepresentationMixin):
            self.scheduler_options += "#SBATCH --qos={}\n".format(qos)
        if constraint:
            self.scheduler_options += "#SBATCH --constraint={}\n".format(constraint)
+        if clusters:
+            self.scheduler_options += "#SBATCH --clusters={}\n".format(clusters)
 
        self.regex_job_id = regex_job_id
        self.worker_init = worker_init + '\n'
@@ -174,14 +181,22 @@ class SlurmProvider(ClusterProvider, RepresentationMixin):
        logger.debug(f"sacct returned retcode={retcode} stderr={stderr}")
        if retcode == 0:
            logger.debug("using sacct to get job status")
+            _cmd = "sacct"
+            # Add clusters option to sacct if provided
+            if self.clusters:
+                _cmd += f" --clusters={self.clusters}"
            # Using state%20 to get enough characters to not truncate output
            # of the state. Without output can look like "<job_id> CANCELLED+"
-            self._cmd = "
+            self._cmd = _cmd + " -X --noheader --format=jobid,state%20 --job '{0}'"
            self._translate_table = sacct_translate_table
        else:
            logger.debug(f"sacct failed with retcode={retcode}")
            logger.debug("falling back to using squeue to get job status")
-
+            _cmd = "squeue"
+            # Add clusters option to squeue if provided
+            if self.clusters:
+                _cmd += f" --clusters={self.clusters}"
+            self._cmd = _cmd + " --noheader --format='%i %t' --job '{0}'"
            self._translate_table = squeue_translate_table
 
    def _status(self):
@@ -344,7 +359,14 @@ class SlurmProvider(ClusterProvider, RepresentationMixin):
        '''
 
        job_id_list = ' '.join(job_ids)
-
+
+        # Make the command to cancel jobs
+        _cmd = "scancel"
+        if self.clusters:
+            _cmd += f" --clusters={self.clusters}"
+        _cmd += " {0}"
+
+        retcode, stdout, stderr = self.execute_wait(_cmd.format(job_id_list))
        rets = None
        if retcode == 0:
            for jid in job_ids:
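A hedged usage sketch of the new `clusters` option; the partition and cluster names below are placeholders:

from parsl.providers import SlurmProvider

# Illustrative only: with `clusters` set, the provider adds
# "#SBATCH --clusters=cluster_a,cluster_b" to the submit script and passes
# --clusters to sacct/squeue/scancel so status checks and cancellation hit the right cluster.
provider = SlurmProvider(
    partition="normal",              # placeholder partition
    clusters="cluster_a,cluster_b",  # placeholder federated cluster list
    nodes_per_block=1,
)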
parsl/serialize/facade.py
CHANGED
@@ -87,16 +87,16 @@ def pack_res_spec_apply_message(func: Any, args: Any, kwargs: Any, resource_spec
     return pack_apply_message(func, args, (kwargs, resource_specification), buffer_threshold=buffer_threshold)
 
 
-def unpack_apply_message(packed_buffer: bytes
+def unpack_apply_message(packed_buffer: bytes) -> List[Any]:
     """ Unpack and deserialize function and parameters
     """
     return [deserialize(buf) for buf in unpack_buffers(packed_buffer)]
 
 
-def unpack_res_spec_apply_message(packed_buffer: bytes
+def unpack_res_spec_apply_message(packed_buffer: bytes) -> List[Any]:
     """ Unpack and deserialize function, parameters, and resource_specification
     """
-    func, args, (kwargs, resource_spec) = unpack_apply_message(packed_buffer
+    func, args, (kwargs, resource_spec) = unpack_apply_message(packed_buffer)
     return [func, args, kwargs, resource_spec]
 
 
parsl/tests/test_execute_task.py
ADDED
@@ -0,0 +1,29 @@
+import os
+
+import pytest
+
+from parsl.executors.execute_task import execute_task
+from parsl.serialize.facade import pack_res_spec_apply_message
+
+
+def addemup(*args: int, name: str = "apples"):
+    total = sum(args)
+    return f"{total} {name}"
+
+
+@pytest.mark.local
+def test_execute_task():
+    args = (1, 2, 3)
+    kwargs = {"name": "boots"}
+    buff = pack_res_spec_apply_message(addemup, args, kwargs, {})
+    res = execute_task(buff)
+    assert res == addemup(*args, **kwargs)
+
+
+@pytest.mark.local
+def test_execute_task_resource_spec():
+    resource_spec = {"num_nodes": 2, "ranks_per_node": 2, "num_ranks": 4}
+    buff = pack_res_spec_apply_message(addemup, (1, 2), {}, resource_spec)
+    execute_task(buff)
+    for key, val in resource_spec.items():
+        assert os.environ[f"PARSL_{key.upper()}"] == str(val)
parsl/tests/test_htex/test_zmq_binding.py
CHANGED
@@ -87,7 +87,7 @@ def test_interchange_binding_with_non_ipv4_address(cert_dir: Optional[str]):
 def test_interchange_binding_bad_address(cert_dir: Optional[str]):
     """Confirm that we raise a ZMQError when a bad address is supplied"""
     address = "550.0.0.0"
-    with pytest.raises(
+    with pytest.raises(ValueError):
         make_interchange(interchange_address=address, cert_dir=cert_dir)
 
 
@@ -103,4 +103,5 @@ def test_limited_interface_binding(cert_dir: Optional[str]):
 
     matched_conns = [conn for conn in conns if conn.laddr.port == ix.worker_result_port]
     assert len(matched_conns) == 1
-
+    # laddr.ip can return ::ffff:127.0.0.1 when using IPv6
+    assert address in matched_conns[0].laddr.ip
parsl/tests/unit/test_address.py
ADDED
@@ -0,0 +1,20 @@
+import pytest
+
+from parsl.addresses import tcp_url
+
+
+@pytest.mark.local
+@pytest.mark.parametrize("address, port,expected", [
+    ("127.0.0.1", 55001, "tcp://127.0.0.1:55001"),
+    ("127.0.0.1", "55001", "tcp://127.0.0.1:55001"),
+    ("127.0.0.1", None, "tcp://127.0.0.1"),
+    ("::1", "55001", "tcp://[::1]:55001"),
+    ("::ffff:127.0.0.1", 55001, "tcp://[::ffff:127.0.0.1]:55001"),
+    ("::ffff:127.0.0.1", None, "tcp://::ffff:127.0.0.1"),
+    ("::ffff:127.0.0.1", None, "tcp://::ffff:127.0.0.1"),
+    ("*", None, "tcp://*"),
+])
+def test_tcp_url(address, port, expected):
+    """Confirm valid address generation"""
+    result = tcp_url(address, port)
+    assert result == expected
parsl/version.py
CHANGED
{parsl-2024.11.11.data → parsl-2024.11.25.data}/scripts/exec_parsl_function.py
CHANGED
@@ -94,7 +94,7 @@ def unpack_source_code_function(function_info, user_namespace):
 
 def unpack_byte_code_function(function_info, user_namespace):
     from parsl.serialize import unpack_apply_message
-    func, args, kwargs = unpack_apply_message(function_info["byte code"]
+    func, args, kwargs = unpack_apply_message(function_info["byte code"])
     return (func, 'parsl_function_name', args, kwargs)
 
 
{parsl-2024.11.11.data → parsl-2024.11.25.data}/scripts/interchange.py
CHANGED
@@ -14,6 +14,7 @@ from typing import Any, Dict, List, NoReturn, Optional, Sequence, Set, Tuple, ca
 import zmq
 
 from parsl import curvezmq
+from parsl.addresses import tcp_url
 from parsl.app.errors import RemoteExceptionWrapper
 from parsl.executors.high_throughput.errors import ManagerLost, VersionMismatch
 from parsl.executors.high_throughput.manager_record import ManagerRecord
@@ -115,13 +116,13 @@ class Interchange:
         self.zmq_context = curvezmq.ServerContext(self.cert_dir)
         self.task_incoming = self.zmq_context.socket(zmq.DEALER)
         self.task_incoming.set_hwm(0)
-        self.task_incoming.connect(
+        self.task_incoming.connect(tcp_url(client_address, client_ports[0]))
         self.results_outgoing = self.zmq_context.socket(zmq.DEALER)
         self.results_outgoing.set_hwm(0)
-        self.results_outgoing.connect(
+        self.results_outgoing.connect(tcp_url(client_address, client_ports[1]))
 
         self.command_channel = self.zmq_context.socket(zmq.REP)
-        self.command_channel.connect(
+        self.command_channel.connect(tcp_url(client_address, client_ports[2]))
         logger.info("Connected to client")
 
         self.run_id = run_id
@@ -144,14 +145,14 @@ class Interchange:
             self.worker_task_port = self.worker_ports[0]
             self.worker_result_port = self.worker_ports[1]
 
-            self.task_outgoing.bind(
-            self.results_incoming.bind(
+            self.task_outgoing.bind(tcp_url(self.interchange_address, self.worker_task_port))
+            self.results_incoming.bind(tcp_url(self.interchange_address, self.worker_result_port))
 
         else:
-            self.worker_task_port = self.task_outgoing.bind_to_random_port(
+            self.worker_task_port = self.task_outgoing.bind_to_random_port(tcp_url(self.interchange_address),
                                                                            min_port=worker_port_range[0],
                                                                            max_port=worker_port_range[1], max_tries=100)
-            self.worker_result_port = self.results_incoming.bind_to_random_port(
+            self.worker_result_port = self.results_incoming.bind_to_random_port(tcp_url(self.interchange_address),
                                                                                 min_port=worker_port_range[0],
                                                                                 max_port=worker_port_range[1], max_tries=100)
 
{parsl-2024.11.11.data → parsl-2024.11.25.data}/scripts/process_worker_pool.py
CHANGED
@@ -22,7 +22,9 @@ import psutil
 import zmq
 
 from parsl import curvezmq
+from parsl.addresses import tcp_url
 from parsl.app.errors import RemoteExceptionWrapper
+from parsl.executors.execute_task import execute_task
 from parsl.executors.high_throughput.errors import WorkerLost
 from parsl.executors.high_throughput.mpi_prefix_composer import (
     VALID_LAUNCHERS,
@@ -35,7 +37,7 @@ from parsl.executors.high_throughput.mpi_resource_management import (
 from parsl.executors.high_throughput.probe import probe_addresses
 from parsl.multiprocessing import SpawnContext
 from parsl.process_loggers import wrap_with_logs
-from parsl.serialize import serialize
+from parsl.serialize import serialize
 from parsl.version import VERSION as PARSL_VERSION
 
 HEARTBEAT_CODE = (2 ** 32) - 1
@@ -158,8 +160,8 @@ class Manager:
                 raise Exception("No viable address found")
             else:
                 logger.info("Connection to Interchange successful on {}".format(ix_address))
-                task_q_url =
-                result_q_url =
+                task_q_url = tcp_url(ix_address, task_port)
+                result_q_url = tcp_url(ix_address, result_port)
                 logger.info("Task url : {}".format(task_q_url))
                 logger.info("Result url : {}".format(result_q_url))
         except Exception:
@@ -590,45 +592,13 @@ def update_resource_spec_env_vars(mpi_launcher: str, resource_spec: Dict, node_i
         os.environ[key] = prefix_table[key]
 
 
-def
-
-
-
-
-
-
-
-    f, args, kwargs, resource_spec = unpack_res_spec_apply_message(bufs, user_ns, copy=False)
-
-    for varname in resource_spec:
-        envname = "PARSL_" + str(varname).upper()
-        os.environ[envname] = str(resource_spec[varname])
-
-    if resource_spec.get("MPI_NODELIST"):
-        worker_id = os.environ['PARSL_WORKER_RANK']
-        nodes_for_task = resource_spec["MPI_NODELIST"].split(',')
-        logger.info(f"Launching task on provisioned nodes: {nodes_for_task}")
-        assert mpi_launcher
-        update_resource_spec_env_vars(mpi_launcher,
-                                      resource_spec=resource_spec,
-                                      node_info=nodes_for_task)
-    # We might need to look into callability of the function from itself
-    # since we change it's name in the new namespace
-    prefix = "parsl_"
-    fname = prefix + "f"
-    argname = prefix + "args"
-    kwargname = prefix + "kwargs"
-    resultname = prefix + "result"
-
-    user_ns.update({fname: f,
-                    argname: args,
-                    kwargname: kwargs,
-                    resultname: resultname})
-
-    code = "{0} = {1}(*{2}, **{3})".format(resultname, fname,
-                                           argname, kwargname)
-    exec(code, user_ns, user_ns)
-    return user_ns.get(resultname)
+def _init_mpi_env(mpi_launcher: str, resource_spec: Dict):
+    node_list = resource_spec.get("MPI_NODELIST")
+    if node_list is None:
+        return
+    nodes_for_task = node_list.split(',')
+    logger.info(f"Launching task on provisioned nodes: {nodes_for_task}")
+    update_resource_spec_env_vars(mpi_launcher=mpi_launcher, resource_spec=resource_spec, node_info=nodes_for_task)
 
 
 @wrap_with_logs(target="worker_log")
@@ -786,8 +756,10 @@ def worker(
             ready_worker_count.value -= 1
             worker_enqueued = False
 
+            _init_mpi_env(mpi_launcher=mpi_launcher, resource_spec=req["resource_spec"])
+
             try:
-                result = execute_task(req['buffer']
+                result = execute_task(req['buffer'])
                 serialized_result = serialize(result, buffer_threshold=1000000)
             except Exception as e:
                 logger.info('Caught an exception: {}'.format(e))
{parsl-2024.11.11.dist-info → parsl-2024.11.25.dist-info}/METADATA
CHANGED
@@ -1,9 +1,9 @@
 Metadata-Version: 2.1
 Name: parsl
-Version: 2024.11.11
+Version: 2024.11.25
 Summary: Simple data dependent workflows in Python
 Home-page: https://github.com/Parsl/parsl
-Download-URL: https://github.com/Parsl/parsl/archive/2024.11.11.tar.gz
+Download-URL: https://github.com/Parsl/parsl/archive/2024.11.25.tar.gz
 Author: The Parsl Team
 Author-email: parsl@googlegroups.com
 License: Apache 2.0
{parsl-2024.11.11.dist-info → parsl-2024.11.25.dist-info}/RECORD
CHANGED
@@ -1,14 +1,14 @@
 parsl/__init__.py,sha256=65VfBnxw2k8V3sHsbhKoUCqG-ps2XP2l3x3ALMqQ13Y,1777
-parsl/addresses.py,sha256=
+parsl/addresses.py,sha256=gzQzyIoamo3Eq7AQ4MnyCwyZDRuu0gf7jXPEkb2YA8Y,5277
 parsl/config.py,sha256=p5HQoxLj5aMagUAYfngcXG2kw0s6SJoc6u7vH2sVhPU,9635
-parsl/curvezmq.py,sha256=
+parsl/curvezmq.py,sha256=6Zi7RqTP_eKWi3DFgapfK2t-Jw8vJS-ZtN1bsrByPeo,7073
 parsl/errors.py,sha256=SzINzQFZDBDbj9l-DPQznD0TbGkNhHIRAPkcBCogf_A,1019
 parsl/log_utils.py,sha256=u14Fkl5eDfS4HMpl0JjseNNPdbvPaugWPRQj1_af_Zo,3273
 parsl/multiprocessing.py,sha256=MyaEcEq-Qf860u7V98u-PZrPNdtzOZL_NW6EhIJnmfQ,1937
 parsl/process_loggers.py,sha256=uQ7Gd0W72Jz7rrcYlOMfLsAEhkRltxXJL2MgdduJjEw,1136
 parsl/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 parsl/utils.py,sha256=rMLKeadEsQ9jGwm4ogqiLIXPS3zOAyfznQJXVkJSY8E,13107
-parsl/version.py,sha256=
+parsl/version.py,sha256=4kJ3r0GSxsTtdp-nw_5pYzhehKY87nlvzt-KMWN2-Lw,131
 parsl/app/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 parsl/app/app.py,sha256=0gbM4AH2OtFOLsv07I5nglpElcwMSOi-FzdZZfrk7So,8532
 parsl/app/bash.py,sha256=jm2AvePlCT9DZR7H_4ANDWxatp5dN_22FUlT_gWhZ-g,5528
@@ -56,7 +56,7 @@ parsl/data_provider/staging.py,sha256=ZDZuuFg38pjUStegKPcvPsfGp3iMeReMzfU6DSwtJj
 parsl/data_provider/zip.py,sha256=S4kVuH9lxAegRURYbvIUR7EYYBOccyslaqyCrVWUBhw,4497
 parsl/dataflow/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 parsl/dataflow/dependency_resolvers.py,sha256=Om8Dgh7a0ZwgXAc6TlhxLSzvxXHDlNNV1aBNiD3JTNY,3325
-parsl/dataflow/dflow.py,sha256=
+parsl/dataflow/dflow.py,sha256=_EsavFW9vLnWkqTKuATSg-TGv0Cvnu5xnCLWWZEsXeA,65342
 parsl/dataflow/errors.py,sha256=9SxVhIJY_53FQx8x4OU8UA8nd7lvUbDllH7KfMXpYaY,2177
 parsl/dataflow/futures.py,sha256=08LuP-HFiHBIZmeKCjlsazw_WpQ5fwevrU2_WbidkYw,6080
 parsl/dataflow/memoization.py,sha256=l9uw1Bu50GucBF70M5relpGKFkE4dIM9T3R1KrxW0v0,9583
@@ -66,29 +66,30 @@ parsl/dataflow/taskrecord.py,sha256=-FuujdZQ1y5GSc-PJ91QKGT-Kp0lrg70MFDoxpbWI1Q,
 parsl/executors/__init__.py,sha256=Cg8e-F2NUaBD8A9crDAXKCSdoBEwQVIdgm4FlXd-wvk,476
 parsl/executors/base.py,sha256=5A59mCXPjYNCep9JgfvIjBdZvGV-1mNVHklr-ZIEojg,5200
 parsl/executors/errors.py,sha256=ZxL3nK5samPos8Xixo_jpRtPIiRJfZ5D397_qaXj2g0,2515
+parsl/executors/execute_task.py,sha256=PtqHxk778UQaNah1AN-TJV5emZbOcU5TGtWDxFn3_F4,1079
 parsl/executors/status_handling.py,sha256=nxbkiGr6f3xDc0nsUeSrMMxlj7UD32K7nOLCLzfthDs,15416
 parsl/executors/threads.py,sha256=_LA5NA3GSvtjDend-1HVpjoDoNHHW13rAD0CET99fjQ,3463
 parsl/executors/flux/__init__.py,sha256=P9grTTeRPXfqXurFhlSS7XhmE6tTbnCnyQ1f9b-oYHE,136
-parsl/executors/flux/execute_parsl_task.py,sha256=
+parsl/executors/flux/execute_parsl_task.py,sha256=zHP5M7ILGiwnoalZ8WsfVVdZM7uP4iQo2ThVh4crxpM,1530
 parsl/executors/flux/executor.py,sha256=8_xakLUu5zNJAHL0LbeTCFEWqWzRK1eE-3ep4GIIIrY,17017
 parsl/executors/flux/flux_instance_manager.py,sha256=5T3Rp7ZM-mlT0Pf0Gxgs5_YmnaPrSF9ec7zvRfLfYJw,2129
 parsl/executors/high_throughput/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 parsl/executors/high_throughput/errors.py,sha256=Sak8e8UpiEcXefUjMHbhyXc4Rn7kJtOoh7L8wreBQdk,1638
-parsl/executors/high_throughput/executor.py,sha256=
-parsl/executors/high_throughput/interchange.py,sha256=
+parsl/executors/high_throughput/executor.py,sha256=huaG9bMUUbJLRG8F44O3bU7atS4ItQzrwFEq20D-jLM,37777
+parsl/executors/high_throughput/interchange.py,sha256=WXpSVTfx6OCk8ZUChd01i4BF6R4xu-ewcvzEMgnTWLw,30098
 parsl/executors/high_throughput/manager_record.py,sha256=yn3L8TUJFkgm2lX1x0SeS9mkvJowC0s2VIMCFiU7ThM,455
 parsl/executors/high_throughput/manager_selector.py,sha256=UKcUE6v0tO7PDMTThpKSKxVpOpOUilxDL7UbNgpZCxo,2116
 parsl/executors/high_throughput/monitoring_info.py,sha256=HC0drp6nlXQpAop5PTUKNjdXMgtZVvrBL0JzZJebPP4,298
-parsl/executors/high_throughput/mpi_executor.py,sha256=
+parsl/executors/high_throughput/mpi_executor.py,sha256=U-aatbLF_Mu1p6lP0HmT7Yn1Swn3cc7hPmDfuUb9TpI,4797
 parsl/executors/high_throughput/mpi_prefix_composer.py,sha256=DmpKugANNa1bdYlqQBLHkrFc15fJpefPPhW9hkAlh1s,4308
-parsl/executors/high_throughput/mpi_resource_management.py,sha256=
-parsl/executors/high_throughput/probe.py,sha256=
-parsl/executors/high_throughput/process_worker_pool.py,sha256=
-parsl/executors/high_throughput/zmq_pipes.py,sha256=
+parsl/executors/high_throughput/mpi_resource_management.py,sha256=hqotZLn3Q_iPRfMVmvvpKiGdguw55iYq1L_Gp9x6y4Y,7790
+parsl/executors/high_throughput/probe.py,sha256=QOEaliO3x5cB6ltMOZMsZQ-ath9AAuFqXcBzRgWOM60,2754
+parsl/executors/high_throughput/process_worker_pool.py,sha256=3zXe3_X5GvbTOlfeJJD_E0ssfJqkAfkqXHfeU7mymdI,41865
+parsl/executors/high_throughput/zmq_pipes.py,sha256=GKi1cp8a2lU-P7AJCgFVmZiHntaN16_I_kMyaezkl4g,8574
 parsl/executors/radical/__init__.py,sha256=CKbtV2numw5QvgIBq1htMUrt9TqDCIC2zifyf2svTNU,186
 parsl/executors/radical/executor.py,sha256=426cMt6d8uJFZ_7Ub1kCslaND4OKtBX5WZdz-0RXjMk,22554
 parsl/executors/radical/rpex_resources.py,sha256=Q7-0u3K447LBCe2y7mVcdw6jqWI7SdPXxCKhkr6FoRQ,5139
-parsl/executors/radical/rpex_worker.py,sha256=
+parsl/executors/radical/rpex_worker.py,sha256=z6r82ZujKb6sdKIdHsQ_5EBMDIQieeGcrlt6kGLFo4M,1830
 parsl/executors/taskvine/__init__.py,sha256=9rwp3M8B0YyEhZMLO0RHaNw7u1nc01WHbXLqnBTanu0,293
 parsl/executors/taskvine/errors.py,sha256=euIYkSslrNSI85kyi2s0xzOaO9ik4c1fYHstMIeiBJk,652
 parsl/executors/taskvine/exec_parsl_function.py,sha256=ftGdJU78lKPPkphSHlEi4rj164mhuMHJjghVqfgeXKk,7085
@@ -100,7 +101,7 @@ parsl/executors/taskvine/manager_config.py,sha256=Lf3dxcDR5Jo97Odv4JFXfuRLclVX-x
 parsl/executors/taskvine/utils.py,sha256=iSrIogeiauL3UNy_9tiZp1cBSNn6fIJkMYQRVi1n_r8,4156
 parsl/executors/workqueue/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 parsl/executors/workqueue/errors.py,sha256=XO2naYhAsHHyiOBH6hpObg3mPNDmvMoFqErsj0-v7jc,541
-parsl/executors/workqueue/exec_parsl_function.py,sha256=
+parsl/executors/workqueue/exec_parsl_function.py,sha256=YXKVVIa4zXmOtz-0Ca4E_5nQfN_3S2bh2tB75uZZB4w,7774
 parsl/executors/workqueue/executor.py,sha256=_Jv35gRAzUjC-pyDrSs6sEOFc7MxOFJ5cvWXt9WGRwU,49969
 parsl/executors/workqueue/parsl_coprocess.py,sha256=cF1UmTgVLoey6QzBcbYgEiEsRidSaFfuO54f1HFw_EM,5737
 parsl/executors/workqueue/parsl_coprocess_stub.py,sha256=_bJmpPIgL42qM6bVzeEKt1Mn1trSP41rtJguXxPGfHI,735
@@ -115,13 +116,13 @@ parsl/launchers/base.py,sha256=CblcvPTJiu-MNLWaRtFe29SZQ0BpTOlaY8CGcHdlHIE,538
 parsl/launchers/errors.py,sha256=8YMV_CHpBNVa4eXkGE4x5DaFQlZkDCRCHmBktYcY6TA,467
 parsl/launchers/launchers.py,sha256=cQsNsHuCOL_nQTjPXf0--YsgsDoMoJ77bO1Wt4ncLjs,15134
 parsl/monitoring/__init__.py,sha256=0ywNz6i0lM1xo_7_BIxhETDGeVd2C_0wwD7qgeaMR4c,83
-parsl/monitoring/db_manager.py,sha256=
+parsl/monitoring/db_manager.py,sha256=D8lrngFGxbFhyWVkF8JZRTbGxRYmd3SY6_zu8KV0FJs,33330
 parsl/monitoring/errors.py,sha256=D6jpYzEzp0d6FmVKGqhvjAxr4ztZfJX2s-aXemH9bBU,148
 parsl/monitoring/message_type.py,sha256=Khn88afNxcOIciKiCK4GLnn90I5BlRTiOL3zK-P07yQ,401
-parsl/monitoring/monitoring.py,sha256=
+parsl/monitoring/monitoring.py,sha256=8uy-7ua3FyTWfGgxGavCzM9_r56gCJ-KLpUysAqFI5Q,12671
 parsl/monitoring/radios.py,sha256=l-a7GiWRBR3OaeLeHD_gBo2lMrqpjiQjLNaPTCr29ck,6021
 parsl/monitoring/remote.py,sha256=WfSqQWYPMx3gT6u4T171ngMPzt8ialR1jRSsrD-4O24,13619
-parsl/monitoring/router.py,sha256=
+parsl/monitoring/router.py,sha256=VvzzsxLpwSSn0VUZOJtf0uvP9Kcr1znDAR1_MoHdAeU,9208
 parsl/monitoring/types.py,sha256=oOCrzv-ab-_rv4pb8o58Sdb8G_RGp1aZriRbdf9zBEk,339
 parsl/monitoring/queries/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 parsl/monitoring/queries/pandas.py,sha256=0Z2r0rjTKCemf0eaDkF1irvVHn5g7KC5SYETvQPRxwU,2232
@@ -176,7 +177,7 @@ parsl/providers/pbspro/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3
 parsl/providers/pbspro/pbspro.py,sha256=luPUxBA0QMax7tKICsmesESQcOhcGnLi6GUlfGeO5pQ,8598
 parsl/providers/pbspro/template.py,sha256=y-Dher--t5Eury-c7cAuSZs9FEUXWiruFUI07v81558,315
 parsl/providers/slurm/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-parsl/providers/slurm/slurm.py,sha256=
+parsl/providers/slurm/slurm.py,sha256=6tnDB2rLNdnY_FGtmNg6tPSdU9dP5DuWBg4GGEMTPYI,16442
 parsl/providers/slurm/template.py,sha256=KpgBEFMc1ps-38jdrk13xUGx9TCivu-iF90jgQDdiEQ,315
 parsl/providers/torque/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 parsl/providers/torque/template.py,sha256=4qfc2gmlEhRCAD7erFDOs4prJQ43I8s4E8DSUSVQx3A,358
@@ -185,7 +186,7 @@ parsl/serialize/__init__.py,sha256=-tQNsFsHTfWxZL6iydt08S9t8QP2rk9Q6RKnXYwbkfY,4
 parsl/serialize/base.py,sha256=5GyJRr3PQohp5Zv9YQUEyes61mfUK7wTctTaXITYpSQ,1082
 parsl/serialize/concretes.py,sha256=JPWmltkm-XH2S22ugXCYWYmxwukCUEXWYKzPkKXJO60,1911
 parsl/serialize/errors.py,sha256=TmTjGI4jf8p2hH454jpp_CPbhxwPXcj5MdOMEmF6so4,1066
-parsl/serialize/facade.py,sha256=
+parsl/serialize/facade.py,sha256=3uOuVp0epfyLn7qDzuWqLfsy971YVGD3sqwqcAiRwh0,6687
 parsl/serialize/proxystore.py,sha256=o-ha9QAvVhbN8y9S1itk3W0O75eyHYZw2AvB2xu5_Lg,1624
 parsl/tests/__init__.py,sha256=VTtJzOzz_x6fWNh8IOnsgFqVbdiJShi2AZH21mcmID4,204
 parsl/tests/callables_helper.py,sha256=ceP1YYsNtrZgKT6MAIvpgdccEjQ_CpFEOnZBGHKGOx0,30
@@ -193,6 +194,7 @@ parsl/tests/conftest.py,sha256=njhszRuR15nZDufKF2S90lgkL8bSnQY4vH7dckx9q24,14851
 parsl/tests/test_aalst_patterns.py,sha256=lNIxb7nIgh1yX7hR2fr_ck_mxYJxx8ASKK9zHUVqPno,9614
 parsl/tests/test_callables.py,sha256=97vrIF1_hfDGd81FM1bhR6FemZMWFcALrH6pVHMTCt8,1974
 parsl/tests/test_curvezmq.py,sha256=yyhlS4vmaZdMitiySoy4l_ih9H1bsPiN-tMdwIh3H20,12431
+parsl/tests/test_execute_task.py,sha256=lVZEcRocBTQHOQNEp8Gq858lQiYsTb6uI2jNxEUVog8,816
 parsl/tests/test_flux.py,sha256=TxkVPjksl1usdE9Y6y2FYhdOOmYFTlbEv_V9WnvF41A,5098
 parsl/tests/test_summary.py,sha256=x1RfWCFLzHjBw2ukwoRZPW1LFCKiwDmxx86ES-6yGRA,552
 parsl/tests/test_thread_parallelism.py,sha256=TVNeQ1NkUhaf3YbbzUSH-ozFFdX_GbX-5ygommjVxvc,1653
@@ -207,7 +209,7 @@ parsl/tests/configs/ec2_single_node.py,sha256=rK9AfMf4C84CXMhS5nhgHA_dNG2An7Yiq2
 parsl/tests/configs/ec2_spot.py,sha256=NKDCKgKxYNOHGVLBl2DFfiUwkR6xQnyhNb_E04TBs28,1253
 parsl/tests/configs/flux_local.py,sha256=xliKQfB5FFpfNHWYEHoA8FKOTVHFCXVhWNuKQ5VJNTk,182
 parsl/tests/configs/frontera.py,sha256=VXaRcvsi9ZjqJHi71BbKXSJBuQXdhCzPxXKW7H3LRBI,1567
-parsl/tests/configs/htex_local.py,sha256=
+parsl/tests/configs/htex_local.py,sha256=xDjEMktlv_CEwzKlRiBKcZcoT6ttc-vYfjYEvkD8oLk,759
 parsl/tests/configs/htex_local_alternate.py,sha256=JJN4OASr-RXsXuLGVS3ciCrcczf8VVzbuTDWn9Wu0g4,2577
 parsl/tests/configs/htex_local_intask_staging.py,sha256=E7uZD_AIAbxavkw4VrVXlGG7k42YJZv2qluAO-W0VvI,886
 parsl/tests/configs/htex_local_rsync_staging.py,sha256=cqTRcHLjqYnOL07Lb8ecTzQuzP-dWDpWdKhgtTwo-fU,940
@@ -329,7 +331,7 @@ parsl/tests/test_htex/test_missing_worker.py,sha256=gyp5i7_t-JHyJGtz_eXZKKBY5w8o
 parsl/tests/test_htex/test_multiple_disconnected_blocks.py,sha256=2vXZoIx4NuAWYuiNoL5Gxr85w72qZ7Kdb3JGh0FufTg,1867
 parsl/tests/test_htex/test_resource_spec_validation.py,sha256=VzOk4rjMNiDcEVLb-3YdlYZND7HRoGACJkTwq8NUTnc,1102
 parsl/tests/test_htex/test_worker_failure.py,sha256=Uz-RHI-LK78FMjXUvrUFmo4iYfmpDVBUcBxxRb3UG9M,603
-parsl/tests/test_htex/test_zmq_binding.py,sha256=
+parsl/tests/test_htex/test_zmq_binding.py,sha256=WNFsCKKfid2uEfem0WLgl1wnBncIabpAv6kmg3imBxk,4001
 parsl/tests/test_monitoring/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 parsl/tests/test_monitoring/test_app_names.py,sha256=ayyxySGWpKSe9dDw2UeJo1dicxjpALRuLsJfprZV4Eg,2174
 parsl/tests/test_monitoring/test_basic.py,sha256=VdF6JHfqsEOIMg-ysIAREgygZIjHWNDVLNVQ7jhWxmQ,4592
@@ -442,19 +444,20 @@ parsl/tests/test_utils/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3
 parsl/tests/test_utils/test_representation_mixin.py,sha256=kUZeIDwA2rlbJ3-beGzLLwf3dOplTMCrWJN87etHcyY,1633
 parsl/tests/test_utils/test_sanitize_dns.py,sha256=8P_v5a5JLGU76OYf0LtclAwqJxGU0fMh_OZMVkMke3I,2954
 parsl/tests/unit/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+parsl/tests/unit/test_address.py,sha256=LL9qhp00JFG5lDN7-lY1YtuhTDlKHXHHp3a9TX06c84,682
 parsl/tests/unit/test_file.py,sha256=vLycnYcv3bvSzL-FV8WdoibqTyb41BrH1LUYBavobsg,2850
 parsl/tests/unit/test_usage_tracking.py,sha256=xEfUlbBRpsFdUdOrCsk1Kz5AfmMxJT7f0_esZl8Ft-0,1884
 parsl/usage_tracking/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 parsl/usage_tracking/api.py,sha256=iaCY58Dc5J4UM7_dJzEEs871P1p1HdxBMtNGyVdzc9g,1821
 parsl/usage_tracking/levels.py,sha256=xbfzYEsd55KiZJ-mzNgPebvOH4rRHum04hROzEf41tU,291
 parsl/usage_tracking/usage.py,sha256=tcoZ2OUjsQVakG8Uu9_HFuEdzpSHyt4JarSRcLGnSMw,8918
-parsl-2024.11.
-parsl-2024.11.
-parsl-2024.11.
-parsl-2024.11.
-parsl-2024.11.
-parsl-2024.11.
-parsl-2024.11.
-parsl-2024.11.
-parsl-2024.11.
-parsl-2024.11.
+parsl-2024.11.25.data/scripts/exec_parsl_function.py,sha256=YXKVVIa4zXmOtz-0Ca4E_5nQfN_3S2bh2tB75uZZB4w,7774
+parsl-2024.11.25.data/scripts/interchange.py,sha256=uRG_3K-XcSHzCNzI1_ab9k9UUiuy6llITgG1BF1N_Mw,30085
+parsl-2024.11.25.data/scripts/parsl_coprocess.py,sha256=zrVjEqQvFOHxsLufPi00xzMONagjVwLZbavPM7bbjK4,5722
+parsl-2024.11.25.data/scripts/process_worker_pool.py,sha256=82FoJTye2SysJzPg-N8BpenuHGU7hOI8-Bedq8HV9C0,41851
+parsl-2024.11.25.dist-info/LICENSE,sha256=tAkwu8-AdEyGxGoSvJ2gVmQdcicWw3j1ZZueVV74M-E,11357
+parsl-2024.11.25.dist-info/METADATA,sha256=yA5YMJUttDX3G8uZgVVXMWTjFE4MWqL_dDQL7fpW1bA,3848
+parsl-2024.11.25.dist-info/WHEEL,sha256=tZoeGjtWxWRfdplE7E3d45VPlLNQnvbKiYnx7gwAy8A,92
+parsl-2024.11.25.dist-info/entry_points.txt,sha256=XqnsWDYoEcLbsMcpnYGKLEnSBmaIe1YoM5YsBdJG2tI,176
+parsl-2024.11.25.dist-info/top_level.txt,sha256=PIheYoUFQtF2icLsgOykgU-Cjuwr2Oi6On2jo5RYgRM,6
+parsl-2024.11.25.dist-info/RECORD,,
{parsl-2024.11.11.data → parsl-2024.11.25.data}/scripts/parsl_coprocess.py
File without changes
{parsl-2024.11.11.dist-info → parsl-2024.11.25.dist-info}/LICENSE
File without changes
{parsl-2024.11.11.dist-info → parsl-2024.11.25.dist-info}/entry_points.txt
File without changes
{parsl-2024.11.11.dist-info → parsl-2024.11.25.dist-info}/top_level.txt
File without changes