parsl 2024.11.18-py3-none-any.whl → 2024.12.2-py3-none-any.whl

This diff compares the contents of two publicly released package versions as they appear in their respective public registries. It is provided for informational purposes only.
Files changed (44)
  1. parsl/addresses.py +19 -1
  2. parsl/channels/base.py +0 -28
  3. parsl/channels/local/local.py +0 -36
  4. parsl/configs/ASPIRE1.py +0 -1
  5. parsl/curvezmq.py +4 -0
  6. parsl/executors/execute_task.py +37 -0
  7. parsl/executors/flux/execute_parsl_task.py +1 -1
  8. parsl/executors/high_throughput/executor.py +29 -7
  9. parsl/executors/high_throughput/interchange.py +8 -7
  10. parsl/executors/high_throughput/mpi_executor.py +2 -0
  11. parsl/executors/high_throughput/mpi_resource_management.py +2 -3
  12. parsl/executors/high_throughput/probe.py +4 -4
  13. parsl/executors/high_throughput/process_worker_pool.py +15 -43
  14. parsl/executors/high_throughput/zmq_pipes.py +18 -8
  15. parsl/executors/radical/rpex_worker.py +2 -2
  16. parsl/executors/workqueue/exec_parsl_function.py +1 -1
  17. parsl/providers/condor/condor.py +3 -5
  18. parsl/providers/grid_engine/grid_engine.py +2 -3
  19. parsl/providers/local/local.py +1 -15
  20. parsl/providers/lsf/lsf.py +2 -12
  21. parsl/providers/pbspro/pbspro.py +1 -3
  22. parsl/providers/slurm/slurm.py +1 -11
  23. parsl/providers/torque/torque.py +1 -3
  24. parsl/serialize/facade.py +3 -3
  25. parsl/tests/configs/htex_local.py +1 -0
  26. parsl/tests/configs/htex_local_alternate.py +0 -1
  27. parsl/tests/configs/local_threads_monitoring.py +0 -1
  28. parsl/tests/manual_tests/test_udp_simple.py +0 -1
  29. parsl/tests/test_execute_task.py +29 -0
  30. parsl/tests/test_htex/test_zmq_binding.py +3 -2
  31. parsl/tests/test_monitoring/test_htex_init_blocks_vs_monitoring.py +0 -1
  32. parsl/tests/test_monitoring/test_stdouterr.py +0 -1
  33. parsl/tests/unit/test_address.py +20 -0
  34. parsl/version.py +1 -1
  35. {parsl-2024.11.18.data → parsl-2024.12.2.data}/scripts/exec_parsl_function.py +1 -1
  36. {parsl-2024.11.18.data → parsl-2024.12.2.data}/scripts/interchange.py +8 -7
  37. {parsl-2024.11.18.data → parsl-2024.12.2.data}/scripts/process_worker_pool.py +15 -43
  38. {parsl-2024.11.18.dist-info → parsl-2024.12.2.dist-info}/METADATA +2 -2
  39. {parsl-2024.11.18.dist-info → parsl-2024.12.2.dist-info}/RECORD +44 -41
  40. {parsl-2024.11.18.dist-info → parsl-2024.12.2.dist-info}/WHEEL +1 -1
  41. {parsl-2024.11.18.data → parsl-2024.12.2.data}/scripts/parsl_coprocess.py +0 -0
  42. {parsl-2024.11.18.dist-info → parsl-2024.12.2.dist-info}/LICENSE +0 -0
  43. {parsl-2024.11.18.dist-info → parsl-2024.12.2.dist-info}/entry_points.txt +0 -0
  44. {parsl-2024.11.18.dist-info → parsl-2024.12.2.dist-info}/top_level.txt +0 -0
parsl/providers/local/local.py CHANGED
@@ -32,9 +32,6 @@ class LocalProvider(ExecutionProvider, RepresentationMixin):
         Ratio of provisioned task slots to active tasks. A parallelism value of 1 represents aggressive
         scaling where as many resources as possible are used; parallelism close to 0 represents
         the opposite situation in which as few resources as possible (i.e., min_blocks) are used.
-    move_files : Optional[Bool]
-        Should files be moved? By default, Parsl will try to figure this out itself (= None).
-        If True, then will always move. If False, will never move.
     worker_init : str
         Command to be run before starting a worker, such as 'module load Anaconda; source activate env'.
     """
@@ -48,8 +45,7 @@ class LocalProvider(ExecutionProvider, RepresentationMixin):
                  max_blocks=1,
                  worker_init='',
                  cmd_timeout=30,
-                 parallelism=1,
-                 move_files=None):
+                 parallelism=1):
         self.channel = channel
         self._label = 'local'
         self.nodes_per_block = nodes_per_block
@@ -61,7 +57,6 @@ class LocalProvider(ExecutionProvider, RepresentationMixin):
         self.parallelism = parallelism
         self.script_dir = None
         self.cmd_timeout = cmd_timeout
-        self.move_files = move_files
 
         # Dictionary that keeps track of jobs, keyed on job_id
         self.resources = {}
@@ -83,7 +78,6 @@ class LocalProvider(ExecutionProvider, RepresentationMixin):
             if job_dict['status'] and job_dict['status'].terminal:
                 # We already checked this and it can't change after that
                 continue
-            # Script path should point to remote path if _should_move_files() is True
             script_path = job_dict['script_path']
 
             alive = self._is_alive(job_dict)
@@ -137,8 +131,6 @@ class LocalProvider(ExecutionProvider, RepresentationMixin):
 
     def _job_file_path(self, script_path: str, suffix: str) -> str:
         path = '{0}{1}'.format(script_path, suffix)
-        if self._should_move_files():
-            path = self.channel.pull_file(path, self.script_dir)
         return path
 
     def _read_job_file(self, script_path: str, suffix: str) -> str:
@@ -216,9 +208,6 @@ class LocalProvider(ExecutionProvider, RepresentationMixin):
 
         job_id = None
         remote_pid = None
-        if self._should_move_files():
-            logger.debug("Pushing start script")
-            script_path = self.channel.push_file(script_path, self.channel.script_dir)
 
         logger.debug("Launching")
         # We need to capture the exit code and the streams, so we put them in files. We also write
@@ -254,9 +243,6 @@ class LocalProvider(ExecutionProvider, RepresentationMixin):
 
         return job_id
 
-    def _should_move_files(self):
-        return (self.move_files is None and not isinstance(self.channel, LocalChannel)) or (self.move_files)
-
     def cancel(self, job_ids):
         ''' Cancels the jobs specified by a list of job ids
 
parsl/providers/lsf/lsf.py CHANGED
@@ -68,7 +68,6 @@ class LSFProvider(ClusterProvider, RepresentationMixin):
         :class:`~parsl.launchers.SingleNodeLauncher` (the default),
         :class:`~parsl.launchers.SrunLauncher`, or
         :class:`~parsl.launchers.AprunLauncher`
-    move_files : Optional[Bool]: should files be moved? by default, Parsl will try to move files.
     bsub_redirection: Bool
         Should a redirection symbol "<" be included when submitting jobs, i.e., Bsub < job_script.
     request_by_nodes: Bool
@@ -92,7 +91,6 @@ class LSFProvider(ClusterProvider, RepresentationMixin):
                  project=None,
                  queue=None,
                  cmd_timeout=120,
-                 move_files=True,
                  bsub_redirection=False,
                  request_by_nodes=True,
                  launcher=SingleNodeLauncher()):
@@ -112,7 +110,6 @@ class LSFProvider(ClusterProvider, RepresentationMixin):
         self.queue = queue
         self.cores_per_block = cores_per_block
         self.cores_per_node = cores_per_node
-        self.move_files = move_files
         self.bsub_redirection = bsub_redirection
         self.request_by_nodes = request_by_nodes
 
@@ -230,17 +227,10 @@ class LSFProvider(ClusterProvider, RepresentationMixin):
         logger.debug("Writing submit script")
         self._write_submit_script(template_string, script_path, job_name, job_config)
 
-        if self.move_files:
-            logger.debug("moving files")
-            channel_script_path = self.channel.push_file(script_path, self.channel.script_dir)
-        else:
-            logger.debug("not moving files")
-            channel_script_path = script_path
-
         if self.bsub_redirection:
-            cmd = "bsub < {0}".format(channel_script_path)
+            cmd = "bsub < {0}".format(script_path)
         else:
-            cmd = "bsub {0}".format(channel_script_path)
+            cmd = "bsub {0}".format(script_path)
         retcode, stdout, stderr = super().execute_wait(cmd)
 
         job_id = None
parsl/providers/pbspro/pbspro.py CHANGED
@@ -183,15 +183,13 @@ class PBSProProvider(TorqueProvider):
         logger.debug("Writing submit script")
         self._write_submit_script(self.template_string, script_path, job_name, job_config)
 
-        channel_script_path = self.channel.push_file(script_path, self.channel.script_dir)
-
         submit_options = ''
         if self.queue is not None:
             submit_options = '{0} -q {1}'.format(submit_options, self.queue)
         if self.account is not None:
             submit_options = '{0} -A {1}'.format(submit_options, self.account)
 
-        launch_cmd = "qsub {0} {1}".format(submit_options, channel_script_path)
+        launch_cmd = "qsub {0} {1}".format(submit_options, script_path)
         retcode, stdout, stderr = self.execute_wait(launch_cmd)
 
         job_id = None
parsl/providers/slurm/slurm.py CHANGED
@@ -110,7 +110,6 @@ class SlurmProvider(ClusterProvider, RepresentationMixin):
         :class:`~parsl.launchers.SingleNodeLauncher` (the default),
         :class:`~parsl.launchers.SrunLauncher`, or
         :class:`~parsl.launchers.AprunLauncher`
-    move_files : Optional[Bool]: should files be moved? by default, Parsl will try to move files.
     """
 
     @typeguard.typechecked
@@ -134,7 +133,6 @@ class SlurmProvider(ClusterProvider, RepresentationMixin):
                  worker_init: str = '',
                  cmd_timeout: int = 10,
                  exclusive: bool = True,
-                 move_files: bool = True,
                  launcher: Launcher = SingleNodeLauncher()):
         label = 'slurm'
         super().__init__(label,
@@ -152,7 +150,6 @@ class SlurmProvider(ClusterProvider, RepresentationMixin):
         self.cores_per_node = cores_per_node
         self.mem_per_node = mem_per_node
         self.exclusive = exclusive
-        self.move_files = move_files
         self.account = account
         self.qos = qos
         self.constraint = constraint
@@ -308,14 +305,7 @@ class SlurmProvider(ClusterProvider, RepresentationMixin):
         logger.debug("Writing submit script")
         self._write_submit_script(template_string, script_path, job_name, job_config)
 
-        if self.move_files:
-            logger.debug("moving files")
-            channel_script_path = self.channel.push_file(script_path, self.channel.script_dir)
-        else:
-            logger.debug("not moving files")
-            channel_script_path = script_path
-
-        retcode, stdout, stderr = self.execute_wait("sbatch {0}".format(channel_script_path))
+        retcode, stdout, stderr = self.execute_wait("sbatch {0}".format(script_path))
 
         if retcode == 0:
             for line in stdout.split('\n'):
parsl/providers/torque/torque.py CHANGED
@@ -189,15 +189,13 @@ class TorqueProvider(ClusterProvider, RepresentationMixin):
         logger.debug("Writing submit script")
         self._write_submit_script(self.template_string, script_path, job_name, job_config)
 
-        channel_script_path = self.channel.push_file(script_path, self.channel.script_dir)
-
         submit_options = ''
         if self.queue is not None:
             submit_options = '{0} -q {1}'.format(submit_options, self.queue)
         if self.account is not None:
             submit_options = '{0} -A {1}'.format(submit_options, self.account)
 
-        launch_cmd = "qsub {0} {1}".format(submit_options, channel_script_path)
+        launch_cmd = "qsub {0} {1}".format(submit_options, script_path)
         retcode, stdout, stderr = self.execute_wait(launch_cmd)
 
         job_id = None
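Taken together, the provider diffs above remove the `move_files` option and the channel `push_file`/`pull_file` staging path: submit scripts are now used directly from the path where they are written. A minimal migration sketch, assuming a config that previously passed `move_files` (in 2024.12.2 the keyword is gone, so passing it raises `TypeError`):

    from parsl.config import Config
    from parsl.executors import HighThroughputExecutor
    from parsl.providers import LocalProvider

    # Before: LocalProvider(..., move_files=None) was accepted in 2024.11.18.
    # After: drop the keyword entirely; script staging is no longer a provider concern.
    config = Config(
        executors=[
            HighThroughputExecutor(
                label="htex_local",
                provider=LocalProvider(
                    init_blocks=1,
                    max_blocks=1,
                    parallelism=1,
                ),
            )
        ]
    )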
parsl/serialize/facade.py CHANGED
@@ -87,16 +87,16 @@ def pack_res_spec_apply_message(func: Any, args: Any, kwargs: Any, resource_spec
     return pack_apply_message(func, args, (kwargs, resource_specification), buffer_threshold=buffer_threshold)
 
 
-def unpack_apply_message(packed_buffer: bytes, user_ns: Any = None, copy: Any = False) -> List[Any]:
+def unpack_apply_message(packed_buffer: bytes) -> List[Any]:
     """ Unpack and deserialize function and parameters
     """
     return [deserialize(buf) for buf in unpack_buffers(packed_buffer)]
 
 
-def unpack_res_spec_apply_message(packed_buffer: bytes, user_ns: Any = None, copy: Any = False) -> List[Any]:
+def unpack_res_spec_apply_message(packed_buffer: bytes) -> List[Any]:
     """ Unpack and deserialize function, parameters, and resource_specification
     """
-    func, args, (kwargs, resource_spec) = unpack_apply_message(packed_buffer, user_ns=user_ns, copy=copy)
+    func, args, (kwargs, resource_spec) = unpack_apply_message(packed_buffer)
     return [func, args, kwargs, resource_spec]
 
parsl/tests/configs/htex_local.py CHANGED
@@ -10,6 +10,7 @@ def fresh_config():
         executors=[
             HighThroughputExecutor(
                 label="htex_local",
+                loopback_address="::1",
                 worker_debug=True,
                 cores_per_worker=1,
                 encrypted=True,
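The only change to this test config is pinning `loopback_address` to the IPv6 loopback, so the local test suite exercises the IPv6 paths introduced in this release. A sketch of the same setting in a user config (other parameters omitted; the default remains the IPv4 loopback):

    from parsl.executors import HighThroughputExecutor

    # loopback_address selects the address used for the executor's local
    # ZMQ pipes; "::1" forces the IPv6 code path.
    htex = HighThroughputExecutor(label="htex_local", loopback_address="::1", encrypted=True)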
parsl/tests/configs/htex_local_alternate.py CHANGED
@@ -62,7 +62,6 @@ def fresh_config():
         retries=2,
         monitoring=MonitoringHub(
             hub_address="localhost",
-            hub_port=55055,
             monitoring_debug=False,
             resource_monitoring_interval=1,
         ),
parsl/tests/configs/local_threads_monitoring.py CHANGED
@@ -5,7 +5,6 @@ from parsl.monitoring import MonitoringHub
 config = Config(executors=[ThreadPoolExecutor(label='threads', max_threads=4)],
                 monitoring=MonitoringHub(
                     hub_address="localhost",
-                    hub_port=55055,
                     resource_monitoring_interval=3,
                 )
                 )
parsl/tests/manual_tests/test_udp_simple.py CHANGED
@@ -15,7 +15,6 @@ def local_setup():
         ],
         monitoring=MonitoringHub(
             hub_address="127.0.0.1",
-            hub_port=55055,
             logging_level=logging.INFO,
             resource_monitoring_interval=10))
 
parsl/tests/test_execute_task.py ADDED
@@ -0,0 +1,29 @@
+import os
+
+import pytest
+
+from parsl.executors.execute_task import execute_task
+from parsl.serialize.facade import pack_res_spec_apply_message
+
+
+def addemup(*args: int, name: str = "apples"):
+    total = sum(args)
+    return f"{total} {name}"
+
+
+@pytest.mark.local
+def test_execute_task():
+    args = (1, 2, 3)
+    kwargs = {"name": "boots"}
+    buff = pack_res_spec_apply_message(addemup, args, kwargs, {})
+    res = execute_task(buff)
+    assert res == addemup(*args, **kwargs)
+
+
+@pytest.mark.local
+def test_execute_task_resource_spec():
+    resource_spec = {"num_nodes": 2, "ranks_per_node": 2, "num_ranks": 4}
+    buff = pack_res_spec_apply_message(addemup, (1, 2), {}, resource_spec)
+    execute_task(buff)
+    for key, val in resource_spec.items():
+        assert os.environ[f"PARSL_{key.upper()}"] == str(val)
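These tests pin down the contract of the new shared `parsl/executors/execute_task.py` module (item 6 in the file list; its body is not shown in this diff): unpack a `pack_res_spec_apply_message` buffer, export each resource-spec key as a `PARSL_*` environment variable, and call the function directly. A hedged sketch of roughly what satisfies these tests, adapted from the old worker-pool implementation further below; not necessarily the shipped code:

    import os

    from parsl.serialize import unpack_res_spec_apply_message

    def execute_task(bufs: bytes):
        """Deserialize a packed task buffer and execute it, returning the result."""
        func, args, kwargs, resource_spec = unpack_res_spec_apply_message(bufs)
        for varname in resource_spec:
            # e.g. {"num_nodes": 2} becomes PARSL_NUM_NODES=2
            os.environ["PARSL_" + str(varname).upper()] = str(resource_spec[varname])
        return func(*args, **kwargs)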
parsl/tests/test_htex/test_zmq_binding.py CHANGED
@@ -87,7 +87,7 @@ def test_interchange_binding_with_non_ipv4_address(cert_dir: Optional[str]):
 def test_interchange_binding_bad_address(cert_dir: Optional[str]):
     """Confirm that we raise a ZMQError when a bad address is supplied"""
     address = "550.0.0.0"
-    with pytest.raises(zmq.error.ZMQError):
+    with pytest.raises(ValueError):
         make_interchange(interchange_address=address, cert_dir=cert_dir)
 
 
@@ -103,4 +103,5 @@ def test_limited_interface_binding(cert_dir: Optional[str]):
 
     matched_conns = [conn for conn in conns if conn.laddr.port == ix.worker_result_port]
     assert len(matched_conns) == 1
-    assert matched_conns[0].laddr.ip == address
+    # laddr.ip can return ::ffff:127.0.0.1 when using IPv6
+    assert address in matched_conns[0].laddr.ip
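The expected exception changes from `zmq.error.ZMQError` to `ValueError` because the address is now parsed before any socket is created. Assuming the new `tcp_url` helper validates addresses with the standard-library `ipaddress` module (an inference from this test; see the sketch after the `test_address.py` diff below), the out-of-range octet fails at parse time:

    import ipaddress

    # "550.0.0.0" is not a valid IPv4 or IPv6 literal, so parsing raises
    # ValueError before ZMQ ever sees the address.
    ipaddress.ip_address("550.0.0.0")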
parsl/tests/test_monitoring/test_htex_init_blocks_vs_monitoring.py CHANGED
@@ -37,7 +37,6 @@ def fresh_config(run_dir, strategy, db_url):
         strategy_period=0.1,
         monitoring=MonitoringHub(
             hub_address="localhost",
-            hub_port=55055,
             logging_endpoint=db_url
         )
     )
parsl/tests/test_monitoring/test_stdouterr.py CHANGED
@@ -37,7 +37,6 @@ def fresh_config(run_dir):
         strategy_period=0.1,
         monitoring=MonitoringHub(
             hub_address="localhost",
-            hub_port=55055,
         )
     )
 
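As in the other test configs above, the fixed `hub_port=55055` is dropped so each `MonitoringHub` picks a free port at runtime instead of colliding on a hard-coded one. A sketch of the resulting minimal monitoring setup:

    from parsl.monitoring import MonitoringHub

    # With no hub_port given, the hub selects an available port when it
    # starts, avoiding clashes when several runs share a machine.
    monitoring = MonitoringHub(
        hub_address="localhost",
        resource_monitoring_interval=1,
    )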
parsl/tests/unit/test_address.py ADDED
@@ -0,0 +1,20 @@
+import pytest
+
+from parsl.addresses import tcp_url
+
+
+@pytest.mark.local
+@pytest.mark.parametrize("address, port,expected", [
+    ("127.0.0.1", 55001, "tcp://127.0.0.1:55001"),
+    ("127.0.0.1", "55001", "tcp://127.0.0.1:55001"),
+    ("127.0.0.1", None, "tcp://127.0.0.1"),
+    ("::1", "55001", "tcp://[::1]:55001"),
+    ("::ffff:127.0.0.1", 55001, "tcp://[::ffff:127.0.0.1]:55001"),
+    ("::ffff:127.0.0.1", None, "tcp://::ffff:127.0.0.1"),
+    ("::ffff:127.0.0.1", None, "tcp://::ffff:127.0.0.1"),
+    ("*", None, "tcp://*"),
+])
+def test_tcp_url(address, port, expected):
+    """Confirm valid address generation"""
+    result = tcp_url(address, port)
+    assert result == expected
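These cases characterize the new `parsl.addresses.tcp_url` helper used throughout the interchange and worker-pool changes below: IPv6 literals are bracketed only when a port is attached, and `*` passes through for wildcard binds. A hedged sketch consistent with every case in this table (an illustration, not necessarily the shipped implementation):

    import ipaddress
    from typing import Optional, Union

    def tcp_url(address: str, port: Optional[Union[int, str]] = None) -> str:
        """Construct a tcp:// URL that is safe for IPv4 and IPv6 literals."""
        if address == "*":
            return "tcp://*"  # wildcard bind; nothing to validate
        ip = ipaddress.ip_address(address)  # raises ValueError for e.g. "550.0.0.0"
        if port is None:
            return f"tcp://{address}"
        if ip.version == 6:
            return f"tcp://[{address}]:{port}"  # brackets keep the port unambiguous
        return f"tcp://{address}:{port}"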
parsl/version.py CHANGED
@@ -3,4 +3,4 @@
 Year.Month.Day[alpha/beta/..]
 Alphas will be numbered like this -> 2024.12.10a0
 """
-VERSION = '2024.11.18'
+VERSION = '2024.12.02'
parsl/executors/workqueue/exec_parsl_function.py CHANGED
@@ -94,7 +94,7 @@ def unpack_source_code_function(function_info, user_namespace):
 
 def unpack_byte_code_function(function_info, user_namespace):
     from parsl.serialize import unpack_apply_message
-    func, args, kwargs = unpack_apply_message(function_info["byte code"], user_namespace, copy=False)
+    func, args, kwargs = unpack_apply_message(function_info["byte code"])
     return (func, 'parsl_function_name', args, kwargs)
 
parsl/executors/high_throughput/interchange.py CHANGED
@@ -14,6 +14,7 @@ from typing import Any, Dict, List, NoReturn, Optional, Sequence, Set, Tuple, ca
 import zmq
 
 from parsl import curvezmq
+from parsl.addresses import tcp_url
 from parsl.app.errors import RemoteExceptionWrapper
 from parsl.executors.high_throughput.errors import ManagerLost, VersionMismatch
 from parsl.executors.high_throughput.manager_record import ManagerRecord
@@ -115,13 +116,13 @@ class Interchange:
         self.zmq_context = curvezmq.ServerContext(self.cert_dir)
         self.task_incoming = self.zmq_context.socket(zmq.DEALER)
         self.task_incoming.set_hwm(0)
-        self.task_incoming.connect("tcp://{}:{}".format(client_address, client_ports[0]))
+        self.task_incoming.connect(tcp_url(client_address, client_ports[0]))
         self.results_outgoing = self.zmq_context.socket(zmq.DEALER)
         self.results_outgoing.set_hwm(0)
-        self.results_outgoing.connect("tcp://{}:{}".format(client_address, client_ports[1]))
+        self.results_outgoing.connect(tcp_url(client_address, client_ports[1]))
 
         self.command_channel = self.zmq_context.socket(zmq.REP)
-        self.command_channel.connect("tcp://{}:{}".format(client_address, client_ports[2]))
+        self.command_channel.connect(tcp_url(client_address, client_ports[2]))
         logger.info("Connected to client")
 
         self.run_id = run_id
@@ -144,14 +145,14 @@ class Interchange:
             self.worker_task_port = self.worker_ports[0]
             self.worker_result_port = self.worker_ports[1]
 
-            self.task_outgoing.bind(f"tcp://{self.interchange_address}:{self.worker_task_port}")
-            self.results_incoming.bind(f"tcp://{self.interchange_address}:{self.worker_result_port}")
+            self.task_outgoing.bind(tcp_url(self.interchange_address, self.worker_task_port))
+            self.results_incoming.bind(tcp_url(self.interchange_address, self.worker_result_port))
 
         else:
-            self.worker_task_port = self.task_outgoing.bind_to_random_port(f"tcp://{self.interchange_address}",
+            self.worker_task_port = self.task_outgoing.bind_to_random_port(tcp_url(self.interchange_address),
                                                                            min_port=worker_port_range[0],
                                                                            max_port=worker_port_range[1], max_tries=100)
-            self.worker_result_port = self.results_incoming.bind_to_random_port(f"tcp://{self.interchange_address}",
+            self.worker_result_port = self.results_incoming.bind_to_random_port(tcp_url(self.interchange_address),
                                                                                 min_port=worker_port_range[0],
                                                                                 max_port=worker_port_range[1], max_tries=100)
parsl/executors/high_throughput/process_worker_pool.py CHANGED
@@ -22,7 +22,9 @@ import psutil
 import zmq
 
 from parsl import curvezmq
+from parsl.addresses import tcp_url
 from parsl.app.errors import RemoteExceptionWrapper
+from parsl.executors.execute_task import execute_task
 from parsl.executors.high_throughput.errors import WorkerLost
 from parsl.executors.high_throughput.mpi_prefix_composer import (
     VALID_LAUNCHERS,
@@ -35,7 +37,7 @@ from parsl.executors.high_throughput.mpi_resource_management import (
 from parsl.executors.high_throughput.probe import probe_addresses
 from parsl.multiprocessing import SpawnContext
 from parsl.process_loggers import wrap_with_logs
-from parsl.serialize import serialize, unpack_res_spec_apply_message
+from parsl.serialize import serialize
 from parsl.version import VERSION as PARSL_VERSION
 
 HEARTBEAT_CODE = (2 ** 32) - 1
@@ -158,8 +160,8 @@ class Manager:
                 raise Exception("No viable address found")
             else:
                 logger.info("Connection to Interchange successful on {}".format(ix_address))
-                task_q_url = "tcp://{}:{}".format(ix_address, task_port)
-                result_q_url = "tcp://{}:{}".format(ix_address, result_port)
+                task_q_url = tcp_url(ix_address, task_port)
+                result_q_url = tcp_url(ix_address, result_port)
                 logger.info("Task url : {}".format(task_q_url))
                 logger.info("Result url : {}".format(result_q_url))
         except Exception:
@@ -590,45 +592,13 @@ def update_resource_spec_env_vars(mpi_launcher: str, resource_spec: Dict, node_i
         os.environ[key] = prefix_table[key]
 
 
-def execute_task(bufs, mpi_launcher: Optional[str] = None):
-    """Deserialize the buffer and execute the task.
-
-    Returns the result or throws exception.
-    """
-    user_ns = locals()
-    user_ns.update({'__builtins__': __builtins__})
-
-    f, args, kwargs, resource_spec = unpack_res_spec_apply_message(bufs, user_ns, copy=False)
-
-    for varname in resource_spec:
-        envname = "PARSL_" + str(varname).upper()
-        os.environ[envname] = str(resource_spec[varname])
-
-    if resource_spec.get("MPI_NODELIST"):
-        worker_id = os.environ['PARSL_WORKER_RANK']
-        nodes_for_task = resource_spec["MPI_NODELIST"].split(',')
-        logger.info(f"Launching task on provisioned nodes: {nodes_for_task}")
-        assert mpi_launcher
-        update_resource_spec_env_vars(mpi_launcher,
-                                      resource_spec=resource_spec,
-                                      node_info=nodes_for_task)
-    # We might need to look into callability of the function from itself
-    # since we change it's name in the new namespace
-    prefix = "parsl_"
-    fname = prefix + "f"
-    argname = prefix + "args"
-    kwargname = prefix + "kwargs"
-    resultname = prefix + "result"
-
-    user_ns.update({fname: f,
-                    argname: args,
-                    kwargname: kwargs,
-                    resultname: resultname})
-
-    code = "{0} = {1}(*{2}, **{3})".format(resultname, fname,
-                                           argname, kwargname)
-    exec(code, user_ns, user_ns)
-    return user_ns.get(resultname)
+def _init_mpi_env(mpi_launcher: str, resource_spec: Dict):
+    node_list = resource_spec.get("MPI_NODELIST")
+    if node_list is None:
+        return
+    nodes_for_task = node_list.split(',')
+    logger.info(f"Launching task on provisioned nodes: {nodes_for_task}")
+    update_resource_spec_env_vars(mpi_launcher=mpi_launcher, resource_spec=resource_spec, node_info=nodes_for_task)
 
 
 @wrap_with_logs(target="worker_log")
@@ -786,8 +756,10 @@ def worker(
                 ready_worker_count.value -= 1
             worker_enqueued = False
 
+            _init_mpi_env(mpi_launcher=mpi_launcher, resource_spec=req["resource_spec"])
+
             try:
-                result = execute_task(req['buffer'], mpi_launcher=mpi_launcher)
+                result = execute_task(req['buffer'])
                 serialized_result = serialize(result, buffer_threshold=1000000)
             except Exception as e:
                 logger.info('Caught an exception: {}'.format(e))
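The worker pool's private `exec`-based `execute_task` is deleted in favor of the shared `parsl.executors.execute_task.execute_task`, with the MPI node-list handling factored into `_init_mpi_env` and run in `worker()` before the task starts. An end-to-end sketch of the shared path for a non-MPI task (buffer construction normally happens on the submit side):

    from parsl.executors.execute_task import execute_task
    from parsl.serialize import serialize
    from parsl.serialize.facade import pack_res_spec_apply_message

    def greet(name: str) -> str:
        return f"hello {name}"

    # The submit side packs function, arguments and resource spec into one buffer...
    buf = pack_res_spec_apply_message(greet, ("world",), {}, {})
    # ...and the worker now just deserializes and calls it, no exec()/namespace tricks.
    result = execute_task(buf)
    serialized_result = serialize(result, buffer_threshold=1000000)
    assert result == "hello world"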
{parsl-2024.11.18.dist-info → parsl-2024.12.2.dist-info}/METADATA CHANGED
@@ -1,9 +1,9 @@
 Metadata-Version: 2.1
 Name: parsl
-Version: 2024.11.18
+Version: 2024.12.2
 Summary: Simple data dependent workflows in Python
 Home-page: https://github.com/Parsl/parsl
-Download-URL: https://github.com/Parsl/parsl/archive/2024.11.18.tar.gz
+Download-URL: https://github.com/Parsl/parsl/archive/2024.12.02.tar.gz
 Author: The Parsl Team
 Author-email: parsl@googlegroups.com
 License: Apache 2.0