parsl 2025.3.24__py3-none-any.whl → 2025.3.31__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
parsl/dataflow/dflow.py CHANGED
@@ -45,6 +45,7 @@ from parsl.executors.threads import ThreadPoolExecutor
 from parsl.jobs.job_status_poller import JobStatusPoller
 from parsl.monitoring import MonitoringHub
 from parsl.monitoring.message_type import MessageType
+from parsl.monitoring.radios.multiprocessing import MultiprocessingQueueRadioSender
 from parsl.monitoring.remote import monitor_wrapper
 from parsl.process_loggers import wrap_with_logs
 from parsl.usage_tracking.usage import UsageTracker
@@ -110,8 +111,11 @@ class DataFlowKernel:
         self.monitoring: Optional[MonitoringHub]
         self.monitoring = config.monitoring
 
+        self.monitoring_radio = None
+
         if self.monitoring:
             self.monitoring.start(self.run_dir, self.config.run_dir)
+            self.monitoring_radio = MultiprocessingQueueRadioSender(self.monitoring.resource_msgs)
 
         self.time_began = datetime.datetime.now()
         self.time_completed: Optional[datetime.datetime] = None
@@ -156,9 +160,9 @@ class DataFlowKernel:
                          'host': gethostname(),
                          }
 
-        if self.monitoring:
-            self.monitoring.send((MessageType.WORKFLOW_INFO,
-                                  workflow_info))
+        if self.monitoring_radio:
+            self.monitoring_radio.send((MessageType.WORKFLOW_INFO,
+                                        workflow_info))
 
         if config.checkpoint_files is not None:
             checkpoint_files = config.checkpoint_files
@@ -231,9 +235,9 @@ class DataFlowKernel:
             raise InternalConsistencyError(f"Exit case for {mode} should be unreachable, validated by typeguard on Config()")
 
     def _send_task_log_info(self, task_record: TaskRecord) -> None:
-        if self.monitoring:
+        if self.monitoring_radio:
             task_log_info = self._create_task_log_info(task_record)
-            self.monitoring.send((MessageType.TASK_INFO, task_log_info))
+            self.monitoring_radio.send((MessageType.TASK_INFO, task_log_info))
 
     def _create_task_log_info(self, task_record: TaskRecord) -> Dict[str, Any]:
         """
@@ -1215,15 +1219,16 @@ class DataFlowKernel:
         logger.info("Terminated executors")
         self.time_completed = datetime.datetime.now()
 
-        if self.monitoring:
+        if self.monitoring_radio:
             logger.info("Sending final monitoring message")
-            self.monitoring.send((MessageType.WORKFLOW_INFO,
-                                  {'tasks_failed_count': self.task_state_counts[States.failed],
-                                   'tasks_completed_count': self.task_state_counts[States.exec_done],
-                                   "time_began": self.time_began,
-                                   'time_completed': self.time_completed,
-                                   'run_id': self.run_id, 'rundir': self.run_dir}))
+            self.monitoring_radio.send((MessageType.WORKFLOW_INFO,
+                                        {'tasks_failed_count': self.task_state_counts[States.failed],
+                                         'tasks_completed_count': self.task_state_counts[States.exec_done],
+                                         "time_began": self.time_began,
+                                         'time_completed': self.time_completed,
+                                         'run_id': self.run_id, 'rundir': self.run_dir}))
 
+        if self.monitoring:
             logger.info("Terminating monitoring")
             self.monitoring.close()
             logger.info("Terminated monitoring")
parsl/executors/globus_compute.py CHANGED
@@ -2,10 +2,11 @@ from __future__ import annotations
 
 import copy
 from concurrent.futures import Future
-from typing import Any, Callable, Dict
+from typing import Any, Callable, Dict, List, Optional
 
 import typeguard
 
+from parsl.data_provider.staging import Staging
 from parsl.errors import OptionalModuleMissing
 from parsl.executors.base import ParslExecutor
 from parsl.utils import RepresentationMixin
@@ -40,6 +41,8 @@ class GlobusComputeExecutor(ParslExecutor, RepresentationMixin):
             self,
             executor: Executor,
             label: str = 'GlobusComputeExecutor',
+            storage_access: Optional[List[Staging]] = None,
+            working_dir: Optional[str] = None,
     ):
         """
         Parameters
@@ -52,6 +55,12 @@ class GlobusComputeExecutor(ParslExecutor, RepresentationMixin):
 
         label:
             a label to name the executor
+
+        storage_access:
+            a list of staging providers that will be used for file staging
+
+        working_dir:
+            The working dir to be used for file staging
         """
         if not _globus_compute_enabled:
             raise OptionalModuleMissing(
@@ -64,6 +73,8 @@ class GlobusComputeExecutor(ParslExecutor, RepresentationMixin):
         self.resource_specification = self.executor.resource_specification
         self.user_endpoint_config = self.executor.user_endpoint_config
         self.label = label
+        self.storage_access = storage_access
+        self.working_dir = working_dir
 
     def start(self) -> None:
         """ Start the Globus Compute Executor """
parsl/executors/high_throughput/interchange.py CHANGED
@@ -328,7 +328,7 @@ class Interchange:
             self.process_results_incoming(interesting_managers, monitoring_radio)
             self.expire_bad_managers(interesting_managers, monitoring_radio)
             self.expire_drained_managers(interesting_managers, monitoring_radio)
-            self.process_tasks_to_send(interesting_managers)
+            self.process_tasks_to_send(interesting_managers, monitoring_radio)
 
         self.zmq_context.destroy()
         delta = time.time() - start
@@ -452,7 +452,7 @@ class Interchange:
                 m['active'] = False
                 self._send_monitoring_info(monitoring_radio, m)
 
-    def process_tasks_to_send(self, interesting_managers: Set[bytes]) -> None:
+    def process_tasks_to_send(self, interesting_managers: Set[bytes], monitoring_radio: Optional[MonitoringRadioSender]) -> None:
         # Check if there are tasks that could be sent to managers
 
         logger.debug(
@@ -481,13 +481,14 @@ class Interchange:
                     m['idle_since'] = None
                     logger.debug("Sent tasks: %s to manager %r", tids, manager_id)
                     # recompute real_capacity after sending tasks
-                    real_capacity = m['max_capacity'] - tasks_inflight
+                    real_capacity -= task_count
                     if real_capacity > 0:
                         logger.debug("Manager %r has free capacity %s", manager_id, real_capacity)
                         # ... so keep it in the interesting_managers list
                     else:
                         logger.debug("Manager %r is now saturated", manager_id)
                         interesting_managers.remove(manager_id)
+                    self._send_monitoring_info(monitoring_radio, m)
                 else:
                     interesting_managers.remove(manager_id)
                     # logger.debug("Nothing to send to manager {}".format(manager_id))
@@ -505,13 +506,24 @@ class Interchange:
             else:
                 logger.debug("Got %s result items in batch from manager %r", len(all_messages), manager_id)
 
-                b_messages = []
+                m = self._ready_managers[manager_id]
+                b_messages_to_send = []
 
                 for p_message in all_messages:
                     r = pickle.loads(p_message)
                     if r['type'] == 'result':
                         # process this for task ID and forward to executor
-                        b_messages.append((p_message, r))
+                        logger.debug("Removing task %s from manager record %r", r["task_id"], manager_id)
+                        try:
+                            m['tasks'].remove(r['task_id'])
+                            b_messages_to_send.append(p_message)
+                        except Exception:
+                            logger.exception(
+                                "Ignoring exception removing task_id %s for manager %r with task list %s",
+                                r['task_id'],
+                                manager_id,
+                                m["tasks"]
+                            )
                     elif r['type'] == 'monitoring':
                         # the monitoring code makes the assumption that no
                         # monitoring messages will be received if monitoring
@@ -525,43 +537,21 @@ class Interchange:
                     else:
                         logger.error("Interchange discarding result_queue message of unknown type: %s", r["type"])
 
-                got_result = False
-                m = self._ready_managers[manager_id]
-                for (_, r) in b_messages:
-                    assert 'type' in r, f"Message is missing type entry: {r}"
-                    if r['type'] == 'result':
-                        got_result = True
-                        try:
-                            logger.debug("Removing task %s from manager record %r", r["task_id"], manager_id)
-                            m['tasks'].remove(r['task_id'])
-                        except Exception:
-                            # If we reach here, there's something very wrong.
-                            logger.exception(
-                                "Ignoring exception removing task_id %s for manager %r with task list %s",
-                                r['task_id'],
-                                manager_id,
-                                m["tasks"]
-                            )
-
-                b_messages_to_send = []
-                for (b_message, _) in b_messages:
-                    b_messages_to_send.append(b_message)
-
                 if b_messages_to_send:
                     logger.debug("Sending messages on results_outgoing")
                     self.results_outgoing.send_multipart(b_messages_to_send)
                     logger.debug("Sent messages on results_outgoing")
 
-                logger.debug("Current tasks on manager %r: %s", manager_id, m["tasks"])
-                if len(m['tasks']) == 0 and m['idle_since'] is None:
-                    m['idle_since'] = time.time()
-
-                # A manager is only made interesting here if a result was
-                # received, which means there should be capacity for a new
-                # task now. Heartbeats and monitoring messages do not make a
-                # manager become interesting.
-                if got_result:
+                    # At least one result received, so manager now has idle capacity
                     interesting_managers.add(manager_id)
+
+                if len(m['tasks']) == 0 and m['idle_since'] is None:
+                    m['idle_since'] = time.time()
+
+                self._send_monitoring_info(monitoring_radio, m)
+
+                logger.debug("Current tasks on manager %r: %s", manager_id, m["tasks"])
+
         logger.debug("leaving results_incoming section")
 
     def expire_bad_managers(self, interesting_managers: Set[bytes], monitoring_radio: Optional[MonitoringRadioSender]) -> None:
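The capacity bookkeeping fix in process_tasks_to_send is easiest to see with concrete numbers. The names (real_capacity, task_count, tasks_inflight, max_capacity) come from the diff above; the values below are illustrative only. Before this release, the branch recomputed capacity from a snapshot of tasks_inflight taken before the send; decrementing the already-computed real_capacity by the number of tasks just dispatched keeps the arithmetic consistent:

    max_capacity = 8
    tasks_inflight = 3                              # snapshot before sending
    real_capacity = max_capacity - tasks_inflight   # 5 free slots
    task_count = 5                                  # tasks just sent
    real_capacity -= task_count                     # 0: manager is saturated
    assert real_capacity == 0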
parsl/executors/status_handling.py CHANGED
@@ -289,20 +289,20 @@ class BlockProviderExecutor(ParslExecutor):
             logger.debug("Sending block monitoring message: %r", msg)
             self.submit_monitoring_radio.send((MessageType.BLOCK_INFO, msg))
 
-    def create_monitoring_info(self, status: Dict[str, JobStatus]) -> Sequence[object]:
+    def create_monitoring_info(self, status: Dict[str, JobStatus]) -> Sequence[Dict[str, Any]]:
         """Create a monitoring message for each block based on the poll status.
         """
-        msg = []
-        for bid, s in status.items():
-            d: Dict[str, Any] = {}
-            d['run_id'] = self.run_id
-            d['status'] = s.status_name
-            d['timestamp'] = datetime.datetime.now()
-            d['executor_label'] = self.label
-            d['job_id'] = self.blocks_to_job_id.get(bid, None)
-            d['block_id'] = bid
-            msg.append(d)
-        return msg
+        return [
+            {
+                "run_id": self.run_id,
+                "status": s.status_name,
+                "timestamp": datetime.datetime.now(),
+                "executor_label": self.label,
+                "job_id": self.blocks_to_job_id.get(bid, None),
+                "block_id": bid
+            }
+            for bid, s in status.items()
+        ]
 
     def poll_facade(self) -> None:
         now = time.time()
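The create_monitoring_info change is a behavior-preserving refactor from an accumulate-into-list loop to a list comprehension, plus a tighter return annotation. A minimal sketch of the same refactor pattern on toy data:

    status = {"block-0": "RUNNING", "block-1": "COMPLETED"}  # toy poll result

    # before: imperative accumulation
    msg = []
    for bid, s in status.items():
        d = {"block_id": bid, "status": s}
        msg.append(d)

    # after: a single expression producing the same list of dicts
    msg2 = [{"block_id": bid, "status": s} for bid, s in status.items()]
    assert msg == msg2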
parsl/executors/taskvine/executor.py CHANGED
@@ -40,6 +40,7 @@ from parsl.executors.taskvine.factory_config import TaskVineFactoryConfig
 from parsl.executors.taskvine.manager import _taskvine_submit_wait
 from parsl.executors.taskvine.manager_config import TaskVineManagerConfig
 from parsl.executors.taskvine.utils import ParslFileToVine, ParslTaskToVine
+from parsl.multiprocessing import SpawnContext
 from parsl.process_loggers import wrap_with_logs
 from parsl.providers import CondorProvider, LocalProvider
 from parsl.providers.base import ExecutionProvider
@@ -134,13 +135,13 @@ class TaskVineExecutor(BlockProviderExecutor, putils.RepresentationMixin):
         self.storage_access = storage_access
 
         # Queue to send ready tasks from TaskVine executor process to TaskVine manager process
-        self._ready_task_queue: multiprocessing.Queue = multiprocessing.Queue()
+        self._ready_task_queue: multiprocessing.Queue = SpawnContext.Queue()
 
         # Queue to send finished tasks from TaskVine manager process to TaskVine executor process
-        self._finished_task_queue: multiprocessing.Queue = multiprocessing.Queue()
+        self._finished_task_queue: multiprocessing.Queue = SpawnContext.Queue()
 
         # Event to signal whether the manager and factory processes should stop running
-        self._should_stop = multiprocessing.Event()
+        self._should_stop = SpawnContext.Event()
 
         # TaskVine manager process
         self._submit_process = None
@@ -253,17 +254,17 @@ class TaskVineExecutor(BlockProviderExecutor, putils.RepresentationMixin):
                                  "finished_task_queue": self._finished_task_queue,
                                  "should_stop": self._should_stop,
                                  "manager_config": self.manager_config}
-        self._submit_process = multiprocessing.Process(target=_taskvine_submit_wait,
-                                                       name="TaskVine-Submit-Process",
-                                                       kwargs=submit_process_kwargs)
+        self._submit_process = SpawnContext.Process(target=_taskvine_submit_wait,
+                                                    name="TaskVine-Submit-Process",
+                                                    kwargs=submit_process_kwargs)
 
         # Create a process to run the TaskVine factory if enabled.
         if self.worker_launch_method == 'factory':
             factory_process_kwargs = {"should_stop": self._should_stop,
                                       "factory_config": self.factory_config}
-            self._factory_process = multiprocessing.Process(target=_taskvine_factory,
-                                                            name="TaskVine-Factory-Process",
-                                                            kwargs=factory_process_kwargs)
+            self._factory_process = SpawnContext.Process(target=_taskvine_factory,
+                                                         name="TaskVine-Factory-Process",
+                                                         kwargs=factory_process_kwargs)
 
         # Run thread to collect results and set tasks' futures.
         self._collector_thread = threading.Thread(target=self._collect_taskvine_results,
@@ -622,8 +623,8 @@ class TaskVineExecutor(BlockProviderExecutor, putils.RepresentationMixin):
         with self._tasks_lock:
             future = self.tasks.pop(task_report.executor_id)
 
-        logger.debug(f'Updating Future for Parsl Task: {task_report.executor_id}. \
-                Task {task_report.executor_id} has result_received set to {task_report.result_received}')
+        logger.debug(f'Updating Future for Parsl Task: {task_report.executor_id}. '
+                     f'Task {task_report.executor_id} has result_received set to {task_report.result_received}')
         if task_report.result_received:
             try:
                 with open(task_report.result_file, 'rb') as f_in:
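Both this file and the Work Queue executor below move from the platform-default multiprocessing primitives to spawn-based ones. A minimal sketch of the pattern, assuming parsl.multiprocessing.SpawnContext is essentially multiprocessing.get_context("spawn"): spawn starts a fresh interpreter for each child, avoiding the deadlocks that fork (the Linux default) can cause in a threaded parent process.

    import multiprocessing

    # assumed equivalent of parsl.multiprocessing.SpawnContext
    SpawnContext = multiprocessing.get_context("spawn")

    def work(q) -> None:
        q.put("done")

    if __name__ == "__main__":
        q = SpawnContext.Queue()      # queue bound to the spawn context
        e = SpawnContext.Event()      # event bound to the spawn context
        p = SpawnContext.Process(target=work, args=(q,), name="Example-Spawn")
        p.start()
        print(q.get())                # -> done
        p.join()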
parsl/executors/workqueue/executor.py CHANGED
@@ -31,6 +31,7 @@ from parsl.errors import OptionalModuleMissing
 from parsl.executors.errors import ExecutorError, InvalidResourceSpecification
 from parsl.executors.status_handling import BlockProviderExecutor
 from parsl.executors.workqueue import exec_parsl_function
+from parsl.multiprocessing import SpawnContext, SpawnProcess
 from parsl.process_loggers import wrap_with_logs
 from parsl.providers import CondorProvider, LocalProvider
 from parsl.providers.base import ExecutionProvider
@@ -260,8 +261,8 @@ class WorkQueueExecutor(BlockProviderExecutor, putils.RepresentationMixin):
 
         self.scaling_cores_per_worker = scaling_cores_per_worker
         self.label = label
-        self.task_queue = multiprocessing.Queue()  # type: multiprocessing.Queue
-        self.collector_queue = multiprocessing.Queue()  # type: multiprocessing.Queue
+        self.task_queue: multiprocessing.Queue = SpawnContext.Queue()
+        self.collector_queue: multiprocessing.Queue = SpawnContext.Queue()
         self.address = address
         self.port = port
         self.executor_task_counter = -1
@@ -282,7 +283,7 @@ class WorkQueueExecutor(BlockProviderExecutor, putils.RepresentationMixin):
         self.autolabel_window = autolabel_window
         self.autocategory = autocategory
         self.max_retries = max_retries
-        self.should_stop = multiprocessing.Value(c_bool, False)
+        self.should_stop = SpawnContext.Value(c_bool, False)
         self.cached_envs = {}  # type: Dict[int, str]
         self.worker_options = worker_options
         self.worker_executable = worker_executable
@@ -334,7 +335,7 @@ class WorkQueueExecutor(BlockProviderExecutor, putils.RepresentationMixin):
 
         logger.debug("Starting WorkQueueExecutor")
 
-        port_mailbox = multiprocessing.Queue()
+        port_mailbox = SpawnContext.Queue()
 
         # Create a Process to perform WorkQueue submissions
         submit_process_kwargs = {"task_queue": self.task_queue,
@@ -355,9 +356,9 @@ class WorkQueueExecutor(BlockProviderExecutor, putils.RepresentationMixin):
                                  "port_mailbox": port_mailbox,
                                  "coprocess": self.coprocess
                                  }
-        self.submit_process = multiprocessing.Process(target=_work_queue_submit_wait,
-                                                      name="WorkQueue-Submit-Process",
-                                                      kwargs=submit_process_kwargs)
+        self.submit_process = SpawnProcess(target=_work_queue_submit_wait,
+                                           name="WorkQueue-Submit-Process",
+                                           kwargs=submit_process_kwargs)
 
         self.collector_thread = threading.Thread(target=self._collect_work_queue_results,
                                                  name="WorkQueue-collector-thread")
parsl/monitoring/monitoring.py CHANGED
@@ -12,7 +12,6 @@ import typeguard
 
 from parsl.monitoring.errors import MonitoringHubStartError
 from parsl.monitoring.radios.filesystem_router import filesystem_router_starter
-from parsl.monitoring.radios.multiprocessing import MultiprocessingQueueRadioSender
 from parsl.monitoring.radios.udp_router import udp_router_starter
 from parsl.monitoring.types import TaggedMonitoringMessage
 from parsl.multiprocessing import (
@@ -180,8 +179,6 @@ class MonitoringHub(RepresentationMixin):
         self.filesystem_proc.start()
         logger.info("Started filesystem radio receiver process %s", self.filesystem_proc.pid)
 
-        self.radio = MultiprocessingQueueRadioSender(self.resource_msgs)
-
         try:
             udp_comm_q_result = udp_comm_q.get(block=True, timeout=120)
             udp_comm_q.close()
@@ -199,10 +196,6 @@ class MonitoringHub(RepresentationMixin):
 
         logger.info("Monitoring Hub initialized")
 
-    def send(self, message: TaggedMonitoringMessage) -> None:
-        logger.debug("Sending message type %s", message[0])
-        self.radio.send(message)
-
     def close(self) -> None:
         logger.info("Terminating Monitoring Hub")
         if self.monitoring_hub_active:
parsl/tests/test_monitoring/test_basic.py CHANGED
@@ -120,7 +120,7 @@ def test_row_counts(tmpd_cwd, fresh_config):
     # Two entries: one showing manager active, one inactive
     result = connection.execute(text("SELECT COUNT(*) FROM node"))
     (c, ) = result.first()
-    assert c == 2
+    assert c == 4
 
     # There should be one block polling status
     # local provider has a status_polling_interval of 5s
parsl/version.py CHANGED
@@ -3,4 +3,4 @@
 Year.Month.Day[alpha/beta/..]
 Alphas will be numbered like this -> 2024.12.10a0
 """
-VERSION = '2025.03.24'
+VERSION = '2025.03.31'
parsl-2025.3.31.dist-info/METADATA CHANGED
@@ -1,9 +1,9 @@
 Metadata-Version: 2.1
 Name: parsl
-Version: 2025.3.24
+Version: 2025.3.31
 Summary: Simple data dependent workflows in Python
 Home-page: https://github.com/Parsl/parsl
-Download-URL: https://github.com/Parsl/parsl/archive/2025.03.24.tar.gz
+Download-URL: https://github.com/Parsl/parsl/archive/2025.03.31.tar.gz
 Author: The Parsl Team
 Author-email: parsl@googlegroups.com
 License: Apache 2.0
parsl-2025.3.31.dist-info/RECORD CHANGED
@@ -8,7 +8,7 @@ parsl/multiprocessing.py,sha256=JNAfgdZvQSsxVyUp229OOUqWwf_ZUhpmw8X9CdF3i6k,3614
 parsl/process_loggers.py,sha256=uQ7Gd0W72Jz7rrcYlOMfLsAEhkRltxXJL2MgdduJjEw,1136
 parsl/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 parsl/utils.py,sha256=codTX6_KLhgeTwNkRzc1lo4bgc1M93eJ-lkqOO98fvk,14331
-parsl/version.py,sha256=RGffnfvs_gOc_lEWgnZpd1jy_cuz-F39xp5lsIo6OyU,131
+parsl/version.py,sha256=R7rMnpnIyEjWSGcfHgaVmi-pRpQKOwTJ9-s2r-71dKE,131
 parsl/app/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 parsl/app/app.py,sha256=0gbM4AH2OtFOLsv07I5nglpElcwMSOi-FzdZZfrk7So,8532
 parsl/app/bash.py,sha256=jm2AvePlCT9DZR7H_4ANDWxatp5dN_22FUlT_gWhZ-g,5528
@@ -53,7 +53,7 @@ parsl/data_provider/staging.py,sha256=ZDZuuFg38pjUStegKPcvPsfGp3iMeReMzfU6DSwtJj
 parsl/data_provider/zip.py,sha256=S4kVuH9lxAegRURYbvIUR7EYYBOccyslaqyCrVWUBhw,4497
 parsl/dataflow/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 parsl/dataflow/dependency_resolvers.py,sha256=Om8Dgh7a0ZwgXAc6TlhxLSzvxXHDlNNV1aBNiD3JTNY,3325
-parsl/dataflow/dflow.py,sha256=kAEziQPm9rpGKzqhOuHSnnIXQ3KgrRaeRbYReww7Amw,61620
+parsl/dataflow/dflow.py,sha256=9-aD-CmOgjAkXykZsfZ6OAVbOhlNjMmM83J5dEq_Wrg,61941
 parsl/dataflow/errors.py,sha256=daVfr2BWs1zRsGD6JtosEMttWHvK1df1Npiu_MUvFKg,3998
 parsl/dataflow/futures.py,sha256=08LuP-HFiHBIZmeKCjlsazw_WpQ5fwevrU2_WbidkYw,6080
 parsl/dataflow/memoization.py,sha256=QUkTduZ_gdr8i08VWNWrqhfEvoMGsPDZegWUE2_7sGQ,12579
@@ -64,8 +64,8 @@ parsl/executors/__init__.py,sha256=PEuXYrnVqwlaz_nt82s9D_YNaVsX7ET29DeIZRUR8hw,5
 parsl/executors/base.py,sha256=_X-huuXKCoQatT_TYx9ApEuXiVVvUYI0S7uKlVMHP-U,4488
 parsl/executors/errors.py,sha256=ZxL3nK5samPos8Xixo_jpRtPIiRJfZ5D397_qaXj2g0,2515
 parsl/executors/execute_task.py,sha256=PtqHxk778UQaNah1AN-TJV5emZbOcU5TGtWDxFn3_F4,1079
-parsl/executors/globus_compute.py,sha256=818XKRobNRCs5-h30x2NP2XSLkoWlWoNeBZtv9hF-ec,4851
-parsl/executors/status_handling.py,sha256=oiy6SQUQWwVciZd9MxF0sna9miqkMfaKv6ZTrj3h-Pc,15772
+parsl/executors/globus_compute.py,sha256=xIJawhdvnYgHbVd-mQD23Nmlldty13hURoPjOv4I_qE,5276
+parsl/executors/status_handling.py,sha256=n2DLASEvKZNgFpviAMYDfqcAsxMiU7QxFemw91YSenc,15746
 parsl/executors/threads.py,sha256=_LA5NA3GSvtjDend-1HVpjoDoNHHW13rAD0CET99fjQ,3463
 parsl/executors/flux/__init__.py,sha256=P9grTTeRPXfqXurFhlSS7XhmE6tTbnCnyQ1f9b-oYHE,136
 parsl/executors/flux/execute_parsl_task.py,sha256=zHP5M7ILGiwnoalZ8WsfVVdZM7uP4iQo2ThVh4crxpM,1530
@@ -74,7 +74,7 @@ parsl/executors/flux/flux_instance_manager.py,sha256=5T3Rp7ZM-mlT0Pf0Gxgs5_YmnaP
 parsl/executors/high_throughput/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 parsl/executors/high_throughput/errors.py,sha256=k2XuvvFdUfNs2foHFnxmS-BToRMfdXpYEa4EF3ELKq4,1554
 parsl/executors/high_throughput/executor.py,sha256=VVKe3gveCqa3rERAqhVNsWifSrLqyMcUwpdyHCsYdz8,39663
-parsl/executors/high_throughput/interchange.py,sha256=7sKIvxP3a7HSzqEq25ZCpABx-1Q2f585pFDGzUvo7_4,29459
+parsl/executors/high_throughput/interchange.py,sha256=n_mQiaOFUushQl87nHugMeNShO0X5zCHHR3N1EF28aU,28975
 parsl/executors/high_throughput/manager_record.py,sha256=ZMsqFxvreGLRXAw3N-JnODDa9Qfizw2tMmcBhm4lco4,490
 parsl/executors/high_throughput/manager_selector.py,sha256=UKcUE6v0tO7PDMTThpKSKxVpOpOUilxDL7UbNgpZCxo,2116
 parsl/executors/high_throughput/monitoring_info.py,sha256=HC0drp6nlXQpAop5PTUKNjdXMgtZVvrBL0JzZJebPP4,298
@@ -91,7 +91,7 @@ parsl/executors/radical/rpex_worker.py,sha256=z6r82ZujKb6sdKIdHsQ_5EBMDIQieeGcrl
 parsl/executors/taskvine/__init__.py,sha256=9rwp3M8B0YyEhZMLO0RHaNw7u1nc01WHbXLqnBTanu0,293
 parsl/executors/taskvine/errors.py,sha256=euIYkSslrNSI85kyi2s0xzOaO9ik4c1fYHstMIeiBJk,652
 parsl/executors/taskvine/exec_parsl_function.py,sha256=ftGdJU78lKPPkphSHlEi4rj164mhuMHJjghVqfgeXKk,7085
-parsl/executors/taskvine/executor.py,sha256=PpsMPYx99mgKq_xOqnRdXo20NZOr7JFtOIKETIEK-z0,30930
+parsl/executors/taskvine/executor.py,sha256=JNGag87n1NYHcevZQaw6aK0JlIqwUuNc0PfYbHQ8o-c,30950
 parsl/executors/taskvine/factory.py,sha256=GU5JryEAKJuYKwrSc162BN-lhcKhapvBZHT820pxwic,2772
 parsl/executors/taskvine/factory_config.py,sha256=ZQC5vyDe8cM0nuv7fbBCV2xnWGAZ87iLlT2UqmFFI1U,3695
 parsl/executors/taskvine/manager.py,sha256=SUi5mqqMm_rnkBLrZtTQe7RiHqWDn1oOejQscYzfwAU,25797
@@ -100,7 +100,7 @@ parsl/executors/taskvine/utils.py,sha256=iSrIogeiauL3UNy_9tiZp1cBSNn6fIJkMYQRVi1
 parsl/executors/workqueue/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 parsl/executors/workqueue/errors.py,sha256=XO2naYhAsHHyiOBH6hpObg3mPNDmvMoFqErsj0-v7jc,541
 parsl/executors/workqueue/exec_parsl_function.py,sha256=YXKVVIa4zXmOtz-0Ca4E_5nQfN_3S2bh2tB75uZZB4w,7774
-parsl/executors/workqueue/executor.py,sha256=aRvEt_BGO2AGNaoF8P109z81XhlFqjyjJnka3yIzy-4,49717
+parsl/executors/workqueue/executor.py,sha256=dwsUuXoqEVJXSENgjTN_063OzjKrIi6tHIR3aYgYyMc,49717
 parsl/executors/workqueue/parsl_coprocess.py,sha256=cF1UmTgVLoey6QzBcbYgEiEsRidSaFfuO54f1HFw_EM,5737
 parsl/executors/workqueue/parsl_coprocess_stub.py,sha256=_bJmpPIgL42qM6bVzeEKt1Mn1trSP41rtJguXxPGfHI,735
 parsl/jobs/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -117,7 +117,7 @@ parsl/monitoring/__init__.py,sha256=0ywNz6i0lM1xo_7_BIxhETDGeVd2C_0wwD7qgeaMR4c,
 parsl/monitoring/db_manager.py,sha256=L0c5S9ockq0UIchT2bjmkSAWXS-t0G-Q_neOIBfLbm0,33444
 parsl/monitoring/errors.py,sha256=GParOWoCTp2w1Hmif0PaF5J6p5dWVOwyhO18bcvr_uo,277
 parsl/monitoring/message_type.py,sha256=Khn88afNxcOIciKiCK4GLnn90I5BlRTiOL3zK-P07yQ,401
-parsl/monitoring/monitoring.py,sha256=ZA-36DYtPRApJDc6cd8R8xoRGfCG4OMa9mzW4OPGCCs,10065
+parsl/monitoring/monitoring.py,sha256=wWxcBnMSzHEPCXcavxrQQ-1ggNWSLLBc1y7b7v4usBs,9751
 parsl/monitoring/remote.py,sha256=t0qCTUMCzeJ_JOARFpjqlTNrAWdEb20BxhmZh9X7kEM,13728
 parsl/monitoring/types.py,sha256=oOCrzv-ab-_rv4pb8o58Sdb8G_RGp1aZriRbdf9zBEk,339
 parsl/monitoring/queries/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -337,7 +337,7 @@ parsl/tests/test_htex/test_worker_failure.py,sha256=Uz-RHI-LK78FMjXUvrUFmo4iYfmp
 parsl/tests/test_htex/test_zmq_binding.py,sha256=WNFsCKKfid2uEfem0WLgl1wnBncIabpAv6kmg3imBxk,4001
 parsl/tests/test_monitoring/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 parsl/tests/test_monitoring/test_app_names.py,sha256=A-mOMCVhZDnUyJp32fsTUkHdcyval8o7WPEWacDkbD4,2208
-parsl/tests/test_monitoring/test_basic.py,sha256=VdF6JHfqsEOIMg-ysIAREgygZIjHWNDVLNVQ7jhWxmQ,4592
+parsl/tests/test_monitoring/test_basic.py,sha256=L31TOi1u1gKjcBPmhquYzXLwga6fkVU5wGKMNWWD0mg,4592
 parsl/tests/test_monitoring/test_db_locks.py,sha256=3s3c1xhKo230ZZIJ3f1Ca4U7LcEdXnanOGVXQyNlk2U,2895
 parsl/tests/test_monitoring/test_exit_helper.py,sha256=ob8Qd1hlkq_mowygfPetTnYN9LfuqeXHRpPilSfDSog,1232
 parsl/tests/test_monitoring/test_fuzz_zmq.py,sha256=SQNNHhXxHB_LwW4Ujqkgut3lbG0XVW-hliPagQQpiTc,3449
@@ -459,13 +459,13 @@ parsl/usage_tracking/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hS
 parsl/usage_tracking/api.py,sha256=iaCY58Dc5J4UM7_dJzEEs871P1p1HdxBMtNGyVdzc9g,1821
 parsl/usage_tracking/levels.py,sha256=xbfzYEsd55KiZJ-mzNgPebvOH4rRHum04hROzEf41tU,291
 parsl/usage_tracking/usage.py,sha256=8hq1UPdFlVcC0V3aj0ve-MvCyvwK8Xr3CVuSto3dTW4,9165
-parsl-2025.3.24.data/scripts/exec_parsl_function.py,sha256=YXKVVIa4zXmOtz-0Ca4E_5nQfN_3S2bh2tB75uZZB4w,7774
-parsl-2025.3.24.data/scripts/interchange.py,sha256=17MrOc7-FXxKBWTwkzIbUoa8fvvDfPelfjByd3ZD2Wk,29446
-parsl-2025.3.24.data/scripts/parsl_coprocess.py,sha256=zrVjEqQvFOHxsLufPi00xzMONagjVwLZbavPM7bbjK4,5722
-parsl-2025.3.24.data/scripts/process_worker_pool.py,sha256=__gFeFQJpV5moRofj3WKQCnKp6gmzieXjzkmzVuTmX4,41123
-parsl-2025.3.24.dist-info/LICENSE,sha256=tAkwu8-AdEyGxGoSvJ2gVmQdcicWw3j1ZZueVV74M-E,11357
-parsl-2025.3.24.dist-info/METADATA,sha256=_Wl6Xlf9aCRKSbnQxEquJuYSA3_uNmlLKPgs-2B9nf8,4023
-parsl-2025.3.24.dist-info/WHEEL,sha256=tZoeGjtWxWRfdplE7E3d45VPlLNQnvbKiYnx7gwAy8A,92
-parsl-2025.3.24.dist-info/entry_points.txt,sha256=XqnsWDYoEcLbsMcpnYGKLEnSBmaIe1YoM5YsBdJG2tI,176
-parsl-2025.3.24.dist-info/top_level.txt,sha256=PIheYoUFQtF2icLsgOykgU-Cjuwr2Oi6On2jo5RYgRM,6
-parsl-2025.3.24.dist-info/RECORD,,
+parsl-2025.3.31.data/scripts/exec_parsl_function.py,sha256=YXKVVIa4zXmOtz-0Ca4E_5nQfN_3S2bh2tB75uZZB4w,7774
+parsl-2025.3.31.data/scripts/interchange.py,sha256=PnRAYR8nhJjfo6GcBEcWNbIVcNt4cjGbeIRVMd_HxbM,28962
+parsl-2025.3.31.data/scripts/parsl_coprocess.py,sha256=zrVjEqQvFOHxsLufPi00xzMONagjVwLZbavPM7bbjK4,5722
+parsl-2025.3.31.data/scripts/process_worker_pool.py,sha256=__gFeFQJpV5moRofj3WKQCnKp6gmzieXjzkmzVuTmX4,41123
+parsl-2025.3.31.dist-info/LICENSE,sha256=tAkwu8-AdEyGxGoSvJ2gVmQdcicWw3j1ZZueVV74M-E,11357
+parsl-2025.3.31.dist-info/METADATA,sha256=r1TeSEq-xgwIheUw2QV8RgqxzbwCc-Fxe8dP6cnEd_o,4023
+parsl-2025.3.31.dist-info/WHEEL,sha256=tZoeGjtWxWRfdplE7E3d45VPlLNQnvbKiYnx7gwAy8A,92
+parsl-2025.3.31.dist-info/entry_points.txt,sha256=XqnsWDYoEcLbsMcpnYGKLEnSBmaIe1YoM5YsBdJG2tI,176
+parsl-2025.3.31.dist-info/top_level.txt,sha256=PIheYoUFQtF2icLsgOykgU-Cjuwr2Oi6On2jo5RYgRM,6
+parsl-2025.3.31.dist-info/RECORD,,