parsl 2024.7.22__py3-none-any.whl → 2024.7.29__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
parsl/dataflow/dflow.py CHANGED
@@ -113,14 +113,10 @@ class DataFlowKernel:
         self.monitoring: Optional[MonitoringHub]
         self.monitoring = config.monitoring

-        # hub address and port for interchange to connect
-        self.hub_address = None  # type: Optional[str]
-        self.hub_zmq_port = None  # type: Optional[int]
         if self.monitoring:
             if self.monitoring.logdir is None:
                 self.monitoring.logdir = self.run_dir
-            self.hub_address = self.monitoring.hub_address
-            self.hub_zmq_port = self.monitoring.start(self.run_id, self.run_dir, self.config.run_dir)
+            self.monitoring.start(self.run_id, self.run_dir, self.config.run_dir)

         self.time_began = datetime.datetime.now()
         self.time_completed: Optional[datetime.datetime] = None
@@ -1181,9 +1177,9 @@ class DataFlowKernel:
         for executor in executors:
             executor.run_id = self.run_id
             executor.run_dir = self.run_dir
-            executor.hub_address = self.hub_address
-            executor.hub_zmq_port = self.hub_zmq_port
             if self.monitoring:
+                executor.hub_address = self.monitoring.hub_address
+                executor.hub_zmq_port = self.monitoring.hub_zmq_port
                 executor.monitoring_radio = self.monitoring.radio
             if hasattr(executor, 'provider'):
                 if hasattr(executor.provider, 'script_dir'):
@@ -1460,8 +1456,6 @@ class DataFlowKernel:
         Returns:
             - dict containing, hashed -> future mappings
         """
-        self.memo_lookup_table = None
-
         if checkpointDirs:
             return self._load_checkpoints(checkpointDirs)
         else:
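Note: the three dflow.py hunks above move ownership of the monitoring hub's address and ZMQ port from the DataFlowKernel onto the MonitoringHub object, and remove a stray memo_lookup_table reset in load_checkpoints. A minimal sketch of how code reads the hub endpoint after this change (the dfk argument is assumed to be a configured DataFlowKernel):

    from typing import Optional, Tuple

    def hub_endpoint(dfk) -> Optional[Tuple[str, int]]:
        """Sketch: as of 2024.7.29, hub_address/hub_zmq_port live on
        dfk.monitoring rather than on the DataFlowKernel itself."""
        if dfk.monitoring:
            return (dfk.monitoring.hub_address, dfk.monitoring.hub_zmq_port)
        return None  # monitoring disabled; no hub endpoint exists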
parsl/executors/base.py CHANGED
@@ -5,7 +5,7 @@ from typing import Any, Callable, Dict, Optional

 from typing_extensions import Literal, Self

-from parsl.monitoring.radios import MonitoringRadio
+from parsl.monitoring.radios import MonitoringRadioSender


 class ParslExecutor(metaclass=ABCMeta):
@@ -52,7 +52,7 @@ class ParslExecutor(metaclass=ABCMeta):
             *,
             hub_address: Optional[str] = None,
             hub_zmq_port: Optional[int] = None,
-            monitoring_radio: Optional[MonitoringRadio] = None,
+            monitoring_radio: Optional[MonitoringRadioSender] = None,
             run_dir: str = ".",
             run_id: Optional[str] = None,
     ):
@@ -147,11 +147,11 @@ class ParslExecutor(metaclass=ABCMeta):
         self._hub_zmq_port = value

     @property
-    def monitoring_radio(self) -> Optional[MonitoringRadio]:
+    def monitoring_radio(self) -> Optional[MonitoringRadioSender]:
         """Local radio for sending monitoring messages
         """
         return self._monitoring_radio

     @monitoring_radio.setter
-    def monitoring_radio(self, value: Optional[MonitoringRadio]) -> None:
+    def monitoring_radio(self, value: Optional[MonitoringRadioSender]) -> None:
         self._monitoring_radio = value
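Note: MonitoringRadio is renamed to MonitoringRadioSender throughout this release; the contract is unchanged, a single send(message) method. A hypothetical in-memory sender, for illustration only (ListRadioSender is not part of parsl):

    from parsl.monitoring.radios import MonitoringRadioSender

    class ListRadioSender(MonitoringRadioSender):
        """Hypothetical sender that buffers messages in a list,
        showing the one-method contract of the renamed ABC."""

        def __init__(self) -> None:
            self.messages: list = []

        def send(self, message: object) -> None:
            self.messages.append(message)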
parsl/executors/flux/executor.py CHANGED
@@ -200,7 +200,6 @@ class FluxExecutor(ParslExecutor, RepresentationMixin):
             raise EnvironmentError("Cannot find Flux installation in PATH")
         self.flux_path = os.path.abspath(flux_path)
         self._task_id_counter = itertools.count()
-        self._socket = zmq.Context().socket(zmq.REP)
         # Assumes a launch command cannot be None or empty
         self.launch_cmd = launch_cmd or self.DEFAULT_LAUNCH_CMD
         self._submission_queue: queue.Queue = queue.Queue()
@@ -213,7 +212,6 @@ class FluxExecutor(ParslExecutor, RepresentationMixin):
             args=(
                 self._submission_queue,
                 self._stop_event,
-                self._socket,
                 self.working_dir,
                 self.flux_executor_kwargs,
                 self.provider,
@@ -306,11 +304,13 @@ def _submit_wrapper(

     If an exception is thrown, error out all submitted tasks.
     """
-    try:
-        _submit_flux_jobs(submission_queue, stop_event, *args, **kwargs)
-    except Exception as exc:
-        _error_out_jobs(submission_queue, stop_event, exc)
-        raise
+    with zmq.Context() as ctx:
+        with ctx.socket(zmq.REP) as socket:
+            try:
+                _submit_flux_jobs(submission_queue, stop_event, socket, *args, **kwargs)
+            except Exception as exc:
+                _error_out_jobs(submission_queue, stop_event, exc)
+                raise


 def _error_out_jobs(
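Note: these FluxExecutor hunks stop constructing a REP socket in __init__ and instead create it inside _submit_wrapper, using pyzmq's context-manager support so the socket and context are cleaned up even when submission raises. The same pattern in isolation (the endpoint is a made-up example):

    import zmq

    def echo_once(endpoint: str = "tcp://127.0.0.1:5555") -> None:
        # Socket.__exit__ closes the socket and Context.__exit__ terminates
        # the context, so nothing leaks on the normal or the error path.
        with zmq.Context() as ctx:
            with ctx.socket(zmq.REP) as sock:
                sock.bind(endpoint)
                sock.send(sock.recv())  # echo a single request back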
parsl/executors/high_throughput/executor.py CHANGED
@@ -456,8 +456,6 @@ class HighThroughputExecutor(BlockProviderExecutor, RepresentationMixin, UsageIn
            {
               "task_id" : <task_id>
               "exception" : serialized exception object, on failure
            }
-
-        The `None` message is a die request.
         """
         logger.debug("Result queue worker starting")
@@ -475,58 +473,53 @@ class HighThroughputExecutor(BlockProviderExecutor, RepresentationMixin, UsageIn

             else:

-                if msgs is None:
-                    logger.debug("Got None, exiting")
-                    return
+                for serialized_msg in msgs:
+                    try:
+                        msg = pickle.loads(serialized_msg)
+                    except pickle.UnpicklingError:
+                        raise BadMessage("Message received could not be unpickled")

-                else:
-                    for serialized_msg in msgs:
+                    if msg['type'] == 'heartbeat':
+                        continue
+                    elif msg['type'] == 'result':
                         try:
-                            msg = pickle.loads(serialized_msg)
-                        except pickle.UnpicklingError:
-                            raise BadMessage("Message received could not be unpickled")
+                            tid = msg['task_id']
+                        except Exception:
+                            raise BadMessage("Message received does not contain 'task_id' field")
+
+                        if tid == -1 and 'exception' in msg:
+                            logger.warning("Executor shutting down due to exception from interchange")
+                            exception = deserialize(msg['exception'])
+                            self.set_bad_state_and_fail_all(exception)
+                            break
+
+                        task_fut = self.tasks.pop(tid)
+
+                        if 'result' in msg:
+                            result = deserialize(msg['result'])
+                            task_fut.set_result(result)

-                        if msg['type'] == 'heartbeat':
-                            continue
-                        elif msg['type'] == 'result':
+                        elif 'exception' in msg:
                             try:
-                                tid = msg['task_id']
-                            except Exception:
-                                raise BadMessage("Message received does not contain 'task_id' field")
-
-                            if tid == -1 and 'exception' in msg:
-                                logger.warning("Executor shutting down due to exception from interchange")
-                                exception = deserialize(msg['exception'])
-                                self.set_bad_state_and_fail_all(exception)
-                                break
-
-                            task_fut = self.tasks.pop(tid)
-
-                            if 'result' in msg:
-                                result = deserialize(msg['result'])
-                                task_fut.set_result(result)
-
-                            elif 'exception' in msg:
-                                try:
-                                    s = deserialize(msg['exception'])
-                                    # s should be a RemoteExceptionWrapper... so we can reraise it
-                                    if isinstance(s, RemoteExceptionWrapper):
-                                        try:
-                                            s.reraise()
-                                        except Exception as e:
-                                            task_fut.set_exception(e)
-                                    elif isinstance(s, Exception):
-                                        task_fut.set_exception(s)
-                                    else:
-                                        raise ValueError("Unknown exception-like type received: {}".format(type(s)))
-                                except Exception as e:
-                                    # TODO could be a proper wrapped exception?
-                                    task_fut.set_exception(
-                                        DeserializationError("Received exception, but handling also threw an exception: {}".format(e)))
-                            else:
-                                raise BadMessage("Message received is neither result or exception")
+                                s = deserialize(msg['exception'])
+                                # s should be a RemoteExceptionWrapper... so we can reraise it
+                                if isinstance(s, RemoteExceptionWrapper):
+                                    try:
+                                        s.reraise()
+                                    except Exception as e:
+                                        task_fut.set_exception(e)
+                                elif isinstance(s, Exception):
+                                    task_fut.set_exception(s)
+                                else:
+                                    raise ValueError("Unknown exception-like type received: {}".format(type(s)))
+                            except Exception as e:
+                                # TODO could be a proper wrapped exception?
+                                task_fut.set_exception(
+                                    DeserializationError("Received exception, but handling also threw an exception: {}".format(e)))
                         else:
-                            raise BadMessage("Message received with unknown type {}".format(msg['type']))
+                            raise BadMessage("Message received is neither result or exception")
+                    else:
+                        raise BadMessage("Message received with unknown type {}".format(msg['type']))

         logger.info("Result queue worker finished")
 
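Note: after this dedent, _result_queue_worker no longer special-cases a None poison-pill message; every batch is a list of pickled dicts, and only the 'heartbeat' and 'result' types are legal. The accepted shapes, with illustrative values (field names taken from the code above):

    import pickle

    heartbeat = {'type': 'heartbeat'}
    success = {'type': 'result', 'task_id': 7, 'result': b'<serialized result>'}
    failure = {'type': 'result', 'task_id': 7, 'exception': b'<serialized exception>'}

    # Messages arrive pickled, matching the pickle.loads() call above.
    assert pickle.loads(pickle.dumps(heartbeat))['type'] == 'heartbeat'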
parsl/executors/high_throughput/interchange.py CHANGED
@@ -410,6 +410,7 @@ class Interchange:
                 self._ready_managers[manager_id] = {'last_heartbeat': time.time(),
                                                     'idle_since': time.time(),
                                                     'block_id': None,
+                                                    'start_time': msg['start_time'],
                                                     'max_capacity': 0,
                                                     'worker_count': 0,
                                                     'active': True,
parsl/executors/high_throughput/manager_record.py CHANGED
@@ -6,6 +6,7 @@ from typing_extensions import TypedDict

 class ManagerRecord(TypedDict, total=False):
     block_id: Optional[str]
+    start_time: float
     tasks: List[Any]
     worker_count: int
     max_capacity: int
parsl/executors/high_throughput/process_worker_pool.py CHANGED
@@ -184,6 +184,7 @@ class Manager:

         self.uid = uid
         self.block_id = block_id
+        self.start_time = time.time()

         self.enable_mpi_mode = enable_mpi_mode
         self.mpi_launcher = mpi_launcher
@@ -263,6 +264,7 @@ class Manager:
                'worker_count': self.worker_count,
                'uid': self.uid,
                'block_id': self.block_id,
+               'start_time': self.start_time,
                'prefetch_capacity': self.prefetch_capacity,
                'max_capacity': self.worker_count + self.prefetch_capacity,
                'os': platform.system(),
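Note: the Interchange, ManagerRecord, and Manager hunks together thread a new start_time field from each worker manager to the interchange: the manager samples time.time() at construction, includes it in its registration message, and the interchange copies it into its ManagerRecord. An abridged sketch of the registration payload, restricted to the fields visible in this diff (values are illustrative; the real message carries more fields):

    import platform
    import time

    registration = {
        'worker_count': 4,            # illustrative
        'uid': 'manager-uid',         # illustrative
        'block_id': '0',              # illustrative
        'start_time': time.time(),    # new in 2024.7.29
        'prefetch_capacity': 0,       # illustrative
        'max_capacity': 4 + 0,        # worker_count + prefetch_capacity
        'os': platform.system(),
    }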
parsl/executors/status_handling.py CHANGED
@@ -174,6 +174,16 @@ class BlockProviderExecutor(ParslExecutor):
         # Filters first iterable by bool values in second
         return list(compress(to_kill, killed))

+    def scale_out_facade(self, n: int) -> List[str]:
+        block_ids = self._scale_out(n)
+        if block_ids is not None:
+            new_status = {}
+            for block_id in block_ids:
+                new_status[block_id] = JobStatus(JobState.PENDING)
+            self.send_monitoring_info(new_status)
+            self._status.update(new_status)
+        return block_ids
+
     def _scale_out(self, blocks: int = 1) -> List[str]:
         """Scales out the number of blocks by "blocks"
         """
@@ -193,15 +203,32 @@
                 self._simulated_status[block_id] = JobStatus(JobState.FAILED, "Failed to start block {}: {}".format(block_id, ex))
         return block_ids

-    @abstractmethod
     def scale_in(self, blocks: int) -> List[str]:
         """Scale in method.

         Cause the executor to reduce the number of blocks by count.

+        The default implementation will kill blocks without regard to their
+        status or whether they are executing tasks. Executors with more
+        nuanced scaling strategies might overload this method to work with
+        that strategy - see the HighThroughputExecutor for an example of that.
+
         :return: A list of block ids corresponding to the blocks that were removed.
         """
-        pass
+        # Obtain list of blocks to kill
+        to_kill = list(self.blocks_to_job_id.keys())[:blocks]
+        kill_ids = [self.blocks_to_job_id[block] for block in to_kill]
+
+        # Cancel the blocks provisioned
+        if self.provider:
+            logger.info(f"Scaling in jobs: {kill_ids}")
+            r = self.provider.cancel(kill_ids)
+            job_ids = self._filter_scale_in_ids(kill_ids, r)
+            block_ids_killed = [self.job_ids_to_block[jid] for jid in job_ids]
+            return block_ids_killed
+        else:
+            logger.error("No execution provider available to scale in")
+            return []

     def _launch_block(self, block_id: str) -> Any:
         launch_cmd = self._get_launch_command(block_id)
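Note: scale_in is no longer abstract. BlockProviderExecutor now ships the naive implementation above (kill the first N blocks, regardless of what they are running), which previously existed as near-identical copies in the TaskVine and Work Queue executors (removed further below). Executors wanting smarter behaviour override the method; a hypothetical subclass showing the extension point:

    from typing import List

    from parsl.executors.status_handling import BlockProviderExecutor

    class PoliteExecutor(BlockProviderExecutor):
        """Hypothetical: choose or reorder candidate blocks before
        falling back to the naive default (HighThroughputExecutor
        overrides scale_in with its own, task-aware strategy)."""

        def scale_in(self, blocks: int) -> List[str]:
            # e.g. prefer idle blocks here before delegating
            return super().scale_in(blocks)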
@@ -237,7 +264,7 @@ class BlockProviderExecutor(ParslExecutor):
         # Send monitoring info for HTEX when monitoring enabled
         if self.monitoring_radio:
             msg = self.create_monitoring_info(status)
-            logger.debug("Sending message {} to hub from job status poller".format(msg))
+            logger.debug("Sending block monitoring message: %r", msg)
             self.monitoring_radio.send((MessageType.BLOCK_INFO, msg))

     def create_monitoring_info(self, status: Dict[str, JobStatus]) -> Sequence[object]:
@@ -310,13 +337,3 @@ class BlockProviderExecutor(ParslExecutor):
             del self._status[block_id]
         self.send_monitoring_info(new_status)
         return block_ids
-
-    def scale_out_facade(self, n: int) -> List[str]:
-        block_ids = self._scale_out(n)
-        if block_ids is not None:
-            new_status = {}
-            for block_id in block_ids:
-                new_status[block_id] = JobStatus(JobState.PENDING)
-            self.send_monitoring_info(new_status)
-            self._status.update(new_status)
-        return block_ids
parsl/executors/taskvine/executor.py CHANGED
@@ -573,24 +573,6 @@ class TaskVineExecutor(BlockProviderExecutor, putils.RepresentationMixin):
     def workers_per_node(self) -> Union[int, float]:
         return 1

-    def scale_in(self, count: int) -> List[str]:
-        """Scale in method. Cancel a given number of blocks
-        """
-        # Obtain list of blocks to kill
-        to_kill = list(self.blocks_to_job_id.keys())[:count]
-        kill_ids = [self.blocks_to_job_id[block] for block in to_kill]
-
-        # Cancel the blocks provisioned
-        if self.provider:
-            logger.info(f"Scaling in jobs: {kill_ids}")
-            r = self.provider.cancel(kill_ids)
-            job_ids = self._filter_scale_in_ids(kill_ids, r)
-            block_ids_killed = [self.job_ids_to_block[jid] for jid in job_ids]
-            return block_ids_killed
-        else:
-            logger.error("No execution provider available to scale")
-            return []
-
     def shutdown(self, *args, **kwargs):
         """Shutdown the executor. Sets flag to cancel the submit process and
         collector thread, which shuts down the TaskVine system submission.
parsl/executors/workqueue/executor.py CHANGED
@@ -689,24 +689,6 @@ class WorkQueueExecutor(BlockProviderExecutor, putils.RepresentationMixin):
     def workers_per_node(self) -> Union[int, float]:
         return self.scaling_cores_per_worker

-    def scale_in(self, count: int) -> List[str]:
-        """Scale in method.
-        """
-        # Obtain list of blocks to kill
-        to_kill = list(self.blocks_to_job_id.keys())[:count]
-        kill_ids = [self.blocks_to_job_id[block] for block in to_kill]
-
-        # Cancel the blocks provisioned
-        if self.provider:
-            logger.info(f"Scaling in jobs: {kill_ids}")
-            r = self.provider.cancel(kill_ids)
-            job_ids = self._filter_scale_in_ids(kill_ids, r)
-            block_ids_killed = [self.job_ids_to_block[jid] for jid in job_ids]
-            return block_ids_killed
-        else:
-            logger.error("No execution provider available to scale in")
-            return []
-
     def shutdown(self, *args, **kwargs):
         """Shutdown the executor. Sets flag to cancel the submit process and
         collector thread, which shuts down the Work Queue system submission.
parsl/monitoring/monitoring.py CHANGED
@@ -13,7 +13,7 @@ import typeguard

 from parsl.log_utils import set_file_logger
 from parsl.monitoring.message_type import MessageType
-from parsl.monitoring.radios import MultiprocessingQueueRadio
+from parsl.monitoring.radios import MultiprocessingQueueRadioSender
 from parsl.monitoring.router import router_starter
 from parsl.monitoring.types import AddressedMonitoringMessage
 from parsl.multiprocessing import ForkProcess, SizedQueue
@@ -105,7 +105,7 @@ class MonitoringHub(RepresentationMixin):
         self.resource_monitoring_enabled = resource_monitoring_enabled
         self.resource_monitoring_interval = resource_monitoring_interval

-    def start(self, run_id: str, dfk_run_dir: str, config_run_dir: Union[str, os.PathLike]) -> int:
+    def start(self, run_id: str, dfk_run_dir: str, config_run_dir: Union[str, os.PathLike]) -> None:

         logger.debug("Starting MonitoringHub")

@@ -187,7 +187,7 @@ class MonitoringHub(RepresentationMixin):
         self.filesystem_proc.start()
         logger.info(f"Started filesystem radio receiver process {self.filesystem_proc.pid}")

-        self.radio = MultiprocessingQueueRadio(self.block_msgs)
+        self.radio = MultiprocessingQueueRadioSender(self.block_msgs)

         try:
             comm_q_result = comm_q.get(block=True, timeout=120)
@@ -207,7 +207,7 @@ class MonitoringHub(RepresentationMixin):

         logger.info("Monitoring Hub initialized")

-        return zmq_port
+        self.hub_zmq_port = zmq_port

     # TODO: tighten the Any message format
     def send(self, mtype: MessageType, message: Any) -> None:
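Note: with this change MonitoringHub.start() returns None and the ZMQ port is published as an attribute instead, which is what the dflow.py and test_fuzz_zmq.py hunks consume. A sketch of the new calling convention (arguments abbreviated):

    def start_hub(monitoring, run_id, run_dir, config_run_dir) -> int:
        """Sketch: start() no longer returns the port; read it back
        from hub_zmq_port after start() has run."""
        monitoring.start(run_id, run_dir, config_run_dir)
        return monitoring.hub_zmq_port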
parsl/monitoring/radios.py CHANGED
@@ -15,14 +15,14 @@ _db_manager_excepts: Optional[Exception]
 logger = logging.getLogger(__name__)


-class MonitoringRadio(metaclass=ABCMeta):
+class MonitoringRadioSender(metaclass=ABCMeta):
     @abstractmethod
     def send(self, message: object) -> None:
         pass


-class FilesystemRadio(MonitoringRadio):
-    """A MonitoringRadio that sends messages over a shared filesystem.
+class FilesystemRadioSender(MonitoringRadioSender):
+    """A MonitoringRadioSender that sends messages over a shared filesystem.

     The messsage directory structure is based on maildir,
     https://en.wikipedia.org/wiki/Maildir
@@ -36,7 +36,7 @@ class FilesystemRadio(MonitoringRadio):
     This avoids a race condition of reading partially written messages.

     This radio is likely to give higher shared filesystem load compared to
-    the UDPRadio, but should be much more reliable.
+    the UDP radio, but should be much more reliable.
     """

     def __init__(self, *, monitoring_url: str, source_id: int, timeout: int = 10, run_dir: str):
@@ -66,7 +66,7 @@ class FilesystemRadio(MonitoringRadio):
         os.rename(tmp_filename, new_filename)


-class HTEXRadio(MonitoringRadio):
+class HTEXRadioSender(MonitoringRadioSender):

     def __init__(self, monitoring_url: str, source_id: int, timeout: int = 10):
         """
@@ -120,7 +120,7 @@ class HTEXRadio(MonitoringRadio):
         return


-class UDPRadio(MonitoringRadio):
+class UDPRadioSender(MonitoringRadioSender):

     def __init__(self, monitoring_url: str, source_id: int, timeout: int = 10):
         """
@@ -174,7 +174,7 @@ class UDPRadio(MonitoringRadio):
         return


-class MultiprocessingQueueRadio(MonitoringRadio):
+class MultiprocessingQueueRadioSender(MonitoringRadioSender):
     """A monitoring radio which connects over a multiprocessing Queue.
     This radio is intended to be used on the submit side, where components
     in the submit process, or processes launched by multiprocessing, will have
parsl/monitoring/remote.py CHANGED
@@ -8,10 +8,10 @@ from typing import Any, Callable, Dict, List, Sequence, Tuple

 from parsl.monitoring.message_type import MessageType
 from parsl.monitoring.radios import (
-    FilesystemRadio,
-    HTEXRadio,
-    MonitoringRadio,
-    UDPRadio,
+    FilesystemRadioSender,
+    HTEXRadioSender,
+    MonitoringRadioSender,
+    UDPRadioSender,
 )
 from parsl.multiprocessing import ForkProcess
 from parsl.process_loggers import wrap_with_logs
@@ -100,17 +100,17 @@ def monitor_wrapper(*,
     return (wrapped, args, new_kwargs)


-def get_radio(radio_mode: str, monitoring_hub_url: str, task_id: int, run_dir: str) -> MonitoringRadio:
-    radio: MonitoringRadio
+def get_radio(radio_mode: str, monitoring_hub_url: str, task_id: int, run_dir: str) -> MonitoringRadioSender:
+    radio: MonitoringRadioSender
     if radio_mode == "udp":
-        radio = UDPRadio(monitoring_hub_url,
-                         source_id=task_id)
+        radio = UDPRadioSender(monitoring_hub_url,
+                               source_id=task_id)
     elif radio_mode == "htex":
-        radio = HTEXRadio(monitoring_hub_url,
-                          source_id=task_id)
+        radio = HTEXRadioSender(monitoring_hub_url,
+                                source_id=task_id)
     elif radio_mode == "filesystem":
-        radio = FilesystemRadio(monitoring_url=monitoring_hub_url,
-                                source_id=task_id, run_dir=run_dir)
+        radio = FilesystemRadioSender(monitoring_url=monitoring_hub_url,
+                                      source_id=task_id, run_dir=run_dir)
     else:
         raise RuntimeError(f"Unknown radio mode: {radio_mode}")
     return radio
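Note: get_radio keeps its signature and modes ('udp', 'htex', 'filesystem'); only the returned type and the class names change. Hypothetical usage (the URL, task id, and run directory here are placeholders):

    from parsl.monitoring.remote import get_radio

    radio = get_radio("filesystem", "udp://localhost:55055",
                      task_id=0, run_dir="runinfo")
    radio.send(("hello", "from task 0"))  # any picklable object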
parsl/tests/test_monitoring/test_fuzz_zmq.py CHANGED
@@ -44,8 +44,8 @@ def test_row_counts():
     # the latter is what i'm most suspicious of in my present investigation

     # dig out the interchange port...
-    hub_address = parsl.dfk().hub_address
-    hub_zmq_port = parsl.dfk().hub_zmq_port
+    hub_address = parsl.dfk().monitoring.hub_address
+    hub_zmq_port = parsl.dfk().monitoring.hub_zmq_port

     # this will send a string to a new socket connection
     with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
parsl/version.py CHANGED
@@ -3,4 +3,4 @@
 Year.Month.Day[alpha/beta/..]
 Alphas will be numbered like this -> 2024.12.10a0
 """
-VERSION = '2024.07.22'
+VERSION = '2024.07.29'
parsl-2024.7.22.data/scripts/interchange.py → parsl-2024.7.29.data/scripts/interchange.py CHANGED
@@ -410,6 +410,7 @@ class Interchange:
                 self._ready_managers[manager_id] = {'last_heartbeat': time.time(),
                                                     'idle_since': time.time(),
                                                     'block_id': None,
+                                                    'start_time': msg['start_time'],
                                                     'max_capacity': 0,
                                                     'worker_count': 0,
                                                     'active': True,
parsl-2024.7.22.data/scripts/process_worker_pool.py → parsl-2024.7.29.data/scripts/process_worker_pool.py CHANGED
@@ -184,6 +184,7 @@ class Manager:

         self.uid = uid
         self.block_id = block_id
+        self.start_time = time.time()

         self.enable_mpi_mode = enable_mpi_mode
         self.mpi_launcher = mpi_launcher
@@ -263,6 +264,7 @@ class Manager:
                'worker_count': self.worker_count,
                'uid': self.uid,
                'block_id': self.block_id,
+               'start_time': self.start_time,
                'prefetch_capacity': self.prefetch_capacity,
                'max_capacity': self.worker_count + self.prefetch_capacity,
                'os': platform.system(),
parsl-2024.7.22.dist-info/METADATA → parsl-2024.7.29.dist-info/METADATA CHANGED
@@ -1,9 +1,9 @@
 Metadata-Version: 2.1
 Name: parsl
-Version: 2024.7.22
+Version: 2024.7.29
 Summary: Simple data dependent workflows in Python
 Home-page: https://github.com/Parsl/parsl
-Download-URL: https://github.com/Parsl/parsl/archive/2024.07.22.tar.gz
+Download-URL: https://github.com/Parsl/parsl/archive/2024.07.29.tar.gz
 Author: The Parsl Team
 Author-email: parsl@googlegroups.com
 License: Apache 2.0
parsl-2024.7.22.dist-info/RECORD → parsl-2024.7.29.dist-info/RECORD CHANGED
@@ -8,7 +8,7 @@ parsl/multiprocessing.py,sha256=MyaEcEq-Qf860u7V98u-PZrPNdtzOZL_NW6EhIJnmfQ,1937
 parsl/process_loggers.py,sha256=uQ7Gd0W72Jz7rrcYlOMfLsAEhkRltxXJL2MgdduJjEw,1136
 parsl/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 parsl/utils.py,sha256=91FjQiTUY383ueAjkBAgE21My9nba6SP2a2SrbB1r1Q,11250
-parsl/version.py,sha256=MTzhueu1_EeHa7_SmFzbomAqeyRCXKJ4cimHiEDRYbs,131
+parsl/version.py,sha256=1_lxZuThXb8Z-pTgQtbYaRM-j9czEj-Gcgyrj-lOHRA,131
 parsl/app/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 parsl/app/app.py,sha256=0gbM4AH2OtFOLsv07I5nglpElcwMSOi-FzdZZfrk7So,8532
 parsl/app/bash.py,sha256=jm2AvePlCT9DZR7H_4ANDWxatp5dN_22FUlT_gWhZ-g,5528
@@ -62,7 +62,7 @@ parsl/data_provider/staging.py,sha256=ZDZuuFg38pjUStegKPcvPsfGp3iMeReMzfU6DSwtJj
 parsl/data_provider/zip.py,sha256=S4kVuH9lxAegRURYbvIUR7EYYBOccyslaqyCrVWUBhw,4497
 parsl/dataflow/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 parsl/dataflow/dependency_resolvers.py,sha256=Om8Dgh7a0ZwgXAc6TlhxLSzvxXHDlNNV1aBNiD3JTNY,3325
-parsl/dataflow/dflow.py,sha256=jgNOIk3xXz90RXwC38ujMz7092XRdLFv5BrMyALYhps,68513
+parsl/dataflow/dflow.py,sha256=rdDWhhsPhCkCqxMIhagBKOZMQKDXsDxp7hnTgY_LPqI,68256
 parsl/dataflow/errors.py,sha256=9SxVhIJY_53FQx8x4OU8UA8nd7lvUbDllH7KfMXpYaY,2177
 parsl/dataflow/futures.py,sha256=08LuP-HFiHBIZmeKCjlsazw_WpQ5fwevrU2_WbidkYw,6080
 parsl/dataflow/memoization.py,sha256=l9uw1Bu50GucBF70M5relpGKFkE4dIM9T3R1KrxW0v0,9583
@@ -70,25 +70,25 @@ parsl/dataflow/rundirs.py,sha256=7aUg1cb0LLTocQxOdBzwtn7a8bIgpdMD5rjZV55UwaQ,115
 parsl/dataflow/states.py,sha256=hV6mfv-y4A6xrujeQglcomnfEs7y3Xm2g6JFwC6dvgQ,2612
 parsl/dataflow/taskrecord.py,sha256=-FuujdZQ1y5GSc-PJ91QKGT-Kp0lrg70MFDoxpbWI1Q,3113
 parsl/executors/__init__.py,sha256=Cg8e-F2NUaBD8A9crDAXKCSdoBEwQVIdgm4FlXd-wvk,476
-parsl/executors/base.py,sha256=10xMzqVa2vV7muet08Tm1iHBZ4m2jCESPPiRnbwzGUk,5120
+parsl/executors/base.py,sha256=BECTvBfVRDATyhBmMdcH75xoFhx-LO3rQYawVhaUJ6M,5144
 parsl/executors/errors.py,sha256=xVswxgi7vmJcUMCeYDAPK8sQT2kHFFROVoOr0dnmcWE,2098
-parsl/executors/status_handling.py,sha256=Hwcp8eCJSc_vVXycZX2vPTfikAP1SigtQJEiYbarjLw,13784
+parsl/executors/status_handling.py,sha256=BPv51pJbscV-HdOWgRN5JRPPyOm1b4m3qBbz7pTQjpc,14662
 parsl/executors/threads.py,sha256=hJt1LzxphqX4fe_9R9Cf1MU0lepWTU_eJe8O665B0Xo,3352
 parsl/executors/flux/__init__.py,sha256=P9grTTeRPXfqXurFhlSS7XhmE6tTbnCnyQ1f9b-oYHE,136
 parsl/executors/flux/execute_parsl_task.py,sha256=gRN7F4HhdrKQ-bvn4wXrquBzFOp_9WF88hMIeUaRg5I,1553
-parsl/executors/flux/executor.py,sha256=gPq49CQwtSZYZggLZ0dCXdpUlllKHJbvR8WRKeGh9xE,16977
+parsl/executors/flux/executor.py,sha256=8_xakLUu5zNJAHL0LbeTCFEWqWzRK1eE-3ep4GIIIrY,17017
 parsl/executors/flux/flux_instance_manager.py,sha256=5T3Rp7ZM-mlT0Pf0Gxgs5_YmnaPrSF9ec7zvRfLfYJw,2129
 parsl/executors/high_throughput/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 parsl/executors/high_throughput/errors.py,sha256=Sak8e8UpiEcXefUjMHbhyXc4Rn7kJtOoh7L8wreBQdk,1638
-parsl/executors/high_throughput/executor.py,sha256=bCtw_p2f1ztnqQiChKJBOiPyc6aKK39yRXSp5uFpRzk,38185
-parsl/executors/high_throughput/interchange.py,sha256=IRuiaBmks_R4cU-Sx0Q_Fjv4PdFtzU05GiPdeJstOoA,30578
-parsl/executors/high_throughput/manager_record.py,sha256=9XppKjDW0DJ7SMkPNxsiDs-HvXGPLrTg6Ceyh4b6gNs,433
+parsl/executors/high_throughput/executor.py,sha256=fY-OuzStEgyM-ao11debyhbB3pRk4frEmXdcmBHWsvQ,37834
+parsl/executors/high_throughput/interchange.py,sha256=vmsUEAgqshi_K5n9oqb-bOGIt9BOFW32Ei_3Ur1C8EE,30663
+parsl/executors/high_throughput/manager_record.py,sha256=yn3L8TUJFkgm2lX1x0SeS9mkvJowC0s2VIMCFiU7ThM,455
 parsl/executors/high_throughput/monitoring_info.py,sha256=HC0drp6nlXQpAop5PTUKNjdXMgtZVvrBL0JzZJebPP4,298
 parsl/executors/high_throughput/mpi_executor.py,sha256=V07t1GOzFhcwdlZGuYUPqc1NarSr-vUbsNzbK4Cj0m8,3882
 parsl/executors/high_throughput/mpi_prefix_composer.py,sha256=hah_IznfFqk-rzuHWmg6aiF_saiDRrpW-aSo4kH9Nso,4854
 parsl/executors/high_throughput/mpi_resource_management.py,sha256=LFBbJ3BnzTcY_v-jNu30uoIB2Enk4cleN4ygY3dncjY,8194
 parsl/executors/high_throughput/probe.py,sha256=TNpGTXb4_DEeg_h-LHu4zEKi1-hffboxvKcZUl2OZGk,2751
-parsl/executors/high_throughput/process_worker_pool.py,sha256=weEld9iZr669gGmPxJC77ISVop7Y47Lc8TkjEfmnAyk,42991
+parsl/executors/high_throughput/process_worker_pool.py,sha256=3s-Ouo3ZEhod7hon8euyL37t1DbP5pSVjXyC23DSN_0,43075
 parsl/executors/high_throughput/zmq_pipes.py,sha256=tAjQB3aNVMuTXziN3dbJWre46YpXgliD55qMBbhYTLU,8581
 parsl/executors/radical/__init__.py,sha256=CKbtV2numw5QvgIBq1htMUrt9TqDCIC2zifyf2svTNU,186
 parsl/executors/radical/executor.py,sha256=426cMt6d8uJFZ_7Ub1kCslaND4OKtBX5WZdz-0RXjMk,22554
@@ -97,7 +97,7 @@ parsl/executors/radical/rpex_worker.py,sha256=qli6i6ejKubTSv3lAE3YiW8RlkHrfl4Jhr
 parsl/executors/taskvine/__init__.py,sha256=9rwp3M8B0YyEhZMLO0RHaNw7u1nc01WHbXLqnBTanu0,293
 parsl/executors/taskvine/errors.py,sha256=euIYkSslrNSI85kyi2s0xzOaO9ik4c1fYHstMIeiBJk,652
 parsl/executors/taskvine/exec_parsl_function.py,sha256=ftGdJU78lKPPkphSHlEi4rj164mhuMHJjghVqfgeXKk,7085
-parsl/executors/taskvine/executor.py,sha256=lZXfXXzkufLsoxAhf3hOMx05qTmwgd-osh2TMy1S4MM,31722
+parsl/executors/taskvine/executor.py,sha256=XsUzFWgFWbxf06jDLMAKiXUF-F1_VLheZ5jhAwSphZk,30977
 parsl/executors/taskvine/factory.py,sha256=rWpEoFphLzqO3HEYyDEbQa14iyvgkdZg7hLZuaY39gQ,2638
 parsl/executors/taskvine/factory_config.py,sha256=AbE2fN2snrF5ITYrrS4DnGn2XkJHUFr_17DYHDHIwq0,3693
 parsl/executors/taskvine/manager.py,sha256=fwRSgYWpbsnr5jXlzvX0sQjOqryqn_77K_svJJ1HJ2U,25631
@@ -106,7 +106,7 @@ parsl/executors/taskvine/utils.py,sha256=iSrIogeiauL3UNy_9tiZp1cBSNn6fIJkMYQRVi1
 parsl/executors/workqueue/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 parsl/executors/workqueue/errors.py,sha256=XO2naYhAsHHyiOBH6hpObg3mPNDmvMoFqErsj0-v7jc,541
 parsl/executors/workqueue/exec_parsl_function.py,sha256=RUkJ4JSJAjr7YyRZ58zhMdg8cR5dVV9odUl3AuzNf3k,7802
-parsl/executors/workqueue/executor.py,sha256=K5q-poZU37LVF4YhX34jKVMIBw5jpBfQ1rUadgPeQBU,50519
+parsl/executors/workqueue/executor.py,sha256=YaY_U5DxXU2NbswmlrcJ2BtXvSbV0ElM9ZlQzP_F_BU,49803
 parsl/executors/workqueue/parsl_coprocess.py,sha256=cF1UmTgVLoey6QzBcbYgEiEsRidSaFfuO54f1HFw_EM,5737
 parsl/executors/workqueue/parsl_coprocess_stub.py,sha256=_bJmpPIgL42qM6bVzeEKt1Mn1trSP41rtJguXxPGfHI,735
 parsl/jobs/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -122,9 +122,9 @@ parsl/launchers/launchers.py,sha256=VB--fiVv_IQne3DydTMSdGUY0o0g69puAs-Hd3mJ2vo,
 parsl/monitoring/__init__.py,sha256=0ywNz6i0lM1xo_7_BIxhETDGeVd2C_0wwD7qgeaMR4c,83
 parsl/monitoring/db_manager.py,sha256=qPdW_MINycSn6MxxFk2s_R-t8g1cbJhxncVR5mDgeGs,37011
 parsl/monitoring/message_type.py,sha256=Khn88afNxcOIciKiCK4GLnn90I5BlRTiOL3zK-P07yQ,401
-parsl/monitoring/monitoring.py,sha256=TCJsDbD3bLU77QrCqfCkLfC0e3Ih0FQLvtbYtgDs3RE,13522
-parsl/monitoring/radios.py,sha256=LkSW8LAW6MT9mNdHgRpUDPKkLcO-nyGU6uzWv3TLloE,5847
-parsl/monitoring/remote.py,sha256=IWDYCV8du2yB-YTqXjqGWELfi07ZWSENGq8mc4c4it0,13699
+parsl/monitoring/monitoring.py,sha256=ggffHhtgh96lSmryvjjjaHg7oV54Ci-kz4wWOPJviKU,13548
+parsl/monitoring/radios.py,sha256=VaVZEPDkLsJ3Jp2CFo5kQ94mxiqpN9xltKl8lnKksiQ,5908
+parsl/monitoring/remote.py,sha256=qH1N3My8F473CHKxFrA2CsaL0Uege26tQi9-KrWQrr4,13771
 parsl/monitoring/router.py,sha256=l1LBT1hxCWQ2mxCR-PtxwYJ905In61E4pPJB2K2J7kM,9554
 parsl/monitoring/types.py,sha256=_WGizCTgQVOkJ2dvNfsvHpYBj21Ky3bJsmyIskIx10I,631
 parsl/monitoring/queries/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -355,7 +355,7 @@ parsl/tests/test_monitoring/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMp
 parsl/tests/test_monitoring/test_app_names.py,sha256=ayyxySGWpKSe9dDw2UeJo1dicxjpALRuLsJfprZV4Eg,2174
 parsl/tests/test_monitoring/test_basic.py,sha256=lGyHEJt_rokawv_XeAx-bxV84IlZUFR4KI0PQAiLsFg,3714
 parsl/tests/test_monitoring/test_db_locks.py,sha256=3s3c1xhKo230ZZIJ3f1Ca4U7LcEdXnanOGVXQyNlk2U,2895
-parsl/tests/test_monitoring/test_fuzz_zmq.py,sha256=E7T2yEyA_Ti8WxDrGZKya-8m8Hfe9HobVBsCe2BFbKM,3414
+parsl/tests/test_monitoring/test_fuzz_zmq.py,sha256=--3-pQUvXXbkr8v_BEJoPvVvNly1oXvrD2nJh6yl_0M,3436
 parsl/tests/test_monitoring/test_htex_init_blocks_vs_monitoring.py,sha256=_WjymTgxWvZZwQpJQ3L2gmEt5VUkTss0hOT153AssdQ,2746
 parsl/tests/test_monitoring/test_incomplete_futures.py,sha256=ZnO1sFSwlWUBHX64C_zwfTVRVC_UFNlU4h0POgx6NEo,2005
 parsl/tests/test_monitoring/test_memoization_representation.py,sha256=dknv2nO7pNZ1jGxWGsC_AW3rs90gjMIeC5d7pIJ75Xc,2645
@@ -467,13 +467,13 @@ parsl/usage_tracking/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hS
 parsl/usage_tracking/api.py,sha256=iaCY58Dc5J4UM7_dJzEEs871P1p1HdxBMtNGyVdzc9g,1821
 parsl/usage_tracking/levels.py,sha256=xbfzYEsd55KiZJ-mzNgPebvOH4rRHum04hROzEf41tU,291
 parsl/usage_tracking/usage.py,sha256=qNEJ7nPimqd3Y7OWFLdYmNwJ6XDKlyfV_fTzasxsQw8,8690
-parsl-2024.7.22.data/scripts/exec_parsl_function.py,sha256=RUkJ4JSJAjr7YyRZ58zhMdg8cR5dVV9odUl3AuzNf3k,7802
-parsl-2024.7.22.data/scripts/interchange.py,sha256=n0aOHLX64DEWx-OA4vWrYRVZfmaz8Rc8haNtafbgh4k,30565
-parsl-2024.7.22.data/scripts/parsl_coprocess.py,sha256=zrVjEqQvFOHxsLufPi00xzMONagjVwLZbavPM7bbjK4,5722
-parsl-2024.7.22.data/scripts/process_worker_pool.py,sha256=Ar-HLibZxnEVSVanAbOnBFtYdwQ_bSOwXGznoVQIdqI,42977
-parsl-2024.7.22.dist-info/LICENSE,sha256=tAkwu8-AdEyGxGoSvJ2gVmQdcicWw3j1ZZueVV74M-E,11357
-parsl-2024.7.22.dist-info/METADATA,sha256=D24HXi2DjCTXBzgd8QBjrkbk97stTxn18dY1_fv9tYM,4124
-parsl-2024.7.22.dist-info/WHEEL,sha256=GJ7t_kWBFywbagK5eo9IoUwLW6oyOeTKmQ-9iHFVNxQ,92
-parsl-2024.7.22.dist-info/entry_points.txt,sha256=XqnsWDYoEcLbsMcpnYGKLEnSBmaIe1YoM5YsBdJG2tI,176
-parsl-2024.7.22.dist-info/top_level.txt,sha256=PIheYoUFQtF2icLsgOykgU-Cjuwr2Oi6On2jo5RYgRM,6
-parsl-2024.7.22.dist-info/RECORD,,
+parsl-2024.7.29.data/scripts/exec_parsl_function.py,sha256=RUkJ4JSJAjr7YyRZ58zhMdg8cR5dVV9odUl3AuzNf3k,7802
+parsl-2024.7.29.data/scripts/interchange.py,sha256=q3V1mqr0BC_CzsNfebTKFD5tyE0birXUvZh-bk05vLQ,30650
+parsl-2024.7.29.data/scripts/parsl_coprocess.py,sha256=zrVjEqQvFOHxsLufPi00xzMONagjVwLZbavPM7bbjK4,5722
+parsl-2024.7.29.data/scripts/process_worker_pool.py,sha256=78QKnV5KbY_vcteC6k60gpDE4wEk6hsciet_qzs9QoU,43061
+parsl-2024.7.29.dist-info/LICENSE,sha256=tAkwu8-AdEyGxGoSvJ2gVmQdcicWw3j1ZZueVV74M-E,11357
+parsl-2024.7.29.dist-info/METADATA,sha256=DN5bT4CBKxmmsnscGT_y61gVnmzLEGRZDg95HFbxD54,4124
+parsl-2024.7.29.dist-info/WHEEL,sha256=GJ7t_kWBFywbagK5eo9IoUwLW6oyOeTKmQ-9iHFVNxQ,92
+parsl-2024.7.29.dist-info/entry_points.txt,sha256=XqnsWDYoEcLbsMcpnYGKLEnSBmaIe1YoM5YsBdJG2tI,176
+parsl-2024.7.29.dist-info/top_level.txt,sha256=PIheYoUFQtF2icLsgOykgU-Cjuwr2Oi6On2jo5RYgRM,6
+parsl-2024.7.29.dist-info/RECORD,,