parsl 2025.3.3__py3-none-any.whl → 2025.3.17__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,55 @@
+ import multiprocessing
+ import signal
+
+ import psutil
+ import pytest
+
+ from parsl.monitoring.monitoring import join_terminate_close_proc
+ from parsl.multiprocessing import ForkProcess
+
+
+ def noop():
+     pass
+
+
+ @pytest.mark.local
+ def test_end_process_already_exited():
+     p = ForkProcess(target=noop)
+     p.start()
+     p.join()
+     join_terminate_close_proc(p)
+
+
+ def hang():
+     while True:
+         pass
+
+
+ @pytest.mark.local
+ def test_end_hung_process():
+     """Test calling against a process that will not exit itself."""
+     p = ForkProcess(target=hang)
+     p.start()
+     pid = p.pid
+     join_terminate_close_proc(p, timeout=1)
+     assert not psutil.pid_exists(pid), "process should not exist any more"
+
+
+ def hang_no_sigint(e):
+     def s(*args, **kwargs):
+         e.set()
+     signal.signal(signal.SIGTERM, s)
+     while True:
+         pass
+
+
+ @pytest.mark.local
+ def test_end_hung_process_no_sigint():
+     """Test calling against a process that will not exit itself."""
+     e = multiprocessing.Event()
+     p = ForkProcess(target=hang_no_sigint, args=(e,))
+     p.start()
+     pid = p.pid
+     join_terminate_close_proc(p, timeout=1)
+     assert not psutil.pid_exists(pid), "process should not exist any more"
+     assert e.is_set(), "hung process should have set event on signal"
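The new test file above exercises parsl.monitoring.monitoring.join_terminate_close_proc against a process that has already exited, a process that hangs, and a process that traps SIGTERM. The escalation those tests expect (join, then SIGTERM, then SIGKILL, then close) can be sketched like this; the helper below is an illustration with made-up names, not Parsl's implementation:

    import multiprocessing

    def end_process(p: multiprocessing.Process, timeout: float = 30) -> None:
        """Join a child process, escalating to SIGTERM and then SIGKILL."""
        p.join(timeout)       # covers a process that has exited or exits on its own
        if p.is_alive():
            p.terminate()     # SIGTERM: gives a hung-but-cooperative process a chance
            p.join(timeout)
        if p.is_alive():
            p.kill()          # SIGKILL: for a process that ignores SIGTERM
            p.join()
        p.close()             # release the Process object's OS resources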
@@ -161,3 +161,28 @@ def test_MPISched_contention():
      assert task_on_worker_side['task_id'] == 2
      _, _, _, resource_spec = unpack_res_spec_apply_message(task_on_worker_side['buffer'])
      assert len(resource_spec['MPI_NODELIST'].split(',')) == 8
+
+
+ @pytest.mark.local
+ def test_hashable_backlog_queue():
+     """Run multiple large tasks that to force entry into backlog_queue
+     where queue.PriorityQueue expects hashability/comparability
+     """
+
+     task_q, result_q = SpawnContext.Queue(), SpawnContext.Queue()
+     scheduler = MPITaskScheduler(task_q, result_q)
+
+     assert scheduler.available_nodes
+     assert len(scheduler.available_nodes) == 8
+
+     assert scheduler._free_node_counter.value == 8
+
+     for i in range(3):
+         mock_task_buffer = pack_res_spec_apply_message("func", "args", "kwargs",
+                                                        resource_specification={
+                                                            "num_nodes": 8,
+                                                            "ranks_per_node": 2
+                                                        })
+         task_package = {"task_id": i, "buffer": mock_task_buffer}
+         scheduler.put_task(task_package)
+     assert scheduler._backlog_queue.qsize() == 2, "Expected 2 backlogged tasks"
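The regression covered here is about queue.PriorityQueue semantics: when two queued entries compare equal on their leading element, Python falls back to comparing the rest of the tuple, so whatever is stored next to the priority must be orderable. A common way to guarantee that, sketched below with illustrative names rather than Parsl's actual scheduler internals, is to insert a monotonically increasing counter as a tie-breaker so the payload itself is never compared:

    import itertools
    import queue

    backlog: queue.PriorityQueue = queue.PriorityQueue()
    _tie_breaker = itertools.count()

    def put_backlog(priority: int, task: dict) -> None:
        # Ties on priority are resolved by the counter, so two equal-priority
        # dict payloads are never compared against each other.
        backlog.put((priority, next(_tie_breaker), task))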
@@ -14,6 +14,9 @@ from parsl.providers import LocalProvider
  # timeout later on.
  BLOCK_COUNT = 3

+ # the try_assert timeout for the above number of blocks to get started
+ PERMITTED_STARTUP_TIME_S = 30
+

  class AccumulatingLocalProvider(LocalProvider):
      def __init__(self, *args, **kwargs):
@@ -67,7 +70,7 @@ def test_shutdown_scalein_blocks(tmpd_cwd, try_assert):

      with parsl.load(config):
          # this will wait for everything to be scaled out fully
-         try_assert(lambda: len(htex.connected_managers()) == BLOCK_COUNT)
+         try_assert(lambda: len(htex.connected_managers()) == BLOCK_COUNT, timeout_ms=PERMITTED_STARTUP_TIME_S * 1000)

      assert len(accumulating_provider.submit_job_ids) == BLOCK_COUNT, f"Exactly {BLOCK_COUNT} blocks should have been launched"
      assert len(accumulating_provider.cancel_job_ids) == BLOCK_COUNT, f"Exactly {BLOCK_COUNT} blocks should have been scaled in"
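The scale-out wait above now passes an explicit timeout_ms to the try_assert fixture. In spirit, try_assert re-evaluates a predicate until it becomes true or the timeout elapses; a generic stand-in for such a helper (illustrative only, not Parsl's fixture) is:

    import time
    from typing import Callable

    def poll_until(predicate: Callable[[], bool], timeout_ms: int, interval_s: float = 0.1) -> None:
        # Re-evaluate the predicate until it holds, failing loudly on timeout.
        deadline = time.monotonic() + timeout_ms / 1000
        while not predicate():
            if time.monotonic() > deadline:
                raise AssertionError(f"condition not met within {timeout_ms} ms")
            time.sleep(interval_s)

    # usage sketch:
    # poll_until(lambda: len(htex.connected_managers()) == BLOCK_COUNT,
    #            timeout_ms=PERMITTED_STARTUP_TIME_S * 1000)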
@@ -30,7 +30,7 @@ def test_no_kills():

  @pytest.mark.local
  @pytest.mark.parametrize("sig", [signal.SIGINT, signal.SIGTERM, signal.SIGKILL, signal.SIGQUIT])
- @pytest.mark.parametrize("process_attr", ["router_proc", "dbm_proc"])
+ @pytest.mark.parametrize("process_attr", ["zmq_router_proc", "udp_router_proc", "dbm_proc", "filesystem_proc"])
  def test_kill_monitoring_helper_process(sig, process_attr, try_assert):
      """This tests that we can kill a monitoring process and still have successful shutdown.
      SIGINT emulates some racy behaviour when ctrl-C is pressed: that
parsl/version.py CHANGED
@@ -3,4 +3,4 @@
  Year.Month.Day[alpha/beta/..]
  Alphas will be numbered like this -> 2024.12.10a0
  """
- VERSION = '2025.03.03'
+ VERSION = '2025.03.17'
@@ -15,6 +15,7 @@ import threading
  import time
  import uuid
  from importlib.metadata import distributions
+ from multiprocessing.context import SpawnProcess
  from multiprocessing.managers import DictProxy
  from multiprocessing.sharedctypes import Synchronized
  from typing import Dict, List, Optional, Sequence
@@ -403,52 +404,34 @@ class Manager:
          result_outgoing.connect(self._result_q_url)
          logger.info("Manager result pipe connected to interchange")

-         push_poll_period = max(10, self.poll_period) / 1000  # push_poll_period must be atleast 10 ms
-         logger.debug("push poll period: {}".format(push_poll_period))
-
-         last_beat = time.time()
-         last_result_beat = time.time()
-         items = []
-
          while not self._stop_event.is_set():
+             logger.debug("Starting pending_result_queue get")
              try:
-                 logger.debug("Starting pending_result_queue get")
-                 r = self.task_scheduler.get_result(block=True, timeout=push_poll_period)
-                 logger.debug("Got a result item")
-                 items.append(r)
-             except queue.Empty:
-                 logger.debug("pending_result_queue get timeout without result item")
-             except Exception as e:
-                 logger.exception("Got an exception: {}".format(e))
-
-             if time.time() > last_result_beat + self.heartbeat_period:
-                 heartbeat_message = f"last_result_beat={last_result_beat} heartbeat_period={self.heartbeat_period} seconds"
-                 logger.info(f"Sending heartbeat via results connection: {heartbeat_message}")
-                 last_result_beat = time.time()
-                 items.append(pickle.dumps({'type': 'heartbeat'}))
-
-             if len(items) >= self.max_queue_size or time.time() > last_beat + push_poll_period:
-                 last_beat = time.time()
-                 if items:
-                     logger.debug(f"Result send: Pushing {len(items)} items")
-                     result_outgoing.send_multipart(items)
-                     logger.debug("Result send: Pushed")
-                     items = []
-                 else:
-                     logger.debug("Result send: No items to push")
-             else:
-                 logger.debug(f"Result send: check condition not met - deferring {len(items)} result items")
+                 r = self.task_scheduler.get_result()
+                 if r is None:
+                     continue
+                 logger.debug("Result received from worker: %s", id(r))
+                 result_outgoing.send(r)
+                 logger.debug("Result sent to interchange: %s", id(r))
+             except Exception:
+                 logger.exception("Failed to send result to interchange")

          result_outgoing.close()
-         logger.info("Exiting")
+         logger.debug("Exiting")

      @wrap_with_logs
-     def worker_watchdog(self):
+     def heartbeater(self):
+         while not self._stop_event.wait(self.heartbeat_period):
+             heartbeat_message = f"heartbeat_period={self.heartbeat_period} seconds"
+             logger.info(f"Sending heartbeat via results connection: {heartbeat_message}")
+             self.pending_result_queue.put(pickle.dumps({'type': 'heartbeat'}))
+
+     def worker_watchdog(self, procs: dict[int, SpawnProcess]):
          """Keeps workers alive."""
          logger.debug("Starting worker watchdog")

          while not self._stop_event.wait(self.heartbeat_period):
-             for worker_id, p in self.procs.items():
+             for worker_id, p in procs.items():
                  if not p.is_alive():
                      logger.error("Worker {} has died".format(worker_id))
                      try:
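The heartbeat that used to be folded into the result-batching loop is now its own heartbeater thread, built around the `while not event.wait(period):` idiom, which serves as both the periodic sleep and the shutdown check. A self-contained sketch of that idiom, with illustrative names, is:

    import threading
    import time

    stop_event = threading.Event()

    def heartbeat_loop(period_s: float = 5.0) -> None:
        # wait() sleeps for up to period_s, but returns True as soon as the
        # event is set, so the loop exits promptly at shutdown.
        while not stop_event.wait(period_s):
            print("heartbeat at", time.time())

    t = threading.Thread(target=heartbeat_loop, name="Heartbeater", daemon=True)
    t.start()
    # ... at shutdown:
    stop_event.set()
    t.join()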
@@ -466,11 +449,10 @@ class Manager:
                      except KeyError:
                          logger.info("Worker {} was not busy when it died".format(worker_id))

-                     p = self._start_worker(worker_id)
-                     self.procs[worker_id] = p
+                     procs[worker_id] = self._start_worker(worker_id)
                      logger.info("Worker {} has been restarted".format(worker_id))

-         logger.critical("Exiting")
+         logger.debug("Exiting")

      @wrap_with_logs
      def handle_monitoring_messages(self):
@@ -485,32 +467,28 @@ class Manager:
          """
          logger.debug("Starting monitoring handler thread")

-         poll_period_s = max(10, self.poll_period) / 1000  # Must be at least 10 ms
-
          while not self._stop_event.is_set():
              try:
                  logger.debug("Starting monitor_queue.get()")
-                 msg = self.monitoring_queue.get(block=True, timeout=poll_period_s)
-             except queue.Empty:
-                 logger.debug("monitoring_queue.get() has timed out")
-             except Exception as e:
-                 logger.exception(f"Got an exception: {e}")
-             else:
+                 msg = self.monitoring_queue.get(block=True)
+                 if msg is None:
+                     continue
                  logger.debug("Got a monitoring message")
                  self.pending_result_queue.put(msg)
                  logger.debug("Put monitoring message on pending_result_queue")
+             except Exception:
+                 logger.exception("Failed to forward monitoring message")

-         logger.critical("Exiting")
+         logger.debug("Exiting")

      def start(self):
          """ Start the worker processes.

          TODO: Move task receiving to a thread
          """
-         self.procs = {}
+         procs: dict[int, SpawnProcess] = {}
          for worker_id in range(self.worker_count):
-             p = self._start_worker(worker_id)
-             self.procs[worker_id] = p
+             procs[worker_id] = self._start_worker(worker_id)

          logger.debug("Workers started")
@@ -519,40 +497,69 @@ class Manager:
              target=self.push_results, name="Result-Pusher"
          )
          thr_worker_watchdog = threading.Thread(
-             target=self.worker_watchdog, name="worker-watchdog"
+             target=self.worker_watchdog, args=(procs,), name="worker-watchdog"
          )
          thr_monitoring_handler = threading.Thread(
              target=self.handle_monitoring_messages, name="Monitoring-Handler"
          )
+         thr_heartbeater = threading.Thread(target=self.heartbeater, name="Heartbeater")

          thr_task_puller.start()
          thr_result_pusher.start()
          thr_worker_watchdog.start()
          thr_monitoring_handler.start()
+         thr_heartbeater.start()

          logger.info("Manager threads started")

          # This might need a multiprocessing event to signal back.
          self._stop_event.wait()
-         logger.critical("Received kill event, terminating worker processes")
+         logger.info("Stop event set; terminating worker processes")
+
+         # Invite blocking threads to quit
+         self.monitoring_queue.put(None)
+         self.pending_result_queue.put(None)

+         thr_heartbeater.join()
          thr_task_puller.join()
          thr_result_pusher.join()
          thr_worker_watchdog.join()
          thr_monitoring_handler.join()
-         for proc_id in self.procs:
-             self.procs[proc_id].terminate()
-             logger.critical("Terminating worker {}: is_alive()={}".format(self.procs[proc_id],
-                                                                           self.procs[proc_id].is_alive()))
-             self.procs[proc_id].join()
-             logger.debug("Worker {} joined successfully".format(self.procs[proc_id]))
+
+         for worker_id in procs:
+             p = procs[worker_id]
+             proc_info = f"(PID: {p.pid}, Worker ID: {worker_id})"
+             logger.debug(f"Signaling worker {p.name} (TERM). {proc_info}")
+             p.terminate()

          self.zmq_context.term()
+
+         # give processes 1 second to gracefully shut themselves down, based on the
+         # SIGTERM (.terminate()) just sent; after then, we pull the plug.
+         force_child_shutdown_at = time.monotonic() + 1
+         while procs:
+             worker_id, p = procs.popitem()
+             timeout = max(force_child_shutdown_at - time.monotonic(), 0.000001)
+             p.join(timeout=timeout)
+             proc_info = f"(PID: {p.pid}, Worker ID: {worker_id})"
+             if p.exitcode is not None:
+                 logger.debug(
+                     "Worker joined successfully. %s (exitcode: %s)", proc_info, p.exitcode
+                 )
+
+             else:
+                 logger.warning(
+                     f"Worker {p.name} ({worker_id}) failed to terminate in a timely"
+                     f" manner; sending KILL signal to process. {proc_info}"
+                 )
+                 p.kill()
+                 p.join()
+             p.close()
+
          delta = time.time() - self._start_time
          logger.info("process_worker_pool ran for {} seconds".format(delta))
-         return

-     def _start_worker(self, worker_id: int):
+     def _start_worker(self, worker_id: int) -> SpawnProcess:
          p = SpawnContext.Process(
              target=worker,
              args=(
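Shutdown in start() now happens in three phases: a None sentinel is pushed onto each queue so the blocking consumer threads (results, monitoring) wake up and observe the stop event, workers are sent SIGTERM, and any worker still alive after a shared one-second grace period is killed. The sentinel part of that pattern, reduced to a generic sketch with illustrative names, is:

    import queue
    import threading

    stop_event = threading.Event()
    work_queue: queue.Queue = queue.Queue()

    def consumer() -> None:
        # Block indefinitely on get(); the None sentinel pushed at shutdown
        # wakes the thread so it can re-check the stop event instead of
        # relying on a polling timeout.
        while not stop_event.is_set():
            item = work_queue.get(block=True)
            if item is None:
                continue
            print("processing", item)

    t = threading.Thread(target=consumer, name="Consumer")
    t.start()
    # shutdown: set the event, then wake the blocked consumer
    stop_event.set()
    work_queue.put(None)
    t.join()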
@@ -939,27 +946,27 @@ if __name__ == "__main__":
      )
      logger.info(
          f"\n Python version: {sys.version}"
-         f" Debug logging: {args.debug}"
-         f" Certificates dir: {args.cert_dir}"
-         f" Log dir: {args.logdir}"
-         f" Manager ID: {args.uid}"
-         f" Block ID: {args.block_id}"
-         f" cores_per_worker: {args.cores_per_worker}"
-         f" mem_per_worker: {args.mem_per_worker}"
-         f" task_port: {args.task_port}"
-         f" result_port: {args.result_port}"
-         f" addresses: {args.addresses}"
-         f" max_workers_per_node: {args.max_workers_per_node}"
-         f" poll_period: {args.poll}"
-         f" address_probe_timeout: {args.address_probe_timeout}"
-         f" Prefetch capacity: {args.prefetch_capacity}"
-         f" Heartbeat threshold: {args.hb_threshold}"
-         f" Heartbeat period: {args.hb_period}"
-         f" Drain period: {args.drain_period}"
-         f" CPU affinity: {args.cpu_affinity}"
-         f" Accelerators: {' '.join(args.available_accelerators)}"
-         f" enable_mpi_mode: {args.enable_mpi_mode}"
-         f" mpi_launcher: {args.mpi_launcher}"
+         f"\n Debug logging: {args.debug}"
+         f"\n Certificates dir: {args.cert_dir}"
+         f"\n Log dir: {args.logdir}"
+         f"\n Manager ID: {args.uid}"
+         f"\n Block ID: {args.block_id}"
+         f"\n cores_per_worker: {args.cores_per_worker}"
+         f"\n mem_per_worker: {args.mem_per_worker}"
+         f"\n task_port: {args.task_port}"
+         f"\n result_port: {args.result_port}"
+         f"\n addresses: {args.addresses}"
+         f"\n max_workers_per_node: {args.max_workers_per_node}"
+         f"\n poll_period: {args.poll}"
+         f"\n address_probe_timeout: {args.address_probe_timeout}"
+         f"\n Prefetch capacity: {args.prefetch_capacity}"
+         f"\n Heartbeat threshold: {args.hb_threshold}"
+         f"\n Heartbeat period: {args.hb_period}"
+         f"\n Drain period: {args.drain_period}"
+         f"\n CPU affinity: {args.cpu_affinity}"
+         f"\n Accelerators: {' '.join(args.available_accelerators)}"
+         f"\n enable_mpi_mode: {args.enable_mpi_mode}"
+         f"\n mpi_launcher: {args.mpi_launcher}"
      )
      try:
          manager = Manager(task_port=args.task_port,
@@ -1,9 +1,9 @@
  Metadata-Version: 2.1
  Name: parsl
- Version: 2025.3.3
+ Version: 2025.3.17
  Summary: Simple data dependent workflows in Python
  Home-page: https://github.com/Parsl/parsl
- Download-URL: https://github.com/Parsl/parsl/archive/2025.03.03.tar.gz
+ Download-URL: https://github.com/Parsl/parsl/archive/2025.03.17.tar.gz
  Author: The Parsl Team
  Author-email: parsl@googlegroups.com
  License: Apache 2.0
@@ -40,7 +40,7 @@ Requires-Dist: boto3; extra == "all"
  Requires-Dist: kubernetes; extra == "all"
  Requires-Dist: ipython<=8.6.0; extra == "all"
  Requires-Dist: nbsphinx; extra == "all"
- Requires-Dist: sphinx<7.2,>=7.1; extra == "all"
+ Requires-Dist: sphinx<8,>=7.4; extra == "all"
  Requires-Dist: sphinx-rtd-theme; extra == "all"
  Requires-Dist: google-auth; extra == "all"
  Requires-Dist: google-api-python-client; extra == "all"
@@ -63,7 +63,7 @@ Requires-Dist: msrestazure; extra == "azure"
  Provides-Extra: docs
  Requires-Dist: ipython<=8.6.0; extra == "docs"
  Requires-Dist: nbsphinx; extra == "docs"
- Requires-Dist: sphinx<7.2,>=7.1; extra == "docs"
+ Requires-Dist: sphinx<8,>=7.4; extra == "docs"
  Requires-Dist: sphinx-rtd-theme; extra == "docs"
  Provides-Extra: flux
  Requires-Dist: pyyaml; extra == "flux"
@@ -8,7 +8,7 @@ parsl/multiprocessing.py,sha256=MyaEcEq-Qf860u7V98u-PZrPNdtzOZL_NW6EhIJnmfQ,1937
  parsl/process_loggers.py,sha256=uQ7Gd0W72Jz7rrcYlOMfLsAEhkRltxXJL2MgdduJjEw,1136
  parsl/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  parsl/utils.py,sha256=codTX6_KLhgeTwNkRzc1lo4bgc1M93eJ-lkqOO98fvk,14331
- parsl/version.py,sha256=JZR2YCezBq1F5cw4-KEsJxZK4DKSK_Po-wBKZDC4T7o,131
+ parsl/version.py,sha256=_yYxGBkoJMDKADe5yJ2dAkIINmdZgyRTRJnodIasABw,131
  parsl/app/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  parsl/app/app.py,sha256=0gbM4AH2OtFOLsv07I5nglpElcwMSOi-FzdZZfrk7So,8532
  parsl/app/bash.py,sha256=jm2AvePlCT9DZR7H_4ANDWxatp5dN_22FUlT_gWhZ-g,5528
@@ -73,16 +73,16 @@ parsl/executors/flux/executor.py,sha256=8_xakLUu5zNJAHL0LbeTCFEWqWzRK1eE-3ep4GII
  parsl/executors/flux/flux_instance_manager.py,sha256=5T3Rp7ZM-mlT0Pf0Gxgs5_YmnaPrSF9ec7zvRfLfYJw,2129
  parsl/executors/high_throughput/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  parsl/executors/high_throughput/errors.py,sha256=k2XuvvFdUfNs2foHFnxmS-BToRMfdXpYEa4EF3ELKq4,1554
- parsl/executors/high_throughput/executor.py,sha256=Otf0k9Ia_ZX9mFBs6HOfF4x1LALlwG-i_ga_RsrUBJY,38747
+ parsl/executors/high_throughput/executor.py,sha256=esMYMgPHmgD0wPTb0U61vBX96DaPrWj9sQLzpKvB06k,38752
  parsl/executors/high_throughput/interchange.py,sha256=7sKIvxP3a7HSzqEq25ZCpABx-1Q2f585pFDGzUvo7_4,29459
  parsl/executors/high_throughput/manager_record.py,sha256=ZMsqFxvreGLRXAw3N-JnODDa9Qfizw2tMmcBhm4lco4,490
  parsl/executors/high_throughput/manager_selector.py,sha256=UKcUE6v0tO7PDMTThpKSKxVpOpOUilxDL7UbNgpZCxo,2116
  parsl/executors/high_throughput/monitoring_info.py,sha256=HC0drp6nlXQpAop5PTUKNjdXMgtZVvrBL0JzZJebPP4,298
  parsl/executors/high_throughput/mpi_executor.py,sha256=U-aatbLF_Mu1p6lP0HmT7Yn1Swn3cc7hPmDfuUb9TpI,4797
  parsl/executors/high_throughput/mpi_prefix_composer.py,sha256=DmpKugANNa1bdYlqQBLHkrFc15fJpefPPhW9hkAlh1s,4308
- parsl/executors/high_throughput/mpi_resource_management.py,sha256=d3NSt3-isdr7pj-oXg9XGRX9D9VsK5e9zSpp7-nyybc,7854
+ parsl/executors/high_throughput/mpi_resource_management.py,sha256=73bTW2ZbHRfcrPN318cyjiqDN50AM1cOCQqUGJDIlBg,8199
  parsl/executors/high_throughput/probe.py,sha256=QOEaliO3x5cB6ltMOZMsZQ-ath9AAuFqXcBzRgWOM60,2754
- parsl/executors/high_throughput/process_worker_pool.py,sha256=YOJvTUMg3eIHr9fYfBWFHRiI1QQ898IGiuXyj5VRQNo,41084
+ parsl/executors/high_throughput/process_worker_pool.py,sha256=Q7FN0MdXIAOouxDarim6etYVHEgbXFiaMhBahC2ZtIQ,41137
  parsl/executors/high_throughput/zmq_pipes.py,sha256=NUK25IEh0UkxzdqQQyM8tMtuZmjSiTeWu1DzkkAIOhA,8980
  parsl/executors/radical/__init__.py,sha256=CKbtV2numw5QvgIBq1htMUrt9TqDCIC2zifyf2svTNU,186
  parsl/executors/radical/executor.py,sha256=en2TKzZnJYU_juojkM_aZUdWhbAgutAYn_EL6HGpfSY,22835
@@ -114,22 +114,24 @@ parsl/launchers/base.py,sha256=CblcvPTJiu-MNLWaRtFe29SZQ0BpTOlaY8CGcHdlHIE,538
  parsl/launchers/errors.py,sha256=8YMV_CHpBNVa4eXkGE4x5DaFQlZkDCRCHmBktYcY6TA,467
  parsl/launchers/launchers.py,sha256=cQsNsHuCOL_nQTjPXf0--YsgsDoMoJ77bO1Wt4ncLjs,15134
  parsl/monitoring/__init__.py,sha256=0ywNz6i0lM1xo_7_BIxhETDGeVd2C_0wwD7qgeaMR4c,83
- parsl/monitoring/db_manager.py,sha256=ra5PqmbUstfDx0o_bkBYI8GIUi461-GV3b4A-Q6DVVE,33300
+ parsl/monitoring/db_manager.py,sha256=L0c5S9ockq0UIchT2bjmkSAWXS-t0G-Q_neOIBfLbm0,33444
  parsl/monitoring/errors.py,sha256=D6jpYzEzp0d6FmVKGqhvjAxr4ztZfJX2s-aXemH9bBU,148
  parsl/monitoring/message_type.py,sha256=Khn88afNxcOIciKiCK4GLnn90I5BlRTiOL3zK-P07yQ,401
- parsl/monitoring/monitoring.py,sha256=fkBZU4fWp7qBQUcKYtWjd4d-SsFlJUZNMZacFOh0IoA,12687
+ parsl/monitoring/monitoring.py,sha256=PspFFtf3Iaj5tl23ITRRdHrBDAocSOSvP2IVP_pmW-Y,13134
  parsl/monitoring/remote.py,sha256=t0qCTUMCzeJ_JOARFpjqlTNrAWdEb20BxhmZh9X7kEM,13728
- parsl/monitoring/router.py,sha256=GUNdvixMcVGITk5LHEfgtbKBE7DqRtQIO-fkR4Z_VYM,9224
  parsl/monitoring/types.py,sha256=oOCrzv-ab-_rv4pb8o58Sdb8G_RGp1aZriRbdf9zBEk,339
  parsl/monitoring/queries/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  parsl/monitoring/queries/pandas.py,sha256=0Z2r0rjTKCemf0eaDkF1irvVHn5g7KC5SYETvQPRxwU,2232
  parsl/monitoring/radios/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  parsl/monitoring/radios/base.py,sha256=Ep5kHf07Sm-ApMBJVudRhoWRyuiu0udjO4NvEir5LEk,291
  parsl/monitoring/radios/filesystem.py,sha256=ioZ3jOKX5Qf0DYRtWmpCEorfuMVbS58OMS_QV7DOFOs,1765
+ parsl/monitoring/radios/filesystem_router.py,sha256=kQkinktSpsVwfNESfUggSzBlRZ5JgwjM7IDN-jARAhM,2146
  parsl/monitoring/radios/htex.py,sha256=qBu4O5NYnSETHX0ptdwxSpqa2Pp3Z_V6a6lb3TbjKm4,1643
  parsl/monitoring/radios/multiprocessing.py,sha256=fsfaaoMDp6VJv1DSAl-P0R2ofO6jp13byx6NsPItV3Y,655
  parsl/monitoring/radios/udp.py,sha256=bTpt7JYp-5hyBBLzgiLj1_BlSTn28UVp39OYgVGLXCw,1613
+ parsl/monitoring/radios/udp_router.py,sha256=LEiHZVhw3lVFhqUK1FAFFtpvNOWbB6RNRBK8FaMvtDw,5771
  parsl/monitoring/radios/zmq.py,sha256=fhoHp9ylhf-D3eTJb2aSHRsuic8-FJ_oRNGnniGkCAI,592
+ parsl/monitoring/radios/zmq_router.py,sha256=pYhol8-SV8FThv7YIjqc5tv149E4ktDLb-l7-ot4nfg,5579
  parsl/monitoring/visualization/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  parsl/monitoring/visualization/app.py,sha256=xMeRlAnzl5lHddAOdSBcqY3D5lmOYw3Z3Z2_YyoVwnw,1425
  parsl/monitoring/visualization/models.py,sha256=C7CcF6w6PhtrdvDX9VgDH-aSrpLfvYU1fJ4-HDUeFVQ,5138
@@ -337,6 +339,7 @@ parsl/tests/test_monitoring/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMp
  parsl/tests/test_monitoring/test_app_names.py,sha256=A-mOMCVhZDnUyJp32fsTUkHdcyval8o7WPEWacDkbD4,2208
  parsl/tests/test_monitoring/test_basic.py,sha256=VdF6JHfqsEOIMg-ysIAREgygZIjHWNDVLNVQ7jhWxmQ,4592
  parsl/tests/test_monitoring/test_db_locks.py,sha256=3s3c1xhKo230ZZIJ3f1Ca4U7LcEdXnanOGVXQyNlk2U,2895
+ parsl/tests/test_monitoring/test_exit_helper.py,sha256=FsMcQ1GF70vPXEfexDyo674_c5cglJBrLXKBzAYIfOk,1266
  parsl/tests/test_monitoring/test_fuzz_zmq.py,sha256=--3-pQUvXXbkr8v_BEJoPvVvNly1oXvrD2nJh6yl_0M,3436
  parsl/tests/test_monitoring/test_htex_init_blocks_vs_monitoring.py,sha256=_QV8zjBKVF_qBbBnhT0C3X9AmfS7IKLcOnEw_cU6HeM,2622
  parsl/tests/test_monitoring/test_incomplete_futures.py,sha256=ZnO1sFSwlWUBHX64C_zwfTVRVC_UFNlU4h0POgx6NEo,2005
@@ -347,7 +350,7 @@ parsl/tests/test_mpi_apps/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJW
  parsl/tests/test_mpi_apps/test_bad_mpi_config.py,sha256=QKvEUSrHIBrvqu2fRj1MAqxsYxDfcrdQ7dzWdOZejuU,1320
  parsl/tests/test_mpi_apps/test_mpi_mode_enabled.py,sha256=_fpiaDq9yEUuBxTiuxLFsBt5r1oX9S-3S-YL5yRB13E,5423
  parsl/tests/test_mpi_apps/test_mpi_prefix.py,sha256=yJslZvYK3JeL9UgxMwF9DDPR9QD4zJLGVjubD0F-utc,1950
- parsl/tests/test_mpi_apps/test_mpi_scheduler.py,sha256=YdV8A-m67DHk9wxgNpj69wwGEKrFGL20KAC1TzLke3c,6332
+ parsl/tests/test_mpi_apps/test_mpi_scheduler.py,sha256=LPvk5wywYANQNCoQ8muwOLEznnZqwler4jJglinAT9I,7370
  parsl/tests/test_mpi_apps/test_mpiex.py,sha256=mlFdHK3A1B6NsEhxTQQX8lhs9qVza36FMG99vNrBRW4,2021
  parsl/tests/test_mpi_apps/test_resource_spec.py,sha256=5k6HM2jtb6sa7jetpI-Tl1nPQiN33VLaM7YT10c307E,3756
  parsl/tests/test_providers/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -409,7 +412,7 @@ parsl/tests/test_scaling/test_regression_3696_oscillation.py,sha256=7Xc3vgocXXUb
  parsl/tests/test_scaling/test_scale_down.py,sha256=vHJOMRUriW6xPtaY8GTKYXd5P0WJkQV6Q1IPui05aLU,2736
  parsl/tests/test_scaling/test_scale_down_htex_auto_scale.py,sha256=EnVNllKO2AGKkGa6927cLrzvvG6mpNQeFDzVktv6x08,4521
  parsl/tests/test_scaling/test_scale_down_htex_unregistered.py,sha256=OrdnYmd58n7UfkANPJ7mzha4WSCPdbgJRX1O1Zdu0tI,1954
- parsl/tests/test_scaling/test_shutdown_scalein.py,sha256=QMlby0g4SgRUqFYZy-d80a23L8FmYl_dwse67E86oVs,2325
+ parsl/tests/test_scaling/test_shutdown_scalein.py,sha256=sr40of5DwxeyQI97MDZxFqJILZSXZJb9Dv7qTf2gql8,2471
  parsl/tests/test_scaling/test_worker_interchange_bad_messages_3262.py,sha256=GaXmRli1srTal-JQmCGDTP4BAwAKI_daXMmrjULsZkY,2788
  parsl/tests/test_serialization/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  parsl/tests/test_serialization/test_2555_caching_deserializer.py,sha256=jEXJvbriaLVI7frV5t-iJRKYyeQ7a9_-t3X9lhhBWQo,767
@@ -420,7 +423,7 @@ parsl/tests/test_serialization/test_pack_resource_spec.py,sha256=-Vtyh8KyezZw8e7
  parsl/tests/test_serialization/test_proxystore_configured.py,sha256=lGWOSEWul16enDWhW-s7CK0d3eMDzm1324Fmj0cZMVU,2293
  parsl/tests/test_serialization/test_proxystore_impl.py,sha256=uGd45sfPm9rJhzqKV0rI3lqdSOAUddQf-diEpcJAlcY,1228
  parsl/tests/test_shutdown/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- parsl/tests/test_shutdown/test_kill_monitoring.py,sha256=S9CnCziBk3sQMKgccqvNUEBHanf1hWMK1SLc2aF8uWs,1906
+ parsl/tests/test_shutdown/test_kill_monitoring.py,sha256=BycTDLwxhHbbV68Qkgrmn8UUzSr55SvbNvydp35UCTM,1948
  parsl/tests/test_staging/__init__.py,sha256=WZl9EHSkfYiSoE3Gbulcq2ifmn7IFGUkasJIobL5T5A,208
  parsl/tests/test_staging/staging_provider.py,sha256=6FDpImkWOLgysqM68NbCAoXZciZokI8dmBWRAxnggBk,3242
  parsl/tests/test_staging/test_1316.py,sha256=eS0e2BDM2vmPNF60aDr35wcuGgDPfXjTjRV6kyBZOQc,2652
@@ -455,13 +458,13 @@ parsl/usage_tracking/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hS
  parsl/usage_tracking/api.py,sha256=iaCY58Dc5J4UM7_dJzEEs871P1p1HdxBMtNGyVdzc9g,1821
  parsl/usage_tracking/levels.py,sha256=xbfzYEsd55KiZJ-mzNgPebvOH4rRHum04hROzEf41tU,291
  parsl/usage_tracking/usage.py,sha256=f9k6QcpbQxkGyP5WTC9PVyv0CA05s9NDpRe5wwRdBTM,9163
- parsl-2025.3.3.data/scripts/exec_parsl_function.py,sha256=YXKVVIa4zXmOtz-0Ca4E_5nQfN_3S2bh2tB75uZZB4w,7774
- parsl-2025.3.3.data/scripts/interchange.py,sha256=17MrOc7-FXxKBWTwkzIbUoa8fvvDfPelfjByd3ZD2Wk,29446
- parsl-2025.3.3.data/scripts/parsl_coprocess.py,sha256=zrVjEqQvFOHxsLufPi00xzMONagjVwLZbavPM7bbjK4,5722
- parsl-2025.3.3.data/scripts/process_worker_pool.py,sha256=BbVJ1PS7ZW2grz0iAPPV0BgJyRMyQ7bbXSzLzWCBkyU,41070
- parsl-2025.3.3.dist-info/LICENSE,sha256=tAkwu8-AdEyGxGoSvJ2gVmQdcicWw3j1ZZueVV74M-E,11357
- parsl-2025.3.3.dist-info/METADATA,sha256=FiDSNMMf3JHayeZGotfxx9jG2XcVNwbF50cPkEtLBc8,4026
- parsl-2025.3.3.dist-info/WHEEL,sha256=tZoeGjtWxWRfdplE7E3d45VPlLNQnvbKiYnx7gwAy8A,92
- parsl-2025.3.3.dist-info/entry_points.txt,sha256=XqnsWDYoEcLbsMcpnYGKLEnSBmaIe1YoM5YsBdJG2tI,176
- parsl-2025.3.3.dist-info/top_level.txt,sha256=PIheYoUFQtF2icLsgOykgU-Cjuwr2Oi6On2jo5RYgRM,6
- parsl-2025.3.3.dist-info/RECORD,,
+ parsl-2025.3.17.data/scripts/exec_parsl_function.py,sha256=YXKVVIa4zXmOtz-0Ca4E_5nQfN_3S2bh2tB75uZZB4w,7774
+ parsl-2025.3.17.data/scripts/interchange.py,sha256=17MrOc7-FXxKBWTwkzIbUoa8fvvDfPelfjByd3ZD2Wk,29446
+ parsl-2025.3.17.data/scripts/parsl_coprocess.py,sha256=zrVjEqQvFOHxsLufPi00xzMONagjVwLZbavPM7bbjK4,5722
+ parsl-2025.3.17.data/scripts/process_worker_pool.py,sha256=__gFeFQJpV5moRofj3WKQCnKp6gmzieXjzkmzVuTmX4,41123
+ parsl-2025.3.17.dist-info/LICENSE,sha256=tAkwu8-AdEyGxGoSvJ2gVmQdcicWw3j1ZZueVV74M-E,11357
+ parsl-2025.3.17.dist-info/METADATA,sha256=d_WFIKY6wmq4VQQcz-BCh0yhu9i3i627EjutSTqSNH4,4023
+ parsl-2025.3.17.dist-info/WHEEL,sha256=tZoeGjtWxWRfdplE7E3d45VPlLNQnvbKiYnx7gwAy8A,92
+ parsl-2025.3.17.dist-info/entry_points.txt,sha256=XqnsWDYoEcLbsMcpnYGKLEnSBmaIe1YoM5YsBdJG2tI,176
+ parsl-2025.3.17.dist-info/top_level.txt,sha256=PIheYoUFQtF2icLsgOykgU-Cjuwr2Oi6On2jo5RYgRM,6
+ parsl-2025.3.17.dist-info/RECORD,,