parsl 2024.7.8__py3-none-any.whl → 2024.7.15__py3-none-any.whl

This diff compares the contents of two publicly released versions of this package, as they appear in their respective public registries. It is provided for informational purposes only.
parsl/dataflow/dflow.py CHANGED
@@ -1277,6 +1277,23 @@ class DataFlowKernel:
             executor.shutdown()
             logger.info(f"Shut down executor {executor.label}")
 
+            if hasattr(executor, 'provider'):
+                if hasattr(executor.provider, 'script_dir'):
+                    logger.info(f"Closing channel(s) for {executor.label}")
+
+                    if hasattr(executor.provider, 'channels'):
+                        for channel in executor.provider.channels:
+                            logger.info(f"Closing channel {channel}")
+                            channel.close()
+                            logger.info(f"Closed channel {channel}")
+                    else:
+                        assert hasattr(executor.provider, 'channel'), "If provider has no .channels, it must have .channel"
+                        logger.info(f"Closing channel {executor.provider.channel}")
+                        executor.provider.channel.close()
+                        logger.info(f"Closed channel {executor.provider.channel}")
+
+                    logger.info(f"Closed executor channel(s) for {executor.label}")
+
         logger.info("Terminated executors")
         self.time_completed = datetime.datetime.now()
 
parsl/executors/flux/flux_instance_manager.py CHANGED
@@ -27,30 +27,29 @@ def main():
     parser.add_argument("hostname", help="hostname of the parent executor's socket")
     parser.add_argument("port", help="Port of the parent executor's socket")
     args = parser.parse_args()
-    context = zmq.Context()
-    socket = context.socket(zmq.REQ)
-    socket.connect(
-        args.protocol + "://" + gethostbyname(args.hostname) + ":" + args.port
-    )
-    # send the path to the ``flux.job`` package
-    socket.send(dirname(dirname(os.path.realpath(flux.__file__))).encode())
-    logging.debug("Flux package path sent.")
-    # collect the encapsulating Flux instance's URI
-    local_uri = flux.Flux().attr_get("local-uri")
-    hostname = gethostname()
-    if args.hostname == hostname:
-        flux_uri = local_uri
-    else:
-        flux_uri = "ssh://" + gethostname() + local_uri.replace("local://", "")
-    logging.debug("Flux URI is %s", flux_uri)
-    response = socket.recv()  # get acknowledgment
-    logging.debug("Received acknowledgment %s", response)
-    socket.send(flux_uri.encode())  # send URI
-    logging.debug("URI sent. Blocking for response...")
-    response = socket.recv()  # wait for shutdown message
-    logging.debug("Response %s received, draining flux jobs...", response)
-    flux.Flux().rpc("job-manager.drain").get()
-    logging.debug("Flux jobs drained, exiting.")
+    with zmq.Context() as context, context.socket(zmq.REQ) as socket:
+        socket.connect(
+            args.protocol + "://" + gethostbyname(args.hostname) + ":" + args.port
+        )
+        # send the path to the ``flux.job`` package
+        socket.send(dirname(dirname(os.path.realpath(flux.__file__))).encode())
+        logging.debug("Flux package path sent.")
+        # collect the encapsulating Flux instance's URI
+        local_uri = flux.Flux().attr_get("local-uri")
+        hostname = gethostname()
+        if args.hostname == hostname:
+            flux_uri = local_uri
+        else:
+            flux_uri = "ssh://" + gethostname() + local_uri.replace("local://", "")
+        logging.debug("Flux URI is %s", flux_uri)
+        response = socket.recv()  # get acknowledgment
+        logging.debug("Received acknowledgment %s", response)
+        socket.send(flux_uri.encode())  # send URI
+        logging.debug("URI sent. Blocking for response...")
+        response = socket.recv()  # wait for shutdown message
+        logging.debug("Response %s received, draining flux jobs...", response)
+        flux.Flux().rpc("job-manager.drain").get()
+        logging.debug("Flux jobs drained, exiting.")
 
 
 if __name__ == "__main__":
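The rewritten body relies on pyzmq's context-manager support: leaving the with-block closes the REQ socket and then terminates the context, even if an exception escapes partway through the exchange, so the helper no longer leaks ZMQ resources on an error path. A minimal sketch of the same pattern, with a placeholder endpoint and payload that are not part of the parsl code:

    import zmq

    def send_and_receive(endpoint: str, payload: bytes) -> bytes:
        # Socket.close() and then Context.term() run automatically on exit,
        # whether the body returns normally or raises.
        with zmq.Context() as context, context.socket(zmq.REQ) as socket:
            socket.connect(endpoint)
            socket.send(payload)
            return socket.recv()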
parsl/executors/high_throughput/executor.py CHANGED
@@ -168,7 +168,8 @@ class HighThroughputExecutor(BlockProviderExecutor, RepresentationMixin, UsageIn
 | | | | batching | | |
 Parsl<---Fut-| | | load-balancing| result exception
 ^ | | | watchdogs | | |
-| | | Q_mngmnt | | V V
+| | | Result | | | |
+| | | Queue | | V V
 | | | Thread<--|-incoming_q<---|--- +---------+
 | | | | | |
 | | | | | |
@@ -429,20 +430,19 @@ class HighThroughputExecutor(BlockProviderExecutor, RepresentationMixin, UsageIn
             "127.0.0.1", self.interchange_port_range, self.cert_dir
         )
 
-        self._queue_management_thread = None
-        self._start_queue_management_thread()
+        self._result_queue_thread = None
+        self._start_result_queue_thread()
         self._start_local_interchange_process()
 
-        logger.debug("Created management thread: {}".format(self._queue_management_thread))
+        logger.debug("Created result queue thread: %s", self._result_queue_thread)
 
         self.initialize_scaling()
 
     @wrap_with_logs
-    def _queue_management_worker(self):
-        """Listen to the queue for task status messages and handle them.
+    def _result_queue_worker(self):
+        """Listen to the queue for task result messages and handle them.
 
-        Depending on the message, tasks will be updated with results, exceptions,
-        or updates. It expects the following messages:
+        Depending on the message, tasks will be updated with results or exceptions.
 
         .. code:: python
 
@@ -459,7 +459,7 @@ class HighThroughputExecutor(BlockProviderExecutor, RepresentationMixin, UsageIn
 
         The `None` message is a die request.
         """
-        logger.debug("Queue management worker starting")
+        logger.debug("Result queue worker starting")
 
         while not self.bad_state_is_set:
             try:
@@ -528,7 +528,7 @@ class HighThroughputExecutor(BlockProviderExecutor, RepresentationMixin, UsageIn
                         else:
                             raise BadMessage("Message received with unknown type {}".format(msg['type']))
 
-        logger.info("Queue management worker finished")
+        logger.info("Result queue worker finished")
 
     def _start_local_interchange_process(self) -> None:
         """ Starts the interchange process locally
@@ -571,21 +571,21 @@ class HighThroughputExecutor(BlockProviderExecutor, RepresentationMixin, UsageIn
             raise Exception("Interchange failed to start")
         logger.debug("Got worker ports")
 
-    def _start_queue_management_thread(self):
-        """Method to start the management thread as a daemon.
+    def _start_result_queue_thread(self):
+        """Method to start the result queue thread as a daemon.
 
         Checks if a thread already exists, then starts it.
-        Could be used later as a restart if the management thread dies.
+        Could be used later as a restart if the result queue thread dies.
         """
-        if self._queue_management_thread is None:
-            logger.debug("Starting queue management thread")
-            self._queue_management_thread = threading.Thread(target=self._queue_management_worker, name="HTEX-Queue-Management-Thread")
-            self._queue_management_thread.daemon = True
-            self._queue_management_thread.start()
-            logger.debug("Started queue management thread")
+        if self._result_queue_thread is None:
+            logger.debug("Starting result queue thread")
+            self._result_queue_thread = threading.Thread(target=self._result_queue_worker, name="HTEX-Result-Queue-Thread")
+            self._result_queue_thread.daemon = True
+            self._result_queue_thread.start()
+            logger.debug("Started result queue thread")
 
         else:
-            logger.error("Management thread already exists, returning")
+            logger.error("Result queue thread already exists, returning")
 
     def hold_worker(self, worker_id: str) -> None:
         """Puts a worker on hold, preventing scheduling of additional tasks to it.
@@ -834,6 +834,23 @@ class HighThroughputExecutor(BlockProviderExecutor, RepresentationMixin, UsageIn
                 logger.info("Unable to terminate Interchange process; sending SIGKILL")
                 self.interchange_proc.kill()
 
+        logger.info("Closing ZMQ pipes")
+
+        # These pipes are used in a thread unsafe manner. If you have traced a
+        # problem to this block of code, you might consider what is happening
+        # with other threads that access these.
+
+        # incoming_q is not closed here because it is used by the results queue
+        # worker which is not shut down at this point.
+
+        if hasattr(self, 'outgoing_q'):
+            logger.info("Closing outgoing_q")
+            self.outgoing_q.close()
+
+        if hasattr(self, 'command_client'):
+            logger.info("Closing command client")
+            self.command_client.close()
+
         logger.info("Finished HighThroughputExecutor shutdown attempt")
 
     def get_usage_information(self):
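The hasattr guards here are a defensive-shutdown pattern: if the executor was never fully started, or failed partway through start-up before outgoing_q and command_client were assigned, shutdown() can still run without raising AttributeError. A small standalone sketch of that pattern, with hypothetical names unrelated to parsl:

    import io

    class Pipes:
        # Toy object whose start() can fail partway through.
        def start(self, fail_early: bool = False) -> None:
            self.outgoing = io.BytesIO()   # created first
            if fail_early:
                raise RuntimeError("failed before creating self.command")
            self.command = io.BytesIO()    # may never be created

        def shutdown(self) -> None:
            # Close only what actually exists, so a half-constructed
            # object can still be shut down cleanly.
            if hasattr(self, "outgoing"):
                self.outgoing.close()
            if hasattr(self, "command"):
                self.command.close()

    p = Pipes()
    try:
        p.start(fail_early=True)
    except RuntimeError:
        pass
    p.shutdown()   # no AttributeError; closes only the pipe that exists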
parsl/executors/high_throughput/process_worker_pool.py CHANGED
@@ -9,6 +9,7 @@ import os
 import pickle
 import platform
 import queue
+import subprocess
 import sys
 import threading
 import time
@@ -731,9 +732,27 @@ def worker(
         os.sched_setaffinity(0, my_cores)  # type: ignore[attr-defined, unused-ignore]
         logger.info("Set worker CPU affinity to {}".format(my_cores))
 
+    # If CUDA devices, find total number of devices to allow for MPS
+    # See: https://developer.nvidia.com/system-management-interface
+    nvidia_smi_cmd = "nvidia-smi -L > /dev/null && nvidia-smi -L | wc -l"
+    nvidia_smi_ret = subprocess.run(nvidia_smi_cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+    if nvidia_smi_ret.returncode == 0:
+        num_cuda_devices = int(nvidia_smi_ret.stdout.split()[0])
+    else:
+        num_cuda_devices = None
+
     # If desired, pin to accelerator
     if accelerator is not None:
-        os.environ["CUDA_VISIBLE_DEVICES"] = accelerator
+        try:
+            if num_cuda_devices is not None:
+                procs_per_cuda_device = pool_size // num_cuda_devices
+                partitioned_accelerator = str(int(accelerator) // procs_per_cuda_device)  # multiple workers will share a GPU
+                os.environ["CUDA_VISIBLE_DEVICES"] = partitioned_accelerator
+                logger.info(f'Pinned worker to partitioned cuda device: {partitioned_accelerator}')
+            else:
+                os.environ["CUDA_VISIBLE_DEVICES"] = accelerator
+        except (TypeError, ValueError, ZeroDivisionError):
+            os.environ["CUDA_VISIBLE_DEVICES"] = accelerator
         os.environ["ROCR_VISIBLE_DEVICES"] = accelerator
         os.environ["ZE_AFFINITY_MASK"] = accelerator
         os.environ["ZE_ENABLE_PCI_ID_DEVICE_ORDER"] = '1'
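To make the new partitioning arithmetic concrete: treating accelerator as a per-worker index (an assumption for illustration), a pool of 8 workers on a node where nvidia-smi -L reports 2 GPUs gives procs_per_cuda_device = 4, so workers 0-3 share device 0 and workers 4-7 share device 1, which is what lets several workers multiplex one GPU under MPS. A standalone sketch of just that arithmetic, with made-up values:

    # Illustrative values only; in the worker pool these come from the pool
    # configuration and from counting nvidia-smi -L output.
    pool_size = 8
    num_cuda_devices = 2

    procs_per_cuda_device = pool_size // num_cuda_devices   # 4 workers per GPU
    for accelerator in ("0", "1", "2", "3", "4", "5", "6", "7"):
        partitioned = str(int(accelerator) // procs_per_cuda_device)
        print(f"accelerator {accelerator} -> CUDA_VISIBLE_DEVICES={partitioned}")
    # accelerators 0-3 map to device 0, accelerators 4-7 map to device 1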
parsl/tests/test_channels/test_dfk_close.py ADDED
@@ -0,0 +1,26 @@
+from unittest.mock import Mock
+
+import pytest
+
+import parsl
+from parsl.channels.base import Channel
+from parsl.executors import HighThroughputExecutor
+from parsl.providers import LocalProvider
+
+
+@pytest.mark.local
+def test_dfk_close():
+
+    mock_channel = Mock(spec=Channel)
+
+    # block settings all 0 because the mock channel won't be able to
+    # do anything to make a block exist
+    p = LocalProvider(channel=mock_channel, init_blocks=0, min_blocks=0, max_blocks=0)
+
+    e = HighThroughputExecutor(provider=p)
+
+    c = parsl.Config(executors=[e])
+    with parsl.load(c):
+        pass
+
+    assert mock_channel.close.called
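This test drives the single-channel branch of the new DataFlowKernel cleanup shown in the dflow.py hunk above. A minimal sketch of how that cleanup treats the two provider shapes it probes for, using mocks instead of real parsl providers (the helper name below is illustrative, not parsl API):

    from unittest.mock import Mock

    def close_provider_channels(provider) -> None:
        # Mirrors the duck-typing in the cleanup code: prefer a .channels
        # list if the provider has one, otherwise fall back to .channel.
        if hasattr(provider, 'channels'):
            for channel in provider.channels:
                channel.close()
        else:
            provider.channel.close()

    multi = Mock(spec=['channels'])
    multi.channels = [Mock(), Mock()]
    close_provider_channels(multi)
    assert all(ch.close.called for ch in multi.channels)

    single = Mock(spec=['channel'])
    close_provider_channels(single)
    assert single.channel.close.called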
parsl/version.py CHANGED
@@ -3,4 +3,4 @@
 Year.Month.Day[alpha/beta/..]
 Alphas will be numbered like this -> 2024.12.10a0
 """
-VERSION = '2024.07.08'
+VERSION = '2024.07.15'
parsl-2024.7.8.data/scripts/process_worker_pool.py → parsl-2024.7.15.data/scripts/process_worker_pool.py CHANGED
@@ -9,6 +9,7 @@ import os
 import pickle
 import platform
 import queue
+import subprocess
 import sys
 import threading
 import time
@@ -731,9 +732,27 @@ def worker(
         os.sched_setaffinity(0, my_cores)  # type: ignore[attr-defined, unused-ignore]
         logger.info("Set worker CPU affinity to {}".format(my_cores))
 
+    # If CUDA devices, find total number of devices to allow for MPS
+    # See: https://developer.nvidia.com/system-management-interface
+    nvidia_smi_cmd = "nvidia-smi -L > /dev/null && nvidia-smi -L | wc -l"
+    nvidia_smi_ret = subprocess.run(nvidia_smi_cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+    if nvidia_smi_ret.returncode == 0:
+        num_cuda_devices = int(nvidia_smi_ret.stdout.split()[0])
+    else:
+        num_cuda_devices = None
+
     # If desired, pin to accelerator
     if accelerator is not None:
-        os.environ["CUDA_VISIBLE_DEVICES"] = accelerator
+        try:
+            if num_cuda_devices is not None:
+                procs_per_cuda_device = pool_size // num_cuda_devices
+                partitioned_accelerator = str(int(accelerator) // procs_per_cuda_device)  # multiple workers will share a GPU
+                os.environ["CUDA_VISIBLE_DEVICES"] = partitioned_accelerator
+                logger.info(f'Pinned worker to partitioned cuda device: {partitioned_accelerator}')
+            else:
+                os.environ["CUDA_VISIBLE_DEVICES"] = accelerator
+        except (TypeError, ValueError, ZeroDivisionError):
+            os.environ["CUDA_VISIBLE_DEVICES"] = accelerator
         os.environ["ROCR_VISIBLE_DEVICES"] = accelerator
         os.environ["ZE_AFFINITY_MASK"] = accelerator
        os.environ["ZE_ENABLE_PCI_ID_DEVICE_ORDER"] = '1'
parsl-2024.7.8.dist-info/METADATA → parsl-2024.7.15.dist-info/METADATA CHANGED
@@ -1,9 +1,9 @@
 Metadata-Version: 2.1
 Name: parsl
-Version: 2024.7.8
+Version: 2024.7.15
 Summary: Simple data dependent workflows in Python
 Home-page: https://github.com/Parsl/parsl
-Download-URL: https://github.com/Parsl/parsl/archive/2024.07.08.tar.gz
+Download-URL: https://github.com/Parsl/parsl/archive/2024.07.15.tar.gz
 Author: The Parsl Team
 Author-email: parsl@googlegroups.com
 License: Apache 2.0
parsl-2024.7.8.dist-info/RECORD → parsl-2024.7.15.dist-info/RECORD CHANGED
@@ -8,7 +8,7 @@ parsl/multiprocessing.py,sha256=MyaEcEq-Qf860u7V98u-PZrPNdtzOZL_NW6EhIJnmfQ,1937
 parsl/process_loggers.py,sha256=uQ7Gd0W72Jz7rrcYlOMfLsAEhkRltxXJL2MgdduJjEw,1136
 parsl/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 parsl/utils.py,sha256=91FjQiTUY383ueAjkBAgE21My9nba6SP2a2SrbB1r1Q,11250
-parsl/version.py,sha256=BfT6mwIfsHmz2c5mYK2wRlQ-QDy9kv3LUYAKvsC-of0,131
+parsl/version.py,sha256=xBBY22CXKXmBYJqrmCPAgPlHvalhorEzfXaNGRSVeQU,131
 parsl/app/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 parsl/app/app.py,sha256=0gbM4AH2OtFOLsv07I5nglpElcwMSOi-FzdZZfrk7So,8532
 parsl/app/bash.py,sha256=jm2AvePlCT9DZR7H_4ANDWxatp5dN_22FUlT_gWhZ-g,5528
@@ -62,7 +62,7 @@ parsl/data_provider/staging.py,sha256=ZDZuuFg38pjUStegKPcvPsfGp3iMeReMzfU6DSwtJj
 parsl/data_provider/zip.py,sha256=S4kVuH9lxAegRURYbvIUR7EYYBOccyslaqyCrVWUBhw,4497
 parsl/dataflow/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 parsl/dataflow/dependency_resolvers.py,sha256=Om8Dgh7a0ZwgXAc6TlhxLSzvxXHDlNNV1aBNiD3JTNY,3325
-parsl/dataflow/dflow.py,sha256=j2FApaGbY45fL4fyqQEv2pyZ4m1CnHGrAAmPZxqs2kk,67555
+parsl/dataflow/dflow.py,sha256=jgNOIk3xXz90RXwC38ujMz7092XRdLFv5BrMyALYhps,68513
 parsl/dataflow/errors.py,sha256=9SxVhIJY_53FQx8x4OU8UA8nd7lvUbDllH7KfMXpYaY,2177
 parsl/dataflow/futures.py,sha256=08LuP-HFiHBIZmeKCjlsazw_WpQ5fwevrU2_WbidkYw,6080
 parsl/dataflow/memoization.py,sha256=l9uw1Bu50GucBF70M5relpGKFkE4dIM9T3R1KrxW0v0,9583
@@ -77,10 +77,10 @@ parsl/executors/threads.py,sha256=hJt1LzxphqX4fe_9R9Cf1MU0lepWTU_eJe8O665B0Xo,33
 parsl/executors/flux/__init__.py,sha256=P9grTTeRPXfqXurFhlSS7XhmE6tTbnCnyQ1f9b-oYHE,136
 parsl/executors/flux/execute_parsl_task.py,sha256=gRN7F4HhdrKQ-bvn4wXrquBzFOp_9WF88hMIeUaRg5I,1553
 parsl/executors/flux/executor.py,sha256=gPq49CQwtSZYZggLZ0dCXdpUlllKHJbvR8WRKeGh9xE,16977
-parsl/executors/flux/flux_instance_manager.py,sha256=2KVcphlybF-ALYD_3_YjMUi0f5LkjdoJOT_783CW4H0,2036
+parsl/executors/flux/flux_instance_manager.py,sha256=5T3Rp7ZM-mlT0Pf0Gxgs5_YmnaPrSF9ec7zvRfLfYJw,2129
 parsl/executors/high_throughput/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 parsl/executors/high_throughput/errors.py,sha256=Sak8e8UpiEcXefUjMHbhyXc4Rn7kJtOoh7L8wreBQdk,1638
-parsl/executors/high_throughput/executor.py,sha256=c6NM9Z79bp9knSeIJ4UQ5exiLIVJ7IT2rpDxrWvZqQQ,37574
+parsl/executors/high_throughput/executor.py,sha256=bCtw_p2f1ztnqQiChKJBOiPyc6aKK39yRXSp5uFpRzk,38185
 parsl/executors/high_throughput/interchange.py,sha256=IRuiaBmks_R4cU-Sx0Q_Fjv4PdFtzU05GiPdeJstOoA,30578
 parsl/executors/high_throughput/manager_record.py,sha256=9XppKjDW0DJ7SMkPNxsiDs-HvXGPLrTg6Ceyh4b6gNs,433
 parsl/executors/high_throughput/monitoring_info.py,sha256=HC0drp6nlXQpAop5PTUKNjdXMgtZVvrBL0JzZJebPP4,298
@@ -88,7 +88,7 @@ parsl/executors/high_throughput/mpi_executor.py,sha256=V07t1GOzFhcwdlZGuYUPqc1Na
 parsl/executors/high_throughput/mpi_prefix_composer.py,sha256=hah_IznfFqk-rzuHWmg6aiF_saiDRrpW-aSo4kH9Nso,4854
 parsl/executors/high_throughput/mpi_resource_management.py,sha256=LFBbJ3BnzTcY_v-jNu30uoIB2Enk4cleN4ygY3dncjY,8194
 parsl/executors/high_throughput/probe.py,sha256=TNpGTXb4_DEeg_h-LHu4zEKi1-hffboxvKcZUl2OZGk,2751
-parsl/executors/high_throughput/process_worker_pool.py,sha256=ROTp8v1i_07OtrC1Qfcn0Qe2vXiGFuO38wcVQFnA8UM,41893
+parsl/executors/high_throughput/process_worker_pool.py,sha256=P1ZqQOyEpfvXxtfsevGpJvPH_PIxso3Mh0u8PyRbwD8,42958
 parsl/executors/high_throughput/zmq_pipes.py,sha256=tAjQB3aNVMuTXziN3dbJWre46YpXgliD55qMBbhYTLU,8581
 parsl/executors/radical/__init__.py,sha256=CKbtV2numw5QvgIBq1htMUrt9TqDCIC2zifyf2svTNU,186
 parsl/executors/radical/executor.py,sha256=426cMt6d8uJFZ_7Ub1kCslaND4OKtBX5WZdz-0RXjMk,22554
@@ -308,6 +308,7 @@ parsl/tests/test_bash_apps/test_pipeline.py,sha256=1kQDD8-Dh5H9SKFcKHzN_mSrdxAV_
 parsl/tests/test_bash_apps/test_std_uri.py,sha256=CvAt8BUhNl2pA5chq9YyhkD6eo2IUH6PjWfe3SQ-YRU,3752
 parsl/tests/test_bash_apps/test_stdout.py,sha256=hrzHXLt308qH2Gg_r0-qy5nFBNXI56vCZQBXIIocCPY,3198
 parsl/tests/test_channels/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+parsl/tests/test_channels/test_dfk_close.py,sha256=n7IF3Ud_vejg0VNRnvEgxCLmwMvPVvLbXvJdw-Mz_lw,628
 parsl/tests/test_channels/test_large_output.py,sha256=PGeNSW_sN5mR7KF1hVL2CPfktydYxo4oNz1wVQ-ENN0,595
 parsl/tests/test_checkpointing/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 parsl/tests/test_checkpointing/test_periodic.py,sha256=nfMgrG7sZ8rkMu6iOHS6lp_iTU4IsOyQLQ2Gur_FMmE,1509
@@ -466,13 +467,13 @@ parsl/usage_tracking/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hS
 parsl/usage_tracking/api.py,sha256=iaCY58Dc5J4UM7_dJzEEs871P1p1HdxBMtNGyVdzc9g,1821
 parsl/usage_tracking/levels.py,sha256=xbfzYEsd55KiZJ-mzNgPebvOH4rRHum04hROzEf41tU,291
 parsl/usage_tracking/usage.py,sha256=qNEJ7nPimqd3Y7OWFLdYmNwJ6XDKlyfV_fTzasxsQw8,8690
-parsl-2024.7.8.data/scripts/exec_parsl_function.py,sha256=RUkJ4JSJAjr7YyRZ58zhMdg8cR5dVV9odUl3AuzNf3k,7802
-parsl-2024.7.8.data/scripts/interchange.py,sha256=n0aOHLX64DEWx-OA4vWrYRVZfmaz8Rc8haNtafbgh4k,30565
-parsl-2024.7.8.data/scripts/parsl_coprocess.py,sha256=zrVjEqQvFOHxsLufPi00xzMONagjVwLZbavPM7bbjK4,5722
-parsl-2024.7.8.data/scripts/process_worker_pool.py,sha256=weug6_LAMbqEKQhiI6ZMg8r3e-XBDw1-L5_COEt7caM,41879
-parsl-2024.7.8.dist-info/LICENSE,sha256=tAkwu8-AdEyGxGoSvJ2gVmQdcicWw3j1ZZueVV74M-E,11357
-parsl-2024.7.8.dist-info/METADATA,sha256=rdrjNbmucox8U4FQ0309nzvMX36d36ooD3PZmBDM6s8,4123
-parsl-2024.7.8.dist-info/WHEEL,sha256=GJ7t_kWBFywbagK5eo9IoUwLW6oyOeTKmQ-9iHFVNxQ,92
-parsl-2024.7.8.dist-info/entry_points.txt,sha256=XqnsWDYoEcLbsMcpnYGKLEnSBmaIe1YoM5YsBdJG2tI,176
-parsl-2024.7.8.dist-info/top_level.txt,sha256=PIheYoUFQtF2icLsgOykgU-Cjuwr2Oi6On2jo5RYgRM,6
-parsl-2024.7.8.dist-info/RECORD,,
+parsl-2024.7.15.data/scripts/exec_parsl_function.py,sha256=RUkJ4JSJAjr7YyRZ58zhMdg8cR5dVV9odUl3AuzNf3k,7802
+parsl-2024.7.15.data/scripts/interchange.py,sha256=n0aOHLX64DEWx-OA4vWrYRVZfmaz8Rc8haNtafbgh4k,30565
+parsl-2024.7.15.data/scripts/parsl_coprocess.py,sha256=zrVjEqQvFOHxsLufPi00xzMONagjVwLZbavPM7bbjK4,5722
+parsl-2024.7.15.data/scripts/process_worker_pool.py,sha256=pfIQ_JzqjviaiTfVI49qw4qy8FBS8AavN_12oL8DyzE,42944
+parsl-2024.7.15.dist-info/LICENSE,sha256=tAkwu8-AdEyGxGoSvJ2gVmQdcicWw3j1ZZueVV74M-E,11357
+parsl-2024.7.15.dist-info/METADATA,sha256=bagqkFFK8EeAICbm5afqQ4--DJWNZ_900VszWxbxsZk,4124
+parsl-2024.7.15.dist-info/WHEEL,sha256=GJ7t_kWBFywbagK5eo9IoUwLW6oyOeTKmQ-9iHFVNxQ,92
+parsl-2024.7.15.dist-info/entry_points.txt,sha256=XqnsWDYoEcLbsMcpnYGKLEnSBmaIe1YoM5YsBdJG2tI,176
+parsl-2024.7.15.dist-info/top_level.txt,sha256=PIheYoUFQtF2icLsgOykgU-Cjuwr2Oi6On2jo5RYgRM,6
+parsl-2024.7.15.dist-info/RECORD,,