parsl 2023.10.23__py3-none-any.whl → 2023.11.13__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (45)
  1. parsl/app/app.py +29 -21
  2. parsl/channels/base.py +12 -24
  3. parsl/config.py +19 -12
  4. parsl/configs/ad_hoc.py +2 -2
  5. parsl/dataflow/dflow.py +10 -4
  6. parsl/executors/base.py +1 -1
  7. parsl/executors/high_throughput/executor.py +2 -2
  8. parsl/executors/high_throughput/interchange.py +59 -53
  9. parsl/executors/high_throughput/process_worker_pool.py +2 -2
  10. parsl/executors/high_throughput/zmq_pipes.py +1 -1
  11. parsl/executors/status_handling.py +1 -1
  12. parsl/executors/taskvine/exec_parsl_function.py +3 -4
  13. parsl/executors/taskvine/executor.py +18 -4
  14. parsl/executors/taskvine/factory.py +1 -1
  15. parsl/executors/taskvine/manager.py +12 -16
  16. parsl/executors/taskvine/utils.py +5 -5
  17. parsl/executors/threads.py +1 -2
  18. parsl/executors/workqueue/exec_parsl_function.py +2 -1
  19. parsl/executors/workqueue/executor.py +34 -24
  20. parsl/monitoring/monitoring.py +6 -6
  21. parsl/monitoring/remote.py +1 -1
  22. parsl/monitoring/visualization/plots/default/workflow_plots.py +4 -4
  23. parsl/monitoring/visualization/plots/default/workflow_resource_plots.py +2 -2
  24. parsl/providers/slurm/slurm.py +1 -1
  25. parsl/tests/configs/ad_hoc_cluster_htex.py +3 -3
  26. parsl/tests/configs/htex_ad_hoc_cluster.py +1 -1
  27. parsl/tests/configs/local_threads_monitoring.py +1 -1
  28. parsl/tests/conftest.py +6 -2
  29. parsl/tests/scaling_tests/vineex_condor.py +1 -1
  30. parsl/tests/scaling_tests/vineex_local.py +1 -1
  31. parsl/tests/scaling_tests/wqex_condor.py +1 -1
  32. parsl/tests/scaling_tests/wqex_local.py +1 -1
  33. parsl/tests/test_docs/test_kwargs.py +37 -0
  34. parsl/tests/test_python_apps/test_lifted.py +3 -2
  35. parsl/utils.py +4 -4
  36. parsl/version.py +1 -1
  37. {parsl-2023.10.23.data → parsl-2023.11.13.data}/scripts/exec_parsl_function.py +2 -1
  38. {parsl-2023.10.23.data → parsl-2023.11.13.data}/scripts/process_worker_pool.py +2 -2
  39. {parsl-2023.10.23.dist-info → parsl-2023.11.13.dist-info}/METADATA +2 -2
  40. {parsl-2023.10.23.dist-info → parsl-2023.11.13.dist-info}/RECORD +45 -44
  41. {parsl-2023.10.23.dist-info → parsl-2023.11.13.dist-info}/WHEEL +1 -1
  42. {parsl-2023.10.23.data → parsl-2023.11.13.data}/scripts/parsl_coprocess.py +0 -0
  43. {parsl-2023.10.23.dist-info → parsl-2023.11.13.dist-info}/LICENSE +0 -0
  44. {parsl-2023.10.23.dist-info → parsl-2023.11.13.dist-info}/entry_points.txt +0 -0
  45. {parsl-2023.10.23.dist-info → parsl-2023.11.13.dist-info}/top_level.txt +0 -0
parsl/executors/workqueue/executor.py CHANGED
@@ -21,7 +21,7 @@ import inspect
  import shutil
  import itertools
 
- from parsl.serialize import pack_apply_message
+ from parsl.serialize import pack_apply_message, deserialize
  import parsl.utils as putils
  from parsl.executors.errors import ExecutorError
  from parsl.data_provider.files import File
@@ -66,11 +66,11 @@ ParslTaskToWq = namedtuple('ParslTaskToWq',
 
  # Support structure to communicate final status of work queue tasks to parsl
  # if result_received is True:
- # result is the result
+ # result_file is the path to the file containing the result.
  # if result_received is False:
  # reason and status are only valid if result_received is False
- # result is either None or an exception raised while looking for a result
- WqTaskToParsl = namedtuple('WqTaskToParsl', 'id result_received result reason status')
+ # result_file is None
+ WqTaskToParsl = namedtuple('WqTaskToParsl', 'id result_received result_file reason status')
 
  # Support structure to report parsl filenames to work queue.
  # parsl_name is the local_name or filepath attribute of a parsl file object.
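For reference, the renamed field changes the shape of messages on this queue in both directions; a minimal sketch (the id, path, reason, and status values here are illustrative, not taken from the diff):

  # success: the executor-side collector is pointed at a serialized result on disk
  ok = WqTaskToParsl(id=7, result_received=True, result_file='/tmp/t7.result',
                     reason=None, status=0)
  # failure: no result file; reason and status carry the Work Queue explanation
  bad = WqTaskToParsl(id=8, result_received=False, result_file=None,
                      reason='resource exhaustion', status=1)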
@@ -449,7 +449,7 @@ class WorkQueueExecutor(BlockProviderExecutor, putils.RepresentationMixin):
  input_files = []
  output_files = []
 
- # Determine the input and output files that will exist at the workes:
+ # Determine the input and output files that will exist at the workers:
  input_files += [self._register_file(f) for f in kwargs.get("inputs", []) if isinstance(f, File)]
  output_files += [self._register_file(f) for f in kwargs.get("outputs", []) if isinstance(f, File)]
 
@@ -707,7 +707,6 @@ class WorkQueueExecutor(BlockProviderExecutor, putils.RepresentationMixin):
  self.collector_thread.join()
 
  logger.debug("Work Queue shutdown completed")
- return True
 
  @wrap_with_logs
  def _collect_work_queue_results(self):
@@ -729,14 +728,29 @@ class WorkQueueExecutor(BlockProviderExecutor, putils.RepresentationMixin):
  with self.tasks_lock:
  future = self.tasks.pop(task_report.id)
  logger.debug("Updating Future for executor task {}".format(task_report.id))
+ # If result_received, then there's a result file. The object inside the file
+ # may be a valid result or an exception caused within the function invocation.
+ # Otherwise there's no result file, implying errors from WorkQueue.
  if task_report.result_received:
- future.set_result(task_report.result)
+ try:
+ with open(task_report.result_file, 'rb') as f_in:
+ result = deserialize(f_in.read())
+ except Exception as e:
+ logger.error(f'Cannot load result from result file {task_report.result_file}. Exception: {e}')
+ ex = WorkQueueTaskFailure('Cannot load result from result file', None)
+ ex.__cause__ = e
+ future.set_exception(ex)
+ else:
+ if isinstance(result, Exception):
+ ex = WorkQueueTaskFailure('Task execution raises an exception', result)
+ ex.__cause__ = result
+ future.set_exception(ex)
+ else:
+ future.set_result(result)
  else:
  # If there are no results, then the task failed according to one of
  # work queue modes, such as resource exhaustion.
- ex = WorkQueueTaskFailure(task_report.reason, task_report.result)
- if task_report.result is not None:
- ex.__cause__ = task_report.result
+ ex = WorkQueueTaskFailure(task_report.reason, None)
  future.set_exception(ex)
  finally:
  logger.debug("Marking all outstanding tasks as failed")
@@ -876,7 +890,7 @@ def _work_queue_submit_wait(*,
  logger.error("Unable to create task: {}".format(e))
  collector_queue.put_nowait(WqTaskToParsl(id=task.id,
  result_received=False,
- result=None,
+ result_file=None,
  reason="task could not be created by work queue",
  status=-1))
  continue
@@ -937,7 +951,7 @@ def _work_queue_submit_wait(*,
  logger.error("Unable to submit task to work queue: {}".format(e))
  collector_queue.put_nowait(WqTaskToParsl(id=task.id,
  result_received=False,
- result=None,
+ result_file=None,
  reason="task could not be submited to work queue",
  status=-1))
  continue
@@ -957,24 +971,20 @@ def _work_queue_submit_wait(*,
  logger.debug("Completed Work Queue task {}, executor task {}".format(t.id, t.tag))
  result_file = result_file_of_task_id.pop(t.tag)
 
- # A tasks completes 'succesfully' if it has result file,
- # and it can be loaded. This may mean that the 'success' is
- # an exception.
+ # A tasks completes 'succesfully' if it has result file.
+ # The check whether this file can load a serialized Python object
+ # happens later in the collector thread of the executor process.
  logger.debug("Looking for result in {}".format(result_file))
- try:
- with open(result_file, "rb") as f_in:
- result = pickle.load(f_in)
+ if os.path.exists(result_file):
  logger.debug("Found result in {}".format(result_file))
  collector_queue.put_nowait(WqTaskToParsl(id=executor_task_id,
  result_received=True,
- result=result,
+ result_file=result_file,
  reason=None,
  status=t.return_status))
  # If a result file could not be generated, explain the
- # failure according to work queue error codes. We generate
- # an exception and wrap it with RemoteExceptionWrapper, to
- # match the positive case.
- except Exception as e:
+ # failure according to work queue error codes.
+ else:
  reason = _explain_work_queue_result(t)
  logger.debug("Did not find result in {}".format(result_file))
  logger.debug("Wrapper Script status: {}\nWorkQueue Status: {}"
@@ -983,7 +993,7 @@ def _work_queue_submit_wait(*,
  .format(executor_task_id, t.id, reason))
  collector_queue.put_nowait(WqTaskToParsl(id=executor_task_id,
  result_received=False,
- result=e,
+ result_file=None,
  reason=reason,
  status=t.return_status))
  logger.debug("Exiting WorkQueue Monitoring Process")
parsl/monitoring/monitoring.py CHANGED
@@ -194,10 +194,10 @@ class MonitoringHub(RepresentationMixin):
  "logdir": self.logdir,
  "logging_level": logging.DEBUG if self.monitoring_debug else logging.INFO,
  "run_id": run_id
- },
+ },
  name="Monitoring-Router-Process",
  daemon=True,
- )
+ )
  self.router_proc.start()
 
  self.dbm_proc = ForkProcess(target=dbm_starter,
@@ -205,10 +205,10 @@ class MonitoringHub(RepresentationMixin):
  kwargs={"logdir": self.logdir,
  "logging_level": logging.DEBUG if self.monitoring_debug else logging.INFO,
  "db_url": self.logging_endpoint,
- },
+ },
  name="Monitoring-DBM-Process",
  daemon=True,
- )
+ )
  self.dbm_proc.start()
  self.logger.info("Started the router process {} and DBM process {}".format(self.router_proc.pid, self.dbm_proc.pid))
 
@@ -216,7 +216,7 @@ class MonitoringHub(RepresentationMixin):
  args=(self.logdir, self.resource_msgs, run_dir),
  name="Monitoring-Filesystem-Process",
  daemon=True
- )
+ )
  self.filesystem_proc.start()
  self.logger.info(f"Started filesystem radio receiver process {self.filesystem_proc.pid}")
 
@@ -359,7 +359,7 @@ class MonitoringRouter:
  run_id: str,
  logging_level: int = logging.INFO,
  atexit_timeout: int = 3 # in seconds
- ):
+ ):
  """ Initializes a monitoring configuration class.
 
  Parameters
parsl/monitoring/remote.py CHANGED
@@ -143,7 +143,7 @@ def send_first_last_message(try_id: int,
  'first_msg': not is_last,
  'last_msg': is_last,
  'timestamp': datetime.datetime.now()
- })
+ })
  radio.send(msg)
  return
 
parsl/monitoring/visualization/plots/default/workflow_plots.py CHANGED
@@ -22,7 +22,7 @@ gantt_colors = {'unsched': 'rgb(240, 240, 240)',
  'exec_done': 'rgb(0, 200, 0)',
  'memo_done': 'rgb(64, 200, 64)',
  'fail_retryable': 'rgb(200, 128,128)'
- }
+ }
 
 
  def task_gantt_plot(df_task, df_status, time_completed=None):
@@ -50,7 +50,7 @@ def task_gantt_plot(df_task, df_status, time_completed=None):
  'Start': last_status['timestamp'],
  'Finish': status['timestamp'],
  'Resource': last_status['task_status_name']
- }
+ }
  parsl_tasks.extend([last_status_bar])
  last_status = status
 
@@ -60,7 +60,7 @@ def task_gantt_plot(df_task, df_status, time_completed=None):
  'Start': last_status['timestamp'],
  'Finish': time_completed,
  'Resource': last_status['task_status_name']
- }
+ }
  parsl_tasks.extend([last_status_bar])
 
  fig = ff.create_gantt(parsl_tasks,
@@ -205,7 +205,7 @@ dag_state_colors = {"unsched": (0, 'rgb(240, 240, 240)'),
  "fail_retryable": (8, 'rgb(200, 128,128)'),
  "joining": (9, 'rgb(128, 128, 255)'),
  "running_ended": (10, 'rgb(64, 64, 255)')
- }
+ }
 
 
  def workflow_dag_plot(df_tasks, group_by_apps=True):
parsl/monitoring/visualization/plots/default/workflow_resource_plots.py CHANGED
@@ -164,7 +164,7 @@ def worker_efficiency(task, node):
  y=[total_workers] * (end - start + 1),
  name='Total of workers in whole run',
  )
- ],
+ ],
  layout=go.Layout(xaxis=dict(autorange=True,
  title='Time (seconds)'),
  yaxis=dict(title='Number of workers'),
@@ -230,7 +230,7 @@ def resource_efficiency(resource, node, label):
  y=[total] * (end - start + 1),
  name=name2,
  )
- ],
+ ],
  layout=go.Layout(xaxis=dict(autorange=True,
  title='Time (seconds)'),
  yaxis=dict(title=yaxis),
parsl/providers/slurm/slurm.py CHANGED
@@ -250,7 +250,7 @@ class SlurmProvider(ClusterProvider, RepresentationMixin):
  self.resources[job_id] = {'job_id': job_id, 'status': JobStatus(JobState.PENDING)}
  break
  else:
- logger.error("Could not read job ID from sumbit command standard output.")
+ logger.error("Could not read job ID from submit command standard output.")
  logger.error("Retcode:%s STDOUT:%s STDERR:%s", retcode, stdout.strip(), stderr.strip())
  else:
  logger.error("Submit command failed")
parsl/tests/configs/ad_hoc_cluster_htex.py CHANGED
@@ -9,8 +9,8 @@ user_opts = {'adhoc':
  {'username': 'YOUR_USERNAME',
  'script_dir': 'YOUR_SCRIPT_DIR',
  'remote_hostnames': ['REMOTE_HOST_URL_1', 'REMOTE_HOST_URL_2']
- }
- } # type: Dict[str, Dict[str, Any]]
+ }
+ } # type: Dict[str, Dict[str, Any]]
 
  config = Config(
  executors=[
@@ -25,7 +25,7 @@ config = Config(
  channels=[SSHChannel(hostname=m,
  username=user_opts['adhoc']['username'],
  script_dir=user_opts['adhoc']['script_dir'],
- ) for m in user_opts['adhoc']['remote_hostnames']]
+ ) for m in user_opts['adhoc']['remote_hostnames']]
  )
  )
  ],
parsl/tests/configs/htex_ad_hoc_cluster.py CHANGED
@@ -20,7 +20,7 @@ config = Config(
  channels=[SSHChannel(hostname=m,
  username=user_opts['adhoc']['username'],
  script_dir=user_opts['adhoc']['script_dir'],
- ) for m in user_opts['adhoc']['remote_hostnames']]
+ ) for m in user_opts['adhoc']['remote_hostnames']]
  )
  )
  ],
parsl/tests/configs/local_threads_monitoring.py CHANGED
@@ -8,4 +8,4 @@ config = Config(executors=[ThreadPoolExecutor(label='threads', max_threads=4)],
  hub_port=55055,
  resource_monitoring_interval=3,
  )
- )
+ )
parsl/tests/conftest.py CHANGED
@@ -243,7 +243,7 @@ def setup_data(tmpd_cwd):
 
 
  @pytest.fixture(autouse=True, scope='function')
- def wait_for_task_completion(pytestconfig):
+ def assert_no_outstanding_tasks(pytestconfig):
  """If we're in a config-file based mode, wait for task completion between
  each test. This will detect early on (by hanging) if particular test
  tasks are not finishing, rather than silently falling off the end of
@@ -254,7 +254,11 @@ def wait_for_task_completion(pytestconfig):
  config = pytestconfig.getoption('config')[0]
  yield
  if config != 'local':
- parsl.dfk().wait_for_current_tasks()
+ logger.info("Checking no outstanding tasks")
+ for task_record in parsl.dfk().tasks.values():
+ fut = task_record['app_fu']
+ assert fut.done(), f"Incomplete task found, task id {task_record['id']}"
+ logger.info("No outstanding tasks found")
 
 
  def pytest_make_collect_report(collector):
parsl/tests/scaling_tests/vineex_condor.py CHANGED
@@ -6,5 +6,5 @@ from parsl.providers import CondorProvider
  config = Config(
  executors=[TaskVineExecutor(manager_config=TaskVineManagerConfig(port=50055),
  provider=CondorProvider(),
- )]
+ )]
  )
parsl/tests/scaling_tests/vineex_local.py CHANGED
@@ -7,5 +7,5 @@ config = Config(
  executors=[TaskVineExecutor(label='VineExec',
  worker_launch_method='factory',
  manager_config=TaskVineManagerConfig(port=50055),
- )]
+ )]
  )
parsl/tests/scaling_tests/wqex_condor.py CHANGED
@@ -8,5 +8,5 @@ config = Config(
  provider=CondorProvider(),
  # init_command='source /home/yadu/src/wq_parsl/setup_parsl_env.sh;
  # echo "Ran at $date" > /home/yadu/src/wq_parsl/parsl/tests/workqueue_tests/ran.log',
- )]
+ )]
  )
parsl/tests/scaling_tests/wqex_local.py CHANGED
@@ -8,5 +8,5 @@ config = Config(
  provider=LocalProvider(),
  # init_command='source /home/yadu/src/wq_parsl/setup_parsl_env.sh;
  # echo "Ran at $date" > /home/yadu/src/wq_parsl/parsl/tests/workqueue_tests/ran.log',
- )]
+ )]
  )
parsl/tests/test_docs/test_kwargs.py ADDED
@@ -0,0 +1,37 @@
+ """Functions used to explain kwargs"""
+ from pathlib import Path
+
+ from parsl import python_app, File
+
+
+ def test_inputs():
+ @python_app()
+ def map_app(x):
+ return x * 2
+
+ @python_app()
+ def reduce_app(inputs=()):
+ return sum(inputs)
+
+ map_futures = [map_app(x) for x in range(3)]
+ reduce_future = reduce_app(inputs=map_futures)
+
+ assert reduce_future.result() == 6
+
+
+ def test_outputs(tmpdir):
+ @python_app()
+ def write_app(message, outputs=()):
+ """Write a single message to every file in outputs"""
+ for path in outputs:
+ with open(path, 'w') as fp:
+ print(message, file=fp)
+
+ to_write = [
+ File(Path(tmpdir) / 'output-0.txt'),
+ File(Path(tmpdir) / 'output-1.txt')
+ ]
+ write_app('Hello!', outputs=to_write).result()
+ for path in to_write:
+ with open(path) as fp:
+ assert fp.read() == 'Hello!\n'
parsl/tests/test_python_apps/test_lifted.py CHANGED
@@ -89,11 +89,12 @@ def test_returns_a_class_instance():
 
  def test_returns_a_class_instance_no_underscores():
  # test that _underscore attribute references are not lifted
+ f = returns_a_class_instance()
  with pytest.raises(AttributeError):
- returns_a_class_instance()._nosuchattribute.result()
+ f._nosuchattribute.result()
+ f.exception() # wait for f to complete before the test ends
 
 
- @pytest.mark.skip("returning classes is not supported in WorkQueue or Task Vine - see issue #2908")
  def test_returns_a_class():
 
  # precondition that returns_a_class behaves
parsl/utils.py CHANGED
@@ -7,7 +7,7 @@ import threading
  import time
  from contextlib import contextmanager
  from types import TracebackType
- from typing import Any, Callable, List, Tuple, Union, Generator, IO, AnyStr, Dict, Optional
+ from typing import Any, Callable, List, Sequence, Tuple, Union, Generator, IO, AnyStr, Dict, Optional
 
  import typeguard
  from typing_extensions import Type
@@ -47,7 +47,7 @@ def get_version() -> str:
 
 
  @typeguard.typechecked
- def get_all_checkpoints(rundir: str = "runinfo") -> List[str]:
+ def get_all_checkpoints(rundir: str = "runinfo") -> Sequence[str]:
  """Finds the checkpoints from all runs in the rundir.
 
  Kwargs:
@@ -76,7 +76,7 @@ def get_all_checkpoints(rundir: str = "runinfo") -> List[str]:
 
 
  @typeguard.typechecked
- def get_last_checkpoint(rundir: str = "runinfo") -> List[str]:
+ def get_last_checkpoint(rundir: str = "runinfo") -> Sequence[str]:
  """Finds the checkpoint from the last run, if one exists.
 
  Note that checkpoints are incremental, and this helper will not find
@@ -128,7 +128,7 @@ def get_std_fname_mode(
 
  @contextmanager
  def wait_for_file(path: str, seconds: int = 10) -> Generator[None, None, None]:
- for i in range(0, int(seconds * 100)):
+ for _ in range(0, int(seconds * 100)):
  time.sleep(seconds / 100.)
  if os.path.exists(path):
  break
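Widening the return annotations from List[str] to Sequence[str] signals that callers should treat the checkpoint list as read-only. A small sketch of the distinction (the function body and path below are made up for illustration):

  from typing import Sequence

  def checkpoints() -> Sequence[str]:
      return ["runinfo/000/checkpoint"]

  cps = checkpoints()
  for cp in cps:       # iteration and indexing still type-check
      print(cp)
  # cps.append("x")    # now rejected by a type checker: Sequence has no append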
parsl/version.py CHANGED
@@ -3,4 +3,4 @@
  Year.Month.Day[alpha/beta/..]
  Alphas will be numbered like this -> 2024.12.10a0
  """
- VERSION = '2023.10.23'
+ VERSION = '2023.11.13'
parsl/executors/workqueue/exec_parsl_function.py CHANGED
@@ -4,6 +4,7 @@ from parsl.utils import get_std_fname_mode
  import traceback
  import sys
  import pickle
+ from parsl.serialize import serialize
 
  # This scripts executes a parsl function which is pickled in a file:
  #
@@ -32,7 +33,7 @@ def load_pickled_file(filename):
 
  def dump_result_to_file(result_file, result_package):
  with open(result_file, "wb") as f_out:
- pickle.dump(result_package, f_out)
+ f_out.write(serialize(result_package))
 
 
  def remap_location(mapping, parsl_file):
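This pairs with the executor change above: result files are now written with parsl.serialize.serialize and read back with deserialize in the collector thread. A minimal round-trip sketch (the file name and payload are illustrative):

  from parsl.serialize import serialize, deserialize

  with open('task.result', 'wb') as f_out:
      f_out.write(serialize({'value': 42}))
  with open('task.result', 'rb') as f_in:
      assert deserialize(f_in.read()) == {'value': 42}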
parsl/executors/high_throughput/process_worker_pool.py CHANGED
@@ -234,7 +234,7 @@ class Manager:
  'dir': os.getcwd(),
  'cpu_count': psutil.cpu_count(logical=False),
  'total_memory': psutil.virtual_memory().total,
- }
+ }
  b_msg = json.dumps(msg).encode('utf-8')
  return b_msg
 
@@ -608,7 +608,7 @@ def worker(worker_id, pool_id, pool_size, task_queue, result_queue, worker_queue
  logger.exception("Caught exception while trying to pickle the result package")
  pkl_package = pickle.dumps({'type': 'result', 'task_id': tid,
  'exception': serialize(RemoteExceptionWrapper(*sys.exc_info()))
- })
+ })
 
  result_queue.put(pkl_package)
  tasks_in_progress.pop(worker_id)
{parsl-2023.10.23.dist-info → parsl-2023.11.13.dist-info}/METADATA RENAMED
@@ -1,9 +1,9 @@
  Metadata-Version: 2.1
  Name: parsl
- Version: 2023.10.23
+ Version: 2023.11.13
  Summary: Simple data dependent workflows in Python
  Home-page: https://github.com/Parsl/parsl
- Download-URL: https://github.com/Parsl/parsl/archive/2023.10.23.tar.gz
+ Download-URL: https://github.com/Parsl/parsl/archive/2023.11.13.tar.gz
  Author: The Parsl Team
  Author-email: parsl@googlegroups.com
  License: Apache 2.0