parsl 2023.10.16__py3-none-any.whl → 2023.11.13__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (51)
  1. parsl/app/app.py +29 -21
  2. parsl/channels/base.py +12 -24
  3. parsl/config.py +19 -12
  4. parsl/configs/ad_hoc.py +2 -2
  5. parsl/dataflow/dflow.py +56 -49
  6. parsl/dataflow/futures.py +39 -9
  7. parsl/dataflow/taskrecord.py +7 -0
  8. parsl/executors/base.py +1 -1
  9. parsl/executors/high_throughput/executor.py +2 -2
  10. parsl/executors/high_throughput/interchange.py +59 -53
  11. parsl/executors/high_throughput/process_worker_pool.py +2 -2
  12. parsl/executors/high_throughput/zmq_pipes.py +1 -1
  13. parsl/executors/status_handling.py +1 -1
  14. parsl/executors/taskvine/exec_parsl_function.py +3 -4
  15. parsl/executors/taskvine/executor.py +18 -4
  16. parsl/executors/taskvine/factory.py +1 -1
  17. parsl/executors/taskvine/manager.py +12 -16
  18. parsl/executors/taskvine/utils.py +5 -5
  19. parsl/executors/threads.py +1 -2
  20. parsl/executors/workqueue/errors.py +4 -2
  21. parsl/executors/workqueue/exec_parsl_function.py +2 -1
  22. parsl/executors/workqueue/executor.py +38 -22
  23. parsl/executors/workqueue/parsl_coprocess.py +0 -1
  24. parsl/monitoring/monitoring.py +6 -6
  25. parsl/monitoring/remote.py +1 -1
  26. parsl/monitoring/visualization/plots/default/workflow_plots.py +4 -4
  27. parsl/monitoring/visualization/plots/default/workflow_resource_plots.py +2 -2
  28. parsl/providers/slurm/slurm.py +1 -1
  29. parsl/tests/configs/ad_hoc_cluster_htex.py +3 -3
  30. parsl/tests/configs/htex_ad_hoc_cluster.py +1 -1
  31. parsl/tests/configs/local_threads_monitoring.py +1 -1
  32. parsl/tests/conftest.py +6 -2
  33. parsl/tests/scaling_tests/vineex_condor.py +1 -1
  34. parsl/tests/scaling_tests/vineex_local.py +1 -1
  35. parsl/tests/scaling_tests/wqex_condor.py +1 -1
  36. parsl/tests/scaling_tests/wqex_local.py +1 -1
  37. parsl/tests/test_docs/test_kwargs.py +37 -0
  38. parsl/tests/test_error_handling/test_python_walltime.py +2 -2
  39. parsl/tests/test_monitoring/test_memoization_representation.py +5 -5
  40. parsl/tests/test_python_apps/test_lifted.py +138 -0
  41. parsl/utils.py +4 -4
  42. parsl/version.py +1 -1
  43. {parsl-2023.10.16.data → parsl-2023.11.13.data}/scripts/exec_parsl_function.py +2 -1
  44. {parsl-2023.10.16.data → parsl-2023.11.13.data}/scripts/parsl_coprocess.py +0 -1
  45. {parsl-2023.10.16.data → parsl-2023.11.13.data}/scripts/process_worker_pool.py +2 -2
  46. {parsl-2023.10.16.dist-info → parsl-2023.11.13.dist-info}/METADATA +10 -9
  47. {parsl-2023.10.16.dist-info → parsl-2023.11.13.dist-info}/RECORD +51 -49
  48. {parsl-2023.10.16.dist-info → parsl-2023.11.13.dist-info}/WHEEL +1 -1
  49. {parsl-2023.10.16.dist-info → parsl-2023.11.13.dist-info}/LICENSE +0 -0
  50. {parsl-2023.10.16.dist-info → parsl-2023.11.13.dist-info}/entry_points.txt +0 -0
  51. {parsl-2023.10.16.dist-info → parsl-2023.11.13.dist-info}/top_level.txt +0 -0
parsl/executors/workqueue/executor.py CHANGED
@@ -21,7 +21,7 @@ import inspect
  import shutil
  import itertools
 
- from parsl.serialize import pack_apply_message
+ from parsl.serialize import pack_apply_message, deserialize
  import parsl.utils as putils
  from parsl.executors.errors import ExecutorError
  from parsl.data_provider.files import File
@@ -65,9 +65,12 @@ ParslTaskToWq = namedtuple('ParslTaskToWq',
  'id category cores memory disk gpus priority running_time_min env_pkg map_file function_file result_file input_files output_files')
 
  # Support structure to communicate final status of work queue tasks to parsl
- # result is only valid if result_received is True
- # reason and status are only valid if result_received is False
- WqTaskToParsl = namedtuple('WqTaskToParsl', 'id result_received result reason status')
+ # if result_received is True:
+ # result_file is the path to the file containing the result.
+ # if result_received is False:
+ # reason and status are only valid if result_received is False
+ # result_file is None
+ WqTaskToParsl = namedtuple('WqTaskToParsl', 'id result_received result_file reason status')
 
  # Support structure to report parsl filenames to work queue.
  # parsl_name is the local_name or filepath attribute of a parsl file object.
@@ -446,7 +449,7 @@ class WorkQueueExecutor(BlockProviderExecutor, putils.RepresentationMixin):
  input_files = []
  output_files = []
 
- # Determine the input and output files that will exist at the workes:
+ # Determine the input and output files that will exist at the workers:
  input_files += [self._register_file(f) for f in kwargs.get("inputs", []) if isinstance(f, File)]
  output_files += [self._register_file(f) for f in kwargs.get("outputs", []) if isinstance(f, File)]
 
@@ -704,7 +707,6 @@ class WorkQueueExecutor(BlockProviderExecutor, putils.RepresentationMixin):
  self.collector_thread.join()
 
  logger.debug("Work Queue shutdown completed")
- return True
 
  @wrap_with_logs
  def _collect_work_queue_results(self):
@@ -726,12 +728,30 @@ class WorkQueueExecutor(BlockProviderExecutor, putils.RepresentationMixin):
  with self.tasks_lock:
  future = self.tasks.pop(task_report.id)
  logger.debug("Updating Future for executor task {}".format(task_report.id))
+ # If result_received, then there's a result file. The object inside the file
+ # may be a valid result or an exception caused within the function invocation.
+ # Otherwise there's no result file, implying errors from WorkQueue.
  if task_report.result_received:
- future.set_result(task_report.result)
+ try:
+ with open(task_report.result_file, 'rb') as f_in:
+ result = deserialize(f_in.read())
+ except Exception as e:
+ logger.error(f'Cannot load result from result file {task_report.result_file}. Exception: {e}')
+ ex = WorkQueueTaskFailure('Cannot load result from result file', None)
+ ex.__cause__ = e
+ future.set_exception(ex)
+ else:
+ if isinstance(result, Exception):
+ ex = WorkQueueTaskFailure('Task execution raises an exception', result)
+ ex.__cause__ = result
+ future.set_exception(ex)
+ else:
+ future.set_result(result)
  else:
  # If there are no results, then the task failed according to one of
  # work queue modes, such as resource exhaustion.
- future.set_exception(WorkQueueTaskFailure(task_report.reason, task_report.result))
+ ex = WorkQueueTaskFailure(task_report.reason, None)
+ future.set_exception(ex)
  finally:
  logger.debug("Marking all outstanding tasks as failed")
  logger.debug("Acquiring tasks_lock")
@@ -870,7 +890,7 @@ def _work_queue_submit_wait(*,
  logger.error("Unable to create task: {}".format(e))
  collector_queue.put_nowait(WqTaskToParsl(id=task.id,
  result_received=False,
- result=None,
+ result_file=None,
  reason="task could not be created by work queue",
  status=-1))
  continue
@@ -931,7 +951,7 @@ def _work_queue_submit_wait(*,
  logger.error("Unable to submit task to work queue: {}".format(e))
  collector_queue.put_nowait(WqTaskToParsl(id=task.id,
  result_received=False,
- result=None,
+ result_file=None,
  reason="task could not be submited to work queue",
  status=-1))
  continue
@@ -951,24 +971,20 @@ def _work_queue_submit_wait(*,
  logger.debug("Completed Work Queue task {}, executor task {}".format(t.id, t.tag))
  result_file = result_file_of_task_id.pop(t.tag)
 
- # A tasks completes 'succesfully' if it has result file,
- # and it can be loaded. This may mean that the 'success' is
- # an exception.
+ # A tasks completes 'succesfully' if it has result file.
+ # The check whether this file can load a serialized Python object
+ # happens later in the collector thread of the executor process.
  logger.debug("Looking for result in {}".format(result_file))
- try:
- with open(result_file, "rb") as f_in:
- result = pickle.load(f_in)
+ if os.path.exists(result_file):
  logger.debug("Found result in {}".format(result_file))
  collector_queue.put_nowait(WqTaskToParsl(id=executor_task_id,
  result_received=True,
- result=result,
+ result_file=result_file,
  reason=None,
  status=t.return_status))
  # If a result file could not be generated, explain the
- # failure according to work queue error codes. We generate
- # an exception and wrap it with RemoteExceptionWrapper, to
- # match the positive case.
- except Exception as e:
+ # failure according to work queue error codes.
+ else:
  reason = _explain_work_queue_result(t)
  logger.debug("Did not find result in {}".format(result_file))
  logger.debug("Wrapper Script status: {}\nWorkQueue Status: {}"
@@ -977,7 +993,7 @@ def _work_queue_submit_wait(*,
  .format(executor_task_id, t.id, reason))
  collector_queue.put_nowait(WqTaskToParsl(id=executor_task_id,
  result_received=False,
- result=e,
+ result_file=None,
  reason=reason,
  status=t.return_status))
  logger.debug("Exiting WorkQueue Monitoring Process")
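The hunks above split Work Queue result handling in two: the monitoring process now only checks that a result file exists and forwards its path in WqTaskToParsl, while the executor's collector thread deserializes the file and resolves the task's Future. A minimal sketch of the collector-side logic, assuming WorkQueueTaskFailure is provided by parsl.executors.workqueue.errors (an import path this diff does not show):

    from parsl.serialize import deserialize
    from parsl.executors.workqueue.errors import WorkQueueTaskFailure  # assumed import path

    def resolve_future_from_result_file(future, result_file):
        # Load the serialized object written by the worker-side wrapper script
        # and turn it into a Future outcome, mirroring the collector change above.
        try:
            with open(result_file, 'rb') as f_in:
                result = deserialize(f_in.read())
        except Exception as e:
            ex = WorkQueueTaskFailure('Cannot load result from result file', None)
            ex.__cause__ = e
            future.set_exception(ex)
            return
        if isinstance(result, Exception):
            # The app function ran but raised; surface that exception via the future.
            ex = WorkQueueTaskFailure('Task execution raises an exception', result)
            ex.__cause__ = result
            future.set_exception(ex)
        else:
            future.set_result(result)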
parsl/executors/workqueue/parsl_coprocess.py CHANGED
@@ -2,7 +2,6 @@
 
  import sys
  from parsl.app.errors import RemoteExceptionWrapper
- import parsl.executors.workqueue.exec_parsl_function as epf
 
  import socket
  import json
parsl/monitoring/monitoring.py CHANGED
@@ -194,10 +194,10 @@ class MonitoringHub(RepresentationMixin):
  "logdir": self.logdir,
  "logging_level": logging.DEBUG if self.monitoring_debug else logging.INFO,
  "run_id": run_id
- },
+ },
  name="Monitoring-Router-Process",
  daemon=True,
- )
+ )
  self.router_proc.start()
 
  self.dbm_proc = ForkProcess(target=dbm_starter,
@@ -205,10 +205,10 @@
  kwargs={"logdir": self.logdir,
  "logging_level": logging.DEBUG if self.monitoring_debug else logging.INFO,
  "db_url": self.logging_endpoint,
- },
+ },
  name="Monitoring-DBM-Process",
  daemon=True,
- )
+ )
  self.dbm_proc.start()
  self.logger.info("Started the router process {} and DBM process {}".format(self.router_proc.pid, self.dbm_proc.pid))
 
@@ -216,7 +216,7 @@
  args=(self.logdir, self.resource_msgs, run_dir),
  name="Monitoring-Filesystem-Process",
  daemon=True
- )
+ )
  self.filesystem_proc.start()
  self.logger.info(f"Started filesystem radio receiver process {self.filesystem_proc.pid}")
 
@@ -359,7 +359,7 @@ class MonitoringRouter:
  run_id: str,
  logging_level: int = logging.INFO,
  atexit_timeout: int = 3 # in seconds
- ):
+ ):
  """ Initializes a monitoring configuration class.
 
  Parameters
parsl/monitoring/remote.py CHANGED
@@ -143,7 +143,7 @@ def send_first_last_message(try_id: int,
  'first_msg': not is_last,
  'last_msg': is_last,
  'timestamp': datetime.datetime.now()
- })
+ })
  radio.send(msg)
  return
 
parsl/monitoring/visualization/plots/default/workflow_plots.py CHANGED
@@ -22,7 +22,7 @@ gantt_colors = {'unsched': 'rgb(240, 240, 240)',
  'exec_done': 'rgb(0, 200, 0)',
  'memo_done': 'rgb(64, 200, 64)',
  'fail_retryable': 'rgb(200, 128,128)'
- }
+ }
 
 
  def task_gantt_plot(df_task, df_status, time_completed=None):
@@ -50,7 +50,7 @@ def task_gantt_plot(df_task, df_status, time_completed=None):
  'Start': last_status['timestamp'],
  'Finish': status['timestamp'],
  'Resource': last_status['task_status_name']
- }
+ }
  parsl_tasks.extend([last_status_bar])
  last_status = status
 
@@ -60,7 +60,7 @@ def task_gantt_plot(df_task, df_status, time_completed=None):
  'Start': last_status['timestamp'],
  'Finish': time_completed,
  'Resource': last_status['task_status_name']
- }
+ }
  parsl_tasks.extend([last_status_bar])
 
  fig = ff.create_gantt(parsl_tasks,
@@ -205,7 +205,7 @@ dag_state_colors = {"unsched": (0, 'rgb(240, 240, 240)'),
  "fail_retryable": (8, 'rgb(200, 128,128)'),
  "joining": (9, 'rgb(128, 128, 255)'),
  "running_ended": (10, 'rgb(64, 64, 255)')
- }
+ }
 
 
  def workflow_dag_plot(df_tasks, group_by_apps=True):
parsl/monitoring/visualization/plots/default/workflow_resource_plots.py CHANGED
@@ -164,7 +164,7 @@ def worker_efficiency(task, node):
  y=[total_workers] * (end - start + 1),
  name='Total of workers in whole run',
  )
- ],
+ ],
  layout=go.Layout(xaxis=dict(autorange=True,
  title='Time (seconds)'),
  yaxis=dict(title='Number of workers'),
@@ -230,7 +230,7 @@ def resource_efficiency(resource, node, label):
  y=[total] * (end - start + 1),
  name=name2,
  )
- ],
+ ],
  layout=go.Layout(xaxis=dict(autorange=True,
  title='Time (seconds)'),
  yaxis=dict(title=yaxis),
parsl/providers/slurm/slurm.py CHANGED
@@ -250,7 +250,7 @@ class SlurmProvider(ClusterProvider, RepresentationMixin):
  self.resources[job_id] = {'job_id': job_id, 'status': JobStatus(JobState.PENDING)}
  break
  else:
- logger.error("Could not read job ID from sumbit command standard output.")
+ logger.error("Could not read job ID from submit command standard output.")
  logger.error("Retcode:%s STDOUT:%s STDERR:%s", retcode, stdout.strip(), stderr.strip())
  else:
  logger.error("Submit command failed")
parsl/tests/configs/ad_hoc_cluster_htex.py CHANGED
@@ -9,8 +9,8 @@ user_opts = {'adhoc':
  {'username': 'YOUR_USERNAME',
  'script_dir': 'YOUR_SCRIPT_DIR',
  'remote_hostnames': ['REMOTE_HOST_URL_1', 'REMOTE_HOST_URL_2']
- }
- } # type: Dict[str, Dict[str, Any]]
+ }
+ } # type: Dict[str, Dict[str, Any]]
 
  config = Config(
  executors=[
@@ -25,7 +25,7 @@ config = Config(
  channels=[SSHChannel(hostname=m,
  username=user_opts['adhoc']['username'],
  script_dir=user_opts['adhoc']['script_dir'],
- ) for m in user_opts['adhoc']['remote_hostnames']]
+ ) for m in user_opts['adhoc']['remote_hostnames']]
  )
  )
  ],
parsl/tests/configs/htex_ad_hoc_cluster.py CHANGED
@@ -20,7 +20,7 @@ config = Config(
  channels=[SSHChannel(hostname=m,
  username=user_opts['adhoc']['username'],
  script_dir=user_opts['adhoc']['script_dir'],
- ) for m in user_opts['adhoc']['remote_hostnames']]
+ ) for m in user_opts['adhoc']['remote_hostnames']]
  )
  )
  ],
parsl/tests/configs/local_threads_monitoring.py CHANGED
@@ -8,4 +8,4 @@ config = Config(executors=[ThreadPoolExecutor(label='threads', max_threads=4)],
  hub_port=55055,
  resource_monitoring_interval=3,
  )
- )
+ )
parsl/tests/conftest.py CHANGED
@@ -243,7 +243,7 @@ def setup_data(tmpd_cwd):
 
 
  @pytest.fixture(autouse=True, scope='function')
- def wait_for_task_completion(pytestconfig):
+ def assert_no_outstanding_tasks(pytestconfig):
  """If we're in a config-file based mode, wait for task completion between
  each test. This will detect early on (by hanging) if particular test
  tasks are not finishing, rather than silently falling off the end of
@@ -254,7 +254,11 @@ def wait_for_task_completion(pytestconfig):
  config = pytestconfig.getoption('config')[0]
  yield
  if config != 'local':
- parsl.dfk().wait_for_current_tasks()
+ logger.info("Checking no outstanding tasks")
+ for task_record in parsl.dfk().tasks.values():
+ fut = task_record['app_fu']
+ assert fut.done(), f"Incomplete task found, task id {task_record['id']}"
+ logger.info("No outstanding tasks found")
 
 
  def pytest_make_collect_report(collector):
parsl/tests/scaling_tests/vineex_condor.py CHANGED
@@ -6,5 +6,5 @@ from parsl.providers import CondorProvider
  config = Config(
  executors=[TaskVineExecutor(manager_config=TaskVineManagerConfig(port=50055),
  provider=CondorProvider(),
- )]
+ )]
  )
parsl/tests/scaling_tests/vineex_local.py CHANGED
@@ -7,5 +7,5 @@ config = Config(
  executors=[TaskVineExecutor(label='VineExec',
  worker_launch_method='factory',
  manager_config=TaskVineManagerConfig(port=50055),
- )]
+ )]
  )
parsl/tests/scaling_tests/wqex_condor.py CHANGED
@@ -8,5 +8,5 @@ config = Config(
  provider=CondorProvider(),
  # init_command='source /home/yadu/src/wq_parsl/setup_parsl_env.sh;
  # echo "Ran at $date" > /home/yadu/src/wq_parsl/parsl/tests/workqueue_tests/ran.log',
- )]
+ )]
  )
parsl/tests/scaling_tests/wqex_local.py CHANGED
@@ -8,5 +8,5 @@ config = Config(
  provider=LocalProvider(),
  # init_command='source /home/yadu/src/wq_parsl/setup_parsl_env.sh;
  # echo "Ran at $date" > /home/yadu/src/wq_parsl/parsl/tests/workqueue_tests/ran.log',
- )]
+ )]
  )
parsl/tests/test_docs/test_kwargs.py ADDED
@@ -0,0 +1,37 @@
+ """Functions used to explain kwargs"""
+ from pathlib import Path
+
+ from parsl import python_app, File
+
+
+ def test_inputs():
+ @python_app()
+ def map_app(x):
+ return x * 2
+
+ @python_app()
+ def reduce_app(inputs=()):
+ return sum(inputs)
+
+ map_futures = [map_app(x) for x in range(3)]
+ reduce_future = reduce_app(inputs=map_futures)
+
+ assert reduce_future.result() == 6
+
+
+ def test_outputs(tmpdir):
+ @python_app()
+ def write_app(message, outputs=()):
+ """Write a single message to every file in outputs"""
+ for path in outputs:
+ with open(path, 'w') as fp:
+ print(message, file=fp)
+
+ to_write = [
+ File(Path(tmpdir) / 'output-0.txt'),
+ File(Path(tmpdir) / 'output-1.txt')
+ ]
+ write_app('Hello!', outputs=to_write).result()
+ for path in to_write:
+ with open(path) as fp:
+ assert fp.read() == 'Hello!\n'
parsl/tests/test_error_handling/test_python_walltime.py CHANGED
@@ -27,8 +27,8 @@ def test_python_longer_walltime_at_invocation():
  def test_python_walltime_wrapped_names():
  f = my_app(0.01, walltime=6)
  assert f.result() == 7
- assert f.task_def['func'].__name__ == "my_app"
- assert f.task_def['func_name'] == "my_app"
+ assert f.task_record['func'].__name__ == "my_app"
+ assert f.task_record['func_name'] == "my_app"
 
 
  def test_python_bad_decorator_args():
parsl/tests/test_monitoring/test_memoization_representation.py CHANGED
@@ -41,9 +41,9 @@ def test_hashsum():
  f4 = this_app(4)
  assert f4.result() == 5
 
- assert f1.task_def['hashsum'] == f3.task_def['hashsum']
- assert f1.task_def['hashsum'] == f4.task_def['hashsum']
- assert f1.task_def['hashsum'] != f2.task_def['hashsum']
+ assert f1.task_record['hashsum'] == f3.task_record['hashsum']
+ assert f1.task_record['hashsum'] == f4.task_record['hashsum']
+ assert f1.task_record['hashsum'] != f2.task_record['hashsum']
 
  logger.info("cleaning up parsl")
  parsl.dfk().cleanup()
@@ -62,11 +62,11 @@ def test_hashsum():
  assert task_count == 4
 
  # this will check that the number of task rows for each hashsum matches the above app invocations
- result = connection.execute(text(f"SELECT COUNT(task_hashsum) FROM task WHERE task_hashsum='{f1.task_def['hashsum']}'"))
+ result = connection.execute(text(f"SELECT COUNT(task_hashsum) FROM task WHERE task_hashsum='{f1.task_record['hashsum']}'"))
  (hashsum_count, ) = result.first()
  assert hashsum_count == 3
 
- result = connection.execute(text(f"SELECT COUNT(task_hashsum) FROM task WHERE task_hashsum='{f2.task_def['hashsum']}'"))
+ result = connection.execute(text(f"SELECT COUNT(task_hashsum) FROM task WHERE task_hashsum='{f2.task_record['hashsum']}'"))
  (hashsum_count, ) = result.first()
  assert hashsum_count == 1
 
parsl/tests/test_python_apps/test_lifted.py ADDED
@@ -0,0 +1,138 @@
+ import pytest
+
+ from concurrent.futures import Future
+ from parsl import python_app
+
+
+ @python_app
+ def returns_a_dict():
+ return {"a": "X", "b": "Y"}
+
+
+ @python_app
+ def returns_a_list():
+ return ["X", "Y"]
+
+
+ @python_app
+ def returns_a_tuple():
+ return ("X", "Y")
+
+
+ @python_app
+ def returns_a_class():
+ from dataclasses import dataclass
+
+ @dataclass
+ class MyClass:
+ a: str = "X"
+ b: str = "Y"
+
+ return MyClass
+
+
+ class MyOuterClass():
+ def __init__(self):
+ self.q = "A"
+ self.r = "B"
+
+
+ @python_app
+ def returns_a_class_instance():
+ return MyOuterClass()
+
+
+ def test_returns_a_dict():
+
+ # precondition that returns_a_dict behaves
+ # correctly
+ assert returns_a_dict().result()["a"] == "X"
+
+ # check that the deferred __getitem__ functionality works,
+ # allowing [] to be used on an AppFuture
+ assert returns_a_dict()["a"].result() == "X"
+
+
+ def test_returns_a_list():
+
+ # precondition that returns_a_list behaves
+ # correctly
+ assert returns_a_list().result()[0] == "X"
+
+ # check that the deferred __getitem__ functionality works,
+ # allowing [] to be used on an AppFuture
+ assert returns_a_list()[0].result() == "X"
+
+
+ def test_returns_a_tuple():
+
+ # precondition that returns_a_tuple behaves
+ # correctly
+ assert returns_a_tuple().result()[0] == "X"
+
+ # check that the deferred __getitem__ functionality works,
+ # allowing [] to be used on an AppFuture
+ assert returns_a_tuple()[0].result() == "X"
+
+
+ def test_lifted_getitem_on_dict_bad_key():
+ assert isinstance(returns_a_dict()["invalid"].exception(), KeyError)
+
+
+ def test_returns_a_class_instance():
+ # precondition
+ assert returns_a_class_instance().result().q == "A"
+
+ # test of commuting . and result()
+ assert returns_a_class_instance().q.result() == "A"
+
+
+ def test_returns_a_class_instance_no_underscores():
+ # test that _underscore attribute references are not lifted
+ f = returns_a_class_instance()
+ with pytest.raises(AttributeError):
+ f._nosuchattribute.result()
+ f.exception() # wait for f to complete before the test ends
+
+
+ def test_returns_a_class():
+
+ # precondition that returns_a_class behaves
+ # correctly
+ assert returns_a_class().result().a == "X"
+
+ # check that the deferred __getattr__ functionality works,
+ # allowing [] to be used on an AppFuture
+ assert returns_a_class().a.result() == "X"
+
+ # when the result is not indexable, a sensible error should
+ # appear in the appropriate future
+
+
+ @python_app
+ def passthrough(v):
+ return v
+
+
+ def test_lifted_getitem_ordering():
+ # this should test that lifting getitem has the correct execution
+ # order: that it does not defer the execution of following code
+
+ f_prereq = Future()
+
+ f_top = passthrough(f_prereq)
+
+ f_a = f_top['a']
+
+ # lifted ['a'] should not defer execution here (so it should not
+ # implicitly call result() on f_top). If it does, this test will
+ # hang at this point, waiting for f_top to get a value, which
+ # will not happen until f_prereq gets a value..
+ # which doesn't happen until:
+
+ f_prereq.set_result({"a": "X"})
+
+ # now at this point it should be safe to wait for f_a to get a result
+ # while passthrough and lifted getitem run...
+
+ assert f_a.result() == "X"
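The new test module above exercises "lifted" operations on AppFuture: indexing with [] and non-underscore attribute access on a future return a new future for that element or attribute without blocking. A minimal usage sketch, assuming the stock local-threads config from parsl.configs.local_threads (not part of this diff):

    import parsl
    from parsl import python_app
    from parsl.configs.local_threads import config  # assumed convenience config

    parsl.load(config)

    @python_app
    def fetch_record():
        return {"name": "parsl", "version": "2023.11.13"}

    # [] is lifted onto the AppFuture: this returns another future immediately,
    # and only blocks when .result() is called.
    name_future = fetch_record()["name"]
    assert name_future.result() == "parsl"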
parsl/utils.py CHANGED
@@ -7,7 +7,7 @@ import threading
  import time
  from contextlib import contextmanager
  from types import TracebackType
- from typing import Any, Callable, List, Tuple, Union, Generator, IO, AnyStr, Dict, Optional
+ from typing import Any, Callable, List, Sequence, Tuple, Union, Generator, IO, AnyStr, Dict, Optional
 
  import typeguard
  from typing_extensions import Type
@@ -47,7 +47,7 @@ def get_version() -> str:
 
 
  @typeguard.typechecked
- def get_all_checkpoints(rundir: str = "runinfo") -> List[str]:
+ def get_all_checkpoints(rundir: str = "runinfo") -> Sequence[str]:
  """Finds the checkpoints from all runs in the rundir.
 
  Kwargs:
@@ -76,7 +76,7 @@ def get_all_checkpoints(rundir: str = "runinfo") -> List[str]:
 
 
  @typeguard.typechecked
- def get_last_checkpoint(rundir: str = "runinfo") -> List[str]:
+ def get_last_checkpoint(rundir: str = "runinfo") -> Sequence[str]:
  """Finds the checkpoint from the last run, if one exists.
 
  Note that checkpoints are incremental, and this helper will not find
@@ -128,7 +128,7 @@ def get_std_fname_mode(
 
  @contextmanager
  def wait_for_file(path: str, seconds: int = 10) -> Generator[None, None, None]:
- for i in range(0, int(seconds * 100)):
+ for _ in range(0, int(seconds * 100)):
  time.sleep(seconds / 100.)
  if os.path.exists(path):
  break
parsl/version.py CHANGED
@@ -3,4 +3,4 @@
  Year.Month.Day[alpha/beta/..]
  Alphas will be numbered like this -> 2024.12.10a0
  """
- VERSION = '2023.10.16'
+ VERSION = '2023.11.13'
{parsl-2023.10.16.data → parsl-2023.11.13.data}/scripts/exec_parsl_function.py CHANGED
@@ -4,6 +4,7 @@ from parsl.utils import get_std_fname_mode
  import traceback
  import sys
  import pickle
+ from parsl.serialize import serialize
 
  # This scripts executes a parsl function which is pickled in a file:
  #
@@ -32,7 +33,7 @@ def load_pickled_file(filename):
 
  def dump_result_to_file(result_file, result_package):
  with open(result_file, "wb") as f_out:
- pickle.dump(result_package, f_out)
+ f_out.write(serialize(result_package))
 
 
  def remap_location(mapping, parsl_file):
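dump_result_to_file now writes the result package with parsl.serialize.serialize instead of pickle.dump, pairing with the deserialize call added to the Work Queue executor's collector thread. A small round-trip sketch of that pairing (the file name is illustrative):

    from parsl.serialize import serialize, deserialize

    result_package = {"task_id": 1, "result": 42}

    # Worker side: what dump_result_to_file writes to the result file.
    with open("result.pkl", "wb") as f_out:
        f_out.write(serialize(result_package))

    # Submit side: what the executor's collector reads back.
    with open("result.pkl", "rb") as f_in:
        assert deserialize(f_in.read()) == result_package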
{parsl-2023.10.16.data → parsl-2023.11.13.data}/scripts/parsl_coprocess.py CHANGED
@@ -2,7 +2,6 @@
 
  import sys
  from parsl.app.errors import RemoteExceptionWrapper
- import parsl.executors.workqueue.exec_parsl_function as epf
 
  import socket
  import json
{parsl-2023.10.16.data → parsl-2023.11.13.data}/scripts/process_worker_pool.py CHANGED
@@ -234,7 +234,7 @@ class Manager:
  'dir': os.getcwd(),
  'cpu_count': psutil.cpu_count(logical=False),
  'total_memory': psutil.virtual_memory().total,
- }
+ }
  b_msg = json.dumps(msg).encode('utf-8')
  return b_msg
 
@@ -608,7 +608,7 @@ def worker(worker_id, pool_id, pool_size, task_queue, result_queue, worker_queue
  logger.exception("Caught exception while trying to pickle the result package")
  pkl_package = pickle.dumps({'type': 'result', 'task_id': tid,
  'exception': serialize(RemoteExceptionWrapper(*sys.exc_info()))
- })
+ })
 
  result_queue.put(pkl_package)
  tasks_in_progress.pop(worker_id)