executorlib 0.2.0__tar.gz → 0.2.1__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (74)
  1. {executorlib-0.2.0/executorlib.egg-info → executorlib-0.2.1}/PKG-INFO +1 -1
  2. {executorlib-0.2.0 → executorlib-0.2.1}/executorlib/_version.py +3 -3
  3. {executorlib-0.2.0 → executorlib-0.2.1}/executorlib/interactive/shared.py +23 -2
  4. {executorlib-0.2.0 → executorlib-0.2.1/executorlib.egg-info}/PKG-INFO +1 -1
  5. executorlib-0.2.1/tests/test_dependencies_executor.py +347 -0
  6. executorlib-0.2.0/tests/test_dependencies_executor.py +0 -169
  7. {executorlib-0.2.0 → executorlib-0.2.1}/LICENSE +0 -0
  8. {executorlib-0.2.0 → executorlib-0.2.1}/MANIFEST.in +0 -0
  9. {executorlib-0.2.0 → executorlib-0.2.1}/README.md +0 -0
  10. {executorlib-0.2.0 → executorlib-0.2.1}/executorlib/__init__.py +0 -0
  11. {executorlib-0.2.0 → executorlib-0.2.1}/executorlib/backend/__init__.py +0 -0
  12. {executorlib-0.2.0 → executorlib-0.2.1}/executorlib/backend/cache_parallel.py +0 -0
  13. {executorlib-0.2.0 → executorlib-0.2.1}/executorlib/backend/cache_serial.py +0 -0
  14. {executorlib-0.2.0 → executorlib-0.2.1}/executorlib/backend/interactive_parallel.py +0 -0
  15. {executorlib-0.2.0 → executorlib-0.2.1}/executorlib/backend/interactive_serial.py +0 -0
  16. {executorlib-0.2.0 → executorlib-0.2.1}/executorlib/base/__init__.py +0 -0
  17. {executorlib-0.2.0 → executorlib-0.2.1}/executorlib/base/executor.py +0 -0
  18. {executorlib-0.2.0 → executorlib-0.2.1}/executorlib/cache/__init__.py +0 -0
  19. {executorlib-0.2.0 → executorlib-0.2.1}/executorlib/cache/backend.py +0 -0
  20. {executorlib-0.2.0 → executorlib-0.2.1}/executorlib/cache/executor.py +0 -0
  21. {executorlib-0.2.0 → executorlib-0.2.1}/executorlib/cache/queue_spawner.py +0 -0
  22. {executorlib-0.2.0 → executorlib-0.2.1}/executorlib/cache/shared.py +0 -0
  23. {executorlib-0.2.0 → executorlib-0.2.1}/executorlib/cache/subprocess_spawner.py +0 -0
  24. {executorlib-0.2.0 → executorlib-0.2.1}/executorlib/interactive/__init__.py +0 -0
  25. {executorlib-0.2.0 → executorlib-0.2.1}/executorlib/interactive/executor.py +0 -0
  26. {executorlib-0.2.0 → executorlib-0.2.1}/executorlib/interactive/flux.py +0 -0
  27. {executorlib-0.2.0 → executorlib-0.2.1}/executorlib/interactive/slurm.py +0 -0
  28. {executorlib-0.2.0 → executorlib-0.2.1}/executorlib/interfaces/__init__.py +0 -0
  29. {executorlib-0.2.0 → executorlib-0.2.1}/executorlib/interfaces/flux.py +0 -0
  30. {executorlib-0.2.0 → executorlib-0.2.1}/executorlib/interfaces/single.py +0 -0
  31. {executorlib-0.2.0 → executorlib-0.2.1}/executorlib/interfaces/slurm.py +0 -0
  32. {executorlib-0.2.0 → executorlib-0.2.1}/executorlib/standalone/__init__.py +0 -0
  33. {executorlib-0.2.0 → executorlib-0.2.1}/executorlib/standalone/command.py +0 -0
  34. {executorlib-0.2.0 → executorlib-0.2.1}/executorlib/standalone/hdf.py +0 -0
  35. {executorlib-0.2.0 → executorlib-0.2.1}/executorlib/standalone/inputcheck.py +0 -0
  36. {executorlib-0.2.0 → executorlib-0.2.1}/executorlib/standalone/interactive/__init__.py +0 -0
  37. {executorlib-0.2.0 → executorlib-0.2.1}/executorlib/standalone/interactive/backend.py +0 -0
  38. {executorlib-0.2.0 → executorlib-0.2.1}/executorlib/standalone/interactive/communication.py +0 -0
  39. {executorlib-0.2.0 → executorlib-0.2.1}/executorlib/standalone/interactive/spawner.py +0 -0
  40. {executorlib-0.2.0 → executorlib-0.2.1}/executorlib/standalone/plot.py +0 -0
  41. {executorlib-0.2.0 → executorlib-0.2.1}/executorlib/standalone/queue.py +0 -0
  42. {executorlib-0.2.0 → executorlib-0.2.1}/executorlib/standalone/serialize.py +0 -0
  43. {executorlib-0.2.0 → executorlib-0.2.1}/executorlib/standalone/thread.py +0 -0
  44. {executorlib-0.2.0 → executorlib-0.2.1}/executorlib.egg-info/SOURCES.txt +0 -0
  45. {executorlib-0.2.0 → executorlib-0.2.1}/executorlib.egg-info/dependency_links.txt +0 -0
  46. {executorlib-0.2.0 → executorlib-0.2.1}/executorlib.egg-info/requires.txt +0 -0
  47. {executorlib-0.2.0 → executorlib-0.2.1}/executorlib.egg-info/top_level.txt +0 -0
  48. {executorlib-0.2.0 → executorlib-0.2.1}/pyproject.toml +0 -0
  49. {executorlib-0.2.0 → executorlib-0.2.1}/setup.cfg +0 -0
  50. {executorlib-0.2.0 → executorlib-0.2.1}/setup.py +0 -0
  51. {executorlib-0.2.0 → executorlib-0.2.1}/tests/test_backend_serial.py +0 -0
  52. {executorlib-0.2.0 → executorlib-0.2.1}/tests/test_cache_executor_interactive.py +0 -0
  53. {executorlib-0.2.0 → executorlib-0.2.1}/tests/test_cache_executor_mpi.py +0 -0
  54. {executorlib-0.2.0 → executorlib-0.2.1}/tests/test_cache_executor_pysqa_flux.py +0 -0
  55. {executorlib-0.2.0 → executorlib-0.2.1}/tests/test_cache_executor_serial.py +0 -0
  56. {executorlib-0.2.0 → executorlib-0.2.1}/tests/test_cache_hdf.py +0 -0
  57. {executorlib-0.2.0 → executorlib-0.2.1}/tests/test_cache_shared.py +0 -0
  58. {executorlib-0.2.0 → executorlib-0.2.1}/tests/test_executor_backend_flux.py +0 -0
  59. {executorlib-0.2.0 → executorlib-0.2.1}/tests/test_executor_backend_mpi.py +0 -0
  60. {executorlib-0.2.0 → executorlib-0.2.1}/tests/test_executor_backend_mpi_noblock.py +0 -0
  61. {executorlib-0.2.0 → executorlib-0.2.1}/tests/test_flux_executor.py +0 -0
  62. {executorlib-0.2.0 → executorlib-0.2.1}/tests/test_integration_pyiron_workflow.py +0 -0
  63. {executorlib-0.2.0 → executorlib-0.2.1}/tests/test_local_executor.py +0 -0
  64. {executorlib-0.2.0 → executorlib-0.2.1}/tests/test_local_executor_future.py +0 -0
  65. {executorlib-0.2.0 → executorlib-0.2.1}/tests/test_plot_dependency.py +0 -0
  66. {executorlib-0.2.0 → executorlib-0.2.1}/tests/test_plot_dependency_flux.py +0 -0
  67. {executorlib-0.2.0 → executorlib-0.2.1}/tests/test_pysqa_subprocess.py +0 -0
  68. {executorlib-0.2.0 → executorlib-0.2.1}/tests/test_shared_backend.py +0 -0
  69. {executorlib-0.2.0 → executorlib-0.2.1}/tests/test_shared_communication.py +0 -0
  70. {executorlib-0.2.0 → executorlib-0.2.1}/tests/test_shared_executorbase.py +0 -0
  71. {executorlib-0.2.0 → executorlib-0.2.1}/tests/test_shared_input_check.py +0 -0
  72. {executorlib-0.2.0 → executorlib-0.2.1}/tests/test_shared_thread.py +0 -0
  73. {executorlib-0.2.0 → executorlib-0.2.1}/tests/test_shell_executor.py +0 -0
  74. {executorlib-0.2.0 → executorlib-0.2.1}/tests/test_shell_interactive.py +0 -0
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.1
2
2
  Name: executorlib
3
- Version: 0.2.0
3
+ Version: 0.2.1
4
4
  Summary: Up-scale python functions for high performance computing (HPC) with executorlib.
5
5
  Author-email: Jan Janssen <janssen@lanl.gov>
6
6
  License: BSD 3-Clause License
@@ -8,11 +8,11 @@ import json
8
8
 
9
9
  version_json = '''
10
10
  {
11
- "date": "2025-02-11T12:47:28+0100",
11
+ "date": "2025-02-13T23:42:48+0100",
12
12
  "dirty": true,
13
13
  "error": null,
14
- "full-revisionid": "0ffd31288952f78be3b0c810eac1890759634d35",
15
- "version": "0.2.0"
14
+ "full-revisionid": "61e1d7cf7d04f01f89979f45c407e6309c1d3a48",
15
+ "version": "0.2.1"
16
16
  }
17
17
  ''' # END VERSION_JSON
18
18
 
@@ -3,6 +3,7 @@ import os
3
3
  import queue
4
4
  import sys
5
5
  import time
6
+ from asyncio.exceptions import CancelledError
6
7
  from concurrent.futures import Future
7
8
  from time import sleep
8
9
  from typing import Any, Callable, Optional, Union
@@ -361,7 +362,10 @@ def execute_tasks_with_dependencies(
361
362
  task_dict is not None and "fn" in task_dict and "future" in task_dict
362
363
  ):
363
364
  future_lst, ready_flag = _get_future_objects_from_input(task_dict=task_dict)
364
- if len(future_lst) == 0 or ready_flag:
365
+ exception_lst = _get_exception_lst(future_lst=future_lst)
366
+ if len(exception_lst) > 0:
367
+ task_dict["future"].set_exception(exception_lst[0])
368
+ elif len(future_lst) == 0 or ready_flag:
365
369
  # No future objects are used in the input or all future objects are already done
366
370
  task_dict["args"], task_dict["kwargs"] = _update_futures_in_input(
367
371
  args=task_dict["args"], kwargs=task_dict["kwargs"]
@@ -455,7 +459,10 @@ def _submit_waiting_task(wait_lst: list[dict], executor_queue: queue.Queue) -> l
455
459
  """
456
460
  wait_tmp_lst = []
457
461
  for task_wait_dict in wait_lst:
458
- if all(future.done() for future in task_wait_dict["future_lst"]):
462
+ exception_lst = _get_exception_lst(future_lst=task_wait_dict["future_lst"])
463
+ if len(exception_lst) > 0:
464
+ task_wait_dict["future"].set_exception(exception_lst[0])
465
+ elif all(future.done() for future in task_wait_dict["future_lst"]):
459
466
  del task_wait_dict["future_lst"]
460
467
  task_wait_dict["args"], task_wait_dict["kwargs"] = _update_futures_in_input(
461
468
  args=task_wait_dict["args"], kwargs=task_wait_dict["kwargs"]
@@ -663,3 +670,17 @@ def _execute_task_with_cache(
663
670
  future = task_dict["future"]
664
671
  future.set_result(result)
665
672
  future_queue.task_done()
673
+
674
+
675
+ def _get_exception_lst(future_lst: list) -> list:
676
+ def get_exception(future_obj: Future) -> bool:
677
+ try:
678
+ excp = future_obj.exception(timeout=10**-10)
679
+ return excp is not None and not isinstance(excp, CancelledError)
680
+ except TimeoutError:
681
+ return False
682
+
683
+ if sys.version_info[0] >= 3 and sys.version_info[1] >= 11:
684
+ return [f.exception() for f in future_lst if get_exception(future_obj=f)]
685
+ else:
686
+ return []
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.1
2
2
  Name: executorlib
3
- Version: 0.2.0
3
+ Version: 0.2.1
4
4
  Summary: Up-scale python functions for high performance computing (HPC) with executorlib.
5
5
  Author-email: Jan Janssen <janssen@lanl.gov>
6
6
  License: BSD 3-Clause License
@@ -0,0 +1,347 @@
1
+ from concurrent.futures import Future
2
+ import unittest
3
+ import sys
4
+ from time import sleep
5
+ from queue import Queue
6
+
7
+ from executorlib import SingleNodeExecutor
8
+ from executorlib.interfaces.single import create_single_node_executor
9
+ from executorlib.interactive.shared import execute_tasks_with_dependencies
10
+ from executorlib.standalone.serialize import cloudpickle_register
11
+ from executorlib.standalone.thread import RaisingThread
12
+
13
+
14
+ try:
15
+ import pygraphviz
16
+
17
+ skip_graphviz_test = False
18
+ except ImportError:
19
+ skip_graphviz_test = True
20
+
21
+
22
+ def add_function(parameter_1, parameter_2):
23
+ sleep(0.2)
24
+ return parameter_1 + parameter_2
25
+
26
+
27
+ def generate_tasks(length):
28
+ sleep(0.2)
29
+ return range(length)
30
+
31
+
32
+ def calc_from_lst(lst, ind, parameter):
33
+ sleep(0.2)
34
+ return lst[ind] + parameter
35
+
36
+
37
+ def merge(lst):
38
+ sleep(0.2)
39
+ return sum(lst)
40
+
41
+
42
+ def return_input_dict(input_dict):
43
+ return input_dict
44
+
45
+
46
+ def raise_error(parameter):
47
+ raise RuntimeError
48
+
49
+
50
+ class TestExecutorWithDependencies(unittest.TestCase):
51
+ def test_executor(self):
52
+ with SingleNodeExecutor(max_cores=1) as exe:
53
+ cloudpickle_register(ind=1)
54
+ future_1 = exe.submit(add_function, 1, parameter_2=2)
55
+ future_2 = exe.submit(add_function, 1, parameter_2=future_1)
56
+ self.assertEqual(future_2.result(), 4)
57
+
58
+ def test_dependency_steps(self):
59
+ cloudpickle_register(ind=1)
60
+ fs1 = Future()
61
+ fs2 = Future()
62
+ q = Queue()
63
+ q.put(
64
+ {
65
+ "fn": add_function,
66
+ "args": (),
67
+ "kwargs": {"parameter_1": 1, "parameter_2": 2},
68
+ "future": fs1,
69
+ "resource_dict": {"cores": 1},
70
+ }
71
+ )
72
+ q.put(
73
+ {
74
+ "fn": add_function,
75
+ "args": (),
76
+ "kwargs": {"parameter_1": 1, "parameter_2": fs1},
77
+ "future": fs2,
78
+ "resource_dict": {"cores": 1},
79
+ }
80
+ )
81
+ executor = create_single_node_executor(
82
+ max_workers=1,
83
+ max_cores=2,
84
+ resource_dict={
85
+ "cores": 1,
86
+ "threads_per_core": 1,
87
+ "gpus_per_core": 0,
88
+ "cwd": None,
89
+ "openmpi_oversubscribe": False,
90
+ "slurm_cmd_args": [],
91
+ },
92
+ )
93
+ process = RaisingThread(
94
+ target=execute_tasks_with_dependencies,
95
+ kwargs={
96
+ "future_queue": q,
97
+ "executor_queue": executor._future_queue,
98
+ "executor": executor,
99
+ "refresh_rate": 0.01,
100
+ },
101
+ )
102
+ process.start()
103
+ self.assertFalse(fs1.done())
104
+ self.assertFalse(fs2.done())
105
+ self.assertEqual(fs2.result(), 4)
106
+ self.assertTrue(fs1.done())
107
+ self.assertTrue(fs2.done())
108
+ q.put({"shutdown": True, "wait": True})
109
+
110
+ @unittest.skipIf(
111
+ condition=not (sys.version_info[0] >= 3 and sys.version_info[1] >= 11),
112
+ reason="requires Python 3.11 or higher",
113
+ )
114
+ def test_dependency_steps_error(self):
115
+ cloudpickle_register(ind=1)
116
+ fs1 = Future()
117
+ fs2 = Future()
118
+ q = Queue()
119
+ q.put(
120
+ {
121
+ "fn": raise_error,
122
+ "args": (),
123
+ "kwargs": {"parameter": 0},
124
+ "future": fs1,
125
+ "resource_dict": {"cores": 1},
126
+ }
127
+ )
128
+ q.put(
129
+ {
130
+ "fn": add_function,
131
+ "args": (),
132
+ "kwargs": {"parameter_1": 1, "parameter_2": fs1},
133
+ "future": fs2,
134
+ "resource_dict": {"cores": 1},
135
+ }
136
+ )
137
+ executor = create_single_node_executor(
138
+ max_workers=1,
139
+ max_cores=2,
140
+ resource_dict={
141
+ "cores": 1,
142
+ "threads_per_core": 1,
143
+ "gpus_per_core": 0,
144
+ "cwd": None,
145
+ "openmpi_oversubscribe": False,
146
+ "slurm_cmd_args": [],
147
+ },
148
+ )
149
+ process = RaisingThread(
150
+ target=execute_tasks_with_dependencies,
151
+ kwargs={
152
+ "future_queue": q,
153
+ "executor_queue": executor._future_queue,
154
+ "executor": executor,
155
+ "refresh_rate": 0.01,
156
+ },
157
+ )
158
+ process.start()
159
+ self.assertFalse(fs1.done())
160
+ self.assertFalse(fs2.done())
161
+ self.assertTrue(fs1.exception() is not None)
162
+ self.assertTrue(fs2.exception() is not None)
163
+ with self.assertRaises(RuntimeError):
164
+ fs2.result()
165
+ q.put({"shutdown": True, "wait": True})
166
+
167
+ @unittest.skipIf(
168
+ condition=not (sys.version_info[0] >= 3 and sys.version_info[1] >= 11),
169
+ reason="requires Python 3.11 or higher",
170
+ )
171
+ def test_dependency_steps_error_before(self):
172
+ cloudpickle_register(ind=1)
173
+ fs1 = Future()
174
+ fs1.set_exception(RuntimeError())
175
+ fs2 = Future()
176
+ q = Queue()
177
+ q.put(
178
+ {
179
+ "fn": add_function,
180
+ "args": (),
181
+ "kwargs": {"parameter_1": 1, "parameter_2": 2},
182
+ "future": fs1,
183
+ "resource_dict": {"cores": 1},
184
+ }
185
+ )
186
+ q.put(
187
+ {
188
+ "fn": add_function,
189
+ "args": (),
190
+ "kwargs": {"parameter_1": 1, "parameter_2": fs1},
191
+ "future": fs2,
192
+ "resource_dict": {"cores": 1},
193
+ }
194
+ )
195
+ executor = create_single_node_executor(
196
+ max_workers=1,
197
+ max_cores=2,
198
+ resource_dict={
199
+ "cores": 1,
200
+ "threads_per_core": 1,
201
+ "gpus_per_core": 0,
202
+ "cwd": None,
203
+ "openmpi_oversubscribe": False,
204
+ "slurm_cmd_args": [],
205
+ },
206
+ )
207
+ process = RaisingThread(
208
+ target=execute_tasks_with_dependencies,
209
+ kwargs={
210
+ "future_queue": q,
211
+ "executor_queue": executor._future_queue,
212
+ "executor": executor,
213
+ "refresh_rate": 0.01,
214
+ },
215
+ )
216
+ process.start()
217
+ self.assertTrue(fs1.exception() is not None)
218
+ self.assertTrue(fs2.exception() is not None)
219
+ with self.assertRaises(RuntimeError):
220
+ fs2.result()
221
+ q.put({"shutdown": True, "wait": True})
222
+
223
+ def test_many_to_one(self):
224
+ length = 5
225
+ parameter = 1
226
+ with SingleNodeExecutor(max_cores=2) as exe:
227
+ cloudpickle_register(ind=1)
228
+ future_lst = exe.submit(
229
+ generate_tasks,
230
+ length=length,
231
+ resource_dict={"cores": 1},
232
+ )
233
+ lst = []
234
+ for i in range(length):
235
+ lst.append(
236
+ exe.submit(
237
+ calc_from_lst,
238
+ lst=future_lst,
239
+ ind=i,
240
+ parameter=parameter,
241
+ resource_dict={"cores": 1},
242
+ )
243
+ )
244
+ future_sum = exe.submit(
245
+ merge,
246
+ lst=lst,
247
+ resource_dict={"cores": 1},
248
+ )
249
+ self.assertEqual(future_sum.result(), 15)
250
+
251
+ def test_future_input_dict(self):
252
+ with SingleNodeExecutor() as exe:
253
+ fs = exe.submit(
254
+ return_input_dict,
255
+ input_dict={"a": exe.submit(sum, [2, 2])},
256
+ )
257
+ self.assertEqual(fs.result()["a"], 4)
258
+
259
+
260
+ class TestExecutorErrors(unittest.TestCase):
261
+ def test_block_allocation_false_one_worker(self):
262
+ with self.assertRaises(RuntimeError):
263
+ with SingleNodeExecutor(max_cores=1, block_allocation=False) as exe:
264
+ cloudpickle_register(ind=1)
265
+ _ = exe.submit(raise_error, parameter=0)
266
+
267
+ def test_block_allocation_true_one_worker(self):
268
+ with self.assertRaises(RuntimeError):
269
+ with SingleNodeExecutor(max_cores=1, block_allocation=True) as exe:
270
+ cloudpickle_register(ind=1)
271
+ _ = exe.submit(raise_error, parameter=0)
272
+
273
+ def test_block_allocation_false_two_workers(self):
274
+ with self.assertRaises(RuntimeError):
275
+ with SingleNodeExecutor(max_cores=2, block_allocation=False) as exe:
276
+ cloudpickle_register(ind=1)
277
+ _ = exe.submit(raise_error, parameter=0)
278
+
279
+ def test_block_allocation_true_two_workers(self):
280
+ with self.assertRaises(RuntimeError):
281
+ with SingleNodeExecutor(max_cores=2, block_allocation=True) as exe:
282
+ cloudpickle_register(ind=1)
283
+ _ = exe.submit(raise_error, parameter=0)
284
+
285
+ @unittest.skipIf(
286
+ condition=not (sys.version_info[0] >= 3 and sys.version_info[1] >= 11),
287
+ reason="requires Python 3.11 or higher",
288
+ )
289
+ def test_block_allocation_false_one_worker_loop(self):
290
+ with self.assertRaises(RuntimeError):
291
+ with SingleNodeExecutor(max_cores=1, block_allocation=False) as exe:
292
+ cloudpickle_register(ind=1)
293
+ lst = []
294
+ for i in range(1, 4):
295
+ lst = exe.submit(
296
+ raise_error,
297
+ parameter=lst,
298
+ )
299
+ lst.result()
300
+
301
+ @unittest.skipIf(
302
+ condition=not (sys.version_info[0] >= 3 and sys.version_info[1] >= 11),
303
+ reason="requires Python 3.11 or higher",
304
+ )
305
+ def test_block_allocation_true_one_worker_loop(self):
306
+ with self.assertRaises(RuntimeError):
307
+ with SingleNodeExecutor(max_cores=1, block_allocation=True) as exe:
308
+ cloudpickle_register(ind=1)
309
+ lst = []
310
+ for i in range(1, 4):
311
+ lst = exe.submit(
312
+ raise_error,
313
+ parameter=lst,
314
+ )
315
+ lst.result()
316
+
317
+ @unittest.skipIf(
318
+ condition=not (sys.version_info[0] >= 3 and sys.version_info[1] >= 11),
319
+ reason="requires Python 3.11 or higher",
320
+ )
321
+ def test_block_allocation_false_two_workers_loop(self):
322
+ with self.assertRaises(RuntimeError):
323
+ with SingleNodeExecutor(max_cores=2, block_allocation=False) as exe:
324
+ cloudpickle_register(ind=1)
325
+ lst = []
326
+ for i in range(1, 4):
327
+ lst = exe.submit(
328
+ raise_error,
329
+ parameter=lst,
330
+ )
331
+ lst.result()
332
+
333
+ @unittest.skipIf(
334
+ condition=not (sys.version_info[0] >= 3 and sys.version_info[1] >= 11),
335
+ reason="requires Python 3.11 or higher",
336
+ )
337
+ def test_block_allocation_true_two_workers_loop(self):
338
+ with self.assertRaises(RuntimeError):
339
+ with SingleNodeExecutor(max_cores=2, block_allocation=True) as exe:
340
+ cloudpickle_register(ind=1)
341
+ lst = []
342
+ for i in range(1, 4):
343
+ lst = exe.submit(
344
+ raise_error,
345
+ parameter=lst,
346
+ )
347
+ lst.result()
@@ -1,169 +0,0 @@
1
- from concurrent.futures import Future
2
- import unittest
3
- from time import sleep
4
- from queue import Queue
5
-
6
- from executorlib import SingleNodeExecutor
7
- from executorlib.interfaces.single import create_single_node_executor
8
- from executorlib.interactive.shared import execute_tasks_with_dependencies
9
- from executorlib.standalone.serialize import cloudpickle_register
10
- from executorlib.standalone.thread import RaisingThread
11
-
12
-
13
- try:
14
- import pygraphviz
15
-
16
- skip_graphviz_test = False
17
- except ImportError:
18
- skip_graphviz_test = True
19
-
20
-
21
- def add_function(parameter_1, parameter_2):
22
- sleep(0.2)
23
- return parameter_1 + parameter_2
24
-
25
-
26
- def generate_tasks(length):
27
- sleep(0.2)
28
- return range(length)
29
-
30
-
31
- def calc_from_lst(lst, ind, parameter):
32
- sleep(0.2)
33
- return lst[ind] + parameter
34
-
35
-
36
- def merge(lst):
37
- sleep(0.2)
38
- return sum(lst)
39
-
40
-
41
- def return_input_dict(input_dict):
42
- return input_dict
43
-
44
-
45
- def raise_error():
46
- raise RuntimeError
47
-
48
-
49
- class TestExecutorWithDependencies(unittest.TestCase):
50
- def test_executor(self):
51
- with SingleNodeExecutor(max_cores=1) as exe:
52
- cloudpickle_register(ind=1)
53
- future_1 = exe.submit(add_function, 1, parameter_2=2)
54
- future_2 = exe.submit(add_function, 1, parameter_2=future_1)
55
- self.assertEqual(future_2.result(), 4)
56
-
57
- def test_dependency_steps(self):
58
- cloudpickle_register(ind=1)
59
- fs1 = Future()
60
- fs2 = Future()
61
- q = Queue()
62
- q.put(
63
- {
64
- "fn": add_function,
65
- "args": (),
66
- "kwargs": {"parameter_1": 1, "parameter_2": 2},
67
- "future": fs1,
68
- "resource_dict": {"cores": 1},
69
- }
70
- )
71
- q.put(
72
- {
73
- "fn": add_function,
74
- "args": (),
75
- "kwargs": {"parameter_1": 1, "parameter_2": fs1},
76
- "future": fs2,
77
- "resource_dict": {"cores": 1},
78
- }
79
- )
80
- executor = create_single_node_executor(
81
- max_workers=1,
82
- max_cores=2,
83
- resource_dict={
84
- "cores": 1,
85
- "threads_per_core": 1,
86
- "gpus_per_core": 0,
87
- "cwd": None,
88
- "openmpi_oversubscribe": False,
89
- "slurm_cmd_args": [],
90
- },
91
- )
92
- process = RaisingThread(
93
- target=execute_tasks_with_dependencies,
94
- kwargs={
95
- "future_queue": q,
96
- "executor_queue": executor._future_queue,
97
- "executor": executor,
98
- "refresh_rate": 0.01,
99
- },
100
- )
101
- process.start()
102
- self.assertFalse(fs1.done())
103
- self.assertFalse(fs2.done())
104
- self.assertEqual(fs2.result(), 4)
105
- self.assertTrue(fs1.done())
106
- self.assertTrue(fs2.done())
107
- q.put({"shutdown": True, "wait": True})
108
-
109
- def test_many_to_one(self):
110
- length = 5
111
- parameter = 1
112
- with SingleNodeExecutor(max_cores=2) as exe:
113
- cloudpickle_register(ind=1)
114
- future_lst = exe.submit(
115
- generate_tasks,
116
- length=length,
117
- resource_dict={"cores": 1},
118
- )
119
- lst = []
120
- for i in range(length):
121
- lst.append(
122
- exe.submit(
123
- calc_from_lst,
124
- lst=future_lst,
125
- ind=i,
126
- parameter=parameter,
127
- resource_dict={"cores": 1},
128
- )
129
- )
130
- future_sum = exe.submit(
131
- merge,
132
- lst=lst,
133
- resource_dict={"cores": 1},
134
- )
135
- self.assertEqual(future_sum.result(), 15)
136
-
137
- def test_future_input_dict(self):
138
- with SingleNodeExecutor() as exe:
139
- fs = exe.submit(
140
- return_input_dict,
141
- input_dict={"a": exe.submit(sum, [2, 2])},
142
- )
143
- self.assertEqual(fs.result()["a"], 4)
144
-
145
-
146
- class TestExecutorErrors(unittest.TestCase):
147
- def test_block_allocation_false_one_worker(self):
148
- with self.assertRaises(RuntimeError):
149
- with SingleNodeExecutor(max_cores=1, block_allocation=False) as exe:
150
- cloudpickle_register(ind=1)
151
- _ = exe.submit(raise_error)
152
-
153
- def test_block_allocation_true_one_worker(self):
154
- with self.assertRaises(RuntimeError):
155
- with SingleNodeExecutor(max_cores=1, block_allocation=True) as exe:
156
- cloudpickle_register(ind=1)
157
- _ = exe.submit(raise_error)
158
-
159
- def test_block_allocation_false_two_workers(self):
160
- with self.assertRaises(RuntimeError):
161
- with SingleNodeExecutor(max_cores=2, block_allocation=False) as exe:
162
- cloudpickle_register(ind=1)
163
- _ = exe.submit(raise_error)
164
-
165
- def test_block_allocation_true_two_workers(self):
166
- with self.assertRaises(RuntimeError):
167
- with SingleNodeExecutor(max_cores=2, block_allocation=True) as exe:
168
- cloudpickle_register(ind=1)
169
- _ = exe.submit(raise_error)
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes