stouputils 1.17.0__py3-none-any.whl → 1.18.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- stouputils/collections.py +37 -7
- stouputils/collections.pyi +35 -8
- stouputils/continuous_delivery/stubs.py +1 -1
- stouputils/ctx.py +1 -3
- stouputils/ctx.pyi +1 -3
- stouputils/image.py +7 -9
- stouputils/image.pyi +3 -5
- stouputils/io.py +21 -0
- stouputils/io.pyi +6 -0
- stouputils/parallel/__init__.py +29 -0
- stouputils/parallel/__init__.pyi +4 -0
- stouputils/parallel/capturer.py +133 -0
- stouputils/parallel/capturer.pyi +38 -0
- stouputils/parallel/common.py +134 -0
- stouputils/parallel/common.pyi +53 -0
- stouputils/parallel/multi.py +309 -0
- stouputils/{parallel.pyi → parallel/multi.pyi} +14 -112
- stouputils/parallel/subprocess.py +163 -0
- stouputils/parallel/subprocess.pyi +64 -0
- stouputils/print.py +2 -3
- stouputils/print.pyi +1 -2
- {stouputils-1.17.0.dist-info → stouputils-1.18.1.dist-info}/METADATA +2 -1
- {stouputils-1.17.0.dist-info → stouputils-1.18.1.dist-info}/RECORD +25 -17
- stouputils/parallel.py +0 -556
- {stouputils-1.17.0.dist-info → stouputils-1.18.1.dist-info}/WHEEL +0 -0
- {stouputils-1.17.0.dist-info → stouputils-1.18.1.dist-info}/entry_points.txt +0 -0
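
Summary of the change: the former single stouputils/parallel.py module (deleted, -556 lines) is split into a stouputils/parallel/ package (capturer, common, multi, subprocess), and a new capture_output option is threaded through multiprocessing() and run_in_subprocess(). The new parallel/__init__.py (+29 lines) is not expanded in this diff, so whether it re-exports the old public names is an assumption; a quick smoke test of that assumption after upgrading:

# Assumption: stouputils.parallel still re-exports the public helpers that the
# old stouputils/parallel.py module provided. Run this after upgrading to 1.18.1.
from stouputils.parallel import CPU_COUNT, multiprocessing, multithreading, run_in_subprocess

print(CPU_COUNT)
print(multithreading(len, [[1], [1, 2], [1, 2, 3]]))  # expected: [1, 2, 3]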

stouputils/parallel/common.pyi +53 -0
@@ -0,0 +1,53 @@
+from collections.abc import Callable
+
+CPU_COUNT: int
+
+def nice_wrapper[T, R](args: tuple[int, Callable[[T], R], T]) -> R:
+""" Wrapper that applies nice priority then executes the function.
+
+\tArgs:
+\t\targs (tuple): Tuple containing (nice_value, func, arg)
+
+\tReturns:
+\t\tR: Result of the function execution
+\t"""
+def set_process_priority(nice_value: int) -> None:
+""" Set the priority of the current process.
+
+\tArgs:
+\t\tnice_value (int): Unix-style priority value (-20 to 19)
+\t"""
+def starmap[T, R](args: tuple[Callable[[T], R], list[T]]) -> R:
+""" Private function to use starmap using args[0](\\*args[1])
+
+\tArgs:
+\t\targs (tuple): Tuple containing the function and the arguments list to pass to the function
+\tReturns:
+\t\tobject: Result of the function execution
+\t"""
+def delayed_call[T, R](args: tuple[Callable[[T], R], float, T]) -> R:
+""" Private function to apply delay before calling the target function
+
+\tArgs:
+\t\targs (tuple): Tuple containing the function, delay in seconds, and the argument to pass to the function
+\tReturns:
+\t\tobject: Result of the function execution
+\t"""
+def handle_parameters[T, R](func: Callable[[T], R] | list[Callable[[T], R]], args: list[T], use_starmap: bool, delay_first_calls: float, max_workers: int, desc: str, color: str) -> tuple[str, Callable[[T], R], list[T]]:
+''' Private function to handle the parameters for multiprocessing or multithreading functions
+
+\tArgs:
+\t\tfunc\t\t\t\t(Callable | list[Callable]):\tFunction to execute, or list of functions (one per argument)
+\t\targs\t\t\t\t(list):\t\t\t\tList of arguments to pass to the function(s)
+\t\tuse_starmap\t\t\t(bool):\t\t\t\tWhether to use starmap or not (Defaults to False):
+\t\t\tTrue means the function will be called like func(\\*args[i]) instead of func(args[i])
+\t\tdelay_first_calls\t(int):\t\t\t\tApply i*delay_first_calls seconds delay to the first "max_workers" calls.
+\t\t\tFor instance, the first process will be delayed by 0 seconds, the second by 1 second, etc. (Defaults to 0):
+\t\t\tThis can be useful to avoid functions being called in the same second.
+\t\tmax_workers\t\t\t(int):\t\t\t\tNumber of workers to use
+\t\tdesc\t\t\t\t(str):\t\t\t\tDescription of the function execution displayed in the progress bar
+\t\tcolor\t\t\t\t(str):\t\t\t\tColor of the progress bar
+
+\tReturns:
+\t\ttuple[str, Callable[[T], R], list[T]]:\tTuple containing the description, function, and arguments
+\t'''
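
The stub above only declares signatures; the matching implementation ships in stouputils/parallel/common.py (+134 lines, not expanded in this diff). As a rough sketch of the behaviour the docstrings describe, and not the package's actual code, the nice-priority pair amounts to "set the niceness, then call the function". The _sketch names below are illustrative, and the os.nice path is Unix-only (the real helper also documents a Windows priority-class conversion):

import os
from collections.abc import Callable

def set_process_priority_sketch(nice_value: int) -> None:
	# Unix-style niceness; a full implementation would map this to a Windows priority class.
	if hasattr(os, "nice"):
		os.nice(nice_value - os.nice(0))  # os.nice() takes an increment, so compute the delta

def nice_wrapper_sketch[T, R](args: tuple[int, Callable[[T], R], T]) -> R:
	# Unpack (nice_value, func, arg), adjust this worker's priority, then run the function.
	nice_value, func, arg = args
	set_process_priority_sketch(nice_value)
	return func(arg)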

stouputils/parallel/multi.py +309 -0
@@ -0,0 +1,309 @@
+
+# Imports
+import time
+from collections.abc import Callable, Iterable
+from typing import Any
+
+from ..ctx import SetMPStartMethod
+from ..print import BAR_FORMAT, MAGENTA
+from .capturer import CaptureOutput
+from .common import CPU_COUNT, handle_parameters, nice_wrapper
+
+
+# Small test functions for doctests
+def doctest_square(x: int) -> int:
+	return x * x
+def doctest_slow(x: int) -> int:
+	time.sleep(0.1)
+	return x
+
+# Functions
+def multiprocessing[T, R](
+	func: Callable[..., R] | list[Callable[..., R]],
+	args: Iterable[T],
+	use_starmap: bool = False,
+	chunksize: int = 1,
+	desc: str = "",
+	max_workers: int | float = CPU_COUNT,
+	capture_output: bool = False,
+	delay_first_calls: float = 0,
+	nice: int | None = None,
+	color: str = MAGENTA,
+	bar_format: str = BAR_FORMAT,
+	ascii: bool = False,
+	smooth_tqdm: bool = True,
+	**tqdm_kwargs: Any
+) -> list[R]:
+	r""" Method to execute a function in parallel using multiprocessing
+
+	- For CPU-bound operations where the GIL (Global Interpreter Lock) is a bottleneck.
+	- When the task can be divided into smaller, independent sub-tasks that can be executed concurrently.
+	- For computationally intensive tasks like scientific simulations, data analysis, or machine learning workloads.
+
+	Args:
+		func (Callable | list[Callable]): Function to execute, or list of functions (one per argument)
+		args (Iterable): Iterable of arguments to pass to the function(s)
+		use_starmap (bool): Whether to use starmap or not (Defaults to False):
+			True means the function will be called like func(\*args[i]) instead of func(args[i])
+		chunksize (int): Number of arguments to process at a time
+			(Defaults to 1 for proper progress bar display)
+		desc (str): Description displayed in the progress bar
+			(if not provided no progress bar will be displayed)
+		max_workers (int | float): Number of workers to use (Defaults to CPU_COUNT), -1 means CPU_COUNT.
+			If float between 0 and 1, it's treated as a percentage of CPU_COUNT.
+			If negative float between -1 and 0, it's treated as a percentage of len(args).
+		capture_output (bool): Whether to capture stdout/stderr from the worker processes (Defaults to True)
+		delay_first_calls (float): Apply i*delay_first_calls seconds delay to the first "max_workers" calls.
+			For instance, the first process will be delayed by 0 seconds, the second by 1 second, etc.
+			(Defaults to 0): This can be useful to avoid functions being called in the same second.
+		nice (int | None): Adjust the priority of worker processes (Defaults to None).
+			Use Unix-style values: -20 (highest priority) to 19 (lowest priority).
+			Positive values reduce priority, negative values increase it.
+			Automatically converted to appropriate priority class on Windows.
+			If None, no priority adjustment is made.
+		color (str): Color of the progress bar (Defaults to MAGENTA)
+		bar_format (str): Format of the progress bar (Defaults to BAR_FORMAT)
+		ascii (bool): Whether to use ASCII or Unicode characters for the progress bar
+		smooth_tqdm (bool): Whether to enable smooth progress bar updates by setting miniters and mininterval (Defaults to True)
+		**tqdm_kwargs (Any): Additional keyword arguments to pass to tqdm
+
+	Returns:
+		list[object]: Results of the function execution
+
+	Examples:
+		.. code-block:: python
+
+			> multiprocessing(doctest_square, args=[1, 2, 3])
+			[1, 4, 9]
+
+			> multiprocessing(int.__mul__, [(1,2), (3,4), (5,6)], use_starmap=True)
+			[2, 12, 30]
+
+			> # Using a list of functions (one per argument)
+			> multiprocessing([doctest_square, doctest_square, doctest_square], [1, 2, 3])
+			[1, 4, 9]
+
+			> # Will process in parallel with progress bar
+			> multiprocessing(doctest_slow, range(10), desc="Processing")
+			[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
+
+			> # Will process in parallel with progress bar and delay the first threads
+			> multiprocessing(
+			. doctest_slow,
+			. range(10),
+			. desc="Processing with delay",
+			. max_workers=2,
+			. delay_first_calls=0.6
+			. )
+			[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
+	"""
+	# Imports
+	import multiprocessing as mp
+	from multiprocessing import Pool
+
+	from tqdm.auto import tqdm
+	from tqdm.contrib.concurrent import process_map # pyright: ignore[reportUnknownVariableType]
+
+	# Handle parameters
+	args = list(args) # Ensure we have a list (not other iterable)
+	if max_workers == -1:
+		max_workers = CPU_COUNT
+	if isinstance(max_workers, float):
+		if max_workers > 0:
+			assert max_workers <= 1, "max_workers as positive float must be between 0 and 1 (percentage of CPU_COUNT)"
+			max_workers = int(max_workers * CPU_COUNT)
+		else:
+			assert -1 <= max_workers < 0, "max_workers as negative float must be between -1 and 0 (percentage of len(args))"
+			max_workers = int(-max_workers * len(args))
+	verbose: bool = desc != ""
+	desc, func, args = handle_parameters(func, args, use_starmap, delay_first_calls, max_workers, desc, color)
+	if bar_format == BAR_FORMAT:
+		bar_format = bar_format.replace(MAGENTA, color)
+	if smooth_tqdm:
+		tqdm_kwargs.setdefault("mininterval", 0.0)
+		try:
+			total = len(args) # type: ignore
+			import shutil
+			width = shutil.get_terminal_size().columns
+			tqdm_kwargs.setdefault("miniters", max(1, total // width))
+		except (TypeError, OSError):
+			tqdm_kwargs.setdefault("miniters", 1)
+
+	# Do multiprocessing only if there is more than 1 argument and more than 1 CPU
+	if max_workers > 1 and len(args) > 1:
+		# Wrap function with nice if specified
+		if nice is not None:
+			wrapped_args = [(nice, func, arg) for arg in args]
+			wrapped_func = nice_wrapper
+		else:
+			wrapped_args = args
+			wrapped_func = func
+
+		# Capture output if specified
+		capturer: CaptureOutput | None = None
+		if capture_output:
+			capturer = CaptureOutput()
+			capturer.start_listener()
+			wrapped_args = [(capturer, wrapped_func, arg) for arg in wrapped_args]
+			wrapped_func = capture_subprocess_output
+
+		def process() -> list[Any]:
+			if verbose:
+				return list(process_map(
+					wrapped_func, wrapped_args, max_workers=max_workers, chunksize=chunksize, desc=desc, bar_format=bar_format, ascii=ascii, **tqdm_kwargs
+				)) # type: ignore
+			else:
+				with Pool(max_workers) as pool:
+					return list(pool.map(wrapped_func, wrapped_args, chunksize=chunksize)) # type: ignore
+		try:
+			return process()
+		except RuntimeError as e:
+			if "SemLock created in a fork context is being shared with a process in a spawn context" in str(e):
+
+				# Try with alternate start method
+				with SetMPStartMethod("spawn" if mp.get_start_method() != "spawn" else "fork"):
+					return process()
+			else: # Re-raise if it's not the SemLock error
+				raise
+		finally:
+			if capturer is not None:
+				capturer.parent_close_write()
+				capturer.join_listener(timeout=5.0)
+
+	# Single process execution
+	else:
+		if verbose:
+			return [func(arg) for arg in tqdm(args, total=len(args), desc=desc, bar_format=bar_format, ascii=ascii, **tqdm_kwargs)]
+		else:
+			return [func(arg) for arg in args]
+
+
+def multithreading[T, R](
+	func: Callable[..., R] | list[Callable[..., R]],
+	args: Iterable[T],
+	use_starmap: bool = False,
+	desc: str = "",
+	max_workers: int | float = CPU_COUNT,
+	delay_first_calls: float = 0,
+	color: str = MAGENTA,
+	bar_format: str = BAR_FORMAT,
+	ascii: bool = False,
+	smooth_tqdm: bool = True,
+	**tqdm_kwargs: Any
+) -> list[R]:
+	r""" Method to execute a function in parallel using multithreading, you should use it:
+
+	- For I/O-bound operations where the GIL is not a bottleneck, such as network requests or disk operations.
+	- When the task involves waiting for external resources, such as network responses or user input.
+	- For operations that involve a lot of waiting, such as GUI event handling or handling user input.
+
+	Args:
+		func (Callable | list[Callable]): Function to execute, or list of functions (one per argument)
+		args (Iterable): Iterable of arguments to pass to the function(s)
+		use_starmap (bool): Whether to use starmap or not (Defaults to False):
+			True means the function will be called like func(\*args[i]) instead of func(args[i])
+		desc (str): Description displayed in the progress bar
+			(if not provided no progress bar will be displayed)
+		max_workers (int | float): Number of workers to use (Defaults to CPU_COUNT), -1 means CPU_COUNT.
+			If float between 0 and 1, it's treated as a percentage of CPU_COUNT.
+			If negative float between -1 and 0, it's treated as a percentage of len(args).
+		delay_first_calls (float): Apply i*delay_first_calls seconds delay to the first "max_workers" calls.
+			For instance with value to 1, the first thread will be delayed by 0 seconds, the second by 1 second, etc.
+			(Defaults to 0): This can be useful to avoid functions being called in the same second.
+		color (str): Color of the progress bar (Defaults to MAGENTA)
+		bar_format (str): Format of the progress bar (Defaults to BAR_FORMAT)
+		ascii (bool): Whether to use ASCII or Unicode characters for the progress bar
+		smooth_tqdm (bool): Whether to enable smooth progress bar updates by setting miniters and mininterval (Defaults to True)
+		**tqdm_kwargs (Any): Additional keyword arguments to pass to tqdm
+
+	Returns:
+		list[object]: Results of the function execution
+
+	Examples:
+		.. code-block:: python
+
+			> multithreading(doctest_square, args=[1, 2, 3])
+			[1, 4, 9]
+
+			> multithreading(int.__mul__, [(1,2), (3,4), (5,6)], use_starmap=True)
+			[2, 12, 30]
+
+			> # Using a list of functions (one per argument)
+			> multithreading([doctest_square, doctest_square, doctest_square], [1, 2, 3])
+			[1, 4, 9]
+
+			> # Will process in parallel with progress bar
+			> multithreading(doctest_slow, range(10), desc="Threading")
+			[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
+
+			> # Will process in parallel with progress bar and delay the first threads
+			> multithreading(
+			. doctest_slow,
+			. range(10),
+			. desc="Threading with delay",
+			. max_workers=2,
+			. delay_first_calls=0.6
+			. )
+			[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
+	"""
+	# Imports
+	from concurrent.futures import ThreadPoolExecutor
+
+	from tqdm.auto import tqdm
+
+	# Handle parameters
+	args = list(args) # Ensure we have a list (not other iterable)
+	if max_workers == -1:
+		max_workers = CPU_COUNT
+	if isinstance(max_workers, float):
+		if max_workers > 0:
+			assert max_workers <= 1, "max_workers as positive float must be between 0 and 1 (percentage of CPU_COUNT)"
+			max_workers = int(max_workers * CPU_COUNT)
+		else:
+			assert -1 <= max_workers < 0, "max_workers as negative float must be between -1 and 0 (percentage of len(args))"
+			max_workers = int(-max_workers * len(args))
+	verbose: bool = desc != ""
+	desc, func, args = handle_parameters(func, args, use_starmap, delay_first_calls, max_workers, desc, color)
+	if bar_format == BAR_FORMAT:
+		bar_format = bar_format.replace(MAGENTA, color)
+	if smooth_tqdm:
+		tqdm_kwargs.setdefault("mininterval", 0.0)
+		try:
+			total = len(args) # type: ignore
+			import shutil
+			width = shutil.get_terminal_size().columns
+			tqdm_kwargs.setdefault("miniters", max(1, total // width))
+		except (TypeError, OSError):
+			tqdm_kwargs.setdefault("miniters", 1)
+
+	# Do multithreading only if there is more than 1 argument and more than 1 CPU
+	if max_workers > 1 and len(args) > 1:
+		if verbose:
+			with ThreadPoolExecutor(max_workers) as executor:
+				return list(tqdm(executor.map(func, args), total=len(args), desc=desc, bar_format=bar_format, ascii=ascii, **tqdm_kwargs))
+		else:
+			with ThreadPoolExecutor(max_workers) as executor:
+				return list(executor.map(func, args))
+
+	# Single process execution
+	else:
+		if verbose:
+			return [func(arg) for arg in tqdm(args, total=len(args), desc=desc, bar_format=bar_format, ascii=ascii, **tqdm_kwargs)]
+		else:
+			return [func(arg) for arg in args]
+
+
+# "Private" function for capturing multiprocessing subprocess
+def capture_subprocess_output[T, R](args: tuple[CaptureOutput, Callable[[T], R], T]) -> R:
+	""" Wrapper function to execute the target function in a subprocess with optional output capture.
+
+	Args:
+		tuple[CaptureOutput,Callable,T]: Tuple containing:
+			CaptureOutput: Capturer object to redirect stdout/stderr
+			Callable: Target function to execute
+			T: Argument to pass to the target function
+	"""
+	capturer, func, arg = args
+	capturer.redirect()
+	return func(arg)
+
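
The practical difference from the old parallel.py is the capture_output flag, which wraps each call in capture_subprocess_output so worker stdout/stderr is relayed through the new CaptureOutput helper, alongside the existing nice and delay_first_calls knobs. A hedged usage sketch; the import path assumes multiprocessing() is still exposed from stouputils.parallel, and work() is a made-up example function:

from stouputils.parallel import multiprocessing  # assumed re-export path

def work(x: int) -> int:
	print(f"processing {x}")  # with capture_output=True, worker prints reach the parent process
	return x * x

if __name__ == "__main__":  # guard needed for spawn-based starts (e.g. Windows)
	results = multiprocessing(
		work,
		range(8),
		desc="Squares",        # non-empty desc turns on the tqdm progress bar
		capture_output=True,   # added in this release: capture worker stdout/stderr
		nice=10,               # lower worker priority (Unix-style niceness)
	)
	print(results)  # [0, 1, 4, 9, 16, 25, 36, 49]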

stouputils/{parallel.pyi → parallel/multi.pyi} +14 -112
@@ -1,16 +1,13 @@
-from
-from
-from
-from
+from ..ctx import SetMPStartMethod as SetMPStartMethod
+from ..print import BAR_FORMAT as BAR_FORMAT, MAGENTA as MAGENTA
+from .capturer import CaptureOutput as CaptureOutput
+from .common import CPU_COUNT as CPU_COUNT, handle_parameters as handle_parameters, nice_wrapper as nice_wrapper
+from collections.abc import Callable as Callable, Iterable
+from typing import Any

 def doctest_square(x: int) -> int: ...
 def doctest_slow(x: int) -> int: ...
-
-CPU_COUNT: int
-T = TypeVar('T')
-R = TypeVar('R')
-
-def multiprocessing[T, R](func: Callable[..., R] | list[Callable[..., R]], args: Iterable[T], use_starmap: bool = False, chunksize: int = 1, desc: str = '', max_workers: int | float = ..., delay_first_calls: float = 0, nice: int | None = None, color: str = ..., bar_format: str = ..., ascii: bool = False, smooth_tqdm: bool = True, **tqdm_kwargs: Any) -> list[R]:
+def multiprocessing[T, R](func: Callable[..., R] | list[Callable[..., R]], args: Iterable[T], use_starmap: bool = False, chunksize: int = 1, desc: str = '', max_workers: int | float = ..., capture_output: bool = False, delay_first_calls: float = 0, nice: int | None = None, color: str = ..., bar_format: str = ..., ascii: bool = False, smooth_tqdm: bool = True, **tqdm_kwargs: Any) -> list[R]:
 ''' Method to execute a function in parallel using multiprocessing

 \t- For CPU-bound operations where the GIL (Global Interpreter Lock) is a bottleneck.
@@ -29,6 +26,7 @@ def multiprocessing[T, R](func: Callable[..., R] | list[Callable[..., R]], args:
 \t\tmax_workers\t\t\t(int | float):\t\tNumber of workers to use (Defaults to CPU_COUNT), -1 means CPU_COUNT.
 \t\t\tIf float between 0 and 1, it\'s treated as a percentage of CPU_COUNT.
 \t\t\tIf negative float between -1 and 0, it\'s treated as a percentage of len(args).
+\t\tcapture_output\t\t(bool):\t\t\t\tWhether to capture stdout/stderr from the worker processes (Defaults to True)
 \t\tdelay_first_calls\t(float):\t\t\tApply i*delay_first_calls seconds delay to the first "max_workers" calls.
 \t\t\tFor instance, the first process will be delayed by 0 seconds, the second by 1 second, etc.
 \t\t\t(Defaults to 0): This can be useful to avoid functions being called in the same second.
@@ -129,108 +127,12 @@ def multithreading[T, R](func: Callable[..., R] | list[Callable[..., R]], args:
 \t\t\t. )
 \t\t\t[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
 \t'''
-def
-
-
-\tThis is useful when you need to run a function in isolation to avoid memory leaks,
-\tresource conflicts, or to ensure a clean execution environment. The subprocess will
-\tbe created, run the function with the provided arguments, and return the result.
-
-\tArgs:
-\t\tfunc (Callable): The function to execute in a subprocess.
-\t\t\t(SHOULD BE A TOP-LEVEL FUNCTION TO BE PICKLABLE)
-\t\t*args (Any): Positional arguments to pass to the function.
-\t\ttimeout (float | None): Maximum time in seconds to wait for the subprocess.
-\t\t\tIf None, wait indefinitely. If the subprocess exceeds this time, it will be terminated.
-\t\tno_join (bool): If True, do not wait for the subprocess to finish (fire-and-forget).
-\t\t**kwargs (Any): Keyword arguments to pass to the function.
-
-\tReturns:
-\t\tR: The return value of the function.
-
-\tRaises:
-\t\tRuntimeError: If the subprocess exits with a non-zero exit code or times out.
-\t\tTimeoutError: If the subprocess exceeds the specified timeout.
-
-\tExamples:
-\t\t.. code-block:: python
-
-\t\t\t> # Simple function execution
-\t\t\t> run_in_subprocess(doctest_square, 5)
-\t\t\t25
-
-\t\t\t> # Function with multiple arguments
-\t\t\t> def add(a: int, b: int) -> int:
-\t\t\t. return a + b
-\t\t\t> run_in_subprocess(add, 10, 20)
-\t\t\t30
-
-\t\t\t> # Function with keyword arguments
-\t\t\t> def greet(name: str, greeting: str = "Hello") -> str:
-\t\t\t. return f"{greeting}, {name}!"
-\t\t\t> run_in_subprocess(greet, "World", greeting="Hi")
-\t\t\t\'Hi, World!\'
-
-\t\t\t> # With timeout to prevent hanging
-\t\t\t> run_in_subprocess(some_gpu_func, data, timeout=300.0)
-\t'''
-def _nice_wrapper[T, R](args: tuple[int, Callable[[T], R], T]) -> R:
-""" Wrapper that applies nice priority then executes the function.
-
-\tArgs:
-\t\targs (tuple): Tuple containing (nice_value, func, arg)
-
-\tReturns:
-\t\tR: Result of the function execution
-\t"""
-def _set_process_priority(nice_value: int) -> None:
-""" Set the priority of the current process.
+def capture_subprocess_output[T, R](args: tuple[CaptureOutput, Callable[[T], R], T]) -> R:
+""" Wrapper function to execute the target function in a subprocess with optional output capture.

 \tArgs:
-\t\
+\t\ttuple[CaptureOutput,Callable,T]: Tuple containing:
+\t\t\tCaptureOutput: Capturer object to redirect stdout/stderr
+\t\t\tCallable: Target function to execute
+\t\t\tT: Argument to pass to the target function
 \t"""
-def _subprocess_wrapper[R](result_queue: Any, func: Callable[..., R], args: tuple[Any, ...], kwargs: dict[str, Any]) -> None:
-""" Wrapper function to execute the target function and store the result in the queue.
-
-\tMust be at module level to be pickable on Windows (spawn context).
-
-\tArgs:
-\t\tresult_queue (multiprocessing.Queue | None): Queue to store the result or exception (None if detached).
-\t\tfunc (Callable): The target function to execute.
-\t\targs (tuple): Positional arguments for the function.
-\t\tkwargs (dict): Keyword arguments for the function.
-\t"""
-def _starmap[T, R](args: tuple[Callable[[T], R], list[T]]) -> R:
-""" Private function to use starmap using args[0](\\*args[1])
-
-\tArgs:
-\t\targs (tuple): Tuple containing the function and the arguments list to pass to the function
-\tReturns:
-\t\tobject: Result of the function execution
-\t"""
-def _delayed_call[T, R](args: tuple[Callable[[T], R], float, T]) -> R:
-""" Private function to apply delay before calling the target function
-
-\tArgs:
-\t\targs (tuple): Tuple containing the function, delay in seconds, and the argument to pass to the function
-\tReturns:
-\t\tobject: Result of the function execution
-\t"""
-def _handle_parameters[T, R](func: Callable[[T], R] | list[Callable[[T], R]], args: list[T], use_starmap: bool, delay_first_calls: float, max_workers: int, desc: str, color: str) -> tuple[str, Callable[[T], R], list[T]]:
-''' Private function to handle the parameters for multiprocessing or multithreading functions
-
-\tArgs:
-\t\tfunc\t\t\t\t(Callable | list[Callable]):\tFunction to execute, or list of functions (one per argument)
-\t\targs\t\t\t\t(list):\t\t\t\tList of arguments to pass to the function(s)
-\t\tuse_starmap\t\t\t(bool):\t\t\t\tWhether to use starmap or not (Defaults to False):
-\t\t\tTrue means the function will be called like func(\\*args[i]) instead of func(args[i])
-\t\tdelay_first_calls\t(int):\t\t\t\tApply i*delay_first_calls seconds delay to the first "max_workers" calls.
-\t\t\tFor instance, the first process will be delayed by 0 seconds, the second by 1 second, etc. (Defaults to 0):
-\t\t\tThis can be useful to avoid functions being called in the same second.
-\t\tmax_workers\t\t\t(int):\t\t\t\tNumber of workers to use (Defaults to CPU_COUNT)
-\t\tdesc\t\t\t\t(str):\t\t\t\tDescription of the function execution displayed in the progress bar
-\t\tcolor\t\t\t\t(str):\t\t\t\tColor of the progress bar
-
-\tReturns:
-\t\ttuple[str, Callable[[T], R], list[T]]:\tTuple containing the description, function, and arguments
-\t'''
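
Apart from the added capture_output parameter on multiprocessing(), this stub diff mostly relocates declarations: run_in_subprocess moves to subprocess.pyi and the underscore-prefixed helpers become the public names in common.pyi. The max_workers float handling kept in the context lines is worth a concrete illustration; resolve_workers below is an illustrative helper that mirrors the checks in multi.py above, assuming CPU_COUNT == 8 and 100 arguments:

CPU_COUNT = 8  # assumed for the example
args = list(range(100))

def resolve_workers(max_workers: int | float, n_args: int) -> int:
	# Mirrors the max_workers normalisation in multiprocessing()/multithreading().
	if max_workers == -1:
		return CPU_COUNT
	if isinstance(max_workers, float):
		if max_workers > 0:
			assert max_workers <= 1
			return int(max_workers * CPU_COUNT)
		assert -1 <= max_workers < 0
		return int(-max_workers * n_args)
	return max_workers

print(resolve_workers(0.5, len(args)))    # 4  (50% of CPU_COUNT)
print(resolve_workers(-0.25, len(args)))  # 25 (25% of len(args))
print(resolve_workers(-1, len(args)))     # 8  (-1 means CPU_COUNT)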

stouputils/parallel/subprocess.py +163 -0
@@ -0,0 +1,163 @@
+
+# Imports
+import time
+from collections.abc import Callable
+from typing import Any
+
+from .capturer import CaptureOutput
+
+
+def run_in_subprocess[R](
+	func: Callable[..., R],
+	*args: Any,
+	timeout: float | None = None,
+	no_join: bool = False,
+	capture_output: bool = False,
+	**kwargs: Any
+) -> R:
+	""" Execute a function in a subprocess with positional and keyword arguments.
+
+	This is useful when you need to run a function in isolation to avoid memory leaks,
+	resource conflicts, or to ensure a clean execution environment. The subprocess will
+	be created, run the function with the provided arguments, and return the result.
+
+	Args:
+		func (Callable): The function to execute in a subprocess.
+			(SHOULD BE A TOP-LEVEL FUNCTION TO BE PICKLABLE)
+		*args (Any): Positional arguments to pass to the function.
+		timeout (float | None): Maximum time in seconds to wait for the subprocess.
+			If None, wait indefinitely. If the subprocess exceeds this time, it will be terminated.
+		no_join (bool): If True, do not wait for the subprocess to finish (fire-and-forget).
+		capture_output (bool): If True, capture the subprocess' stdout/stderr and relay it
+			in real time to the parent's stdout. This enables seeing print() output
+			from the subprocess in the main process.
+		**kwargs (Any): Keyword arguments to pass to the function.
+
+	Returns:
+		R: The return value of the function.
+
+	Raises:
+		RuntimeError: If the subprocess exits with a non-zero exit code or times out.
+		TimeoutError: If the subprocess exceeds the specified timeout.
+
+	Examples:
+		.. code-block:: python
+
+			> # Simple function execution
+			> run_in_subprocess(doctest_square, 5)
+			25
+
+			> # Function with multiple arguments
+			> def add(a: int, b: int) -> int:
+			. return a + b
+			> run_in_subprocess(add, 10, 20)
+			30
+
+			> # Function with keyword arguments
+			> def greet(name: str, greeting: str = "Hello") -> str:
+			. return f"{greeting}, {name}!"
+			> run_in_subprocess(greet, "World", greeting="Hi")
+			'Hi, World!'
+
+			> # With timeout to prevent hanging
+			> run_in_subprocess(some_gpu_func, data, timeout=300.0)
+	"""
+	import multiprocessing as mp
+	from multiprocessing import Queue
+
+	# Create a queue to get the result from the subprocess (only if we need to wait)
+	result_queue: Queue[R | Exception] | None = None if no_join else Queue()
+
+	# Optionally setup output capture pipe and listener
+	capturer: CaptureOutput | None = None
+	if capture_output:
+		capturer = CaptureOutput()
+
+	# Create and start the subprocess using the module-level wrapper
+	process: mp.Process = mp.Process(
+		target=_subprocess_wrapper,
+		args=(result_queue, func, args, kwargs),
+		kwargs={"_capturer": capturer}
+	)
+	process.start()
+
+	# For capture_output we must close the parent's copy of the write fd and start listener
+	if capturer is not None:
+		capturer.parent_close_write()
+		capturer.start_listener()
+
+	# Detach process if no_join (fire-and-forget)
+	if result_queue is None:
+		# If capturing, leave listener running in background (daemon)
+		return None # type: ignore
+
+	# Use a single try/finally to ensure we always drain the listener once
+	# and avoid repeating join calls in multiple branches.
+	try:
+		process.join(timeout=timeout)
+
+		# Check if process is still alive (timed out)
+		if process.is_alive():
+			process.terminate()
+			time.sleep(0.5) # Give it a moment to terminate gracefully
+			if process.is_alive():
+				process.kill()
+			process.join()
+			raise TimeoutError(f"Subprocess exceeded timeout of {timeout} seconds and was terminated")
+
+		# Check exit code
+		if process.exitcode != 0:
+			# Try to get any exception from the queue (non-blocking)
+			if not result_queue.empty():
+				result_or_exception = result_queue.get_nowait()
+				if isinstance(result_or_exception, Exception):
+					raise result_or_exception
+			raise RuntimeError(f"Subprocess failed with exit code {process.exitcode}")
+
+		# Retrieve the result
+		try:
+			result_or_exception = result_queue.get_nowait()
+			if isinstance(result_or_exception, Exception):
+				raise result_or_exception
+			return result_or_exception
+		except Exception as e:
+			raise RuntimeError("Subprocess did not return any result") from e
+	finally:
+		if capturer is not None:
+			capturer.join_listener(timeout=5.0)
+
+
+# "Private" function for subprocess wrapper (must be at module level for pickling on Windows)
+def _subprocess_wrapper[R](
+	result_queue: Any,
+	func: Callable[..., R],
+	args: tuple[Any, ...],
+	kwargs: dict[str, Any],
+	_capturer: CaptureOutput | None = None
+) -> None:
+	""" Wrapper function to execute the target function and store the result in the queue.
+
+	Must be at module level to be pickable on Windows (spawn context).
+
+	Args:
+		result_queue (multiprocessing.Queue | None): Queue to store the result or exception (None if detached).
+		func (Callable): The target function to execute.
+		args (tuple): Positional arguments for the function.
+		kwargs (dict): Keyword arguments for the function.
+		_capturer (CaptureOutput | None): Optional CaptureOutput instance for stdout capture.
+	"""
+	try:
+		# If a CaptureOutput instance was passed, redirect stdout/stderr to the pipe.
+		if _capturer is not None:
+			_capturer.redirect()
+
+		# Execute the target function and put the result in the queue
+		result: R = func(*args, **kwargs)
+		if result_queue is not None:
+			result_queue.put(result)
+
+	# Handle cleanup and exceptions
+	except Exception as e:
+		if result_queue is not None:
+			result_queue.put(e)
+
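
run_in_subprocess() gains the same capture_output option, wired through the _capturer keyword of _subprocess_wrapper. A hedged usage sketch; the import path assumes the function is still exposed from stouputils.parallel, and train() plus its return value are invented for illustration:

from stouputils.parallel import run_in_subprocess  # assumed re-export path

def train(epochs: int, lr: float = 1e-3) -> float:
	# Must be a top-level (picklable) function, as the docstring requires.
	print(f"training for {epochs} epochs at lr={lr}")  # reaches the parent when capture_output=True
	return 0.123

if __name__ == "__main__":
	loss = run_in_subprocess(train, 5, lr=3e-4, timeout=60.0, capture_output=True)
	print(loss)  # 0.123, passed back through the internal result queue

	# Fire-and-forget: returns immediately and never joins the child process.
	run_in_subprocess(train, 1, no_join=True)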