stouputils 1.16.3__py3-none-any.whl → 1.18.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- stouputils/__init__.py +1 -0
- stouputils/__init__.pyi +1 -0
- stouputils/all_doctests.py +1 -1
- stouputils/collections.py +2 -5
- stouputils/collections.pyi +2 -4
- stouputils/continuous_delivery/stubs.py +1 -1
- stouputils/ctx.py +1 -3
- stouputils/ctx.pyi +1 -3
- stouputils/decorators.py +1 -1
- stouputils/image.py +8 -10
- stouputils/image.pyi +4 -6
- stouputils/io.py +22 -1
- stouputils/io.pyi +7 -1
- stouputils/lock/__init__.py +36 -0
- stouputils/lock/__init__.pyi +5 -0
- stouputils/lock/base.py +536 -0
- stouputils/lock/base.pyi +169 -0
- stouputils/lock/queue.py +377 -0
- stouputils/lock/queue.pyi +131 -0
- stouputils/lock/re_entrant.py +115 -0
- stouputils/lock/re_entrant.pyi +81 -0
- stouputils/lock/redis_fifo.py +299 -0
- stouputils/lock/redis_fifo.pyi +123 -0
- stouputils/lock/shared.py +30 -0
- stouputils/lock/shared.pyi +16 -0
- stouputils/parallel/__init__.py +29 -0
- stouputils/parallel/__init__.pyi +4 -0
- stouputils/parallel/capturer.py +133 -0
- stouputils/parallel/capturer.pyi +38 -0
- stouputils/parallel/common.py +134 -0
- stouputils/parallel/common.pyi +53 -0
- stouputils/parallel/multi.py +309 -0
- stouputils/{parallel.pyi → parallel/multi.pyi} +14 -112
- stouputils/parallel/subprocess.py +163 -0
- stouputils/parallel/subprocess.pyi +64 -0
- stouputils/print.py +2 -3
- stouputils/print.pyi +1 -2
- {stouputils-1.16.3.dist-info → stouputils-1.18.0.dist-info}/METADATA +4 -1
- {stouputils-1.16.3.dist-info → stouputils-1.18.0.dist-info}/RECORD +41 -21
- stouputils/parallel.py +0 -556
- {stouputils-1.16.3.dist-info → stouputils-1.18.0.dist-info}/WHEEL +0 -0
- {stouputils-1.16.3.dist-info → stouputils-1.18.0.dist-info}/entry_points.txt +0 -0
|
@@ -0,0 +1,29 @@
|
|
|
1
|
+
"""
|
|
2
|
+
This module provides utility functions for parallel processing, such as:
|
|
3
|
+
|
|
4
|
+
- multiprocessing(): Execute a function in parallel using multiprocessing
|
|
5
|
+
- multithreading(): Execute a function in parallel using multithreading
|
|
6
|
+
- run_in_subprocess(): Execute a function in a subprocess with args and kwargs
|
|
7
|
+
|
|
8
|
+
I highly encourage you to read the function docstrings to understand when to use each method.
|
|
9
|
+
|
|
10
|
+
Priority (nice) mapping for multiprocessing():
|
|
11
|
+
|
|
12
|
+
- Unix-style values from -20 (highest priority) to 19 (lowest priority)
|
|
13
|
+
- Windows automatic mapping:
|
|
14
|
+
* -20 to -10: HIGH_PRIORITY_CLASS
|
|
15
|
+
* -9 to -1: ABOVE_NORMAL_PRIORITY_CLASS
|
|
16
|
+
* 0: NORMAL_PRIORITY_CLASS
|
|
17
|
+
* 1 to 9: BELOW_NORMAL_PRIORITY_CLASS
|
|
18
|
+
* 10 to 19: IDLE_PRIORITY_CLASS
|
|
19
|
+
|
|
20
|
+
.. image:: https://raw.githubusercontent.com/Stoupy51/stouputils/refs/heads/main/assets/parallel_module.gif
|
|
21
|
+
:alt: stouputils parallel examples
|
|
22
|
+
"""
|
|
23
|
+
|
|
24
|
+
# Imports
|
|
25
|
+
from .capturer import *
|
|
26
|
+
from .common import *
|
|
27
|
+
from .multi import *
|
|
28
|
+
from .subprocess import *
|
|
29
|
+
|
|
@@ -0,0 +1,133 @@
|
|
|
1
|
+
|
|
2
|
+
# Imports
|
|
3
|
+
import os
|
|
4
|
+
from typing import IO, Any
|
|
5
|
+
|
|
6
|
+
from ..io import safe_close
|
|
7
|
+
|
|
8
|
+
|
|
9
|
+
class PipeWriter:
	""" File-like text writer that forwards everything to a multiprocessing Connection.

	Each write() encodes the text with the configured encoding/error policy and
	pushes the raw bytes through the connection, so a listener on the other end
	can relay the stream. Nothing is buffered locally.
	"""
	def __init__(self, conn: Any, encoding: str, errors: str):
		self.conn: Any = conn
		self.encoding: str = encoding
		self.errors: str = errors

	def write(self, data: str) -> int:
		""" Encode the text and send it through the connection.

		Returns:
			int: Number of characters written (file-object convention)
		"""
		payload: bytes = data.encode(self.encoding, errors=self.errors)
		self.conn.send_bytes(payload)
		return len(data)

	def flush(self) -> None:
		""" No-op: data is sent immediately on write, there is nothing to flush. """
+
|
|
24
|
+
class CaptureOutput:
	""" Utility to capture stdout/stderr from a subprocess and relay it to the parent's stdout.

	The class creates an os.pipe(), marks fds as inheritable (for spawn method),
	provides methods to start a listener thread that reads from the pipe and writes
	to the main process's sys.stdout/sys.stderr, and to close/join the listener.
	"""
	def __init__(self, encoding: str = "utf-8", errors: str = "replace", chunk_size: int = 1024):
		# Local imports keep the module import light and avoid eager multiprocessing setup
		import multiprocessing as mp
		import threading
		self.encoding: str = encoding
		self.errors: str = errors
		self.chunk_size: int = chunk_size
		# One-way pipe: writers call send_bytes on write_conn, the listener reads read_conn
		self.read_conn, self.write_conn = mp.Pipe(duplex=False)
		self.read_fd = self.read_conn.fileno()
		self.write_fd = self.write_conn.fileno()
		# Internal state for the listener thread and reader handle
		self._thread: threading.Thread | None = None
		self._reader_file: IO[Any] | None = None
		# Sentinel string that will terminate the listener when seen in the stream
		# NOTE(review): no sentinel attribute is actually assigned below; the listener
		# terminates on EOFError instead — this comment looks stale, confirm and remove
		try:
			# Best-effort: mark both fds inheritable so a child created with the
			# "spawn" start method can still use them (may fail on some platforms)
			os.set_inheritable(self.read_fd, True)
			os.set_inheritable(self.write_fd, True)
		except Exception:
			pass

	def __repr__(self) -> str:
		# Expose both fd numbers for quick debugging of the pipe wiring
		return f"<CaptureOutput read_fd={self.read_fd} write_fd={self.write_fd}>"

	# Pickle support: exclude unpicklable attributes
	def __getstate__(self) -> dict[str, Any]:
		# Threads cannot be pickled; the Connection objects are kept in the state —
		# presumably transferred via multiprocessing's fd-inheritance machinery (confirm)
		state = self.__dict__.copy()
		state["_thread"] = None
		return state

	def redirect(self) -> None:
		""" Redirect sys.stdout and sys.stderr to the pipe's write end. """
		import sys
		# Both streams share one writer, so stdout and stderr interleave in the pipe
		writer = PipeWriter(self.write_conn, self.encoding, self.errors)
		sys.stdout = writer
		sys.stderr = writer

	def parent_close_write(self) -> None:
		""" Close the parent's copy of the write end; the child's copy remains. """
		# NOTE(review): safe_close(fd) followed by Connection.close() touches the same
		# descriptor twice — assumes safe_close tolerates that; verify its semantics
		safe_close(self.write_fd)
		self.write_conn.close()
		self.write_fd = -1 # Prevent accidental reuse

	def start_listener(self) -> None:
		""" Start a daemon thread that forwards data from the pipe to sys.stdout/sys.stderr. """
		import sys
		# Idempotent: calling again while a listener is alive does nothing
		if self._thread is not None:
			return

		# Handler function for reading from the pipe
		buffer: str = ""
		def _handle_buffer() -> None:
			# Flush accumulated text to the real stdout, swallowing write errors
			nonlocal buffer
			if buffer:
				try:
					sys.stdout.write(buffer)
					sys.stdout.flush()
				except Exception:
					pass
			buffer = ""

		# Thread target function
		def _reader() -> None:
			nonlocal buffer
			try:
				while True:
					# Read a chunk from the pipe, stop loop on error
					try:
						data: bytes = self.read_conn.recv_bytes(self.chunk_size)
					except EOFError:
						# Writer side closed: flush whatever is left, then exit
						_handle_buffer()
						break

					# Decode bytes to text & append to buffer
					try:
						chunk: str = data.decode(self.encoding, errors=self.errors)
					except Exception:
						# Fall back to lossless-ish decoding rather than dropping output
						chunk = data.decode(self.encoding, errors="replace")
					buffer += chunk

					# Periodically flush large buffers to avoid holding too much memory
					if len(buffer) > self.chunk_size * 4:
						_handle_buffer()
			finally:
				safe_close(self.read_fd)
				self.read_conn.close()
				self.read_fd = -1
				self._thread = None # Mark thread as stopped so callers don't block unnecessarily

		# Start the listener thread
		import threading
		self._thread = threading.Thread(target=_reader, daemon=True)
		self._thread.start()

	def join_listener(self, timeout: float | None = None) -> None:
		""" Wait for the listener thread to finish (until EOF). """
		# No listener running: make sure the read side is closed, then bail out
		if self._thread is None:
			safe_close(self.read_fd)
			return self.read_conn.close() # close() returns None, so this is just an early return
		self._thread.join(timeout)

		# If thread finished, ensure read fd is closed and clear thread
		# (the reader's finally block already closed the fd; this only clears the handle)
		if self._thread and not self._thread.is_alive():
			self._thread = None
+
|
|
@@ -0,0 +1,38 @@
|
|
|
1
|
+
# Fix: the annotation "_thread: threading.Thread | None" below referenced the
# `threading` module without importing it, making the stub unresolvable.
import threading
from typing import IO, Any

from _typeshed import Incomplete

from ..io import safe_close as safe_close

class PipeWriter:
	""" A writer that sends data to a multiprocessing Connection. """
	conn: Any
	encoding: str
	errors: str
	def __init__(self, conn: Any, encoding: str, errors: str) -> None: ...
	def write(self, data: str) -> int: ...
	def flush(self) -> None: ...

class CaptureOutput:
	""" Utility to capture stdout/stderr from a subprocess and relay it to the parent's stdout.

	The class creates an os.pipe(), marks fds as inheritable (for spawn method),
	provides methods to start a listener thread that reads from the pipe and writes
	to the main process's sys.stdout/sys.stderr, and to close/join the listener.
	"""
	encoding: str
	errors: str
	chunk_size: int
	read_fd: Incomplete
	write_fd: Incomplete
	_thread: threading.Thread | None
	_reader_file: IO[Any] | None
	def __init__(self, encoding: str = 'utf-8', errors: str = 'replace', chunk_size: int = 1024) -> None: ...
	def __repr__(self) -> str: ...
	def __getstate__(self) -> dict[str, Any]: ...
	def redirect(self) -> None:
		""" Redirect sys.stdout and sys.stderr to the pipe's write end. """
	def parent_close_write(self) -> None:
		""" Close the parent's copy of the write end; the child's copy remains. """
	def start_listener(self) -> None:
		""" Start a daemon thread that forwards data from the pipe to sys.stdout/sys.stderr. """
	def join_listener(self, timeout: float | None = None) -> None:
		""" Wait for the listener thread to finish (until EOF). """
|
@@ -0,0 +1,134 @@
|
|
|
1
|
+
|
|
2
|
+
# Imports
|
|
3
|
+
import os
|
|
4
|
+
import time
|
|
5
|
+
from collections.abc import Callable
|
|
6
|
+
from typing import cast
|
|
7
|
+
|
|
8
|
+
# Constants
# Number of logical CPUs. os.cpu_count() can return None on platforms where the
# count is undeterminable; the previous cast(int, ...) only silenced the type
# checker while leaving None at runtime — fall back to 1 instead.
CPU_COUNT: int = os.cpu_count() or 1
+
|
|
12
|
+
# "Private" function to wrap function execution with nice priority (must be at module level for pickling)
|
|
13
|
+
def nice_wrapper[T, R](args: tuple[int, Callable[[T], R], T]) -> R:
|
|
14
|
+
""" Wrapper that applies nice priority then executes the function.
|
|
15
|
+
|
|
16
|
+
Args:
|
|
17
|
+
args (tuple): Tuple containing (nice_value, func, arg)
|
|
18
|
+
|
|
19
|
+
Returns:
|
|
20
|
+
R: Result of the function execution
|
|
21
|
+
"""
|
|
22
|
+
nice_value, func, arg = args
|
|
23
|
+
set_process_priority(nice_value)
|
|
24
|
+
return func(arg)
|
|
25
|
+
|
|
26
|
+
# "Private" function to set process priority (must be at module level for pickling on Windows)
|
|
27
|
+
def set_process_priority(nice_value: int) -> None:
|
|
28
|
+
""" Set the priority of the current process.
|
|
29
|
+
|
|
30
|
+
Args:
|
|
31
|
+
nice_value (int): Unix-style priority value (-20 to 19)
|
|
32
|
+
"""
|
|
33
|
+
try:
|
|
34
|
+
import sys
|
|
35
|
+
if sys.platform == "win32":
|
|
36
|
+
# Map Unix nice values to Windows priority classes
|
|
37
|
+
# -20 to -10: HIGH, -9 to -1: ABOVE_NORMAL, 0: NORMAL, 1-9: BELOW_NORMAL, 10-19: IDLE
|
|
38
|
+
import ctypes
|
|
39
|
+
# Windows priority class constants
|
|
40
|
+
if nice_value <= -10:
|
|
41
|
+
priority = 0x00000080 # HIGH_PRIORITY_CLASS
|
|
42
|
+
elif nice_value < 0:
|
|
43
|
+
priority = 0x00008000 # ABOVE_NORMAL_PRIORITY_CLASS
|
|
44
|
+
elif nice_value == 0:
|
|
45
|
+
priority = 0x00000020 # NORMAL_PRIORITY_CLASS
|
|
46
|
+
elif nice_value < 10:
|
|
47
|
+
priority = 0x00004000 # BELOW_NORMAL_PRIORITY_CLASS
|
|
48
|
+
else:
|
|
49
|
+
priority = 0x00000040 # IDLE_PRIORITY_CLASS
|
|
50
|
+
kernel32 = ctypes.windll.kernel32
|
|
51
|
+
handle = kernel32.GetCurrentProcess()
|
|
52
|
+
kernel32.SetPriorityClass(handle, priority)
|
|
53
|
+
else:
|
|
54
|
+
# Unix-like systems
|
|
55
|
+
os.nice(nice_value)
|
|
56
|
+
except Exception:
|
|
57
|
+
pass # Silently ignore if we can't set priority
|
|
58
|
+
|
|
59
|
+
# "Private" function to use starmap using args[0](*args[1])
|
|
60
|
+
def starmap[T, R](args: tuple[Callable[[T], R], list[T]]) -> R:
|
|
61
|
+
r""" Private function to use starmap using args[0](\*args[1])
|
|
62
|
+
|
|
63
|
+
Args:
|
|
64
|
+
args (tuple): Tuple containing the function and the arguments list to pass to the function
|
|
65
|
+
Returns:
|
|
66
|
+
object: Result of the function execution
|
|
67
|
+
"""
|
|
68
|
+
func, arguments = args
|
|
69
|
+
return func(*arguments)
|
|
70
|
+
|
|
71
|
+
# "Private" function to apply delay before calling the target function
|
|
72
|
+
def delayed_call[T, R](args: tuple[Callable[[T], R], float, T]) -> R:
|
|
73
|
+
""" Private function to apply delay before calling the target function
|
|
74
|
+
|
|
75
|
+
Args:
|
|
76
|
+
args (tuple): Tuple containing the function, delay in seconds, and the argument to pass to the function
|
|
77
|
+
Returns:
|
|
78
|
+
object: Result of the function execution
|
|
79
|
+
"""
|
|
80
|
+
func, delay, arg = args
|
|
81
|
+
time.sleep(delay)
|
|
82
|
+
return func(arg)
|
|
83
|
+
|
|
84
|
+
# "Private" function to handle parameters for multiprocessing or multithreading functions
|
|
85
|
+
def handle_parameters[T, R](
|
|
86
|
+
func: Callable[[T], R] | list[Callable[[T], R]],
|
|
87
|
+
args: list[T],
|
|
88
|
+
use_starmap: bool,
|
|
89
|
+
delay_first_calls: float,
|
|
90
|
+
max_workers: int,
|
|
91
|
+
desc: str,
|
|
92
|
+
color: str
|
|
93
|
+
) -> tuple[str, Callable[[T], R], list[T]]:
|
|
94
|
+
r""" Private function to handle the parameters for multiprocessing or multithreading functions
|
|
95
|
+
|
|
96
|
+
Args:
|
|
97
|
+
func (Callable | list[Callable]): Function to execute, or list of functions (one per argument)
|
|
98
|
+
args (list): List of arguments to pass to the function(s)
|
|
99
|
+
use_starmap (bool): Whether to use starmap or not (Defaults to False):
|
|
100
|
+
True means the function will be called like func(\*args[i]) instead of func(args[i])
|
|
101
|
+
delay_first_calls (int): Apply i*delay_first_calls seconds delay to the first "max_workers" calls.
|
|
102
|
+
For instance, the first process will be delayed by 0 seconds, the second by 1 second, etc. (Defaults to 0):
|
|
103
|
+
This can be useful to avoid functions being called in the same second.
|
|
104
|
+
max_workers (int): Number of workers to use
|
|
105
|
+
desc (str): Description of the function execution displayed in the progress bar
|
|
106
|
+
color (str): Color of the progress bar
|
|
107
|
+
|
|
108
|
+
Returns:
|
|
109
|
+
tuple[str, Callable[[T], R], list[T]]: Tuple containing the description, function, and arguments
|
|
110
|
+
"""
|
|
111
|
+
desc = color + desc
|
|
112
|
+
|
|
113
|
+
# Handle list of functions: validate and convert to starmap format
|
|
114
|
+
if isinstance(func, list):
|
|
115
|
+
func = cast(list[Callable[[T], R]], func)
|
|
116
|
+
assert len(func) == len(args), f"Length mismatch: {len(func)} functions but {len(args)} arguments"
|
|
117
|
+
args = [(f, arg if use_starmap else (arg,)) for f, arg in zip(func, args, strict=False)] # type: ignore
|
|
118
|
+
func = starmap # type: ignore
|
|
119
|
+
|
|
120
|
+
# If use_starmap is True, we use the _starmap function
|
|
121
|
+
elif use_starmap:
|
|
122
|
+
args = [(func, arg) for arg in args] # type: ignore
|
|
123
|
+
func = starmap # type: ignore
|
|
124
|
+
|
|
125
|
+
# Prepare delayed function calls if delay_first_calls is set
|
|
126
|
+
if delay_first_calls > 0:
|
|
127
|
+
args = [
|
|
128
|
+
(func, i * delay_first_calls if i < max_workers else 0, arg) # type: ignore
|
|
129
|
+
for i, arg in enumerate(args)
|
|
130
|
+
]
|
|
131
|
+
func = delayed_call # type: ignore
|
|
132
|
+
|
|
133
|
+
return desc, func, args # type: ignore
|
|
134
|
+
|
|
@@ -0,0 +1,53 @@
|
|
|
1
|
+
# Type stubs for the parallel "common" helpers (keep in sync with common.py)
from collections.abc import Callable

# Number of logical CPUs detected at import time (see common.py)
CPU_COUNT: int

def nice_wrapper[T, R](args: tuple[int, Callable[[T], R], T]) -> R:
	""" Wrapper that applies nice priority then executes the function.

	Args:
		args (tuple): Tuple containing (nice_value, func, arg)

	Returns:
		R: Result of the function execution
	"""
def set_process_priority(nice_value: int) -> None:
	""" Set the priority of the current process.

	Args:
		nice_value (int): Unix-style priority value (-20 to 19)
	"""
def starmap[T, R](args: tuple[Callable[[T], R], list[T]]) -> R:
	""" Private function to use starmap using args[0](\\*args[1])

	Args:
		args (tuple): Tuple containing the function and the arguments list to pass to the function
	Returns:
		object: Result of the function execution
	"""
def delayed_call[T, R](args: tuple[Callable[[T], R], float, T]) -> R:
	""" Private function to apply delay before calling the target function

	Args:
		args (tuple): Tuple containing the function, delay in seconds, and the argument to pass to the function
	Returns:
		object: Result of the function execution
	"""
def handle_parameters[T, R](func: Callable[[T], R] | list[Callable[[T], R]], args: list[T], use_starmap: bool, delay_first_calls: float, max_workers: int, desc: str, color: str) -> tuple[str, Callable[[T], R], list[T]]:
	''' Private function to handle the parameters for multiprocessing or multithreading functions

	Args:
		func				(Callable | list[Callable]):	Function to execute, or list of functions (one per argument)
		args				(list):				List of arguments to pass to the function(s)
		use_starmap			(bool):				Whether to use starmap or not (Defaults to False):
			True means the function will be called like func(\\*args[i]) instead of func(args[i])
		delay_first_calls	(int):				Apply i*delay_first_calls seconds delay to the first "max_workers" calls.
			For instance, the first process will be delayed by 0 seconds, the second by 1 second, etc. (Defaults to 0):
			This can be useful to avoid functions being called in the same second.
		max_workers			(int):				Number of workers to use
		desc				(str):				Description of the function execution displayed in the progress bar
		color				(str):				Color of the progress bar

	Returns:
		tuple[str, Callable[[T], R], list[T]]:	Tuple containing the description, function, and arguments
	'''