ez-a-sync 0.22.14__py3-none-any.whl → 0.22.16__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of ez-a-sync might be problematic.
- a_sync/ENVIRONMENT_VARIABLES.py +37 -5
- a_sync/__init__.py +53 -12
- a_sync/_smart.py +231 -28
- a_sync/_typing.py +112 -15
- a_sync/a_sync/__init__.py +35 -10
- a_sync/a_sync/_descriptor.py +248 -38
- a_sync/a_sync/_flags.py +78 -9
- a_sync/a_sync/_helpers.py +46 -13
- a_sync/a_sync/_kwargs.py +33 -8
- a_sync/a_sync/_meta.py +149 -28
- a_sync/a_sync/abstract.py +150 -28
- a_sync/a_sync/base.py +34 -16
- a_sync/a_sync/config.py +85 -14
- a_sync/a_sync/decorator.py +441 -139
- a_sync/a_sync/function.py +709 -147
- a_sync/a_sync/method.py +437 -110
- a_sync/a_sync/modifiers/__init__.py +85 -5
- a_sync/a_sync/modifiers/cache/__init__.py +116 -17
- a_sync/a_sync/modifiers/cache/memory.py +130 -20
- a_sync/a_sync/modifiers/limiter.py +101 -22
- a_sync/a_sync/modifiers/manager.py +142 -16
- a_sync/a_sync/modifiers/semaphores.py +121 -15
- a_sync/a_sync/property.py +383 -82
- a_sync/a_sync/singleton.py +44 -19
- a_sync/aliases.py +0 -1
- a_sync/asyncio/__init__.py +140 -1
- a_sync/asyncio/as_completed.py +213 -79
- a_sync/asyncio/create_task.py +70 -20
- a_sync/asyncio/gather.py +125 -58
- a_sync/asyncio/utils.py +3 -3
- a_sync/exceptions.py +248 -26
- a_sync/executor.py +164 -69
- a_sync/future.py +1227 -168
- a_sync/iter.py +173 -56
- a_sync/primitives/__init__.py +14 -2
- a_sync/primitives/_debug.py +72 -18
- a_sync/primitives/_loggable.py +41 -10
- a_sync/primitives/locks/__init__.py +5 -2
- a_sync/primitives/locks/counter.py +107 -38
- a_sync/primitives/locks/event.py +21 -7
- a_sync/primitives/locks/prio_semaphore.py +262 -63
- a_sync/primitives/locks/semaphore.py +138 -89
- a_sync/primitives/queue.py +601 -60
- a_sync/sphinx/__init__.py +0 -1
- a_sync/sphinx/ext.py +160 -50
- a_sync/task.py +313 -112
- a_sync/utils/__init__.py +12 -6
- a_sync/utils/iterators.py +170 -50
- {ez_a_sync-0.22.14.dist-info → ez_a_sync-0.22.16.dist-info}/METADATA +1 -1
- ez_a_sync-0.22.16.dist-info/RECORD +74 -0
- {ez_a_sync-0.22.14.dist-info → ez_a_sync-0.22.16.dist-info}/WHEEL +1 -1
- tests/conftest.py +1 -2
- tests/executor.py +250 -9
- tests/fixtures.py +61 -32
- tests/test_abstract.py +22 -4
- tests/test_as_completed.py +54 -21
- tests/test_base.py +264 -19
- tests/test_cache.py +31 -15
- tests/test_decorator.py +54 -28
- tests/test_executor.py +31 -13
- tests/test_future.py +45 -8
- tests/test_gather.py +8 -2
- tests/test_helpers.py +2 -0
- tests/test_iter.py +55 -13
- tests/test_limiter.py +5 -3
- tests/test_meta.py +23 -9
- tests/test_modified.py +4 -1
- tests/test_semaphore.py +15 -8
- tests/test_singleton.py +28 -11
- tests/test_task.py +162 -36
- ez_a_sync-0.22.14.dist-info/RECORD +0 -74
- {ez_a_sync-0.22.14.dist-info → ez_a_sync-0.22.16.dist-info}/LICENSE.txt +0 -0
- {ez_a_sync-0.22.14.dist-info → ez_a_sync-0.22.16.dist-info}/top_level.txt +0 -0
a_sync/executor.py
CHANGED
@@ -1,13 +1,13 @@
 """
 With these executors, you can simply run sync functions in your executor with `await executor.run(fn, *args)`.
 
-`executor.submit(fn, *args)` will work the same as the concurrent.futures implementation, but will return an asyncio.Future instead of a concurrent.futures.Future
+`executor.submit(fn, *args)` will work the same as the `concurrent.futures` implementation, but will return an `asyncio.Future` instead of a `concurrent.futures.Future`.
 
 This module provides several executor classes:
-- _AsyncExecutorMixin: A mixin providing asynchronous run and submit methods.
+- _AsyncExecutorMixin: A mixin providing asynchronous run and submit methods, with support for synchronous mode.
 - AsyncProcessPoolExecutor: An async process pool executor.
 - AsyncThreadPoolExecutor: An async thread pool executor.
-- PruningThreadPoolExecutor: A thread pool executor that prunes inactive threads after a timeout.
+- PruningThreadPoolExecutor: A thread pool executor that prunes inactive threads after a timeout, ensuring at least one thread remains active.
 """
 
 import asyncio
@@ -27,42 +27,62 @@ TEN_MINUTES = 60 * 10
 
 Initializer = Callable[..., object]
 
+
 class _AsyncExecutorMixin(cf.Executor, _DebugDaemonMixin):
     """
     A mixin for Executors to provide asynchronous run and submit methods.
     """
+
     _max_workers: int
+
     _workers: str
+    """The type of workers used."""
+
     __slots__ = "_max_workers", "_initializer", "_initargs", "_broken", "_shutdown_lock"
 
-    async def run(self, fn: Callable[P, T], *args: P.args, **kwargs: P.kwargs)
+    async def run(self, fn: Callable[P, T], *args: P.args, **kwargs: P.kwargs):
         """
-        A shorthand way to call `await asyncio.get_event_loop().run_in_executor(this_executor, fn, *args)
+        A shorthand way to call `await asyncio.get_event_loop().run_in_executor(this_executor, fn, *args)`.
         Doesn't `await this_executor.run(fn, *args)` look so much better?
-
-
+
+        In synchronous mode, the function is executed directly in the current thread.
+        In asynchronous mode, the function is submitted to the executor and awaited.
 
         Args:
-            fn
+            fn: The function to run.
             *args: Positional arguments for the function.
             **kwargs: Keyword arguments for the function.
 
-
-
+        Examples:
+            >>> async def example():
+            >>> result = await executor.run(some_function, arg1, arg2, kwarg1=value1)
+            >>> print(result)
+
+        See Also:
+            - :meth:`submit` for submitting functions to the executor.
         """
-        return
+        return (
+            fn(*args, **kwargs)
+            if self.sync_mode
+            else await self.submit(fn, *args, **kwargs)
+        )
 
     def submit(self, fn: Callable[P, T], *args: P.args, **kwargs: P.kwargs) -> "asyncio.Future[T]":  # type: ignore [override]
         """
-        Submits a job to the executor and returns an asyncio.Future that can be awaited for the result without blocking.
+        Submits a job to the executor and returns an `asyncio.Future` that can be awaited for the result without blocking.
 
         Args:
-            fn
+            fn: The function to submit.
             *args: Positional arguments for the function.
             **kwargs: Keyword arguments for the function.
 
-
-
+        Examples:
+            >>> future = executor.submit(some_function, arg1, arg2, kwarg1=value1)
+            >>> result = await future
+            >>> print(result)
+
+        See Also:
+            - :meth:`run` for running functions with the executor.
         """
         if self.sync_mode:
             fut = asyncio.get_event_loop().create_future()
@@ -87,8 +107,9 @@ class _AsyncExecutorMixin(cf.Executor, _DebugDaemonMixin):
         """
         Indicates if the executor is in synchronous mode (max_workers == 0).
 
-
-
+        Examples:
+            >>> if executor.sync_mode:
+            >>> print("Executor is in synchronous mode.")
         """
         return self._max_workers == 0
 
@@ -97,8 +118,8 @@ class _AsyncExecutorMixin(cf.Executor, _DebugDaemonMixin):
         """
         Returns the current number of workers.
 
-
-
+        Examples:
+            >>> print(f"Current worker count: {executor.worker_count_current}")
         """
         return len(getattr(self, f"_{self._workers}"))
 
@@ -107,14 +128,17 @@ class _AsyncExecutorMixin(cf.Executor, _DebugDaemonMixin):
         Runs until manually cancelled by the finished work item.
 
         Args:
-            fut
+            fut: The future being debugged.
             fn: The function being executed.
             *args: Positional arguments for the function.
             **kwargs: Keyword arguments for the function.
+
+        See Also:
+            - :meth:`_start_debug_daemon` to start the debug daemon.
         """
         # TODO: make prettier strings for other types
         if type(fn).__name__ == "function":
-            fnid = getattr(fn,
+            fnid = getattr(fn, "__qualname__", fn.__name__)
             if fn.__module__:
                 fnid = f"{fn.__module__}.{fnid}"
         else:
@@ -125,27 +149,44 @@ class _AsyncExecutorMixin(cf.Executor, _DebugDaemonMixin):
             msg = f"{msg[:-1]} {', '.join(f'{k}={v}' for k, v in kwargs.items())})"
         else:
             msg = f"{msg[:-2]})"
-
+
         while not fut.done():
             await asyncio.sleep(15)
             if not fut.done():
                 self.logger.debug(msg, self, fnid)
-
+
+
 # Process
 
+
 class AsyncProcessPoolExecutor(_AsyncExecutorMixin, cf.ProcessPoolExecutor):
     """
     An async process pool executor that allows use of kwargs.
+
+    Attributes:
+        _workers:
     """
+
     _workers = "processes"
-
-
-
+    """The type of workers used, set to "processes"."""
+
+    __slots__ = (
+        "_mp_context",
+        "_processes",
+        "_pending_work_items",
+        "_call_queue",
+        "_result_queue",
+        "_queue_management_thread",
+        "_queue_count",
+        "_shutdown_thread",
+        "_work_ids",
+        "_queue_management_thread_wakeup",
+    )
 
     def __init__(
-        self,
-        max_workers: Optional[int] = None,
-        mp_context: Optional[multiprocessing.context.BaseContext] = None,
+        self,
+        max_workers: Optional[int] = None,
+        mp_context: Optional[multiprocessing.context.BaseContext] = None,
         initializer: Optional[Initializer] = None,
         initargs: Tuple[Any, ...] = (),
     ) -> None:
@@ -153,10 +194,15 @@ class AsyncProcessPoolExecutor(_AsyncExecutorMixin, cf.ProcessPoolExecutor):
         Initializes the AsyncProcessPoolExecutor.
 
         Args:
-            max_workers
-            mp_context
-            initializer
-            initargs
+            max_workers: The maximum number of workers. Defaults to None.
+            mp_context: The multiprocessing context. Defaults to None.
+            initializer: An initializer callable. Defaults to None.
+            initargs: Arguments for the initializer. Defaults to ().
+
+        Examples:
+            >>> executor = AsyncProcessPoolExecutor(max_workers=4)
+            >>> future = executor.submit(some_function, arg1, arg2)
+            >>> result = await future
         """
         if max_workers == 0:
             super().__init__(1, mp_context, initializer, initargs)
@@ -164,19 +210,30 @@ class AsyncProcessPoolExecutor(_AsyncExecutorMixin, cf.ProcessPoolExecutor):
         else:
             super().__init__(max_workers, mp_context, initializer, initargs)
 
+
 # Thread
 
+
 class AsyncThreadPoolExecutor(_AsyncExecutorMixin, cf.ThreadPoolExecutor):
     """
     An async thread pool executor that allows use of kwargs.
     """
+
     _workers = "threads"
-
+    """The type of workers used, set to "threads"."""
+
+    __slots__ = (
+        "_work_queue",
+        "_idle_semaphore",
+        "_threads",
+        "_shutdown",
+        "_thread_name_prefix",
+    )
 
     def __init__(
-        self,
-        max_workers: Optional[int] = None,
-        thread_name_prefix: str =
+        self,
+        max_workers: Optional[int] = None,
+        thread_name_prefix: str = "",
         initializer: Optional[Initializer] = None,
         initargs: Tuple[Any, ...] = (),
     ) -> None:
@@ -184,24 +241,33 @@ class AsyncThreadPoolExecutor(_AsyncExecutorMixin, cf.ThreadPoolExecutor):
         Initializes the AsyncThreadPoolExecutor.
 
         Args:
-            max_workers
-            thread_name_prefix
-            initializer
-            initargs
+            max_workers: The maximum number of workers. Defaults to None.
+            thread_name_prefix: Prefix for thread names. Defaults to ''.
+            initializer: An initializer callable. Defaults to None.
+            initargs: Arguments for the initializer. Defaults to ().
+
+        Examples:
+            >>> executor = AsyncThreadPoolExecutor(max_workers=10, thread_name_prefix="MyThread")
+            >>> future = executor.submit(some_function, arg1, arg2)
+            >>> result = await future
         """
         if max_workers == 0:
             super().__init__(1, thread_name_prefix, initializer, initargs)
             self._max_workers = 0
         else:
            super().__init__(max_workers, thread_name_prefix, initializer, initargs)
-
+
+
 # For backward-compatibility
 ProcessPoolExecutor = AsyncProcessPoolExecutor
 ThreadPoolExecutor = AsyncThreadPoolExecutor
 
 # Pruning thread pool
 
-
+
+def _worker(
+    executor_reference, work_queue, initializer, initargs, timeout
+):  # NOTE: NEW 'timeout'
     """
     Worker function for the PruningThreadPoolExecutor.
 
@@ -211,27 +277,29 @@ def _worker(executor_reference, work_queue, initializer, initargs, timeout): #
         initializer: The initializer function.
         initargs: Arguments for the initializer.
         timeout: Timeout duration for pruning inactive threads.
+
+    See Also:
+        - :class:`PruningThreadPoolExecutor` for more details on thread pruning.
     """
     if initializer is not None:
         try:
             initializer(*initargs)
         except BaseException:
-            _base.LOGGER.critical(
+            _base.LOGGER.critical("Exception in initializer:", exc_info=True)
             executor = executor_reference()
             if executor is not None:
                 executor._initializer_failed()
             return
-
+
     try:
         while True:
            try:  # NOTE: NEW
-                work_item = work_queue.get(block=True,
-                                           timeout=timeout)  # NOTE: NEW
+                work_item = work_queue.get(block=True, timeout=timeout)  # NOTE: NEW
            except queue.Empty:  # NOTE: NEW
                # Its been 'timeout' seconds and there are no new work items.  # NOTE: NEW
                # Let's suicide the thread.  # NOTE: NEW
                executor = executor_reference()  # NOTE: NEW
-
+
                with executor._adjusting_lock:  # NOTE: NEW
                    # NOTE: We keep a minimum of one thread active to prevent locks
                    if len(executor) > 1:  # NOTE: NEW
@@ -240,9 +308,9 @@ def _worker(executor_reference, work_queue, initializer, initargs, timeout): #
                        thread._threads_queues.pop(t)  # NOTE: NEW
                        # Let the executor know we have one less idle thread available
                        executor._idle_semaphore.acquire(blocking=False)  # NOTE: NEW
-                        return  # NOTE: NEW
+                        return  # NOTE: NEW
                continue
-
+
            if work_item is not None:
                work_item.run()
                # Delete references to object. See issue16284
@@ -269,37 +337,59 @@ def _worker(executor_reference, work_queue, initializer, initargs, timeout): #
                return
            del executor
    except BaseException:
-        _base.LOGGER.critical(
+        _base.LOGGER.critical("Exception in worker", exc_info=True)
+
 
 class PruningThreadPoolExecutor(AsyncThreadPoolExecutor):
     """
     This `AsyncThreadPoolExecutor` implementation prunes inactive threads after 'timeout' seconds without a work item.
     Pruned threads will be automatically recreated as needed for future workloads. Up to 'max_threads' can be active at any one time.
+    A minimum of one thread will remain active to prevent locks.
     """
+
     __slots__ = "_timeout", "_adjusting_lock"
 
-    def __init__(
-
+    def __init__(
+        self,
+        max_workers=None,
+        thread_name_prefix="",
+        initializer=None,
+        initargs=(),
+        timeout=TEN_MINUTES,
+    ):
         """
         Initializes the PruningThreadPoolExecutor.
 
         Args:
-            max_workers
-            thread_name_prefix
-            initializer
-            initargs
-            timeout
+            max_workers: The maximum number of workers. Defaults to None.
+            thread_name_prefix: Prefix for thread names. Defaults to ''.
+            initializer: An initializer callable. Defaults to None.
+            initargs: Arguments for the initializer. Defaults to ().
+            timeout: Timeout duration for pruning inactive threads. Defaults to TEN_MINUTES.
+
+        Examples:
+            >>> executor = PruningThreadPoolExecutor(max_workers=5, timeout=300)
+            >>> future = executor.submit(some_function, arg1, arg2)
+            >>> result = await future
         """
-
+
+        self._timeout = timeout
+        """Timeout duration for pruning inactive threads."""
+
        self._adjusting_lock = threading.Lock()
+        """Lock used to adjust the number of threads."""
+
        super().__init__(max_workers, thread_name_prefix, initializer, initargs)
-
+
    def __len__(self) -> int:
        return len(self._threads)
-
+
    def _adjust_thread_count(self):
        """
        Adjusts the number of threads based on workload and idle threads.
+
+        See Also:
+            - :func:`_worker` for the worker function that handles thread pruning.
        """
        with self._adjusting_lock:
            # if idle threads are available, don't spin new threads
@@ -313,19 +403,24 @@ class PruningThreadPoolExecutor(AsyncThreadPoolExecutor):
 
            num_threads = len(self._threads)
            if num_threads < self._max_workers:
-                thread_name =
-
-
-
-
-
-
-
+                thread_name = "%s_%d" % (self._thread_name_prefix or self, num_threads)
+                t = threading.Thread(
+                    name=thread_name,
+                    target=_worker,
+                    args=(
+                        weakref.ref(self, weakref_cb),
+                        self._work_queue,
+                        self._initializer,
+                        self._initargs,
+                        self._timeout,
+                    ),
+                )
                t.daemon = True
                t.start()
                self._threads.add(t)
                thread._threads_queues[t] = self._work_queue
 
+
 executor = PruningThreadPoolExecutor(128)
 
 __all__ = [