ez-a-sync 0.32.29__cp310-cp310-win32.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of ez-a-sync might be problematic. Click here for more details.

Files changed (177) hide show
  1. a_sync/ENVIRONMENT_VARIABLES.py +42 -0
  2. a_sync/__init__.pxd +2 -0
  3. a_sync/__init__.py +145 -0
  4. a_sync/_smart.c +22803 -0
  5. a_sync/_smart.cp310-win32.pyd +0 -0
  6. a_sync/_smart.pxd +2 -0
  7. a_sync/_smart.pyi +202 -0
  8. a_sync/_smart.pyx +674 -0
  9. a_sync/_typing.py +258 -0
  10. a_sync/a_sync/__init__.py +60 -0
  11. a_sync/a_sync/_descriptor.c +20528 -0
  12. a_sync/a_sync/_descriptor.cp310-win32.pyd +0 -0
  13. a_sync/a_sync/_descriptor.pyi +33 -0
  14. a_sync/a_sync/_descriptor.pyx +422 -0
  15. a_sync/a_sync/_flags.c +6074 -0
  16. a_sync/a_sync/_flags.cp310-win32.pyd +0 -0
  17. a_sync/a_sync/_flags.pxd +3 -0
  18. a_sync/a_sync/_flags.pyx +92 -0
  19. a_sync/a_sync/_helpers.c +14521 -0
  20. a_sync/a_sync/_helpers.cp310-win32.pyd +0 -0
  21. a_sync/a_sync/_helpers.pxd +3 -0
  22. a_sync/a_sync/_helpers.pyi +10 -0
  23. a_sync/a_sync/_helpers.pyx +167 -0
  24. a_sync/a_sync/_kwargs.c +12194 -0
  25. a_sync/a_sync/_kwargs.cp310-win32.pyd +0 -0
  26. a_sync/a_sync/_kwargs.pxd +2 -0
  27. a_sync/a_sync/_kwargs.pyx +64 -0
  28. a_sync/a_sync/_meta.py +210 -0
  29. a_sync/a_sync/abstract.c +12411 -0
  30. a_sync/a_sync/abstract.cp310-win32.pyd +0 -0
  31. a_sync/a_sync/abstract.pyi +141 -0
  32. a_sync/a_sync/abstract.pyx +221 -0
  33. a_sync/a_sync/base.c +14932 -0
  34. a_sync/a_sync/base.cp310-win32.pyd +0 -0
  35. a_sync/a_sync/base.pyi +60 -0
  36. a_sync/a_sync/base.pyx +271 -0
  37. a_sync/a_sync/config.py +168 -0
  38. a_sync/a_sync/decorator.py +651 -0
  39. a_sync/a_sync/flags.c +5272 -0
  40. a_sync/a_sync/flags.cp310-win32.pyd +0 -0
  41. a_sync/a_sync/flags.pxd +72 -0
  42. a_sync/a_sync/flags.pyi +74 -0
  43. a_sync/a_sync/flags.pyx +72 -0
  44. a_sync/a_sync/function.c +37846 -0
  45. a_sync/a_sync/function.cp310-win32.pyd +0 -0
  46. a_sync/a_sync/function.pxd +28 -0
  47. a_sync/a_sync/function.pyi +571 -0
  48. a_sync/a_sync/function.pyx +1381 -0
  49. a_sync/a_sync/method.c +29774 -0
  50. a_sync/a_sync/method.cp310-win32.pyd +0 -0
  51. a_sync/a_sync/method.pxd +9 -0
  52. a_sync/a_sync/method.pyi +525 -0
  53. a_sync/a_sync/method.pyx +1023 -0
  54. a_sync/a_sync/modifiers/__init__.pxd +1 -0
  55. a_sync/a_sync/modifiers/__init__.py +101 -0
  56. a_sync/a_sync/modifiers/cache/__init__.py +160 -0
  57. a_sync/a_sync/modifiers/cache/memory.py +165 -0
  58. a_sync/a_sync/modifiers/limiter.py +132 -0
  59. a_sync/a_sync/modifiers/manager.c +16149 -0
  60. a_sync/a_sync/modifiers/manager.cp310-win32.pyd +0 -0
  61. a_sync/a_sync/modifiers/manager.pxd +5 -0
  62. a_sync/a_sync/modifiers/manager.pyi +219 -0
  63. a_sync/a_sync/modifiers/manager.pyx +299 -0
  64. a_sync/a_sync/modifiers/semaphores.py +173 -0
  65. a_sync/a_sync/property.c +27260 -0
  66. a_sync/a_sync/property.cp310-win32.pyd +0 -0
  67. a_sync/a_sync/property.pyi +376 -0
  68. a_sync/a_sync/property.pyx +819 -0
  69. a_sync/a_sync/singleton.py +63 -0
  70. a_sync/aliases.py +3 -0
  71. a_sync/async_property/__init__.pxd +1 -0
  72. a_sync/async_property/__init__.py +1 -0
  73. a_sync/async_property/cached.c +20386 -0
  74. a_sync/async_property/cached.cp310-win32.pyd +0 -0
  75. a_sync/async_property/cached.pxd +10 -0
  76. a_sync/async_property/cached.pyi +45 -0
  77. a_sync/async_property/cached.pyx +178 -0
  78. a_sync/async_property/proxy.c +34654 -0
  79. a_sync/async_property/proxy.cp310-win32.pyd +0 -0
  80. a_sync/async_property/proxy.pxd +2 -0
  81. a_sync/async_property/proxy.pyi +124 -0
  82. a_sync/async_property/proxy.pyx +474 -0
  83. a_sync/asyncio/__init__.pxd +6 -0
  84. a_sync/asyncio/__init__.py +164 -0
  85. a_sync/asyncio/as_completed.c +18841 -0
  86. a_sync/asyncio/as_completed.cp310-win32.pyd +0 -0
  87. a_sync/asyncio/as_completed.pxd +8 -0
  88. a_sync/asyncio/as_completed.pyi +109 -0
  89. a_sync/asyncio/as_completed.pyx +269 -0
  90. a_sync/asyncio/create_task.c +15902 -0
  91. a_sync/asyncio/create_task.cp310-win32.pyd +0 -0
  92. a_sync/asyncio/create_task.pxd +2 -0
  93. a_sync/asyncio/create_task.pyi +51 -0
  94. a_sync/asyncio/create_task.pyx +271 -0
  95. a_sync/asyncio/gather.c +16679 -0
  96. a_sync/asyncio/gather.cp310-win32.pyd +0 -0
  97. a_sync/asyncio/gather.pyi +107 -0
  98. a_sync/asyncio/gather.pyx +218 -0
  99. a_sync/asyncio/igather.c +12676 -0
  100. a_sync/asyncio/igather.cp310-win32.pyd +0 -0
  101. a_sync/asyncio/igather.pxd +1 -0
  102. a_sync/asyncio/igather.pyi +7 -0
  103. a_sync/asyncio/igather.pyx +182 -0
  104. a_sync/asyncio/sleep.c +9593 -0
  105. a_sync/asyncio/sleep.cp310-win32.pyd +0 -0
  106. a_sync/asyncio/sleep.pyi +14 -0
  107. a_sync/asyncio/sleep.pyx +49 -0
  108. a_sync/debugging.c +15362 -0
  109. a_sync/debugging.cp310-win32.pyd +0 -0
  110. a_sync/debugging.pyi +76 -0
  111. a_sync/debugging.pyx +107 -0
  112. a_sync/exceptions.c +13312 -0
  113. a_sync/exceptions.cp310-win32.pyd +0 -0
  114. a_sync/exceptions.pyi +376 -0
  115. a_sync/exceptions.pyx +446 -0
  116. a_sync/executor.py +619 -0
  117. a_sync/functools.c +12738 -0
  118. a_sync/functools.cp310-win32.pyd +0 -0
  119. a_sync/functools.pxd +7 -0
  120. a_sync/functools.pyi +33 -0
  121. a_sync/functools.pyx +139 -0
  122. a_sync/future.py +1497 -0
  123. a_sync/iter.c +37271 -0
  124. a_sync/iter.cp310-win32.pyd +0 -0
  125. a_sync/iter.pxd +11 -0
  126. a_sync/iter.pyi +370 -0
  127. a_sync/iter.pyx +981 -0
  128. a_sync/primitives/__init__.pxd +1 -0
  129. a_sync/primitives/__init__.py +53 -0
  130. a_sync/primitives/_debug.c +15757 -0
  131. a_sync/primitives/_debug.cp310-win32.pyd +0 -0
  132. a_sync/primitives/_debug.pxd +12 -0
  133. a_sync/primitives/_debug.pyi +52 -0
  134. a_sync/primitives/_debug.pyx +223 -0
  135. a_sync/primitives/_loggable.c +11529 -0
  136. a_sync/primitives/_loggable.cp310-win32.pyd +0 -0
  137. a_sync/primitives/_loggable.pxd +4 -0
  138. a_sync/primitives/_loggable.pyi +66 -0
  139. a_sync/primitives/_loggable.pyx +102 -0
  140. a_sync/primitives/locks/__init__.pxd +8 -0
  141. a_sync/primitives/locks/__init__.py +17 -0
  142. a_sync/primitives/locks/counter.c +17679 -0
  143. a_sync/primitives/locks/counter.cp310-win32.pyd +0 -0
  144. a_sync/primitives/locks/counter.pxd +12 -0
  145. a_sync/primitives/locks/counter.pyi +151 -0
  146. a_sync/primitives/locks/counter.pyx +260 -0
  147. a_sync/primitives/locks/event.c +17063 -0
  148. a_sync/primitives/locks/event.cp310-win32.pyd +0 -0
  149. a_sync/primitives/locks/event.pxd +22 -0
  150. a_sync/primitives/locks/event.pyi +43 -0
  151. a_sync/primitives/locks/event.pyx +185 -0
  152. a_sync/primitives/locks/prio_semaphore.c +25590 -0
  153. a_sync/primitives/locks/prio_semaphore.cp310-win32.pyd +0 -0
  154. a_sync/primitives/locks/prio_semaphore.pxd +25 -0
  155. a_sync/primitives/locks/prio_semaphore.pyi +217 -0
  156. a_sync/primitives/locks/prio_semaphore.pyx +597 -0
  157. a_sync/primitives/locks/semaphore.c +26509 -0
  158. a_sync/primitives/locks/semaphore.cp310-win32.pyd +0 -0
  159. a_sync/primitives/locks/semaphore.pxd +21 -0
  160. a_sync/primitives/locks/semaphore.pyi +197 -0
  161. a_sync/primitives/locks/semaphore.pyx +454 -0
  162. a_sync/primitives/queue.py +1022 -0
  163. a_sync/py.typed +0 -0
  164. a_sync/sphinx/__init__.py +3 -0
  165. a_sync/sphinx/ext.py +289 -0
  166. a_sync/task.py +932 -0
  167. a_sync/utils/__init__.py +105 -0
  168. a_sync/utils/iterators.py +297 -0
  169. a_sync/utils/repr.c +15799 -0
  170. a_sync/utils/repr.cp310-win32.pyd +0 -0
  171. a_sync/utils/repr.pyi +2 -0
  172. a_sync/utils/repr.pyx +73 -0
  173. ez_a_sync-0.32.29.dist-info/METADATA +367 -0
  174. ez_a_sync-0.32.29.dist-info/RECORD +177 -0
  175. ez_a_sync-0.32.29.dist-info/WHEEL +5 -0
  176. ez_a_sync-0.32.29.dist-info/licenses/LICENSE.txt +17 -0
  177. ez_a_sync-0.32.29.dist-info/top_level.txt +1 -0
a_sync/executor.py ADDED
@@ -0,0 +1,619 @@
1
+ """
2
+ This module provides several executor classes that facilitate running synchronous functions asynchronously using `asyncio`.
3
+
4
+ With these executors, you can run sync functions in your executor with `await executor.run(fn, *args, **kwargs)`.
5
+ The `executor.submit(fn, *args, **kwargs)` method works similarly to the `concurrent.futures` implementation but
6
+ returns an `asyncio.Future` instead of a `concurrent.futures.Future`.
7
+
8
+ Executor Classes:
9
+ - :class:`AsyncProcessPoolExecutor`: A process pool executor providing asynchronous run and submit methods, with support for synchronous mode
10
+ - :class:`AsyncThreadPoolExecutor`: A thread pool executor providing asynchronous run and submit methods, with support for synchronous mode
11
+ - :class:`PruningThreadPoolExecutor`: An :class:`AsyncThreadPoolExecutor` that prunes inactive threads after a timeout, ensuring at least one thread remains active to prevent locks.
12
+
13
+ See Also:
14
+ - :mod:`concurrent.futures` for the original synchronous executor implementations.
15
+ """
16
+
17
+ import asyncio
18
+ import atexit
19
+ import concurrent.futures
20
+ import multiprocessing.context
21
+ import queue
22
+ import signal
23
+ import threading
24
+ import weakref
25
+ from asyncio import sleep
26
+ from asyncio.futures import _convert_future_exc
27
+ from concurrent.futures import _base, thread
28
+
29
+ from a_sync._typing import *
30
+ from a_sync.primitives._debug import _DebugDaemonMixin
31
+
32
+ # === Executor Shutdown Logic ===
33
+ # All executors (module-level and user-created) are registered for shutdown on interpreter exit and signals.
34
+ # Signal handlers are chainable: after our cleanup, the previous handler is called (unless SIG_DFL or SIG_IGN).
35
+ # This ensures compatibility with other libraries and deduplicates shutdown logic.
36
+
37
+ _EXECUTORS = set()
38
+
39
+
40
+ def register_executor(executor) -> None:
41
+ """Register an executor for shutdown on exit/signals."""
42
+ _EXECUTORS.add(executor)
43
+
44
+
45
+ def _shutdown_all_executors(*args) -> None:
46
+ """Shutdown all registered executors (non-blocking)."""
47
+ for executor in list(_EXECUTORS):
48
+ try:
49
+ executor.shutdown(wait=False)
50
+ except Exception:
51
+ pass
52
+
53
+
54
def _register_executor_shutdown() -> None:
    """Register atexit and chainable signal handlers for executor shutdown.

    The handlers call :func:`_shutdown_all_executors` first, then chain to
    whatever handler was installed before this module imported, so other
    libraries' signal hooks keep working.

    NOTE(review): ``signal.signal`` raises ``ValueError`` when called off the
    main thread — this assumes the module is always imported on the main
    thread; confirm.
    """
    atexit.register(_shutdown_all_executors)

    def make_chainable_signal_handler(signalnum):
        # Capture the previously installed handler so we can chain to it.
        prev_handler = signal.getsignal(signalnum)

        def handler(signum, frame):
            _shutdown_all_executors()
            # Chain only to a real Python handler. SIG_DFL / SIG_IGN are int
            # sentinels (not callable), so the callable() check already
            # excludes them; the membership test is belt-and-braces.
            if callable(prev_handler) and prev_handler not in (signal.SIG_DFL, signal.SIG_IGN):
                prev_handler(signum, frame)

        signal.signal(signalnum, handler)

    make_chainable_signal_handler(signal.SIGINT)
    make_chainable_signal_handler(signal.SIGTERM)


# Install the shutdown hooks at import time.
_register_executor_shutdown()
73
+
74
# Ten minutes, expressed in seconds; default pruning timeout for
# PruningThreadPoolExecutor.
TEN_MINUTES = 10 * 60

# Signature of the callable used to initialize worker threads/processes.
Initializer = Callable[..., object]
77
+
78
+
79
class _AsyncExecutorMixin(concurrent.futures.Executor, _DebugDaemonMixin):
    """
    A mixin for Executors to provide asynchronous run and submit methods.

    This mixin allows executors to operate in both asynchronous (normal) mode and synchronous mode.
    In asynchronous (normal) mode, functions are submitted to the executor and awaited.
    In synchronous mode, functions are executed directly in the current thread.

    Examples:
        >>> async def example():
        >>>     result = await executor.run(some_function, arg1, arg2, kwarg1=value1)
        >>>     print(result)

    See Also:
        - :meth:`submit` for submitting functions to the executor.
    """

    sync_mode: bool
    """
    Indicates if the executor is in synchronous mode (max_workers == 0).

    Examples:
        >>> if executor.sync_mode:
        >>>     print("Executor is in synchronous mode.")
    """

    _max_workers: int

    _workers: str
    """The type of workers used."""

    __slots__ = "_max_workers", "_initializer", "_initargs", "_broken", "_shutdown_lock"

    async def run(self, fn: Callable[P, T], *args: P.args, **kwargs: P.kwargs):
        """
        A shorthand way to call `await asyncio.get_event_loop().run_in_executor(this_executor, fn, *args)`.
        Doesn't `await this_executor.run(fn, *args)` look so much better?

        In synchronous mode, the function is executed directly in the current thread.
        In asynchronous mode, the function is submitted to the executor and awaited.

        Args:
            fn: The function to run.
            *args: Positional arguments for the function.
            **kwargs: Keyword arguments for the function.

        Examples:
            >>> async def example():
            >>>     result = await executor.run(some_function, arg1, arg2, kwarg1=value1)
            >>>     print(result)

        See Also:
            - :meth:`submit` for submitting functions to the executor.
        """
        return fn(*args, **kwargs) if self.sync_mode else await self.submit(fn, *args, **kwargs)

    @overload
    def submit(self, fn: Callable[P, T], *args: P.args, fire_and_forget: Literal[True], **kwargs: P.kwargs) -> None:  # type: ignore [override]
        """
        Submits a job to the executor without expecting a result back. The executor will execute the task silently.

        Args:
            fn: The function to submit.
            *args: Positional arguments for the function.
            fire_and_forget: True
            **kwargs: Keyword arguments for the function.

        Examples:
            >>> executor.submit(some_function, arg1, arg2, fire_and_forget=True, kwarg1=value1)

        See Also:
            - :meth:`run` for running functions with the executor.
        """

    @overload
    def submit(self, fn: Callable[P, T], *args: P.args, **kwargs: P.kwargs) -> "asyncio.Future[T]":  # type: ignore [override]
        """
        Submits a job to the executor and returns an :class:`asyncio.Future` that can be awaited for the result without blocking.

        Args:
            fn: The function to submit.
            *args: Positional arguments for the function.
            **kwargs: Keyword arguments for the function.

        Examples:
            >>> future = executor.submit(some_function, arg1, arg2, kwarg1=value1)
            >>> result = await future
            >>> print(result)

        See Also:
            - :meth:`run` for running functions with the executor.
        """

    def submit(self, fn: Callable[P, T], *args: P.args, fire_and_forget: bool = False, **kwargs: P.kwargs) -> Optional["asyncio.Future[T]"]:  # type: ignore [override]
        # sourcery skip: simplify-boolean-comparison
        """
        Submits a job to the executor and returns an :class:`asyncio.Future` that can be awaited for the result without blocking.

        If `fire_and_forget` is True, the executor will not return any data and instead of a :class:`~Future` this function will return `None`.

        Args:
            fn: The function to submit.
            *args: Positional arguments for the function.
            fire_and_forget (optional): Set True to send the job to the executor without expecting a result. If `fire_and_forget` is True, this function will return None instead of a Future. Default False.
            **kwargs: Keyword arguments for the function.

        Examples:
            >>> future = executor.submit(some_function, arg1, arg2, kwarg1=value1)
            >>> result = await future
            >>> print(result)

        See Also:
            - :meth:`run` for running functions with the executor.
        """
        if fire_and_forget is True:
            # Send the job to the executor and return without creating a future or setting up callbacks
            if self.sync_mode:
                fn(*args, **kwargs)
            else:
                self.__super_submit(fn, *args, **kwargs)
            return None

        loop = self._get_loop()
        fut = loop.create_future()
        if self.sync_mode:
            # Sync mode: run inline on the caller's thread and mirror the
            # outcome (result or exception) onto the asyncio future.
            try:
                _set_fut_result(fut, fn(*args, **kwargs))
            except Exception as e:
                _set_fut_exception(fut, e)
        else:
            self._ensure_debug_daemon(fut, fn, *args, **kwargs)

            cf_fut = self.__super_submit(fn, *args, **kwargs)

            # TODO: implement logic to actually cancel the job, not just the future which is useless for our use case
            # def _call_check_cancel(destination: asyncio.Future):
            #     if _fut_is_cancelled(destination):
            #         cf_fut.cancel()
            #
            # fut.add_done_callback(_call_check_cancel)

            def _call_copy_future_state(cf_fut: "concurrent.futures.Future"):
                # This done-callback fires on a worker thread; the asyncio
                # future must only be touched from its own loop's thread,
                # hence call_soon_threadsafe.
                if _fut_is_cancelled(fut):
                    return
                loop.call_soon_threadsafe(
                    _copy_future_state,
                    cf_fut,
                    fut,
                )

            _add_done_callback(cf_fut, _call_copy_future_state)

        return fut

    def __repr__(self) -> str:
        # e.g. "<AsyncThreadPoolExecutor my_prefix [3/10 threads]>"
        worker_info = f"[{self.worker_count_current}/{self._max_workers} {self._workers}]"
        identifier = getattr(self, "_thread_name_prefix", None) or hex(id(self))
        return f"<{self.__class__.__name__} {identifier} {worker_info}>"

    def __len__(self) -> int:
        # NOTE: should this be queue length instead? probably
        return self.worker_count_current

    @property
    def worker_count_current(self) -> int:
        """
        Returns the current number of workers.

        Examples:
            >>> print(f"Current worker count: {executor.worker_count_current}")
        """
        # Resolves to len(self._threads) or len(self._processes) depending on
        # the concrete subclass's `_workers` marker.
        return len(getattr(self, f"_{self._workers}"))

    def __init_mixin__(self):
        """Shared tail of subclass __init__: set sync flag, capture base submit, register for shutdown."""
        self.sync_mode = self._max_workers == 0
        # Bind the base class's submit once; `submit` is overridden on this
        # mixin, so this bound method is the only handle to the original.
        self.__super_submit = super().submit
        register_executor(self)

    async def _debug_daemon(self, fut: asyncio.Future, fn, *args, **kwargs) -> None:
        """
        Runs until manually cancelled by the finished work item.

        This code will only run if `self.logger.isEnabledFor(logging.DEBUG)` is True. You do not need to include any level checks in your custom implementations.

        Args:
            fut: The future being debugged.
            fn: The function being executed.
            *args: Positional arguments for the function.
            **kwargs: Keyword arguments for the function.

        See Also:
            - :meth:`_start_debug_daemon` to start the debug daemon.
        """
        # TODO: make prettier strings for other types
        if type(fn).__name__ == "function":
            fnid = getattr(fn, "__qualname__", fn.__name__)
            if fn.__module__:
                fnid = f"{fn.__module__}.{fnid}"
        else:
            fnid = fn

        # Build "(arg1, arg2, kw=val)" explicitly. The previous approach
        # sliced the tuple repr (msg[:-2]), which dropped the final
        # positional argument for multi-arg calls and produced a malformed
        # message for zero-arg calls.
        params = ", ".join(map(repr, args))
        if kwargs:
            kwargs_str = ", ".join(f"{k}={v}" for k, v in kwargs.items())
            params = f"{params}, {kwargs_str}" if params else kwargs_str
        msg = f"%s processing %s({params})"

        done = fut.done
        log_debug = self.logger.debug

        while not done():
            await sleep(15)
            if not done():
                log_debug(msg, self, fnid)
293
+
294
+
295
+ # Process
296
+
297
+
298
class AsyncProcessPoolExecutor(_AsyncExecutorMixin, concurrent.futures.ProcessPoolExecutor):
    """
    A :class:`concurrent.futures.ProcessPoolExecutor` subclass providing asynchronous
    run and submit methods that support kwargs, with support for synchronous mode

    Examples:
        >>> executor = AsyncProcessPoolExecutor(max_workers=4)
        >>> future = executor.submit(some_function, arg1, arg2, kwarg1='kwarg1')
        >>> result = await future
    """

    _workers = "processes"
    """The type of workers used, set to "processes"."""

    __slots__ = (
        "_mp_context",
        "_processes",
        "_pending_work_items",
        "_call_queue",
        "_result_queue",
        "_queue_management_thread",
        "_queue_count",
        "_shutdown_thread",
        "_work_ids",
        "_queue_management_thread_wakeup",
    )

    def __init__(
        self,
        max_workers: Optional[int] = None,
        mp_context: Optional[multiprocessing.context.BaseContext] = None,
        initializer: Optional[Initializer] = None,
        initargs: Tuple[Any, ...] = (),
    ) -> None:
        """
        Initializes the AsyncProcessPoolExecutor.

        Args:
            max_workers: The maximum number of workers. Defaults to None.
            mp_context: The multiprocessing context. Defaults to None.
            initializer: An initializer callable. Defaults to None.
            initargs: Arguments for the initializer. Defaults to ().

        Examples:
            >>> executor = AsyncProcessPoolExecutor(max_workers=4)
            >>> future = executor.submit(some_function, arg1, arg2)
            >>> result = await future
        """
        if max_workers == 0:
            # The base class rejects max_workers=0, so initialize it with one
            # worker, then zero out _max_workers; __init_mixin__ reads that to
            # flip sync_mode on, and submit() then runs jobs inline.
            concurrent.futures.ProcessPoolExecutor.__init__(
                self, 1, mp_context, initializer, initargs
            )
            self._max_workers = 0
        else:
            concurrent.futures.ProcessPoolExecutor.__init__(
                self, max_workers, mp_context, initializer, initargs
            )
        self.__init_mixin__()
356
+
357
+
358
+ # Thread
359
+
360
+
361
class AsyncThreadPoolExecutor(_AsyncExecutorMixin, concurrent.futures.ThreadPoolExecutor):
    """
    A :class:`concurrent.futures.ThreadPoolExecutor` subclass providing asynchronous
    run and submit methods that support kwargs, with support for synchronous mode

    Examples:
        >>> executor = AsyncThreadPoolExecutor(max_workers=10, thread_name_prefix="MyThread")
        >>> future = executor.submit(some_function, arg1, arg2, kwarg1='kwarg1')
        >>> result = await future
    """

    _workers = "threads"
    """The type of workers used, set to "threads"."""

    __slots__ = (
        "_work_queue",
        "_idle_semaphore",
        "_threads",
        "_shutdown",
        "_thread_name_prefix",
    )

    def __init__(
        self,
        max_workers: Optional[int] = None,
        thread_name_prefix: str = "",
        initializer: Optional[Initializer] = None,
        initargs: Tuple[Any, ...] = (),
    ) -> None:
        """
        Initializes the AsyncThreadPoolExecutor.

        Args:
            max_workers: The maximum number of workers. Defaults to None.
            thread_name_prefix: Prefix for thread names. Defaults to ''.
            initializer: An initializer callable. Defaults to None.
            initargs: Arguments for the initializer. Defaults to ().

        Examples:
            >>> executor = AsyncThreadPoolExecutor(max_workers=10, thread_name_prefix="MyThread")
            >>> future = executor.submit(some_function, arg1, arg2)
            >>> result = await future
        """
        if max_workers == 0:
            # The base class rejects max_workers=0, so initialize it with one
            # worker, then zero out _max_workers; __init_mixin__ reads that to
            # flip sync_mode on, and submit() then runs jobs inline.
            concurrent.futures.ThreadPoolExecutor.__init__(
                self, 1, thread_name_prefix, initializer, initargs
            )
            self._max_workers = 0
        else:
            concurrent.futures.ThreadPoolExecutor.__init__(
                self, max_workers, thread_name_prefix, initializer, initargs
            )
        self.__init_mixin__()
414
+
415
+
416
# Union of the asynchronous executor types provided by this module.
AsyncExecutor = Union[AsyncThreadPoolExecutor, AsyncProcessPoolExecutor]

# For backward-compatibility: older releases exported these classes under the
# stdlib-style names.
ThreadPoolExecutor = AsyncThreadPoolExecutor
ProcessPoolExecutor = AsyncProcessPoolExecutor
421
+
422
+ # Pruning thread pool
423
+
424
+
425
+ def _worker(executor_reference, work_queue, initializer, initargs, timeout): # NOTE: NEW 'timeout'
426
+ """
427
+ Worker function for the PruningThreadPoolExecutor.
428
+
429
+ Args:
430
+ executor_reference: A weak reference to the executor.
431
+ work_queue: The work queue.
432
+ initializer: The initializer function.
433
+ initargs: Arguments for the initializer.
434
+ timeout: Timeout duration for pruning inactive threads.
435
+
436
+ See Also:
437
+ - :class:`PruningThreadPoolExecutor` for more details on thread pruning.
438
+ """
439
+ if initializer is not None:
440
+ try:
441
+ initializer(*initargs)
442
+ except BaseException:
443
+ _base.LOGGER.critical("Exception in initializer:", exc_info=True)
444
+ executor = executor_reference()
445
+ if executor is not None:
446
+ executor._initializer_failed()
447
+ return
448
+
449
+ try:
450
+ while True:
451
+ try: # NOTE: NEW
452
+ work_item = work_queue.get(block=True, timeout=timeout) # NOTE: NEW
453
+ except queue.Empty: # NOTE: NEW
454
+ # Its been 'timeout' seconds and there are no new work items. # NOTE: NEW
455
+ # Let's suicide the thread. # NOTE: NEW
456
+ executor = executor_reference() # NOTE: NEW
457
+
458
+ with executor._adjusting_lock: # NOTE: NEW
459
+ # NOTE: We keep a minimum of one thread active to prevent locks
460
+ if len(executor) > 1: # NOTE: NEW
461
+ t = threading.current_thread() # NOTE: NEW
462
+ executor._threads.remove(t) # NOTE: NEW
463
+ thread._threads_queues.pop(t) # NOTE: NEW
464
+ # Let the executor know we have one less idle thread available
465
+ executor._idle_semaphore.acquire(blocking=False) # NOTE: NEW
466
+ return # NOTE: NEW
467
+ continue
468
+
469
+ if work_item is not None:
470
+ work_item.run()
471
+ # Delete references to object. See issue16284
472
+ del work_item
473
+
474
+ # attempt to increment idle count
475
+ executor = executor_reference()
476
+ if executor is not None:
477
+ executor._idle_semaphore.release()
478
+ del executor
479
+ continue
480
+
481
+ executor = executor_reference()
482
+ # Exit if:
483
+ # - The interpreter is shutting down OR
484
+ # - The executor that owns the worker has been collected OR
485
+ # - The executor that owns the worker has been shutdown OR
486
+ if thread._shutdown or executor is None or executor._shutdown:
487
+ # Flag the executor as shutting down as early as possible if it is not gc-ed yet.
488
+ if executor is not None:
489
+ executor._shutdown = True
490
+ # Notice other workers
491
+ work_queue.put(None)
492
+ return
493
+ del executor
494
+ except BaseException:
495
+ _base.LOGGER.critical("Exception in worker", exc_info=True)
496
+
497
+
498
class PruningThreadPoolExecutor(AsyncThreadPoolExecutor):
    """
    This :class:`~AsyncThreadPoolExecutor` implementation prunes inactive threads after 'timeout' seconds without a work item.
    Pruned threads will be automatically recreated as needed for future workloads. Up to 'max_threads' can be active at any one time.
    The executor ensures that at least one active thread remains to prevent locks.

    Note:
        The `_worker` function includes a check (`len(executor) > 1`) to ensure that at least one thread remains active.
        This prevents the executor from having zero active threads, which could lead to deadlocks.

    Examples:
        >>> executor = PruningThreadPoolExecutor(max_workers=5, timeout=300)
        >>> future = executor.submit(some_function, arg1, arg2, kwarg1='kwarg1')
        >>> result = await future
    """

    __slots__ = "_timeout", "_adjusting_lock"

    def __init__(
        self,
        max_workers=None,
        thread_name_prefix="",
        initializer=None,
        initargs=(),
        timeout=TEN_MINUTES,
    ):
        """
        Initializes the PruningThreadPoolExecutor.

        Args:
            max_workers: The maximum number of workers. Defaults to None.
            thread_name_prefix: Prefix for thread names. Defaults to ''.
            initializer: An initializer callable. Defaults to None.
            initargs: Arguments for the initializer. Defaults to ().
            timeout: Timeout duration for pruning inactive threads. Defaults to TEN_MINUTES.

        Examples:
            >>> executor = PruningThreadPoolExecutor(max_workers=5, timeout=300)
            >>> future = executor.submit(some_function, arg1, arg2)
            >>> result = await future
        """

        # Pruning state is assigned before the base initializer runs so these
        # attributes exist before any worker thread could reference them.
        self._timeout = timeout
        """Timeout duration for pruning inactive threads."""

        self._adjusting_lock = threading.Lock()
        """Lock used to adjust the number of threads."""

        AsyncThreadPoolExecutor.__init__(
            self, max_workers, thread_name_prefix, initializer, initargs
        )

    def __len__(self) -> int:
        # Current thread count; also consumed by _worker's
        # `len(executor) > 1` keep-one-thread-alive check.
        return len(self._threads)

    def _adjust_thread_count(self):
        """
        Adjusts the number of threads based on workload and idle threads.

        Overrides the stdlib implementation to serialize adjustments behind
        ``_adjusting_lock`` (shared with the pruning logic in :func:`_worker`)
        and to pass ``self._timeout`` to the custom worker target.

        See Also:
            - :func:`_worker` for the worker function that handles thread pruning.
        """
        with self._adjusting_lock:
            # if idle threads are available, don't spin new threads
            if self._idle_semaphore.acquire(timeout=0):
                return

            # When the executor gets lost, the weakref callback will wake up
            # the worker threads.
            def weakref_cb(_, q=self._work_queue):
                q.put(None)

            num_threads = len(self._threads)
            if num_threads < self._max_workers:
                thread_name = "%s_%d" % (self._thread_name_prefix or self, num_threads)
                t = threading.Thread(
                    name=thread_name,
                    target=_worker,
                    args=(
                        weakref.ref(self, weakref_cb),
                        self._work_queue,
                        self._initializer,
                        self._initargs,
                        self._timeout,
                    ),
                )
                # Daemon threads so a pending queue never blocks interpreter exit.
                t.daemon = True
                t.start()
                self._threads.add(t)
                # Registering in the stdlib's thread->queue map lets interpreter
                # shutdown wake this worker like any stdlib pool thread.
                thread._threads_queues[t] = self._work_queue
588
+
589
+
590
+ def _copy_future_state(cf_fut: concurrent.futures.Future, fut: asyncio.Future):
591
+ """Internal helper to copy state from another Future.
592
+
593
+ The other Future may be a concurrent.futures.Future.
594
+ """
595
+ # check this again in case it was cancelled since the last check
596
+ if _fut_is_cancelled(fut):
597
+ return
598
+ exception = _get_cf_fut_exception(cf_fut)
599
+ if exception is None:
600
+ _set_fut_result(fut, _get_cf_fut_result(cf_fut))
601
+ else:
602
+ _set_fut_exception(fut, _convert_future_exc(exception))
603
+
604
+
605
+ _fut_is_cancelled = asyncio.Future.cancelled
606
+ _get_cf_fut_result = concurrent.futures.Future.result
607
+ _get_cf_fut_exception = concurrent.futures.Future.exception
608
+ _set_fut_result = asyncio.Future.set_result
609
+ _set_fut_exception = asyncio.Future.set_exception
610
+ _add_done_callback = concurrent.futures.Future.add_done_callback
611
+
612
+
613
# Default module-level executor. Threads are only spawned on demand (see
# PruningThreadPoolExecutor._adjust_thread_count) and pruned after idling,
# so the high worker cap is cheap until actually used.
executor = PruningThreadPoolExecutor(128)

__all__ = [
    "AsyncThreadPoolExecutor",
    "AsyncProcessPoolExecutor",
    "PruningThreadPoolExecutor",
]