glow 0.15.2.tar.gz → 0.15.3.tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {glow-0.15.2 → glow-0.15.3}/LICENSE +21 -21
- {glow-0.15.2 → glow-0.15.3}/PKG-INFO +1 -1
- {glow-0.15.2 → glow-0.15.3}/pyproject.toml +1 -1
- {glow-0.15.2 → glow-0.15.3}/src/glow/__init__.py +3 -1
- glow-0.15.3/src/glow/_async.py +336 -0
- glow-0.15.3/src/glow/_async.pyi +108 -0
- glow-0.15.3/src/glow/_cache.py +575 -0
- glow-0.15.3/src/glow/_cache.pyi +62 -0
- {glow-0.15.2 → glow-0.15.3}/src/glow/_concurrency.py +72 -28
- {glow-0.15.2 → glow-0.15.3}/src/glow/_concurrency.pyi +10 -9
- glow-0.15.3/src/glow/_dev.py +16 -0
- {glow-0.15.2 → glow-0.15.3}/src/glow/_import_hook.py +1 -2
- {glow-0.15.2 → glow-0.15.3}/src/glow/_logging.py +6 -2
- {glow-0.15.2 → glow-0.15.3}/src/glow/_parallel.py +32 -12
- {glow-0.15.2 → glow-0.15.3}/src/glow/_parallel.pyi +16 -38
- {glow-0.15.2 → glow-0.15.3}/src/glow/_profile.py +4 -3
- {glow-0.15.2 → glow-0.15.3}/src/glow/_reusable.py +5 -8
- glow-0.15.3/src/glow/_types.py +52 -0
- glow-0.15.3/test/test_batch.py +159 -0
- glow-0.15.2/src/glow/_async.py +0 -173
- glow-0.15.2/src/glow/_async.pyi +0 -105
- glow-0.15.2/src/glow/_cache.py +0 -553
- glow-0.15.2/src/glow/_cache.pyi +0 -78
- glow-0.15.2/test/test_batch.py +0 -69
- {glow-0.15.2 → glow-0.15.3}/.gitignore +0 -0
- {glow-0.15.2 → glow-0.15.3}/README.md +0 -0
- {glow-0.15.2 → glow-0.15.3}/src/glow/_array.py +0 -0
- {glow-0.15.2 → glow-0.15.3}/src/glow/_coro.py +0 -0
- {glow-0.15.2 → glow-0.15.3}/src/glow/_debug.py +0 -0
- {glow-0.15.2 → glow-0.15.3}/src/glow/_ic.py +0 -0
- {glow-0.15.2 → glow-0.15.3}/src/glow/_imutil.py +0 -0
- {glow-0.15.2 → glow-0.15.3}/src/glow/_keys.py +0 -0
- {glow-0.15.2 → glow-0.15.3}/src/glow/_more.py +0 -0
- {glow-0.15.2 → glow-0.15.3}/src/glow/_patch_len.py +0 -0
- {glow-0.15.2 → glow-0.15.3}/src/glow/_patch_print.py +0 -0
- {glow-0.15.2 → glow-0.15.3}/src/glow/_patch_scipy.py +0 -0
- {glow-0.15.2 → glow-0.15.3}/src/glow/_profile.pyi +0 -0
- {glow-0.15.2 → glow-0.15.3}/src/glow/_reduction.py +0 -0
- {glow-0.15.2 → glow-0.15.3}/src/glow/_repr.py +0 -0
- {glow-0.15.2 → glow-0.15.3}/src/glow/_sizeof.py +0 -0
- {glow-0.15.2 → glow-0.15.3}/src/glow/_streams.py +0 -0
- {glow-0.15.2 → glow-0.15.3}/src/glow/_thread_quota.py +0 -0
- {glow-0.15.2 → glow-0.15.3}/src/glow/_uuid.py +0 -0
- {glow-0.15.2 → glow-0.15.3}/src/glow/_wrap.py +0 -0
- {glow-0.15.2 → glow-0.15.3}/src/glow/api/__init__.py +0 -0
- {glow-0.15.2 → glow-0.15.3}/src/glow/api/config.py +0 -0
- {glow-0.15.2 → glow-0.15.3}/src/glow/api/exporting.py +0 -0
- {glow-0.15.2 → glow-0.15.3}/src/glow/cli.py +0 -0
- {glow-0.15.2 → glow-0.15.3}/src/glow/cli.pyi +0 -0
- {glow-0.15.2 → glow-0.15.3}/src/glow/io/__init__.py +0 -0
- {glow-0.15.2 → glow-0.15.3}/src/glow/io/_sound.py +0 -0
- {glow-0.15.2 → glow-0.15.3}/src/glow/io/_svg.py +0 -0
- {glow-0.15.2 → glow-0.15.3}/src/glow/py.typed +0 -0
- {glow-0.15.2 → glow-0.15.3}/test/__init__.py +0 -0
- {glow-0.15.2 → glow-0.15.3}/test/test_api.py +0 -0
- {glow-0.15.2 → glow-0.15.3}/test/test_buffered.py +0 -0
- {glow-0.15.2 → glow-0.15.3}/test/test_cli.py +0 -0
- {glow-0.15.2 → glow-0.15.3}/test/test_iter.py +0 -0
- {glow-0.15.2 → glow-0.15.3}/test/test_shm.py +0 -0
- {glow-0.15.2 → glow-0.15.3}/test/test_thread_pool.py +0 -0
- {glow-0.15.2 → glow-0.15.3}/test/test_timed.py +0 -0
- {glow-0.15.2 → glow-0.15.3}/test/test_timer.py +0 -0
- {glow-0.15.2 → glow-0.15.3}/test/test_uuid.py +0 -0
{glow-0.15.2 → glow-0.15.3}/LICENSE
@@ -1,21 +1,21 @@
-MIT License
-
-Copyright (c) 2019 Paul Maevskikh
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
+MIT License
+
+Copyright (c) 2019 Paul Maevskikh
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
{glow-0.15.2 → glow-0.15.3}/src/glow/__init__.py
@@ -6,7 +6,7 @@ from typing import TYPE_CHECKING
 
 from . import _patch_len, _patch_print, _patch_scipy
 from ._array import aceil, afloor, apack, around, pascal
-from ._async import amap, astarmap, azip
+from ._async import amap, amap_dict, astarmap, astreaming, azip
 from ._cache import cache_status, memoize
 from ._concurrency import (
     call_once,
@@ -73,11 +73,13 @@ __all__ = [
     'aceil',
     'afloor',
     'amap',
+    'amap_dict',
     'apack',
     'around',
     'as_actor',
     'as_iter',
     'astarmap',
+    'astreaming',
     'azip',
     'buffered',
     'cache_status',
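
The `__init__.py` change above re-exports the two new helpers, `amap_dict` and `astreaming`, at the package top level. A minimal usage sketch (not part of the package; `fetch_size` and the URLs are hypothetical, used only for illustration):

import asyncio

from glow import amap_dict


async def fetch_size(url: str) -> int:
    # Stand-in for real async I/O.
    await asyncio.sleep(0)
    return len(url)


async def main() -> None:
    urls = {'a': 'http://x', 'b': 'http://yy'}
    # Apply `fetch_size` to every value, keeping the keys; at most 2 in flight.
    sizes = await amap_dict(fetch_size, urls, limit=2)
    print(sizes)  # {'a': 8, 'b': 9}


asyncio.run(main())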
glow-0.15.3/src/glow/_async.py
@@ -0,0 +1,336 @@
+__all__ = ['amap', 'amap_dict', 'astarmap', 'azip']
+
+import asyncio
+from asyncio import Queue, Task
+from collections import deque
+from collections.abc import (
+    AsyncIterator,
+    Callable,
+    Collection,
+    Iterable,
+    Iterator,
+    Mapping,
+    Sequence,
+)
+from contextlib import suppress
+from functools import partial
+from typing import TypeGuard, cast, overload
+
+from ._dev import declutter_tb
+from ._types import (
+    ABatchDecorator,
+    ABatchFn,
+    AnyFuture,
+    AnyIterable,
+    AnyIterator,
+    Coro,
+)
+
+type _Job[T, R] = tuple[T, AnyFuture[R]]
+
+
+async def amap_dict[K, T1, T2](
+    func: Callable[[T1], Coro[T2]],
+    obj: Mapping[K, T1],
+    /,
+    *,
+    limit: int,
+) -> dict[K, T2]:
+    """Asynchronously apply `func` to each value in a mapping.
+
+    For extra options, see astarmap, which is used under hood.
+    """
+    aiter_values = amap(func, obj.values(), limit=limit)
+    values = [v async for v in aiter_values]
+    return dict(zip(obj.keys(), values, strict=True))
+
+
+def amap[R](
+    func: Callable[..., Coro[R]],
+    /,
+    *iterables: AnyIterable,
+    limit: int,
+    unordered: bool = False,
+) -> AsyncIterator[R]:
+    """Async version of map(func, *iterables).
+
+    Make an iterator that computes the function using arguments from
+    each of the iterables. Stops when the shortest iterable is exhausted.
+
+    For extra options, see `astarmap`.
+    """
+    it = zip(*iterables) if _all_sync_iters(iterables) else azip(*iterables)
+    return astarmap(func, it, limit=limit, unordered=unordered)
+
+
+async def astarmap[*Ts, R](
+    func: Callable[[*Ts], Coro[R]],
+    iterable: AnyIterable[tuple[*Ts]],
+    /,
+    *,
+    limit: int,
+    unordered: bool = False,
+) -> AsyncIterator[R]:
+    """Async version of itertools.starmap(fn, iterable).
+
+    Return an iterator whose values are returned from the function evaluated
+    with an argument tuple taken from the given sequence.
+
+    Options:
+    - limit - Maximum number of simultaneously running tasks.
+    - unordered - Set to get yield results as soon as they are ready.
+    """
+    assert callable(func)
+
+    # optimization: Plain loop if concurrency is unnecessary
+    if limit <= 1:
+        if isinstance(iterable, Iterable):
+            for args in iterable:
+                yield await func(*args)
+        else:
+            async for args in iterable:
+                yield await func(*args)
+        return
+
+    async with asyncio.TaskGroup() as tg:
+        ts = (
+            (tg.create_task(func(*args)) for args in iterable)
+            if isinstance(iterable, Iterable)
+            else (tg.create_task(func(*args)) async for args in iterable)
+        )
+
+        it = (
+            _iter_results_unordered(ts, limit=limit)
+            if unordered
+            else _iter_results(ts, limit=limit)
+        )
+
+        async for x in it:
+            yield x
+
+
+async def _iter_results_unordered[T](
+    ts: AnyIterator[Task[T]], limit: int
+) -> AsyncIterator[T]:
+    """Fetch and run async tasks.
+
+    Runs exactly `limit` tasks simultaneously (less in the end of iteration).
+    Order of results is arbitrary.
+    """
+    todo = set[Task[T]]()
+    done = Queue[Task[T]]()
+
+    def _todo_to_done(t: Task[T]) -> None:
+        todo.discard(t)
+        done.put_nowait(t)
+
+    while True:
+        # Prefill task buffer
+        while len(todo) + done.qsize() < limit and (
+            t := (
+                next(ts, None)
+                if isinstance(ts, Iterator)
+                else await anext(ts, None)
+            )
+        ):
+            # optimization: Immediately put to done if the task is
+            # already done (e.g. if the coro was able to complete eagerly),
+            # and skip scheduling a done callback
+            if t.done():
+                done.put_nowait(t)
+            else:
+                todo.add(t)
+                t.add_done_callback(_todo_to_done)
+
+        # No more tasks to do and nothing more to schedule
+        if not todo and done.empty():
+            return
+
+        # Wait till any task succeed
+        yield (await done.get()).result()
+
+        # Pop tasks happened to also be DONE (after line above)
+        while not done.empty():
+            yield done.get_nowait().result()
+
+
+async def _iter_results[T](
+    ts: AnyIterator[Task[T]], limit: int
+) -> AsyncIterator[T]:
+    """Fetch and run async tasks.
+
+    Runs up to `limit` tasks simultaneously (less in the end of iteration).
+    Order of results is preserved.
+    """
+    todo = deque[Task[T]]()
+    while True:
+        # Prefill task buffer
+        while len(todo) < limit and (
+            t := (
+                next(ts, None)
+                if isinstance(ts, Iterator)
+                else await anext(ts, None)
+            )
+        ):
+            todo.append(t)
+        if not todo:  # No more tasks to do and nothing more to schedule
+            return
+
+        # Forcefully block first task, while it's awaited,
+        # others in `todo` are also running, because they are `asyncio.Task`.
+        # So after this some of tasks from `todo` are also done.
+        yield await todo.popleft()
+
+        # Pop tasks happened to also be DONE (after line above)
+        while todo and todo[0].done():
+            yield todo.popleft().result()
+
+
+async def azip(*iterables: AnyIterable) -> AsyncIterator[tuple]:
+    if _all_sync_iters(iterables):
+        for x in zip(*iterables):
+            yield x
+        return
+
+    aiters = (
+        _wrapgen(it) if isinstance(it, Iterable) else aiter(it)
+        for it in iterables
+    )
+    while True:
+        try:
+            ret = await asyncio.gather(*(anext(ait) for ait in aiters))
+        except StopAsyncIteration:
+            return
+        else:
+            yield tuple(ret)
+
+
+def _all_sync_iters(
+    iterables: Collection[AnyIterable],
+) -> TypeGuard[Collection[Iterable]]:
+    return all(isinstance(it, Iterable) for it in iterables)
+
+
+async def _wrapgen[T](it: Iterable[T]) -> AsyncIterator[T]:
+    for x in it:
+        yield x
+
+
+@overload
+def astreaming(
+    *, batch_size: int | None = ..., timeout: float = ...
+) -> ABatchDecorator: ...
+
+
+@overload
+def astreaming[T, R](
+    fn: ABatchFn[T, R],
+    /,
+    *,
+    batch_size: int | None = ...,
+    timeout: float = ...,
+) -> ABatchFn[T, R]: ...
+
+
+def astreaming[T, R](
+    fn: ABatchFn[T, R] | None = None,
+    /,
+    *,
+    batch_size: int | None = None,
+    timeout: float = 0.1,
+) -> ABatchFn[T, R] | ABatchDecorator:
+    """Compute on `timeout` or if batch is collected.
+
+    `timeout` (in seconds) is a time to wait till the batch is full,
+    i.e. latency.
+
+    Uses ideas from
+    - https://github.com/ShannonAI/service-streamer
+    - https://github.com/leon0707/batch_processor
+    - ray.serve.batch
+      https://github.com/ray-project/ray/blob/master/python/ray/serve/batching.py
+
+    Note: currently supports only functions and bound methods.
+
+    Implementation details:
+    - any caller enqueues jobs and starts waiting
+    """
+    if fn is None:
+        deco = partial(astreaming, batch_size=batch_size, timeout=timeout)
+        return cast('ABatchDecorator', deco)
+
+    assert batch_size is None or batch_size >= 1
+    assert timeout > 0
+
+    buf: list[_Job[T, R]] = []
+    deadline = float('-inf')
+    not_last = asyncio.Event()
+    lock = asyncio.Lock()
+    ncalls = 0
+
+    async def wrapper(items: Sequence[T]) -> list[R]:
+        nonlocal ncalls, deadline
+        if not items:
+            return []
+
+        # There's another handling call with tail, wake it up
+        if not ncalls and buf:
+            not_last.set()
+
+        ncalls += 1
+        fs: list[asyncio.Future[R]] = []
+        try:
+            for x in items:
+                f = asyncio.Future[R]()
+                fs.append(f)
+                buf.append((x, f))
+
+                if len(buf) == 1:  # Got first job, reset deadline
+                    deadline = asyncio.get_running_loop().time() + timeout
+
+                # Full batch, dispatch
+                if batch_size is not None and len(buf) == batch_size:
+                    batch, buf[:] = buf[:], []
+                    async with lock:
+                        await _adispatch(fn, *batch)
+        finally:
+            ncalls -= 1
+
+        if not ncalls and buf:  # Was last call, wait for another
+            not_last.clear()
+
+            notified = False
+            with suppress(TimeoutError):
+                async with asyncio.timeout_at(deadline):
+                    notified = await not_last.wait()
+
+            if not notified:
+                batch, buf[:] = buf[:], []
+                async with lock:
+                    await _adispatch(fn, *batch)
+
+        return await asyncio.gather(*fs)
+
+    return wrapper
+
+
+async def _adispatch[T, R](fn: ABatchFn[T, R], *xs: _Job[T, R]) -> None:
+    if not xs:
+        return
+    obj: list[R] | BaseException
+    try:
+        obj = list(await fn([x for x, _ in xs]))
+        if len(obj) != len(xs):
+            obj = RuntimeError(
+                f'Call with {len(xs)} arguments '
+                f'incorrectly returned {len(obj)} results'
+            )
+    except BaseException as exc:  # noqa: BLE001
+        obj = declutter_tb(exc, fn.__code__)
+
+    if isinstance(obj, BaseException):
+        for _, f in xs:
+            f.set_exception(obj)
+    else:
+        for (_, f), res in zip(xs, obj):
+            f.set_result(res)
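
The new `astreaming` decorator above implements request micro-batching: concurrent callers enqueue their items, and the wrapped coroutine receives one merged batch once `batch_size` items have accumulated or `timeout` expires. A hedged usage sketch based on its docstring and the wrapper signature (`Sequence[T] -> list[R]`); `embed`, `caller`, and the numbers are illustrative, not part of the package:

import asyncio

from glow import astreaming


@astreaming(batch_size=8, timeout=0.05)
async def embed(texts: list[str]) -> list[int]:
    # The wrapped function sees one merged batch collected from all callers
    # and must return exactly one result per input item.
    return [len(t) for t in texts]


async def caller(name: str) -> list[int]:
    # Each caller submits its own items and gets its own results back,
    # in the order it submitted them.
    return await embed([f'{name}-{i}' for i in range(3)])


async def main() -> None:
    # Two concurrent callers; their items can be dispatched as a single batch
    # once the 0.05 s timeout elapses (the batch never reaches 8 items here).
    r1, r2 = await asyncio.gather(caller('a'), caller('b'))
    print(r1, r2)  # [3, 3, 3] [3, 3, 3]


asyncio.run(main())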
glow-0.15.3/src/glow/_async.pyi
@@ -0,0 +1,108 @@
+from collections.abc import AsyncIterator, Callable, Mapping
+from typing import Any, Required, TypedDict, Unpack, overload
+
+from ._types import ABatchDecorator, ABatchFn, AnyIterable, Coro
+
+class _AmapKwargs(TypedDict, total=False):
+    limit: Required[int]
+    unordered: bool
+
+def astarmap[*Ts, R](
+    func: Callable[[*Ts], Coro[R]],
+    iterable: AnyIterable[tuple[*Ts]],
+    /,
+    **kwargs: Unpack[_AmapKwargs],
+) -> AsyncIterator[R]: ...
+@overload
+def amap[T, R](
+    func: Callable[[T], Coro[R]],
+    iter1: AnyIterable[T],
+    /,
+    **kwargs: Unpack[_AmapKwargs],
+) -> AsyncIterator[R]: ...
+@overload
+def amap[T, T2, R](
+    func: Callable[[T, T2], Coro[R]],
+    iter1: AnyIterable[T],
+    iter2: AnyIterable[T2],
+    /,
+    **kwargs: Unpack[_AmapKwargs],
+) -> AsyncIterator[R]: ...
+@overload
+def amap[T, T2, T3, R](
+    func: Callable[[T, T2, T3], Coro[R]],
+    iter1: AnyIterable[T],
+    iter2: AnyIterable[T2],
+    iter3: AnyIterable[T3],
+    /,
+    **kwargs: Unpack[_AmapKwargs],
+) -> AsyncIterator[R]: ...
+@overload
+def amap[T, T2, T3, T4, R](
+    func: Callable[[T, T2, T3, T4], Coro[R]],
+    iter1: AnyIterable[T],
+    iter2: AnyIterable[T2],
+    iter3: AnyIterable[T3],
+    iter4: AnyIterable[T4],
+    /,
+    **kwargs: Unpack[_AmapKwargs],
+) -> AsyncIterator[R]: ...
+@overload
+def amap[R](
+    func: Callable[..., Coro[R]],
+    iter1: AnyIterable,
+    iter2: AnyIterable,
+    iter3: AnyIterable,
+    iter4: AnyIterable,
+    iter5: AnyIterable,
+    /,
+    *iters: AnyIterable,
+    **kwargs: Unpack[_AmapKwargs],
+) -> AsyncIterator[R]: ...
+async def amap_dict[K, T1, T2](
+    func: Callable[[T1], Coro[T2]], obj: Mapping[K, T1], /, *, limit: int
+) -> dict[K, T2]: ...
+@overload
+def azip() -> AsyncIterator[Any]: ...
+@overload
+def azip[T](
+    iter1: AnyIterable[T], /
+) -> AsyncIterator[tuple[T]]: ...  # noqa: RUF100,Y090
+@overload
+def azip[T, T2](
+    iter1: AnyIterable[T], iter2: AnyIterable[T2], /
+) -> AsyncIterator[tuple[T, T2]]: ...
+@overload
+def azip[T, T2, T3](
+    iter1: AnyIterable[T], iter2: AnyIterable[T2], iter3: AnyIterable[T3], /
+) -> AsyncIterator[tuple[T, T2, T3]]: ...
+@overload
+def azip[T, T2, T3, T4](
+    iter1: AnyIterable[T],
+    iter2: AnyIterable[T2],
+    iter3: AnyIterable[T3],
+    iter4: AnyIterable[T4],
+    /,
+) -> AsyncIterator[tuple[T, T2, T3, T4]]: ...
+@overload
+def azip(
+    iter1: AnyIterable,
+    iter2: AnyIterable,
+    iter3: AnyIterable,
+    iter4: AnyIterable,
+    iter5: AnyIterable,
+    /,
+    *iters: AnyIterable,
+) -> AsyncIterator[tuple]: ...
+@overload
+def astreaming(
+    *, batch_size: int | None = ..., timeout: float = ...
+) -> ABatchDecorator: ...
+@overload
+def astreaming[T, R](
+    fn: ABatchFn[T, R],
+    /,
+    *,
+    batch_size: int | None = ...,
+    timeout: float = ...,
+) -> ABatchFn[T, R]: ...
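
Per the stubs above, `amap` and `azip` accept any mix of plain iterables and async iterators (`AnyIterable`). A small sketch under that assumption; `ticker` and `add` are illustrative helpers, not part of glow:

import asyncio

from glow import amap, azip


async def ticker(n: int):
    # Illustrative async generator yielding 0..n-1.
    for i in range(n):
        yield i


async def add(a: int, b: str) -> str:
    return f'{a}:{b}'


async def main() -> None:
    # Pair an async iterator with a plain list.
    pairs = [p async for p in azip(ticker(3), ['a', 'b', 'c'])]
    print(pairs)  # [(0, 'a'), (1, 'b'), (2, 'c')]

    # amap zips its iterables the same way before applying `add`,
    # running at most `limit` coroutines at once.
    out = [x async for x in amap(add, ticker(3), ['x', 'y', 'z'], limit=2)]
    print(out)  # ['0:x', '1:y', '2:z']


asyncio.run(main())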