omlish 0.0.0.dev136__py3-none-any.whl → 0.0.0.dev138__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (34) hide show
  1. omlish/__about__.py +2 -2
  2. omlish/cached.py +2 -2
  3. omlish/collections/mappings.py +1 -1
  4. omlish/configs/flattening.py +1 -1
  5. omlish/diag/_pycharm/runhack.py +3 -0
  6. omlish/formats/json/stream/errors.py +2 -0
  7. omlish/formats/json/stream/lex.py +11 -5
  8. omlish/formats/json/stream/parse.py +37 -21
  9. omlish/funcs/genmachine.py +5 -4
  10. omlish/io/compress/__init__.py +0 -0
  11. omlish/io/compress/abc.py +104 -0
  12. omlish/io/compress/adapters.py +147 -0
  13. omlish/io/compress/bz2.py +42 -0
  14. omlish/io/compress/gzip.py +306 -0
  15. omlish/io/compress/lzma.py +32 -0
  16. omlish/io/compress/types.py +29 -0
  17. omlish/io/generators/__init__.py +0 -0
  18. omlish/io/generators/readers.py +183 -0
  19. omlish/io/generators/stepped.py +104 -0
  20. omlish/lang/__init__.py +11 -1
  21. omlish/lang/functions.py +0 -2
  22. omlish/lang/generators.py +243 -0
  23. omlish/lang/iterables.py +28 -51
  24. omlish/lang/maybes.py +4 -4
  25. {omlish-0.0.0.dev136.dist-info → omlish-0.0.0.dev138.dist-info}/METADATA +1 -1
  26. {omlish-0.0.0.dev136.dist-info → omlish-0.0.0.dev138.dist-info}/RECORD +34 -22
  27. /omlish/collections/{_abc.py → abc.py} +0 -0
  28. /omlish/io/{_abc.py → abc.py} +0 -0
  29. /omlish/logs/{_abc.py → abc.py} +0 -0
  30. /omlish/sql/{_abc.py → abc.py} +0 -0
  31. {omlish-0.0.0.dev136.dist-info → omlish-0.0.0.dev138.dist-info}/LICENSE +0 -0
  32. {omlish-0.0.0.dev136.dist-info → omlish-0.0.0.dev138.dist-info}/WHEEL +0 -0
  33. {omlish-0.0.0.dev136.dist-info → omlish-0.0.0.dev138.dist-info}/entry_points.txt +0 -0
  34. {omlish-0.0.0.dev136.dist-info → omlish-0.0.0.dev138.dist-info}/top_level.txt +0 -0
@@ -0,0 +1,306 @@
1
+ # PYTHON SOFTWARE FOUNDATION LICENSE VERSION 2
2
+ # --------------------------------------------
3
+ #
4
+ # 1. This LICENSE AGREEMENT is between the Python Software Foundation ("PSF"), and the Individual or Organization
5
+ # ("Licensee") accessing and otherwise using this software ("Python") in source or binary form and its associated
6
+ # documentation.
7
+ #
8
+ # 2. Subject to the terms and conditions of this License Agreement, PSF hereby grants Licensee a nonexclusive,
9
+ # royalty-free, world-wide license to reproduce, analyze, test, perform and/or display publicly, prepare derivative
10
+ # works, distribute, and otherwise use Python alone or in any derivative version, provided, however, that PSF's License
11
+ # Agreement and PSF's notice of copyright, i.e., "Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009,
12
+ # 2010, 2011, 2012, 2013, 2014, 2015, 2016, 2017 Python Software Foundation; All Rights Reserved" are retained in Python
13
+ # alone or in any derivative version prepared by Licensee.
14
+ #
15
+ # 3. In the event Licensee prepares a derivative work that is based on or incorporates Python or any part thereof, and
16
+ # wants to make the derivative work available to others as provided herein, then Licensee hereby agrees to include in
17
+ # any such work a brief summary of the changes made to Python.
18
+ #
19
+ # 4. PSF is making Python available to Licensee on an "AS IS" basis. PSF MAKES NO REPRESENTATIONS OR WARRANTIES,
20
+ # EXPRESS OR IMPLIED. BY WAY OF EXAMPLE, BUT NOT LIMITATION, PSF MAKES NO AND DISCLAIMS ANY REPRESENTATION OR WARRANTY
21
+ # OF MERCHANTABILITY OR FITNESS FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF PYTHON WILL NOT INFRINGE ANY THIRD PARTY
22
+ # RIGHTS.
23
+ #
24
+ # 5. PSF SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF PYTHON FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL
25
+ # DAMAGES OR LOSS AS A RESULT OF MODIFYING, DISTRIBUTING, OR OTHERWISE USING PYTHON, OR ANY DERIVATIVE THEREOF, EVEN IF
26
+ # ADVISED OF THE POSSIBILITY THEREOF.
27
+ #
28
+ # 6. This License Agreement will automatically terminate upon a material breach of its terms and conditions.
29
+ #
30
+ # 7. Nothing in this License Agreement shall be deemed to create any relationship of agency, partnership, or joint
31
+ # venture between PSF and Licensee. This License Agreement does not grant permission to use PSF trademarks or trade
32
+ # name in a trademark sense to endorse or promote products or services of Licensee, or any third party.
33
+ #
34
+ # 8. By copying, installing or otherwise using Python, Licensee agrees to be bound by the terms and conditions of this
35
+ # License Agreement.
36
+ import functools
37
+ import os.path
38
+ import struct
39
+ import time
40
+ import typing as ta
41
+
42
+ from ... import cached
43
+ from ... import check
44
+ from ... import lang
45
+ from ..generators.readers import PrependableBytesGeneratorReader
46
+ from .types import IncrementalCompressor
47
+ from .types import IncrementalDecompressor
48
+
49
+
50
+ if ta.TYPE_CHECKING:
51
+ import gzip
52
+ import zlib
53
+ else:
54
+ gzip = lang.proxy_import('gzip')
55
+ zlib = lang.proxy_import('zlib')
56
+
57
+
58
+ ##
59
+
60
+
61
# Standard zlib/gzip compression levels (same values the stdlib gzip module uses):
COMPRESS_LEVEL_FAST = 1  # fastest compression, largest output
COMPRESS_LEVEL_TRADEOFF = 6  # zlib's default speed/size tradeoff
COMPRESS_LEVEL_BEST = 9  # slowest compression, smallest output
64
+
65
+
66
@cached.function
def _zero_crc() -> int:
    """Return the CRC-32 seed value (crc32 of the empty byte string), computed once and cached."""
    return zlib.crc32(b'')
69
+
70
+
71
+ ##
72
+
73
+
74
class IncrementalGzipCompressor:
    """
    An incremental, generator-driven gzip compressor mirroring the stdlib gzip module's GzipFile write path (the
    PSF-licensed code this file is adapted from).

    Protocol: the generator yields None when it needs input and yields bytes of compressed output; the driver sends
    input bytes in response to each None. Sending b'' finalizes the stream (deflate flush + CRC/ISIZE trailer), after
    which a final b'' is yielded.
    """

    def __init__(
        self,
        *,
        compresslevel: int = COMPRESS_LEVEL_BEST,  # zlib compression level, 1 (fast) .. 9 (best)
        name: str | bytes | None = None,  # original filename, written to the header FNAME field
        mtime: float | None = None,  # header MTIME field; defaults to time.time() at header-write time
    ) -> None:
        super().__init__()

        self._name = name or ''
        self._compresslevel = compresslevel
        self._mtime = mtime

    def _write_gzip_header(self) -> ta.Generator[bytes, None, None]:
        """Yield the RFC 1952 member header one field at a time."""

        check.none((yield b'\037\213'))  # magic header
        check.none((yield b'\010'))  # compression method

        try:
            # RFC 1952 requires the FNAME field to be Latin-1. Do not include filenames that cannot be represented that
            # way.
            fname = os.path.basename(self._name)
            if not isinstance(fname, bytes):
                fname = fname.encode('latin-1')
            if fname.endswith(b'.gz'):
                fname = fname[:-3]
        except UnicodeEncodeError:
            fname = b''

        flags = 0
        if fname:
            flags = gzip.FNAME
        check.none((yield chr(flags).encode('latin-1')))

        mtime = self._mtime
        if mtime is None:
            mtime = time.time()
        check.none((yield struct.pack('<L', int(mtime))))

        # XFL field: 2 = maximum compression, 4 = fastest, 0 = anything else.
        if self._compresslevel == COMPRESS_LEVEL_BEST:
            xfl = b'\002'
        elif self._compresslevel == COMPRESS_LEVEL_FAST:
            xfl = b'\004'
        else:
            xfl = b'\000'
        check.none((yield xfl))

        # OS field: 255 = unknown.
        check.none((yield b'\377'))

        if fname:
            check.none((yield fname + b'\000'))

    @lang.autostart
    def __call__(self) -> IncrementalCompressor:
        """Build the compressor generator described on the class (started via lang.autostart)."""

        crc = _zero_crc()
        size = 0
        offset = 0  # Current file offset for seek(), tell(), etc
        wrote_header = False

        # Raw deflate stream (negative wbits): this generator emits the gzip header/trailer itself.
        compress = zlib.compressobj(
            self._compresslevel,
            zlib.DEFLATED,
            -zlib.MAX_WBITS,
            zlib.DEF_MEM_LEVEL,
            0,
        )

        while True:
            data: ta.Any = check.isinstance((yield None), bytes)

            # The header is emitted lazily on the first send - even when that send is the terminating b''.
            if not wrote_header:
                yield from self._write_gzip_header()
                wrote_header = True

            if not data:
                break

            # Called by our self._buffer underlying BufferedWriterDelegate.
            # NOTE(review): check.isinstance above already restricts data to bytes, so the memoryview branch below
            # appears unreachable - retained from the stdlib code this is adapted from.
            if isinstance(data, (bytes, bytearray)):
                length = len(data)
            else:
                # accept any data that supports the buffer protocol
                data = memoryview(data)
                length = data.nbytes

            if length > 0:
                if (fl := compress.compress(data)):
                    check.none((yield fl))
                size += length
                crc = zlib.crc32(data, crc)
                offset += length

        if (fl := compress.flush()):
            check.none((yield fl))

        # Member trailer: CRC32, then ISIZE (uncompressed length mod 2**32).
        yield struct.pack('<L', crc)
        # size may exceed 2 GiB, or even 4 GiB
        yield struct.pack('<L', size & 0xffffffff)

        yield b''
174
+
175
+
176
+ ##
177
+
178
+
179
class IncrementalGzipDecompressor:
    """
    An incremental, generator-driven gzip decompressor mirroring the stdlib gzip module's _GzipReader logic.

    Protocol: the generator yields an int (need exactly n input bytes), None (need any amount of input), or bytes
    (decompressed output), and is sent input bytes in response to read requests. It yields b'' and returns once the
    stream is exhausted. Multi-member (concatenated) gzip streams are supported.
    """

    def __init__(self) -> None:
        super().__init__()

        # Raw-deflate decompressor factory: gzip members wrap a raw deflate stream (negative wbits suppresses the zlib
        # header/trailer handling).
        self._factory = functools.partial(
            zlib.decompressobj,
            wbits=-zlib.MAX_WBITS,
        )

    def _read_gzip_header(
        self,
        rdr: PrependableBytesGeneratorReader,
    ) -> ta.Generator[int | None, bytes, int | None]:
        """Read one RFC 1952 member header, returning its MTIME field, or None if the stream is at EOF."""

        magic = yield from rdr.read(2)
        if magic == b'':
            return None

        if magic != b'\037\213':
            raise gzip.BadGzipFile(f'Not a gzipped file ({magic!r})')

        buf = yield from rdr.read(8)
        method, flag, last_mtime = struct.unpack('<BBIxx', buf)
        if method != 8:
            raise gzip.BadGzipFile('Unknown compression method')

        if flag & gzip.FEXTRA:
            # Read & discard the extra field, if present
            buf = yield from rdr.read(2)
            extra_len, = struct.unpack('<H', buf)
            if extra_len:
                yield from rdr.read(extra_len)

        if flag & gzip.FNAME:
            # Read and discard a null-terminated string containing the filename
            while True:
                s = yield from rdr.read(1)
                if not s or s == b'\000':
                    break

        if flag & gzip.FCOMMENT:
            # Read and discard a null-terminated string containing a comment
            while True:
                s = yield from rdr.read(1)
                if not s or s == b'\000':
                    break

        if flag & gzip.FHCRC:
            yield from rdr.read(2)  # Read & discard the 16-bit header CRC

        return last_mtime

    def _read_eof(
        self,
        rdr: PrependableBytesGeneratorReader,
        crc: int,
        stream_size: int,
    ) -> ta.Generator[int | None, bytes, None]:
        """Validate the 8-byte member trailer (CRC32 + ISIZE) and skip any zero padding before the next member."""

        # We've read to the end of the file.
        # We check that the computed CRC and size of the uncompressed data matches the stored values. Note that the size
        # stored is the true file size mod 2**32.
        buf = yield from rdr.read(8)
        crc32, isize = struct.unpack('<II', buf)
        if crc32 != crc:
            raise gzip.BadGzipFile(f'CRC check failed {hex(crc32)} != {hex(crc)}')
        elif isize != (stream_size & 0xffffffff):
            raise gzip.BadGzipFile('Incorrect length of data produced')

        # Gzip files can be padded with zeroes and still have archives. Consume all zero bytes and set the file position
        # to the first non-zero byte. See http://www.gzip.org/#faq8
        c = b'\0'
        while c == b'\0':
            c = yield from rdr.read(1)
        if c:
            rdr.prepend(c)

    def __call__(self) -> IncrementalDecompressor:
        """Build the decompressor generator described on the class."""

        rdr = PrependableBytesGeneratorReader()

        pos = 0  # Current offset in decompressed stream

        crc = _zero_crc()
        stream_size = 0  # Decompressed size of unconcatenated stream
        new_member = True

        decompressor = self._factory()

        while True:
            # For certain input data, a single call to decompress() may not return any data. In this case, retry until
            # we get some data or reach EOF.
            while True:
                if decompressor.eof:
                    # Ending case: we've come to the end of a member in the file, so finish up this member, and read a
                    # new gzip header. Check the CRC and file size, and set the flag so we read a new member
                    yield from self._read_eof(rdr, crc, stream_size)
                    new_member = True
                    decompressor = self._factory()

                if new_member:
                    # If the _new_member flag is set, we have to jump to the next member, if there is one.
                    crc = _zero_crc()
                    stream_size = 0  # Decompressed size of unconcatenated stream
                    last_mtime = yield from self._read_gzip_header(rdr)
                    if last_mtime is None:
                        # FIX: must compare against None, not truth-test. A member whose MTIME field is 0 (e.g. one
                        # written with `gzip -n`, or by a compressor passing mtime=0) returns a falsy 0 here, and
                        # `if not last_mtime:` truncated such streams at the header. None means genuine EOF.
                        check.none((yield b''))
                        return
                    new_member = False

                # Read a chunk of data from the file
                if not decompressor.unconsumed_tail:
                    buf = yield from rdr.read(None)
                    uncompress = decompressor.decompress(buf)
                else:
                    uncompress = decompressor.decompress(b'')

                if decompressor.unused_data != b'':
                    # Prepend the already read bytes to the fileobj so they can be seen by _read_eof() and
                    # _read_gzip_header()
                    rdr.prepend(decompressor.unused_data)

                if uncompress != b'':
                    break
                if buf == b'':  # noqa
                    raise EOFError('Compressed file ended before the end-of-stream marker was reached')

            crc = zlib.crc32(uncompress, crc)
            stream_size += len(uncompress)
            pos += len(uncompress)
            check.none((yield uncompress))
@@ -0,0 +1,32 @@
1
+ import typing as ta
2
+
3
+ from ... import lang
4
+ from .adapters import CompressorIncrementalAdapter
5
+ from .adapters import DecompressorIncrementalAdapter
6
+ from .types import IncrementalCompressor
7
+ from .types import IncrementalDecompressor
8
+
9
+
10
+ if ta.TYPE_CHECKING:
11
+ import lzma
12
+ else:
13
+ lzma = lang.proxy_import('lzma')
14
+
15
+
16
class IncrementalLzmaCompressor:
    """An incremental, generator-driven LZMA (.xz) compressor built on the stdlib LZMACompressor."""

    def __init__(self) -> None:
        super().__init__()

    @lang.autostart
    def __call__(self) -> IncrementalCompressor:
        """Build an IncrementalCompressor generator wrapping lzma.LZMACompressor via the incremental adapter."""

        return CompressorIncrementalAdapter(
            lzma.LZMACompressor,  # type: ignore
        )()
25
+
26
+
27
class IncrementalLzmaDecompressor:
    """An incremental, generator-driven LZMA (.xz) decompressor built on the stdlib LZMADecompressor."""

    def __call__(self) -> IncrementalDecompressor:
        """Build an IncrementalDecompressor generator, passing lzma.LZMAError as the adapter's trailing_error type."""

        return DecompressorIncrementalAdapter(
            lzma.LZMADecompressor,  # type: ignore
            trailing_error=lzma.LZMAError,
        )()
@@ -0,0 +1,29 @@
1
+ # ruff: noqa: UP007
2
+ import typing as ta
3
+
4
+
5
# Generator protocol for incremental compression: the driver sends input bytes (or None to request pending output)
# and receives compressed bytes (or None when the compressor needs more input).
IncrementalCompressor: ta.TypeAlias = ta.Generator[
    ta.Union[
        bytes,  # Compressed output
        None,  # Need input
    ],
    ta.Union[
        bytes,  # Input bytes
        None,  # Need output
    ],
    None,
]


# Generator protocol for incremental decompression: like IncrementalCompressor, except the decompressor may also
# yield an int to request an exact number of input bytes.
IncrementalDecompressor: ta.TypeAlias = ta.Generator[
    ta.Union[
        bytes,  # Uncompressed output
        int,  # Need exactly n bytes
        None,  # Need any amount of bytes
    ],
    ta.Union[
        bytes,  # Input bytes
        None,  # Need output
    ],
    None,
]
File without changes
@@ -0,0 +1,183 @@
1
+ """
2
+ TODO:
3
+ - BufferedBytesGeneratorReader
4
+ - docstrings
5
+ - memoryviews
6
+ """
7
+ import abc
8
+ import typing as ta
9
+
10
+ from ... import check
11
+
12
+
13
+ T = ta.TypeVar('T')
14
+ I = ta.TypeVar('I')
15
+ R = ta.TypeVar('R')
16
+ AnyT = ta.TypeVar('AnyT', bound=ta.Any)
17
+
18
+
19
# A generator which yields read requests (int = size hint, None = any amount), is sent input items of type I, and
# returns R.
ReaderGenerator: ta.TypeAlias = ta.Generator[int | None, I, R]
# As ReaderGenerator, but every yielded request demands exactly that many items.
ExactReaderGenerator: ta.TypeAlias = ta.Generator[int, I, R]

# Specializations for the two common payload types:
BytesReaderGenerator: ta.TypeAlias = ReaderGenerator[bytes, R]
BytesExactReaderGenerator: ta.TypeAlias = ExactReaderGenerator[bytes, R]

StrReaderGenerator: ta.TypeAlias = ReaderGenerator[str, R]
StrExactReaderGenerator: ta.TypeAlias = ExactReaderGenerator[str, R]
27
+
28
+
29
+ ##
30
+
31
+
32
+ class _BytesJoiner:
33
+ def _join(self, lst: list[bytes]) -> bytes:
34
+ return b''.join(lst)
35
+
36
+
37
+ class _StrJoiner:
38
+ def _join(self, lst: list[str]) -> str:
39
+ return ''.join(lst)
40
+
41
+
42
+ ##
43
+
44
+
45
class GeneratorReader(abc.ABC, ta.Generic[T]):
    """Abstract reader driven as a generator: it yields read requests and is sent the corresponding data."""

    @abc.abstractmethod
    def read(self, sz: int | None) -> ta.Generator[int | None, T, T]:
        # Yields the request (item count, or None for 'any amount'); the driver sends the data back in. May return
        # fewer items than requested.
        raise NotImplementedError

    def read_exact(self, sz: int) -> ta.Generator[int | None, T, T]:
        """Like read(), but raises EOFError if the result is not exactly `sz` items long."""

        d: ta.Any = yield from self.read(sz)
        if len(d) != sz:
            raise EOFError(f'GeneratorReader got {len(d)}, expected {sz}')
        return d
55
+
56
+
57
+ ##
58
+
59
+
60
class PrependableGeneratorReader(GeneratorReader[AnyT]):
    """
    A GeneratorReader with a pushback queue: data may be prepended (optionally with an already-consumed offset) and is
    served by subsequent reads before any new data is requested from the driver.
    """

    def __init__(self) -> None:
        super().__init__()

        # Queue of (chunk, offset) pairs; `offset` is how much of `chunk` has already been consumed.
        self._queue: list[tuple[AnyT, int]] = []

    @abc.abstractmethod
    def _join(self, lst: list[AnyT]) -> AnyT:
        raise NotImplementedError

    def read(self, sz: int | None) -> ta.Generator[int | None, AnyT, AnyT]:
        if not self._queue:
            d: AnyT = check.not_none((yield sz))
            return d

        if sz is None:
            # FIX: honor the consumed-offset on the queued chunk. Returning the raw chunk (`pop(0)[0]`) re-delivered
            # data already handed out - e.g. after BufferedGeneratorReader calls prepend(d, q) with q > 0.
            c, p = self._queue.pop(0)
            return c[p:] if p else c

        lst: list[AnyT] = []
        rem = sz
        while rem > 0 and self._queue:
            c, p = self._queue[0]

            if len(c) - p > rem:
                # The head chunk more than satisfies the request - take a slice and bump its offset in place.
                lst.append(c[p:p + rem])
                self._queue[0] = (c, p + rem)
                return self._join(lst)

            # Consume the rest of the head chunk and continue with the next.
            lst.append(c[p:])
            rem -= len(c) - p
            self._queue.pop(0)

        if rem:
            # Queue exhausted - ask the driver for the remainder (which may come back short).
            d = check.not_none((yield rem))
            if d:
                lst.append(d)  # type: ignore[unreachable]

        if len(lst) == 1:
            return lst[0]
        else:
            return self._join(lst)

    def prepend(self, d: AnyT, p: int | None = None) -> None:
        """Push `d` onto the front of the queue; `p`, if given, marks how much of it is already consumed."""

        if d:
            self._queue.insert(0, (d, p or 0))
105
+
106
+
107
class PrependableBytesGeneratorReader(
    _BytesJoiner,
    PrependableGeneratorReader[bytes],
):
    """Prependable generator reader specialized to bytes."""

    pass
112
+
113
+
114
class PrependableStrGeneratorReader(
    _StrJoiner,
    PrependableGeneratorReader[str],
):
    """Prependable generator reader specialized to str."""

    pass
119
+
120
+
121
# Lowercase aliases so the classes can be used in factory-function style.
prependable_bytes_generator_reader = PrependableBytesGeneratorReader
prependable_str_generator_reader = PrependableStrGeneratorReader
123
+
124
+
125
+ ##
126
+
127
+
128
class BufferedGeneratorReader(PrependableGeneratorReader[AnyT], abc.ABC):
    """
    A PrependableGeneratorReader which inflates upstream read requests to at least `buffer_size`, queueing the surplus
    so the driver is asked for data less often.
    """

    # Default over-read size: 16 KiB.
    DEFAULT_BUFFER_SIZE = 4 * 0x1000

    def __init__(
        self,
        buffer_size: int = DEFAULT_BUFFER_SIZE,
    ) -> None:
        check.arg(buffer_size > 0)

        super().__init__()

        self._buffer_size = buffer_size

    def read(self, sz: int | None) -> ta.Generator[int | None, AnyT, AnyT]:
        # Drive the parent implementation manually so its upstream requests can be intercepted and inflated.
        g = super().read(sz)
        i: ta.Any = None
        while True:
            try:
                q = g.send(i)
            except StopIteration as e:
                return e.value

            # The parent only reaches upstream once its pushback queue is empty.
            check.state(not self._queue)

            if q is None:
                # 'Any amount' request - pass through unchanged.
                i = check.not_none((yield None))
                continue

            # Request at least buffer_size; the surplus beyond the q items actually needed is queued for later reads.
            r = max(q, self._buffer_size)
            d: AnyT = check.not_none((yield r))
            if len(d) < q:
                # Short read - hand it all to the parent; nothing left over to buffer.
                i = d
                continue

            i = d[:q]
            self.prepend(d, q)
164
+
165
+
166
class BufferedBytesGeneratorReader(
    _BytesJoiner,
    BufferedGeneratorReader[bytes],
    PrependableGeneratorReader[bytes],
):
    """Buffered generator reader specialized to bytes."""

    pass
172
+
173
+
174
class BufferedStrGeneratorReader(
    _StrJoiner,
    BufferedGeneratorReader[str],
    PrependableGeneratorReader[str],
):
    """Buffered generator reader specialized to str."""

    pass
180
+
181
+
182
# Lowercase aliases so the classes can be used in factory-function style.
buffered_bytes_generator_reader = BufferedBytesGeneratorReader
buffered_str_generator_reader = BufferedStrGeneratorReader
@@ -0,0 +1,104 @@
1
+ import typing as ta
2
+
3
+ from ... import lang
4
+
5
+
6
+ T = ta.TypeVar('T')
7
+ I = ta.TypeVar('I')
8
+ O = ta.TypeVar('O')
9
+ OF = ta.TypeVar('OF')
10
+ OT = ta.TypeVar('OT')
11
+ R = ta.TypeVar('R')
12
+
13
+
14
# A 'stepped generator': for each input item sent in, it yields zero or more non-None outputs, then yields None to
# signal it is ready for the next input.
SteppedGenerator: ta.TypeAlias = ta.Generator[O | None, I | None, R]
15
+
16
+
17
+ ##
18
+
19
+
20
@lang.autostart
def flatmap_stepped_generator(
    fn: ta.Callable[[list[OF]], OT],
    g: SteppedGenerator[OF, I, R],
    *,
    terminate: ta.Callable[[OF], bool] | None = None,
) -> ta.Generator[OT, I, lang.Maybe[R]]:
    """
    Given a 'stepped generator' - a generator which accepts input items and yields zero or more non-None values in
    response until it signals it's ready for the next input by yielding None - and a function taking a list, returns a
    1:1 generator which accepts input, builds a list of yielded generator output, calls the given function with that
    list, and yields the result.

    An optional terminate function may be provided which will cause this function to return early if it returns true for
    an encountered yielded value. The encountered value causing termination will be included in the list sent to the
    given fn.

    Returns a Maybe of either the given generator's return value or empty if the terminator was encountered.
    """

    l: list[OF]
    i: I | None = yield  # type: ignore
    while True:
        l = []

        while True:
            try:
                o = g.send(i)
            except StopIteration as e:
                # Inner generator finished: flush any buffered output, then propagate its return value.
                if l:
                    yield fn(l)
                return lang.just(e.value)

            # Deliver each input item only once; subsequent sends while collecting this step's output pass None.
            i = None

            if o is None:
                # Step complete - the inner generator wants its next input.
                break

            l.append(o)

            if terminate is not None and terminate(o):
                # Terminator seen: emit the step (including the terminating value) and stop without a return value.
                yield fn(l)
                return lang.empty()

        i = yield fn(l)
65
+
66
+
67
+ ##
68
+
69
+
70
+ def _join_bytes(l: ta.Sequence[bytes]) -> bytes:
71
+ if not l:
72
+ return b''
73
+ elif len(l) == 1:
74
+ return l[0]
75
+ else:
76
+ return b''.join(l)
77
+
78
+
79
+ def _join_str(l: ta.Sequence[str]) -> str:
80
+ if not l:
81
+ return ''
82
+ elif len(l) == 1:
83
+ return l[0]
84
+ else:
85
+ return ''.join(l)
86
+
87
+
88
def _is_empty(o: T) -> bool:
    """Return True if `o` has zero length (used as the terminate predicate for joined stepped generators)."""

    return not len(o)  # type: ignore
90
+
91
+
92
+ ##
93
+
94
+
95
def joined_bytes_stepped_generator(
    g: ta.Generator[bytes | None, bytes | None, R],
) -> ta.Generator[bytes, bytes, R]:
    """Adapt a bytes stepped generator into a 1:1 generator yielding each step's joined output; a zero-length yielded
    chunk terminates it."""

    return flatmap_stepped_generator(_join_bytes, g, terminate=_is_empty)
99
+
100
+
101
def joined_str_stepped_generator(
    g: ta.Generator[str | None, str | None, R],
) -> ta.Generator[str, str, R]:
    """Adapt a str stepped generator into a 1:1 generator yielding each step's joined output; a zero-length yielded
    chunk terminates it."""

    return flatmap_stepped_generator(_join_str, g, terminate=_is_empty)
omlish/lang/__init__.py CHANGED
@@ -120,6 +120,17 @@ from .functions import ( # noqa
120
120
  void,
121
121
  )
122
122
 
123
+ from .generators import ( # noqa
124
+ CoroutineGenerator,
125
+ Generator,
126
+ GeneratorLike,
127
+ GeneratorMappedIterator,
128
+ autostart,
129
+ corogen,
130
+ genmap,
131
+ nextgen,
132
+ )
133
+
123
134
  from .imports import ( # noqa
124
135
  can_import,
125
136
  import_all,
@@ -136,7 +147,6 @@ from .imports import ( # noqa
136
147
 
137
148
  from .iterables import ( # noqa
138
149
  BUILTIN_SCALAR_ITERABLE_TYPES,
139
- Generator,
140
150
  asrange,
141
151
  exhaust,
142
152
  flatmap,
omlish/lang/functions.py CHANGED
@@ -82,7 +82,6 @@ def identity(obj: T) -> T:
82
82
 
83
83
 
84
84
  class constant(ta.Generic[T]): # noqa
85
-
86
85
  def __init__(self, obj: T) -> None:
87
86
  super().__init__()
88
87
 
@@ -116,7 +115,6 @@ class VoidError(Exception):
116
115
 
117
116
 
118
117
  class Void:
119
-
120
118
  def __new__(cls, *args: ta.Any, **kwargs: ta.Any) -> None: # type: ignore # noqa
121
119
  raise VoidError
122
120