omdev 0.0.0.dev162__py3-none-any.whl → 0.0.0.dev164__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of omdev might be problematic. Click here for more details.
- omdev/amalg/__main__.py +1 -1
- omdev/amalg/gen.py +212 -0
- omdev/amalg/imports.py +98 -0
- omdev/amalg/main.py +154 -0
- omdev/amalg/manifests.py +49 -0
- omdev/amalg/resources.py +99 -0
- omdev/amalg/srcfiles.py +120 -0
- omdev/amalg/strip.py +70 -0
- omdev/amalg/types.py +6 -0
- omdev/amalg/typing.py +106 -0
- omdev/manifests/build.py +2 -3
- omdev/precheck/manifests.py +2 -3
- {omdev-0.0.0.dev162.dist-info → omdev-0.0.0.dev164.dist-info}/METADATA +2 -2
- {omdev-0.0.0.dev162.dist-info → omdev-0.0.0.dev164.dist-info}/RECORD +18 -10
- omdev/amalg/amalg.py +0 -782
- {omdev-0.0.0.dev162.dist-info → omdev-0.0.0.dev164.dist-info}/LICENSE +0 -0
- {omdev-0.0.0.dev162.dist-info → omdev-0.0.0.dev164.dist-info}/WHEEL +0 -0
- {omdev-0.0.0.dev162.dist-info → omdev-0.0.0.dev164.dist-info}/entry_points.txt +0 -0
- {omdev-0.0.0.dev162.dist-info → omdev-0.0.0.dev164.dist-info}/top_level.txt +0 -0
omdev/amalg/amalg.py
DELETED
|
@@ -1,782 +0,0 @@
|
|
|
1
|
-
"""
|
|
2
|
-
Conventions:
|
|
3
|
-
- must import whole global modules, if aliased must all match
|
|
4
|
-
- must import 'from' items for local modules
|
|
5
|
-
|
|
6
|
-
TODO:
|
|
7
|
-
- !! check only importing lite code
|
|
8
|
-
- !! strip manifests? or relegate them to a separate tiny module ala __main__.py?
|
|
9
|
-
- # @omlish-no-amalg ? in cli.types? will strip stmt (more than 1 line) following @manifest, so shouldn't import
|
|
10
|
-
- more sanity checks lol
|
|
11
|
-
- typealias - support # noqa, other comments, and lamely support multiline by just stealing lines till it parses
|
|
12
|
-
- remove `if __name__ == '__main__':` blocks - thus, convention: no def _main() for these
|
|
13
|
-
|
|
14
|
-
See:
|
|
15
|
-
- https://github.com/xonsh/amalgamate - mine is for portability not speed, and doesn't try to work on unmodified code
|
|
16
|
-
|
|
17
|
-
Targets:
|
|
18
|
-
- interp
|
|
19
|
-
- pyproject
|
|
20
|
-
- precheck
|
|
21
|
-
- build
|
|
22
|
-
- pyremote
|
|
23
|
-
- bootstrap
|
|
24
|
-
- deploy
|
|
25
|
-
- supervisor?
|
|
26
|
-
"""
|
|
27
|
-
import argparse
|
|
28
|
-
import ast
|
|
29
|
-
import base64
|
|
30
|
-
import dataclasses as dc
|
|
31
|
-
import io
|
|
32
|
-
import itertools
|
|
33
|
-
import logging
|
|
34
|
-
import os.path
|
|
35
|
-
import re
|
|
36
|
-
import textwrap
|
|
37
|
-
import typing as ta
|
|
38
|
-
|
|
39
|
-
import tokenize_rt as trt
|
|
40
|
-
|
|
41
|
-
from omlish import check
|
|
42
|
-
from omlish import collections as col
|
|
43
|
-
from omlish import lang
|
|
44
|
-
from omlish.lite.runtime import LITE_REQUIRED_PYTHON_VERSION
|
|
45
|
-
from omlish.logs import all as logs
|
|
46
|
-
|
|
47
|
-
from .. import magic
|
|
48
|
-
from .. import tokens as tks
|
|
49
|
-
|
|
50
|
-
|
|
51
|
-
# Local shorthand for the shared token-sequence type from the project's token helpers.
Tokens: ta.TypeAlias = tks.Tokens


# Module-level logger, named after this module per stdlib convention.
log = logging.getLogger(__name__)
|
|
55
|
-
|
|
56
|
-
|
|
57
|
-
##
|
|
58
|
-
|
|
59
|
-
|
|
60
|
-
# Token names considered part of a file "header" (whitespace, comments, docstrings).
HEADER_NAMES = (*tks.WS_NAMES, 'COMMENT', 'STRING')


def split_header_lines(lines: 'ta.Iterable[Tokens]') -> 'tuple[list[Tokens], list[Tokens]]':
    """
    Split token lines into (header, body).

    The header is the leading run of lines whose first token is a header token; the first
    non-header line and everything after it become the body.
    """

    header: list = []
    body: list = []
    it = iter(lines)
    for line in it:
        if line[0].name in HEADER_NAMES:
            header.append(line)
            continue
        # First non-header line reached: it and the remainder of the iterator are body.
        body.append(line)
        body.extend(it)
        break
    return header, body
|
74
|
-
|
|
75
|
-
|
|
76
|
-
#
|
|
77
|
-
|
|
78
|
-
|
|
79
|
-
# Matches a `if __name__ == '__main__':` guard line (either quote style).
IF_MAIN_PAT = re.compile(r'if\s+__name__\s+==\s+[\'"]__main__[\'"]\s*:')


def strip_main_lines(cls: 'ta.Sequence[Tokens]') -> 'list[Tokens]':
    """
    Return the given token lines with any `if __name__ == '__main__':` block removed.

    The guard line itself and its indented/blank body lines are dropped; the first
    subsequent non-indented, non-blank line terminates the skipped region.
    """

    out: list = []

    it = iter(cls)
    for l in it:
        if not IF_MAIN_PAT.fullmatch(tks.join_toks(l).strip()):
            out.append(l)
            continue

        # Consume the guarded block's body.
        for l in it:
            if l[0].name not in ('INDENT', 'UNIMPORTANT_WS') and tks.join_toks(l).strip():
                # NOTE: the terminating line is intentionally not re-emitted, matching the
                # original behavior (these blocks are expected to end the file).
                break

    return out
|
94
|
-
|
|
95
|
-
|
|
96
|
-
#
|
|
97
|
-
|
|
98
|
-
|
|
99
|
-
# Magic header comments that must not survive into amalgamated output.
STRIPPED_HEADER_MAGICS = [
    '@omlish-lite',
    '@omlish-script',
]

STRIPPED_HEADER_PAT = magic.compile_magic_style_pat(
    magic.PY_MAGIC_STYLE,
    keys=STRIPPED_HEADER_MAGICS,
)


def strip_header_lines(hls: 'ta.Sequence[Tokens]') -> 'list[Tokens]':
    """Drop a leading shebang line and any stripped-magic header comment lines."""

    if hls and tks.join_toks(hls[0]).startswith('#!'):
        hls = hls[1:]
    return [
        l
        for l in hls
        if not STRIPPED_HEADER_PAT.fullmatch(tks.join_toks(l))
    ]
|
119
|
-
|
|
120
|
-
|
|
121
|
-
##
|
|
122
|
-
|
|
123
|
-
|
|
124
|
-
@dc.dataclass(frozen=True, kw_only=True)
class Import:
    """A single parsed import statement found while scanning a source file."""

    mod: str  # Dotted module name; for relative imports, replaced by the resolved file path
    item: str | None  # The imported item for `from X import Y`, else None
    as_: str | None  # Alias from an `as` clause, else None

    src_path: str  # Path of the file the import appeared in
    line: int  # Line number of the import within src_path

    mod_path: str | None  # Resolved filesystem path for local/mounted modules; None for global modules

    toks: Tokens = dc.field(repr=False)  # Raw token line of the import statement
|
136
|
-
|
|
137
|
-
|
|
138
|
-
def make_import(
        lts: Tokens,
        *,
        src_path: str,
        mounts: ta.Mapping[str, str],
) -> Import | None:
    """
    Parse a token line into an Import, or return None if the line is not an import statement.

    Relative imports and imports rooted at a mounted package are resolved to absolute file
    paths (mod_path); global imports keep mod_path=None.
    """

    if not lts:
        return None
    ft = lts[0]

    # Only lines beginning with `import` or `from` are import statements.
    if ft.name != 'NAME' or ft.src not in ('import', 'from'):
        return None

    ml: list = []  # Module-name token sources
    il: list | None = None  # `from` item token sources; None until the `import` keyword is seen
    as_ = None
    for tok in (it := iter(tks.ignore_ws(lts[1:]))):
        if tok.name not in ('NAME', 'OP'):
            raise Exception(tok)

        if tok.src == 'as':
            check.none(as_)
            nt = next(it)
            check.equal(nt.name, 'NAME')
            as_ = nt.src
        elif tok.src == 'import':
            # The `import` keyword mid-line is only legal in a `from` statement.
            check.equal(ft.src, 'from')
            il = []
        elif il is not None:
            il.append(tok.src)
        else:
            ml.append(tok.src)

    mod = ''.join(ml)
    item = ''.join(il) if il is not None else None

    if (mnt := mounts.get(mod.partition('.')[0])) is not None:
        # Import rooted at a mounted package: resolve under the mount directory.
        ps = mod.split('.')
        mod_path = os.path.abspath(os.path.join(
            mnt,
            *ps[1:-1],
            ps[-1] + '.py',
        ))

    elif not mod.startswith('.'):
        # Global module: no local file backing it.
        mod_path = None

    else:
        # Relative import: each leading dot beyond the first walks up one directory.
        parts = mod.split('.')
        nd = len(parts) - parts[::-1].index('')
        mod_path = os.path.abspath(os.path.join(
            os.path.dirname(src_path),
            '../' * (nd - 1),
            *parts[nd:-1],
            parts[-1] + '.py',
        ))

        # Relative modules are identified by their resolved path from here on.
        mod = check.isinstance(mod_path, str)

    return Import(
        mod=mod,
        item=item,
        as_=as_,

        src_path=src_path,
        line=ft.line,

        mod_path=mod_path,

        toks=lts,
    )
|
209
|
-
|
|
210
|
-
|
|
211
|
-
##
|
|
212
|
-
|
|
213
|
-
|
|
214
|
-
# Trailing comments that explicitly mark a line as a type-alias declaration.
TYPE_ALIAS_COMMENT = '# ta.TypeAlias'
NOQA_TYPE_ALIAS_COMMENT = TYPE_ALIAS_COMMENT + ' # noqa'


@dc.dataclass(frozen=True, kw_only=True)
class Typing:
    """A root-level typing declaration (e.g. a `ta.`-based alias) lifted out of a source file."""

    src: str  # Exact source text of the declaration line

    src_path: str  # Path of the file it came from
    line: int  # Line number within src_path

    toks: Tokens = dc.field(repr=False)  # Raw token line of the declaration
|
226
|
-
|
|
227
|
-
|
|
228
|
-
def _is_typing(
        lts: Tokens,
        *,
        exclude_newtypes: bool = False,
) -> bool:
    """
    Report whether a token line is a root-level typing declaration.

    A line qualifies if it carries an explicit `# ta.TypeAlias` marker comment, or if it has
    the shape `NAME = ta.<something>`. With exclude_newtypes=True, `ta.NewType` assignments
    are rejected (they are runtime values, not pure aliases).
    """

    es = tks.join_toks(lts).strip()
    if any(es.endswith(sfx) for sfx in (TYPE_ALIAS_COMMENT, NOQA_TYPE_ALIAS_COMMENT)):
        return True

    wts = list(tks.ignore_ws(lts, keep=['INDENT', 'UNINDENT']))
    if not tks.match_toks(wts, [
        ('NAME', None),
        ('OP', '='),
        ('NAME', 'ta'),
        ('OP', '.'),
        (None, None),
    ]):
        return False

    if exclude_newtypes and wts[4].name == 'NAME' and wts[4].src == 'NewType':
        return False

    return True
|
252
|
-
|
|
253
|
-
|
|
254
|
-
def make_typing(
        lts: Tokens,
        *,
        src_path: str,
) -> Typing | None:
    """Build a Typing from a token line, or return None if the line is not a root-level typing declaration."""

    # Indented lines are not root-level; skip them outright.
    if not lts or lts[0].name == 'UNIMPORTANT_WS':
        return None

    if not _is_typing(lts, exclude_newtypes=True):
        return None

    ft = next(iter(tks.ignore_ws(lts)))
    return Typing(
        src=tks.join_toks(lts),

        src_path=src_path,
        line=ft.line,

        toks=lts,
    )
|
274
|
-
|
|
275
|
-
|
|
276
|
-
##
|
|
277
|
-
|
|
278
|
-
|
|
279
|
-
def is_root_level_if_type_checking_block(lts: 'Tokens') -> bool:
    """Report whether a token line opens a root-level `if ta.TYPE_CHECKING:` block."""

    return tks.match_toks(tks.ignore_ws(lts, keep=['INDENT']), [
        ('NAME', 'if'),
        ('NAME', 'ta'),
        ('OP', '.'),
        ('NAME', 'TYPE_CHECKING'),
        ('OP', ':'),
    ])
|
287
|
-
|
|
288
|
-
|
|
289
|
-
##
|
|
290
|
-
|
|
291
|
-
|
|
292
|
-
class RootLevelResourcesRead(ta.NamedTuple):
    """A recognized root-level package-resource read assignment."""

    variable: str  # Name the resource is assigned to
    kind: ta.Literal['binary', 'text']  # Which reader helper was called
    resource: str  # Resource filename passed to the helper


def is_root_level_resources_read(lts: 'Tokens') -> 'RootLevelResourcesRead | None':
    """
    Recognize `NAME = read_package_resource_binary(__package__, '<resource>')` (or the
    `_text` variant) at root level, returning the parsed read or None.
    """

    wts = list(tks.ignore_ws(lts, keep=['INDENT']))

    if not tks.match_toks(wts, [
        ('NAME', None),
        ('OP', '='),
        ('NAME', ('read_package_resource_binary', 'read_package_resource_text')),
        ('OP', '('),
        ('NAME', '__package__'),
        ('OP', ','),
        ('STRING', None),
        ('OP', ')'),
    ]):
        return None

    return RootLevelResourcesRead(
        wts[0].src,
        'binary' if wts[2].src == 'read_package_resource_binary' else 'text',
        ast.literal_eval(wts[6].src),
    )
|
318
|
-
|
|
319
|
-
|
|
320
|
-
##
|
|
321
|
-
|
|
322
|
-
|
|
323
|
-
@dc.dataclass(frozen=True, kw_only=True)
class SrcFile:
    """A fully-scanned source file, decomposed for amalgamation."""

    path: str  # Filesystem path the file was read from

    src: str = dc.field(repr=False)  # Stripped source text
    tokens: Tokens = dc.field(repr=False)  # Full token stream of src
    lines: ta.Sequence[Tokens] = dc.field(repr=False)  # Token stream split into lines

    header_lines: ta.Sequence[Tokens] = dc.field(repr=False)  # Surviving header lines
    imports: ta.Sequence[Import] = dc.field(repr=False)  # Parsed import statements
    typings: ta.Sequence[Typing] = dc.field(repr=False)  # Root-level typing declarations
    content_lines: ta.Sequence[Tokens] = dc.field(repr=False)  # Remaining body lines

    ruff_noqa: ta.AbstractSet[str] = dc.field(repr=False)  # Codes from `# ruff: noqa: ...` headers

    has_binary_resources: bool = False  # True if any binary package resource was inlined
|
339
|
-
|
|
340
|
-
|
|
341
|
-
def make_src_file(
        path: str,
        *,
        mounts: ta.Mapping[str, str],
) -> SrcFile:
    """
    Read and tokenize a source file, splitting it into header lines, imports, typings, and
    content lines.

    Additionally: `@omlish-manifest` blocks are emitted commented-out, root-level
    `if ta.TYPE_CHECKING:` blocks (and their `else:` blocks) are dropped, and root-level
    package-resource reads are inlined - binary resources as base64-decoded literals, text
    resources as triple-quoted string literals.
    """

    with open(path) as f:
        src = f.read().strip()

    tokens = trt.src_to_tokens(src)
    lines = tks.split_lines(tokens)

    hls, cls = split_header_lines(lines)

    hls = strip_header_lines(hls)
    # Pull `# ruff: noqa: ...` lines out of the header so their codes can be merged later.
    rnls, hls = col.partition(hls, lambda l: tks.join_toks(l).startswith('# ruff: noqa: '))

    imps: list[Import] = []
    tys: list[Typing] = []
    ctls: list[Tokens] = []

    has_binary_resources = False

    i = 0
    while i < len(cls):
        line = cls[i]
        i += 1

        if (imp := make_import(
                line,
                src_path=path,
                mounts=mounts,
        )) is not None:
            imps.append(imp)

        elif (ty := make_typing(
                line,
                src_path=path,
        )) is not None:
            tys.append(ty)

        elif (
                line and
                (ft := line[0]).name == 'COMMENT' and
                ft.src.startswith('# @omlish-manifest')
        ):
            # Accumulate lines until the manifest parses as a module containing one Assign.
            mls = [line]
            while True:
                mls.append(cls[i])
                i += 1

                msrc = tks.join_lines(mls).strip()
                try:
                    node = ast.parse(msrc)
                except SyntaxError:
                    continue

                mmod = check.isinstance(node, ast.Module)
                check.isinstance(check.single(mmod.body), ast.Assign)
                break

            # Emit the manifest commented-out so it survives in output without executing.
            ctls.extend([
                [trt.Token('COMMENT', '# ' + tks.join_toks(ml))]
                for ml in mls
            ])

        elif is_root_level_if_type_checking_block(line):
            def skip_block():
                # Advance i past indented/blank lines; return (without consuming) the first
                # line that ends the block.
                nonlocal i
                while True:
                    nl = cls[i]
                    if nl and nl[0].name != 'INDENT':
                        return nl
                    i += 1

            nl = skip_block()

            # An attached `else:` block is skipped as well.
            if tks.match_toks(nl, [
                ('DEDENT', None),
                ('NAME', 'else'),
            ]):
                i += 1
                skip_block()

        elif (rsrc := is_root_level_resources_read(line)) is not None:
            rf = os.path.join(os.path.dirname(path), rsrc.resource)

            if rsrc.kind == 'binary':
                with open(rf, 'rb') as bf:
                    rb = bf.read()  # noqa

                # Open a `VAR = base64.b64decode(` expression...
                ctls.append([
                    trt.Token(name='NAME', src=rsrc.variable),
                    trt.Token(name='UNIMPORTANT_WS', src=' '),
                    trt.Token(name='OP', src='='),
                    trt.Token(name='UNIMPORTANT_WS', src=' '),
                    trt.Token(name='NAME', src='base64'),
                    trt.Token(name='OP', src='.'),
                    trt.Token(name='NAME', src='b64decode'),
                    trt.Token(name='OP', src='('),
                    trt.Token(name='NL', src='\n'),
                ])

                # ...then the payload as adjacent (implicitly concatenated) string literals...
                rb64 = base64.b64encode(rb).decode('ascii')
                for chunk in itertools.batched(rb64, 96):
                    ctls.append([
                        trt.Token(name='UNIMPORTANT_WS', src='    '),
                        trt.Token(name='STRING', src=f"'{''.join(chunk)}'"),
                        trt.Token(name='NL', src='\n'),
                    ])

                # ...and close the call.
                ctls.append([
                    trt.Token(name='OP', src=')'),
                    trt.Token(name='NEWLINE', src='\n'),
                ])

                has_binary_resources = True

            elif rsrc.kind == 'text':
                with open(rf) as tf:
                    rt = tf.read()  # noqa
                rt = rt.replace('\\', '\\\\')  # Escape backslashes
                rt = rt.replace('"""', r'\"\"\"')
                ctls.append([
                    trt.Token(name='NAME', src=rsrc.variable),
                    trt.Token(name='UNIMPORTANT_WS', src=' '),
                    trt.Token(name='OP', src='='),
                    trt.Token(name='UNIMPORTANT_WS', src=' '),
                    trt.Token(name='STRING', src=f'"""\\\n{rt}"""\n'),
                    trt.Token(name='NEWLINE', src=''),
                ])

            else:
                raise ValueError(rsrc.kind)

        else:
            ctls.append(line)

    return SrcFile(
        path=path,

        src=src,
        tokens=tokens,
        lines=lines,

        header_lines=hls,
        imports=imps,
        typings=tys,
        content_lines=ctls,

        ruff_noqa=set(lang.flatten(tks.join_toks(l).strip().split()[3:] for l in rnls)),  # noqa

        has_binary_resources=has_binary_resources,
    )
|
494
|
-
|
|
495
|
-
|
|
496
|
-
##
|
|
497
|
-
|
|
498
|
-
|
|
499
|
-
# Separator comment line written between amalgamated sections.
SECTION_SEP = '#' * 40 + '\n'

# Ruff codes always disabled in generated output (the lite style intentionally violates them).
RUFF_DISABLES: ta.AbstractSet[str] = {
    'UP006',  # non-pep585-annotation
    'UP007',  # non-pep604-annotation
    'UP036',  # outdated-version-block
}

# Marker written into generated files; marker scanned for in input files.
OUTPUT_COMMENT = '# @omlish-amalg-output '
SCAN_COMMENT = '# @omlish-amalg '
|
509
|
-
|
|
510
|
-
|
|
511
|
-
def gen_amalg(
        main_path: str,
        *,
        mounts: ta.Mapping[str, str],
        output_dir: str | None = None,
) -> str:
    """
    Generate amalgamated single-file source for main_path and its transitive local imports.

    Output layout: synthesized header, deduplicated global imports, a python-version check,
    a typings section, then each source file's content in topological dependency order.
    """

    # Transitively collect all locally-reachable source files.
    src_files: dict[str, SrcFile] = {}
    todo = [main_path]
    while todo:
        src_path = todo.pop()
        if src_path in src_files:
            continue

        f = make_src_file(
            src_path,
            mounts=mounts,
        )
        src_files[src_path] = f

        for imp in f.imports:
            if (mp := imp.mod_path) is not None:
                todo.append(mp)

    ##

    out = io.StringIO()

    ##

    hls = []

    mf = src_files[main_path]
    if mf.header_lines:
        hls.extend([
            hl
            for hlts in mf.header_lines
            if not (hl := tks.join_toks(hlts)).startswith(SCAN_COMMENT)
        ])

    if output_dir is not None:
        ogf = os.path.relpath(main_path, output_dir)
    else:
        ogf = os.path.basename(main_path)

    nhls = []
    nhls.extend([
        '#!/usr/bin/env python3\n',
        '# noinspection DuplicatedCode\n',
        '# @omlish-lite\n',
        '# @omlish-script\n',
        f'{OUTPUT_COMMENT.strip()} {ogf}\n',
    ])

    ruff_disables = sorted({
        *lang.flatten(f.ruff_noqa for f in src_files.values()),
        *RUFF_DISABLES,
    })
    if ruff_disables:
        nhls.append(f'# ruff: noqa: {" ".join(sorted(ruff_disables))}\n')

    hls = [*nhls, *hls]
    out.write(''.join(hls))

    ##

    all_imps = [i for f in src_files.values() for i in f.imports]
    gl_imps = [i for i in all_imps if i.mod_path is None]

    dct: dict = {
        ('sys', None, None): ['import sys\n'],
    }
    if any(sf.has_binary_resources for sf in src_files.values()):
        dct[('base64', None, None)] = ['import base64\n']
    for imp in gl_imps:
        dct.setdefault((imp.mod, imp.item, imp.as_), []).append(imp)
    # FIX: keys mix str and None components; a plain sorted() raises TypeError when the same
    # module appears both with and without an item/alias. Sort with a None-safe key that
    # preserves the natural order for all-str comparisons.
    for _, l in sorted(dct.items(), key=lambda kv: tuple((x is None, x or '') for x in kv[0])):
        il = l[0]
        out.write(il if isinstance(il, str) else tks.join_toks(il.toks))
    if dct:
        out.write('\n\n')

    ##

    out.write(SECTION_SEP)
    out.write('\n\n')

    version_check_fail_msg = (
        f'Requires python {LITE_REQUIRED_PYTHON_VERSION!r}, '
        f'got {{sys.version_info}} from {{sys.executable}}'
    )
    out.write(textwrap.dedent(f"""
        if sys.version_info < {LITE_REQUIRED_PYTHON_VERSION!r}:
            raise OSError(f{version_check_fail_msg!r})  # noqa
    """).lstrip())
    out.write('\n\n')

    ##

    # Topologically sort files by their local-import dependencies.
    ts = list(col.toposort({  # noqa
        f.path: {mp for i in f.imports if (mp := i.mod_path) is not None}
        for f in src_files.values()
    }))
    sfs = [sf for ss in ts for sf in sorted(ss)]

    ##

    # Collect typing declarations, deduplicated by source text, grouped by file.
    tyd: dict[str, list[Typing]] = {}
    tys = set()
    for sf in sfs:
        f = src_files[sf]
        for ty in f.typings:
            if ty.src not in tys:
                tyd.setdefault(f.path, []).append(ty)
                tys.add(ty.src)
    if tys:
        out.write(SECTION_SEP)
        out.write('\n\n')
    for i, (sf, ftys) in enumerate(tyd.items()):
        f = src_files[sf]
        if i:
            out.write('\n')
        if f is not mf:
            rp = os.path.relpath(f.path, os.path.dirname(mf.path))
        else:
            rp = os.path.basename(f.path)
        out.write(f'# {rp}\n')
        for ty in ftys:
            out.write(ty.src)
    if tys:
        out.write('\n\n')

    ##

    for i, sf in enumerate(sfs):
        f = src_files[sf]
        out.write(SECTION_SEP)
        if f is not mf:
            # FIX: was os.path.relpath(f.path, mf.path) - relative to the main *file* rather
            # than its directory, yielding paths with a spurious extra '..'. Now consistent
            # with the typings section above.
            rp = os.path.relpath(f.path, os.path.dirname(mf.path))
        else:
            rp = os.path.basename(f.path)
        out.write(f'# {rp}\n')
        if f is not mf and f.header_lines:
            out.write(tks.join_lines(f.header_lines))
        out.write('\n\n')  # FIX: was f'\n\n' - an f-string with no placeholders
        cls = f.content_lines
        if f is not mf:
            cls = strip_main_lines(cls)
        sf_src = tks.join_lines(cls)
        out.write(sf_src.strip())
        if i < len(sfs) - 1:
            out.write('\n\n\n')
        else:
            out.write('\n')

    ##

    return out.getvalue()
|
668
|
-
|
|
669
|
-
|
|
670
|
-
##
|
|
671
|
-
|
|
672
|
-
|
|
673
|
-
def _gen_one(
        input_path: str,
        output_path: str | None,
        *,
        mounts: ta.Mapping[str, str],
) -> None:
    """
    Amalgamate one input file, writing to output_path (preserving the input's file mode) or
    printing to stdout when output_path is None.
    """

    log.info('Generating: %s -> %s', input_path, output_path)

    out_dir = os.path.dirname(output_path if output_path is not None else input_path)
    src = gen_amalg(
        input_path,
        mounts=mounts,
        output_dir=out_dir,
    )

    if output_path is None:
        print(src)
        return

    with open(output_path, 'w') as f:
        f.write(src)
    # Generated scripts keep the source file's permissions (e.g. the executable bit).
    os.chmod(output_path, os.stat(input_path).st_mode)
|
694
|
-
|
|
695
|
-
|
|
696
|
-
def _scan_one(
        input_path: str,
        **kwargs: ta.Any,
) -> None:
    """Scan one .py file for `# @omlish-amalg <target>` markers and generate each target."""

    if not input_path.endswith('.py'):
        return

    with open(input_path) as f:
        src = f.read()

    for sl in (l for l in src.splitlines() if l.startswith(SCAN_COMMENT)):
        sas = sl[len(SCAN_COMMENT):].split()
        if len(sas) != 1:
            raise Exception(f'Invalid scan args: {input_path=} {sas=}')

        # The marker's single argument is the output path, relative to the input file.
        output_path = os.path.abspath(os.path.join(os.path.dirname(input_path), sas[0]))
        _gen_one(
            input_path,
            output_path,
            **kwargs,
        )
|
718
|
-
|
|
719
|
-
|
|
720
|
-
def _gen_cmd(args) -> None:
    """
    `gen` subcommand: amalgamate each input.

    Directory inputs are recursively scanned for `# @omlish-amalg` markers; file inputs are
    generated directly, to --output (if given) or stdout.
    """

    # Refuse to run outside a project root, since mounts and outputs are path-relative.
    if not os.path.isfile('pyproject.toml'):
        raise Exception('Not in project root')

    mounts = {}
    for m in args.mounts or ():
        if ':' not in m:
            mounts[m] = os.path.abspath(m)
        else:
            # FIX: split(':', 1) so mount values containing ':' (e.g. windows drive paths)
            # don't raise an unpacking error; previously any extra ':' crashed.
            k, v = m.split(':', 1)
            mounts[k] = os.path.abspath(v)

    for i in args.inputs:
        if os.path.isdir(i):
            log.info('Scanning %s', i)
            for we_dirpath, we_dirnames, we_filenames in os.walk(i):  # noqa
                for fname in we_filenames:
                    _scan_one(
                        os.path.abspath(os.path.join(we_dirpath, fname)),
                        mounts=mounts,
                    )

        else:
            output_dir = args.output
            if output_dir is not None:
                output_path = check.isinstance(os.path.join(output_dir, os.path.basename(i)), str)
            else:
                output_path = None

            _gen_one(
                os.path.abspath(i),
                output_path,
                mounts=mounts,
            )
|
754
|
-
|
|
755
|
-
|
|
756
|
-
def _build_parser() -> argparse.ArgumentParser:
    """Build the CLI argument parser with its `gen` subcommand."""

    parser = argparse.ArgumentParser()

    subparsers = parser.add_subparsers()

    gen = subparsers.add_parser('gen')
    gen.add_argument('--mount', '-m', dest='mounts', action='append')
    gen.add_argument('--output', '-o')
    gen.add_argument('inputs', nargs='+')
    gen.set_defaults(func=_gen_cmd)

    return parser
|
768
|
-
|
|
769
|
-
|
|
770
|
-
def _main() -> None:
    """CLI entry point: configure logging, parse args, dispatch to the chosen subcommand."""

    logs.configure_standard_logging('INFO')

    parser = _build_parser()
    args = parser.parse_args()
    # No subcommand selected -> show usage instead of failing on a missing `func`.
    if getattr(args, 'func', None):
        args.func(args)
    else:
        parser.print_help()
|
779
|
-
|
|
780
|
-
|
|
781
|
-
# Script entry guard.
if __name__ == '__main__':
    _main()