omdev-0.0.0.dev211-py3-none-any.whl → omdev-0.0.0.dev212-py3-none-any.whl
- omdev/.manifests.json +15 -1
- omdev/__about__.py +0 -4
- omdev/amalg/gen.py +2 -3
- omdev/amalg/imports.py +4 -5
- omdev/amalg/manifests.py +7 -10
- omdev/amalg/resources.py +24 -27
- omdev/amalg/srcfiles.py +7 -10
- omdev/amalg/strip.py +4 -5
- omdev/amalg/types.py +1 -1
- omdev/amalg/typing.py +9 -8
- omdev/ci/cli.py +10 -4
- omdev/ci/compose.py +4 -1
- omdev/ci/requirements.py +2 -2
- omdev/scripts/ci.py +16 -7
- omdev/tokens/__init__.py +0 -0
- omdev/tokens/all.py +35 -0
- omdev/tokens/tokenizert.py +217 -0
- omdev/{tokens.py → tokens/utils.py} +6 -12
- omdev/tools/mkenv.py +131 -0
- omdev/tools/mkrelimp.py +4 -6
- {omdev-0.0.0.dev211.dist-info → omdev-0.0.0.dev212.dist-info}/METADATA +2 -5
- {omdev-0.0.0.dev211.dist-info → omdev-0.0.0.dev212.dist-info}/RECORD +26 -22
- {omdev-0.0.0.dev211.dist-info → omdev-0.0.0.dev212.dist-info}/LICENSE +0 -0
- {omdev-0.0.0.dev211.dist-info → omdev-0.0.0.dev212.dist-info}/WHEEL +0 -0
- {omdev-0.0.0.dev211.dist-info → omdev-0.0.0.dev212.dist-info}/entry_points.txt +0 -0
- {omdev-0.0.0.dev211.dist-info → omdev-0.0.0.dev212.dist-info}/top_level.txt +0 -0
omdev/.manifests.json CHANGED
@@ -326,11 +326,25 @@
             }
         }
     },
+    {
+        "module": ".tools.mkenv",
+        "attr": "_CLI_MODULE",
+        "file": "omdev/tools/mkenv.py",
+        "line": 123,
+        "value": {
+            "$.cli.types.CliModule": {
+                "cmd_name": [
+                    "mkenv"
+                ],
+                "mod_name": "omdev.tools.mkenv"
+            }
+        }
+    },
     {
         "module": ".tools.mkrelimp",
         "attr": "_CLI_MODULE",
         "file": "omdev/tools/mkrelimp.py",
-        "line":
+        "line": 146,
         "value": {
             "$.cli.types.CliModule": {
                 "cmd_name": "py/mkrelimp",

omdev/__about__.py CHANGED

omdev/amalg/gen.py CHANGED
@@ -8,11 +8,10 @@ from omlish import collections as col
 from omlish import lang
 from omlish.lite.runtime import LITE_REQUIRED_PYTHON_VERSION
 
-from .. import tokens as tks
+from ..tokens import all as tks
 from .srcfiles import SrcFile
 from .srcfiles import make_src_file
 from .strip import strip_main_lines
-from .types import Tokens
 from .typing import Typing
 
 
@@ -71,7 +70,7 @@ class AmalgGenerator:
         return self._src_files()[self._main_path]
 
     @cached.function
-    def _header_lines(self) -> list[
+    def _header_lines(self) -> list[str]:
         header_lines = []
 
         if self._main_file().header_lines:

omdev/amalg/imports.py CHANGED
@@ -4,8 +4,7 @@ import typing as ta
 
 from omlish import check
 
-from .. import tokens as tks
-from .types import Tokens
+from ..tokens import all as tks
 
 
 ##
@@ -22,11 +21,11 @@ class Import:
 
     mod_path: str | None
 
-    toks: Tokens = dc.field(repr=False)
+    toks: tks.Tokens = dc.field(repr=False)
 
 
 def make_import(
-        lts: Tokens,
+        lts: tks.Tokens,
         *,
         src_path: str,
         mounts: ta.Mapping[str, str],
@@ -90,7 +89,7 @@ def make_import(
         as_=as_,
 
         src_path=src_path,
-        line=ft.line,
+        line=check.not_none(ft.line),
 
         mod_path=mod_path,
 
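The `check.not_none` wrapper is needed because the vendored `Token.line` is `Optional[int]` (see omdev/tokens/tokenizert.py below) while the dataclass fields declare `line: int`; a minimal sketch of the narrowing helper, with an assumed example value:

    from omlish import check

    line: int | None = 42
    narrowed: int = check.not_none(line)  # raises if the value is None
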
omdev/amalg/manifests.py CHANGED
@@ -1,17 +1,14 @@
 import ast
 
-import tokenize_rt as trt
-
 from omlish import check
 
-from .. import tokens as tks
-from .types import Tokens
+from ..tokens import all as tks
 
 
 ##
 
 
-def is_manifest_comment(line: Tokens) -> bool:
+def is_manifest_comment(line: tks.Tokens) -> bool:
     if not line:
         return False
 
@@ -22,10 +19,10 @@ def is_manifest_comment(line: Tokens) -> bool:
 
 
 def comment_out_manifest_comment(
-        line: Tokens,
-        cls: list[Tokens],
+        line: tks.Tokens,
+        cls: list[tks.Tokens],
         i: int,
-) -> tuple[list[Tokens], int]:
+) -> tuple[list[tks.Tokens], int]:
     mls = [line]
     while True:
         mls.append(cls[i])
@@ -41,8 +38,8 @@ def comment_out_manifest_comment(
         check.isinstance(check.single(mmod.body), ast.Assign)
         break
 
-    out: list[Tokens] = [
-        [
+    out: list[tks.Tokens] = [
+        [tks.Token('COMMENT', '# ' + tks.join_toks(ml))]
         for ml in mls
     ]
 
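The rewritten comprehension turns each manifest line into a single COMMENT token; a minimal sketch of the transform, with an assumed one-line input:

    from omdev.tokens import all as tks

    ml = tks.src_to_tokens('_X = 1\n')
    commented = tks.Token('COMMENT', '# ' + tks.join_toks(ml))
    # commented.src == '# _X = 1\n'
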
omdev/amalg/resources.py CHANGED
@@ -4,10 +4,7 @@ import itertools
 import os.path
 import typing as ta
 
-import tokenize_rt as trt
-
-from .. import tokens as tks
-from .types import Tokens
+from ..tokens import all as tks
 
 
 ##
@@ -19,7 +16,7 @@ class RootLevelResourcesRead(ta.NamedTuple):
     resource: str
 
 
-def is_root_level_resources_read(lts: Tokens) -> RootLevelResourcesRead | None:
+def is_root_level_resources_read(lts: tks.Tokens) -> RootLevelResourcesRead | None:
     wts = list(tks.ignore_ws(lts, keep=['INDENT']))
 
     if not tks.match_toks(wts, [
@@ -47,36 +44,36 @@ def is_root_level_resources_read(lts: Tokens) -> RootLevelResourcesRead | None:
 def build_resource_lines(
         rsrc: RootLevelResourcesRead,
         path: str,
-) -> list[Tokens]:
+) -> list[tks.Tokens]:
     rf = os.path.join(os.path.dirname(path), rsrc.resource)
 
     if rsrc.kind == 'binary':
         with open(rf, 'rb') as bf:
             rb = bf.read()  # noqa
 
-        out: list[Tokens] = [[
-
-
-
-
-
-
-
-
-
+        out: list[tks.Tokens] = [[
+            tks.Token(name='NAME', src=rsrc.variable),
+            tks.Token(name='UNIMPORTANT_WS', src=' '),
+            tks.Token(name='OP', src='='),
+            tks.Token(name='UNIMPORTANT_WS', src=' '),
+            tks.Token(name='NAME', src='base64'),
+            tks.Token(name='OP', src='.'),
+            tks.Token(name='NAME', src='b64decode'),
+            tks.Token(name='OP', src='('),
+            tks.Token(name='NL', src='\n'),
         ]]
 
         rb64 = base64.b64encode(rb).decode('ascii')
         for chunk in itertools.batched(rb64, 96):
             out.append([
-
-
-
+                tks.Token(name='UNIMPORTANT_WS', src='    '),
+                tks.Token(name='STRING', src=f"'{''.join(chunk)}'"),
+                tks.Token(name='NL', src='\n'),
             ])
 
         out.append([
-
-
+            tks.Token(name='OP', src=')'),
+            tks.Token(name='NEWLINE', src='\n'),
        ])
 
        return out
@@ -87,12 +84,12 @@ def build_resource_lines(
         rt = rt.replace('\\', '\\\\')  # Escape backslashes
         rt = rt.replace('"""', r'\"\"\"')
         return [[
-
-
-
-
-
-
+            tks.Token(name='NAME', src=rsrc.variable),
+            tks.Token(name='UNIMPORTANT_WS', src=' '),
+            tks.Token(name='OP', src='='),
+            tks.Token(name='UNIMPORTANT_WS', src=' '),
+            tks.Token(name='STRING', src=f'"""\\\n{rt}"""  # noqa\n'),
+            tks.Token(name='NEWLINE', src=''),
         ]]
 
     else:

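Joined back to source, the binary-branch tokens above render as a base64 literal assignment built from implicitly concatenated string chunks; an illustrative result for a hypothetical `_DATA` variable (chunk values are made up):

    _DATA = base64.b64decode(
        'aGVsbG8g'
        'd29ybGQ='
    )
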
omdev/amalg/srcfiles.py CHANGED
@@ -1,12 +1,10 @@
 import dataclasses as dc
 import typing as ta
 
-import tokenize_rt as trt
-
 from omlish import collections as col
 from omlish import lang
 
-from .. import tokens as tks
+from ..tokens import all as tks
 from .imports import Import
 from .imports import make_import
 from .manifests import comment_out_manifest_comment
@@ -15,7 +13,6 @@ from .resources import build_resource_lines
 from .resources import is_root_level_resources_read
 from .strip import split_header_lines
 from .strip import strip_header_lines
-from .types import Tokens
 from .typing import Typing
 from .typing import is_root_level_if_type_checking_block
 from .typing import make_typing
@@ -30,13 +27,13 @@ class SrcFile:
     path: str
 
     src: str = dc.field(repr=False)
-    tokens: Tokens = dc.field(repr=False)
-    lines: ta.Sequence[Tokens] = dc.field(repr=False)
+    tokens: tks.Tokens = dc.field(repr=False)
+    lines: ta.Sequence[tks.Tokens] = dc.field(repr=False)
 
-    header_lines: ta.Sequence[Tokens] = dc.field(repr=False)
+    header_lines: ta.Sequence[tks.Tokens] = dc.field(repr=False)
     imports: ta.Sequence[Import] = dc.field(repr=False)
     typings: ta.Sequence[Typing] = dc.field(repr=False)
-    content_lines: ta.Sequence[Tokens] = dc.field(repr=False)
+    content_lines: ta.Sequence[tks.Tokens] = dc.field(repr=False)
 
     ruff_noqa: ta.AbstractSet[str] = dc.field(repr=False)
 
@@ -51,7 +48,7 @@ def make_src_file(
     with open(path) as f:
         src = f.read().strip()
 
-    tokens =
+    tokens = tks.src_to_tokens(src)
     lines = tks.split_lines(tokens)
 
     header_lines, cls = split_header_lines(lines)
@@ -61,7 +58,7 @@ def make_src_file(
 
     imps: list[Import] = []
     tys: list[Typing] = []
-    ctls: list[Tokens] = []
+    ctls: list[tks.Tokens] = []
 
     has_binary_resources = False
 
omdev/amalg/strip.py CHANGED
@@ -2,8 +2,7 @@ import re
 import typing as ta
 
 from .. import magic
-from .. import tokens as tks
-from .types import Tokens
+from ..tokens import all as tks
 
 
 ##
@@ -12,7 +11,7 @@ from .types import Tokens
 HEADER_NAMES = (*tks.WS_NAMES, 'COMMENT', 'STRING')
 
 
-def split_header_lines(lines: ta.Iterable[Tokens]) -> tuple[list[Tokens], list[Tokens]]:
+def split_header_lines(lines: ta.Iterable[tks.Tokens]) -> tuple[list[tks.Tokens], list[tks.Tokens]]:
     ws = []
     nws = []
     for line in (it := iter(lines)):
@@ -31,7 +30,7 @@ def split_header_lines(lines: ta.Iterable[Tokens]) -> tuple[list[Tokens], list[T
 IF_MAIN_PAT = re.compile(r'if\s+__name__\s+==\s+[\'"]__main__[\'"]\s*:')
 
 
-def strip_main_lines(cls: ta.Sequence[Tokens]) -> list[Tokens]:
+def strip_main_lines(cls: ta.Sequence[tks.Tokens]) -> list[tks.Tokens]:
     out = []
 
     for l in (it := iter(cls)):
@@ -59,7 +58,7 @@ STRIPPED_HEADER_PAT = magic.compile_magic_style_pat(
 )
 
 
-def strip_header_lines(hls: ta.Sequence[Tokens]) -> list[Tokens]:
+def strip_header_lines(hls: ta.Sequence[tks.Tokens]) -> list[tks.Tokens]:
     if hls and tks.join_toks(hls[0]).startswith('#!'):
         hls = hls[1:]
     out = []

omdev/amalg/types.py CHANGED

omdev/amalg/typing.py CHANGED
@@ -1,7 +1,8 @@
 import dataclasses as dc
 
-from .. import tokens as tks
-from .types import Tokens
+from omlish import check
+
+from ..tokens import all as tks
 
 
 ##
@@ -18,11 +19,11 @@ class Typing:
     src_path: str
     line: int
 
-    toks: Tokens = dc.field(repr=False)
+    toks: tks.Tokens = dc.field(repr=False)
 
 
 def _is_typing(
-        lts: Tokens,
+        lts: tks.Tokens,
         *,
         exclude_newtypes: bool = False,
 ) -> bool:
@@ -48,7 +49,7 @@ def _is_typing(
 
 
 def make_typing(
-        lts: Tokens,
+        lts: tks.Tokens,
         *,
         src_path: str,
 ) -> Typing | None:
@@ -63,7 +64,7 @@ def make_typing(
         src=tks.join_toks(lts),
 
         src_path=src_path,
-        line=ft.line,
+        line=check.not_none(ft.line),
 
         toks=lts,
     )
@@ -72,7 +73,7 @@ def make_typing(
 ##
 
 
-def is_root_level_if_type_checking_block(lts: Tokens) -> bool:
+def is_root_level_if_type_checking_block(lts: tks.Tokens) -> bool:
     return tks.match_toks(tks.ignore_ws(lts, keep=['INDENT']), [
         ('NAME', 'if'),
         ('NAME', 'ta'),
@@ -83,7 +84,7 @@ def is_root_level_if_type_checking_block(lts: Tokens) -> bool:
 
 
 def skip_root_level_if_type_checking_block(
-        cls: list[Tokens],
+        cls: list[tks.Tokens],
         i: int,
 ) -> int:
     def skip_block():

omdev/ci/cli.py CHANGED
@@ -112,10 +112,16 @@ class CiCli(ArgparseCli):
         check.state(os.path.isfile(docker_file))
 
         if compose_file is None:
-            compose_file = find_alt_file(
-                '
-
-
+            compose_file = find_alt_file(*[
+                f'{f}.{x}'
+                for f in [
+                    'docker/docker-compose',
+                    'docker/compose',
+                    'docker-compose',
+                    'compose',
+                ]
+                for x in ['yaml', 'yml']
+            ])
         check.state(os.path.isfile(compose_file))
 
         if not requirements_txts:

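The new comprehension expands to eight candidate paths, tried in the order generated (a quick illustrative sketch, not part of the diff):

    # Candidate compose-file paths passed to find_alt_file(), outer loop first:
    candidates = [
        f'{f}.{x}'
        for f in ['docker/docker-compose', 'docker/compose', 'docker-compose', 'compose']
        for x in ['yaml', 'yml']
    ]
    # ['docker/docker-compose.yaml', 'docker/docker-compose.yml', 'docker/compose.yaml',
    #  'docker/compose.yml', 'docker-compose.yaml', 'docker-compose.yml',
    #  'compose.yaml', 'compose.yml']
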
omdev/ci/compose.py CHANGED
@@ -200,7 +200,10 @@ class DockerComposeRun(ExitStacked):
             '-f', compose_file,
             'run',
             '--rm',
-            *itertools.chain.from_iterable(
+            *itertools.chain.from_iterable(
+                ['-e', k]
+                for k in (self._cfg.cmd.env or [])
+            ),
             *(self._cfg.run_options or []),
             self._cfg.service,
             'sh', '-c', shlex.quote(self._cfg.cmd.s),

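For reference, the reformatted splat interleaves a `-e` flag before each configured variable name; a minimal sketch of the expansion, with `['FOO', 'BAR']` as assumed example values:

    import itertools

    env = ['FOO', 'BAR']  # assumed example for self._cfg.cmd.env
    args = list(itertools.chain.from_iterable(['-e', k] for k in env))
    # args == ['-e', 'FOO', '-e', 'BAR']
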
omdev/ci/requirements.py CHANGED

omdev/scripts/ci.py CHANGED
@@ -2461,7 +2461,10 @@ class DockerComposeRun(ExitStacked):
             '-f', compose_file,
             'run',
             '--rm',
-            *itertools.chain.from_iterable(
+            *itertools.chain.from_iterable(
+                ['-e', k]
+                for k in (self._cfg.cmd.env or [])
+            ),
             *(self._cfg.run_options or []),
             self._cfg.service,
             'sh', '-c', shlex.quote(self._cfg.cmd.s),
@@ -3018,10 +3021,10 @@ def download_requirements(
         'pip',
         'download',
         '-d', '/requirements',
-        *itertools.chain.from_iterable(
+        *itertools.chain.from_iterable(
             ['-r', f'/requirements_txt/{os.path.basename(rt)}']
             for rt in requirements_txts
-
+        ),
     )
@@ -3358,10 +3361,16 @@ class CiCli(ArgparseCli):
         check.state(os.path.isfile(docker_file))
 
         if compose_file is None:
-            compose_file = find_alt_file(
-                '
-
-
+            compose_file = find_alt_file(*[
+                f'{f}.{x}'
+                for f in [
+                    'docker/docker-compose',
+                    'docker/compose',
+                    'docker-compose',
+                    'compose',
+                ]
+                for x in ['yaml', 'yml']
+            ])
         check.state(os.path.isfile(compose_file))
 
         if not requirements_txts:

omdev/tokens/__init__.py ADDED
File without changes

omdev/tokens/all.py ADDED
@@ -0,0 +1,35 @@
+from .tokenizert import (  # noqa
+    TokenNames,
+    Token,
+    TokenOffset,
+
+    Tokenization,
+)
+
+from .utils import (  # noqa
+    Tokens,
+
+    WS_NAMES,
+    is_ws,
+    ignore_ws,
+
+    split_lines,
+    join_toks,
+    join_lines,
+
+    match_toks,
+)
+
+
+##
+
+
+ESCAPED_NL = TokenNames.ESCAPED_NL  # noqa
+UNIMPORTANT_WS = TokenNames.UNIMPORTANT_WS  # noqa
+NON_CODING_TOKENS = TokenNames.NON_CODING_TOKENS  # noqa
+
+curly_escape = Tokenization.curly_escape  # noqa
+src_to_tokens = Tokenization.src_to_tokens  # noqa
+parse_string_literal = Tokenization.parse_string_literal  # noqa
+tokens_to_src = Tokenization.tokens_to_src  # noqa
+rfind_string_parts = Tokenization.rfind_string_parts  # noqa
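A minimal sketch of how consumers use this facade (the source string is an arbitrary example):

    from omdev.tokens import all as tks

    src = 'x = 1\n'
    toks = tks.src_to_tokens(src)       # tokenize source into Token named tuples
    assert tks.join_toks(toks) == src   # token srcs concatenate back to the input
    lines = tks.split_lines(toks)       # group tokens by physical line
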
omdev/tokens/tokenizert.py ADDED
@@ -0,0 +1,217 @@
+# @omlish-lite
+# ruff: noqa: UP006 UP007
+# Copyright (c) 2017 Anthony Sottile
+#
+# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
+# documentation files (the "Software"), to deal in the Software without restriction, including without limitation the
+# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the
+# Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
+# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+# https://github.com/asottile/tokenize-rt/blob/413692b7c1ad8a873caec39dd4f427d55ee538ea/tokenize_rt.py
+import argparse
+import io
+import keyword
+import re
+import tokenize
+import typing as ta
+
+from omlish.lite.check import check
+
+
+##
+
+
+class TokenNames:
+    def __new__(cls, *args, **kwargs):  # noqa
+        raise TypeError
+
+    ESCAPED_NL = 'ESCAPED_NL'
+    UNIMPORTANT_WS = 'UNIMPORTANT_WS'
+    NON_CODING_TOKENS = frozenset(('COMMENT', ESCAPED_NL, 'NL', UNIMPORTANT_WS))
+
+
+class TokenOffset(ta.NamedTuple):
+    line: ta.Optional[int] = None
+    utf8_byte_offset: ta.Optional[int] = None
+
+
+class Token(ta.NamedTuple):
+    name: str
+    src: str
+    line: ta.Optional[int] = None
+    utf8_byte_offset: ta.Optional[int] = None
+
+    @property
+    def offset(self) -> TokenOffset:
+        return TokenOffset(self.line, self.utf8_byte_offset)
+
+    def matches(self, *, name: str, src: str) -> bool:
+        return self.name == name and self.src == src
+
+
+##
+
+
+class Tokenization:
+    _STRING_RE = re.compile('^([^\'"]*)(.*)$', re.DOTALL)
+    _ESCAPED_NL_RE = re.compile(r'\\(\n|\r\n|\r)')
+
+    _NAMED_UNICODE_RE = re.compile(r'(?<!\\)(?:\\\\)*(\\N\{[^}]+\})')
+
+    @classmethod
+    def curly_escape(cls, s: str) -> str:
+        parts = cls._NAMED_UNICODE_RE.split(s)
+        return ''.join(
+            part.replace('{', '{{').replace('}', '}}') if i % 2 == 0 else part
+            for i, part in enumerate(parts)
+        )
+
+    @classmethod
+    def _re_partition(cls, regex: ta.Pattern[str], s: str) -> ta.Tuple[str, str, str]:
+        match = regex.search(s)
+        if match:
+            return s[:match.start()], s[slice(*match.span())], s[match.end():]
+        else:
+            return (s, '', '')
+
+    @classmethod
+    def src_to_tokens(cls, src: str) -> ta.List[Token]:
+        tokenize_target = io.StringIO(src)
+        lines = ('', *tokenize_target)
+
+        tokenize_target.seek(0)
+
+        tokens = []
+        last_line = 1
+        last_col = 0
+        end_offset = 0
+
+        gen = tokenize.generate_tokens(tokenize_target.readline)
+        for tok_type, tok_text, (sline, scol), (eline, ecol), line in gen:
+            if sline > last_line:
+                newtok = lines[last_line][last_col:]
+                for lineno in range(last_line + 1, sline):
+                    newtok += lines[lineno]
+                if scol > 0:
+                    newtok += lines[sline][:scol]
+
+                # a multiline unimportant whitespace may contain escaped newlines
+                while cls._ESCAPED_NL_RE.search(newtok):
+                    ws, nl, newtok = cls._re_partition(cls._ESCAPED_NL_RE, newtok)
+                    if ws:
+                        tokens.append(
+                            Token(TokenNames.UNIMPORTANT_WS, ws, last_line, end_offset),
+                        )
+                        end_offset += len(ws.encode())
+                    tokens.append(Token(TokenNames.ESCAPED_NL, nl, last_line, end_offset))
+                    end_offset = 0
+                    last_line += 1
+                if newtok:
+                    tokens.append(Token(TokenNames.UNIMPORTANT_WS, newtok, sline, 0))
+                    end_offset = len(newtok.encode())
+                else:
+                    end_offset = 0
+
+            elif scol > last_col:
+                newtok = line[last_col:scol]
+                tokens.append(Token(TokenNames.UNIMPORTANT_WS, newtok, sline, end_offset))
+                end_offset += len(newtok.encode())
+
+            tok_name = tokenize.tok_name[tok_type]
+
+            if tok_name == 'FSTRING_MIDDLE':  # pragma: >=3.12 cover
+                if '{' in tok_text or '}' in tok_text:
+                    new_tok_text = cls.curly_escape(tok_text)
+                    ecol += len(new_tok_text) - len(tok_text)
+                    tok_text = new_tok_text
+
+            tokens.append(Token(tok_name, tok_text, sline, end_offset))
+            last_line, last_col = eline, ecol
+            if sline != eline:
+                end_offset = len(lines[last_line][:last_col].encode())
+            else:
+                end_offset += len(tok_text.encode())
+
+        return tokens
+
+    @classmethod
+    def parse_string_literal(cls, src: str) -> ta.Tuple[str, str]:
+        """parse a string literal's source into (prefix, string)"""
+        match = check.not_none(cls._STRING_RE.match(src))
+        return match.group(1), match.group(2)
+
+    @classmethod
+    def tokens_to_src(cls, tokens: ta.Iterable[Token]) -> str:
+        return ''.join(tok.src for tok in tokens)
+
+    @classmethod
+    def rfind_string_parts(cls, tokens: ta.Sequence[Token], start: int) -> ta.Tuple[int, ...]:
+        """
+        Find the indicies of the string parts of a (joined) string literal.
+
+        - `i` should start at the end of the string literal
+        - returns `()` (an empty tuple) for things which are not string literals
+        """
+
+        ret = []
+        depth = 0
+        for i in range(start, -1, -1):
+            token = tokens[i]
+            if token.name == 'STRING':
+                ret.append(i)
+            elif token.name in TokenNames.NON_CODING_TOKENS:
+                pass
+            elif token.src == ')':
+                depth += 1
+            elif depth and token.src == '(':
+                depth -= 1
+                # if we closed the paren(s) make sure it was a parenthesized string
+                # and not actually a call
+                if depth == 0:
+                    for j in range(i - 1, -1, -1):
+                        tok = tokens[j]
+                        if tok.name in TokenNames.NON_CODING_TOKENS:
+                            pass
+                        # this was actually a call and not a parenthesized string
+                        elif (
+                                tok.src in {']', ')'} or (
+                                    tok.name == 'NAME' and
+                                    tok.src not in keyword.kwlist
+                                )
+                        ):
+                            return ()
+                        else:
+                            break
+                    break
+            elif depth:  # it looked like a string but wasn't
+                return ()
+            else:
+                break
+        return tuple(reversed(ret))
+
+
+##
+
+
+if __name__ == '__main__':
+    def main(argv: ta.Optional[ta.Sequence[str]] = None) -> int:
+        parser = argparse.ArgumentParser()
+        parser.add_argument('filename')
+        args = parser.parse_args(argv)
+        with open(args.filename) as f:
+            tokens = Tokenization.src_to_tokens(f.read())
+
+        for token in tokens:
+            line, col = str(token.line), str(token.utf8_byte_offset)
+            print(f'{line}:{col} {token.name} {token.src!r}')
+
+        return 0
+
+    raise SystemExit(main())
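The vendored module preserves tokenize-rt's lossless round-trip guarantee; an illustrative check (example source string assumed):

    from omdev.tokens.tokenizert import Tokenization

    src = "greeting = 'hello'  # comment\n"
    toks = Tokenization.src_to_tokens(src)
    assert Tokenization.tokens_to_src(toks) == src  # round-trips exactly
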
omdev/{tokens.py → tokens/utils.py} RENAMED
@@ -1,16 +1,10 @@
 import itertools
 import typing as ta
 
-from omlish import lang
+from .tokenizert import Token
 
 
-if ta.TYPE_CHECKING:
-    import tokenize_rt as trt
-else:
-    trt = lang.proxy_import('tokenize_rt')
-
-
-Tokens: ta.TypeAlias = ta.Sequence['trt.Token']
+Tokens: ta.TypeAlias = ta.Sequence[Token]
 
 
 ##
@@ -25,15 +19,15 @@ WS_NAMES = (
 )
 
 
-def is_ws(tok:
+def is_ws(tok: Token) -> bool:
     return tok.name in WS_NAMES
 
 
 def ignore_ws(
-        toks: ta.Iterable[
+        toks: ta.Iterable[Token],
         *,
         keep: ta.Container[str] = (),
-) -> ta.Iterable[
+) -> ta.Iterable[Token]:
     return (
         t
         for t in toks
@@ -60,7 +54,7 @@ def join_lines(ls: ta.Iterable[Tokens]) -> str:
 
 
 def match_toks(
-        ts: ta.Iterable[
+        ts: ta.Iterable[Token],
         pat: ta.Sequence[tuple[str | None, str | tuple[str, ...] | None]],
 ) -> bool:
     it = iter(ts)

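For example, `match_toks` drives prefix checks like `is_root_level_if_type_checking_block` in omdev/amalg/typing.py; a hedged sketch (the tokens beyond the `('NAME', 'ta')` entry shown in that diff are assumed):

    from omdev.tokens import all as tks

    lts = tks.src_to_tokens('if ta.TYPE_CHECKING:\n')
    assert tks.match_toks(tks.ignore_ws(lts, keep=['INDENT']), [
        ('NAME', 'if'),
        ('NAME', 'ta'),
        ('OP', '.'),
        ('NAME', 'TYPE_CHECKING'),
        ('OP', ':'),
    ])
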
omdev/tools/mkenv.py ADDED
@@ -0,0 +1,131 @@
+"""
+TODO:
+ - detect file extension
+
+==
+
+export $(./python mkenv.py secrets.yml foo_access_token | xargs)
+eval $(om mkenv -e secrets.yml foo_access_token)
+"""
+import argparse
+import json
+import shlex
+import sys
+import typing as ta
+
+from omlish import check
+from omlish.configs.formats import DEFAULT_CONFIG_FILE_LOADER
+from omlish.specs import jmespath
+
+
+##
+
+
+VALUE_TYPES: tuple[type, ...] = (
+    str,
+    int,
+    float,
+    bool,
+)
+
+
+def extract_item(
+        obj: ta.Any,
+        item: str,
+        *,
+        uppercase_keys: bool = False,
+) -> tuple[str, str]:
+    if '=' not in item:
+        k = item
+        v = obj[k]
+        if uppercase_keys:
+            k = k.upper()
+
+    else:
+        k, p = item.split('=')
+        v = jmespath.search(p, obj)
+
+    #
+
+    if isinstance(v, str):
+        s = v
+
+    elif isinstance(v, bool):
+        s = 'true' if v else 'false'
+
+    else:
+        check.isinstance(v, VALUE_TYPES)
+        s = str(v)
+
+    #
+
+    check.equal(s.strip(), s)
+    for c in '\t\n':
+        check.not_in(c, s)
+
+    #
+
+    return (k, s)
+
+
+def extract_items(
+        obj: ta.Any,
+        items: ta.Iterable[str],
+        **kwargs: ta.Any,
+) -> dict[str, str]:
+    return dict(
+        extract_item(obj, item, **kwargs)
+        for item in items
+    )
+
+
+def _main() -> None:
+    parser = argparse.ArgumentParser()
+
+    parser.add_argument('file')
+    parser.add_argument('-e', '--for-eval', action='store_true')
+    parser.add_argument('-u', '--uppercase', action='store_true')
+    parser.add_argument('item', nargs='*')
+
+    args = parser.parse_args()
+
+    #
+
+    if args.file == '-':
+        obj = json.loads(sys.stdin.read())
+
+    else:
+        data = DEFAULT_CONFIG_FILE_LOADER.load_file(args.file)
+        obj = data.as_map()
+
+    #
+
+    items = extract_items(
+        obj,
+        args.item,
+        uppercase_keys=args.uppercase,
+    )
+
+    #
+
+    if args.for_eval:
+        cmd = ' '.join([
+            'export',
+            *[f'{k}={qv if (qv := shlex.quote(v)) != v else v}' for k, v in items.items()],
+        ])
+        print(cmd)
+
+    else:
+        for k, v in items.items():
+            print(f'{k}={v}')
+
+
+# @omlish-manifest
+_CLI_MODULE = {'$omdev.cli.types.CliModule': {
+    'cmd_name': ['mkenv'],
+    'mod_name': __name__,
+}}
+
+
+if __name__ == '__main__':
+    _main()
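End to end, `extract_items` maps requested keys to shell-safe values; a small sketch with a hypothetical parsed secrets file:

    from omdev.tools.mkenv import extract_items

    obj = {'foo_access_token': 'abc123'}  # assumed contents of a parsed secrets.yml
    print(extract_items(obj, ['foo_access_token'], uppercase_keys=True))
    # {'FOO_ACCESS_TOKEN': 'abc123'}
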
omdev/tools/mkrelimp.py CHANGED
@@ -4,12 +4,10 @@ import logging
 import os.path
 import typing as ta
 
-import tokenize_rt as trt
-
 from omlish.logs import all as logs
 
-from .. import tokens as tks
 from ..cli import CliModule
+from ..tokens import all as tks
 
 
 T = ta.TypeVar('T')
@@ -91,8 +89,8 @@ class Processor:
         ##
 
         new_tks = list(interleave(
-
-            [
+            tks.Token(name='OP', src='.'),
+            [tks.Token(name='NAME', src=p) for p in rel_imp_name_parts],
         ))
         out_tks = [
             *pfx,
@@ -111,7 +109,7 @@ class Processor:
         with open(src_file) as f:
             src = f.read()
 
-        ts =
+        ts = tks.src_to_tokens(src)
         in_ls = tks.split_lines(ts)
         out_ls = [
             self.process_line_tks(

{omdev-0.0.0.dev211.dist-info → omdev-0.0.0.dev212.dist-info}/METADATA CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.2
 Name: omdev
-Version: 0.0.0.dev211
+Version: 0.0.0.dev212
 Summary: omdev
 Author: wrmsr
 License: BSD-3-Clause
@@ -12,7 +12,7 @@ Classifier: Operating System :: OS Independent
 Classifier: Operating System :: POSIX
 Requires-Python: >=3.12
 License-File: LICENSE
-Requires-Dist: omlish==0.0.0.dev211
+Requires-Dist: omlish==0.0.0.dev212
 Provides-Extra: all
 Requires-Dist: black~=24.10; extra == "all"
 Requires-Dist: pycparser~=2.22; extra == "all"
@@ -24,7 +24,6 @@ Requires-Dist: mypy~=1.11; extra == "all"
 Requires-Dist: gprof2dot~=2024.6; extra == "all"
 Requires-Dist: prompt-toolkit~=3.0; extra == "all"
 Requires-Dist: segno~=1.6; extra == "all"
-Requires-Dist: tokenize-rt~=6.1; extra == "all"
 Requires-Dist: wheel~=0.44; extra == "all"
 Provides-Extra: black
 Requires-Dist: black~=24.10; extra == "black"
@@ -43,7 +42,5 @@ Provides-Extra: ptk
 Requires-Dist: prompt-toolkit~=3.0; extra == "ptk"
 Provides-Extra: qr
 Requires-Dist: segno~=1.6; extra == "qr"
-Provides-Extra: tokens
-Requires-Dist: tokenize-rt~=6.1; extra == "tokens"
 Provides-Extra: wheel
 Requires-Dist: wheel~=0.44; extra == "wheel"

{omdev-0.0.0.dev211.dist-info → omdev-0.0.0.dev212.dist-info}/RECORD CHANGED
@@ -1,5 +1,5 @@
-omdev/.manifests.json,sha256=
-omdev/__about__.py,sha256=
+omdev/.manifests.json,sha256=kIHxCLwrqvBumPKT0GKsgRPhrJSovHf9jDnWzUDZMHs,9092
+omdev/__about__.py,sha256=j3vFclhFvyPICV6FK4aDApFzMCqJWxv9FaWwdwXrSgw,1215
 omdev/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 omdev/bracepy.py,sha256=I8EdqtDvxzAi3I8TuMEW-RBfwXfqKbwp06CfOdj3L1o,2743
 omdev/classdot.py,sha256=YOvgy6x295I_8NKBbBlRVd3AN7Osirm_Lqt4Wj0j9rY,1631
@@ -10,19 +10,18 @@ omdev/pip.py,sha256=7cZ_IOpekQvgPm_gKnX3Pr8xjqUid50PPScTlZCYVlM,2118
 omdev/revisions.py,sha256=0feRWC0Uttd6K9cCImAHEXoo6-Nuso3tpaHUuhzBlRQ,4985
 omdev/secrets.py,sha256=bcquaBIDKqX4UIKOzUuKrX7nxVCenj67rRHIMIrd9bk,540
 omdev/tagstrings.py,sha256=hrinoRmYCFMt4WYCZAYrocVYKQvIApNGKbJaGz8whqs,5334
-omdev/tokens.py,sha256=zh2TCAfCbcq8ZnoVdQ824jrTiwNy3XJ_oCqlZpLpcCY,1574
 omdev/wheelfile.py,sha256=yfupGcGkbFlmzGzKU64k_vmOKpaKnUlDWxeGn2KdekU,10005
 omdev/amalg/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 omdev/amalg/__main__.py,sha256=1sZH8SLAueWxMxK9ngvndUW3L_rw7f-s_jK3ZP1yAH8,170
-omdev/amalg/gen.py,sha256=
-omdev/amalg/imports.py,sha256=
+omdev/amalg/gen.py,sha256=auSK_NakPiEW-8pLr0JnE6hg995zOmQZaAJh38v1o5Q,5969
+omdev/amalg/imports.py,sha256=me-I_FOD5874NVebZGt_wlc9kxEpuZV-XGuifbNkgNk,2073
 omdev/amalg/main.py,sha256=CNkpAMVXlBoaCmgs68rKSUlL8Xh4AuNdZjSFZPeao_4,4313
-omdev/amalg/manifests.py,sha256=
-omdev/amalg/resources.py,sha256=
-omdev/amalg/srcfiles.py,sha256=
-omdev/amalg/strip.py,sha256=
-omdev/amalg/types.py,sha256=
-omdev/amalg/typing.py,sha256=
+omdev/amalg/manifests.py,sha256=CGwh-DiOU4ZKv0p_uUvdpBl2H72g468kvBHtguOHGMA,902
+omdev/amalg/resources.py,sha256=aIqVd1tbGCv4vIt6gbzqnB0L3PsQsAeqVaZJ_J7QSz8,2692
+omdev/amalg/srcfiles.py,sha256=TKSYSfv78Wb8bcsQS3V76dV_H_MICPwzv06Ar09ew9U,3079
+omdev/amalg/strip.py,sha256=dWQQ5WbtcebLi_PGjPzVYey2mJOnrRES8rkoUS8L52w,1444
+omdev/amalg/types.py,sha256=BXXJI0VctKTsZv_wXiyMMq3-xShxZ1ak0wxXUK8n9_g,89
+omdev/amalg/typing.py,sha256=viaoxX4ijxf1dfrgL5W0jwYxSzFouNhmdUKi-9uL3KU,2088
 omdev/antlr/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 omdev/antlr/consts.py,sha256=6iKuncbRXnGAqemNAceaR2mdFSs-8VbBV-pluM-8Bu4,284
 omdev/antlr/gen.py,sha256=ZuDoh8ksIGlTSufEnJXogpk7gRVfmPirERdLx2nMWDo,3020
@@ -74,10 +73,10 @@ omdev/ci/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 omdev/ci/__main__.py,sha256=Jsrv3P7LX2Cg08W7ByZfZ1JQT4lgLDPW1qNAmShFuMk,75
 omdev/ci/cache.py,sha256=jGrsnYNHnnOLJGIEoyHJUdpVzCBdh4tmy2d22Xgarnk,4132
 omdev/ci/ci.py,sha256=4ZbFrXL22NQLvOZydrUo-MgaW8I6u-hT-N57wDHakyI,7396
-omdev/ci/cli.py,sha256
-omdev/ci/compose.py,sha256=
+omdev/ci/cli.py,sha256=-hxHvbHqPRtm4rhy1ZOTjP_eo5rEYHEDBcWdJqxhVUw,5348
+omdev/ci/compose.py,sha256=IFZSgBdqdrdDf2z3MMDYJU1q-z5-TEvZW6K0CtwNi5s,5140
 omdev/ci/docker.py,sha256=BsKuqRHYSstqC1sQXnbHevNGUBWf5M9PYLobjZO7UMk,3218
-omdev/ci/requirements.py,sha256=
+omdev/ci/requirements.py,sha256=9K-f3DXZANoOonLjCCtpE6wR8JPJxj33ouo0VKNjZUE,2115
 omdev/ci/shell.py,sha256=KTkVZb_piQeB5Z12M2jUS_zGJfLR_Y2W2uYkrQpJpyw,850
 omdev/ci/utils.py,sha256=yiLSFOy4nNN9EY1C8rBe8fXdTmjpfBb6n1IgpzVJ00g,1605
 omdev/ci/github/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -172,7 +171,7 @@ omdev/pyproject/resources/docker-dev.sh,sha256=DHkz5D18jok_oDolfg2mqrvGRWFoCe9GQ
 omdev/pyproject/resources/python.sh,sha256=jvrwddYw2KNttpuImLbdCdJK0HsUNMrHtTnmLvhxQxg,757
 omdev/scripts/__init__.py,sha256=MKCvUAEQwsIvwLixwtPlpBqmkMXLCnjjXyAXvVpDwVk,91
 omdev/scripts/bumpversion.py,sha256=Kn7fo73Hs8uJh3Hi3EIyLOlzLPWAC6dwuD_lZ3cIzuY,1064
-omdev/scripts/ci.py,sha256=
+omdev/scripts/ci.py,sha256=BIWoEqc-AWnjItuADz4b_ufhk7aySwKptCe8nitBzhU,88107
 omdev/scripts/execrss.py,sha256=mR0G0wERBYtQmVIn63lCIIFb5zkCM6X_XOENDFYDBKc,651
 omdev/scripts/exectime.py,sha256=sFb376GflU6s9gNX-2-we8hgH6w5MuQNS9g6i4SqJIo,610
 omdev/scripts/importtrace.py,sha256=oa7CtcWJVMNDbyIEiRHej6ICfABfErMeo4_haIqe18Q,14041
@@ -180,6 +179,10 @@ omdev/scripts/interp.py,sha256=EB8hRemH41IMY_Iho_tZdIYdP0i5Q5z4a-oWr7K4bLk,14126
 omdev/scripts/pyproject.py,sha256=4KmTDZFqeTpNAdhW0DXTzmrH4LjHlp4rE6mRzhnBV1s,243089
 omdev/scripts/slowcat.py,sha256=lssv4yrgJHiWfOiHkUut2p8E8Tq32zB-ujXESQxFFHY,2728
 omdev/scripts/tmpexec.py,sha256=WTYcf56Tj2qjYV14AWmV8SfT0u6Y8eIU6cKgQRvEK3c,1442
+omdev/tokens/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+omdev/tokens/all.py,sha256=4t_3NnU-ph_ehyC3XLB_J_lIda7KD1Xvc7rKexOVBAo,686
+omdev/tokens/tokenizert.py,sha256=y2sFFLJzcYXqP2xiO-6zmJuRYfA02-CGU3CPeD1NXjg,7895
+omdev/tokens/utils.py,sha256=DJA2jsDzrtQxnO0bdkUk1-GmenX4ECugdhl1xOBe7QI,1448
 omdev/tools/__init__.py,sha256=iVJAOQ0viGTQOm0DLX4uZLro-9jOioYJGLg9s0kDx1A,78
 omdev/tools/cloc.py,sha256=jYlMHBae9oGKN4VKeBGuqjiQNcM2be7KIoTF0oNwx_I,5205
 omdev/tools/doc.py,sha256=wvgGhv6aFaV-Zl-Qivejx37i-lKQ207rZ-4K2fPf-Ss,2547
@@ -187,7 +190,8 @@ omdev/tools/docker.py,sha256=KVFckA8eAdiapFUr8xkfMw9Uv3Qy4oNq0e70Lqt1F7I,7352
 omdev/tools/git.py,sha256=fiQc4w2w63PELLLyMXdwpmgpzFzs7UNn35vewWckScM,7514
 omdev/tools/importscan.py,sha256=nhJIhtjDY6eFVlReP7fegvv6L5ZjN-Z2VeyhsBonev4,4639
 omdev/tools/linehisto.py,sha256=0ZNm34EuiZBE9Q2YC6KNLNNydNT8QPSOwvYzXiU9S2Q,8881
-omdev/tools/
+omdev/tools/mkenv.py,sha256=G2tu5bmiyKFyZuqtUoM7Z-6AI6CI86F2LwoIozoWOvo,2300
+omdev/tools/mkrelimp.py,sha256=p_35JBGDec0gvD6AHQ279iHcjU39E7Si6LcEDCMrJpo,4042
 omdev/tools/notebook.py,sha256=q1YMGwM1skHv-dPbtT_cM7UOGFNiMEAxjr6rr6rbobk,3494
 omdev/tools/pip.py,sha256=eBD41hp-V3thGfhUBM3Erxl4CSG-5LG6Szo1sA76P2k,3459
 omdev/tools/prof.py,sha256=hQakAsViJD4gLJpLLZnTkOqmTDAwM48Nx5q-O_aFlYM,1467
@@ -204,9 +208,9 @@ omdev/tools/json/rendering.py,sha256=tMcjOW5edfozcMSTxxvF7WVTsbYLoe9bCKFh50qyaGw
 omdev/tools/pawk/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 omdev/tools/pawk/__main__.py,sha256=VCqeRVnqT1RPEoIrqHFSu4PXVMg4YEgF4qCQm90-eRI,66
 omdev/tools/pawk/pawk.py,sha256=zsEkfQX0jF5bn712uqPAyBSdJt2dno1LH2oeSMNfXQI,11424
-omdev-0.0.0.
-omdev-0.0.0.
-omdev-0.0.0.
-omdev-0.0.0.
-omdev-0.0.0.
-omdev-0.0.0.
+omdev-0.0.0.dev212.dist-info/LICENSE,sha256=B_hVtavaA8zCYDW99DYdcpDLKz1n3BBRjZrcbv8uG8c,1451
+omdev-0.0.0.dev212.dist-info/METADATA,sha256=3Jk_ZtfrtW8Z3LA2VBaaQly2E6XVpPso8BKuL1m0bfI,1638
+omdev-0.0.0.dev212.dist-info/WHEEL,sha256=In9FTNxeP60KnTkGw7wk6mJPYd_dQSjEZmXdBdMCI-8,91
+omdev-0.0.0.dev212.dist-info/entry_points.txt,sha256=dHLXFmq5D9B8qUyhRtFqTGWGxlbx3t5ejedjrnXNYLU,33
+omdev-0.0.0.dev212.dist-info/top_level.txt,sha256=1nr7j30fEWgLYHW3lGR9pkdHkb7knv1U1ES1XRNVQ6k,6
+omdev-0.0.0.dev212.dist-info/RECORD,,

{omdev-0.0.0.dev211.dist-info → omdev-0.0.0.dev212.dist-info}/LICENSE RENAMED
File without changes

{omdev-0.0.0.dev211.dist-info → omdev-0.0.0.dev212.dist-info}/WHEEL RENAMED
File without changes

{omdev-0.0.0.dev211.dist-info → omdev-0.0.0.dev212.dist-info}/entry_points.txt RENAMED
File without changes

{omdev-0.0.0.dev211.dist-info → omdev-0.0.0.dev212.dist-info}/top_level.txt RENAMED
File without changes
|