omdev-0.0.0.dev210-py3-none-any.whl → omdev-0.0.0.dev212-py3-none-any.whl
- omdev/.manifests.json +15 -1
- omdev/__about__.py +0 -4
- omdev/amalg/gen.py +2 -3
- omdev/amalg/imports.py +4 -5
- omdev/amalg/manifests.py +7 -10
- omdev/amalg/resources.py +24 -27
- omdev/amalg/srcfiles.py +7 -10
- omdev/amalg/strip.py +4 -5
- omdev/amalg/types.py +1 -1
- omdev/amalg/typing.py +9 -8
- omdev/ci/cache.py +137 -10
- omdev/ci/ci.py +110 -75
- omdev/ci/cli.py +51 -11
- omdev/ci/compose.py +34 -15
- omdev/ci/{dockertars.py → docker.py} +43 -30
- omdev/ci/github/__init__.py +0 -0
- omdev/ci/github/bootstrap.py +11 -0
- omdev/ci/github/cache.py +355 -0
- omdev/ci/github/cacheapi.py +207 -0
- omdev/ci/github/cli.py +39 -0
- omdev/ci/requirements.py +3 -2
- omdev/ci/shell.py +42 -0
- omdev/ci/utils.py +49 -0
- omdev/scripts/ci.py +1734 -473
- omdev/scripts/interp.py +22 -22
- omdev/scripts/pyproject.py +22 -22
- omdev/tokens/__init__.py +0 -0
- omdev/tokens/all.py +35 -0
- omdev/tokens/tokenizert.py +217 -0
- omdev/{tokens.py → tokens/utils.py} +6 -12
- omdev/tools/mkenv.py +131 -0
- omdev/tools/mkrelimp.py +4 -6
- {omdev-0.0.0.dev210.dist-info → omdev-0.0.0.dev212.dist-info}/METADATA +2 -5
- {omdev-0.0.0.dev210.dist-info → omdev-0.0.0.dev212.dist-info}/RECORD +38 -28
- {omdev-0.0.0.dev210.dist-info → omdev-0.0.0.dev212.dist-info}/LICENSE +0 -0
- {omdev-0.0.0.dev210.dist-info → omdev-0.0.0.dev212.dist-info}/WHEEL +0 -0
- {omdev-0.0.0.dev210.dist-info → omdev-0.0.0.dev212.dist-info}/entry_points.txt +0 -0
- {omdev-0.0.0.dev210.dist-info → omdev-0.0.0.dev212.dist-info}/top_level.txt +0 -0
omdev/scripts/interp.py
CHANGED
@@ -1189,7 +1189,7 @@ def deep_subclasses(cls: ta.Type[T]) -> ta.Iterator[ta.Type[T]]:
 ##
 
 
-def camel_case(name: str, lower: bool = False) -> str:
+def camel_case(name: str, *, lower: bool = False) -> str:
     if not name:
         return ''
     s = ''.join(map(str.capitalize, name.split('_')))  # noqa
@@ -1206,6 +1206,27 @@ def snake_case(name: str) -> str:
 ##
 
 
+def is_dunder(name: str) -> bool:
+    return (
+        name[:2] == name[-2:] == '__' and
+        name[2:3] != '_' and
+        name[-3:-2] != '_' and
+        len(name) > 4
+    )
+
+
+def is_sunder(name: str) -> bool:
+    return (
+        name[0] == name[-1] == '_' and
+        name[1:2] != '_' and
+        name[-2:-1] != '_' and
+        len(name) > 2
+    )
+
+
+##
+
+
 def strip_with_newline(s: str) -> str:
     if not s:
         return ''
@@ -1237,27 +1258,6 @@ def split_keep_delimiter(s, d):
 ##
 
 
-def is_dunder(name: str) -> bool:
-    return (
-        name[:2] == name[-2:] == '__' and
-        name[2:3] != '_' and
-        name[-3:-2] != '_' and
-        len(name) > 4
-    )
-
-
-def is_sunder(name: str) -> bool:
-    return (
-        name[0] == name[-1] == '_' and
-        name[1:2] != '_' and
-        name[-2:-1] != '_' and
-        len(name) > 2
-    )
-
-
-##
-
-
 def attr_repr(obj: ta.Any, *attrs: str) -> str:
     return f'{type(obj).__name__}({", ".join(f"{attr}={getattr(obj, attr)!r}" for attr in attrs)})'
 
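For orientation, a quick sketch of the change's effect (my illustration, not part of the diff): `lower` is now keyword-only, and the dunder/sunder helpers simply moved above `strip_with_newline` unchanged:

    camel_case('foo_bar')              # 'FooBar'
    camel_case('foo_bar', lower=True)  # lowercases the first character, per the flag's name
    camel_case('foo_bar', True)        # now a TypeError: lower is keyword-only
    is_dunder('__init__')              # True
    is_sunder('_missing_')             # True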
omdev/scripts/pyproject.py
CHANGED
@@ -2497,7 +2497,7 @@ def deep_subclasses(cls: ta.Type[T]) -> ta.Iterator[ta.Type[T]]:
 ##
 
 
-def camel_case(name: str, lower: bool = False) -> str:
+def camel_case(name: str, *, lower: bool = False) -> str:
     if not name:
         return ''
     s = ''.join(map(str.capitalize, name.split('_')))  # noqa
@@ -2514,6 +2514,27 @@ def snake_case(name: str) -> str:
 ##
 
 
+def is_dunder(name: str) -> bool:
+    return (
+        name[:2] == name[-2:] == '__' and
+        name[2:3] != '_' and
+        name[-3:-2] != '_' and
+        len(name) > 4
+    )
+
+
+def is_sunder(name: str) -> bool:
+    return (
+        name[0] == name[-1] == '_' and
+        name[1:2] != '_' and
+        name[-2:-1] != '_' and
+        len(name) > 2
+    )
+
+
+##
+
+
 def strip_with_newline(s: str) -> str:
     if not s:
         return ''
@@ -2545,27 +2566,6 @@ def split_keep_delimiter(s, d):
 ##
 
 
-def is_dunder(name: str) -> bool:
-    return (
-        name[:2] == name[-2:] == '__' and
-        name[2:3] != '_' and
-        name[-3:-2] != '_' and
-        len(name) > 4
-    )
-
-
-def is_sunder(name: str) -> bool:
-    return (
-        name[0] == name[-1] == '_' and
-        name[1:2] != '_' and
-        name[-2:-1] != '_' and
-        len(name) > 2
-    )
-
-
-##
-
-
 def attr_repr(obj: ta.Any, *attrs: str) -> str:
     return f'{type(obj).__name__}({", ".join(f"{attr}={getattr(obj, attr)!r}" for attr in attrs)})'
 
omdev/tokens/__init__.py
ADDED
File without changes
omdev/tokens/all.py
ADDED
@@ -0,0 +1,35 @@
+from .tokenizert import (  # noqa
+    TokenNames,
+    Token,
+    TokenOffset,
+
+    Tokenization,
+)
+
+from .utils import (  # noqa
+    Tokens,
+
+    WS_NAMES,
+    is_ws,
+    ignore_ws,
+
+    split_lines,
+    join_toks,
+    join_lines,
+
+    match_toks,
+)
+
+
+##
+
+
+ESCAPED_NL = TokenNames.ESCAPED_NL  # noqa
+UNIMPORTANT_WS = TokenNames.UNIMPORTANT_WS  # noqa
+NON_CODING_TOKENS = TokenNames.NON_CODING_TOKENS  # noqa
+
+curly_escape = Tokenization.curly_escape  # noqa
+src_to_tokens = Tokenization.src_to_tokens  # noqa
+parse_string_literal = Tokenization.parse_string_literal  # noqa
+tokens_to_src = Tokenization.tokens_to_src  # noqa
+rfind_string_parts = Tokenization.rfind_string_parts  # noqa
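A minimal usage sketch of the new façade module (my illustration, assuming an installed omdev; the lossless round-trip is the property tokenize-rt guarantees):

    from omdev.tokens import all as tks

    toks = tks.src_to_tokens('x = 1\n')
    assert tks.tokens_to_src(toks) == 'x = 1\n'  # src -> tokens -> src round-trips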
omdev/tokens/tokenizert.py
ADDED
@@ -0,0 +1,217 @@
+# @omlish-lite
+# ruff: noqa: UP006 UP007
+# Copyright (c) 2017 Anthony Sottile
+#
+# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
+# documentation files (the "Software"), to deal in the Software without restriction, including without limitation the
+# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the
+# Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
+# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+# https://github.com/asottile/tokenize-rt/blob/413692b7c1ad8a873caec39dd4f427d55ee538ea/tokenize_rt.py
+import argparse
+import io
+import keyword
+import re
+import tokenize
+import typing as ta
+
+from omlish.lite.check import check
+
+
+##
+
+
+class TokenNames:
+    def __new__(cls, *args, **kwargs):  # noqa
+        raise TypeError
+
+    ESCAPED_NL = 'ESCAPED_NL'
+    UNIMPORTANT_WS = 'UNIMPORTANT_WS'
+    NON_CODING_TOKENS = frozenset(('COMMENT', ESCAPED_NL, 'NL', UNIMPORTANT_WS))
+
+
+class TokenOffset(ta.NamedTuple):
+    line: ta.Optional[int] = None
+    utf8_byte_offset: ta.Optional[int] = None
+
+
+class Token(ta.NamedTuple):
+    name: str
+    src: str
+    line: ta.Optional[int] = None
+    utf8_byte_offset: ta.Optional[int] = None
+
+    @property
+    def offset(self) -> TokenOffset:
+        return TokenOffset(self.line, self.utf8_byte_offset)
+
+    def matches(self, *, name: str, src: str) -> bool:
+        return self.name == name and self.src == src
+
+
+##
+
+
+class Tokenization:
+    _STRING_RE = re.compile('^([^\'"]*)(.*)$', re.DOTALL)
+    _ESCAPED_NL_RE = re.compile(r'\\(\n|\r\n|\r)')
+
+    _NAMED_UNICODE_RE = re.compile(r'(?<!\\)(?:\\\\)*(\\N\{[^}]+\})')
+
+    @classmethod
+    def curly_escape(cls, s: str) -> str:
+        parts = cls._NAMED_UNICODE_RE.split(s)
+        return ''.join(
+            part.replace('{', '{{').replace('}', '}}') if i % 2 == 0 else part
+            for i, part in enumerate(parts)
+        )
+
+    @classmethod
+    def _re_partition(cls, regex: ta.Pattern[str], s: str) -> ta.Tuple[str, str, str]:
+        match = regex.search(s)
+        if match:
+            return s[:match.start()], s[slice(*match.span())], s[match.end():]
+        else:
+            return (s, '', '')
+
+    @classmethod
+    def src_to_tokens(cls, src: str) -> ta.List[Token]:
+        tokenize_target = io.StringIO(src)
+        lines = ('', *tokenize_target)
+
+        tokenize_target.seek(0)
+
+        tokens = []
+        last_line = 1
+        last_col = 0
+        end_offset = 0
+
+        gen = tokenize.generate_tokens(tokenize_target.readline)
+        for tok_type, tok_text, (sline, scol), (eline, ecol), line in gen:
+            if sline > last_line:
+                newtok = lines[last_line][last_col:]
+                for lineno in range(last_line + 1, sline):
+                    newtok += lines[lineno]
+                if scol > 0:
+                    newtok += lines[sline][:scol]
+
+                # a multiline unimportant whitespace may contain escaped newlines
+                while cls._ESCAPED_NL_RE.search(newtok):
+                    ws, nl, newtok = cls._re_partition(cls._ESCAPED_NL_RE, newtok)
+                    if ws:
+                        tokens.append(
+                            Token(TokenNames.UNIMPORTANT_WS, ws, last_line, end_offset),
+                        )
+                        end_offset += len(ws.encode())
+                    tokens.append(Token(TokenNames.ESCAPED_NL, nl, last_line, end_offset))
+                    end_offset = 0
+                    last_line += 1
+                if newtok:
+                    tokens.append(Token(TokenNames.UNIMPORTANT_WS, newtok, sline, 0))
+                    end_offset = len(newtok.encode())
+                else:
+                    end_offset = 0
+
+            elif scol > last_col:
+                newtok = line[last_col:scol]
+                tokens.append(Token(TokenNames.UNIMPORTANT_WS, newtok, sline, end_offset))
+                end_offset += len(newtok.encode())
+
+            tok_name = tokenize.tok_name[tok_type]
+
+            if tok_name == 'FSTRING_MIDDLE':  # pragma: >=3.12 cover
+                if '{' in tok_text or '}' in tok_text:
+                    new_tok_text = cls.curly_escape(tok_text)
+                    ecol += len(new_tok_text) - len(tok_text)
+                    tok_text = new_tok_text
+
+            tokens.append(Token(tok_name, tok_text, sline, end_offset))
+            last_line, last_col = eline, ecol
+            if sline != eline:
+                end_offset = len(lines[last_line][:last_col].encode())
+            else:
+                end_offset += len(tok_text.encode())
+
+        return tokens
+
+    @classmethod
+    def parse_string_literal(cls, src: str) -> ta.Tuple[str, str]:
+        """parse a string literal's source into (prefix, string)"""
+        match = check.not_none(cls._STRING_RE.match(src))
+        return match.group(1), match.group(2)
+
+    @classmethod
+    def tokens_to_src(cls, tokens: ta.Iterable[Token]) -> str:
+        return ''.join(tok.src for tok in tokens)
+
+    @classmethod
+    def rfind_string_parts(cls, tokens: ta.Sequence[Token], start: int) -> ta.Tuple[int, ...]:
+        """
+        Find the indices of the string parts of a (joined) string literal.
+
+        - `i` should start at the end of the string literal
+        - returns `()` (an empty tuple) for things which are not string literals
+        """
+
+        ret = []
+        depth = 0
+        for i in range(start, -1, -1):
+            token = tokens[i]
+            if token.name == 'STRING':
+                ret.append(i)
+            elif token.name in TokenNames.NON_CODING_TOKENS:
+                pass
+            elif token.src == ')':
+                depth += 1
+            elif depth and token.src == '(':
+                depth -= 1
+                # if we closed the paren(s) make sure it was a parenthesized string
+                # and not actually a call
+                if depth == 0:
+                    for j in range(i - 1, -1, -1):
+                        tok = tokens[j]
+                        if tok.name in TokenNames.NON_CODING_TOKENS:
+                            pass
+                        # this was actually a call and not a parenthesized string
+                        elif (
+                                tok.src in {']', ')'} or (
+                                    tok.name == 'NAME' and
+                                    tok.src not in keyword.kwlist
+                                )
+                        ):
+                            return ()
+                        else:
+                            break
+                    break
+            elif depth:  # it looked like a string but wasn't
+                return ()
+            else:
+                break
+        return tuple(reversed(ret))
+
+
+##
+
+
+if __name__ == '__main__':
+    def main(argv: ta.Optional[ta.Sequence[str]] = None) -> int:
+        parser = argparse.ArgumentParser()
+        parser.add_argument('filename')
+        args = parser.parse_args(argv)
+        with open(args.filename) as f:
+            tokens = Tokenization.src_to_tokens(f.read())
+
+        for token in tokens:
+            line, col = str(token.line), str(token.utf8_byte_offset)
+            print(f'{line}:{col} {token.name} {token.src!r}')
+
+        return 0
+
+    raise SystemExit(main())
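A small sanity sketch against the vendored types (my illustration, not from the wheel):

    tok = Token('NAME', 'foo', line=1, utf8_byte_offset=0)
    assert tok.matches(name='NAME', src='foo')
    assert tok.offset == TokenOffset(1, 0)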
omdev/tokens.py → omdev/tokens/utils.py
RENAMED
@@ -1,16 +1,10 @@
 import itertools
 import typing as ta
 
-from omlish import lang
+from .tokenizert import Token
 
 
-if ta.TYPE_CHECKING:
-    import tokenize_rt as trt
-else:
-    trt = lang.proxy_import('tokenize_rt')
-
-
-Tokens: ta.TypeAlias = ta.Sequence['trt.Token']
+Tokens: ta.TypeAlias = ta.Sequence[Token]
 
 
 ##
@@ -25,15 +19,15 @@ WS_NAMES = (
 )
 
 
-def is_ws(tok: 'trt.Token') -> bool:
+def is_ws(tok: Token) -> bool:
     return tok.name in WS_NAMES
 
 
 def ignore_ws(
-        toks: ta.Iterable['trt.Token'],
+        toks: ta.Iterable[Token],
         *,
         keep: ta.Container[str] = (),
-) -> ta.Iterable['trt.Token']:
+) -> ta.Iterable[Token]:
     return (
         t
        for t in toks
@@ -60,7 +54,7 @@ def join_lines(ls: ta.Iterable[Tokens]) -> str:
 
 
 def match_toks(
-        ts: ta.Iterable['trt.Token'],
+        ts: ta.Iterable[Token],
         pat: ta.Sequence[tuple[str | None, str | tuple[str, ...] | None]],
 ) -> bool:
     it = iter(ts)
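The net effect, sketched (my illustration): annotations now reference the vendored Token directly, so importing these utilities no longer requires tokenize-rt to be installed or proxy-imported:

    from omdev.tokens.tokenizert import Tokenization
    from omdev.tokens.utils import Tokens, is_ws

    toks: Tokens = Tokenization.src_to_tokens('x = 1\n')
    code_toks = [t for t in toks if not is_ws(t)]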
omdev/tools/mkenv.py
ADDED
@@ -0,0 +1,131 @@
+"""
+TODO:
+ - detect file extension
+
+==
+
+export $(./python mkenv.py secrets.yml foo_access_token | xargs)
+eval $(om mkenv -e secrets.yml foo_access_token)
+"""
+import argparse
+import json
+import shlex
+import sys
+import typing as ta
+
+from omlish import check
+from omlish.configs.formats import DEFAULT_CONFIG_FILE_LOADER
+from omlish.specs import jmespath
+
+
+##
+
+
+VALUE_TYPES: tuple[type, ...] = (
+    str,
+    int,
+    float,
+    bool,
+)
+
+
+def extract_item(
+        obj: ta.Any,
+        item: str,
+        *,
+        uppercase_keys: bool = False,
+) -> tuple[str, str]:
+    if '=' not in item:
+        k = item
+        v = obj[k]
+        if uppercase_keys:
+            k = k.upper()
+
+    else:
+        k, p = item.split('=')
+        v = jmespath.search(p, obj)
+
+    #
+
+    if isinstance(v, str):
+        s = v
+
+    elif isinstance(v, bool):
+        s = 'true' if v else 'false'
+
+    else:
+        check.isinstance(v, VALUE_TYPES)
+        s = str(v)
+
+    #
+
+    check.equal(s.strip(), s)
+    for c in '\t\n':
+        check.not_in(c, s)
+
+    #
+
+    return (k, s)
+
+
+def extract_items(
+        obj: ta.Any,
+        items: ta.Iterable[str],
+        **kwargs: ta.Any,
+) -> dict[str, str]:
+    return dict(
+        extract_item(obj, item, **kwargs)
+        for item in items
+    )
+
+
+def _main() -> None:
+    parser = argparse.ArgumentParser()
+
+    parser.add_argument('file')
+    parser.add_argument('-e', '--for-eval', action='store_true')
+    parser.add_argument('-u', '--uppercase', action='store_true')
+    parser.add_argument('item', nargs='*')
+
+    args = parser.parse_args()
+
+    #
+
+    if args.file == '-':
+        obj = json.loads(sys.stdin.read())
+
+    else:
+        data = DEFAULT_CONFIG_FILE_LOADER.load_file(args.file)
+        obj = data.as_map()
+
+    #
+
+    items = extract_items(
+        obj,
+        args.item,
+        uppercase_keys=args.uppercase,
+    )
+
+    #
+
+    if args.for_eval:
+        cmd = ' '.join([
+            'export',
+            *[f'{k}={qv if (qv := shlex.quote(v)) != v else v}' for k, v in items.items()],
+        ])
+        print(cmd)
+
+    else:
+        for k, v in items.items():
+            print(f'{k}={v}')
+
+
+# @omlish-manifest
+_CLI_MODULE = {'$omdev.cli.types.CliModule': {
+    'cmd_name': ['mkenv'],
+    'mod_name': __name__,
+}}
+
+
+if __name__ == '__main__':
+    _main()
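A sketch of the extraction helpers (my illustration, not from the wheel):

    obj = {'foo_access_token': 'abc123', 'nested': {'key': 'v'}}

    extract_items(obj, ['foo_access_token'], uppercase_keys=True)
    # {'FOO_ACCESS_TOKEN': 'abc123'}

    extract_items(obj, ['TOKEN=nested.key'])
    # {'TOKEN': 'v'} -- an '=' item selects a value via a jmespath expression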
omdev/tools/mkrelimp.py
CHANGED
@@ -4,12 +4,10 @@ import logging
 import os.path
 import typing as ta
 
-import tokenize_rt as trt
-
 from omlish.logs import all as logs
 
-from .. import tokens as tks
 from ..cli import CliModule
+from ..tokens import all as tks
 
 
 T = ta.TypeVar('T')
@@ -91,8 +89,8 @@ class Processor:
         ##
 
         new_tks = list(interleave(
-            trt.Token(name='OP', src='.'),
-            [trt.Token(name='NAME', src=p) for p in rel_imp_name_parts],
+            tks.Token(name='OP', src='.'),
+            [tks.Token(name='NAME', src=p) for p in rel_imp_name_parts],
         ))
         out_tks = [
             *pfx,
@@ -111,7 +109,7 @@ class Processor:
         with open(src_file) as f:
             src = f.read()
 
-        ts = trt.src_to_tokens(src)
+        ts = tks.src_to_tokens(src)
         in_ls = tks.split_lines(ts)
         out_ls = [
             self.process_line_tks(
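For context (my sketch: interleave is defined elsewhere in this module and is assumed here to yield its items with the separator between them), the rewritten block builds the NAME/OP token stream for a dotted import path:

    parts = ['foo', 'bar']
    toks = list(interleave(
        tks.Token(name='OP', src='.'),
        [tks.Token(name='NAME', src=p) for p in parts],
    ))
    # tokens spelling 'foo.bar': NAME 'foo', OP '.', NAME 'bar'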
{omdev-0.0.0.dev210.dist-info → omdev-0.0.0.dev212.dist-info}/METADATA
CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.2
 Name: omdev
-Version: 0.0.0.dev210
+Version: 0.0.0.dev212
 Summary: omdev
 Author: wrmsr
 License: BSD-3-Clause
@@ -12,7 +12,7 @@ Classifier: Operating System :: OS Independent
 Classifier: Operating System :: POSIX
 Requires-Python: >=3.12
 License-File: LICENSE
-Requires-Dist: omlish==0.0.0.dev210
+Requires-Dist: omlish==0.0.0.dev212
 Provides-Extra: all
 Requires-Dist: black~=24.10; extra == "all"
 Requires-Dist: pycparser~=2.22; extra == "all"
@@ -24,7 +24,6 @@ Requires-Dist: mypy~=1.11; extra == "all"
 Requires-Dist: gprof2dot~=2024.6; extra == "all"
 Requires-Dist: prompt-toolkit~=3.0; extra == "all"
 Requires-Dist: segno~=1.6; extra == "all"
-Requires-Dist: tokenize-rt~=6.1; extra == "all"
 Requires-Dist: wheel~=0.44; extra == "all"
 Provides-Extra: black
 Requires-Dist: black~=24.10; extra == "black"
@@ -43,7 +42,5 @@ Provides-Extra: ptk
 Requires-Dist: prompt-toolkit~=3.0; extra == "ptk"
 Provides-Extra: qr
 Requires-Dist: segno~=1.6; extra == "qr"
-Provides-Extra: tokens
-Requires-Dist: tokenize-rt~=6.1; extra == "tokens"
 Provides-Extra: wheel
 Requires-Dist: wheel~=0.44; extra == "wheel"