omdev 0.0.0.dev211__py3-none-any.whl → 0.0.0.dev213__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
omdev/scripts/interp.py CHANGED
@@ -606,6 +606,17 @@ class Checks:
 
     #
 
+    def register_on_raise_breakpoint_if_env_var_set(self, key: str) -> None:
+        import os
+
+        def on_raise(exc: Exception) -> None:  # noqa
+            if key in os.environ:
+                breakpoint()  # noqa
+
+        self.register_on_raise(on_raise)
+
+    #
+
     def set_exception_factory(self, factory: CheckExceptionFactory) -> None:
         self._exception_factory = factory
 
@@ -921,6 +932,18 @@ class Checks:
 
         return v
 
+    def not_equal(self, v: T, o: ta.Any, msg: CheckMessage = None) -> T:
+        if o == v:
+            self._raise(
+                ValueError,
+                'Must not be equal',
+                msg,
+                Checks._ArgsKwargs(v, o),
+                render_fmt='%s == %s',
+            )
+
+        return v
+
     def is_(self, v: T, o: ta.Any, msg: CheckMessage = None) -> T:
         if o is not v:
             self._raise(
@@ -1907,6 +1907,17 @@ class Checks:
 
     #
 
+    def register_on_raise_breakpoint_if_env_var_set(self, key: str) -> None:
+        import os
+
+        def on_raise(exc: Exception) -> None:  # noqa
+            if key in os.environ:
+                breakpoint()  # noqa
+
+        self.register_on_raise(on_raise)
+
+    #
+
     def set_exception_factory(self, factory: CheckExceptionFactory) -> None:
         self._exception_factory = factory
 
@@ -2222,6 +2233,18 @@ class Checks:
 
         return v
 
+    def not_equal(self, v: T, o: ta.Any, msg: CheckMessage = None) -> T:
+        if o == v:
+            self._raise(
+                ValueError,
+                'Must not be equal',
+                msg,
+                Checks._ArgsKwargs(v, o),
+                render_fmt='%s == %s',
+            )
+
+        return v
+
     def is_(self, v: T, o: ta.Any, msg: CheckMessage = None) -> T:
         if o is not v:
             self._raise(
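Taken together, the two `Checks` additions above give a non-equality assertion and an opt-in debugger hook. A minimal sketch of both in use, assuming the global `check` instance from `omlish.lite.check` (the same one the vendored tokenizer below imports); the `OMLISH_BREAKPOINT` variable name is purely illustrative:

from omlish.lite.check import check

# Drop into pdb on any failed check, but only while the (hypothetical)
# OMLISH_BREAKPOINT environment variable is present.
check.register_on_raise_breakpoint_if_env_var_set('OMLISH_BREAKPOINT')

v = check.not_equal(8080, 80)  # passes and returns its first argument
assert v == 8080

try:
    check.not_equal('x', 'x')  # equal values fail the check
except ValueError as e:
    print(e)  # rendered from 'Must not be equal' / '%s == %s'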
omdev/tokens/all.py ADDED
@@ -0,0 +1,35 @@
+from .tokenizert import (  # noqa
+    TokenNames,
+    Token,
+    TokenOffset,
+
+    Tokenization,
+)
+
+from .utils import (  # noqa
+    Tokens,
+
+    WS_NAMES,
+    is_ws,
+    ignore_ws,
+
+    split_lines,
+    join_toks,
+    join_lines,
+
+    match_toks,
+)
+
+
+##
+
+
+ESCAPED_NL = TokenNames.ESCAPED_NL  # noqa
+UNIMPORTANT_WS = TokenNames.UNIMPORTANT_WS  # noqa
+NON_CODING_TOKENS = TokenNames.NON_CODING_TOKENS  # noqa
+
+curly_escape = Tokenization.curly_escape  # noqa
+src_to_tokens = Tokenization.src_to_tokens  # noqa
+parse_string_literal = Tokenization.parse_string_literal  # noqa
+tokens_to_src = Tokenization.tokens_to_src  # noqa
+rfind_string_parts = Tokenization.rfind_string_parts  # noqa
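This facade makes the vendored tokenizer a drop-in for the external tokenize-rt dependency it replaces. A small sketch of the round-trip guarantee, assuming an installed omdev:

from omdev.tokens import all as tks

toks = tks.src_to_tokens('x = 1\n')
assert tks.tokens_to_src(toks) == 'x = 1\n'  # tokenization is lossless

# Whitespace between tokens surfaces as explicit UNIMPORTANT_WS tokens.
print([t.name for t in toks])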
omdev/tokens/tokenizert.py ADDED
@@ -0,0 +1,215 @@
+# @omlish-lite
+# ruff: noqa: UP006 UP007
+# Copyright (c) 2017 Anthony Sottile
+#
+# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
+# documentation files (the "Software"), to deal in the Software without restriction, including without limitation the
+# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the
+# Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
+# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+# https://github.com/asottile/tokenize-rt/blob/413692b7c1ad8a873caec39dd4f427d55ee538ea/tokenize_rt.py
+import argparse
+import io
+import keyword
+import re
+import tokenize
+import typing as ta
+
+from omlish.lite.check import check
+
+
+##
+
+
+class TokenNames:
+    def __new__(cls, *args, **kwargs):  # noqa
+        raise TypeError
+
+    ESCAPED_NL = 'ESCAPED_NL'
+    UNIMPORTANT_WS = 'UNIMPORTANT_WS'
+    NON_CODING_TOKENS = frozenset(('COMMENT', ESCAPED_NL, 'NL', UNIMPORTANT_WS))
+
+
+class TokenOffset(ta.NamedTuple):
+    line: ta.Optional[int] = None
+    utf8_byte_offset: ta.Optional[int] = None
+
+
+class Token(ta.NamedTuple):
+    name: str
+    src: str
+    line: ta.Optional[int] = None
+    utf8_byte_offset: ta.Optional[int] = None
+
+    @property
+    def offset(self) -> TokenOffset:
+        return TokenOffset(self.line, self.utf8_byte_offset)
+
+    def matches(self, *, name: str, src: str) -> bool:
+        return self.name == name and self.src == src
+
+
+##
+
+
+class Tokenization:
+    _STRING_RE = re.compile('^([^\'"]*)(.*)$', re.DOTALL)
+    _ESCAPED_NL_RE = re.compile(r'\\(\n|\r\n|\r)')
+
+    _NAMED_UNICODE_RE = re.compile(r'(?<!\\)(?:\\\\)*(\\N\{[^}]+\})')
+
+    @classmethod
+    def curly_escape(cls, s: str) -> str:
+        parts = cls._NAMED_UNICODE_RE.split(s)
+        return ''.join(
+            part.replace('{', '{{').replace('}', '}}') if i % 2 == 0 else part
+            for i, part in enumerate(parts)
+        )
+
+    @classmethod
+    def _re_partition(cls, regex: ta.Pattern[str], s: str) -> ta.Tuple[str, str, str]:
+        match = regex.search(s)
+        if match:
+            return s[:match.start()], s[slice(*match.span())], s[match.end():]
+        else:
+            return (s, '', '')
+
+    @classmethod
+    def src_to_tokens(cls, src: str) -> ta.List[Token]:
+        tokenize_target = io.StringIO(src)
+        lines = ('', *tokenize_target)
+
+        tokenize_target.seek(0)
+
+        tokens = []
+        last_line = 1
+        last_col = 0
+        end_offset = 0
+
+        gen = tokenize.generate_tokens(tokenize_target.readline)
+        for tok_type, tok_text, (sline, scol), (eline, ecol), line in gen:
+            if sline > last_line:
+                newtok = lines[last_line][last_col:]
+                for lineno in range(last_line + 1, sline):
+                    newtok += lines[lineno]
+                if scol > 0:
+                    newtok += lines[sline][:scol]
+
+                # a multiline unimportant whitespace may contain escaped newlines
+                while cls._ESCAPED_NL_RE.search(newtok):
+                    ws, nl, newtok = cls._re_partition(cls._ESCAPED_NL_RE, newtok)
+                    if ws:
+                        tokens.append(Token(TokenNames.UNIMPORTANT_WS, ws, last_line, end_offset))
+                        end_offset += len(ws.encode())
+                    tokens.append(Token(TokenNames.ESCAPED_NL, nl, last_line, end_offset))
+                    end_offset = 0
+                    last_line += 1
+                if newtok:
+                    tokens.append(Token(TokenNames.UNIMPORTANT_WS, newtok, sline, 0))
+                    end_offset = len(newtok.encode())
+                else:
+                    end_offset = 0
+
+            elif scol > last_col:
+                newtok = line[last_col:scol]
+                tokens.append(Token(TokenNames.UNIMPORTANT_WS, newtok, sline, end_offset))
+                end_offset += len(newtok.encode())
+
+            tok_name = tokenize.tok_name[tok_type]
+
+            if tok_name == 'FSTRING_MIDDLE':  # pragma: >=3.12 cover
+                if '{' in tok_text or '}' in tok_text:
+                    new_tok_text = cls.curly_escape(tok_text)
+                    ecol += len(new_tok_text) - len(tok_text)
+                    tok_text = new_tok_text
+
+            tokens.append(Token(tok_name, tok_text, sline, end_offset))
+            last_line, last_col = eline, ecol
+            if sline != eline:
+                end_offset = len(lines[last_line][:last_col].encode())
+            else:
+                end_offset += len(tok_text.encode())
+
+        return tokens
+
+    @classmethod
+    def parse_string_literal(cls, src: str) -> ta.Tuple[str, str]:
+        """parse a string literal's source into (prefix, string)"""
+        match = check.not_none(cls._STRING_RE.match(src))
+        return match.group(1), match.group(2)
+
+    @classmethod
+    def tokens_to_src(cls, tokens: ta.Iterable[Token]) -> str:
+        return ''.join(tok.src for tok in tokens)
+
+    @classmethod
+    def rfind_string_parts(cls, tokens: ta.Sequence[Token], start: int) -> ta.Tuple[int, ...]:
+        """
+        Find the indicies of the string parts of a (joined) string literal.
+
+        - `i` should start at the end of the string literal
+        - returns `()` (an empty tuple) for things which are not string literals
+        """
+
+        ret = []
+        depth = 0
+        for i in range(start, -1, -1):
+            token = tokens[i]
+            if token.name == 'STRING':
+                ret.append(i)
+            elif token.name in TokenNames.NON_CODING_TOKENS:
+                pass
+            elif token.src == ')':
+                depth += 1
+            elif depth and token.src == '(':
+                depth -= 1
+                # if we closed the paren(s) make sure it was a parenthesized string
+                # and not actually a call
+                if depth == 0:
+                    for j in range(i - 1, -1, -1):
+                        tok = tokens[j]
+                        if tok.name in TokenNames.NON_CODING_TOKENS:
+                            pass
+                        # this was actually a call and not a parenthesized string
+                        elif (
+                                tok.src in {']', ')'} or (
+                                    tok.name == 'NAME' and
+                                    tok.src not in keyword.kwlist
+                                )
+                        ):
+                            return ()
+                        else:
+                            break
+                    break
+            elif depth:  # it looked like a string but wasn't
+                return ()
+            else:
+                break
+        return tuple(reversed(ret))
+
+
+##
+
+
+if __name__ == '__main__':
+    def main(argv: ta.Optional[ta.Sequence[str]] = None) -> int:
+        parser = argparse.ArgumentParser()
+        parser.add_argument('filename')
+        args = parser.parse_args(argv)
+        with open(args.filename) as f:
+            tokens = Tokenization.src_to_tokens(f.read())
+
+        for token in tokens:
+            line, col = str(token.line), str(token.utf8_byte_offset)
+            print(f'{line}:{col} {token.name} {token.src!r}')
+
+        return 0
+
+    raise SystemExit(main())
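A quick sketch exercising the vendored helpers directly; behavior follows the upstream tokenize-rt functions they wrap:

from omdev.tokens.tokenizert import Tokenization

# Split a literal's prefix from its quoted body.
prefix, body = Tokenization.parse_string_literal("rb'abc'")
print(prefix, body)  # rb 'abc'

# Find the indices of the parts of an implicitly-joined string literal,
# scanning backwards from the last STRING token.
toks = Tokenization.src_to_tokens("x = 'a' 'b'\n")
start = max(i for i, t in enumerate(toks) if t.name == 'STRING')
print([toks[i].src for i in Tokenization.rfind_string_parts(toks, start)])
# ["'a'", "'b'"]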
omdev/tokens/utils.py CHANGED
@@ -1,16 +1,10 @@
 import itertools
 import typing as ta
 
-from omlish import lang
+from .tokenizert import Token
 
 
-if ta.TYPE_CHECKING:
-    import tokenize_rt as trt
-else:
-    trt = lang.proxy_import('tokenize_rt')
-
-
-Tokens: ta.TypeAlias = ta.Sequence['trt.Token']
+Tokens: ta.TypeAlias = ta.Sequence[Token]
 
 
 ##
@@ -25,15 +19,15 @@ WS_NAMES = (
 )
 
 
-def is_ws(tok: 'trt.Token') -> bool:
+def is_ws(tok: Token) -> bool:
     return tok.name in WS_NAMES
 
 
 def ignore_ws(
-        toks: ta.Iterable['trt.Token'],
+        toks: ta.Iterable[Token],
         *,
         keep: ta.Container[str] = (),
-) -> ta.Iterable['trt.Token']:
+) -> ta.Iterable[Token]:
     return (
         t
         for t in toks
@@ -60,7 +54,7 @@ def join_lines(ls: ta.Iterable[Tokens]) -> str:
 
 
 def match_toks(
-        ts: ta.Iterable['trt.Token'],
+        ts: ta.Iterable[Token],
         pat: ta.Sequence[tuple[str | None, str | tuple[str, ...] | None]],
 ) -> bool:
     it = iter(ts)
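With the tokenize_rt proxy import gone, the utils accept the vendored Token directly. A hedged sketch, assuming UNIMPORTANT_WS is among the WS_NAMES defined just above the second hunk:

from omdev.tokens.all import Token, ignore_ws, is_ws

toks = [
    Token(name='NAME', src='x'),
    Token(name='UNIMPORTANT_WS', src=' '),
    Token(name='OP', src='='),
]
print(is_ws(toks[1]))                    # True, by WS_NAMES membership
print([t.src for t in ignore_ws(toks)])  # ['x', '='], whitespace filtered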
omdev/tools/mkenv.py ADDED
@@ -0,0 +1,131 @@
+"""
+TODO:
+ - detect file extension
+
+==
+
+export $(./python mkenv.py secrets.yml foo_access_token | xargs)
+eval $(om mkenv -e secrets.yml foo_access_token)
+"""
+import argparse
+import json
+import shlex
+import sys
+import typing as ta
+
+from omlish import check
+from omlish.configs.formats import DEFAULT_CONFIG_FILE_LOADER
+from omlish.specs import jmespath
+
+
+##
+
+
+VALUE_TYPES: tuple[type, ...] = (
+    str,
+    int,
+    float,
+    bool,
+)
+
+
+def extract_item(
+        obj: ta.Any,
+        item: str,
+        *,
+        uppercase_keys: bool = False,
+) -> tuple[str, str]:
+    if '=' not in item:
+        k = item
+        v = obj[k]
+        if uppercase_keys:
+            k = k.upper()
+
+    else:
+        k, p = item.split('=')
+        v = jmespath.search(p, obj)
+
+    #
+
+    if isinstance(v, str):
+        s = v
+
+    elif isinstance(v, bool):
+        s = 'true' if v else 'false'
+
+    else:
+        check.isinstance(v, VALUE_TYPES)
+        s = str(v)
+
+    #
+
+    check.equal(s.strip(), s)
+    for c in '\t\n':
+        check.not_in(c, s)
+
+    #
+
+    return (k, s)
+
+
+def extract_items(
+        obj: ta.Any,
+        items: ta.Iterable[str],
+        **kwargs: ta.Any,
+) -> dict[str, str]:
+    return dict(
+        extract_item(obj, item, **kwargs)
+        for item in items
+    )
+
+
+def _main() -> None:
+    parser = argparse.ArgumentParser()
+
+    parser.add_argument('file')
+    parser.add_argument('-e', '--for-eval', action='store_true')
+    parser.add_argument('-u', '--uppercase', action='store_true')
+    parser.add_argument('item', nargs='*')
+
+    args = parser.parse_args()
+
+    #
+
+    if args.file == '-':
+        obj = json.loads(sys.stdin.read())
+
+    else:
+        data = DEFAULT_CONFIG_FILE_LOADER.load_file(args.file)
+        obj = data.as_map()
+
+    #
+
+    items = extract_items(
+        obj,
+        args.item,
+        uppercase_keys=args.uppercase,
+    )
+
+    #
+
+    if args.for_eval:
+        cmd = ' '.join([
+            'export',
+            *[f'{k}={qv if (qv := shlex.quote(v)) != v else v}' for k, v in items.items()],
+        ])
+        print(cmd)
+
+    else:
+        for k, v in items.items():
+            print(f'{k}={v}')
+
+
+# @omlish-manifest
+_CLI_MODULE = {'$omdev.cli.types.CliModule': {
+    'cmd_name': ['mkenv'],
+    'mod_name': __name__,
+}}
+
+
+if __name__ == '__main__':
+    _main()
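The extraction rules above are easiest to see on a plain mapping: bare items index the top level (optionally uppercased), KEY=path items run a JMESPath query, and non-string scalars are normalized (bool to true/false). A worked sketch, using a plain dict in place of a loaded config file:

from omdev.tools.mkenv import extract_items

obj = {'foo_access_token': 'abc123', 'db': {'port': 5432, 'tls': True}}

print(extract_items(
    obj,
    ['foo_access_token', 'DB_PORT=db.port', 'DB_TLS=db.tls'],
    uppercase_keys=True,
))
# {'FOO_ACCESS_TOKEN': 'abc123', 'DB_PORT': '5432', 'DB_TLS': 'true'}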
omdev/tools/mkrelimp.py CHANGED
@@ -4,12 +4,10 @@ import logging
 import os.path
 import typing as ta
 
-import tokenize_rt as trt
-
 from omlish.logs import all as logs
 
-from .. import tokens as tks
 from ..cli import CliModule
+from ..tokens import all as tks
 
 
 T = ta.TypeVar('T')
@@ -91,8 +89,8 @@ class Processor:
         ##
 
         new_tks = list(interleave(
-            trt.Token(name='OP', src='.'),
-            [trt.Token(name='NAME', src=p) for p in rel_imp_name_parts],
+            tks.Token(name='OP', src='.'),
+            [tks.Token(name='NAME', src=p) for p in rel_imp_name_parts],
         ))
         out_tks = [
             *pfx,
@@ -111,7 +109,7 @@ class Processor:
         with open(src_file) as f:
             src = f.read()
 
-        ts = trt.src_to_tokens(src)
+        ts = tks.src_to_tokens(src)
         in_ls = tks.split_lines(ts)
         out_ls = [
             self.process_line_tks(
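Since the vendored Token keeps tokenize_rt's field names, call sites like the ones above change only their module alias; a two-line sketch:

from omdev.tokens import all as tks

dot = tks.Token(name='OP', src='.')
print(dot.matches(name='OP', src='.'))  # True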
{omdev-0.0.0.dev211.dist-info → omdev-0.0.0.dev213.dist-info}/METADATA RENAMED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.2
 Name: omdev
-Version: 0.0.0.dev211
+Version: 0.0.0.dev213
 Summary: omdev
 Author: wrmsr
 License: BSD-3-Clause
@@ -12,7 +12,7 @@ Classifier: Operating System :: OS Independent
 Classifier: Operating System :: POSIX
 Requires-Python: >=3.12
 License-File: LICENSE
-Requires-Dist: omlish==0.0.0.dev211
+Requires-Dist: omlish==0.0.0.dev213
 Provides-Extra: all
 Requires-Dist: black~=24.10; extra == "all"
 Requires-Dist: pycparser~=2.22; extra == "all"
@@ -24,7 +24,6 @@ Requires-Dist: mypy~=1.11; extra == "all"
 Requires-Dist: gprof2dot~=2024.6; extra == "all"
 Requires-Dist: prompt-toolkit~=3.0; extra == "all"
 Requires-Dist: segno~=1.6; extra == "all"
-Requires-Dist: tokenize-rt~=6.1; extra == "all"
 Requires-Dist: wheel~=0.44; extra == "all"
 Provides-Extra: black
 Requires-Dist: black~=24.10; extra == "black"
@@ -43,7 +42,5 @@ Provides-Extra: ptk
 Requires-Dist: prompt-toolkit~=3.0; extra == "ptk"
 Provides-Extra: qr
 Requires-Dist: segno~=1.6; extra == "qr"
-Provides-Extra: tokens
-Requires-Dist: tokenize-rt~=6.1; extra == "tokens"
 Provides-Extra: wheel
 Requires-Dist: wheel~=0.44; extra == "wheel"