omdev 0.0.0.dev500-py3-none-any.whl → 0.0.0.dev506-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of omdev has been flagged by the registry.
- omdev/.omlish-manifests.json +1 -1
- omdev/__about__.py +2 -2
- omdev/markdown/incparse.py +392 -0
- omdev/pyproject/cli.py +1 -35
- omdev/pyproject/configs.py +1 -1
- omdev/pyproject/tools/pyversions.py +47 -0
- omdev/pyproject/versions.py +40 -0
- omdev/scripts/ci.py +8 -1
- omdev/scripts/interp.py +8 -1
- omdev/scripts/lib/inject.py +8 -1
- omdev/scripts/pyproject.py +66 -39
- omdev/tools/sqlrepl.py +189 -78
- omdev/tui/rich/markdown2.py +219 -18
- {omdev-0.0.0.dev500.dist-info → omdev-0.0.0.dev506.dist-info}/METADATA +6 -6
- {omdev-0.0.0.dev500.dist-info → omdev-0.0.0.dev506.dist-info}/RECORD +19 -17
- {omdev-0.0.0.dev500.dist-info → omdev-0.0.0.dev506.dist-info}/WHEEL +0 -0
- {omdev-0.0.0.dev500.dist-info → omdev-0.0.0.dev506.dist-info}/entry_points.txt +0 -0
- {omdev-0.0.0.dev500.dist-info → omdev-0.0.0.dev506.dist-info}/licenses/LICENSE +0 -0
- {omdev-0.0.0.dev500.dist-info → omdev-0.0.0.dev506.dist-info}/top_level.txt +0 -0
omdev/.omlish-manifests.json
CHANGED
omdev/__about__.py
CHANGED
```diff
@@ -13,7 +13,7 @@ class Project(ProjectBase):
 
     optional_dependencies = {
         'black': [
-            'black ~= 
+            'black ~= 26.1',
        ],
 
        'c': [
@@ -44,7 +44,7 @@ class Project(ProjectBase):
 
        'tui': [
            'rich ~= 14.2',
-            'textual ~= 7.
+            'textual ~= 7.3',  # [syntax]
            'textual-dev ~= 1.8',
            'textual-speedups ~= 0.2',
        ],
```
omdev/markdown/incparse.py
CHANGED
```diff
@@ -114,3 +114,395 @@ class IncrementalMarkdownParser:
             adjusted.append(token)
 
         return adjusted
+
+
+##
+
+
+class ClaudeIncrementalMarkdownParser:
+    # @omlish-llm-author "claude-opus-4-5"
+
+    def __init__(
+            self,
+            *,
+            parser: ta.Optional['md.MarkdownIt'] = None,
+    ) -> None:
+        super().__init__()
+
+        if parser is None:
+            parser = md.MarkdownIt()
+        self._parser = parser
+
+        self._stable_tokens: list[md.token.Token] = []
+        self._buffer = ''
+        self._num_stable_lines = 0
+
+    class FeedOutput(ta.NamedTuple):
+        stable: ta.Sequence['md.token.Token']
+        new_stable: ta.Sequence['md.token.Token']
+        unstable: ta.Sequence['md.token.Token']
+
+    def feed2(self, chunk: str) -> FeedOutput:
+        self._buffer += chunk
+
+        new_tokens = self._parser.parse(self._buffer)
+
+        adjusted_tokens = self._adjust_token_line_numbers(new_tokens, self._num_stable_lines)
+
+        stable_count = self._find_stable_token_count(adjusted_tokens, self._buffer)
+
+        newly_stable: ta.Sequence[md.token.Token]
+        if stable_count > 0:
+            newly_stable = adjusted_tokens[:stable_count]
+
+            max_line = 0
+            for token in newly_stable:
+                if token.map:
+                    max_line = max(max_line, token.map[1])
+
+            if max_line > self._num_stable_lines:
+                lines_to_remove = max_line - self._num_stable_lines
+                lines = self._buffer.split('\n')
+                self._buffer = '\n'.join(lines[lines_to_remove:])
+
+            self._stable_tokens.extend(newly_stable)
+            self._num_stable_lines = max_line
+
+        else:
+            newly_stable = ()
+
+        return ClaudeIncrementalMarkdownParser.FeedOutput(
+            stable=self._stable_tokens,
+            new_stable=newly_stable,
+            unstable=adjusted_tokens[stable_count:],
+        )
+
+    def feed(self, chunk: str) -> list['md.token.Token']:
+        out = self.feed2(chunk)
+        return [*out.stable, *out.unstable]
+
+    def _find_stable_token_count(
+            self,
+            tokens: list['md.token.Token'],
+            buffer: str,
+    ) -> int:
+        if not tokens:
+            return 0
+
+        parent_indices = []
+        for i, token in enumerate(tokens):
+            if token.nesting in (1, 0) and token.level == 0:
+                parent_indices.append(i)
+
+        if len(parent_indices) < 2:
+            return 0
+
+        # Find the last parent index that is fully terminated. We need at least one more parent after it to consider it
+        # stable.
+        buffer_lines = buffer.split('\n')
+
+        for candidate_idx in range(len(parent_indices) - 2, -1, -1):
+            token_list_idx = parent_indices[candidate_idx]
+
+            # Find the end of this block (either the token itself or its closing tag)
+            block_end_idx = token_list_idx
+            if tokens[token_list_idx].nesting == 1:
+                # Opening tag - find corresponding close
+                depth = 1
+                for j in range(token_list_idx + 1, len(tokens)):
+                    if tokens[j].level == 0:
+                        if tokens[j].nesting == 1:
+                            depth += 1
+                        elif tokens[j].nesting == -1:
+                            depth -= 1
+                            if depth == 0:
+                                block_end_idx = j
+                                break
+
+            # Get the line range for this block
+            end_line = 0
+            for t in tokens[:block_end_idx + 1]:
+                if t.map:
+                    end_line = max(end_line, t.map[1])
+
+            # Check if followed by blank line or another clear block boundary. end_line is exclusive (points to line
+            # after the block). Relative to current buffer (not absolute).
+            relative_end = end_line - self._num_stable_lines
+
+            if relative_end < 0:
+                continue
+
+            if relative_end < len(buffer_lines):
+                # Check for blank line or clear termination
+                if relative_end < len(buffer_lines):
+                    following_content = '\n'.join(buffer_lines[relative_end:])
+                    # Stable if: blank line follows, or significant content after
+                    if (
+                            following_content.startswith('\n') or
+                            (following_content.strip() and len(following_content.strip()) > 0)
+                    ):
+                        # Check the next parent token exists and has been parsed
+                        if candidate_idx + 1 < len(parent_indices):
+                            next_parent_idx = parent_indices[candidate_idx + 1]
+                            next_token = tokens[next_parent_idx]
+                            # The next block should start after our block ends
+                            if next_token.map and next_token.map[0] >= end_line - self._num_stable_lines:
+                                return parent_indices[candidate_idx + 1]
+
+        return 0
+
+    def _adjust_token_line_numbers(
+            self,
+            tokens: list['md.token.Token'],
+            line_offset: int,
+    ) -> list['md.token.Token']:
+        adjusted = []
+        for token in tokens:
+            if token.map:
+                token = dc.replace(
+                    token,
+                    map=[token.map[0] + line_offset, token.map[1] + line_offset],
+                )
+
+            adjusted.append(token)
+
+        return adjusted
+
+
+class GptIncrementalMarkdownParser:
+    # @omlish-llm-author "gpt-5.2"
+
+    def __init__(
+            self,
+            *,
+            parser: ta.Optional['md.MarkdownIt'] = None,
+    ) -> None:
+        super().__init__()
+
+        if parser is None:
+            parser = md.MarkdownIt()
+        self._parser = parser
+
+        self._stable_tokens: list[md.token.Token] = []
+        self._buffer = ''
+        self._num_stable_lines = 0  # Number of *source* lines removed from the buffer and committed.
+
+    class FeedOutput(ta.NamedTuple):
+        stable: ta.Sequence['md.token.Token']
+        new_stable: ta.Sequence['md.token.Token']
+        unstable: ta.Sequence['md.token.Token']
+
+    def feed2(self, chunk: str) -> FeedOutput:
+        self._buffer += chunk
+
+        # Parse the current buffer (line numbers are relative to the buffer's start).
+        new_tokens = self._parser.parse(self._buffer)
+
+        # Adjust ALL tokens to account for stable lines from previous parses.
+        adjusted_tokens = self._adjust_token_line_numbers(new_tokens, self._num_stable_lines)
+
+        # Decide how many *source lines* from the front of the buffer are safe to commit permanently.
+        stable_line_cut = self._find_stable_line_cut(self._buffer)
+        stable_abs_line = self._num_stable_lines + stable_line_cut
+
+        newly_stable: ta.Sequence[md.token.Token]
+        if stable_line_cut > 0:
+            # Commit tokens that are wholly before the stable cut.
+            newly_stable_list: list[md.token.Token] = []
+            remaining_list: list[md.token.Token] = []
+
+            for t in adjusted_tokens:
+                # Tokens without maps are treated conservatively as unstable unless we've already committed
+                # all remaining source.
+                if not t.map:
+                    remaining_list.append(t)
+                    continue
+
+                # t.map is [start_line, end_line) in absolute source lines (after adjustment).
+                if t.map[1] <= stable_abs_line:
+                    newly_stable_list.append(t)
+                else:
+                    remaining_list.append(t)
+
+            newly_stable = newly_stable_list
+
+            # Remove committed source lines from the buffer.
+            lines = self._buffer.split('\n')
+            self._buffer = '\n'.join(lines[stable_line_cut:])
+
+            # Persist committed state.
+            self._stable_tokens.extend(newly_stable)
+            self._num_stable_lines = stable_abs_line
+
+            unstable = remaining_list
+
+        else:
+            newly_stable = ()
+            unstable = adjusted_tokens
+
+        return GptIncrementalMarkdownParser.FeedOutput(
+            stable=self._stable_tokens,
+            new_stable=newly_stable,
+            unstable=unstable,
+        )
+
+    def feed(self, chunk: str) -> list['md.token.Token']:
+        out = self.feed2(chunk)
+        return [*out.stable, *out.unstable]
+
+    ##
+    # Stability boundary
+
+    def _find_stable_line_cut(self, buf: str) -> int:
+        """
+        Return a conservative number of *source lines* from the buffer start that can be treated as permanently stable
+        (i.e. future suffixes of the markdown source will not change their parse/render).
+
+        This intentionally errs on the side of keeping more in the unstable tail.
+        """
+
+        if not buf:
+            return 0
+
+        lines = buf.split('\n')
+
+        # Track whether we're inside a fenced code block. This is the biggest retroactive-parse hazard.
+        in_fence = False
+        fence_marker: str | None = None
+
+        # Track whether we're inside a blockquote region (heuristic).
+        in_quote = False
+
+        # Track whether we're inside a list region (heuristic).
+        in_list = False
+
+        # We only commit up to a "hard" boundary: a blank line that is outside fence/quote/list context. Additionally,
+        # we require that the boundary line itself is blank (so setext headings can't reach back).
+        last_safe_cut: int = 0
+
+        def is_blank(s: str) -> bool:
+            return not s.strip()
+
+        def is_fence_line(s: str) -> str | None:
+            st = s.lstrip()
+            if st.startswith('```'):
+                return '```'
+            if st.startswith('~~~'):
+                return '~~~'
+            return None
+
+        def is_quote_line(s: str) -> bool:
+            return s.lstrip().startswith('>')
+
+        def is_list_line(s: str) -> bool:
+            st = s.lstrip()
+            if not st:
+                return False
+            # Very conservative list marker detection.
+            if st[0] in ('-', '*', '+') and len(st) > 1 and st[1].isspace():
+                return True
+            # "1. " / "1) "
+            i = 0
+            while i < len(st) and st[i].isdigit():
+                i += 1
+            if i > 0 and i < len(st) and st[i] in ('.', ')'):
+                j = i + 1
+                return j < len(st) and st[j].isspace()
+            return False
+
+        def is_indented_code(s: str) -> bool:
+            # Indented code blocks (4 spaces / 1 tab) can be sensitive to context; treat as "unstable context" for
+            # committing boundaries.
+            return s.startswith(('    ', '\t'))
+
+        for i, line in enumerate(lines):
+            # Fence tracking.
+            fm = is_fence_line(line)
+            if fm is not None:
+                if not in_fence:
+                    in_fence = True
+                    fence_marker = fm
+                else:
+                    # Only close on the matching marker (conservative).
+                    if fence_marker == fm:
+                        in_fence = False
+                        fence_marker = None
+
+            # Quote tracking (heuristic: treat contiguous quote lines as quote context).
+            if is_quote_line(line):
+                in_quote = True
+            elif is_blank(line):
+                # A blank line is a potential place to end a quote, but only if we are not in a fence.
+                if not in_fence:
+                    in_quote = False
+
+            # List tracking (heuristic: any list marker enters list context; blank lines end list context only if the
+            # following non-blank line is not indented / not list / not quote).
+            if is_list_line(line):
+                in_list = True
+            if is_blank(line) and not in_fence:
+                # Peek ahead to see if the list plausibly continues.
+                j = i + 1
+                while j < len(lines) and is_blank(lines[j]):
+                    j += 1
+                if j >= len(lines):
+                    # End of buffer: keep tail unstable.
+                    pass
+                else:
+                    nxt = lines[j]
+                    if (
+                            not is_indented_code(nxt) and
+                            not is_list_line(nxt) and
+                            not is_quote_line(nxt)
+                    ):
+                        in_list = False
+
+            # Commit boundary selection.
+            if is_blank(line) and not in_fence and not in_quote and not in_list:
+                # Safe to commit through this blank line (i.e. cut after it).
+                last_safe_cut = i + 1
+
+        # Never cut the entire buffer; leave at least one line in the tail so incremental feeds keep working.
+        if last_safe_cut >= len(lines):
+            return 0
+
+        return last_safe_cut
+
+    def _adjust_token_line_numbers(
+            self,
+            tokens: list['md.token.Token'],
+            line_offset: int,
+    ) -> list['md.token.Token']:
+        adjusted: list[md.token.Token] = []
+
+        def adj_tok(t: 'md.token.Token') -> 'md.token.Token':
+            nt = t
+            if nt.map:
+                nt = dc.replace(
+                    nt,
+                    map=[nt.map[0] + line_offset, nt.map[1] + line_offset],
+                )
+
+            # Adjust children maps too (markdown-it uses children for inline tokens).
+            ch = getattr(nt, 'children', None)
+            if ch:
+                new_children: list[md.token.Token] = []
+                changed = False
+                for c in ch:
+                    nc = c
+                    if nc.map:
+                        nc = dc.replace(
+                            nc,
+                            map=[nc.map[0] + line_offset, nc.map[1] + line_offset],
+                        )
+                        changed = True
+                    new_children.append(nc)
+                if changed:
+                    nt = dc.replace(nt, children=new_children)
+
+            return nt
+
+        for token in tokens:
+            adjusted.append(adj_tok(token))
+
+        return adjusted
```
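Both new classes expose the same surface: `feed2()` returns a `FeedOutput` splitting tokens into those committed permanently and those that may still be re-tokenized, while `feed()` flattens that into one token list. A minimal driving sketch (the chunk strings are invented; `markdown-it-py` must be installed):

```python
# Hedged usage sketch; GptIncrementalMarkdownParser is shown, but the Claude
# variant has the same feed()/feed2() interface.
from omdev.markdown.incparse import GptIncrementalMarkdownParser

p = GptIncrementalMarkdownParser()

for chunk in [
    '# Title\n\nfirst paragraph\n\n',
    'second paragraph, still stream',
    'ing...\n\n- a list item\n',
]:
    out = p.feed2(chunk)
    for tok in out.new_stable:
        print('committed:', tok.type, tok.map)  # will never be re-emitted as unstable
    for tok in out.unstable:
        print('tentative:', tok.type, tok.map)  # may change on the next feed
```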
omdev/pyproject/cli.py
CHANGED
```diff
@@ -22,7 +22,6 @@ See:
 import argparse
 import asyncio
 import concurrent.futures as cf
-import dataclasses as dc
 import functools
 import itertools
 import multiprocessing as mp
@@ -47,40 +46,7 @@ from .configs import PyprojectConfigPreparer
 from .pkg import BasePyprojectPackageGenerator
 from .pkg import PyprojectPackageGenerator
 from .venvs import Venv
-
-
-##
-
-
-@dc.dataclass(frozen=True)
-class VersionsFile:
-    name: ta.Optional[str] = '.versions'
-
-    @staticmethod
-    def parse(s: str) -> ta.Mapping[str, str]:
-        return {
-            k: v
-            for l in s.splitlines()
-            if (sl := l.split('#')[0].strip())
-            for k, _, v in (sl.partition('='),)
-        }
-
-    @cached_nullary
-    def contents(self) -> ta.Mapping[str, str]:
-        if not self.name or not os.path.exists(self.name):
-            return {}
-        with open(self.name) as f:
-            s = f.read()
-        return self.parse(s)
-
-    @staticmethod
-    def get_pythons(d: ta.Mapping[str, str]) -> ta.Mapping[str, str]:
-        pfx = 'PYTHON_'
-        return {k[len(pfx):].lower(): v for k, v in d.items() if k.startswith(pfx)}
-
-    @cached_nullary
-    def pythons(self) -> ta.Mapping[str, str]:
-        return self.get_pythons(self.contents())
+from .versions import VersionsFile
 
 
 ##
```
omdev/pyproject/configs.py
CHANGED
```diff
@@ -24,7 +24,7 @@ class PyprojectConfig:
     venvs: ta.Mapping[str, VenvConfig] = dc.field(default_factory=dict)
 
     venvs_dir: str = '.venvs'
-    versions_file: ta.Optional[str] = '.versions'
+    # versions_file: ta.Optional[str] = '.versions'  # FIXME:
 
 
 class PyprojectConfigPreparer:
```
omdev/pyproject/tools/pyversions.py
ADDED
```diff
@@ -0,0 +1,47 @@
+# ruff: noqa: UP006 UP045
+import dataclasses as dc
+import json
+import typing as ta
+import urllib.request
+
+from ..versions import VersionsFile
+
+
+##
+
+
+@dc.dataclass(frozen=True)
+class PyVersion:
+    name: str  # "Python 3.13.11",
+    slug: str  # "python-31311"
+    version: int  # 3
+    is_published: bool
+    is_latest: bool
+    release_date: str  # "2025-12-05T19:24:49Z"
+    pre_release: bool
+    release_page: ta.Optional[str]
+    release_notes_url: str  # "https://docs.python.org/release/3.13.11/whatsnew/changelog.html"
+    show_on_download_page: bool
+    resource_uri: str  # "https://www.python.org/api/v2/downloads/release/1083/"
+
+
+PY_VERSIONS_URL = 'https://www.python.org/api/v2/downloads/release/?is_published=true'
+
+
+def get_py_versions() -> ta.List[PyVersion]:
+    with urllib.request.urlopen(PY_VERSIONS_URL) as r:  # noqa
+        data = json.load(r)
+
+    return [PyVersion(**dct) for dct in data]
+
+
+##
+
+
+def _main() -> None:
+    print(get_py_versions())
+    print(VersionsFile().pythons())
+
+
+if __name__ == '__main__':
+    _main()
```
omdev/pyproject/versions.py
ADDED
```diff
@@ -0,0 +1,40 @@
+# ruff: noqa: UP045
+import dataclasses as dc
+import os.path
+import typing as ta
+
+from omlish.lite.cached import cached_nullary
+
+
+##
+
+
+@dc.dataclass(frozen=True)
+class VersionsFile:
+    name: ta.Optional[str] = '.versions'
+
+    @staticmethod
+    def parse(s: str) -> ta.Mapping[str, str]:
+        return {
+            k: v
+            for l in s.splitlines()
+            if (sl := l.split('#')[0].strip())
+            for k, _, v in (sl.partition('='),)
+        }
+
+    @cached_nullary
+    def contents(self) -> ta.Mapping[str, str]:
+        if not self.name or not os.path.exists(self.name):
+            return {}
+        with open(self.name) as f:
+            s = f.read()
+        return self.parse(s)
+
+    @staticmethod
+    def get_pythons(d: ta.Mapping[str, str]) -> ta.Mapping[str, str]:
+        pfx = 'PYTHON_'
+        return {k[len(pfx):].lower(): v for k, v in d.items() if k.startswith(pfx)}
+
+    @cached_nullary
+    def pythons(self) -> ta.Mapping[str, str]:
+        return self.get_pythons(self.contents())
```
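`parse` drops `#` comments and blank lines, then splits each surviving line on the first `=` (whitespace around the `=` itself is not stripped); `get_pythons` keeps only the `PYTHON_`-prefixed keys, lowercased and de-prefixed. A behavior sketch with invented file contents:

```python
# Invented sample '.versions' contents exercising the parse rules above.
sample = (
    '# toolchain pins\n'
    'PYTHON_13=3.13.1\n'
    'NODE=22.1.0  # trailing comments are stripped too\n'
)

d = VersionsFile.parse(sample)
assert d == {'PYTHON_13': '3.13.1', 'NODE': '22.1.0'}
assert VersionsFile.get_pythons(d) == {'13': '3.13.1'}
```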
omdev/scripts/ci.py
CHANGED
```diff
@@ -125,7 +125,7 @@ def __omlish_amalg__(): # noqa
         dict(path='../../omlish/http/coro/io.py', sha1='2cdf6529c37a37cc0c1db2e02032157cf906d5d6'),
         dict(path='../../omlish/http/parsing.py', sha1='3fea28dc6341908ba7c8fad42bf7bbe711f21b82'),
         dict(path='../../omlish/lite/marshal.py', sha1='96348f5f2a26dc27d842d33cc3927e9da163436b'),
-        dict(path='../../omlish/lite/maybes.py', sha1='
+        dict(path='../../omlish/lite/maybes.py', sha1='04d2fcbea17028a5e6b8e7a7fb742375495ed233'),
         dict(path='../../omlish/lite/runtime.py', sha1='2e752a27ae2bf89b1bb79b4a2da522a3ec360c70'),
         dict(path='../../omlish/lite/timeouts.py', sha1='a0f673033a6943f242e35848d78a41892b9c62a1'),
         dict(path='../../omlish/logs/infos.py', sha1='4dd104bd468a8c438601dd0bbda619b47d2f1620'),
@@ -5015,6 +5015,13 @@ class Maybe(ta.Generic[T]):
         else:
             return other
 
+    @ta.final
+    def or_none(self) -> ta.Optional[T]:
+        if self.present:
+            return self.must()
+        else:
+            return None
+
     @ta.final
     def or_else_get(self, supplier: ta.Callable[[], ta.Union[T, U]]) -> ta.Union[T, U]:
         if self.present:
```
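The new `or_none()` (repeated verbatim in `interp.py` and `lib/inject.py` below, since each amalgamated script carries its own copy of `maybes.py`) collapses a `Maybe[T]` to a plain `ta.Optional[T]`. A semantics sketch, assuming the conventional `Maybe.just` / `Maybe.empty` constructors (those names do not appear in this diff):

```python
# Hypothetical constructors; only `present` and `must()` are shown above.
m = Maybe.just(3)
e = Maybe.empty()

assert m.or_none() == 3     # present: unwrap the value
assert e.or_none() is None  # absent: None instead of raising via must()
```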
omdev/scripts/interp.py
CHANGED
```diff
@@ -65,7 +65,7 @@ def __omlish_amalg__(): # noqa
         dict(path='../../omlish/logs/std/proxy.py', sha1='3e7301a2aa351127f9c85f61b2f85dcc3f15aafb'),
         dict(path='../packaging/specifiers.py', sha1='a56ab4e8c9b174adb523921f6280ac41e0fce749'),
         dict(path='../../omlish/argparse/cli.py', sha1='f4dc3cd353d14386b5da0306768700e396afd2b3'),
-        dict(path='../../omlish/lite/maybes.py', sha1='
+        dict(path='../../omlish/lite/maybes.py', sha1='04d2fcbea17028a5e6b8e7a7fb742375495ed233'),
         dict(path='../../omlish/lite/runtime.py', sha1='2e752a27ae2bf89b1bb79b4a2da522a3ec360c70'),
         dict(path='../../omlish/lite/timeouts.py', sha1='a0f673033a6943f242e35848d78a41892b9c62a1'),
         dict(path='../../omlish/logs/protocols.py', sha1='05ca4d1d7feb50c4e3b9f22ee371aa7bf4b3dbd1'),
@@ -2668,6 +2668,13 @@ class Maybe(ta.Generic[T]):
         else:
             return other
 
+    @ta.final
+    def or_none(self) -> ta.Optional[T]:
+        if self.present:
+            return self.must()
+        else:
+            return None
+
     @ta.final
     def or_else_get(self, supplier: ta.Callable[[], ta.Union[T, U]]) -> ta.Union[T, U]:
         if self.present:
```
omdev/scripts/lib/inject.py
CHANGED
```diff
@@ -34,7 +34,7 @@ def __omlish_amalg__(): # noqa
         dict(path='abstract.py', sha1='a2fc3f3697fa8de5247761e9d554e70176f37aac'),
         dict(path='check.py', sha1='bb6b6b63333699b84462951a854d99ae83195b94'),
         dict(path='reflect.py', sha1='c4fec44bf144e9d93293c996af06f6c65fc5e63d'),
-        dict(path='maybes.py', sha1='
+        dict(path='maybes.py', sha1='04d2fcbea17028a5e6b8e7a7fb742375495ed233'),
         dict(path='inject.py', sha1='6f097e3170019a34ff6834d36fcc9cbeed3a7ab4'),
     ],
 )
@@ -884,6 +884,13 @@ class Maybe(ta.Generic[T]):
         else:
             return other
 
+    @ta.final
+    def or_none(self) -> ta.Optional[T]:
+        if self.present:
+            return self.must()
+        else:
+            return None
+
     @ta.final
     def or_else_get(self, supplier: ta.Callable[[], ta.Union[T, U]]) -> ta.Union[T, U]:
         if self.present:
```