omdev 0.0.0.dev500__py3-none-any.whl → 0.0.0.dev509__py3-none-any.whl

This diff shows the changes between publicly released versions of the package as they appear in their public registry, and is provided for informational purposes only.


@@ -27,7 +27,7 @@
     "module": ".cexts.cmake",
     "attr": "_CLI_MODULE",
     "file": "omdev/cexts/cmake.py",
-    "line": 335,
+    "line": 375,
     "value": {
       "!.cli.types.CliModule": {
         "name": "cmake",
@@ -527,7 +527,7 @@
     "module": ".tools.sqlrepl",
     "attr": "_CLI_MODULE",
     "file": "omdev/tools/sqlrepl.py",
-    "line": 196,
+    "line": 307,
     "value": {
       "!.cli.types.CliModule": {
         "name": "sqlrepl",
omdev/__about__.py CHANGED
@@ -13,7 +13,7 @@ class Project(ProjectBase):
 
     optional_dependencies = {
         'black': [
-            'black ~= 25.12',
+            'black ~= 26.1',
         ],
 
         'c': [
@@ -44,7 +44,7 @@ class Project(ProjectBase):
 
         'tui': [
             'rich ~= 14.2',
-            'textual ~= 7.0',  # [syntax]
+            'textual ~= 7.3',  # [syntax]
             'textual-dev ~= 1.8',
            'textual-speedups ~= 0.2',
         ],
omdev/cache/data/specs.py CHANGED
@@ -63,7 +63,7 @@ class UrlSpec(Spec):
 
     _: dc.KW_ONLY
 
-    headers: ta.Mapping[str, str] | None = dc.field(default=None) | msh.with_field_metadata(omit_if=operator.not_)
+    headers: ta.Mapping[str, str] | None = dc.field(default=None) | msh.with_field_options(omit_if=operator.not_)
 
     @cached.property
     def file_name_or_default(self) -> str:
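
Note: the rename keeps the omit_if=operator.not_ behavior; the predicate is an ordinary callable, so the headers field is omitted from marshaled output whenever its value is falsy (None or an empty mapping). A minimal illustration of just the predicate, independent of omlish:

    import operator

    # operator.not_ returns a bool: True for falsy values (which omit_if
    # would then drop from the marshaled output), False otherwise.
    assert operator.not_(None) is True
    assert operator.not_({}) is True
    assert operator.not_({'Accept': 'application/json'}) is False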
omdev/cexts/cmake.py CHANGED
@@ -55,6 +55,33 @@ log = logs.get_module_logger(globals())
 ##
 
 
+CLANG_TIDY_COMMAND_TEMPLATE = """
+find_program(CLANG_TIDY_EXE NAMES clang-tidy)
+
+if(CLANG_TIDY_EXE)
+    add_custom_target(tidy
+        COMMAND ${CLANG_TIDY_EXE}
+        -p ${CMAKE_BINARY_DIR}
+        ${ALL_SOURCE_FILES}
+        WORKING_DIRECTORY ${CMAKE_SOURCE_DIR}
+        COMMENT "Running clang-tidy on all source files"
+    )
+
+    add_custom_target(tidy-fix
+        COMMAND ${CLANG_TIDY_EXE}
+        -p ${CMAKE_BINARY_DIR}
+        -fix
+        ${ALL_SOURCE_FILES}
+        WORKING_DIRECTORY ${CMAKE_SOURCE_DIR}
+        COMMENT "Running clang-tidy with automatic fixes"
+    )
+endif()
+"""
+
+
+##
+
+
 def _sep_str_grps(*ls: ta.Sequence[str]) -> list[str]:
     o = []
     for i, l in enumerate(ls):
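
Note: the new tidy and tidy-fix targets run clang-tidy over ${ALL_SOURCE_FILES} against the compilation database named by -p ${CMAKE_BINARY_DIR}; a later hunk enables CMAKE_EXPORT_COMPILE_COMMANDS so that database is actually emitted. A rough sketch of driving the generated targets (paths and build dir are hypothetical):

    import subprocess

    # Configure (writes build/compile_commands.json since
    # CMAKE_EXPORT_COMPILE_COMMANDS is ON), then build the tidy target.
    # The target only exists if find_program() located clang-tidy.
    subprocess.run(['cmake', '-S', '.', '-B', 'build'], check=True)
    subprocess.run(['cmake', '--build', 'build', '--target', 'tidy'], check=True)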
@@ -112,6 +139,15 @@ class CmakeProjectGen:
         with open(os.path.join(self.cmake_dir(), '.gitignore'), 'w') as f:
             f.write('\n'.join(sorted(['/cmake-*', '/build'])))
 
+    def add_root_symlinks(self) -> None:
+        for fn in [
+            '.clang-tidy',
+        ]:
+            check.state(os.path.isfile(sfp := os.path.abspath(os.path.join(self._prj_root, fn))))
+            dfp = os.path.join(self.cmake_dir(), fn)
+            rp = os.path.relpath(os.path.abspath(sfp), self.cmake_dir())
+            os.symlink(rp, dfp)
+
     #
 
     @dc.dataclass(frozen=True, kw_only=True)
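
Note: add_root_symlinks links the project root's .clang-tidy into the generated cmake directory through a relative path, so clang-tidy invoked from the cmake tree picks up the root config and the link survives the checkout being relocated. A stdlib-only illustration with hypothetical paths:

    import os.path

    # With the cmake dir nested one level under the project root, the
    # computed link target walks back up to the root's config file.
    prj_root = '/repo'         # hypothetical
    cmake_dir = '/repo/cmake'  # hypothetical
    sfp = os.path.join(prj_root, '.clang-tidy')
    print(os.path.relpath(sfp, cmake_dir))  # -> ../.clang-tidy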
@@ -218,7 +254,7 @@ class CmakeProjectGen:
             ['TARGET', ext_name, 'POST_BUILD'],
             [
                 ' '.join([
-                    'COMMAND ${CMAKE_COMMAND} -E ',
+                    'COMMAND ${CMAKE_COMMAND} -E',
                     f'copy $<TARGET_FILE_NAME:{ext_name}> ../../../{os.path.dirname(ext_src)}/{so_name}',
                 ]),
                 'COMMAND_EXPAND_LISTS',
@@ -235,6 +271,9 @@ class CmakeProjectGen:
         self.g.write(f'project({self.p.prj_name()})')
         self.g.write('')
 
+        self.g.write('set(CMAKE_EXPORT_COMPILE_COMMANDS ON)')
+        self.g.write('')
+
         self.g.write_var(cmake.Var(
             f'{self.var_prefix}_INCLUDE_DIRECTORIES',
             _sep_str_grps(
@@ -309,6 +348,7 @@ class CmakeProjectGen:
 
         self.cmake_dir()
         self.write_git_ignore()
+        self.add_root_symlinks()
 
         out = io.StringIO()
         clg = self._CmakeListsGen(self, out)
omdev/imgur.py CHANGED
@@ -40,7 +40,7 @@ from .home.secrets import load_secrets
 
 
 @dc.dataclass(frozen=True)
-@msh.update_object_metadata(unknown_field='x')
+@msh.update_object_options(unknown_field='x')
 class ImageUploadData:
     id: str
     deletehash: str  # noqa
@@ -66,7 +66,7 @@ class ImageUploadData:
 
 
 @dc.dataclass(frozen=True)
-@msh.update_object_metadata(unknown_field='x')
+@msh.update_object_options(unknown_field='x')
 class ImageUploadResponse:
     status: int
     success: bool
@@ -114,3 +114,395 @@ class IncrementalMarkdownParser:
             adjusted.append(token)
 
         return adjusted
+
+
+##
+
+
+class ClaudeIncrementalMarkdownParser:
+    # @omlish-llm-author "claude-opus-4-5"
+
+    def __init__(
+            self,
+            *,
+            parser: ta.Optional['md.MarkdownIt'] = None,
+    ) -> None:
+        super().__init__()
+
+        if parser is None:
+            parser = md.MarkdownIt()
+        self._parser = parser
+
+        self._stable_tokens: list[md.token.Token] = []
+        self._buffer = ''
+        self._num_stable_lines = 0
+
+    class FeedOutput(ta.NamedTuple):
+        stable: ta.Sequence['md.token.Token']
+        new_stable: ta.Sequence['md.token.Token']
+        unstable: ta.Sequence['md.token.Token']
+
+    def feed2(self, chunk: str) -> FeedOutput:
+        self._buffer += chunk
+
+        new_tokens = self._parser.parse(self._buffer)
+
+        adjusted_tokens = self._adjust_token_line_numbers(new_tokens, self._num_stable_lines)
+
+        stable_count = self._find_stable_token_count(adjusted_tokens, self._buffer)
+
+        newly_stable: ta.Sequence[md.token.Token]
+        if stable_count > 0:
+            newly_stable = adjusted_tokens[:stable_count]
+
+            max_line = 0
+            for token in newly_stable:
+                if token.map:
+                    max_line = max(max_line, token.map[1])
+
+            if max_line > self._num_stable_lines:
+                lines_to_remove = max_line - self._num_stable_lines
+                lines = self._buffer.split('\n')
+                self._buffer = '\n'.join(lines[lines_to_remove:])
+
+            self._stable_tokens.extend(newly_stable)
+            self._num_stable_lines = max_line
+
+        else:
+            newly_stable = ()
+
+        return ClaudeIncrementalMarkdownParser.FeedOutput(
+            stable=self._stable_tokens,
+            new_stable=newly_stable,
+            unstable=adjusted_tokens[stable_count:],
+        )
+
+    def feed(self, chunk: str) -> list['md.token.Token']:
+        out = self.feed2(chunk)
+        return [*out.stable, *out.unstable]
+
+    def _find_stable_token_count(
+            self,
+            tokens: list['md.token.Token'],
+            buffer: str,
+    ) -> int:
+        if not tokens:
+            return 0
+
+        parent_indices = []
+        for i, token in enumerate(tokens):
+            if token.nesting in (1, 0) and token.level == 0:
+                parent_indices.append(i)
+
+        if len(parent_indices) < 2:
+            return 0
+
+        # Find the last parent index that is fully terminated. We need at least one more parent after it to
+        # consider it stable.
+        buffer_lines = buffer.split('\n')
+
+        for candidate_idx in range(len(parent_indices) - 2, -1, -1):
+            token_list_idx = parent_indices[candidate_idx]
+
+            # Find the end of this block (either the token itself or its closing tag).
+            block_end_idx = token_list_idx
+            if tokens[token_list_idx].nesting == 1:
+                # Opening tag - find the corresponding close.
+                depth = 1
+                for j in range(token_list_idx + 1, len(tokens)):
+                    if tokens[j].level == 0:
+                        if tokens[j].nesting == 1:
+                            depth += 1
+                        elif tokens[j].nesting == -1:
+                            depth -= 1
+                    if depth == 0:
+                        block_end_idx = j
+                        break
+
+            # Get the line range for this block.
+            end_line = 0
+            for t in tokens[:block_end_idx + 1]:
+                if t.map:
+                    end_line = max(end_line, t.map[1])
+
+            # Check if the block is followed by a blank line or another clear block boundary. end_line is
+            # exclusive (points to the line after the block) and absolute; make it relative to the current
+            # buffer.
+            relative_end = end_line - self._num_stable_lines
+
+            if relative_end < 0:
+                continue
+
+            if relative_end < len(buffer_lines):
+                # Check for blank line or clear termination
+                if relative_end < len(buffer_lines):
+                    following_content = '\n'.join(buffer_lines[relative_end:])
+                    # Stable if: blank line follows, or significant content after
+                    if (
+                            following_content.startswith('\n') or
+                            (following_content.strip() and len(following_content.strip()) > 0)
+                    ):
+                        # Check the next parent token exists and has been parsed
+                        if candidate_idx + 1 < len(parent_indices):
+                            next_parent_idx = parent_indices[candidate_idx + 1]
+                            next_token = tokens[next_parent_idx]
+                            # The next block should start after our block ends
+                            if next_token.map and next_token.map[0] >= end_line - self._num_stable_lines:
+                                return parent_indices[candidate_idx + 1]
+
+        return 0
+
+    def _adjust_token_line_numbers(
+            self,
+            tokens: list['md.token.Token'],
+            line_offset: int,
+    ) -> list['md.token.Token']:
+        adjusted = []
+        for token in tokens:
+            if token.map:
+                token = dc.replace(
+                    token,
+                    map=[token.map[0] + line_offset, token.map[1] + line_offset],
+                )
+
+            adjusted.append(token)
+
+        return adjusted
+
+
+class GptIncrementalMarkdownParser:
+    # @omlish-llm-author "gpt-5.2"
+
+    def __init__(
+            self,
+            *,
+            parser: ta.Optional['md.MarkdownIt'] = None,
+    ) -> None:
+        super().__init__()
+
+        if parser is None:
+            parser = md.MarkdownIt()
+        self._parser = parser
+
+        self._stable_tokens: list[md.token.Token] = []
+        self._buffer = ''
+        self._num_stable_lines = 0  # Number of *source* lines removed from the buffer and committed.
+
+    class FeedOutput(ta.NamedTuple):
+        stable: ta.Sequence['md.token.Token']
+        new_stable: ta.Sequence['md.token.Token']
+        unstable: ta.Sequence['md.token.Token']
+
+    def feed2(self, chunk: str) -> FeedOutput:
+        self._buffer += chunk
+
+        # Parse the current buffer (line numbers are relative to the buffer's start).
+        new_tokens = self._parser.parse(self._buffer)
+
+        # Adjust ALL tokens to account for stable lines from previous parses.
+        adjusted_tokens = self._adjust_token_line_numbers(new_tokens, self._num_stable_lines)
+
+        # Decide how many *source lines* from the front of the buffer are safe to commit permanently.
+        stable_line_cut = self._find_stable_line_cut(self._buffer)
+        stable_abs_line = self._num_stable_lines + stable_line_cut
+
+        newly_stable: ta.Sequence[md.token.Token]
+        if stable_line_cut > 0:
+            # Commit tokens that are wholly before the stable cut.
+            newly_stable_list: list[md.token.Token] = []
+            remaining_list: list[md.token.Token] = []
+
+            for t in adjusted_tokens:
+                # Tokens without maps are treated conservatively as unstable unless we've already committed
+                # all remaining source.
+                if not t.map:
+                    remaining_list.append(t)
+                    continue
+
+                # t.map is [start_line, end_line) in absolute source lines (after adjustment).
+                if t.map[1] <= stable_abs_line:
+                    newly_stable_list.append(t)
+                else:
+                    remaining_list.append(t)
+
+            newly_stable = newly_stable_list
+
+            # Remove committed source lines from the buffer.
+            lines = self._buffer.split('\n')
+            self._buffer = '\n'.join(lines[stable_line_cut:])
+
+            # Persist committed state.
+            self._stable_tokens.extend(newly_stable)
+            self._num_stable_lines = stable_abs_line
+
+            unstable = remaining_list
+
+        else:
+            newly_stable = ()
+            unstable = adjusted_tokens
+
+        return GptIncrementalMarkdownParser.FeedOutput(
+            stable=self._stable_tokens,
+            new_stable=newly_stable,
+            unstable=unstable,
+        )
+
+    def feed(self, chunk: str) -> list['md.token.Token']:
+        out = self.feed2(chunk)
+        return [*out.stable, *out.unstable]
+
+    ##
+    # Stability boundary
+
+    def _find_stable_line_cut(self, buf: str) -> int:
+        """
+        Return a conservative number of *source lines* from the buffer start that can be treated as permanently
+        stable (i.e. future suffixes of the markdown source will not change their parse/render).
+
+        This intentionally errs on the side of keeping more in the unstable tail.
+        """
+
+        if not buf:
+            return 0
+
+        lines = buf.split('\n')
+
+        # Track whether we're inside a fenced code block. This is the biggest retroactive-parse hazard.
+        in_fence = False
+        fence_marker: str | None = None
+
+        # Track whether we're inside a blockquote region (heuristic).
+        in_quote = False
+
+        # Track whether we're inside a list region (heuristic).
+        in_list = False
+
+        # We only commit up to a "hard" boundary: a blank line that is outside fence/quote/list context.
+        # Additionally, we require that the boundary line itself is blank (so setext headings can't reach back).
+        last_safe_cut: int = 0
+
+        def is_blank(s: str) -> bool:
+            return not s.strip()
+
+        def is_fence_line(s: str) -> str | None:
+            st = s.lstrip()
+            if st.startswith('```'):
+                return '```'
+            if st.startswith('~~~'):
+                return '~~~'
+            return None
+
+        def is_quote_line(s: str) -> bool:
+            return s.lstrip().startswith('>')
+
+        def is_list_line(s: str) -> bool:
+            st = s.lstrip()
+            if not st:
+                return False
+            # Very conservative list marker detection.
+            if st[0] in ('-', '*', '+') and len(st) > 1 and st[1].isspace():
+                return True
+            # "1. " / "1) "
+            i = 0
+            while i < len(st) and st[i].isdigit():
+                i += 1
+            if i > 0 and i < len(st) and st[i] in ('.', ')'):
+                j = i + 1
+                return j < len(st) and st[j].isspace()
+            return False
+
+        def is_indented_code(s: str) -> bool:
+            # Indented code blocks (4 spaces / 1 tab) can be sensitive to context; treat as "unstable context"
+            # for committing boundaries.
+            return s.startswith(('    ', '\t'))
+
+        for i, line in enumerate(lines):
+            # Fence tracking.
+            fm = is_fence_line(line)
+            if fm is not None:
+                if not in_fence:
+                    in_fence = True
+                    fence_marker = fm
+                else:
+                    # Only close on the matching marker (conservative).
+                    if fence_marker == fm:
+                        in_fence = False
+                        fence_marker = None
+
+            # Quote tracking (heuristic: treat contiguous quote lines as quote context).
+            if is_quote_line(line):
+                in_quote = True
+            elif is_blank(line):
+                # A blank line is a potential place to end a quote, but only if we are not in a fence.
+                if not in_fence:
+                    in_quote = False
+
+            # List tracking (heuristic: any list marker enters list context; blank lines end list context only
+            # if the following non-blank line is not indented / not list / not quote).
+            if is_list_line(line):
+                in_list = True
+            if is_blank(line) and not in_fence:
+                # Peek ahead to see if the list plausibly continues.
+                j = i + 1
+                while j < len(lines) and is_blank(lines[j]):
+                    j += 1
+                if j >= len(lines):
+                    # End of buffer: keep tail unstable.
+                    pass
+                else:
+                    nxt = lines[j]
+                    if (
+                            not is_indented_code(nxt) and
+                            not is_list_line(nxt) and
+                            not is_quote_line(nxt)
+                    ):
+                        in_list = False
+
+            # Commit boundary selection.
+            if is_blank(line) and not in_fence and not in_quote and not in_list:
+                # Safe to commit through this blank line (i.e. cut after it).
+                last_safe_cut = i + 1
+
+        # Never cut the entire buffer; leave at least one line in the tail so incremental feeds keep working.
+        if last_safe_cut >= len(lines):
+            return 0
+
+        return last_safe_cut
+
+    def _adjust_token_line_numbers(
+            self,
+            tokens: list['md.token.Token'],
+            line_offset: int,
+    ) -> list['md.token.Token']:
+        adjusted: list[md.token.Token] = []
+
+        def adj_tok(t: 'md.token.Token') -> 'md.token.Token':
+            nt = t
+            if nt.map:
+                nt = dc.replace(
+                    nt,
+                    map=[nt.map[0] + line_offset, nt.map[1] + line_offset],
+                )
+
+            # Adjust children maps too (markdown-it uses children for inline tokens).
+            ch = getattr(nt, 'children', None)
+            if ch:
+                new_children: list[md.token.Token] = []
+                changed = False
+                for c in ch:
+                    nc = c
+                    if nc.map:
+                        nc = dc.replace(
+                            nc,
+                            map=[nc.map[0] + line_offset, nc.map[1] + line_offset],
+                        )
+                        changed = True
+                    new_children.append(nc)
+                if changed:
+                    nt = dc.replace(nt, children=new_children)
+
+            return nt
+
+        for token in tokens:
+            adjusted.append(adj_tok(token))
+
+        return adjusted
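
Note: both parser variants expose the same interface: feed2 returns the accumulated stable tokens, the tokens newly committed by this chunk, and the still-unstable tail, while feed flattens that into one token list. The stability bookkeeping rests on markdown-it-py's block-token map attribute, a [start_line, end_line) pair of source line numbers, which is what the offset adjustment and buffer trimming above manipulate. For example:

    import markdown_it

    # Opening block tokens carry map=[start, end) source-line ranges;
    # closing tokens have map=None.
    for t in markdown_it.MarkdownIt().parse('first paragraph\n\nsecond one\n'):
        print(t.type, t.map)  # e.g. paragraph_open [0, 1], paragraph_open [2, 3]

A hypothetical streaming loop over either class (chunk contents made up):

    p = GptIncrementalMarkdownParser()
    for chunk in ['# Title\n\nfirst paragraph\n', '\nsecond p', 'aragraph\n\n']:
        out = p.feed2(chunk)
        for t in out.new_stable:
            print('committed:', t.type, t.map)
        print('pending:', [t.type for t in out.unstable])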
@@ -327,6 +327,10 @@ class Package:
     unfiltered_candidates: ta.Sequence[Candidate] | None = None
     candidates: ta.Sequence[Candidate] | None = None
 
+    @cached.function
+    def candidates_by_version(self) -> ta.Mapping[Version, ta.Sequence[Candidate]]:
+        return col.multi_map_by(lambda c: c.version, check.not_none(self.candidates))
+
     latest_candidate: Candidate | None = None
     suggested_candidate: Candidate | None = None
@@ -511,7 +515,9 @@ def format_for_columns(pkgs: ta.Sequence[Package]) -> tuple[list[list[str]], lis
 
     header = [
         'Package',
+
         'Current',
+        'Age',
 
         'Suggested',
         'Age',
@@ -543,9 +549,19 @@ def format_for_columns(pkgs: ta.Sequence[Package]) -> tuple[list[list[str]], lis
 
         row = [
             pkg.dist.raw_name,
+
            pkg.dist.raw_version,
         ]
 
+        if (
+                (cs := pkg.candidates_by_version().get(pkg.dist.version)) and
+                # FIXME: lame and wrong lol
+                (c_ut := cs[0].upload_time()) is not None
+        ):
+            row.append(human_round_td(now_utc() - c_ut))
+        else:
+            row.append('')
+
         def add_c(c):
             if c is None:
                 row.extend(['', ''])
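
Note: the new Age cell shows how long ago the currently-installed version was uploaded; human_round_td and now_utc are omlish helpers, and the cs[0] indexing is flagged as wrong by the diff's own FIXME. The underlying arithmetic is ordinary timezone-aware datetime subtraction:

    import datetime

    # Sketch of the age computation with a made-up upload time.
    now = datetime.datetime.now(datetime.timezone.utc)
    upload_time = now - datetime.timedelta(days=40, hours=3)
    age = now - upload_time
    print(f'{age.days}d')  # human_round_td renders this more nicely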
omdev/pyproject/cli.py CHANGED
@@ -22,7 +22,6 @@ See:
 import argparse
 import asyncio
 import concurrent.futures as cf
-import dataclasses as dc
 import functools
 import itertools
 import multiprocessing as mp
@@ -47,40 +46,7 @@ from .configs import PyprojectConfigPreparer
 from .pkg import BasePyprojectPackageGenerator
 from .pkg import PyprojectPackageGenerator
 from .venvs import Venv
-
-
-##
-
-
-@dc.dataclass(frozen=True)
-class VersionsFile:
-    name: ta.Optional[str] = '.versions'
-
-    @staticmethod
-    def parse(s: str) -> ta.Mapping[str, str]:
-        return {
-            k: v
-            for l in s.splitlines()
-            if (sl := l.split('#')[0].strip())
-            for k, _, v in (sl.partition('='),)
-        }
-
-    @cached_nullary
-    def contents(self) -> ta.Mapping[str, str]:
-        if not self.name or not os.path.exists(self.name):
-            return {}
-        with open(self.name) as f:
-            s = f.read()
-        return self.parse(s)
-
-    @staticmethod
-    def get_pythons(d: ta.Mapping[str, str]) -> ta.Mapping[str, str]:
-        pfx = 'PYTHON_'
-        return {k[len(pfx):].lower(): v for k, v in d.items() if k.startswith(pfx)}
-
-    @cached_nullary
-    def pythons(self) -> ta.Mapping[str, str]:
-        return self.get_pythons(self.contents())
+from .versions import VersionsFile
 
 
 ##
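
Note: the VersionsFile class removed here now lives in the new .versions module (imported above). Its parse method reads KEY=VALUE lines, ignoring '#' comments; the removed code's behavior, which the relocated module is assumed to keep:

    # Same dict comprehension as the removed VersionsFile.parse.
    src = 'PYTHON_3.13=3.13.1  # default\nPYTHON_3.12=3.12.8\n\n# comment only\n'
    parsed = {
        k: v
        for l in src.splitlines()
        if (sl := l.split('#')[0].strip())
        for k, _, v in (sl.partition('='),)
    }
    assert parsed == {'PYTHON_3.13': '3.13.1', 'PYTHON_3.12': '3.12.8'}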
@@ -24,7 +24,7 @@ class PyprojectConfig:
     venvs: ta.Mapping[str, VenvConfig] = dc.field(default_factory=dict)
 
     venvs_dir: str = '.venvs'
-    versions_file: ta.Optional[str] = '.versions'
+    # versions_file: ta.Optional[str] = '.versions'  # FIXME:
 
 
 class PyprojectConfigPreparer: