rolfedh-doc-utils 0.1.41__py3-none-any.whl → 0.1.42__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -41,41 +41,20 @@ class DefListConverter:
41
41
  if table_title:
42
42
  # Remove leading dot and trailing period if present
43
43
  title_text = table_title.lstrip('.').rstrip('.')
44
- lines = [f'\n{title_text}, where:']
44
+ lines = [f'{title_text}, where:']
45
45
  else:
46
- lines = ['\nwhere:']
46
+ lines = ['where:']
47
47
 
48
48
  # Process each group (which may contain one or more callouts)
49
49
  for group in callout_groups:
50
50
  code_line = group.code_line
51
51
  callout_nums = group.callout_numbers
52
52
 
53
- # COMMENTED OUT: User-replaceable value detection causes false positives
54
- # with Java generics (e.g., <MyEntity, Integer>) and other valid syntax
55
- # that uses angle brackets. Always use the full code line as the term.
56
- #
57
- # # Check if this is a user-replaceable value (contains angle brackets but not heredoc)
58
- # # User values are single words/phrases in angle brackets like <my-value>
59
- # user_values = DefListConverter.USER_VALUE_PATTERN.findall(code_line)
60
- #
61
- # if user_values and len(user_values) == 1 and len(code_line) < 100:
62
- # # This looks like a user-replaceable value placeholder
63
- # # Format the value (ensure it has angle brackets)
64
- # user_value = user_values[0]
65
- # if not user_value.startswith('<'):
66
- # user_value = f'<{user_value}>'
67
- # if not user_value.endswith('>'):
68
- # user_value = f'{user_value}>'
69
- # term = f'`{user_value}`'
70
- # else:
71
- # # This is a code line - strip whitespace before wrapping in backticks
72
- # term = f'`{code_line.strip()}`'
73
-
74
53
  # Always use the full code line - strip whitespace before wrapping in backticks
75
54
  term = f'`{code_line.strip()}`'
76
55
 
77
- # Add blank line before each term
78
- lines.append('')
56
+ # Add continuation marker before each definition term
57
+ lines.append('+')
79
58
  lines.append(f'{term}::')
80
59
 
81
60
  # Add explanations for all callouts in this group
callout_lib/detector.py CHANGED
@@ -281,8 +281,8 @@ class CalloutDetector:
281
281
  explanations = {}
282
282
  i = start_line + 1 # Start after the closing delimiter
283
283
 
284
- # Skip blank lines and continuation markers (+)
285
- while i < len(lines) and (not lines[i].strip() or lines[i].strip() == '+'):
284
+ # Skip blank lines, continuation markers (+), and {nbsp} spacers
285
+ while i < len(lines) and (not lines[i].strip() or lines[i].strip() in ('+', '{nbsp}')):
286
286
  i += 1
287
287
 
288
288
  # Collect consecutive callout explanation lines
@@ -298,8 +298,18 @@ class CalloutDetector:
298
298
  # Continue until we hit a blank line, a new callout, or certain patterns
299
299
  while i < len(lines):
300
300
  line = lines[i]
301
- # Stop if we hit a blank line, new callout, or list start marker
302
- if not line.strip() or self.CALLOUT_EXPLANATION.match(line) or line.startswith('[start='):
301
+ stripped = line.strip()
302
+ # Stop if we hit:
303
+ # - blank line
304
+ # - new callout explanation
305
+ # - list start marker [start=N]
306
+ # - standalone + (list continuation that attaches to parent)
307
+ # - admonition block start [NOTE], [IMPORTANT], [WARNING], [TIP], [CAUTION]
308
+ if (not stripped or
309
+ self.CALLOUT_EXPLANATION.match(line) or
310
+ line.startswith('[start=') or
311
+ stripped == '+' or
312
+ stripped in ('[NOTE]', '[IMPORTANT]', '[WARNING]', '[TIP]', '[CAUTION]')):
303
313
  break
304
314
  # Add continuation line preserving original formatting
305
315
  explanation_lines.append(line)
@@ -381,10 +381,19 @@ class InteractiveCalloutConverter:
381
381
  )
382
382
  else:
383
383
  # Remove old explanations, add new list
384
+ # Find where explanations actually start (skip {nbsp} and + markers to preserve them)
385
+ explanation_start_line = block.end_line + 1
386
+ while explanation_start_line < len(new_lines) and (
387
+ not new_lines[explanation_start_line].strip() or
388
+ new_lines[explanation_start_line].strip() in ('+', '{nbsp}')
389
+ ):
390
+ explanation_start_line += 1
391
+
384
392
  new_section = (
385
393
  new_lines[:content_start] +
386
394
  converted_content +
387
- [new_lines[content_end]] +
395
+ [new_lines[content_end]] + # Keep closing delimiter
396
+ new_lines[content_end + 1:explanation_start_line] + # Preserve {nbsp} and + markers
388
397
  output_list +
389
398
  new_lines[explanation_end + 1:]
390
399
  )
@@ -245,9 +245,13 @@ class CalloutConverter:
245
245
  # Table format: preserve content between code block and table start
246
246
  explanation_start_line = self.detector.last_table.start_line
247
247
  else:
248
- # List format: skip blank lines after code block
248
+ # List format: skip blank lines, {nbsp} spacers, and + continuation markers
249
+ # These will be preserved in the output via the slice below
249
250
  explanation_start_line = block.end_line + 1
250
- while explanation_start_line < len(new_lines) and not new_lines[explanation_start_line].strip():
251
+ while explanation_start_line < len(new_lines) and (
252
+ not new_lines[explanation_start_line].strip() or
253
+ new_lines[explanation_start_line].strip() in ('+', '{nbsp}')
254
+ ):
251
255
  explanation_start_line += 1
252
256
 
253
257
  # Build the new section
@@ -0,0 +1,229 @@
1
+ #!/usr/bin/env python3
2
+ """
3
+ convert-id-attributes-to-ids - Convert :id: attribute definitions to AsciiDoc [id="..."] anchors.
4
+
5
+ This script recursively scans a directory for AsciiDoc files and replaces instances of
6
+ `:id: <id_value>` with `[id="<id_value>_{context}"]`.
7
+
8
+ Optionally, with --clean-up, it also removes related boilerplate lines:
9
+ - // define ID as an attribute
10
+ - // assign ID conditionally, followed by header
11
+ - include::{modules}/common/id.adoc[]
12
+ """
13
+
14
+ import argparse
15
+ import os
16
+ import re
17
+ import sys
18
+ from pathlib import Path
19
+
20
+ from doc_utils.version_check import check_version_on_startup
21
+ from doc_utils.version import __version__
22
+ from doc_utils.spinner import Spinner
23
+
24
+
25
def find_adoc_files(directory: Path) -> list[Path]:
    """Walk *directory* recursively and return every .adoc file found.

    Hidden directories and common non-content directories
    (node_modules, __pycache__) are pruned from the walk.
    """
    results: list[Path] = []
    excluded = ('node_modules', '__pycache__')
    for root, subdirs, filenames in os.walk(directory, followlinks=False):
        # Prune in place so os.walk never descends into excluded dirs.
        subdirs[:] = [
            d for d in subdirs
            if not d.startswith('.') and d not in excluded
        ]
        results.extend(
            Path(root) / name for name in filenames if name.endswith('.adoc')
        )
    return results
35
+
36
+
37
def convert_id_attributes(content: str, clean_up: bool = False) -> tuple[str, int, int]:
    """
    Convert ``:id: <value>`` attribute lines to ``[id="<value>_{context}"]`` anchors.

    Args:
        content: The file content to process
        clean_up: If True, also remove ID-related boilerplate lines

    Returns:
        Tuple of (modified_content, id_replacements_count, cleanup_removals_count)
    """
    # Boilerplate lines removed in --clean-up mode (flexible matching for variations).
    boilerplate = [
        re.compile(r'^\s*//\s*define ID as an attribute', re.IGNORECASE),
        re.compile(r'^\s*//\s*assign.*ID conditionally', re.IGNORECASE),
        re.compile(r'^\s*include::\{modules\}/common/id\.adoc\[\]'),
    ]
    # Matches `:id: <value>` at the start of a line; value is trimmed.
    id_line = re.compile(r'^:id:\s*(.+?)\s*$')

    kept: list[str] = []
    converted = 0
    removed = 0
    for line in content.split('\n'):
        match = id_line.match(line)
        if match:
            # Rewrite the attribute as an explicit AsciiDoc anchor.
            kept.append(f'[id="{match.group(1)}_{{context}}"]')
            converted += 1
        elif clean_up and any(p.search(line) for p in boilerplate):
            # Drop the boilerplate line entirely.
            removed += 1
        else:
            kept.append(line)

    return '\n'.join(kept), converted, removed
87
+
88
+
89
def process_file(file_path: Path, dry_run: bool = False, clean_up: bool = False) -> tuple[int, int]:
    """
    Process a single AsciiDoc file.

    Returns:
        Tuple of (id_replacements, cleanup_removals)
    """
    try:
        original = file_path.read_text(encoding='utf-8')
    except Exception as e:
        print(f" Error reading {file_path}: {e}")
        return 0, 0

    updated, converted, removed = convert_id_attributes(original, clean_up)

    # Nothing changed -- report zero counts without touching the file.
    if converted == 0 and removed == 0:
        return converted, removed

    if not dry_run:
        try:
            file_path.write_text(updated, encoding='utf-8')
        except Exception as e:
            print(f" Error writing {file_path}: {e}")
            return 0, 0

    return converted, removed
113
+
114
+
115
def main():
    """CLI entry point: scan a directory tree for .adoc files and convert
    `:id:` attribute definitions to `[id="..._{context}"]` anchors.

    Exits with status 1 on an invalid directory argument, 0 otherwise.
    """
    # Check for updates (non-blocking)
    check_version_on_startup()

    parser = argparse.ArgumentParser(
        description='Convert :id: attribute definitions to AsciiDoc [id="..._{context}"] anchors.'
    )
    parser.add_argument(
        'directory',
        nargs='?',
        default='.',
        help='Directory to scan for .adoc files (default: current directory)'
    )
    parser.add_argument(
        '--dry-run', '-n',
        action='store_true',
        help='Show what would be changed without making actual modifications'
    )
    parser.add_argument(
        '--clean-up',
        action='store_true',
        help='Also remove ID-related boilerplate lines (comments and include directives)'
    )
    parser.add_argument(
        '--verbose', '-v',
        action='store_true',
        help='Show detailed output for each file processed'
    )
    parser.add_argument(
        '--version',
        action='version',
        version=f'%(prog)s {__version__}'
    )

    args = parser.parse_args()

    # Resolve directory path
    directory = Path(args.directory).resolve()

    if not directory.exists():
        print(f"Error: Directory not found: {directory}")
        sys.exit(1)

    if not directory.is_dir():
        print(f"Error: Not a directory: {directory}")
        sys.exit(1)

    mode_str = "DRY RUN MODE - " if args.dry_run else ""
    print(f"{mode_str}Scanning directory: {directory}")

    if args.clean_up:
        print("Clean-up mode enabled: will remove ID-related boilerplate lines")

    # Find all AsciiDoc files
    spinner = Spinner("Searching for .adoc files")
    spinner.start()
    adoc_files = find_adoc_files(directory)
    spinner.stop(f"Found {len(adoc_files)} .adoc files")

    if not adoc_files:
        print("No AsciiDoc files found.")
        sys.exit(0)

    if args.dry_run:
        print("\n*** DRY RUN MODE - No files will be modified ***\n")

    # Process each file, accumulating totals for the summary.
    total_id_replacements = 0
    total_cleanup_removals = 0
    files_modified = 0

    spinner = Spinner(f"Processing {len(adoc_files)} files")
    spinner.start()

    for file_path in adoc_files:
        id_replacements, cleanup_removals = process_file(file_path, args.dry_run, args.clean_up)

        if id_replacements > 0 or cleanup_removals > 0:
            files_modified += 1
            total_id_replacements += id_replacements
            total_cleanup_removals += cleanup_removals

            # Per-file change report only in verbose mode.
            if args.verbose:
                rel_path = file_path.relative_to(directory)
                changes = []
                if id_replacements > 0:
                    changes.append(f"{id_replacements} ID conversion(s)")
                if cleanup_removals > 0:
                    changes.append(f"{cleanup_removals} line(s) removed")
                print(f" {rel_path}: {', '.join(changes)}")

    spinner.stop(f"Processed {len(adoc_files)} files")

    # Summary
    print(f"\nSummary:")
    if args.dry_run:
        print(f" Files that would be modified: {files_modified}")
        print(f" :id: attributes that would be converted: {total_id_replacements}")
        if args.clean_up:
            print(f" Boilerplate lines that would be removed: {total_cleanup_removals}")
        print("\nRun without --dry-run to apply changes.")
    else:
        print(f" Files modified: {files_modified}")
        print(f" :id: attributes converted: {total_id_replacements}")
        if args.clean_up:
            print(f" Boilerplate lines removed: {total_cleanup_removals}")

    if total_id_replacements == 0:
        print("\nNo :id: attributes found to convert.")
    else:
        print("\nConversion complete!")


if __name__ == '__main__':
    main()
doc_utils/version.py CHANGED
@@ -1,7 +1,7 @@
1
1
  """Version information for doc-utils."""
2
2
 
3
3
  # This should match the version in pyproject.toml
4
- __version__ = "0.1.41"
4
+ __version__ = "0.1.42"
5
5
 
6
6
  def get_version():
7
7
  """Return the current version string."""
@@ -0,0 +1,257 @@
1
+ #!/usr/bin/env python3
2
+ """
3
+ Insert .Procedure block title above numbered steps in AsciiDoc procedure files.
4
+
5
+ This script finds AsciiDoc procedure files (those with :_mod-docs-content-type: PROCEDURE)
6
+ and inserts a .Procedure block title before the first numbered step if one is missing.
7
+
8
+ Usage:
9
+ insert-procedure-title <file_or_directory> [options]
10
+
11
+ Examples:
12
+ insert-procedure-title modules/proc_example.adoc
13
+ insert-procedure-title modules/ --dry-run
14
+ insert-procedure-title . --verbose
15
+ """
16
+
17
+ import argparse
18
+ import os
19
+ import re
20
+ import sys
21
+ from pathlib import Path
22
+
23
+
24
def is_procedure_file(content: str) -> bool:
    """Return True when the file declares the PROCEDURE content type."""
    marker = ':_mod-docs-content-type: PROCEDURE'
    return marker in content
27
+
28
+
29
+ def find_first_numbered_step(lines: list[str]) -> int | None:
30
+ """
31
+ Find the line index of the first numbered step.
32
+
33
+ Numbered steps can be:
34
+ - `. Step text` (AsciiDoc implicit ordered list)
35
+ - `1. Step text` (explicit numbered list)
36
+
37
+ Returns None if no numbered steps found.
38
+ """
39
+ # Pattern for ordered list items:
40
+ # - Starts with `. ` (implicit) or `<digit>. ` (explicit)
41
+ # - Must not be a block title (block titles are `.Title` without space after dot)
42
+ ordered_list_pattern = re.compile(r'^(\d+\.\s|\.(?!\w)\s)')
43
+
44
+ for i, line in enumerate(lines):
45
+ stripped = line.strip()
46
+ if ordered_list_pattern.match(stripped):
47
+ return i
48
+ return None
49
+
50
+
51
def has_procedure_title_before(lines: list[str], step_index: int) -> bool:
    """
    Scan backward from *step_index* for a `.Procedure` block title.

    The search stops at a section heading (`= ` / `== `) or at the
    start of the file, returning False in either case.
    """
    for candidate in reversed(lines[:step_index]):
        text = candidate.strip()
        if text == '.Procedure':
            return True
        # A section heading ends the current scope -- stop looking.
        if text.startswith(('= ', '== ')):
            return False
    return False
67
+
68
+
69
def find_insertion_point(lines: list[str], step_index: int) -> int:
    """
    Return the index at which `.Procedure` should be inserted.

    This is the numbered step's position, moved back over any run of
    blank lines immediately preceding it, so the title lands before
    that blank run (i.e. after prerequisites / intro paragraphs).
    """
    point = step_index
    # Back up over trailing blank lines so the title precedes them.
    while point and not lines[point - 1].strip():
        point -= 1
    return point
89
+
90
+
91
def insert_procedure_title(content: str) -> tuple[str, bool]:
    """
    Insert a `.Procedure` block title before the first numbered step, if missing.

    Returns:
        tuple: (modified_content, was_modified)
    """
    lines = content.split('\n')

    # Nothing to do if there are no numbered steps at all.
    step_index = find_first_numbered_step(lines)
    if step_index is None:
        return content, False

    # Already titled -- leave the content untouched.
    if has_procedure_title_before(lines, step_index):
        return content, False

    # Find where to insert (before the blank run preceding the steps).
    insertion_point = find_insertion_point(lines, step_index)

    # Insert `.Procedure` followed by a single blank line before the steps.
    # BUG FIX: the original inserted both '.Procedure' and '' even when a
    # blank line already followed the insertion point, producing a double
    # blank line between the title and the steps. When a blank line is
    # already there, reuse it and insert only the title.
    if insertion_point < len(lines) and lines[insertion_point].strip() == '':
        lines.insert(insertion_point, '.Procedure')
    else:
        lines.insert(insertion_point, '')
        lines.insert(insertion_point, '.Procedure')

    return '\n'.join(lines), True
122
+
123
+
124
def has_numbered_steps(content: str) -> bool:
    """Return True when the content contains at least one numbered step."""
    return find_first_numbered_step(content.split('\n')) is not None
128
+
129
+
130
def has_procedure_title(content: str) -> bool:
    """Return True when any line is exactly `.Procedure` (ignoring surrounding whitespace)."""
    return any(line.strip() == '.Procedure' for line in content.split('\n'))
136
+
137
+
138
def process_file(filepath: Path, dry_run: bool = False, verbose: bool = False) -> tuple[bool, bool]:
    """
    Process one AsciiDoc file, inserting `.Procedure` where needed.

    Returns tuple: (was_modified, has_warning)
    """
    try:
        content = filepath.read_text(encoding='utf-8')
    except Exception as exc:
        print(f"Error reading {filepath}: {exc}", file=sys.stderr)
        return False, False

    # Files without the PROCEDURE content type are out of scope.
    if not is_procedure_file(content):
        if verbose:
            print(f"Skipping (not a procedure file): {filepath}")
        return False, False

    # An existing .Procedure title anywhere means no work is needed
    # (covers procedures written with unordered lists, too).
    if has_procedure_title(content):
        if verbose:
            print(f"No changes needed (has .Procedure): {filepath}")
        return False, False

    # A procedure with neither steps nor a title gets a warning only.
    if not has_numbered_steps(content):
        print(f"Warning: Procedure file has no numbered steps and no .Procedure title: {filepath}")
        return False, True

    new_content, was_modified = insert_procedure_title(content)
    if not was_modified:
        if verbose:
            print(f"No changes needed: {filepath}")
        return False, False

    if dry_run:
        print(f"Would modify: {filepath}")
    else:
        filepath.write_text(new_content, encoding='utf-8')
        print(f"Modified: {filepath}")
    return True, False
181
+
182
+
183
def collect_adoc_files(path: Path) -> list[Path]:
    """Return all .adoc files under *path* (a file or a directory), sorted."""
    if path.is_file():
        # A single file qualifies only if it carries the .adoc extension.
        return [path] if path.suffix == '.adoc' else []

    found = [
        Path(root) / name
        for root, _, names in os.walk(path)
        for name in names
        if name.endswith('.adoc')
    ]
    return sorted(found)
196
+
197
+
198
def main():
    """CLI entry point: insert `.Procedure` titles into AsciiDoc procedure files.

    Exits with status 1 when the given path does not exist, 0 otherwise.
    """
    parser = argparse.ArgumentParser(
        description='Insert .Procedure block title above numbered steps in AsciiDoc procedure files.',
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog='''
Examples:
  %(prog)s modules/proc_example.adoc
  %(prog)s modules/ --dry-run
  %(prog)s . --verbose
'''
    )
    parser.add_argument(
        'path',
        type=Path,
        help='File or directory to process'
    )
    parser.add_argument(
        '-n', '--dry-run',
        action='store_true',
        help='Show what would be changed without modifying files'
    )
    parser.add_argument(
        '-v', '--verbose',
        action='store_true',
        help='Show all files processed, including those not modified'
    )

    args = parser.parse_args()

    if not args.path.exists():
        print(f"Error: Path does not exist: {args.path}", file=sys.stderr)
        sys.exit(1)

    files = collect_adoc_files(args.path)

    if not files:
        print(f"No .adoc files found in: {args.path}")
        sys.exit(0)

    # Process every file, tallying modifications and warnings.
    modified_count = 0
    warning_count = 0
    for filepath in files:
        was_modified, has_warning = process_file(filepath, dry_run=args.dry_run, verbose=args.verbose)
        if was_modified:
            modified_count += 1
        if has_warning:
            warning_count += 1

    # Summary line (blank line separates it from per-file output).
    print()
    if args.dry_run:
        print(f"Dry run complete. {modified_count} file(s) would be modified.")
    else:
        print(f"Complete. {modified_count} file(s) modified.")

    if warning_count > 0:
        print(f"Warnings: {warning_count} procedure file(s) have no numbered steps.")


if __name__ == '__main__':
    main()
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: rolfedh-doc-utils
3
- Version: 0.1.41
3
+ Version: 0.1.42
4
4
  Summary: CLI tools for AsciiDoc documentation projects
5
5
  Author: Rolfe Dlugy-Hegwer
6
6
  License: MIT License
@@ -3,9 +3,10 @@ archive_unused_images.py,sha256=EvPhMIwp6_AHKtuNYQ663q6biXBeXaqf88NzWrhvtIE,2029
3
3
  check_published_links.py,sha256=nk07prV6xHVqVrYCy2Eb8BWkjkgJBhczk8U0E-KeIvA,43258
4
4
  check_scannability.py,sha256=O6ROr-e624jVPvPpASpsWo0gTfuCFpA2mTSX61BjAEI,5478
5
5
  check_source_directives.py,sha256=JiIvn_ph9VKPMH4zg-aSsuIGQZcnI_imj7rZLLE04L8,3660
6
- convert_callouts_interactive.py,sha256=4PjiVIOWxNJiJLQuBHT3x6rE46-hgfFHSaoo5quYIs8,22889
7
- convert_callouts_to_deflist.py,sha256=BoqW5_GkQ-KqNzn4vmE6lsQosrPV0lkB-bfAx3dzyMw,25886
6
+ convert_callouts_interactive.py,sha256=VJo3CePMr13NJXTHkuZ1lSiSSasfKlV08Bw8tHBzyuI,23468
7
+ convert_callouts_to_deflist.py,sha256=fHr7RYq1ZjVvX221CYenf5E52RadDhph8DQfZ1YLLHw,26129
8
8
  convert_freemarker_to_asciidoc.py,sha256=ki0bFDPWxl9aUHK_-xqffIKF4KJYMXA8S4XLG_mOA0U,10097
9
+ convert_id_attributes_to_ids.py,sha256=bqeKapxCCzonLxSJ-5W-fZacnAm0yyZrukoqKVFHU8Y,7407
9
10
  convert_tables_to_deflists.py,sha256=PIP6xummuMqC3aSzahKKRBYahes_j5ZpHp_-k6BjurY,15599
10
11
  doc_utils_cli.py,sha256=J3CE7cTDDCRGkhAknYejNWHhk5t9YFGt27WDVfR98Xk,5111
11
12
  extract_link_attributes.py,sha256=wR2SmR2la-jR6DzDbas2PoNONgRZ4dZ6aqwzkwEv8Gs,3516
@@ -14,14 +15,15 @@ find_duplicate_includes.py,sha256=sQaVLOe4Ksc3t08_A_2GaLMwQCgKe9Nsr8c3ipp1Ph0,54
14
15
  find_unused_attributes.py,sha256=AQVJsvRRgGsDjOZClcvJRQ5i5H2YrClcR-1nRLVBzI8,5140
15
16
  format_asciidoc_spacing.py,sha256=nmWpw2dgwhd81LXyznq0rT8w6Z7cNRyGtPJGRyKFRdc,4212
16
17
  insert_abstract_role.py,sha256=C1PZilpYTC1xUfdujAarNXo3oYXbToLdQB4wCpWQrsg,5454
18
+ insert_procedure_title.py,sha256=MnhSG2_e5v7v3XXU4NXr2vjk6BuppVNalCoUMuBupjI,7917
17
19
  inventory_conditionals.py,sha256=vLWEDTj9MbqUnA_iw4g-HEVX47fSG8tfd4KpSJKg6kA,1416
18
20
  replace_link_attributes.py,sha256=Cpc4E-j9j-4_y0LOstAKYOPl02Ln_2bGNIeqp3ZVCdA,7624
19
21
  validate_links.py,sha256=lWuK8sgfiFdfcUdSVAt_5U9JHVde_oa6peSUlBQtsac,6145
20
22
  callout_lib/__init__.py,sha256=8B82N_z4D1LaZVYgd5jZR53QAabtgPzADOyGlnvihj0,665
21
23
  callout_lib/converter_bullets.py,sha256=nfH0hz4p8qNM2F-MhtBjwH-lUYcNf2m1sdJebRlCxoo,4405
22
24
  callout_lib/converter_comments.py,sha256=do0dH8uOyNFpn5CDEzR0jYYCMIPP3oPFM8cEB-Fp22c,9767
23
- callout_lib/converter_deflist.py,sha256=Ocr3gutTo_Sl_MkzethZH1UO6mCDEcuExGMZF5MfZFg,6131
24
- callout_lib/detector.py,sha256=S0vZDa4zhTSn6Kv0hWfG56W-5srGxUc-nvpLe_gIx-A,15971
25
+ callout_lib/converter_deflist.py,sha256=Deep2QXqEmJY4Q8cTWyrCpErQdIl7yBnLkibZDlG4EM,4919
26
+ callout_lib/detector.py,sha256=lLg0CuSLIIMHRHuo_3_fLP347pki31sQHoiBpbqRmzo,16498
25
27
  callout_lib/table_parser.py,sha256=ZucisADE8RDAk5HtIrttaPgBi6Hf8ZUpw7KzfbcmEjc,31450
26
28
  doc_utils/__init__.py,sha256=qqZR3lohzkP63soymrEZPBGzzk6-nFzi4_tSffjmu_0,74
27
29
  doc_utils/convert_freemarker_to_asciidoc.py,sha256=UGQ7iS_9bkVdDMAWBORXbK0Q5mLPmDs1cDJqoR4LLH8,22491
@@ -41,12 +43,12 @@ doc_utils/unused_adoc.py,sha256=LPQWPGEOizXECxepk7E_5cjTVvKn6RXQYTWG97Ps5VQ,9077
41
43
  doc_utils/unused_attributes.py,sha256=2UmqdXd5ogaPtj9_teApM0IlkdCmzBZNRh7XXrVYJOk,9032
42
44
  doc_utils/unused_images.py,sha256=hL8Qrik9QCkVh54eBLuNczRS9tMnsqIEfavNamM1UeQ,5664
43
45
  doc_utils/validate_links.py,sha256=iBGXnwdeLlgIT3fo3v01ApT5k0X2FtctsvkrE6E3VMk,19610
44
- doc_utils/version.py,sha256=6M2GqXp9MDStl7sAFi0I6plYG3mZnB2F3aR5fYWcdNI,203
46
+ doc_utils/version.py,sha256=9kAm8RDU9obPgPJ99XR9NylVS9V7v-_6YogFp0RiSNs,203
45
47
  doc_utils/version_check.py,sha256=-31Y6AN0KGi_CUCAVOOhf6bPO3r7SQIXPxxeffLAF0w,7535
46
48
  doc_utils/warnings_report.py,sha256=20yfwqBjOprfFhQwCujbcsvjJCbHHhmH84uAujm-y-o,8877
47
- rolfedh_doc_utils-0.1.41.dist-info/licenses/LICENSE,sha256=vLxtwMVOJA_hEy8b77niTkdmQI9kNJskXHq0dBS36e0,1075
48
- rolfedh_doc_utils-0.1.41.dist-info/METADATA,sha256=TFcXE1pz0bqxhUSvIN20DJzYn4bS9Ix-HLw1b38cdUk,8654
49
- rolfedh_doc_utils-0.1.41.dist-info/WHEEL,sha256=qELbo2s1Yzl39ZmrAibXA2jjPLUYfnVhUNTlyF1rq0Y,92
50
- rolfedh_doc_utils-0.1.41.dist-info/entry_points.txt,sha256=VbjA2E5PzSdELQ59Gutdj2C7ZXVhTypTzc1LyTdepis,1023
51
- rolfedh_doc_utils-0.1.41.dist-info/top_level.txt,sha256=5ajEGX1siKKjC1cahR-_X-XKMsH8BdY9RPaH8vdSHB8,460
52
- rolfedh_doc_utils-0.1.41.dist-info/RECORD,,
49
+ rolfedh_doc_utils-0.1.42.dist-info/licenses/LICENSE,sha256=vLxtwMVOJA_hEy8b77niTkdmQI9kNJskXHq0dBS36e0,1075
50
+ rolfedh_doc_utils-0.1.42.dist-info/METADATA,sha256=qptswU3wJNmGolK1JTaJUVrvue2NOv3pShN2qN0cOoM,8654
51
+ rolfedh_doc_utils-0.1.42.dist-info/WHEEL,sha256=wUyA8OaulRlbfwMtmQsvNngGrxQHAvkKcvRmdizlJi0,92
52
+ rolfedh_doc_utils-0.1.42.dist-info/entry_points.txt,sha256=3CiBc2vWwHezC6aBBBz9oY0Z6J8ADrFz1B3hh71Lg0o,1141
53
+ rolfedh_doc_utils-0.1.42.dist-info/top_level.txt,sha256=iaqMjXiZZTvQ0wUEL3tHOf48pB-VJw7lw0_20FJ8y64,512
54
+ rolfedh_doc_utils-0.1.42.dist-info/RECORD,,
@@ -1,5 +1,5 @@
1
1
  Wheel-Version: 1.0
2
- Generator: setuptools (80.10.1)
2
+ Generator: setuptools (80.10.2)
3
3
  Root-Is-Purelib: true
4
4
  Tag: py3-none-any
5
5
 
@@ -7,6 +7,7 @@ check-source-directives = check_source_directives:main
7
7
  convert-callouts-interactive = convert_callouts_interactive:main
8
8
  convert-callouts-to-deflist = convert_callouts_to_deflist:main
9
9
  convert-freemarker-to-asciidoc = convert_freemarker_to_asciidoc:main
10
+ convert-id-attributes-to-ids = convert_id_attributes_to_ids:main
10
11
  convert-tables-to-deflists = convert_tables_to_deflists:main
11
12
  doc-utils = doc_utils_cli:main
12
13
  extract-link-attributes = extract_link_attributes:main
@@ -15,6 +16,7 @@ find-duplicate-includes = find_duplicate_includes:main
15
16
  find-unused-attributes = find_unused_attributes:main
16
17
  format-asciidoc-spacing = format_asciidoc_spacing:main
17
18
  insert-abstract-role = insert_abstract_role:main
19
+ insert-procedure-title = insert_procedure_title:main
18
20
  inventory-conditionals = inventory_conditionals:main
19
21
  replace-link-attributes = replace_link_attributes:main
20
22
  validate-links = validate_links:main
@@ -7,6 +7,7 @@ check_source_directives
7
7
  convert_callouts_interactive
8
8
  convert_callouts_to_deflist
9
9
  convert_freemarker_to_asciidoc
10
+ convert_id_attributes_to_ids
10
11
  convert_tables_to_deflists
11
12
  doc_utils
12
13
  doc_utils_cli
@@ -16,6 +17,7 @@ find_duplicate_includes
16
17
  find_unused_attributes
17
18
  format_asciidoc_spacing
18
19
  insert_abstract_role
20
+ insert_procedure_title
19
21
  inventory_conditionals
20
22
  replace_link_attributes
21
23
  validate_links