rolfedh-doc-utils 0.1.40__py3-none-any.whl → 0.1.42__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- callout_lib/converter_deflist.py +4 -25
- callout_lib/detector.py +14 -4
- convert_callouts_interactive.py +10 -1
- convert_callouts_to_deflist.py +6 -2
- convert_id_attributes_to_ids.py +229 -0
- doc_utils/insert_abstract_role.py +220 -0
- doc_utils/version.py +1 -1
- insert_abstract_role.py +163 -0
- insert_procedure_title.py +257 -0
- {rolfedh_doc_utils-0.1.40.dist-info → rolfedh_doc_utils-0.1.42.dist-info}/METADATA +1 -1
- {rolfedh_doc_utils-0.1.40.dist-info → rolfedh_doc_utils-0.1.42.dist-info}/RECORD +15 -11
- {rolfedh_doc_utils-0.1.40.dist-info → rolfedh_doc_utils-0.1.42.dist-info}/WHEEL +1 -1
- {rolfedh_doc_utils-0.1.40.dist-info → rolfedh_doc_utils-0.1.42.dist-info}/entry_points.txt +3 -0
- {rolfedh_doc_utils-0.1.40.dist-info → rolfedh_doc_utils-0.1.42.dist-info}/top_level.txt +3 -0
- {rolfedh_doc_utils-0.1.40.dist-info → rolfedh_doc_utils-0.1.42.dist-info}/licenses/LICENSE +0 -0
callout_lib/converter_deflist.py
CHANGED
@@ -41,41 +41,20 @@ class DefListConverter:
         if table_title:
             # Remove leading dot and trailing period if present
             title_text = table_title.lstrip('.').rstrip('.')
-            lines = [f'
+            lines = [f'{title_text}, where:']
         else:
-            lines = ['
+            lines = ['where:']
 
         # Process each group (which may contain one or more callouts)
         for group in callout_groups:
             code_line = group.code_line
             callout_nums = group.callout_numbers
 
-            # COMMENTED OUT: User-replaceable value detection causes false positives
-            # with Java generics (e.g., <MyEntity, Integer>) and other valid syntax
-            # that uses angle brackets. Always use the full code line as the term.
-            #
-            # # Check if this is a user-replaceable value (contains angle brackets but not heredoc)
-            # # User values are single words/phrases in angle brackets like <my-value>
-            # user_values = DefListConverter.USER_VALUE_PATTERN.findall(code_line)
-            #
-            # if user_values and len(user_values) == 1 and len(code_line) < 100:
-            #     # This looks like a user-replaceable value placeholder
-            #     # Format the value (ensure it has angle brackets)
-            #     user_value = user_values[0]
-            #     if not user_value.startswith('<'):
-            #         user_value = f'<{user_value}>'
-            #     if not user_value.endswith('>'):
-            #         user_value = f'{user_value}>'
-            #     term = f'`{user_value}`'
-            # else:
-            #     # This is a code line - strip whitespace before wrapping in backticks
-            #     term = f'`{code_line.strip()}`'
-
             # Always use the full code line - strip whitespace before wrapping in backticks
             term = f'`{code_line.strip()}`'
 
-            # Add
-            lines.append('')
+            # Add continuation marker before each definition term
+            lines.append('+')
             lines.append(f'{term}::')
 
             # Add explanations for all callouts in this group
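A quick way to see what this hunk changes: the standalone Python sketch below (the property names and explanations are invented, not taken from the package) mimics the new emission loop. A '+' list-continuation marker now precedes each definition term instead of a blank line, so the generated definition list stays attached to the surrounding block.

# Hypothetical sample data; only the loop body mirrors the changed converter code.
code_lines = ['quarkus.http.port=8080', 'quarkus.http.ssl-port=8443']
explanations = ['The HTTP port the server listens on.', 'The HTTPS port the server listens on.']

lines = ['where:']
for code_line, explanation in zip(code_lines, explanations):
    term = f'`{code_line.strip()}`'
    lines.append('+')            # continuation marker (previously a blank line)
    lines.append(f'{term}::')
    lines.append(explanation)

print('\n'.join(lines))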
callout_lib/detector.py
CHANGED
@@ -281,8 +281,8 @@ class CalloutDetector:
         explanations = {}
         i = start_line + 1  # Start after the closing delimiter
 
-        # Skip blank lines
-        while i < len(lines) and (not lines[i].strip() or lines[i].strip()
+        # Skip blank lines, continuation markers (+), and {nbsp} spacers
+        while i < len(lines) and (not lines[i].strip() or lines[i].strip() in ('+', '{nbsp}')):
             i += 1
 
         # Collect consecutive callout explanation lines
@@ -298,8 +298,18 @@
         # Continue until we hit a blank line, a new callout, or certain patterns
         while i < len(lines):
             line = lines[i]
-
-
+            stripped = line.strip()
+            # Stop if we hit:
+            # - blank line
+            # - new callout explanation
+            # - list start marker [start=N]
+            # - standalone + (list continuation that attaches to parent)
+            # - admonition block start [NOTE], [IMPORTANT], [WARNING], [TIP], [CAUTION]
+            if (not stripped or
+                    self.CALLOUT_EXPLANATION.match(line) or
+                    line.startswith('[start=') or
+                    stripped == '+' or
+                    stripped in ('[NOTE]', '[IMPORTANT]', '[WARNING]', '[TIP]', '[CAUTION]')):
                 break
             # Add continuation line preserving original formatting
             explanation_lines.append(line)
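A minimal sketch of the widened skip condition, run on an invented list of AsciiDoc source lines; it shows that '+' continuation markers and {nbsp} spacers after a code block's closing delimiter are now stepped over before explanations are collected.

# Hypothetical lines that follow a code block's closing delimiter.
lines = ['----', '', '+', '{nbsp}', '<1> The port the application listens on.']

i = 1  # start after the closing delimiter, as in the changed detector code
while i < len(lines) and (not lines[i].strip() or lines[i].strip() in ('+', '{nbsp}')):
    i += 1

print(i, lines[i])  # 4 <1> The port the application listens on.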
convert_callouts_interactive.py
CHANGED
@@ -381,10 +381,19 @@ class InteractiveCalloutConverter:
             )
         else:
             # Remove old explanations, add new list
+            # Find where explanations actually start (skip {nbsp} and + markers to preserve them)
+            explanation_start_line = block.end_line + 1
+            while explanation_start_line < len(new_lines) and (
+                not new_lines[explanation_start_line].strip() or
+                new_lines[explanation_start_line].strip() in ('+', '{nbsp}')
+            ):
+                explanation_start_line += 1
+
             new_section = (
                 new_lines[:content_start] +
                 converted_content +
-                [new_lines[content_end]] +
+                [new_lines[content_end]] +  # Keep closing delimiter
+                new_lines[content_end + 1:explanation_start_line] +  # Preserve {nbsp} and + markers
                 output_list +
                 new_lines[explanation_end + 1:]
             )
convert_callouts_to_deflist.py
CHANGED
@@ -245,9 +245,13 @@ class CalloutConverter:
             # Table format: preserve content between code block and table start
             explanation_start_line = self.detector.last_table.start_line
         else:
-            # List format: skip blank lines
+            # List format: skip blank lines, {nbsp} spacers, and + continuation markers
+            # These will be preserved in the output via the slice below
             explanation_start_line = block.end_line + 1
-            while explanation_start_line < len(new_lines) and
+            while explanation_start_line < len(new_lines) and (
+                not new_lines[explanation_start_line].strip() or
+                new_lines[explanation_start_line].strip() in ('+', '{nbsp}')
+            ):
                 explanation_start_line += 1
 
         # Build the new section
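The non-interactive converter applies the same idea when it rebuilds the section: everything between the closing delimiter and the computed explanation_start_line is carried over unchanged. A simplified, self-contained sketch (sample lines invented, indices hard-coded for illustration):

# Hypothetical rebuilt lines: closing delimiter, spacer lines, then the old explanations.
new_lines = ['----', '{nbsp}', '+', '<1> Old explanation to be replaced.']
content_end = 0  # index of the closing '----'

explanation_start_line = content_end + 1
while explanation_start_line < len(new_lines) and (
    not new_lines[explanation_start_line].strip() or
    new_lines[explanation_start_line].strip() in ('+', '{nbsp}')
):
    explanation_start_line += 1

preserved = new_lines[content_end + 1:explanation_start_line]
print(preserved)  # ['{nbsp}', '+'] -- re-emitted ahead of the new definition list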
convert_id_attributes_to_ids.py
ADDED
@@ -0,0 +1,229 @@
+#!/usr/bin/env python3
+"""
+convert-id-attributes-to-ids - Convert :id: attribute definitions to AsciiDoc [id="..."] anchors.
+
+This script recursively scans a directory for AsciiDoc files and replaces instances of
+`:id: <id_value>` with `[id="<id_value>_{context}"]`.
+
+Optionally, with --clean-up, it also removes related boilerplate lines:
+- // define ID as an attribute
+- // assign ID conditionally, followed by header
+- include::{modules}/common/id.adoc[]
+"""
+
+import argparse
+import os
+import re
+import sys
+from pathlib import Path
+
+from doc_utils.version_check import check_version_on_startup
+from doc_utils.version import __version__
+from doc_utils.spinner import Spinner
+
+
+def find_adoc_files(directory: Path) -> list[Path]:
+    """Recursively find all .adoc files in a directory."""
+    adoc_files = []
+    for root, dirs, files in os.walk(directory, followlinks=False):
+        # Skip hidden directories and common non-content directories
+        dirs[:] = [d for d in dirs if not d.startswith('.') and d not in ('node_modules', '__pycache__')]
+        for file in files:
+            if file.endswith('.adoc'):
+                adoc_files.append(Path(root) / file)
+    return adoc_files
+
+
+def convert_id_attributes(content: str, clean_up: bool = False) -> tuple[str, int, int]:
+    """
+    Convert :id: attributes to [id="..._{context}"] format.
+
+    Args:
+        content: The file content to process
+        clean_up: If True, also remove boilerplate lines
+
+    Returns:
+        Tuple of (modified_content, id_replacements_count, cleanup_removals_count)
+    """
+    lines = content.split('\n')
+    new_lines = []
+    id_replacements = 0
+    cleanup_removals = 0
+
+    # Patterns for clean-up (flexible matching for variations)
+    cleanup_patterns = [
+        re.compile(r'^\s*//\s*define ID as an attribute', re.IGNORECASE),
+        re.compile(r'^\s*//\s*assign.*ID conditionally', re.IGNORECASE),
+        re.compile(r'^\s*include::\{modules\}/common/id\.adoc\[\]'),
+    ]
+
+    # Pattern to match :id: <value>
+    id_pattern = re.compile(r'^:id:\s*(.+?)\s*$')
+
+    for line in lines:
+        # Check if this is an :id: line
+        id_match = id_pattern.match(line)
+        if id_match:
+            id_value = id_match.group(1)
+            new_line = f'[id="{id_value}_{{context}}"]'
+            new_lines.append(new_line)
+            id_replacements += 1
+            continue
+
+        # Check if clean-up is enabled and line matches cleanup patterns
+        if clean_up:
+            should_remove = False
+            for pattern in cleanup_patterns:
+                if pattern.search(line):
+                    should_remove = True
+                    cleanup_removals += 1
+                    break
+            if should_remove:
+                continue
+
+        new_lines.append(line)
+
+    return '\n'.join(new_lines), id_replacements, cleanup_removals
+
+
+def process_file(file_path: Path, dry_run: bool = False, clean_up: bool = False) -> tuple[int, int]:
+    """
+    Process a single AsciiDoc file.
+
+    Returns:
+        Tuple of (id_replacements, cleanup_removals)
+    """
+    try:
+        content = file_path.read_text(encoding='utf-8')
+    except Exception as e:
+        print(f"  Error reading {file_path}: {e}")
+        return 0, 0
+
+    new_content, id_replacements, cleanup_removals = convert_id_attributes(content, clean_up)
+
+    if id_replacements > 0 or cleanup_removals > 0:
+        if not dry_run:
+            try:
+                file_path.write_text(new_content, encoding='utf-8')
+            except Exception as e:
+                print(f"  Error writing {file_path}: {e}")
+                return 0, 0
+
+    return id_replacements, cleanup_removals
+
+
+def main():
+    # Check for updates (non-blocking)
+    check_version_on_startup()
+
+    parser = argparse.ArgumentParser(
+        description='Convert :id: attribute definitions to AsciiDoc [id="..._{context}"] anchors.'
+    )
+    parser.add_argument(
+        'directory',
+        nargs='?',
+        default='.',
+        help='Directory to scan for .adoc files (default: current directory)'
+    )
+    parser.add_argument(
+        '--dry-run', '-n',
+        action='store_true',
+        help='Show what would be changed without making actual modifications'
+    )
+    parser.add_argument(
+        '--clean-up',
+        action='store_true',
+        help='Also remove ID-related boilerplate lines (comments and include directives)'
+    )
+    parser.add_argument(
+        '--verbose', '-v',
+        action='store_true',
+        help='Show detailed output for each file processed'
+    )
+    parser.add_argument(
+        '--version',
+        action='version',
+        version=f'%(prog)s {__version__}'
+    )
+
+    args = parser.parse_args()
+
+    # Resolve directory path
+    directory = Path(args.directory).resolve()
+
+    if not directory.exists():
+        print(f"Error: Directory not found: {directory}")
+        sys.exit(1)
+
+    if not directory.is_dir():
+        print(f"Error: Not a directory: {directory}")
+        sys.exit(1)
+
+    mode_str = "DRY RUN MODE - " if args.dry_run else ""
+    print(f"{mode_str}Scanning directory: {directory}")
+
+    if args.clean_up:
+        print("Clean-up mode enabled: will remove ID-related boilerplate lines")
+
+    # Find all AsciiDoc files
+    spinner = Spinner("Searching for .adoc files")
+    spinner.start()
+    adoc_files = find_adoc_files(directory)
+    spinner.stop(f"Found {len(adoc_files)} .adoc files")
+
+    if not adoc_files:
+        print("No AsciiDoc files found.")
+        sys.exit(0)
+
+    if args.dry_run:
+        print("\n*** DRY RUN MODE - No files will be modified ***\n")
+
+    # Process each file
+    total_id_replacements = 0
+    total_cleanup_removals = 0
+    files_modified = 0
+
+    spinner = Spinner(f"Processing {len(adoc_files)} files")
+    spinner.start()
+
+    for file_path in adoc_files:
+        id_replacements, cleanup_removals = process_file(file_path, args.dry_run, args.clean_up)
+
+        if id_replacements > 0 or cleanup_removals > 0:
+            files_modified += 1
+            total_id_replacements += id_replacements
+            total_cleanup_removals += cleanup_removals
+
+            if args.verbose:
+                rel_path = file_path.relative_to(directory)
+                changes = []
+                if id_replacements > 0:
+                    changes.append(f"{id_replacements} ID conversion(s)")
+                if cleanup_removals > 0:
+                    changes.append(f"{cleanup_removals} line(s) removed")
+                print(f"  {rel_path}: {', '.join(changes)}")
+
+    spinner.stop(f"Processed {len(adoc_files)} files")
+
+    # Summary
+    print(f"\nSummary:")
+    if args.dry_run:
+        print(f"  Files that would be modified: {files_modified}")
+        print(f"  :id: attributes that would be converted: {total_id_replacements}")
+        if args.clean_up:
+            print(f"  Boilerplate lines that would be removed: {total_cleanup_removals}")
+        print("\nRun without --dry-run to apply changes.")
+    else:
+        print(f"  Files modified: {files_modified}")
+        print(f"  :id: attributes converted: {total_id_replacements}")
+        if args.clean_up:
+            print(f"  Boilerplate lines removed: {total_cleanup_removals}")
+
+    if total_id_replacements == 0:
+        print("\nNo :id: attributes found to convert.")
+    else:
+        print("\nConversion complete!")
+
+
+if __name__ == '__main__':
+    main()
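Assuming convert_id_attributes() from the file above is importable, a short session shows what the new script does to a module; the file content below is made up for illustration.

# Hypothetical input module content.
sample = (
    "// define ID as an attribute\n"
    ":id: configuring-the-registry\n"
    "\n"
    "= Configuring the registry\n"
)

new_text, ids_converted, lines_removed = convert_id_attributes(sample, clean_up=True)
print(ids_converted, lines_removed)   # 1 1
print(new_text.splitlines()[0])       # [id="configuring-the-registry_{context}"]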
doc_utils/insert_abstract_role.py
ADDED
@@ -0,0 +1,220 @@
+"""
+Insert abstract role - ensures AsciiDoc files have [role="_abstract"] above the first paragraph.
+
+Core logic for adding the [role="_abstract"] attribute required for DITA short description conversion.
+"""
+
+import re
+from pathlib import Path
+from typing import List, Tuple, Optional
+
+
+def find_first_paragraph_after_title(lines: List[str]) -> Optional[int]:
+    """
+    Find the line index of the first paragraph after the document title.
+
+    The first paragraph is the first non-empty line that:
+    - Comes after a level 1 heading (= Title)
+    - Is not an attribute definition (starts with :)
+    - Is not a comment (starts with //)
+    - Is not a block attribute (starts with [)
+    - Is not another heading
+
+    Args:
+        lines: List of lines from the file (without trailing newlines)
+
+    Returns:
+        Line index of the first paragraph, or None if not found
+    """
+    title_found = False
+    title_index = -1
+
+    for i, line in enumerate(lines):
+        # Check for level 1 heading (document title)
+        if re.match(r'^=\s+[^=]', line):
+            title_found = True
+            title_index = i
+            continue
+
+        # Only look for first paragraph after we've found the title
+        if not title_found:
+            continue
+
+        # Skip empty lines
+        if re.match(r'^\s*$', line):
+            continue
+
+        # Skip attribute definitions
+        if re.match(r'^:', line):
+            continue
+
+        # Skip comments (single line)
+        if re.match(r'^//', line):
+            continue
+
+        # Skip block attributes like [role=...], [id=...], etc.
+        if re.match(r'^\[', line):
+            continue
+
+        # Skip other headings
+        if re.match(r'^=+\s+', line):
+            continue
+
+        # Skip include directives
+        if re.match(r'^include::', line):
+            continue
+
+        # This is the first paragraph
+        return i
+
+    return None
+
+
+def has_abstract_role(lines: List[str], paragraph_index: int) -> bool:
+    """
+    Check if there's already a [role="_abstract"] before the paragraph.
+
+    Args:
+        lines: List of lines from the file
+        paragraph_index: Index of the first paragraph
+
+    Returns:
+        True if [role="_abstract"] already exists before the paragraph
+    """
+    # Look at the lines immediately before the paragraph
+    for i in range(paragraph_index - 1, -1, -1):
+        line = lines[i].strip()
+
+        # Skip empty lines
+        if not line:
+            continue
+
+        # Found abstract role
+        if re.match(r'^\[role=["\']_abstract["\']\]$', line):
+            return True
+
+        # If we hit any other non-empty content, stop looking
+        # (could be attribute, heading, etc.)
+        break
+
+    return False
+
+
+def process_file(file_path: Path, dry_run: bool = False, verbose: bool = False) -> Tuple[bool, List[str]]:
+    """
+    Process a single AsciiDoc file to add [role="_abstract"] if needed.
+
+    Args:
+        file_path: Path to the file to process
+        dry_run: If True, show what would be changed without modifying
+        verbose: If True, show detailed output
+
+    Returns:
+        Tuple of (changes_made, messages) where messages is a list of verbose output
+    """
+    messages = []
+
+    if verbose:
+        messages.append(f"Processing: {file_path}")
+
+    try:
+        with open(file_path, 'r', encoding='utf-8') as f:
+            lines = f.readlines()
+    except (IOError, UnicodeDecodeError) as e:
+        raise IOError(f"Error reading {file_path}: {e}")
+
+    # Remove trailing newlines from lines for processing
+    lines = [line.rstrip('\n\r') for line in lines]
+
+    # Find the first paragraph after the title
+    paragraph_index = find_first_paragraph_after_title(lines)
+
+    if paragraph_index is None:
+        if verbose:
+            messages.append("  No paragraph found after title")
+        return False, messages
+
+    # Check if abstract role already exists
+    if has_abstract_role(lines, paragraph_index):
+        if verbose:
+            messages.append("  [role=\"_abstract\"] already present")
+        return False, messages
+
+    # Insert [role="_abstract"] before the first paragraph
+    # We need to add it with a blank line before it if there isn't one
+    new_lines = lines[:paragraph_index]
+
+    # Check if we need to add a blank line before the role
+    if paragraph_index > 0 and lines[paragraph_index - 1].strip():
+        new_lines.append('')
+
+    new_lines.append('[role="_abstract"]')
+    new_lines.extend(lines[paragraph_index:])
+
+    if verbose:
+        preview = lines[paragraph_index][:60] + "..." if len(lines[paragraph_index]) > 60 else lines[paragraph_index]
+        messages.append(f"  Adding [role=\"_abstract\"] before line {paragraph_index + 1}: {preview}")
+
+    if not dry_run:
+        try:
+            with open(file_path, 'w', encoding='utf-8') as f:
+                for line in new_lines:
+                    f.write(line + '\n')
+        except IOError as e:
+            raise IOError(f"Error writing {file_path}: {e}")
+
+    return True, messages
+
+
+def find_adoc_files(path: Path, exclude_dirs: List[str] = None, exclude_files: List[str] = None) -> List[Path]:
+    """
+    Find all .adoc files in the given path.
+
+    Args:
+        path: File or directory path to search
+        exclude_dirs: List of directory paths to exclude
+        exclude_files: List of file paths to exclude
+
+    Returns:
+        List of Path objects for .adoc files
+    """
+    exclude_dirs = exclude_dirs or []
+    exclude_files = exclude_files or []
+
+    # Normalize exclusion paths to absolute
+    exclude_dirs_abs = [Path(d).resolve() for d in exclude_dirs]
+    exclude_files_abs = [Path(f).resolve() for f in exclude_files]
+
+    adoc_files = []
+
+    if path.is_file():
+        if path.suffix == '.adoc':
+            path_abs = path.resolve()
+            if path_abs not in exclude_files_abs:
+                adoc_files.append(path)
+    elif path.is_dir():
+        for adoc_path in path.rglob('*.adoc'):
+            # Skip symlinks
+            if adoc_path.is_symlink():
+                continue
+
+            path_abs = adoc_path.resolve()
+
+            # Check if file is excluded
+            if path_abs in exclude_files_abs:
+                continue
+
+            # Check if any parent directory is excluded
+            skip = False
+            for exclude_dir in exclude_dirs_abs:
+                try:
+                    path_abs.relative_to(exclude_dir)
+                    skip = True
+                    break
+                except ValueError:
+                    pass
+
+            if not skip:
+                adoc_files.append(adoc_path)
+
+    return sorted(adoc_files)
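To illustrate the core helpers, the sketch below runs find_first_paragraph_after_title() and has_abstract_role() on a small invented module (assuming the functions above are importable); process_file() would insert the role attribute directly above the reported line.

# Hypothetical module content, already split into lines as process_file() does.
doc = [
    '= Configuring widgets',
    ':context: widgets',
    '',
    'This module explains how to configure widgets.',
]

idx = find_first_paragraph_after_title(doc)
print(idx)                          # 3 -> the abstract paragraph
print(has_abstract_role(doc, idx))  # False -> [role="_abstract"] would be inserted above it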
doc_utils/version.py
CHANGED
insert_abstract_role.py
ADDED
@@ -0,0 +1,163 @@
+#!/usr/bin/env python3
+"""
+insert-abstract-role - Insert [role="_abstract"] above the first paragraph after the title.
+
+Ensures AsciiDoc files have the [role="_abstract"] attribute required for DITA short description
+conversion, as enforced by the AsciiDocDITA.ShortDescription vale rule.
+"""
+
+import argparse
+import sys
+from pathlib import Path
+
+from doc_utils.insert_abstract_role import process_file, find_adoc_files
+from doc_utils.version_check import check_version_on_startup
+from doc_utils.version import __version__
+from doc_utils.file_utils import parse_exclude_list_file
+
+
+# Colors for output
+class Colors:
+    RED = '\033[0;31m'
+    GREEN = '\033[0;32m'
+    YELLOW = '\033[1;33m'
+    NC = '\033[0m'  # No Color
+
+
+def print_colored(message: str, color: str = Colors.NC) -> None:
+    """Print message with color"""
+    print(f"{color}{message}{Colors.NC}")
+
+
+def main():
+    # Check for updates (non-blocking, won't interfere with tool operation)
+    check_version_on_startup()
+
+    parser = argparse.ArgumentParser(
+        description="Insert [role=\"_abstract\"] above the first paragraph after the document title",
+        formatter_class=argparse.RawDescriptionHelpFormatter,
+        epilog="""
+Insert [role="_abstract"] above the first paragraph after the document title in AsciiDoc files.
+This attribute is required for DITA short description conversion.
+
+The tool identifies the first paragraph after a level 1 heading (= Title) and inserts
+the [role="_abstract"] attribute on the line immediately before it.
+
+Examples:
+  %(prog)s                                  # Process all .adoc files in current directory
+  %(prog)s modules/                         # Process all .adoc files in modules/
+  %(prog)s modules/rn/my-release-note.adoc  # Process single file
+  %(prog)s --dry-run modules/               # Preview changes without modifying
+  %(prog)s --exclude-dir .archive modules/  # Exclude .archive directories
+"""
+    )
+
+    parser.add_argument(
+        'path',
+        nargs='?',
+        default='.',
+        help='File or directory to process (default: current directory)'
+    )
+    parser.add_argument(
+        '-n', '--dry-run',
+        action='store_true',
+        help='Show what would be changed without modifying files'
+    )
+    parser.add_argument(
+        '-v', '--verbose',
+        action='store_true',
+        help='Show detailed output'
+    )
+    parser.add_argument(
+        '--exclude-dir',
+        action='append',
+        default=[],
+        help='Directory to exclude (can be specified multiple times)'
+    )
+    parser.add_argument(
+        '--exclude-file',
+        action='append',
+        default=[],
+        help='File to exclude (can be specified multiple times)'
+    )
+    parser.add_argument(
+        '--exclude-list',
+        help='Path to file containing list of files/directories to exclude (one per line)'
+    )
+    parser.add_argument('--version', action='version', version=f'%(prog)s {__version__}')
+
+    args = parser.parse_args()
+
+    # Convert path to Path object
+    target_path = Path(args.path)
+
+    # Check if path exists
+    if not target_path.exists():
+        print_colored(f"Error: Path does not exist: {target_path}", Colors.RED)
+        sys.exit(1)
+
+    # Parse exclusion list file if provided
+    exclude_dirs = list(args.exclude_dir)
+    exclude_files = list(args.exclude_file)
+
+    if args.exclude_list:
+        list_dirs, list_files = parse_exclude_list_file(args.exclude_list)
+        exclude_dirs.extend(list_dirs)
+        exclude_files.extend(list_files)
+
+    # Display dry-run mode message
+    if args.dry_run:
+        print_colored("DRY RUN MODE - No files will be modified", Colors.YELLOW)
+
+    # Find all AsciiDoc files
+    adoc_files = find_adoc_files(target_path, exclude_dirs, exclude_files)
+
+    if not adoc_files:
+        if target_path.is_file():
+            print_colored(f"Warning: {target_path} is not an AsciiDoc file (.adoc)", Colors.YELLOW)
+        print(f"Processed 0 AsciiDoc file(s)")
+        print("Insert abstract role complete!")
+        return
+
+    # Process each file
+    files_processed = 0
+    files_modified = 0
+
+    for file_path in adoc_files:
+        try:
+            changes_made, messages = process_file(file_path, args.dry_run, args.verbose)
+
+            # Print verbose messages
+            if args.verbose:
+                for msg in messages:
+                    print(msg)
+
+            if changes_made:
+                files_modified += 1
+                if args.dry_run:
+                    print_colored(f"Would modify: {file_path}", Colors.YELLOW)
+                else:
+                    print_colored(f"Modified: {file_path}", Colors.GREEN)
+            elif args.verbose:
+                print(f"  No changes needed for: {file_path}")
+
+            files_processed += 1
+
+        except KeyboardInterrupt:
+            print_colored("\nOperation cancelled by user", Colors.YELLOW)
+            sys.exit(1)
+        except IOError as e:
+            print_colored(f"{e}", Colors.RED)
+        except Exception as e:
+            print_colored(f"Unexpected error processing {file_path}: {e}", Colors.RED)
+
+    print(f"Processed {files_processed} AsciiDoc file(s)")
+    if args.dry_run and files_modified > 0:
+        print(f"Would modify {files_modified} file(s)")
+    elif files_modified > 0:
+        print(f"Modified {files_modified} file(s)")
+    print("Insert abstract role complete!")
+
+
+if __name__ == "__main__":
+    main()
insert_procedure_title.py
ADDED
@@ -0,0 +1,257 @@
+#!/usr/bin/env python3
+"""
+Insert .Procedure block title above numbered steps in AsciiDoc procedure files.
+
+This script finds AsciiDoc procedure files (those with :_mod-docs-content-type: PROCEDURE)
+and inserts a .Procedure block title before the first numbered step if one is missing.
+
+Usage:
+    insert-procedure-title <file_or_directory> [options]
+
+Examples:
+    insert-procedure-title modules/proc_example.adoc
+    insert-procedure-title modules/ --dry-run
+    insert-procedure-title . --verbose
+"""
+
+import argparse
+import os
+import re
+import sys
+from pathlib import Path
+
+
+def is_procedure_file(content: str) -> bool:
+    """Check if file is a PROCEDURE content type."""
+    return ':_mod-docs-content-type: PROCEDURE' in content
+
+
+def find_first_numbered_step(lines: list[str]) -> int | None:
+    """
+    Find the line index of the first numbered step.
+
+    Numbered steps can be:
+    - `. Step text` (AsciiDoc implicit ordered list)
+    - `1. Step text` (explicit numbered list)
+
+    Returns None if no numbered steps found.
+    """
+    # Pattern for ordered list items:
+    # - Starts with `. ` (implicit) or `<digit>. ` (explicit)
+    # - Must not be a block title (block titles are `.Title` without space after dot)
+    ordered_list_pattern = re.compile(r'^(\d+\.\s|\.(?!\w)\s)')
+
+    for i, line in enumerate(lines):
+        stripped = line.strip()
+        if ordered_list_pattern.match(stripped):
+            return i
+    return None
+
+
+def has_procedure_title_before(lines: list[str], step_index: int) -> bool:
+    """
+    Check if there's a .Procedure block title before the numbered steps.
+
+    Looks backward from the step index to find `.Procedure` on its own line.
+    Continues past other block titles (like sub-section titles) until hitting
+    a section heading (= or ==) or the start of the file.
+    """
+    for i in range(step_index - 1, -1, -1):
+        stripped = lines[i].strip()
+        if stripped == '.Procedure':
+            return True
+        # Stop searching if we hit a section heading
+        if stripped.startswith('= ') or stripped.startswith('== '):
+            return False
+    return False
+
+
+def find_insertion_point(lines: list[str], step_index: int) -> int:
+    """
+    Find the correct line index to insert .Procedure block title.
+
+    The insertion point should be before the numbered steps, but after:
+    - Prerequisites block
+    - Introductory paragraphs
+    - Blank lines
+
+    Returns the line index where .Procedure should be inserted.
+    """
+    # Look backward from the step to find a good insertion point
+    # We want to insert just before the numbered list starts
+    insertion_point = step_index
+
+    # Skip backward over any preceding blank lines to insert before them
+    while insertion_point > 0 and lines[insertion_point - 1].strip() == '':
+        insertion_point -= 1
+
+    return insertion_point
+
+
+def insert_procedure_title(content: str) -> tuple[str, bool]:
+    """
+    Insert .Procedure block title if missing.
+
+    Returns:
+        tuple: (modified_content, was_modified)
+    """
+    lines = content.split('\n')
+
+    # Find the first numbered step
+    step_index = find_first_numbered_step(lines)
+    if step_index is None:
+        return content, False
+
+    # Check if .Procedure already exists before the steps
+    if has_procedure_title_before(lines, step_index):
+        return content, False
+
+    # Find where to insert
+    insertion_point = find_insertion_point(lines, step_index)
+
+    # Insert .Procedure followed by blank line
+    # If there's already a blank line before steps, just insert .Procedure
+    if insertion_point < len(lines) and lines[insertion_point].strip() == '':
+        lines.insert(insertion_point, '.Procedure')
+        lines.insert(insertion_point + 1, '')
+    else:
+        lines.insert(insertion_point, '')
+        lines.insert(insertion_point, '.Procedure')
+
+    return '\n'.join(lines), True
+
+
+def has_numbered_steps(content: str) -> bool:
+    """Check if file has numbered steps."""
+    lines = content.split('\n')
+    return find_first_numbered_step(lines) is not None
+
+
+def has_procedure_title(content: str) -> bool:
+    """Check if file has a .Procedure block title."""
+    for line in content.split('\n'):
+        if line.strip() == '.Procedure':
+            return True
+    return False
+
+
+def process_file(filepath: Path, dry_run: bool = False, verbose: bool = False) -> tuple[bool, bool]:
+    """
+    Process a single AsciiDoc file.
+
+    Returns tuple: (was_modified, has_warning)
+    """
+    try:
+        content = filepath.read_text(encoding='utf-8')
+    except Exception as e:
+        print(f"Error reading {filepath}: {e}", file=sys.stderr)
+        return False, False
+
+    # Only process PROCEDURE files
+    if not is_procedure_file(content):
+        if verbose:
+            print(f"Skipping (not a procedure file): {filepath}")
+        return False, False
+
+    # If file already has .Procedure, no action needed
+    # (handles cases where procedures use unordered lists instead of numbered steps)
+    if has_procedure_title(content):
+        if verbose:
+            print(f"No changes needed (has .Procedure): {filepath}")
+        return False, False
+
+    # Check if file has numbered steps
+    if not has_numbered_steps(content):
+        print(f"Warning: Procedure file has no numbered steps and no .Procedure title: {filepath}")
+        return False, True
+
+    new_content, was_modified = insert_procedure_title(content)
+
+    if was_modified:
+        if dry_run:
+            print(f"Would modify: {filepath}")
+        else:
+            filepath.write_text(new_content, encoding='utf-8')
+            print(f"Modified: {filepath}")
+        return True, False
+    else:
+        if verbose:
+            print(f"No changes needed: {filepath}")
+        return False, False
+
+
+def collect_adoc_files(path: Path) -> list[Path]:
+    """Collect all .adoc files from path (file or directory)."""
+    if path.is_file():
+        if path.suffix == '.adoc':
+            return [path]
+        return []
+
+    files = []
+    for root, _, filenames in os.walk(path):
+        for filename in filenames:
+            if filename.endswith('.adoc'):
+                files.append(Path(root) / filename)
+    return sorted(files)
+
+
+def main():
+    parser = argparse.ArgumentParser(
+        description='Insert .Procedure block title above numbered steps in AsciiDoc procedure files.',
+        formatter_class=argparse.RawDescriptionHelpFormatter,
+        epilog='''
+Examples:
+  %(prog)s modules/proc_example.adoc
+  %(prog)s modules/ --dry-run
+  %(prog)s . --verbose
+'''
+    )
+    parser.add_argument(
+        'path',
+        type=Path,
+        help='File or directory to process'
+    )
+    parser.add_argument(
+        '-n', '--dry-run',
+        action='store_true',
+        help='Show what would be changed without modifying files'
+    )
+    parser.add_argument(
+        '-v', '--verbose',
+        action='store_true',
+        help='Show all files processed, including those not modified'
+    )
+
+    args = parser.parse_args()
+
+    if not args.path.exists():
+        print(f"Error: Path does not exist: {args.path}", file=sys.stderr)
+        sys.exit(1)
+
+    files = collect_adoc_files(args.path)
+
+    if not files:
+        print(f"No .adoc files found in: {args.path}")
+        sys.exit(0)
+
+    modified_count = 0
+    warning_count = 0
+    for filepath in files:
+        was_modified, has_warning = process_file(filepath, dry_run=args.dry_run, verbose=args.verbose)
+        if was_modified:
+            modified_count += 1
+        if has_warning:
+            warning_count += 1
+
+    print()
+    if args.dry_run:
+        print(f"Dry run complete. {modified_count} file(s) would be modified.")
+    else:
+        print(f"Complete. {modified_count} file(s) modified.")
+
+    if warning_count > 0:
+        print(f"Warnings: {warning_count} procedure file(s) have no numbered steps.")
+
+
+if __name__ == '__main__':
+    main()
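As a sanity check, the sketch below feeds a minimal invented procedure module to insert_procedure_title() (assuming the function above is importable) and confirms that .Procedure lands immediately above the first numbered step.

# Hypothetical procedure module content.
sample = (
    ":_mod-docs-content-type: PROCEDURE\n"
    "\n"
    "= Restarting the service\n"
    "\n"
    ". Stop the service.\n"
    ". Start the service.\n"
)

new_content, was_modified = insert_procedure_title(sample)
print(was_modified)                  # True
print(new_content.splitlines()[3])   # .Procedure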
{rolfedh_doc_utils-0.1.40.dist-info → rolfedh_doc_utils-0.1.42.dist-info}/RECORD
CHANGED
@@ -3,9 +3,10 @@ archive_unused_images.py,sha256=EvPhMIwp6_AHKtuNYQ663q6biXBeXaqf88NzWrhvtIE,2029
 check_published_links.py,sha256=nk07prV6xHVqVrYCy2Eb8BWkjkgJBhczk8U0E-KeIvA,43258
 check_scannability.py,sha256=O6ROr-e624jVPvPpASpsWo0gTfuCFpA2mTSX61BjAEI,5478
 check_source_directives.py,sha256=JiIvn_ph9VKPMH4zg-aSsuIGQZcnI_imj7rZLLE04L8,3660
-convert_callouts_interactive.py,sha256=
-convert_callouts_to_deflist.py,sha256=
+convert_callouts_interactive.py,sha256=VJo3CePMr13NJXTHkuZ1lSiSSasfKlV08Bw8tHBzyuI,23468
+convert_callouts_to_deflist.py,sha256=fHr7RYq1ZjVvX221CYenf5E52RadDhph8DQfZ1YLLHw,26129
 convert_freemarker_to_asciidoc.py,sha256=ki0bFDPWxl9aUHK_-xqffIKF4KJYMXA8S4XLG_mOA0U,10097
+convert_id_attributes_to_ids.py,sha256=bqeKapxCCzonLxSJ-5W-fZacnAm0yyZrukoqKVFHU8Y,7407
 convert_tables_to_deflists.py,sha256=PIP6xummuMqC3aSzahKKRBYahes_j5ZpHp_-k6BjurY,15599
 doc_utils_cli.py,sha256=J3CE7cTDDCRGkhAknYejNWHhk5t9YFGt27WDVfR98Xk,5111
 extract_link_attributes.py,sha256=wR2SmR2la-jR6DzDbas2PoNONgRZ4dZ6aqwzkwEv8Gs,3516
@@ -13,14 +14,16 @@ find_duplicate_content.py,sha256=iYWekmriItXWSd8nBnIQN_FoZkv6quPJNL0qjv6UxUA,634
 find_duplicate_includes.py,sha256=sQaVLOe4Ksc3t08_A_2GaLMwQCgKe9Nsr8c3ipp1Ph0,5456
 find_unused_attributes.py,sha256=AQVJsvRRgGsDjOZClcvJRQ5i5H2YrClcR-1nRLVBzI8,5140
 format_asciidoc_spacing.py,sha256=nmWpw2dgwhd81LXyznq0rT8w6Z7cNRyGtPJGRyKFRdc,4212
+insert_abstract_role.py,sha256=C1PZilpYTC1xUfdujAarNXo3oYXbToLdQB4wCpWQrsg,5454
+insert_procedure_title.py,sha256=MnhSG2_e5v7v3XXU4NXr2vjk6BuppVNalCoUMuBupjI,7917
 inventory_conditionals.py,sha256=vLWEDTj9MbqUnA_iw4g-HEVX47fSG8tfd4KpSJKg6kA,1416
 replace_link_attributes.py,sha256=Cpc4E-j9j-4_y0LOstAKYOPl02Ln_2bGNIeqp3ZVCdA,7624
 validate_links.py,sha256=lWuK8sgfiFdfcUdSVAt_5U9JHVde_oa6peSUlBQtsac,6145
 callout_lib/__init__.py,sha256=8B82N_z4D1LaZVYgd5jZR53QAabtgPzADOyGlnvihj0,665
 callout_lib/converter_bullets.py,sha256=nfH0hz4p8qNM2F-MhtBjwH-lUYcNf2m1sdJebRlCxoo,4405
 callout_lib/converter_comments.py,sha256=do0dH8uOyNFpn5CDEzR0jYYCMIPP3oPFM8cEB-Fp22c,9767
-callout_lib/converter_deflist.py,sha256=
-callout_lib/detector.py,sha256=
+callout_lib/converter_deflist.py,sha256=Deep2QXqEmJY4Q8cTWyrCpErQdIl7yBnLkibZDlG4EM,4919
+callout_lib/detector.py,sha256=lLg0CuSLIIMHRHuo_3_fLP347pki31sQHoiBpbqRmzo,16498
 callout_lib/table_parser.py,sha256=ZucisADE8RDAk5HtIrttaPgBi6Hf8ZUpw7KzfbcmEjc,31450
 doc_utils/__init__.py,sha256=qqZR3lohzkP63soymrEZPBGzzk6-nFzi4_tSffjmu_0,74
 doc_utils/convert_freemarker_to_asciidoc.py,sha256=UGQ7iS_9bkVdDMAWBORXbK0Q5mLPmDs1cDJqoR4LLH8,22491
@@ -29,6 +32,7 @@ doc_utils/duplicate_includes.py,sha256=8hpL7fq_pHcKMS0C50LTwTyzqth39nMQ9Lz67gie8
 doc_utils/extract_link_attributes.py,sha256=U0EvPZReJQigNfbT-icBsVT6Li64hYki5W7MQz6qqbc,22743
 doc_utils/file_utils.py,sha256=fpTh3xx759sF8sNocdn_arsP3KAv8XA6cTQTAVIZiZg,4247
 doc_utils/format_asciidoc_spacing.py,sha256=RL2WU_dG_UfGL01LnevcyJfKsvYy_ogNyeoVX-Fyqks,13579
+doc_utils/insert_abstract_role.py,sha256=z_Pm8A3EE03DU2fBReJDwMfIUT1qRbDcOKw4EI63I90,6641
 doc_utils/inventory_conditionals.py,sha256=PSrdmeBHbpayvXgaRryqvjUlLZYryPgU9js8IBYqB7g,5486
 doc_utils/missing_source_directive.py,sha256=X3Acn0QJTk6XjmBXhGus5JAjlIitCiicCRE3fslifyw,8048
 doc_utils/replace_link_attributes.py,sha256=gmAs68_njBqEz-Qni-UGgeYEDTMxlTWk_IOm76FONNE,7279
@@ -39,12 +43,12 @@ doc_utils/unused_adoc.py,sha256=LPQWPGEOizXECxepk7E_5cjTVvKn6RXQYTWG97Ps5VQ,9077
 doc_utils/unused_attributes.py,sha256=2UmqdXd5ogaPtj9_teApM0IlkdCmzBZNRh7XXrVYJOk,9032
 doc_utils/unused_images.py,sha256=hL8Qrik9QCkVh54eBLuNczRS9tMnsqIEfavNamM1UeQ,5664
 doc_utils/validate_links.py,sha256=iBGXnwdeLlgIT3fo3v01ApT5k0X2FtctsvkrE6E3VMk,19610
-doc_utils/version.py,sha256=
+doc_utils/version.py,sha256=9kAm8RDU9obPgPJ99XR9NylVS9V7v-_6YogFp0RiSNs,203
 doc_utils/version_check.py,sha256=-31Y6AN0KGi_CUCAVOOhf6bPO3r7SQIXPxxeffLAF0w,7535
 doc_utils/warnings_report.py,sha256=20yfwqBjOprfFhQwCujbcsvjJCbHHhmH84uAujm-y-o,8877
-rolfedh_doc_utils-0.1.
-rolfedh_doc_utils-0.1.
-rolfedh_doc_utils-0.1.
-rolfedh_doc_utils-0.1.
-rolfedh_doc_utils-0.1.
-rolfedh_doc_utils-0.1.
+rolfedh_doc_utils-0.1.42.dist-info/licenses/LICENSE,sha256=vLxtwMVOJA_hEy8b77niTkdmQI9kNJskXHq0dBS36e0,1075
+rolfedh_doc_utils-0.1.42.dist-info/METADATA,sha256=qptswU3wJNmGolK1JTaJUVrvue2NOv3pShN2qN0cOoM,8654
+rolfedh_doc_utils-0.1.42.dist-info/WHEEL,sha256=wUyA8OaulRlbfwMtmQsvNngGrxQHAvkKcvRmdizlJi0,92
+rolfedh_doc_utils-0.1.42.dist-info/entry_points.txt,sha256=3CiBc2vWwHezC6aBBBz9oY0Z6J8ADrFz1B3hh71Lg0o,1141
+rolfedh_doc_utils-0.1.42.dist-info/top_level.txt,sha256=iaqMjXiZZTvQ0wUEL3tHOf48pB-VJw7lw0_20FJ8y64,512
+rolfedh_doc_utils-0.1.42.dist-info/RECORD,,
{rolfedh_doc_utils-0.1.40.dist-info → rolfedh_doc_utils-0.1.42.dist-info}/entry_points.txt
CHANGED
@@ -7,6 +7,7 @@ check-source-directives = check_source_directives:main
 convert-callouts-interactive = convert_callouts_interactive:main
 convert-callouts-to-deflist = convert_callouts_to_deflist:main
 convert-freemarker-to-asciidoc = convert_freemarker_to_asciidoc:main
+convert-id-attributes-to-ids = convert_id_attributes_to_ids:main
 convert-tables-to-deflists = convert_tables_to_deflists:main
 doc-utils = doc_utils_cli:main
 extract-link-attributes = extract_link_attributes:main
@@ -14,6 +15,8 @@ find-duplicate-content = find_duplicate_content:main
 find-duplicate-includes = find_duplicate_includes:main
 find-unused-attributes = find_unused_attributes:main
 format-asciidoc-spacing = format_asciidoc_spacing:main
+insert-abstract-role = insert_abstract_role:main
+insert-procedure-title = insert_procedure_title:main
 inventory-conditionals = inventory_conditionals:main
 replace-link-attributes = replace_link_attributes:main
 validate-links = validate_links:main
{rolfedh_doc_utils-0.1.40.dist-info → rolfedh_doc_utils-0.1.42.dist-info}/top_level.txt
CHANGED
@@ -7,6 +7,7 @@ check_source_directives
 convert_callouts_interactive
 convert_callouts_to_deflist
 convert_freemarker_to_asciidoc
+convert_id_attributes_to_ids
 convert_tables_to_deflists
 doc_utils
 doc_utils_cli
@@ -15,6 +16,8 @@ find_duplicate_content
 find_duplicate_includes
 find_unused_attributes
 format_asciidoc_spacing
+insert_abstract_role
+insert_procedure_title
 inventory_conditionals
 replace_link_attributes
 validate_links
{rolfedh_doc_utils-0.1.40.dist-info → rolfedh_doc_utils-0.1.42.dist-info}/licenses/LICENSE
File without changes