rolfedh-doc-utils 0.1.8__py3-none-any.whl → 0.1.9__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- doc_utils/format_asciidoc_spacing.py +222 -0
- doc_utils/replace_link_attributes.py +168 -0
- format_asciidoc_spacing.py +42 -232
- replace_link_attributes.py +186 -0
- {rolfedh_doc_utils-0.1.8.dist-info → rolfedh_doc_utils-0.1.9.dist-info}/METADATA +2 -1
- {rolfedh_doc_utils-0.1.8.dist-info → rolfedh_doc_utils-0.1.9.dist-info}/RECORD +10 -7
- {rolfedh_doc_utils-0.1.8.dist-info → rolfedh_doc_utils-0.1.9.dist-info}/entry_points.txt +1 -0
- {rolfedh_doc_utils-0.1.8.dist-info → rolfedh_doc_utils-0.1.9.dist-info}/top_level.txt +1 -0
- {rolfedh_doc_utils-0.1.8.dist-info → rolfedh_doc_utils-0.1.9.dist-info}/WHEEL +0 -0
- {rolfedh_doc_utils-0.1.8.dist-info → rolfedh_doc_utils-0.1.9.dist-info}/licenses/LICENSE +0 -0
|
@@ -0,0 +1,222 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Format AsciiDoc spacing - ensures blank lines after headings and around include directives.
|
|
3
|
+
|
|
4
|
+
Core logic for formatting AsciiDoc files with proper spacing.
|
|
5
|
+
"""
|
|
6
|
+
|
|
7
|
+
import re
|
|
8
|
+
from pathlib import Path
|
|
9
|
+
from typing import List, Tuple
|
|
10
|
+
|
|
11
|
+
|
|
12
|
+
def process_file(file_path: Path, dry_run: bool = False, verbose: bool = False) -> Tuple[bool, List[str]]:
    """
    Process a single AsciiDoc file to fix spacing issues.

    Ensures blank lines after headings, around include directives, and
    around ifdef/ifndef/endif conditional blocks. Lines inside delimited
    blocks (====, ----, ...., ____) and inside conditionals are left alone.

    Args:
        file_path: Path to the file to process
        dry_run: If True, show what would be changed without modifying
        verbose: If True, show detailed output

    Returns:
        Tuple of (changes_made, messages) where messages is a list of verbose output

    Raises:
        IOError: If the file cannot be read, or (when changes are written) cannot be written.
    """
    messages = []

    if verbose:
        messages.append(f"Processing: {file_path}")

    try:
        with open(file_path, 'r', encoding='utf-8') as f:
            lines = f.readlines()
    except (IOError, UnicodeDecodeError) as e:
        raise IOError(f"Error reading {file_path}: {e}")

    # Remove trailing newlines from lines for processing
    lines = [line.rstrip('\n\r') for line in lines]

    new_lines = []
    changes_made = False
    in_block = False  # Track if we're inside a block (admonition, listing, etc.)
    in_conditional = False  # Track if we're inside a conditional block

    for i, current_line in enumerate(lines):
        prev_line = lines[i-1] if i > 0 else ""
        next_line = lines[i+1] if i + 1 < len(lines) else ""

        # Check for conditional start (ifdef:: or ifndef::)
        if re.match(r'^(ifdef::|ifndef::)', current_line):
            in_conditional = True
            # Add blank line before conditional if needed
            if (prev_line and
                    not re.match(r'^\s*$', prev_line) and
                    not re.match(r'^(ifdef::|ifndef::|endif::)', prev_line)):
                new_lines.append("")
                changes_made = True
                if verbose:
                    messages.append(" Added blank line before conditional block")
            new_lines.append(current_line)

        # Check for conditional end (endif::)
        elif re.match(r'^endif::', current_line):
            new_lines.append(current_line)
            in_conditional = False
            # Add blank line after conditional if needed
            if (next_line and
                    not re.match(r'^\s*$', next_line) and
                    not re.match(r'^(ifdef::|ifndef::|endif::)', next_line)):
                new_lines.append("")
                changes_made = True
                if verbose:
                    messages.append(" Added blank line after conditional block")

        # Check for block delimiters (====, ----, ...., ____)
        # These are used for admonitions, listing blocks, literal blocks, etc.
        # NOTE(review): delimiters are assumed to come in matched pairs; an
        # unbalanced delimiter leaves in_block inverted for the rest of the file.
        elif re.match(r'^(====+|----+|\.\.\.\.+|____+)$', current_line):
            in_block = not in_block  # Toggle block state
            new_lines.append(current_line)
        # Check if current line is a heading (but not if we're in a block)
        elif not in_block and re.match(r'^=+\s+', current_line):
            new_lines.append(current_line)

            # Check if next line is not empty and not another heading
            if (next_line and
                    not re.match(r'^=+\s+', next_line) and
                    not re.match(r'^\s*$', next_line)):
                new_lines.append("")
                changes_made = True
                if verbose:
                    truncated = current_line[:50] + "..." if len(current_line) > 50 else current_line
                    messages.append(f" Added blank line after heading: {truncated}")

        # Check if current line is a comment (AsciiDoc comments start with //)
        elif re.match(r'^//', current_line):
            # Skip special handling if we're inside a conditional block
            if in_conditional:
                new_lines.append(current_line)
            else:
                # Check if next line is an include directive
                if next_line and re.match(r'^include::', next_line):
                    # This comment belongs to the include, add blank line before comment if needed
                    if (prev_line and
                            not re.match(r'^\s*$', prev_line) and
                            not re.match(r'^//', prev_line) and
                            not re.match(r'^:', prev_line)):  # Don't add if previous is attribute
                        new_lines.append("")
                        changes_made = True
                        if verbose:
                            messages.append(" Added blank line before comment above include")
                new_lines.append(current_line)

        # Check if current line is an attribute (starts with :)
        elif re.match(r'^:', current_line):
            # Skip special handling if we're inside a conditional block
            if in_conditional:
                new_lines.append(current_line)
            else:
                # Check if next line is an include directive
                if next_line and re.match(r'^include::', next_line):
                    # This attribute belongs to the include, add blank line before attribute if needed
                    if (prev_line and
                            not re.match(r'^\s*$', prev_line) and
                            not re.match(r'^//', prev_line) and
                            not re.match(r'^:', prev_line)):  # Don't add if previous is comment or attribute
                        new_lines.append("")
                        changes_made = True
                        if verbose:
                            messages.append(" Added blank line before attribute above include")
                new_lines.append(current_line)

        # Check if current line is an include directive
        elif re.match(r'^include::', current_line):
            # Skip special handling if we're inside a conditional block
            if in_conditional:
                new_lines.append(current_line)
            else:
                # Check if this is an attribute include (contains "attribute" in the path)
                is_attribute_include = 'attribute' in current_line.lower()

                # Check if this appears near the top of the file (within first 10 lines after H1)
                # Find the H1 heading position
                h1_position = -1
                for j in range(min(i, 10)):  # Look back up to 10 lines or to current position
                    if re.match(r'^=\s+', lines[j]):  # H1 heading starts with single =
                        h1_position = j
                        break

                # If this is an attribute include near the H1 heading, don't add surrounding blank lines
                is_near_h1 = h1_position >= 0 and (i - h1_position) <= 2

                # Check if previous line is a comment or attribute (which belongs to this include)
                has_comment_above = prev_line and re.match(r'^//', prev_line)
                has_attribute_above = prev_line and re.match(r'^:', prev_line)

                # If it's an attribute include near H1, only the heading's blank line is needed
                if not (is_attribute_include and is_near_h1):
                    # Don't add blank line if there's a comment or attribute above (it was handled by the comment/attribute logic)
                    if not has_comment_above and not has_attribute_above:
                        # Add blank line before include if previous line is not empty and not an include
                        if (prev_line and
                                not re.match(r'^\s*$', prev_line) and
                                not re.match(r'^include::', prev_line)):
                            new_lines.append("")
                            changes_made = True
                            if verbose:
                                truncated = current_line[:50] + "..." if len(current_line) > 50 else current_line
                                messages.append(f" Added blank line before include: {truncated}")

                new_lines.append(current_line)

                # If it's an attribute include near H1, don't add blank line after
                if not (is_attribute_include and is_near_h1):
                    # Add blank line after include if next line exists and is not empty and not an include
                    if (next_line and
                            not re.match(r'^\s*$', next_line) and
                            not re.match(r'^include::', next_line)):
                        new_lines.append("")
                        changes_made = True
                        if verbose:
                            truncated = current_line[:50] + "..." if len(current_line) > 50 else current_line
                            messages.append(f" Added blank line after include: {truncated}")

        else:
            new_lines.append(current_line)

    # Apply changes if any were made
    if changes_made:
        # Clean up any consecutive blank lines we may have added
        # (inserted blanks can land next to pre-existing blanks).
        cleaned_lines = []
        for i, line in enumerate(new_lines):
            # Check if this is a blank line we're about to add
            if line == "":
                # Check if the previous line is also a blank line
                if i > 0 and cleaned_lines and cleaned_lines[-1] == "":
                    # Skip this blank line as we already have one
                    continue
            cleaned_lines.append(line)

        if not dry_run:
            try:
                with open(file_path, 'w', encoding='utf-8') as f:
                    for line in cleaned_lines:
                        f.write(line + '\n')
            except IOError as e:
                raise IOError(f"Error writing {file_path}: {e}")
    else:
        if verbose:
            messages.append(" No changes needed")

    return changes_made, messages
|
|
210
|
+
|
|
211
|
+
|
|
212
|
+
def find_adoc_files(path: Path) -> List[Path]:
    """Return every .adoc file reachable from *path*.

    A single file is returned only when it carries the .adoc suffix; a
    directory is searched recursively. Anything else yields an empty list.
    """
    if path.is_file():
        return [path] if path.suffix == '.adoc' else []
    if path.is_dir():
        return list(path.rglob('*.adoc'))
    return []
|
|
@@ -0,0 +1,168 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Replace AsciiDoc attributes within link URLs with their actual values.
|
|
3
|
+
|
|
4
|
+
This module finds and replaces attribute references (like {attribute-name}) that appear
|
|
5
|
+
in the URL portion of AsciiDoc link macros (link: and xref:) with their resolved values
|
|
6
|
+
from attributes.adoc. Link text is preserved unchanged.
|
|
7
|
+
"""
|
|
8
|
+
|
|
9
|
+
import re
|
|
10
|
+
from pathlib import Path
|
|
11
|
+
from typing import Dict, List, Tuple, Optional
|
|
12
|
+
|
|
13
|
+
|
|
14
|
+
def find_attributes_files(root_dir: Path) -> List[Path]:
    """Find all attributes.adoc files under *root_dir*.

    Hidden directories (any path component starting with '.') and common
    build-output directories are skipped.

    Args:
        root_dir: Directory to search recursively.

    Returns:
        Paths to the attributes.adoc files found, in rglob traversal order.
    """
    excluded = {'target', 'build', 'node_modules'}
    attributes_files = []

    # rglob() already searches recursively; the original '**/' prefix in the
    # pattern was redundant.
    for path in root_dir.rglob('attributes.adoc'):
        # NOTE: path.parts includes the components of root_dir itself, so a
        # hidden or excluded component anywhere in the path skips the file.
        if any(part.startswith('.') or part in excluded for part in path.parts):
            continue
        attributes_files.append(path)

    return attributes_files
|
|
26
|
+
|
|
27
|
+
|
|
28
|
+
def load_attributes(attributes_file: Path) -> Dict[str, str]:
    """Parse ``:name: value`` attribute definitions from an attributes.adoc file.

    Lines that do not look like attribute definitions are ignored; a later
    definition of the same name overwrites an earlier one. Values are
    whitespace-stripped.
    """
    definition = re.compile(r'^:([a-zA-Z0-9_-]+):\s*(.*)$')
    attributes: Dict[str, str] = {}

    with open(attributes_file, 'r', encoding='utf-8') as f:
        for raw_line in f:
            found = definition.match(raw_line)
            if found is None:
                continue
            name, value = found.groups()
            attributes[name] = value.strip()

    return attributes
|
|
43
|
+
|
|
44
|
+
|
|
45
|
+
def resolve_nested_attributes(attributes: Dict[str, str], max_iterations: int = 10) -> Dict[str, str]:
    """Expand ``{name}`` references inside attribute values using the same mapping.

    The mapping is updated in place and also returned. Resolution repeats
    until a full pass makes no substitution, or *max_iterations* passes have
    run — which bounds the work done on circular references.
    """
    reference = re.compile(r'\{([a-zA-Z0-9_-]+)\}')

    for _ in range(max_iterations):
        any_substitution = False

        for name in list(attributes):
            value = attributes[name]
            for ref in reference.findall(value):
                if ref not in attributes:
                    continue
                expanded = value.replace('{' + ref + '}', attributes[ref])
                if expanded != value:
                    attributes[name] = expanded
                    value = expanded
                    any_substitution = True

        if not any_substitution:
            break

    return attributes
|
|
66
|
+
|
|
67
|
+
|
|
68
|
+
def _resolve_url_attributes(url: str, attributes: Dict[str, str]) -> Tuple[str, int]:
    """Replace known {name} references in *url*; return (new_url, count).

    Each occurrence of a reference whose name is defined in *attributes*
    counts as one replacement; unknown names are left untouched.
    """
    count = 0
    resolved = url
    for attr_name in re.findall(r'\{([a-zA-Z0-9_-]+)\}', url):
        if attr_name in attributes:
            resolved = re.sub(re.escape(f'{{{attr_name}}}'), attributes[attr_name], resolved)
            count += 1
    return resolved, count


def replace_link_attributes_in_file(file_path: Path, attributes: Dict[str, str], dry_run: bool = False) -> int:
    """Replace attribute references within link macros in a single file.

    Only the URL/target portion of ``link:`` and ``xref:`` macros is
    rewritten; link text is preserved unchanged.

    Args:
        file_path: File to process.
        attributes: Mapping of attribute name to resolved value.
        dry_run: If True, compute replacements without writing the file.

    Returns:
        Number of attribute replacements made, or 0 if the content is
        unchanged. The file is rewritten only when changes occurred and
        dry_run is False.
    """
    with open(file_path, 'r', encoding='utf-8') as f:
        content = f.read()

    original_content = content
    replacement_count = 0

    # Match link: and xref: macros, capturing URL and text separately.
    # The 'empty_text' pattern is retained from the original implementation;
    # empty-text macros are normally consumed by the first two patterns already.
    link_patterns = [
        (r'link:([^[\]]*)\[([^\]]*)\]', 'link'),
        (r'xref:([^[\]]*)\[([^\]]*)\]', 'xref'),
        (r'(link|xref):([^[\]]*)\[\]', 'empty_text'),
    ]

    for pattern, link_type in link_patterns:
        # Process matches in reverse order so earlier match positions stay
        # valid while we splice replacements into the string.
        for match in reversed(list(re.finditer(pattern, content))):
            if link_type == 'empty_text':
                macro_type = match.group(1)  # 'link' or 'xref'
                url_part = match.group(2)
                text_part = ''
            else:
                macro_type = link_type
                url_part = match.group(1)
                text_part = match.group(2)

            modified_url, count = _resolve_url_attributes(url_part, attributes)
            replacement_count += count

            if modified_url != url_part:
                # Reconstruct the macro with the modified URL but original text.
                modified = f'{macro_type}:{modified_url}[{text_part}]'
                content = content[:match.start()] + modified + content[match.end():]

    # Write changes if not in dry-run mode
    if content != original_content:
        if not dry_run:
            with open(file_path, 'w', encoding='utf-8') as f:
                f.write(content)
        return replacement_count

    return 0
|
|
153
|
+
|
|
154
|
+
|
|
155
|
+
def find_adoc_files(root_dir: Path, exclude_dirs: Optional[set] = None) -> List[Path]:
    """Recursively collect *.adoc files under *root_dir*.

    Files whose path contains any component found in *exclude_dirs* are
    skipped; the default set excludes VCS and common build-output
    directories.
    """
    if exclude_dirs is None:
        exclude_dirs = {'.git', 'target', 'build', 'node_modules'}

    return [
        candidate
        for candidate in root_dir.rglob('*.adoc')
        if not set(candidate.parts).intersection(exclude_dirs)
    ]
|
format_asciidoc_spacing.py
CHANGED
|
@@ -1,13 +1,15 @@
|
|
|
1
1
|
#!/usr/bin/env python3
|
|
2
|
+
"""
|
|
3
|
+
format-asciidoc-spacing - Format AsciiDoc spacing.
|
|
2
4
|
|
|
3
|
-
|
|
5
|
+
Ensures blank lines after headings and around include directives.
|
|
6
|
+
"""
|
|
4
7
|
|
|
5
8
|
import argparse
|
|
6
|
-
import os
|
|
7
|
-
import re
|
|
8
9
|
import sys
|
|
9
10
|
from pathlib import Path
|
|
10
|
-
|
|
11
|
+
|
|
12
|
+
from doc_utils.format_asciidoc_spacing import process_file, find_adoc_files
|
|
11
13
|
|
|
12
14
|
|
|
13
15
|
# Colors for output
|
|
@@ -23,224 +25,6 @@ def print_colored(message: str, color: str = Colors.NC) -> None:
|
|
|
23
25
|
print(f"{color}{message}{Colors.NC}")
|
|
24
26
|
|
|
25
27
|
|
|
26
|
-
def process_file(file_path: Path, dry_run: bool = False, verbose: bool = False) -> bool:
|
|
27
|
-
"""
|
|
28
|
-
Process a single AsciiDoc file to fix spacing issues.
|
|
29
|
-
|
|
30
|
-
Args:
|
|
31
|
-
file_path: Path to the file to process
|
|
32
|
-
dry_run: If True, show what would be changed without modifying
|
|
33
|
-
verbose: If True, show detailed output
|
|
34
|
-
|
|
35
|
-
Returns:
|
|
36
|
-
True if changes were made (or would be made in dry-run), False otherwise
|
|
37
|
-
"""
|
|
38
|
-
if verbose:
|
|
39
|
-
print(f"Processing: {file_path}")
|
|
40
|
-
|
|
41
|
-
try:
|
|
42
|
-
with open(file_path, 'r', encoding='utf-8') as f:
|
|
43
|
-
lines = f.readlines()
|
|
44
|
-
except (IOError, UnicodeDecodeError) as e:
|
|
45
|
-
print_colored(f"Error reading {file_path}: {e}", Colors.RED)
|
|
46
|
-
return False
|
|
47
|
-
|
|
48
|
-
# Remove trailing newlines from lines for processing
|
|
49
|
-
lines = [line.rstrip('\n\r') for line in lines]
|
|
50
|
-
|
|
51
|
-
new_lines = []
|
|
52
|
-
changes_made = False
|
|
53
|
-
in_block = False # Track if we're inside a block (admonition, listing, etc.)
|
|
54
|
-
in_conditional = False # Track if we're inside a conditional block
|
|
55
|
-
|
|
56
|
-
for i, current_line in enumerate(lines):
|
|
57
|
-
prev_line = lines[i-1] if i > 0 else ""
|
|
58
|
-
next_line = lines[i+1] if i + 1 < len(lines) else ""
|
|
59
|
-
|
|
60
|
-
# Check for conditional start (ifdef:: or ifndef::)
|
|
61
|
-
if re.match(r'^(ifdef::|ifndef::)', current_line):
|
|
62
|
-
in_conditional = True
|
|
63
|
-
# Add blank line before conditional if needed
|
|
64
|
-
if (prev_line and
|
|
65
|
-
not re.match(r'^\s*$', prev_line) and
|
|
66
|
-
not re.match(r'^(ifdef::|ifndef::|endif::)', prev_line)):
|
|
67
|
-
new_lines.append("")
|
|
68
|
-
changes_made = True
|
|
69
|
-
if verbose:
|
|
70
|
-
print(f" Added blank line before conditional block")
|
|
71
|
-
new_lines.append(current_line)
|
|
72
|
-
|
|
73
|
-
# Check for conditional end (endif::)
|
|
74
|
-
elif re.match(r'^endif::', current_line):
|
|
75
|
-
new_lines.append(current_line)
|
|
76
|
-
in_conditional = False
|
|
77
|
-
# Add blank line after conditional if needed
|
|
78
|
-
if (next_line and
|
|
79
|
-
not re.match(r'^\s*$', next_line) and
|
|
80
|
-
not re.match(r'^(ifdef::|ifndef::|endif::)', next_line)):
|
|
81
|
-
new_lines.append("")
|
|
82
|
-
changes_made = True
|
|
83
|
-
if verbose:
|
|
84
|
-
print(f" Added blank line after conditional block")
|
|
85
|
-
|
|
86
|
-
# Check for block delimiters (====, ----, ...., ____)
|
|
87
|
-
# These are used for admonitions, listing blocks, literal blocks, etc.
|
|
88
|
-
elif re.match(r'^(====+|----+|\.\.\.\.+|____+)$', current_line):
|
|
89
|
-
in_block = not in_block # Toggle block state
|
|
90
|
-
new_lines.append(current_line)
|
|
91
|
-
# Check if current line is a heading (but not if we're in a block)
|
|
92
|
-
elif not in_block and re.match(r'^=+\s+', current_line):
|
|
93
|
-
new_lines.append(current_line)
|
|
94
|
-
|
|
95
|
-
# Check if next line is not empty and not another heading
|
|
96
|
-
if (next_line and
|
|
97
|
-
not re.match(r'^=+\s+', next_line) and
|
|
98
|
-
not re.match(r'^\s*$', next_line)):
|
|
99
|
-
new_lines.append("")
|
|
100
|
-
changes_made = True
|
|
101
|
-
if verbose:
|
|
102
|
-
truncated = current_line[:50] + "..." if len(current_line) > 50 else current_line
|
|
103
|
-
print(f" Added blank line after heading: {truncated}")
|
|
104
|
-
|
|
105
|
-
# Check if current line is a comment (AsciiDoc comments start with //)
|
|
106
|
-
elif re.match(r'^//', current_line):
|
|
107
|
-
# Skip special handling if we're inside a conditional block
|
|
108
|
-
if in_conditional:
|
|
109
|
-
new_lines.append(current_line)
|
|
110
|
-
else:
|
|
111
|
-
# Check if next line is an include directive
|
|
112
|
-
if next_line and re.match(r'^include::', next_line):
|
|
113
|
-
# This comment belongs to the include, add blank line before comment if needed
|
|
114
|
-
if (prev_line and
|
|
115
|
-
not re.match(r'^\s*$', prev_line) and
|
|
116
|
-
not re.match(r'^//', prev_line) and
|
|
117
|
-
not re.match(r'^:', prev_line)): # Don't add if previous is attribute
|
|
118
|
-
new_lines.append("")
|
|
119
|
-
changes_made = True
|
|
120
|
-
if verbose:
|
|
121
|
-
print(f" Added blank line before comment above include")
|
|
122
|
-
new_lines.append(current_line)
|
|
123
|
-
|
|
124
|
-
# Check if current line is an attribute (starts with :)
|
|
125
|
-
elif re.match(r'^:', current_line):
|
|
126
|
-
# Skip special handling if we're inside a conditional block
|
|
127
|
-
if in_conditional:
|
|
128
|
-
new_lines.append(current_line)
|
|
129
|
-
else:
|
|
130
|
-
# Check if next line is an include directive
|
|
131
|
-
if next_line and re.match(r'^include::', next_line):
|
|
132
|
-
# This attribute belongs to the include, add blank line before attribute if needed
|
|
133
|
-
if (prev_line and
|
|
134
|
-
not re.match(r'^\s*$', prev_line) and
|
|
135
|
-
not re.match(r'^//', prev_line) and
|
|
136
|
-
not re.match(r'^:', prev_line)): # Don't add if previous is comment or attribute
|
|
137
|
-
new_lines.append("")
|
|
138
|
-
changes_made = True
|
|
139
|
-
if verbose:
|
|
140
|
-
print(f" Added blank line before attribute above include")
|
|
141
|
-
new_lines.append(current_line)
|
|
142
|
-
|
|
143
|
-
# Check if current line is an include directive
|
|
144
|
-
elif re.match(r'^include::', current_line):
|
|
145
|
-
# Skip special handling if we're inside a conditional block
|
|
146
|
-
if in_conditional:
|
|
147
|
-
new_lines.append(current_line)
|
|
148
|
-
else:
|
|
149
|
-
# Check if this is an attribute include (contains "attribute" in the path)
|
|
150
|
-
is_attribute_include = 'attribute' in current_line.lower()
|
|
151
|
-
|
|
152
|
-
# Check if this appears near the top of the file (within first 10 lines after H1)
|
|
153
|
-
# Find the H1 heading position
|
|
154
|
-
h1_position = -1
|
|
155
|
-
for j in range(min(i, 10)): # Look back up to 10 lines or to current position
|
|
156
|
-
if re.match(r'^=\s+', lines[j]): # H1 heading starts with single =
|
|
157
|
-
h1_position = j
|
|
158
|
-
break
|
|
159
|
-
|
|
160
|
-
# If this is an attribute include near the H1 heading, don't add surrounding blank lines
|
|
161
|
-
is_near_h1 = h1_position >= 0 and (i - h1_position) <= 2
|
|
162
|
-
|
|
163
|
-
# Check if previous line is a comment or attribute (which belongs to this include)
|
|
164
|
-
has_comment_above = prev_line and re.match(r'^//', prev_line)
|
|
165
|
-
has_attribute_above = prev_line and re.match(r'^:', prev_line)
|
|
166
|
-
|
|
167
|
-
# If it's an attribute include near H1, only the heading's blank line is needed
|
|
168
|
-
if not (is_attribute_include and is_near_h1):
|
|
169
|
-
# Don't add blank line if there's a comment or attribute above (it was handled by the comment/attribute logic)
|
|
170
|
-
if not has_comment_above and not has_attribute_above:
|
|
171
|
-
# Add blank line before include if previous line is not empty and not an include
|
|
172
|
-
if (prev_line and
|
|
173
|
-
not re.match(r'^\s*$', prev_line) and
|
|
174
|
-
not re.match(r'^include::', prev_line)):
|
|
175
|
-
new_lines.append("")
|
|
176
|
-
changes_made = True
|
|
177
|
-
if verbose:
|
|
178
|
-
truncated = current_line[:50] + "..." if len(current_line) > 50 else current_line
|
|
179
|
-
print(f" Added blank line before include: {truncated}")
|
|
180
|
-
|
|
181
|
-
new_lines.append(current_line)
|
|
182
|
-
|
|
183
|
-
# If it's an attribute include near H1, don't add blank line after
|
|
184
|
-
if not (is_attribute_include and is_near_h1):
|
|
185
|
-
# Add blank line after include if next line exists and is not empty and not an include
|
|
186
|
-
if (next_line and
|
|
187
|
-
not re.match(r'^\s*$', next_line) and
|
|
188
|
-
not re.match(r'^include::', next_line)):
|
|
189
|
-
new_lines.append("")
|
|
190
|
-
changes_made = True
|
|
191
|
-
if verbose:
|
|
192
|
-
truncated = current_line[:50] + "..." if len(current_line) > 50 else current_line
|
|
193
|
-
print(f" Added blank line after include: {truncated}")
|
|
194
|
-
|
|
195
|
-
else:
|
|
196
|
-
new_lines.append(current_line)
|
|
197
|
-
|
|
198
|
-
# Apply changes if any were made
|
|
199
|
-
if changes_made:
|
|
200
|
-
# Clean up any consecutive blank lines we may have added
|
|
201
|
-
cleaned_lines = []
|
|
202
|
-
for i, line in enumerate(new_lines):
|
|
203
|
-
# Check if this is a blank line we're about to add
|
|
204
|
-
if line == "":
|
|
205
|
-
# Check if the previous line is also a blank line
|
|
206
|
-
if i > 0 and cleaned_lines and cleaned_lines[-1] == "":
|
|
207
|
-
# Skip this blank line as we already have one
|
|
208
|
-
continue
|
|
209
|
-
cleaned_lines.append(line)
|
|
210
|
-
|
|
211
|
-
if dry_run:
|
|
212
|
-
print_colored(f"Would modify: {file_path}", Colors.YELLOW)
|
|
213
|
-
else:
|
|
214
|
-
try:
|
|
215
|
-
with open(file_path, 'w', encoding='utf-8') as f:
|
|
216
|
-
for line in cleaned_lines:
|
|
217
|
-
f.write(line + '\n')
|
|
218
|
-
print_colored(f"Modified: {file_path}", Colors.GREEN)
|
|
219
|
-
except IOError as e:
|
|
220
|
-
print_colored(f"Error writing {file_path}: {e}", Colors.RED)
|
|
221
|
-
return False
|
|
222
|
-
else:
|
|
223
|
-
if verbose:
|
|
224
|
-
print(" No changes needed")
|
|
225
|
-
|
|
226
|
-
return changes_made
|
|
227
|
-
|
|
228
|
-
|
|
229
|
-
def find_adoc_files(path: Path) -> List[Path]:
|
|
230
|
-
"""Find all .adoc files in the given path"""
|
|
231
|
-
adoc_files = []
|
|
232
|
-
|
|
233
|
-
if path.is_file():
|
|
234
|
-
if path.suffix == '.adoc':
|
|
235
|
-
adoc_files.append(path)
|
|
236
|
-
else:
|
|
237
|
-
print_colored(f"Warning: {path} is not an AsciiDoc file (.adoc)", Colors.YELLOW)
|
|
238
|
-
elif path.is_dir():
|
|
239
|
-
adoc_files = list(path.rglob('*.adoc'))
|
|
240
|
-
|
|
241
|
-
return adoc_files
|
|
242
|
-
|
|
243
|
-
|
|
244
28
|
def main():
|
|
245
29
|
"""Main entry point"""
|
|
246
30
|
parser = argparse.ArgumentParser(
|
|
@@ -258,7 +42,7 @@ Examples:
|
|
|
258
42
|
%(prog)s --dry-run modules/ # Preview changes without modifying
|
|
259
43
|
"""
|
|
260
44
|
)
|
|
261
|
-
|
|
45
|
+
|
|
262
46
|
parser.add_argument(
|
|
263
47
|
'path',
|
|
264
48
|
nargs='?',
|
|
@@ -275,42 +59,68 @@ Examples:
|
|
|
275
59
|
action='store_true',
|
|
276
60
|
help='Show detailed output'
|
|
277
61
|
)
|
|
278
|
-
|
|
62
|
+
|
|
279
63
|
args = parser.parse_args()
|
|
280
|
-
|
|
64
|
+
|
|
281
65
|
# Convert path to Path object
|
|
282
66
|
target_path = Path(args.path)
|
|
283
|
-
|
|
67
|
+
|
|
284
68
|
# Check if path exists
|
|
285
69
|
if not target_path.exists():
|
|
286
70
|
print_colored(f"Error: Path does not exist: {target_path}", Colors.RED)
|
|
287
71
|
sys.exit(1)
|
|
288
|
-
|
|
72
|
+
|
|
289
73
|
# Display dry-run mode message
|
|
290
74
|
if args.dry_run:
|
|
291
75
|
print_colored("DRY RUN MODE - No files will be modified", Colors.YELLOW)
|
|
292
|
-
|
|
76
|
+
|
|
293
77
|
# Find all AsciiDoc files
|
|
294
78
|
adoc_files = find_adoc_files(target_path)
|
|
295
|
-
|
|
79
|
+
|
|
296
80
|
if not adoc_files:
|
|
81
|
+
if target_path.is_file():
|
|
82
|
+
print_colored(f"Warning: {target_path} is not an AsciiDoc file (.adoc)", Colors.YELLOW)
|
|
297
83
|
print(f"Processed 0 AsciiDoc file(s)")
|
|
298
84
|
print("AsciiDoc spacing formatting complete!")
|
|
299
85
|
return
|
|
300
|
-
|
|
86
|
+
|
|
301
87
|
# Process each file
|
|
302
88
|
files_processed = 0
|
|
89
|
+
files_modified = 0
|
|
90
|
+
|
|
303
91
|
for file_path in adoc_files:
|
|
304
92
|
try:
|
|
305
|
-
process_file(file_path, args.dry_run, args.verbose)
|
|
93
|
+
changes_made, messages = process_file(file_path, args.dry_run, args.verbose)
|
|
94
|
+
|
|
95
|
+
# Print verbose messages
|
|
96
|
+
if args.verbose:
|
|
97
|
+
for msg in messages:
|
|
98
|
+
print(msg)
|
|
99
|
+
|
|
100
|
+
if changes_made:
|
|
101
|
+
files_modified += 1
|
|
102
|
+
if args.dry_run:
|
|
103
|
+
print_colored(f"Would modify: {file_path}", Colors.YELLOW)
|
|
104
|
+
else:
|
|
105
|
+
print_colored(f"Modified: {file_path}", Colors.GREEN)
|
|
106
|
+
elif args.verbose:
|
|
107
|
+
print(f" No changes needed for: {file_path}")
|
|
108
|
+
|
|
306
109
|
files_processed += 1
|
|
110
|
+
|
|
307
111
|
except KeyboardInterrupt:
|
|
308
112
|
print_colored("\nOperation cancelled by user", Colors.YELLOW)
|
|
309
113
|
sys.exit(1)
|
|
114
|
+
except IOError as e:
|
|
115
|
+
print_colored(f"{e}", Colors.RED)
|
|
310
116
|
except Exception as e:
|
|
311
117
|
print_colored(f"Unexpected error processing {file_path}: {e}", Colors.RED)
|
|
312
|
-
|
|
118
|
+
|
|
313
119
|
print(f"Processed {files_processed} AsciiDoc file(s)")
|
|
120
|
+
if args.dry_run and files_modified > 0:
|
|
121
|
+
print(f"Would modify {files_modified} file(s)")
|
|
122
|
+
elif files_modified > 0:
|
|
123
|
+
print(f"Modified {files_modified} file(s)")
|
|
314
124
|
print("AsciiDoc spacing formatting complete!")
|
|
315
125
|
|
|
316
126
|
|
|
@@ -0,0 +1,186 @@
|
|
|
1
|
+
#!/usr/bin/env python3
|
|
2
|
+
"""
|
|
3
|
+
replace-link-attributes - Replace AsciiDoc attributes within link URLs with their actual values.
|
|
4
|
+
|
|
5
|
+
This script finds and replaces attribute references (like {attribute-name}) that appear
|
|
6
|
+
in the URL portion of AsciiDoc link macros (link: and xref:) with their resolved values
|
|
7
|
+
from attributes.adoc. Link text is preserved unchanged.
|
|
8
|
+
"""
|
|
9
|
+
|
|
10
|
+
import argparse
|
|
11
|
+
import sys
|
|
12
|
+
from pathlib import Path
|
|
13
|
+
from typing import Optional
|
|
14
|
+
|
|
15
|
+
from doc_utils.replace_link_attributes import (
|
|
16
|
+
find_attributes_files,
|
|
17
|
+
load_attributes,
|
|
18
|
+
resolve_nested_attributes,
|
|
19
|
+
replace_link_attributes_in_file,
|
|
20
|
+
find_adoc_files
|
|
21
|
+
)
|
|
22
|
+
|
|
23
|
+
|
|
24
|
+
def prompt_for_attributes_file(attributes_files: list[Path]) -> Optional[Path]:
|
|
25
|
+
"""Prompt user to select or specify attributes file."""
|
|
26
|
+
if not attributes_files:
|
|
27
|
+
print("No attributes.adoc files found in the repository.")
|
|
28
|
+
response = input("Enter the path to your attributes.adoc file (or 'q' to quit): ").strip()
|
|
29
|
+
if response.lower() == 'q':
|
|
30
|
+
return None
|
|
31
|
+
path = Path(response)
|
|
32
|
+
if path.exists() and path.is_file():
|
|
33
|
+
return path
|
|
34
|
+
else:
|
|
35
|
+
print(f"Error: File not found: {response}")
|
|
36
|
+
return None
|
|
37
|
+
|
|
38
|
+
if len(attributes_files) == 1:
|
|
39
|
+
file_path = attributes_files[0]
|
|
40
|
+
response = input(f"Found attributes file: {file_path}\nUse this file? (y/n/q): ").strip().lower()
|
|
41
|
+
if response == 'y':
|
|
42
|
+
return file_path
|
|
43
|
+
elif response == 'q':
|
|
44
|
+
return None
|
|
45
|
+
else:
|
|
46
|
+
response = input("Enter the path to your attributes.adoc file (or 'q' to quit): ").strip()
|
|
47
|
+
if response.lower() == 'q':
|
|
48
|
+
return None
|
|
49
|
+
path = Path(response)
|
|
50
|
+
if path.exists() and path.is_file():
|
|
51
|
+
return path
|
|
52
|
+
else:
|
|
53
|
+
print(f"Error: File not found: {response}")
|
|
54
|
+
return None
|
|
55
|
+
|
|
56
|
+
# Multiple files found
|
|
57
|
+
print("\nFound multiple attributes.adoc files:")
|
|
58
|
+
for i, file_path in enumerate(attributes_files, 1):
|
|
59
|
+
print(f" {i}. {file_path}")
|
|
60
|
+
print(f" {len(attributes_files) + 1}. Enter custom path")
|
|
61
|
+
|
|
62
|
+
while True:
|
|
63
|
+
response = input(f"\nSelect option (1-{len(attributes_files) + 1}) or 'q' to quit: ").strip()
|
|
64
|
+
if response.lower() == 'q':
|
|
65
|
+
return None
|
|
66
|
+
|
|
67
|
+
try:
|
|
68
|
+
choice = int(response)
|
|
69
|
+
if 1 <= choice <= len(attributes_files):
|
|
70
|
+
return attributes_files[choice - 1]
|
|
71
|
+
elif choice == len(attributes_files) + 1:
|
|
72
|
+
response = input("Enter the path to your attributes.adoc file: ").strip()
|
|
73
|
+
path = Path(response)
|
|
74
|
+
if path.exists() and path.is_file():
|
|
75
|
+
return path
|
|
76
|
+
else:
|
|
77
|
+
print(f"Error: File not found: {response}")
|
|
78
|
+
else:
|
|
79
|
+
print(f"Invalid choice. Please enter a number between 1 and {len(attributes_files) + 1}")
|
|
80
|
+
except ValueError:
|
|
81
|
+
print("Invalid input. Please enter a number.")
|
|
82
|
+
|
|
83
|
+
|
|
84
|
+
def main():
|
|
85
|
+
parser = argparse.ArgumentParser(
|
|
86
|
+
description='Replace AsciiDoc attributes within link macros with their actual values.'
|
|
87
|
+
)
|
|
88
|
+
parser.add_argument(
|
|
89
|
+
'--dry-run', '-n',
|
|
90
|
+
action='store_true',
|
|
91
|
+
help='Show what would be changed without making actual modifications'
|
|
92
|
+
)
|
|
93
|
+
parser.add_argument(
|
|
94
|
+
'--path', '-p',
|
|
95
|
+
type=str,
|
|
96
|
+
default='.',
|
|
97
|
+
help='Repository path to search (default: current directory)'
|
|
98
|
+
)
|
|
99
|
+
parser.add_argument(
|
|
100
|
+
'--attributes-file', '-a',
|
|
101
|
+
type=str,
|
|
102
|
+
help='Path to attributes.adoc file (skips interactive selection)'
|
|
103
|
+
)
|
|
104
|
+
|
|
105
|
+
args = parser.parse_args()
|
|
106
|
+
|
|
107
|
+
# Determine repository root
|
|
108
|
+
repo_root = Path(args.path).resolve()
|
|
109
|
+
|
|
110
|
+
if not repo_root.exists() or not repo_root.is_dir():
|
|
111
|
+
print(f"Error: Directory not found: {repo_root}")
|
|
112
|
+
sys.exit(1)
|
|
113
|
+
|
|
114
|
+
print(f"{'DRY RUN MODE - ' if args.dry_run else ''}Searching in: {repo_root}")
|
|
115
|
+
|
|
116
|
+
# Find or get attributes file
|
|
117
|
+
if args.attributes_file:
|
|
118
|
+
attributes_file = Path(args.attributes_file)
|
|
119
|
+
if not attributes_file.exists():
|
|
120
|
+
print(f"Error: Specified attributes file not found: {attributes_file}")
|
|
121
|
+
sys.exit(1)
|
|
122
|
+
else:
|
|
123
|
+
print("\nSearching for attributes.adoc files...")
|
|
124
|
+
attributes_files = find_attributes_files(repo_root)
|
|
125
|
+
attributes_file = prompt_for_attributes_file(attributes_files)
|
|
126
|
+
|
|
127
|
+
if not attributes_file:
|
|
128
|
+
print("Operation cancelled.")
|
|
129
|
+
sys.exit(0)
|
|
130
|
+
|
|
131
|
+
print(f"\nLoading attributes from: {attributes_file}")
|
|
132
|
+
attributes = load_attributes(attributes_file)
|
|
133
|
+
|
|
134
|
+
if not attributes:
|
|
135
|
+
print("No attributes found in the file.")
|
|
136
|
+
sys.exit(1)
|
|
137
|
+
|
|
138
|
+
print(f"Found {len(attributes)} attributes")
|
|
139
|
+
|
|
140
|
+
# Resolve nested references
|
|
141
|
+
print("Resolving nested attribute references...")
|
|
142
|
+
attributes = resolve_nested_attributes(attributes)
|
|
143
|
+
|
|
144
|
+
# Find all AsciiDoc files
|
|
145
|
+
print(f"\nSearching for *.adoc files in {repo_root}")
|
|
146
|
+
adoc_files = find_adoc_files(repo_root)
|
|
147
|
+
|
|
148
|
+
# Exclude the attributes file itself
|
|
149
|
+
adoc_files = [f for f in adoc_files if f != attributes_file]
|
|
150
|
+
|
|
151
|
+
print(f"Found {len(adoc_files)} AsciiDoc files to process")
|
|
152
|
+
|
|
153
|
+
if args.dry_run:
|
|
154
|
+
print("\n*** DRY RUN MODE - No files will be modified ***\n")
|
|
155
|
+
|
|
156
|
+
# Process each file
|
|
157
|
+
total_replacements = 0
|
|
158
|
+
files_modified = 0
|
|
159
|
+
|
|
160
|
+
for file_path in adoc_files:
|
|
161
|
+
replacements = replace_link_attributes_in_file(file_path, attributes, args.dry_run)
|
|
162
|
+
if replacements > 0:
|
|
163
|
+
rel_path = file_path.relative_to(repo_root)
|
|
164
|
+
prefix = "[DRY RUN] " if args.dry_run else ""
|
|
165
|
+
print(f" {prefix}Modified {rel_path}: {replacements} replacements")
|
|
166
|
+
total_replacements += replacements
|
|
167
|
+
files_modified += 1
|
|
168
|
+
|
|
169
|
+
# Summary
|
|
170
|
+
print(f"\nSummary:")
|
|
171
|
+
if args.dry_run:
|
|
172
|
+
print(f" Would modify {files_modified} files")
|
|
173
|
+
print(f" Would make {total_replacements} replacements")
|
|
174
|
+
print("\nRun without --dry-run to apply changes.")
|
|
175
|
+
else:
|
|
176
|
+
print(f" Total files modified: {files_modified}")
|
|
177
|
+
print(f" Total replacements: {total_replacements}")
|
|
178
|
+
|
|
179
|
+
if total_replacements == 0:
|
|
180
|
+
print("\nNo attribute references found within link macros.")
|
|
181
|
+
else:
|
|
182
|
+
print("\nReplacement complete!")
|
|
183
|
+
|
|
184
|
+
|
|
185
|
+
if __name__ == '__main__':
|
|
186
|
+
main()
|
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
Metadata-Version: 2.4
|
|
2
2
|
Name: rolfedh-doc-utils
|
|
3
|
-
Version: 0.1.
|
|
3
|
+
Version: 0.1.9
|
|
4
4
|
Summary: CLI tools for AsciiDoc documentation projects
|
|
5
5
|
Author: Rolfe Dlugy-Hegwer
|
|
6
6
|
License: MIT License
|
|
@@ -79,6 +79,7 @@ pip install -e .
|
|
|
79
79
|
|
|
80
80
|
| Tool | Description | Usage |
|
|
81
81
|
|------|-------------|-------|
|
|
82
|
+
| **`replace-link-attributes`** | Resolves Vale LinkAttribute violations by replacing attributes in link URLs | `replace-link-attributes --dry-run` |
|
|
82
83
|
| **`format-asciidoc-spacing`** | Standardizes spacing after headings and around includes | `format-asciidoc-spacing --dry-run modules/` |
|
|
83
84
|
| **`check-scannability`** | Analyzes readability (sentence/paragraph length) | `check-scannability --max-words 25` |
|
|
84
85
|
| **`archive-unused-files`** | Finds and archives unreferenced .adoc files | `archive-unused-files` (preview)<br>`archive-unused-files --archive` (execute) |
|
|
@@ -2,17 +2,20 @@ archive_unused_files.py,sha256=KMC5a1WL3rZ5owoVnncvfpT1YeMKbVXq9giHvadDgbM,1936
|
|
|
2
2
|
archive_unused_images.py,sha256=PG2o3haovYckgfhoPhl6KRG_a9czyZuqlLkzkupKTCY,1526
|
|
3
3
|
check_scannability.py,sha256=gcM-vFXKHGP_yFBz7-V5xbXWhIMmtMzBYIGwP9CFbzI,5140
|
|
4
4
|
find_unused_attributes.py,sha256=fk-K32eoCVHxoj7RiBNgSmX1arBLuwYfdSAOMc-wIx0,1677
|
|
5
|
-
format_asciidoc_spacing.py,sha256=
|
|
5
|
+
format_asciidoc_spacing.py,sha256=ROp-cdMs2_hk8H4z5ljT0iDgGtsiECZ8TVjjcN_oOWE,3874
|
|
6
|
+
replace_link_attributes.py,sha256=vg_aufw7dKXvh_epCKRNq_hEBMU_9crZ_JyJPpxSMNk,6454
|
|
6
7
|
doc_utils/__init__.py,sha256=qqZR3lohzkP63soymrEZPBGzzk6-nFzi4_tSffjmu_0,74
|
|
7
8
|
doc_utils/file_utils.py,sha256=fpTh3xx759sF8sNocdn_arsP3KAv8XA6cTQTAVIZiZg,4247
|
|
9
|
+
doc_utils/format_asciidoc_spacing.py,sha256=XnVJekaj39aDzjV3xFKl58flM41AaJzejxNYJIIAMz0,10139
|
|
10
|
+
doc_utils/replace_link_attributes.py,sha256=kBiePbxjQn3O2rzqmYY8Mqy_mJgZ6yw048vSZ5SSB5E,6587
|
|
8
11
|
doc_utils/scannability.py,sha256=XwlmHqDs69p_V36X7DLjPTy0DUoLszSGqYjJ9wE-3hg,982
|
|
9
12
|
doc_utils/topic_map_parser.py,sha256=tKcIO1m9r2K6dvPRGue58zqMr0O2zKU1gnZMzEE3U6o,4571
|
|
10
13
|
doc_utils/unused_adoc.py,sha256=2cbqcYr1os2EhETUU928BlPRlsZVSdI00qaMhqjSIqQ,5263
|
|
11
14
|
doc_utils/unused_attributes.py,sha256=HBgmHelqearfWl3TTC2bZGiJytjLADIgiGQUNKqXXPg,1847
|
|
12
15
|
doc_utils/unused_images.py,sha256=nqn36Bbrmon2KlGlcaruNjJJvTQ8_9H0WU9GvCW7rW8,1456
|
|
13
|
-
rolfedh_doc_utils-0.1.
|
|
14
|
-
rolfedh_doc_utils-0.1.
|
|
15
|
-
rolfedh_doc_utils-0.1.
|
|
16
|
-
rolfedh_doc_utils-0.1.
|
|
17
|
-
rolfedh_doc_utils-0.1.
|
|
18
|
-
rolfedh_doc_utils-0.1.
|
|
16
|
+
rolfedh_doc_utils-0.1.9.dist-info/licenses/LICENSE,sha256=vLxtwMVOJA_hEy8b77niTkdmQI9kNJskXHq0dBS36e0,1075
|
|
17
|
+
rolfedh_doc_utils-0.1.9.dist-info/METADATA,sha256=QmlwozZ8j3S2dnbZhsQpCr4JIDTyyjKpMCUYWnkCOTA,7041
|
|
18
|
+
rolfedh_doc_utils-0.1.9.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
|
|
19
|
+
rolfedh_doc_utils-0.1.9.dist-info/entry_points.txt,sha256=w5FlyLk-L2rtzYsMuTo8OVxYPo4nm4zRiArBTYqVud0,326
|
|
20
|
+
rolfedh_doc_utils-0.1.9.dist-info/top_level.txt,sha256=YrJAJFSkY9RKoHGDmrFCwsMVZS3Azw6ewzR2b081qCU,143
|
|
21
|
+
rolfedh_doc_utils-0.1.9.dist-info/RECORD,,
|
|
File without changes
|
|
File without changes
|