rolfedh-doc-utils 0.1.28__tar.gz → 0.1.30__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (62)
  1. {rolfedh_doc_utils-0.1.28/rolfedh_doc_utils.egg-info → rolfedh_doc_utils-0.1.30}/PKG-INFO +1 -1
  2. {rolfedh_doc_utils-0.1.28 → rolfedh_doc_utils-0.1.30}/callout_lib/converter_deflist.py +27 -0
  3. {rolfedh_doc_utils-0.1.28 → rolfedh_doc_utils-0.1.30}/callout_lib/detector.py +17 -13
  4. {rolfedh_doc_utils-0.1.28 → rolfedh_doc_utils-0.1.30}/callout_lib/table_parser.py +147 -50
  5. {rolfedh_doc_utils-0.1.28 → rolfedh_doc_utils-0.1.30}/doc_utils/format_asciidoc_spacing.py +16 -5
  6. {rolfedh_doc_utils-0.1.28 → rolfedh_doc_utils-0.1.30}/doc_utils/version.py +1 -1
  7. {rolfedh_doc_utils-0.1.28 → rolfedh_doc_utils-0.1.30}/doc_utils/version_check.py +16 -5
  8. {rolfedh_doc_utils-0.1.28 → rolfedh_doc_utils-0.1.30}/doc_utils_cli.py +6 -1
  9. {rolfedh_doc_utils-0.1.28 → rolfedh_doc_utils-0.1.30}/pyproject.toml +1 -1
  10. {rolfedh_doc_utils-0.1.28 → rolfedh_doc_utils-0.1.30/rolfedh_doc_utils.egg-info}/PKG-INFO +1 -1
  11. {rolfedh_doc_utils-0.1.28 → rolfedh_doc_utils-0.1.30}/tests/test_version_check.py +6 -4
  12. {rolfedh_doc_utils-0.1.28 → rolfedh_doc_utils-0.1.30}/LICENSE +0 -0
  13. {rolfedh_doc_utils-0.1.28 → rolfedh_doc_utils-0.1.30}/README.md +0 -0
  14. {rolfedh_doc_utils-0.1.28 → rolfedh_doc_utils-0.1.30}/archive_unused_files.py +0 -0
  15. {rolfedh_doc_utils-0.1.28 → rolfedh_doc_utils-0.1.30}/archive_unused_images.py +0 -0
  16. {rolfedh_doc_utils-0.1.28 → rolfedh_doc_utils-0.1.30}/callout_lib/__init__.py +0 -0
  17. {rolfedh_doc_utils-0.1.28 → rolfedh_doc_utils-0.1.30}/callout_lib/converter_bullets.py +0 -0
  18. {rolfedh_doc_utils-0.1.28 → rolfedh_doc_utils-0.1.30}/callout_lib/converter_comments.py +0 -0
  19. {rolfedh_doc_utils-0.1.28 → rolfedh_doc_utils-0.1.30}/check_scannability.py +0 -0
  20. {rolfedh_doc_utils-0.1.28 → rolfedh_doc_utils-0.1.30}/convert_callouts_interactive.py +0 -0
  21. {rolfedh_doc_utils-0.1.28 → rolfedh_doc_utils-0.1.30}/convert_callouts_to_deflist.py +0 -0
  22. {rolfedh_doc_utils-0.1.28 → rolfedh_doc_utils-0.1.30}/doc_utils/__init__.py +0 -0
  23. {rolfedh_doc_utils-0.1.28 → rolfedh_doc_utils-0.1.30}/doc_utils/extract_link_attributes.py +0 -0
  24. {rolfedh_doc_utils-0.1.28 → rolfedh_doc_utils-0.1.30}/doc_utils/file_utils.py +0 -0
  25. {rolfedh_doc_utils-0.1.28 → rolfedh_doc_utils-0.1.30}/doc_utils/replace_link_attributes.py +0 -0
  26. {rolfedh_doc_utils-0.1.28 → rolfedh_doc_utils-0.1.30}/doc_utils/scannability.py +0 -0
  27. {rolfedh_doc_utils-0.1.28 → rolfedh_doc_utils-0.1.30}/doc_utils/spinner.py +0 -0
  28. {rolfedh_doc_utils-0.1.28 → rolfedh_doc_utils-0.1.30}/doc_utils/topic_map_parser.py +0 -0
  29. {rolfedh_doc_utils-0.1.28 → rolfedh_doc_utils-0.1.30}/doc_utils/unused_adoc.py +0 -0
  30. {rolfedh_doc_utils-0.1.28 → rolfedh_doc_utils-0.1.30}/doc_utils/unused_attributes.py +0 -0
  31. {rolfedh_doc_utils-0.1.28 → rolfedh_doc_utils-0.1.30}/doc_utils/unused_images.py +0 -0
  32. {rolfedh_doc_utils-0.1.28 → rolfedh_doc_utils-0.1.30}/doc_utils/validate_links.py +0 -0
  33. {rolfedh_doc_utils-0.1.28 → rolfedh_doc_utils-0.1.30}/extract_link_attributes.py +0 -0
  34. {rolfedh_doc_utils-0.1.28 → rolfedh_doc_utils-0.1.30}/find_unused_attributes.py +0 -0
  35. {rolfedh_doc_utils-0.1.28 → rolfedh_doc_utils-0.1.30}/format_asciidoc_spacing.py +0 -0
  36. {rolfedh_doc_utils-0.1.28 → rolfedh_doc_utils-0.1.30}/replace_link_attributes.py +0 -0
  37. {rolfedh_doc_utils-0.1.28 → rolfedh_doc_utils-0.1.30}/rolfedh_doc_utils.egg-info/SOURCES.txt +0 -0
  38. {rolfedh_doc_utils-0.1.28 → rolfedh_doc_utils-0.1.30}/rolfedh_doc_utils.egg-info/dependency_links.txt +0 -0
  39. {rolfedh_doc_utils-0.1.28 → rolfedh_doc_utils-0.1.30}/rolfedh_doc_utils.egg-info/entry_points.txt +0 -0
  40. {rolfedh_doc_utils-0.1.28 → rolfedh_doc_utils-0.1.30}/rolfedh_doc_utils.egg-info/requires.txt +0 -0
  41. {rolfedh_doc_utils-0.1.28 → rolfedh_doc_utils-0.1.30}/rolfedh_doc_utils.egg-info/top_level.txt +0 -0
  42. {rolfedh_doc_utils-0.1.28 → rolfedh_doc_utils-0.1.30}/setup.cfg +0 -0
  43. {rolfedh_doc_utils-0.1.28 → rolfedh_doc_utils-0.1.30}/setup.py +0 -0
  44. {rolfedh_doc_utils-0.1.28 → rolfedh_doc_utils-0.1.30}/tests/test_archive_unused_files.py +0 -0
  45. {rolfedh_doc_utils-0.1.28 → rolfedh_doc_utils-0.1.30}/tests/test_archive_unused_images.py +0 -0
  46. {rolfedh_doc_utils-0.1.28 → rolfedh_doc_utils-0.1.30}/tests/test_auto_discovery.py +0 -0
  47. {rolfedh_doc_utils-0.1.28 → rolfedh_doc_utils-0.1.30}/tests/test_check_scannability.py +0 -0
  48. {rolfedh_doc_utils-0.1.28 → rolfedh_doc_utils-0.1.30}/tests/test_cli_entry_points.py +0 -0
  49. {rolfedh_doc_utils-0.1.28 → rolfedh_doc_utils-0.1.30}/tests/test_extract_link_attributes.py +0 -0
  50. {rolfedh_doc_utils-0.1.28 → rolfedh_doc_utils-0.1.30}/tests/test_file_utils.py +0 -0
  51. {rolfedh_doc_utils-0.1.28 → rolfedh_doc_utils-0.1.30}/tests/test_fixture_archive_unused_files.py +0 -0
  52. {rolfedh_doc_utils-0.1.28 → rolfedh_doc_utils-0.1.30}/tests/test_fixture_archive_unused_images.py +0 -0
  53. {rolfedh_doc_utils-0.1.28 → rolfedh_doc_utils-0.1.30}/tests/test_fixture_check_scannability.py +0 -0
  54. {rolfedh_doc_utils-0.1.28 → rolfedh_doc_utils-0.1.30}/tests/test_parse_exclude_list.py +0 -0
  55. {rolfedh_doc_utils-0.1.28 → rolfedh_doc_utils-0.1.30}/tests/test_replace_link_attributes.py +0 -0
  56. {rolfedh_doc_utils-0.1.28 → rolfedh_doc_utils-0.1.30}/tests/test_symlink_handling.py +0 -0
  57. {rolfedh_doc_utils-0.1.28 → rolfedh_doc_utils-0.1.30}/tests/test_table_callout_conversion.py +0 -0
  58. {rolfedh_doc_utils-0.1.28 → rolfedh_doc_utils-0.1.30}/tests/test_table_parser.py +0 -0
  59. {rolfedh_doc_utils-0.1.28 → rolfedh_doc_utils-0.1.30}/tests/test_topic_map_parser.py +0 -0
  60. {rolfedh_doc_utils-0.1.28 → rolfedh_doc_utils-0.1.30}/tests/test_unused_attributes.py +0 -0
  61. {rolfedh_doc_utils-0.1.28 → rolfedh_doc_utils-0.1.30}/tests/test_validate_links.py +0 -0
  62. {rolfedh_doc_utils-0.1.28 → rolfedh_doc_utils-0.1.30}/validate_links.py +0 -0

PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: rolfedh-doc-utils
-Version: 0.1.28
+Version: 0.1.30
 Summary: CLI tools for AsciiDoc documentation projects
 Author: Rolfe Dlugy-Hegwer
 License: MIT License

callout_lib/converter_deflist.py
@@ -70,10 +70,37 @@ class DefListConverter:
         lines.append('+')
 
         # Add explanation lines, prepending "Optional. " to first line if needed
+        # Handle blank lines and conditionals by inserting continuation markers
+        need_continuation = False
+        had_content = False  # Track if we've output any non-conditional content
+
         for line_idx, line in enumerate(explanation.lines):
+            stripped = line.strip()
+
+            # Check if this is a blank line
+            if stripped == '':
+                # Next non-blank line will need a continuation marker
+                need_continuation = True
+                continue  # Skip blank lines
+
+            # Check if this is a conditional directive
+            is_conditional = stripped.startswith(('ifdef::', 'ifndef::', 'endif::'))
+
+            # Add continuation marker if:
+            # 1. Previous line was blank (need_continuation=True), OR
+            # 2. This is a conditional and we've had content before (need separator)
+            if need_continuation or (is_conditional and had_content and line_idx > 0):
+                lines.append('+')
+                need_continuation = False
+
+            # Add the line
             if line_idx == 0 and explanation.is_optional:
                 lines.append(f'Optional. {line}')
             else:
                 lines.append(line)
 
+            # Track that we've output content (not just conditionals)
+            if not is_conditional:
+                had_content = True
+
         return lines
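
The new loop above turns blank lines and conditional directives inside a table cell into AsciiDoc list-continuation markers ('+') so the generated definition list keeps rendering as one block. A minimal standalone sketch of that rule; the helper name join_with_continuations is illustrative only, and the real code in DefListConverter also handles the "Optional." prefix:

def join_with_continuations(explanation_lines):
    """Illustrative only: mirrors the continuation-marker rule added above."""
    out = []
    need_continuation = False
    had_content = False
    for idx, line in enumerate(explanation_lines):
        stripped = line.strip()
        if stripped == '':
            need_continuation = True  # blank line -> next line needs a '+' marker
            continue
        is_conditional = stripped.startswith(('ifdef::', 'ifndef::', 'endif::'))
        if need_continuation or (is_conditional and had_content and idx > 0):
            out.append('+')
            need_continuation = False
        out.append(line)
        if not is_conditional:
            had_content = True
    return out

# A cell with a blank line and an ifdef block:
print(join_with_continuations([
    'Specifies the log level.',
    '',
    'ifdef::openshift[]',
    'Only `info` is supported.',
    'endif::[]',
]))
# ['Specifies the log level.', '+', 'ifdef::openshift[]',
#  'Only `info` is supported.', '+', 'endif::[]']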

callout_lib/detector.py
@@ -184,14 +184,17 @@ class CalloutDetector:
         explanations = {}
         table_data = self.table_parser.extract_callout_explanations_from_table(table)
 
-        for callout_num, (explanation_lines, conditionals) in table_data.items():
-            # Combine explanation lines with conditionals preserved
+        for callout_num, (explanation_lines, row_conditionals) in table_data.items():
+            # explanation_lines now includes blank lines and conditionals inline
+            # row_conditionals are before/after the entire row (rarely used)
             all_lines = []
-            for line in explanation_lines:
-                all_lines.append(line)
 
-            # Add conditionals as separate lines (they'll be preserved in output)
-            all_lines.extend(conditionals)
+            # Add any row-level conditionals before
+            if row_conditionals:
+                all_lines.extend(row_conditionals)
+
+            # Add explanation lines (already includes inline conditionals and blank lines)
+            all_lines.extend(explanation_lines)
 
             # Check if marked as optional
             is_optional = False
@@ -215,11 +218,15 @@ class CalloutDetector:
         explanations = {}
         table_data = self.table_parser.extract_3column_callout_explanations(table)
 
-        for callout_num, (value_lines, description_lines, conditionals) in table_data.items():
+        for callout_num, (value_lines, description_lines, row_conditionals) in table_data.items():
             # Combine value and description into explanation lines
-            # Strategy: Include value as context, then description
+            # Both value_lines and description_lines now include conditionals and blank lines inline
             all_lines = []
 
+            # Add any row-level conditionals before
+            if row_conditionals:
+                all_lines.extend(row_conditionals)
+
             # Add value lines with context
             if value_lines:
                 # Format: "`value`:"
@@ -228,16 +235,13 @@ class CalloutDetector:
                 if value_text:
                     all_lines.append(f"{value_text}:")
 
-            # Add additional value lines if multi-line
+            # Add additional value lines if multi-line (includes conditionals and blank lines)
             for line in value_lines[1:]:
                 all_lines.append(line)
 
-            # Add description lines
+            # Add description lines (already includes conditionals and blank lines)
             all_lines.extend(description_lines)
 
-            # Add conditionals as separate lines (they'll be preserved in output)
-            all_lines.extend(conditionals)
-
             # Check if marked as optional
             is_optional = False
             if all_lines and (all_lines[0].lower().startswith('optional.') or
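
In 0.1.30 the table parser hands CalloutDetector explanation lines with conditionals and blank lines kept inline, plus a short list of row-level conditionals. A rough sketch of how a two-column row is now assembled; variable names mirror the hunks above and the sample data is hypothetical:

explanation_lines = ['ifdef::fips[]', 'Enables FIPS mode.', 'endif::[]']
row_conditionals = []  # conditionals wrapping the whole table row (rarely used)

all_lines = []
if row_conditionals:
    all_lines.extend(row_conditionals)   # row-level conditionals go first
all_lines.extend(explanation_lines)      # inline conditionals/blank lines stay in place

# In 0.1.28 the conditionals were stripped from the cell and appended at the end,
# which could leave ifdef::/endif:: detached from the text they guard.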

callout_lib/table_parser.py
@@ -57,6 +57,44 @@ class TableParser:
     # Pattern for callout number (used for callout table detection)
     CALLOUT_NUMBER = re.compile(r'^<(\d+)>\s*$')
 
+    def _finalize_row_if_complete(self, current_row_cells, conditionals_before_row,
+                                  conditionals_after_row, expected_columns, rows):
+        """
+        Check if we have enough cells for a complete row, and if so, save it.
+
+        Returns: (new_current_row_cells, new_conditionals_before, new_conditionals_after)
+        """
+        if expected_columns > 0 and len(current_row_cells) >= expected_columns:
+            # Row is complete - save it
+            rows.append(TableRow(
+                cells=current_row_cells.copy(),
+                conditionals_before=conditionals_before_row.copy(),
+                conditionals_after=conditionals_after_row.copy()
+            ))
+            return [], [], []  # Reset for next row
+
+        # Row not complete yet
+        return current_row_cells, conditionals_before_row, conditionals_after_row
+
+    def _parse_column_count(self, attributes: str) -> int:
+        """
+        Parse the cols attribute to determine number of columns.
+
+        Example: '[cols="1,7a"]' returns 2
+                 '[cols="1,2,3"]' returns 3
+        """
+        import re
+        # Match cols="..." or cols='...'
+        match = re.search(r'cols=["\']([^"\']+)["\']', attributes)
+        if not match:
+            return 0  # Unknown column count
+
+        cols_spec = match.group(1)
+        # Count comma-separated values
+        # Handle formats like: "1,2", "1a,2a", "1,2,3", etc.
+        columns = cols_spec.split(',')
+        return len(columns)
+
     def find_tables(self, lines: List[str]) -> List[AsciiDocTable]:
         """Find all tables in the document."""
         tables = []
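
The docstring examples can be checked directly; a quick sketch of the regex behaviour, reusing the pattern from the hunk above in a throwaway function (the standalone name parse_column_count is for illustration only):

import re

def parse_column_count(attributes: str) -> int:
    # Same pattern as TableParser._parse_column_count above.
    match = re.search(r'cols=["\']([^"\']+)["\']', attributes)
    if not match:
        return 0  # unknown column count; the row-completion check is then skipped
    return len(match.group(1).split(','))

assert parse_column_count('[cols="1,7a"]') == 2
assert parse_column_count('[cols="1,2,3"]') == 3
assert parse_column_count('[options="header"]') == 0  # no cols attribute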
@@ -96,12 +134,20 @@ class TableParser:
         |Cell4
         |===
         """
+        # Get attributes and parse column count
+        attributes = ""
+        if start_line < delimiter_line:
+            attributes = lines[start_line]
+
+        expected_columns = self._parse_column_count(attributes)
+
         i = delimiter_line + 1
         rows = []
         current_row_cells = []
         current_cell_lines = []
         conditionals_before_row = []
         conditionals_after_row = []
+        in_asciidoc_cell = False  # Track if we're in an a| (AsciiDoc) cell
 
         while i < len(lines):
             line = lines[i]
@@ -138,22 +184,29 @@ class TableParser:
 
             # Check for conditional directives
             if self.IFDEF_PATTERN.match(line) or self.ENDIF_PATTERN.match(line):
-                if not current_row_cells:
+                # If we're building a cell (current_cell_lines is not empty) OR
+                # we're in an AsciiDoc cell, add conditional to cell content
+                if current_cell_lines or in_asciidoc_cell:
+                    # Inside a cell - conditional is part of cell content
+                    current_cell_lines.append(line)
+                elif current_row_cells:
+                    # Between cells in the same row
+                    conditionals_after_row.append(line)
+                else:
                     # Conditional before any cells in this row
                     conditionals_before_row.append(line)
-                else:
-                    # Conditional after cells started - treat as part of current context
-                    if current_cell_lines:
-                        # Inside a cell
-                        current_cell_lines.append(line)
-                    else:
-                        # Between cells in the same row
-                        conditionals_after_row.append(line)
                 i += 1
                 continue
 
-            # Blank line separates rows
+            # Blank line handling
             if not line.strip():
+                # In AsciiDoc cells (a|), blank lines are part of cell content
+                if in_asciidoc_cell:
+                    current_cell_lines.append(line)
+                    i += 1
+                    continue
+
+                # Otherwise, blank line separates rows
                 # Save pending cell if exists
                 if current_cell_lines:
                     current_row_cells.append(TableCell(
@@ -161,6 +214,7 @@ class TableParser:
                         conditionals=[]
                     ))
                     current_cell_lines = []
+                    in_asciidoc_cell = False
 
                 # Save row if we have cells
                 if current_row_cells:
@@ -178,6 +232,18 @@ class TableParser:
 
             # Check for cell separator (|)
             if self.CELL_SEPARATOR.match(line):
+                # Check if this is a cell type specifier on its own line (e.g., "a|" or "s|")
+                cell_content = line[1:].strip()  # Remove leading | and whitespace
+
+                # If line is just "a|", "s|", "h|", etc. (cell type specifier alone)
+                if len(cell_content) == 2 and cell_content[0] in 'ashdmev' and cell_content[1] == '|':
+                    # This is a cell type specifier on its own line
+                    if cell_content[0] == 'a':
+                        in_asciidoc_cell = True
+                    # Don't create a cell yet - content comes on following lines
+                    i += 1
+                    continue
+
                 # Save previous cell if exists
                 if current_cell_lines:
                     current_row_cells.append(TableCell(
@@ -185,9 +251,27 @@ class TableParser:
                         conditionals=[]
                     ))
                     current_cell_lines = []
+                    in_asciidoc_cell = False  # Reset for next cell
+
+                    # Check if row is complete (have enough cells based on cols attribute)
+                    current_row_cells, conditionals_before_row, conditionals_after_row = \
+                        self._finalize_row_if_complete(
+                            current_row_cells, conditionals_before_row,
+                            conditionals_after_row, expected_columns, rows
+                        )
 
                 # Extract cell content from this line (text after |)
-                cell_content = line[1:].strip()  # Remove leading |
+                cell_content = line[1:]  # Remove leading |
+
+                # Check for inline cell type specifier (a|text, s|text, etc.)
+                # Cell type specifiers are single characters followed by |
+                if len(cell_content) > 0 and cell_content[0] in 'ashdmev' and len(cell_content) > 1 and cell_content[1] == '|':
+                    # Track if this is an AsciiDoc cell (a|)
+                    if cell_content[0] == 'a':
+                        in_asciidoc_cell = True
+                    cell_content = cell_content[2:]  # Remove type specifier and second |
+
+                cell_content = cell_content.strip()
 
                 # Check if there are multiple cells on the same line (e.g., |Cell1 |Cell2 |Cell3)
                 if '|' in cell_content:
@@ -220,6 +304,39 @@ class TableParser:
                 i += 1
                 continue
 
+            # Check for cell type specifier on its own line (e.g., "a|", "s|", "h|")
+            # This is actually a cell SEPARATOR with type specifier
+            # Example:
+            #   |<1>        ← Cell 1
+            #   a|          ← Start cell 2 with type 'a' (AsciiDoc)
+            #   content...  ← Cell 2 content
+            stripped_line = line.strip()
+            if (len(stripped_line) == 2 and
+                    stripped_line[0] in 'ashdmev' and
+                    stripped_line[1] == '|' and
+                    (current_cell_lines or current_row_cells)):
+                # Save previous cell if we have one
+                if current_cell_lines:
+                    current_row_cells.append(TableCell(
+                        content=current_cell_lines.copy(),
+                        conditionals=[]
+                    ))
+                    current_cell_lines = []
+
+                # Check if row is complete
+                current_row_cells, conditionals_before_row, conditionals_after_row = \
+                    self._finalize_row_if_complete(
+                        current_row_cells, conditionals_before_row,
+                        conditionals_after_row, expected_columns, rows
+                    )
+
+                # Set cell type for the NEW cell we're starting
+                if stripped_line[0] == 'a':
+                    in_asciidoc_cell = True
+                # Start collecting content for the new cell (no content on this line)
+                i += 1
+                continue
+
             # Regular content line (continuation of current cell)
             if current_cell_lines or current_row_cells:
                 current_cell_lines.append(line)
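
Together, the inline and standalone specifier branches in table_parser.py target rows whose explanation column is an AsciiDoc ('a') cell. A hypothetical table of the shape the parser now handles, shown as a Python string for reference; the blank line and the ifdef/endif pair stay inside the second cell instead of splitting the row:

SAMPLE_TABLE = """\
[cols="1,7a"]
|===
|<1>
a|
Specifies the log level.

ifdef::openshift[]
Only `info` is supported.
endif::[]
|===
"""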
@@ -338,25 +455,20 @@ class TableParser:
 
             callout_num = int(match.group(1))
 
-            # Collect explanation lines
+            # Collect explanation lines, preserving blank lines and conditionals inline
+            # Blank lines will need to become continuation markers (+) in definition lists
             explanation_lines = []
             for line in explanation_cell.content:
-                # Skip conditional directives in explanation (preserve them separately)
-                if not (self.IFDEF_PATTERN.match(line) or self.ENDIF_PATTERN.match(line)):
-                    explanation_lines.append(line)
+                # Preserve ALL lines including conditionals and blank lines
+                # Empty lines will be marked as '' which signals need for continuation marker
+                explanation_lines.append(line)
 
-            # Collect all conditionals for this row
-            all_conditionals = []
-            all_conditionals.extend(row.conditionals_before)
+            # Collect conditionals that appear before/after the row
+            row_conditionals = []
+            row_conditionals.extend(row.conditionals_before)
+            row_conditionals.extend(row.conditionals_after)
 
-            # Extract conditionals from explanation cell
-            for line in explanation_cell.content:
-                if self.IFDEF_PATTERN.match(line) or self.ENDIF_PATTERN.match(line):
-                    all_conditionals.append(line)
-
-            all_conditionals.extend(row.conditionals_after)
-
-            explanations[callout_num] = (explanation_lines, all_conditionals)
+            explanations[callout_num] = (explanation_lines, row_conditionals)
 
         return explanations
 
@@ -397,37 +509,22 @@ class TableParser:
 
             callout_num = int(item_num_str)
 
-            # Collect value lines (column 2)
+            # Collect value lines (column 2), preserving all content including conditionals
             value_lines = []
             for line in value_cell.content:
-                # Skip conditional directives in value (preserve them separately)
-                if not (self.IFDEF_PATTERN.match(line) or self.ENDIF_PATTERN.match(line)):
-                    value_lines.append(line)
+                value_lines.append(line)
 
-            # Collect description lines (column 3)
+            # Collect description lines (column 3), preserving all content including conditionals
             description_lines = []
             for line in desc_cell.content:
-                # Skip conditional directives in description (preserve them separately)
-                if not (self.IFDEF_PATTERN.match(line) or self.ENDIF_PATTERN.match(line)):
-                    description_lines.append(line)
-
-            # Collect all conditionals for this row
-            all_conditionals = []
-            all_conditionals.extend(row.conditionals_before)
-
-            # Extract conditionals from value cell
-            for line in value_cell.content:
-                if self.IFDEF_PATTERN.match(line) or self.ENDIF_PATTERN.match(line):
-                    all_conditionals.append(line)
-
-            # Extract conditionals from description cell
-            for line in desc_cell.content:
-                if self.IFDEF_PATTERN.match(line) or self.ENDIF_PATTERN.match(line):
-                    all_conditionals.append(line)
+                description_lines.append(line)
 
-            all_conditionals.extend(row.conditionals_after)
+            # Collect conditionals that appear before/after the row
+            row_conditionals = []
+            row_conditionals.extend(row.conditionals_before)
+            row_conditionals.extend(row.conditionals_after)
 
-            explanations[callout_num] = (value_lines, description_lines, all_conditionals)
+            explanations[callout_num] = (value_lines, description_lines, row_conditionals)
 
         return explanations
 

doc_utils/format_asciidoc_spacing.py
@@ -65,9 +65,16 @@ def process_file(file_path: Path, dry_run: bool = False, verbose: bool = False)
            new_lines.append(current_line)
            in_conditional = False
            # Add blank line after conditional if needed
+           # Don't add if next line is:
+           # - a list item (starts with *, -, ., .., or numbered)
+           # - list continuation (+)
+           # - another conditional
+           # - blank
            if (next_line and
                    not re.match(r'^\s*$', next_line) and
-                   not re.match(r'^(ifdef::|ifndef::|endif::)', next_line)):
+                   not re.match(r'^(ifdef::|ifndef::|endif::)', next_line) and
+                   not re.match(r'^(\*|\-|\.|\.\.|\d+\.)\s', next_line) and  # List items
+                   not re.match(r'^\+\s*$', next_line)):  # List continuation
                new_lines.append("")
                changes_made = True
                if verbose:
@@ -102,10 +109,13 @@ def process_file(file_path: Path, dry_run: bool = False, verbose: bool = False)
        # Check for block titles (.Title)
        elif not in_block and not in_comment_block and re.match(r'^\.[A-Z]', current_line):
            # Add blank line before block title if needed
-           if (prev_line and
+           # Don't add if inside a conditional block or if previous line is a conditional directive
+           if (not in_conditional and
+                   prev_line and
                    not re.match(r'^\s*$', prev_line) and
                    not re.match(r'^=+\s+', prev_line) and
-                   not re.match(r'^\[role=', prev_line)):  # Don't add if previous is heading, empty, or role block
+                   not re.match(r'^\[role=', prev_line) and
+                   not re.match(r'^(ifdef::|ifndef::|endif::)', prev_line)):  # Don't add if previous is conditional
                new_lines.append("")
                changes_made = True
                if verbose:
@@ -117,11 +127,12 @@ def process_file(file_path: Path, dry_run: bool = False, verbose: bool = False)
        elif not in_block and re.match(r'^=+\s+', current_line):
            new_lines.append(current_line)
 
-           # Check if next line is not empty, not another heading, and not a comment block
+           # Check if next line is not empty, not another heading, not a comment block, and not a conditional
            if (next_line and
                    not re.match(r'^=+\s+', next_line) and
                    not re.match(r'^\s*$', next_line) and
-                   not re.match(r'^////+$', next_line)):  # Don't add if next is comment block
+                   not re.match(r'^////+$', next_line) and  # Don't add if next is comment block
+                   not re.match(r'^(ifdef::|ifndef::|endif::)', next_line)):  # Don't add if next is conditional
                new_lines.append("")
                changes_made = True
                if verbose:
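
The practical effect of the extra guards: format-asciidoc-spacing no longer injects a blank line between a closing conditional and the list item or continuation that follows it. A small check of the new regexes, copied from the first hunk, against typical next lines; the wrapper function is illustrative only:

import re

def blank_line_after_conditional(next_line: str) -> bool:
    # Mirrors the 0.1.30 condition in process_file.
    return bool(
        next_line
        and not re.match(r'^\s*$', next_line)
        and not re.match(r'^(ifdef::|ifndef::|endif::)', next_line)
        and not re.match(r'^(\*|\-|\.|\.\.|\d+\.)\s', next_line)  # list items
        and not re.match(r'^\+\s*$', next_line)                   # list continuation
    )

assert blank_line_after_conditional('Some paragraph text.') is True
assert blank_line_after_conditional('* A bullet item') is False
assert blank_line_after_conditional('. A numbered step') is False
assert blank_line_after_conditional('+') is False
assert blank_line_after_conditional('ifdef::foo[]') is False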

doc_utils/version.py
@@ -1,7 +1,7 @@
 """Version information for doc-utils."""
 
 # This should match the version in pyproject.toml
-__version__ = "0.1.21"
+__version__ = "0.1.30"
 
 def get_version():
     """Return the current version string."""

doc_utils/version_check.py
@@ -141,10 +141,12 @@ def detect_install_method() -> str:
     Detect how the package was installed.
 
     Returns:
-        'pipx', 'pip', or 'unknown'
+        'pipx' or 'pip'
+
+    Note: Defaults to 'pipx' as the recommended installation method.
     """
-    # Check if running from pipx venv
-    if 'pipx' in sys.prefix:
+    # Check if running from pipx venv (standard pipx install)
+    if 'pipx' in sys.prefix.lower():
         return 'pipx'
 
     # Check PIPX_HOME environment variable
@@ -152,8 +154,17 @@ def detect_install_method() -> str:
     if pipx_home and str(Path(sys.prefix)).startswith(str(Path(pipx_home))):
         return 'pipx'
 
-    # Default to pip
-    return 'pip'
+    # Check if executable is in typical pipx bin location
+    try:
+        exe_path = Path(sys.executable)
+        if '.local/pipx' in str(exe_path):
+            return 'pipx'
+    except Exception:
+        pass
+
+    # Default to pipx as the recommended method (per CLAUDE.md guidelines)
+    # This ensures users see the recommended upgrade command even for editable installs
+    return 'pipx'
 
 
 def show_update_notification(latest_version: str, current_version: str = None):
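
Summarising the two hunks above, every fall-through now ends at 'pipx'. A usage sketch, assuming the import path doc_utils.version_check (which matches the repo layout in the file list):

# Detection order in 0.1.30:
#   1. 'pipx' appears in sys.prefix (case-insensitive)  -> 'pipx'
#   2. sys.prefix is under $PIPX_HOME                   -> 'pipx'
#   3. sys.executable lives under a '.local/pipx' path  -> 'pipx'
#   4. anything else (pip, pip --user, editable)        -> 'pipx' (was 'pip' in 0.1.28)
from doc_utils.version_check import detect_install_method

print(detect_install_method())  # 'pipx' on a normal install of 0.1.30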

doc_utils_cli.py
@@ -55,9 +55,14 @@ TOOLS = [
     },
     {
         'name': 'convert-callouts-to-deflist',
-        'description': 'Converts callout-style annotations to definition list format',
+        'description': 'Converts callouts to definition lists (batch mode)',
         'example': 'convert-callouts-to-deflist --dry-run modules/'
     },
+    {
+        'name': 'convert-callouts-interactive',
+        'description': 'Interactively converts callouts with per-block format selection',
+        'example': 'convert-callouts-interactive modules/'
+    },
 ]
 
 

pyproject.toml
@@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta"
 
 [project]
 name = "rolfedh-doc-utils"
-version = "0.1.28"
+version = "0.1.30"
 description = "CLI tools for AsciiDoc documentation projects"
 readme = "README.md"
 requires-python = ">=3.8"

rolfedh_doc_utils.egg-info/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: rolfedh-doc-utils
-Version: 0.1.28
+Version: 0.1.30
 Summary: CLI tools for AsciiDoc documentation projects
 Author: Rolfe Dlugy-Hegwer
 License: MIT License

tests/test_version_check.py
@@ -26,20 +26,22 @@ class TestDetectInstallMethod:
         assert detect_install_method() == 'pipx'
 
     def test_detect_pip(self):
-        """Test detection defaults to pip."""
+        """Test detection defaults to pipx (per project guidelines)."""
         with patch('sys.prefix', '/usr'):
             # Clear PIPX_HOME if set
             env = {k: v for k, v in os.environ.items() if k != 'PIPX_HOME'}
             with patch.dict(os.environ, env, clear=True):
-                assert detect_install_method() == 'pip'
+                # Default is now pipx to align with installation recommendations
+                assert detect_install_method() == 'pipx'
 
     def test_detect_pip_user(self):
-        """Test detection for pip --user installations."""
+        """Test detection for pip --user installations defaults to pipx."""
         with patch('sys.prefix', '/home/user/.local'):
             # Clear PIPX_HOME if set
             env = {k: v for k, v in os.environ.items() if k != 'PIPX_HOME'}
             with patch.dict(os.environ, env, clear=True):
-                assert detect_install_method() == 'pip'
+                # Default is now pipx to align with installation recommendations
+                assert detect_install_method() == 'pipx'
 
 
 class TestParseVersion: