rolfedh-doc-utils 0.1.34__py3-none-any.whl → 0.1.37__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
archive_unused_files.py CHANGED
@@ -22,6 +22,7 @@ def main():
         epilog='By default, automatically discovers all modules and assemblies directories in the repository.'
     )
     parser.add_argument('--archive', action='store_true', help='Move the files to a dated zip in the archive directory.')
+    parser.add_argument('--commented', action='store_true', help='Include files that are referenced only in commented lines in the archive operation.')
     parser.add_argument('--scan-dir', action='append', default=[], help='Specific directory to scan (can be used multiple times). If not specified, auto-discovers directories.')
     parser.add_argument('--exclude-dir', action='append', default=[], help='Directory to exclude (can be used multiple times).')
     parser.add_argument('--exclude-file', action='append', default=[], help='File to exclude (can be used multiple times).')
@@ -35,13 +36,13 @@ def main():

     exclude_dirs = list(args.exclude_dir)
     exclude_files = list(args.exclude_file)
-
+
     if args.exclude_list:
         list_dirs, list_files = parse_exclude_list_file(args.exclude_list)
         exclude_dirs.extend(list_dirs)
         exclude_files.extend(list_files)

-    find_unused_adoc(scan_dirs, archive_dir, args.archive, exclude_dirs, exclude_files)
+    find_unused_adoc(scan_dirs, archive_dir, args.archive, exclude_dirs, exclude_files, args.commented)

 if __name__ == '__main__':
     main()
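For reference, the updated find_unused_adoc() can also be called directly from Python. A minimal sketch, mirroring the new signature shown in this diff ('modules' and 'assemblies' are illustrative directory names):

    from doc_utils.unused_adoc import find_unused_adoc

    # Preview only (archive=False). The final argument maps to the new
    # include_commented parameter, so files referenced only in commented-out
    # include:: lines are treated as unused as well.
    find_unused_adoc(['modules', 'assemblies'], './archive', False, [], [], True)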
archive_unused_images.py CHANGED
@@ -18,6 +18,7 @@ def main():
     check_version_on_startup()
     parser = argparse.ArgumentParser(description='Archive unused image files.')
     parser.add_argument('--archive', action='store_true', help='Move the files to a dated zip in the archive directory.')
+    parser.add_argument('--commented', action='store_true', help='Include images that are referenced only in commented lines in the archive operation.')
     parser.add_argument('--exclude-dir', action='append', default=[], help='Directory to exclude (can be used multiple times).')
     parser.add_argument('--exclude-file', action='append', default=[], help='File to exclude (can be used multiple times).')
     parser.add_argument('--exclude-list', type=str, help='Path to a file containing directories or files to exclude, one per line.')
@@ -29,13 +30,13 @@ def main():

     exclude_dirs = list(args.exclude_dir)
     exclude_files = list(args.exclude_file)
-
+
     if args.exclude_list:
         list_dirs, list_files = parse_exclude_list_file(args.exclude_list)
         exclude_dirs.extend(list_dirs)
         exclude_files.extend(list_files)

-    find_unused_images(scan_dirs, archive_dir, args.archive, exclude_dirs, exclude_files)
+    find_unused_images(scan_dirs, archive_dir, args.archive, exclude_dirs, exclude_files, args.commented)

 if __name__ == '__main__':
     main()
check_source_directives.py ADDED
@@ -0,0 +1,101 @@
+#!/usr/bin/env python3
+"""
+Check Source Directives
+
+Detects code blocks (----) that are missing [source] directive in AsciiDoc files.
+This helps prevent AsciiDoc-to-DocBook XML conversion errors.
+
+Usage:
+check-source-directives # Scan current directory
+check-source-directives asciidoc # Scan asciidoc/ directory
+check-source-directives --fix # Scan and fix issues in current directory
+check-source-directives --fix asciidoc # Scan and fix issues in asciidoc/ directory
+"""
+
+import argparse
+import sys
+from doc_utils.missing_source_directive import find_missing_source_directives
+from doc_utils.version_check import check_version_on_startup
+from doc_utils.version import __version__
+
+# ANSI color codes
+RED = '\033[0;31m'
+YELLOW = '\033[1;33m'
+GREEN = '\033[0;32m'
+NC = '\033[0m' # No Color
+
+def main():
+    # Check for updates (non-blocking)
+    check_version_on_startup()
+
+    parser = argparse.ArgumentParser(
+        description='Detect code blocks (----) missing [source] directive in AsciiDoc files',
+        formatter_class=argparse.RawDescriptionHelpFormatter,
+        epilog="""
+Examples:
+%(prog)s # Scan current directory
+%(prog)s asciidoc # Scan asciidoc/ directory
+%(prog)s --fix # Scan and fix issues in current directory
+%(prog)s --fix asciidoc # Scan and fix issues in asciidoc/ directory
+"""
+    )
+    parser.add_argument('directory', nargs='?', default='.',
+                        help='Directory to scan (default: current directory)')
+    parser.add_argument('--fix', action='store_true',
+                        help='Automatically insert [source] directives where missing')
+    parser.add_argument('--version', action='version', version=f'%(prog)s {__version__}')
+
+    args = parser.parse_args()
+
+    mode = "Fixing" if args.fix else "Scanning for"
+    print(f"{mode} code blocks missing [source] directive in: {args.directory}")
+    print("=" * 64)
+    print()
+
+    try:
+        results = find_missing_source_directives(
+            scan_dir=args.directory,
+            auto_fix=args.fix
+        )
+    except ValueError as e:
+        print(f"{RED}Error: {e}{NC}", file=sys.stderr)
+        sys.exit(1)
+    except Exception as e:
+        print(f"{RED}Unexpected error: {e}{NC}", file=sys.stderr)
+        sys.exit(1)
+
+    # Display results
+    for file_info in results['file_details']:
+        filepath = file_info['filepath']
+        issues = file_info['issues']
+
+        print(f"{YELLOW}File: {filepath}{NC}")
+
+        for issue in issues:
+            print(f" {RED}Line {issue['line_num']}:{NC} Code block without [source] directive")
+            print(f" Previous line ({issue['prev_line_num']}): {issue['prev_line']}")
+            print()
+
+        if args.fix:
+            if file_info.get('fixed'):
+                print(f" {GREEN}✓ Fixed {len(issues)} issue(s){NC}")
+            elif 'error' in file_info:
+                print(f" {RED}✗ Failed to fix file: {file_info['error']}{NC}")
+            print()
+
+    # Summary
+    print("=" * 64)
+    if results['total_issues'] == 0:
+        print(f"{GREEN}✓ No issues found!{NC}")
+        sys.exit(0)
+    else:
+        if args.fix:
+            print(f"{GREEN}Fixed {results['total_issues']} code block(s) in {results['files_fixed']} file(s){NC}")
+            sys.exit(0)
+        else:
+            print(f"{RED}Found {results['total_issues']} code block(s) missing [source] directive in {results['files_with_issues']} file(s){NC}")
+            print(f"\nRun with --fix to automatically fix these issues")
+            sys.exit(1)

+if __name__ == '__main__':
+    main()
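A minimal sketch of calling the underlying library function directly and reading the result dictionary it documents (the scan directory name is illustrative):

    from doc_utils.missing_source_directive import find_missing_source_directives

    results = find_missing_source_directives(scan_dir='modules', auto_fix=False)
    print(f"{results['total_issues']} issue(s) in {results['files_with_issues']} file(s)")
    for detail in results['file_details']:
        for issue in detail['issues']:
            # Each issue records the delimiter line and the line preceding it
            print(f"{detail['filepath']}:{issue['line_num']} block without [source]")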
convert_tables_to_deflists.py ADDED
@@ -0,0 +1,479 @@
+#!/usr/bin/env python3
+"""
+convert-tables-to-deflists: Convert AsciiDoc tables to definition lists.
+
+Converts 2-column AsciiDoc tables to definition list format, where:
+- The first column becomes the term
+- The second column becomes the definition
+
+Tables with more than 2 columns are skipped (use --columns to specify which
+columns to use as term and definition).
+
+Usage:
+convert-tables-to-deflists [OPTIONS] [PATH]
+
+Examples:
+# Preview changes (dry-run mode)
+convert-tables-to-deflists .
+
+# Apply changes to all .adoc files
+convert-tables-to-deflists --apply .
+
+# Process a single file
+convert-tables-to-deflists --apply path/to/file.adoc
+
+# Use columns 1 and 3 for 3-column tables
+convert-tables-to-deflists --columns 1,3 .
+
+# Skip tables with headers
+convert-tables-to-deflists --skip-header-tables .
+"""
+
+import argparse
+import sys
+import re
+from pathlib import Path
+from typing import List, Optional, Tuple
+
+from callout_lib.table_parser import TableParser, AsciiDocTable
+from doc_utils.version import __version__
+from doc_utils.file_utils import parse_exclude_list_file
+
+
+class Colors:
+    """ANSI color codes for terminal output."""
+    RED = '\033[0;31m'
+    GREEN = '\033[0;32m'
+    YELLOW = '\033[1;33m'
+    BLUE = '\033[0;34m'
+    CYAN = '\033[0;36m'
+    NC = '\033[0m' # No Color
+
+
+def print_colored(message: str, color: str = Colors.NC) -> None:
+    """Print a message with optional color."""
+    print(f"{color}{message}{Colors.NC}")
+
+
+class TableToDeflistConverter:
+    """Converts AsciiDoc tables to definition lists."""
+
+    def __init__(self, dry_run: bool = True, verbose: bool = False,
+                 columns: Optional[Tuple[int, int]] = None,
+                 skip_header_tables: bool = False,
+                 skip_callout_tables: bool = True):
+        """
+        Initialize the converter.
+
+        Args:
+            dry_run: If True, don't modify files (preview mode)
+            verbose: If True, show detailed output
+            columns: Tuple of (term_col, def_col) for multi-column tables (1-indexed)
+            skip_header_tables: If True, skip tables that have header rows
+            skip_callout_tables: If True, skip tables that look like callout tables
+        """
+        self.dry_run = dry_run
+        self.verbose = verbose
+        self.columns = columns # 1-indexed column numbers
+        self.skip_header_tables = skip_header_tables
+        self.skip_callout_tables = skip_callout_tables
+        self.parser = TableParser()
+        self.files_processed = 0
+        self.files_modified = 0
+        self.tables_converted = 0
+
+    def find_adoc_files(self, path: Path, exclude_dirs: List[str] = None,
+                        exclude_files: List[str] = None) -> List[Path]:
+        """Find all .adoc files in the given path."""
+        exclude_dirs = exclude_dirs or []
+        exclude_files = exclude_files or []
+
+        if path.is_file():
+            return [path] if path.suffix == '.adoc' else []
+
+        adoc_files = []
+        for adoc_file in path.rglob('*.adoc'):
+            # Skip excluded directories
+            if any(excl in str(adoc_file) for excl in exclude_dirs):
+                continue
+            # Skip excluded files
+            if any(excl in str(adoc_file) for excl in exclude_files):
+                continue
+            # Skip symlinks
+            if adoc_file.is_symlink():
+                continue
+            adoc_files.append(adoc_file)
+
+        return sorted(adoc_files)
+
+    def _should_skip_table(self, table: AsciiDocTable) -> Tuple[bool, str]:
+        """
+        Determine if a table should be skipped.
+
+        Returns:
+            Tuple of (should_skip, reason)
+        """
+        # Skip empty tables
+        if not table.rows:
+            return True, "empty table"
+
+        # Skip callout tables (they're handled by convert-callouts-to-deflist)
+        if self.skip_callout_tables:
+            if self.parser.is_callout_table(table) or self.parser.is_3column_callout_table(table):
+                return True, "callout table (use convert-callouts-to-deflist)"
+
+        # Check column count
+        if table.rows:
+            first_row_cols = len(table.rows[0].cells)
+
+            # If specific columns are specified, verify they exist
+            if self.columns:
+                term_col, def_col = self.columns
+                if term_col > first_row_cols or def_col > first_row_cols:
+                    return True, f"specified columns ({term_col}, {def_col}) exceed table columns ({first_row_cols})"
+            else:
+                # Default: only process 2-column tables
+                if first_row_cols != 2:
+                    return True, f"{first_row_cols}-column table (use --columns to specify term and definition columns)"
+
+        # Check for header row
+        if self.skip_header_tables and self.parser._has_header_row(table):
+            return True, "table has header row"
+
+        return False, ""
+
+    def _convert_table_to_deflist(self, table: AsciiDocTable) -> List[str]:
+        """
+        Convert a table to definition list format.
+
+        Args:
+            table: The AsciiDocTable to convert
+
+        Returns:
+            List of lines representing the definition list
+        """
+        output = []
+
+        # Determine which columns to use (0-indexed internally)
+        if self.columns:
+            term_idx = self.columns[0] - 1 # Convert to 0-indexed
+            def_idx = self.columns[1] - 1
+        else:
+            term_idx = 0
+            def_idx = 1
+
+        # Check if table has a header row
+        has_header = self.parser._has_header_row(table)
+        data_rows = table.rows[1:] if has_header else table.rows
+
+        for row in data_rows:
+            # Verify row has enough cells
+            if len(row.cells) <= max(term_idx, def_idx):
+                continue
+
+            # Add conditionals before row
+            if row.conditionals_before:
+                output.extend(row.conditionals_before)
+
+            # Get term (first specified column)
+            term_cell = row.cells[term_idx]
+            term = ' '.join(line.strip() for line in term_cell.content if line.strip())
+
+            # Get definition (second specified column)
+            def_cell = row.cells[def_idx]
+            def_lines = def_cell.content
+
+            # Create definition list entry
+            if term:
+                output.append(f'{term}::')
+
+                # Add definition lines
+                first_content_line = True
+                for line in def_lines:
+                    stripped = line.strip()
+
+                    # Handle conditional directives
+                    if stripped.startswith(('ifdef::', 'ifndef::', 'endif::')):
+                        output.append(line)
+                        continue
+
+                    # Skip empty lines within definition but track them
+                    if not stripped:
+                        continue
+
+                    # First content line gets no indent, subsequent lines do
+                    if first_content_line:
+                        output.append(stripped)
+                        first_content_line = False
+                    else:
+                        output.append(f'+\n{stripped}')
+
+                # Add blank line after entry
+                output.append('')
+
+            # Add conditionals after row
+            if row.conditionals_after:
+                output.extend(row.conditionals_after)
+
+        # Remove trailing blank line if present
+        if output and not output[-1].strip():
+            output.pop()
+
+        return output
+
+    def process_file(self, file_path: Path) -> int:
+        """
+        Process a single file, converting tables to definition lists.
+
+        Args:
+            file_path: Path to the .adoc file
+
+        Returns:
+            Number of tables converted
+        """
+        try:
+            with open(file_path, 'r', encoding='utf-8') as f:
+                lines = [line.rstrip('\n') for line in f]
+        except Exception as e:
+            print_colored(f"Error reading {file_path}: {e}", Colors.RED)
+            return 0
+
+        original_lines = lines.copy()
+        tables = self.parser.find_tables(lines)
+        conversions = 0
+
+        # Process tables in reverse order to preserve line numbers
+        for table in reversed(tables):
+            should_skip, reason = self._should_skip_table(table)
+
+            if should_skip:
+                if self.verbose:
+                    print(f" Skipping table at line {table.start_line + 1}: {reason}")
+                continue
+
+            # Convert the table
+            deflist_lines = self._convert_table_to_deflist(table)
+
+            if deflist_lines:
+                # Replace table with definition list
+                lines[table.start_line:table.end_line + 1] = deflist_lines
+                conversions += 1
+
+                if self.verbose:
+                    print(f" Converted table at line {table.start_line + 1}")
+
+        # Write changes if not in dry-run mode
+        if conversions > 0:
+            if self.dry_run:
+                print_colored(f"Would modify: {file_path} ({conversions} table(s))", Colors.YELLOW)
+            else:
+                try:
+                    with open(file_path, 'w', encoding='utf-8') as f:
+                        f.write('\n'.join(lines) + '\n')
+                    print_colored(f"Modified: {file_path} ({conversions} table(s))", Colors.GREEN)
+                except Exception as e:
+                    print_colored(f"Error writing {file_path}: {e}", Colors.RED)
+                    return 0
+
+        return conversions
+
+    def process_path(self, path: Path, exclude_dirs: List[str] = None,
+                     exclude_files: List[str] = None) -> None:
+        """
+        Process all .adoc files in the given path.
+
+        Args:
+            path: File or directory path to process
+            exclude_dirs: List of directory patterns to exclude
+            exclude_files: List of file patterns to exclude
+        """
+        adoc_files = self.find_adoc_files(path, exclude_dirs, exclude_files)
+
+        if not adoc_files:
+            print_colored("No .adoc files found.", Colors.YELLOW)
+            return
+
+        if self.dry_run:
+            print_colored("DRY RUN MODE - No files will be modified", Colors.YELLOW)
+            print()
+
+        for file_path in adoc_files:
+            self.files_processed += 1
+            conversions = self.process_file(file_path)
+
+            if conversions > 0:
+                self.files_modified += 1
+                self.tables_converted += conversions
+
+        # Print summary
+        print()
+        print(f"Processed {self.files_processed} file(s)")
+        print(f"Tables converted: {self.tables_converted}")
+        print(f"Files {'would be ' if self.dry_run else ''}modified: {self.files_modified}")
+
+        if self.dry_run and self.files_modified > 0:
+            print()
+            print_colored("DRY RUN - No files were modified. Use --apply to apply changes.", Colors.YELLOW)
+
+
+def parse_columns(columns_str: str) -> Tuple[int, int]:
+    """
+    Parse a columns specification like "1,3" into a tuple.
+
+    Args:
+        columns_str: String like "1,3" specifying term and definition columns
+
+    Returns:
+        Tuple of (term_column, definition_column) as 1-indexed integers
+
+    Raises:
+        argparse.ArgumentTypeError: If the format is invalid
+    """
+    try:
+        parts = columns_str.split(',')
+        if len(parts) != 2:
+            raise ValueError("Expected exactly two column numbers")
+        term_col = int(parts[0].strip())
+        def_col = int(parts[1].strip())
+        if term_col < 1 or def_col < 1:
+            raise ValueError("Column numbers must be 1 or greater")
+        if term_col == def_col:
+            raise ValueError("Term and definition columns must be different")
+        return (term_col, def_col)
+    except ValueError as e:
+        raise argparse.ArgumentTypeError(
+            f"Invalid columns format '{columns_str}': {e}. "
+            "Use format like '1,2' or '1,3' (1-indexed column numbers)"
+        )
+
+
+def main() -> int:
+    """Main entry point for the CLI."""
+    parser = argparse.ArgumentParser(
+        description='Convert AsciiDoc tables to definition lists.',
+        formatter_class=argparse.RawDescriptionHelpFormatter,
+        epilog="""
+Examples:
+# Preview changes (default dry-run mode)
+convert-tables-to-deflists .
+
+# Apply changes to all .adoc files
+convert-tables-to-deflists --apply .
+
+# Process a single file
+convert-tables-to-deflists --apply path/to/file.adoc
+
+# For 3-column tables, use columns 1 and 3
+convert-tables-to-deflists --columns 1,3 .
+
+# Skip tables that have header rows
+convert-tables-to-deflists --skip-header-tables .
+
+Notes:
+- By default, only 2-column tables are converted
+- Callout tables are automatically skipped (use convert-callouts-to-deflist)
+- Use --columns to specify which columns to use for multi-column tables
+- The first specified column becomes the term, the second becomes the definition
+"""
+    )
+
+    parser.add_argument(
+        '--version',
+        action='version',
+        version=f'%(prog)s {__version__}'
+    )
+
+    parser.add_argument(
+        'path',
+        nargs='?',
+        default='.',
+        help='File or directory to process (default: current directory)'
+    )
+
+    parser.add_argument(
+        '--apply',
+        action='store_true',
+        help='Apply changes (default is dry-run mode)'
+    )
+
+    parser.add_argument(
+        '-v', '--verbose',
+        action='store_true',
+        help='Show detailed output'
+    )
+
+    parser.add_argument(
+        '--columns',
+        type=parse_columns,
+        metavar='TERM,DEF',
+        help='Column numbers to use as term and definition (1-indexed, e.g., "1,3")'
+    )
+
+    parser.add_argument(
+        '--skip-header-tables',
+        action='store_true',
+        help='Skip tables that have header rows'
+    )
+
+    parser.add_argument(
+        '--include-callout-tables',
+        action='store_true',
+        help='Include callout tables (normally skipped)'
+    )
+
+    parser.add_argument(
+        '--exclude-dir',
+        action='append',
+        default=[],
+        metavar='DIR',
+        help='Directory pattern to exclude (can be specified multiple times)'
+    )
+
+    parser.add_argument(
+        '--exclude-file',
+        action='append',
+        default=[],
+        metavar='FILE',
+        help='File pattern to exclude (can be specified multiple times)'
+    )
+
+    parser.add_argument(
+        '--exclude-list',
+        type=Path,
+        metavar='FILE',
+        help='Path to file containing exclusion patterns (one per line)'
+    )
+
+    args = parser.parse_args()
+
+    # Parse exclusion list if provided
+    exclude_dirs = list(args.exclude_dir)
+    exclude_files = list(args.exclude_file)
+
+    if args.exclude_list:
+        list_dirs, list_files = parse_exclude_list_file(args.exclude_list)
+        exclude_dirs.extend(list_dirs)
+        exclude_files.extend(list_files)
+
+    # Create converter
+    converter = TableToDeflistConverter(
+        dry_run=not args.apply,
+        verbose=args.verbose,
+        columns=args.columns,
+        skip_header_tables=args.skip_header_tables,
+        skip_callout_tables=not args.include_callout_tables
+    )
+
+    # Process files
+    path = Path(args.path)
+    if not path.exists():
+        print_colored(f"Error: Path does not exist: {path}", Colors.RED)
+        return 1
+
+    converter.process_path(path, exclude_dirs, exclude_files)
+
+    return 0
+
+
+if __name__ == '__main__':
+    sys.exit(main())
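A minimal sketch of driving the converter from Python instead of the CLI, in preview (dry-run) mode; the path and exclude patterns are illustrative, and TableParser is supplied by the package's callout_lib, which is not shown in this diff:

    from pathlib import Path
    from convert_tables_to_deflists import TableToDeflistConverter

    # dry_run=True prints "Would modify: ..." lines instead of writing files
    converter = TableToDeflistConverter(dry_run=True, verbose=True)
    converter.process_path(Path('modules'), exclude_dirs=['archive'], exclude_files=[])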
doc_utils/missing_source_directive.py ADDED
@@ -0,0 +1,211 @@
+# doc_utils/missing_source_directive.py
+
+"""
+Detects code blocks (----) that are missing [source] directive on the preceding line.
+
+This module provides functionality to scan AsciiDoc files for code blocks that lack
+proper source directives, which can cause issues with AsciiDoc-to-DocBook XML conversion.
+"""
+
+import os
+import re
+
+def is_code_block_start(line):
+    """Check if line is a code block delimiter (4 or more dashes)"""
+    return re.match(r'^-{4,}$', line.strip())
+
+def has_source_directive(line):
+    """Check if line contains [source] directive"""
+    # Match [source], [source,lang], [source, lang], etc.
+    return re.match(r'^\[source[\s,]', line.strip())
+
+def is_empty_or_whitespace(line):
+    """Check if line is empty or contains only whitespace"""
+    return len(line.strip()) == 0
+
+def scan_file(filepath):
+    """
+    Scan a single AsciiDoc file for missing [source] directives.
+
+    Args:
+        filepath: Path to the AsciiDoc file to scan
+
+    Returns:
+        List of issue dictionaries containing line_num, prev_line_num, and prev_line
+    """
+    issues = []
+
+    try:
+        with open(filepath, 'r', encoding='utf-8') as f:
+            lines = f.readlines()
+
+        in_code_block = False
+
+        for i, line in enumerate(lines, start=1):
+            # Check if current line is a code block delimiter
+            if is_code_block_start(line):
+                if not in_code_block:
+                    # This is the START of a code block
+                    prev_line_num = i - 1
+                    prev_line = lines[prev_line_num - 1].rstrip() if prev_line_num > 0 else ""
+
+                    # Check if [source] exists in previous lines (within last 3 lines)
+                    # This handles cases where there's a title between [source] and ----
+                    has_source_in_context = False
+                    for lookback in range(1, min(4, i)):
+                        check_line = lines[i - lookback - 1].strip()
+                        if has_source_directive(check_line):
+                            has_source_in_context = True
+                            break
+                        # Stop looking if we hit an empty line or structural element
+                        if not check_line or check_line.startswith(('=', '----')):
+                            break
+
+                    # Only flag if:
+                    # 1. No [source] directive in recent context
+                    # 2. Previous line is not empty (which might be valid formatting)
+                    if (not has_source_in_context and
+                        not is_empty_or_whitespace(prev_line)):
+
+                        # Additional heuristic: check if previous line looks like it should have [source]
+                        # Skip if previous line is a title, comment, or other structural element
+                        prev_stripped = prev_line.strip()
+
+                        # Skip common valid patterns
+                        if prev_stripped.startswith(('=', '//', 'NOTE:', 'TIP:', 'WARNING:', 'IMPORTANT:', 'CAUTION:')):
+                            in_code_block = True
+                            continue
+
+                        # Skip if previous line is already an attribute block (but not [source])
+                        if prev_stripped.startswith('[') and prev_stripped.endswith(']'):
+                            # It's some other attribute like [id], [role], etc., might be intentional
+                            in_code_block = True
+                            continue
+
+                        # Skip if previous line is just a plus sign (continuation)
+                        if prev_stripped == '+':
+                            in_code_block = True
+                            continue
+
+                        # Skip if previous line is a block title (starts with .)
+                        if prev_stripped.startswith('.') and len(prev_stripped) > 1:
+                            # This might be a title for a source block that's defined earlier
+                            # Check if there's a [source] before the title
+                            if i >= 3:
+                                two_lines_back = lines[i - 3].strip()
+                                if has_source_directive(two_lines_back):
+                                    in_code_block = True
+                                    continue
+
+                        issues.append({
+                            'line_num': i,
+                            'prev_line_num': prev_line_num,
+                            'prev_line': prev_line[:80] # Truncate for display
+                        })
+
+                    in_code_block = True
+                else:
+                    # This is the END of a code block
+                    in_code_block = False
+
+    except Exception as e:
+        raise IOError(f"Error reading {filepath}: {e}")
+
+    return issues
+
+def fix_file(filepath, issues):
+    """
+    Insert [source] directives for missing code blocks.
+
+    Args:
+        filepath: Path to the AsciiDoc file to fix
+        issues: List of issue dictionaries from scan_file()
+
+    Returns:
+        True if successful, False otherwise
+    """
+    try:
+        with open(filepath, 'r', encoding='utf-8') as f:
+            lines = f.readlines()
+
+        # Sort issues by line number in reverse order so we can insert from bottom to top
+        # This prevents line number shifts from affecting subsequent insertions
+        sorted_issues = sorted(issues, key=lambda x: x['line_num'], reverse=True)
+
+        for issue in sorted_issues:
+            line_num = issue['line_num']
+            # Insert [source] directive before the ---- line (at line_num - 1, which is index line_num - 1)
+            insert_index = line_num - 1
+            lines.insert(insert_index, '[source]\n')
+
+        # Write the modified content back to the file
+        with open(filepath, 'w', encoding='utf-8') as f:
+            f.writelines(lines)
+
+        return True
+
+    except Exception as e:
+        raise IOError(f"Error fixing {filepath}: {e}")
+
+def find_missing_source_directives(scan_dir='.', auto_fix=False):
+    """
+    Scan directory for AsciiDoc files with missing [source] directives.
+
+    Args:
+        scan_dir: Directory to scan (default: current directory)
+        auto_fix: If True, automatically insert [source] directives
+
+    Returns:
+        Dictionary with statistics:
+        - total_issues: Total number of issues found
+        - files_with_issues: Number of files with issues
+        - files_fixed: Number of files successfully fixed (if auto_fix=True)
+        - file_details: List of dictionaries with file paths and their issues
+    """
+    if not os.path.isdir(scan_dir):
+        raise ValueError(f"Directory '{scan_dir}' does not exist")
+
+    total_issues = 0
+    files_with_issues = 0
+    files_fixed = 0
+    file_details = []
+
+    # Find all .adoc files (excluding symbolic links)
+    adoc_files = []
+    for root, dirs, files in os.walk(scan_dir):
+        for filename in files:
+            if filename.endswith('.adoc'):
+                filepath = os.path.join(root, filename)
+                # Skip symbolic links
+                if not os.path.islink(filepath):
+                    adoc_files.append(filepath)
+
+    for filepath in sorted(adoc_files):
+        issues = scan_file(filepath)
+
+        if issues:
+            files_with_issues += 1
+            total_issues += len(issues)
+
+            file_info = {
+                'filepath': filepath,
+                'issues': issues,
+                'fixed': False
+            }
+
+            if auto_fix:
+                try:
+                    if fix_file(filepath, issues):
+                        files_fixed += 1
+                        file_info['fixed'] = True
+                except Exception as e:
+                    file_info['error'] = str(e)
+
+            file_details.append(file_info)
+
+    return {
+        'total_issues': total_issues,
+        'files_with_issues': files_with_issues,
+        'files_fixed': files_fixed,
+        'file_details': file_details
+    }
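To illustrate what the detector flags and what --fix inserts, a small self-contained sketch (the sample AsciiDoc content is made up):

    import os
    import tempfile

    from doc_utils.missing_source_directive import scan_file, fix_file

    sample = '.Example command\n----\n$ oc get pods\n----\n'
    with tempfile.NamedTemporaryFile('w', suffix='.adoc', delete=False) as f:
        f.write(sample)
        path = f.name

    issues = scan_file(path)   # flags the opening ----: block title above, no [source]
    fix_file(path, issues)     # inserts a bare [source] line directly before that ----
    os.unlink(path)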
doc_utils/unused_adoc.py CHANGED
@@ -60,10 +60,10 @@ def find_scan_directories(base_path='.', exclude_dirs=None):

     return scan_dirs

-def find_unused_adoc(scan_dirs=None, archive_dir='./archive', archive=False, exclude_dirs=None, exclude_files=None):
+def find_unused_adoc(scan_dirs=None, archive_dir='./archive', archive=False, exclude_dirs=None, exclude_files=None, include_commented=False):
     # Print safety warning
     print("\n⚠️ SAFETY: Work in a git branch! Run without --archive first to preview.\n")
-
+
     # If no scan_dirs provided, auto-discover them
     if not scan_dirs:
         scan_dirs = find_scan_directories(exclude_dirs=exclude_dirs)
@@ -75,46 +75,107 @@ def find_unused_adoc(scan_dirs=None, archive_dir='./archive', archive=False, exc
         print("No 'modules' or 'assemblies' directories found containing .adoc files.")
         print("Please run this tool from your documentation repository root.")
         return
-
+
     # Detect repository type
     repo_type = detect_repo_type()
     print(f"Detected repository type: {repo_type}")
-
+
     # Collect all .adoc files in scan directories
     asciidoc_files = collect_files(scan_dirs, {'.adoc'}, exclude_dirs, exclude_files)
-
-    # Track which files are referenced
-    referenced_files = set()
-
+
+    # Track which files are referenced (uncommented and commented separately)
+    referenced_files = set() # Files in uncommented includes
+    commented_only_files = {} # Files referenced ONLY in commented lines: {basename: [(file, line_num, line_text)]}
+
     if repo_type == 'topic_map':
         # For OpenShift-docs style repos, get references from topic maps
         topic_references = get_all_topic_map_references()
         # Convert to basenames for comparison
        referenced_files.update(os.path.basename(ref) for ref in topic_references)
-
-    # Always scan for include:: directives in all .adoc files
+
+    # Patterns for finding includes (both commented and uncommented)
     include_pattern = re.compile(r'include::(.+?)\[')
+    commented_include_pattern = re.compile(r'^\s*//.*include::(.+?)\[')
+
     adoc_files = collect_files(['.'], {'.adoc'}, exclude_dirs, exclude_files)
-
+
     for file_path in adoc_files:
         try:
             with open(file_path, 'r', encoding='utf-8') as f:
-                content = f.read()
-                includes = include_pattern.findall(content)
-                # Extract just the filename from the include path
-                for include in includes:
-                    # Handle both relative and absolute includes
-                    include_basename = os.path.basename(include)
-                    referenced_files.add(include_basename)
+                lines = f.readlines()
+
+            for line_num, line in enumerate(lines, 1):
+                # Check if this is a commented include
+                commented_match = commented_include_pattern.search(line)
+                if commented_match:
+                    include_basename = os.path.basename(commented_match.group(1))
+                    # Track location of commented reference
+                    if include_basename not in commented_only_files:
+                        commented_only_files[include_basename] = []
+                    commented_only_files[include_basename].append((file_path, line_num, line.strip()))
+                else:
+                    # Check for uncommented includes
+                    uncommented_match = include_pattern.search(line)
+                    if uncommented_match:
+                        include_basename = os.path.basename(uncommented_match.group(1))
+                        referenced_files.add(include_basename)
+                        # If we found an uncommented reference, remove from commented_only tracking
+                        if include_basename in commented_only_files:
+                            del commented_only_files[include_basename]
         except Exception as e:
             print(f"Warning: could not read {file_path}: {e}")
-
-    # Find unused files by comparing basenames
-    unused_files = [f for f in asciidoc_files if os.path.basename(f) not in referenced_files]
+
+    # Determine which files are unused based on the include_commented flag
+    if include_commented:
+        # When --commented is used: treat files with commented-only references as unused
+        # Only files with uncommented references are considered "used"
+        unused_files = [f for f in asciidoc_files if os.path.basename(f) not in referenced_files]
+        commented_only_unused = []
+    else:
+        # Default behavior: files referenced only in commented lines are considered "used"
+        # They should NOT be in the unused list, but we track them for reporting
+        all_referenced = referenced_files.union(set(commented_only_files.keys()))
+        unused_files = [f for f in asciidoc_files if os.path.basename(f) not in all_referenced]
+
+        # Generate list of files referenced only in comments for the report
+        commented_only_unused = []
+        for basename, references in commented_only_files.items():
+            # Find the full path for this basename in asciidoc_files
+            matching_files = [f for f in asciidoc_files if os.path.basename(f) == basename]
+            for f in matching_files:
+                commented_only_unused.append((f, references))
+
     unused_files = list(dict.fromkeys(unused_files)) # Remove duplicates
-
+
+    # Print summary
     print(f"Found {len(unused_files)} unused files out of {len(asciidoc_files)} total files in scan directories")
-
+
+    # Generate detailed report for commented-only references
+    if commented_only_unused and not include_commented:
+        report_path = os.path.join(archive_dir, 'commented-references-report.txt')
+        os.makedirs(archive_dir, exist_ok=True)
+
+        with open(report_path, 'w', encoding='utf-8') as report:
+            report.write("Files Referenced Only in Commented Lines\n")
+            report.write("=" * 70 + "\n\n")
+            report.write(f"Found {len(commented_only_unused)} files that are referenced only in commented-out includes.\n")
+            report.write("These files are considered 'used' by default and will NOT be archived.\n\n")
+            report.write("To archive these files along with other unused files, use the --commented flag.\n\n")
+            report.write("-" * 70 + "\n\n")
+
+            for file_path, references in sorted(commented_only_unused):
+                report.write(f"File: {file_path}\n")
+                report.write(f"Referenced in {len(references)} commented line(s):\n")
+                for ref_file, line_num, line_text in references:
+                    report.write(f" {ref_file}:{line_num}\n")
+                    report.write(f" {line_text}\n")
+                report.write("\n")
+
+        print(f"\n📋 Found {len(commented_only_unused)} files referenced only in commented lines.")
+        print(f" Detailed report saved to: {report_path}")
+        print(f" These files are considered 'used' and will NOT be archived by default.")
+        print(f" To include them in the archive operation, use the --commented flag.\n")
+
     return write_manifest_and_archive(
         unused_files, archive_dir, 'to-archive', 'to-archive', archive=archive
     )
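A small sketch of how the two include patterns above classify lines (the sample include paths are made up):

    import re

    include_pattern = re.compile(r'include::(.+?)\[')
    commented_include_pattern = re.compile(r'^\s*//.*include::(.+?)\[')

    for line in [
        'include::modules/con_overview.adoc[leveloffset=+1]',
        '// include::modules/con_deprecated.adoc[leveloffset=+1]',
    ]:
        if commented_include_pattern.search(line):
            print('commented-only candidate:', line)   # tracked in commented_only_files
        elif include_pattern.search(line):
            print('active include:', line)             # added to referenced_files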
doc_utils/unused_images.py CHANGED
@@ -6,26 +6,98 @@ from .file_utils import collect_files, write_manifest_and_archive

 IMAGE_EXTENSIONS = {'.png', '.jpg', '.jpeg', '.gif', '.svg'}

-def find_unused_images(scan_dirs, archive_dir, archive=False, exclude_dirs=None, exclude_files=None):
+def find_unused_images(scan_dirs, archive_dir, archive=False, exclude_dirs=None, exclude_files=None, include_commented=False):
     # Print safety warning
     print("\n⚠️ SAFETY: Work in a git branch! Run without --archive first to preview.\n")
-
+
     image_files = collect_files(scan_dirs, IMAGE_EXTENSIONS, exclude_dirs, exclude_files)
     adoc_files = collect_files(['.'], {'.adoc'}, exclude_dirs, exclude_files)
-    referenced_images = set()
+
+    # Track which images are referenced (uncommented and commented separately)
+    referenced_images = set() # Images in uncommented references
+    commented_only_images = {} # Images referenced ONLY in commented lines: {basename: [(file, line_num, line_text)]}
+
+    # Patterns for finding image references (both commented and uncommented)
     image_ref_pattern = re.compile(r'(?i)image::([^\[]+)[\[]|image:([^\[]+)[\[]|"([^"\s]+\.(?:png|jpg|jpeg|gif|svg))"')
+    commented_line_pattern = re.compile(r'^\s*//')
+
     for adoc_file in adoc_files:
         try:
             with open(adoc_file, 'r', encoding='utf-8') as f:
-                content = f.read()
-                for match in image_ref_pattern.findall(content):
-                    for group in match:
-                        if group:
-                            referenced_images.add(os.path.basename(group))
+                lines = f.readlines()
+
+            for line_num, line in enumerate(lines, 1):
+                # Check if this line is commented
+                is_commented = commented_line_pattern.match(line)
+
+                # Find all image references in the line
+                for match in image_ref_pattern.findall(line):
+                    for group in match:
+                        if group:
+                            image_basename = os.path.basename(group)
+
+                            if is_commented:
+                                # Track location of commented reference
+                                if image_basename not in commented_only_images:
+                                    commented_only_images[image_basename] = []
+                                commented_only_images[image_basename].append((adoc_file, line_num, line.strip()))
+                            else:
+                                # Add to uncommented references
+                                referenced_images.add(image_basename)
+                                # If we found an uncommented reference, remove from commented_only tracking
+                                if image_basename in commented_only_images:
+                                    del commented_only_images[image_basename]
         except Exception as e:
             print(f"Warning: could not read {adoc_file}: {e}")
-    unused_images = [f for f in image_files if os.path.basename(f) not in referenced_images]
+
+    # Determine which images are unused based on the include_commented flag
+    if include_commented:
+        # When --commented is used: treat images with commented-only references as unused
+        # Only images with uncommented references are considered "used"
+        unused_images = [f for f in image_files if os.path.basename(f) not in referenced_images]
+        commented_only_unused = []
+    else:
+        # Default behavior: images referenced only in commented lines are considered "used"
+        # They should NOT be in the unused list, but we track them for reporting
+        all_referenced = referenced_images.union(set(commented_only_images.keys()))
+        unused_images = [f for f in image_files if os.path.basename(f) not in all_referenced]
+
+        # Generate list of images referenced only in comments for the report
+        commented_only_unused = []
+        for basename, references in commented_only_images.items():
+            # Find the full path for this basename in image_files
+            matching_files = [f for f in image_files if os.path.basename(f) == basename]
+            for f in matching_files:
+                commented_only_unused.append((f, references))
+
     unused_images = list(dict.fromkeys(unused_images))
+
+    # Generate detailed report for commented-only references
+    if commented_only_unused and not include_commented:
+        report_path = os.path.join(archive_dir, 'commented-image-references-report.txt')
+        os.makedirs(archive_dir, exist_ok=True)
+
+        with open(report_path, 'w', encoding='utf-8') as report:
+            report.write("Images Referenced Only in Commented Lines\n")
+            report.write("=" * 70 + "\n\n")
+            report.write(f"Found {len(commented_only_unused)} images that are referenced only in commented-out lines.\n")
+            report.write("These images are considered 'used' by default and will NOT be archived.\n\n")
+            report.write("To archive these images along with other unused images, use the --commented flag.\n\n")
+            report.write("-" * 70 + "\n\n")
+
+            for file_path, references in sorted(commented_only_unused):
+                report.write(f"Image: {file_path}\n")
+                report.write(f"Referenced in {len(references)} commented line(s):\n")
+                for ref_file, line_num, line_text in references:
+                    report.write(f" {ref_file}:{line_num}\n")
+                    report.write(f" {line_text}\n")
+                report.write("\n")
+
+        print(f"\n📋 Found {len(commented_only_unused)} images referenced only in commented lines.")
+        print(f" Detailed report saved to: {report_path}")
+        print(f" These images are considered 'used' and will NOT be archived by default.")
+        print(f" To include them in the archive operation, use the --commented flag.\n")
+
     return write_manifest_and_archive(
         unused_images, archive_dir, 'unused-images', 'unused-images', archive=archive
     )
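Similarly, a sketch of the image reference pattern against sample lines (filenames are made up); findall() returns one tuple per match, with the path in whichever group matched:

    import os
    import re

    image_ref_pattern = re.compile(
        r'(?i)image::([^\[]+)[\[]|image:([^\[]+)[\[]|"([^"\s]+\.(?:png|jpg|jpeg|gif|svg))"')
    commented_line_pattern = re.compile(r'^\s*//')

    for line in ['image::diagrams/flow.png[Flow diagram]',
                 '// image:icons/warning.svg[Warning]']:
        commented = bool(commented_line_pattern.match(line))
        for match in image_ref_pattern.findall(line):
            names = [os.path.basename(g) for g in match if g]
            print('commented' if commented else 'active', names)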
doc_utils/version.py CHANGED
@@ -1,7 +1,7 @@
 """Version information for doc-utils."""

 # This should match the version in pyproject.toml
-__version__ = "0.1.34"
+__version__ = "0.1.37"

 def get_version():
     """Return the current version string."""
rolfedh_doc_utils-0.1.34.dist-info/METADATA → rolfedh_doc_utils-0.1.37.dist-info/METADATA
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: rolfedh-doc-utils
-Version: 0.1.34
+Version: 0.1.37
 Summary: CLI tools for AsciiDoc documentation projects
 Author: Rolfe Dlugy-Hegwer
 License: MIT License
rolfedh_doc_utils-0.1.34.dist-info/RECORD → rolfedh_doc_utils-0.1.37.dist-info/RECORD
@@ -1,8 +1,10 @@
-archive_unused_files.py,sha256=OJZrkqn70hiOXED218jMYPFNFWnsDpjsCYOmBRxYnHU,2274
-archive_unused_images.py,sha256=fZeyEZtTd72Gbd3YBXTy5xoshAAM9qb4qFPMjhHL1Fg,1864
+archive_unused_files.py,sha256=YKYPtuBHEZcsyQSwSYxSYvw9v9Mh6Of8MqT53A5bM44,2438
+archive_unused_images.py,sha256=EvPhMIwp6_AHKtuNYQ663q6biXBeXaqf88NzWrhvtIE,2029
 check_scannability.py,sha256=O6ROr-e624jVPvPpASpsWo0gTfuCFpA2mTSX61BjAEI,5478
+check_source_directives.py,sha256=JiIvn_ph9VKPMH4zg-aSsuIGQZcnI_imj7rZLLE04L8,3660
 convert_callouts_interactive.py,sha256=4PjiVIOWxNJiJLQuBHT3x6rE46-hgfFHSaoo5quYIs8,22889
 convert_callouts_to_deflist.py,sha256=BoqW5_GkQ-KqNzn4vmE6lsQosrPV0lkB-bfAx3dzyMw,25886
+convert_tables_to_deflists.py,sha256=PIP6xummuMqC3aSzahKKRBYahes_j5ZpHp_-k6BjurY,15599
 doc_utils_cli.py,sha256=J3CE7cTDDCRGkhAknYejNWHhk5t9YFGt27WDVfR98Xk,5111
 extract_link_attributes.py,sha256=wR2SmR2la-jR6DzDbas2PoNONgRZ4dZ6aqwzkwEv8Gs,3516
 find_unused_attributes.py,sha256=77CxFdm72wj6SO81w-auMdDjnvF83jWy_qaM7DsAtBw,4263
@@ -19,20 +21,21 @@ doc_utils/__init__.py,sha256=qqZR3lohzkP63soymrEZPBGzzk6-nFzi4_tSffjmu_0,74
 doc_utils/extract_link_attributes.py,sha256=U0EvPZReJQigNfbT-icBsVT6Li64hYki5W7MQz6qqbc,22743
 doc_utils/file_utils.py,sha256=fpTh3xx759sF8sNocdn_arsP3KAv8XA6cTQTAVIZiZg,4247
 doc_utils/format_asciidoc_spacing.py,sha256=RL2WU_dG_UfGL01LnevcyJfKsvYy_ogNyeoVX-Fyqks,13579
+doc_utils/missing_source_directive.py,sha256=X3Acn0QJTk6XjmBXhGus5JAjlIitCiicCRE3fslifyw,8048
 doc_utils/replace_link_attributes.py,sha256=gmAs68_njBqEz-Qni-UGgeYEDTMxlTWk_IOm76FONNE,7279
 doc_utils/scannability.py,sha256=XwlmHqDs69p_V36X7DLjPTy0DUoLszSGqYjJ9wE-3hg,982
 doc_utils/spinner.py,sha256=lJg15qzODiKoR0G6uFIk2BdVNgn9jFexoTRUMrjiWvk,3554
 doc_utils/topic_map_parser.py,sha256=tKcIO1m9r2K6dvPRGue58zqMr0O2zKU1gnZMzEE3U6o,4571
-doc_utils/unused_adoc.py,sha256=2cbqcYr1os2EhETUU928BlPRlsZVSdI00qaMhqjSIqQ,5263
+doc_utils/unused_adoc.py,sha256=LPQWPGEOizXECxepk7E_5cjTVvKn6RXQYTWG97Ps5VQ,9077
 doc_utils/unused_attributes.py,sha256=OHyAdaBD7aNo357B0SLBN5NC_jNY5TWXMwgtfJNh3X8,7621
-doc_utils/unused_images.py,sha256=nqn36Bbrmon2KlGlcaruNjJJvTQ8_9H0WU9GvCW7rW8,1456
+doc_utils/unused_images.py,sha256=hL8Qrik9QCkVh54eBLuNczRS9tMnsqIEfavNamM1UeQ,5664
 doc_utils/validate_links.py,sha256=iBGXnwdeLlgIT3fo3v01ApT5k0X2FtctsvkrE6E3VMk,19610
-doc_utils/version.py,sha256=LpXe7kXo5uNMJOga179IYdU101aWLSTOnciZkUlrK0E,203
+doc_utils/version.py,sha256=zVnktTYITGhLqPNoyXbSnWi5bQassZ3M9S4LgDCGD-E,203
 doc_utils/version_check.py,sha256=-31Y6AN0KGi_CUCAVOOhf6bPO3r7SQIXPxxeffLAF0w,7535
 doc_utils/warnings_report.py,sha256=20yfwqBjOprfFhQwCujbcsvjJCbHHhmH84uAujm-y-o,8877
-rolfedh_doc_utils-0.1.34.dist-info/licenses/LICENSE,sha256=vLxtwMVOJA_hEy8b77niTkdmQI9kNJskXHq0dBS36e0,1075
-rolfedh_doc_utils-0.1.34.dist-info/METADATA,sha256=uDcruRVK6RPRkZtBtM5DsH9FZ5q9LXEf8hEqOsg3mig,8325
-rolfedh_doc_utils-0.1.34.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
-rolfedh_doc_utils-0.1.34.dist-info/entry_points.txt,sha256=vL_LlLKOiurRzchrq8iRUQG19Xi9lSAFVZGjO-xyErk,577
-rolfedh_doc_utils-0.1.34.dist-info/top_level.txt,sha256=J4xtr3zoyCip27b3GnticFVZoyz5HHtgGqHQ-SZONCA,265
-rolfedh_doc_utils-0.1.34.dist-info/RECORD,,
+rolfedh_doc_utils-0.1.37.dist-info/licenses/LICENSE,sha256=vLxtwMVOJA_hEy8b77niTkdmQI9kNJskXHq0dBS36e0,1075
+rolfedh_doc_utils-0.1.37.dist-info/METADATA,sha256=RZ3wEHeIdcjF52LN0DQ7LbrANY_2WlxK96cSJIP1cAg,8325
+rolfedh_doc_utils-0.1.37.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+rolfedh_doc_utils-0.1.37.dist-info/entry_points.txt,sha256=pICKzbXMRsD5l_9RK4n7WYltHj4cqW4FXu6fv_EuVyE,693
+rolfedh_doc_utils-0.1.37.dist-info/top_level.txt,sha256=ii_0OmWdCjgCBV1RX6LY63jdH4SOEL0aYtfTMsRGAtU,316
+rolfedh_doc_utils-0.1.37.dist-info/RECORD,,
rolfedh_doc_utils-0.1.34.dist-info/entry_points.txt → rolfedh_doc_utils-0.1.37.dist-info/entry_points.txt
@@ -2,8 +2,10 @@
 archive-unused-files = archive_unused_files:main
 archive-unused-images = archive_unused_images:main
 check-scannability = check_scannability:main
+check-source-directives = check_source_directives:main
 convert-callouts-interactive = convert_callouts_interactive:main
 convert-callouts-to-deflist = convert_callouts_to_deflist:main
+convert-tables-to-deflists = convert_tables_to_deflists:main
 doc-utils = doc_utils_cli:main
 extract-link-attributes = extract_link_attributes:main
 find-unused-attributes = find_unused_attributes:main
rolfedh_doc_utils-0.1.34.dist-info/top_level.txt → rolfedh_doc_utils-0.1.37.dist-info/top_level.txt
@@ -2,8 +2,10 @@ archive_unused_files
 archive_unused_images
 callout_lib
 check_scannability
+check_source_directives
 convert_callouts_interactive
 convert_callouts_to_deflist
+convert_tables_to_deflists
 doc_utils
 doc_utils_cli
 extract_link_attributes