lintro-0.3.2-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of lintro might be problematic.

Files changed (85)
  1. lintro/__init__.py +3 -0
  2. lintro/__main__.py +6 -0
  3. lintro/ascii-art/fail.txt +404 -0
  4. lintro/ascii-art/success.txt +484 -0
  5. lintro/cli.py +70 -0
  6. lintro/cli_utils/__init__.py +7 -0
  7. lintro/cli_utils/commands/__init__.py +7 -0
  8. lintro/cli_utils/commands/check.py +210 -0
  9. lintro/cli_utils/commands/format.py +167 -0
  10. lintro/cli_utils/commands/list_tools.py +114 -0
  11. lintro/enums/__init__.py +0 -0
  12. lintro/enums/action.py +29 -0
  13. lintro/enums/darglint_strictness.py +22 -0
  14. lintro/enums/group_by.py +31 -0
  15. lintro/enums/hadolint_enums.py +46 -0
  16. lintro/enums/output_format.py +40 -0
  17. lintro/enums/tool_name.py +36 -0
  18. lintro/enums/tool_type.py +27 -0
  19. lintro/enums/yamllint_format.py +22 -0
  20. lintro/exceptions/__init__.py +0 -0
  21. lintro/exceptions/errors.py +15 -0
  22. lintro/formatters/__init__.py +0 -0
  23. lintro/formatters/core/__init__.py +0 -0
  24. lintro/formatters/core/output_style.py +21 -0
  25. lintro/formatters/core/table_descriptor.py +24 -0
  26. lintro/formatters/styles/__init__.py +17 -0
  27. lintro/formatters/styles/csv.py +41 -0
  28. lintro/formatters/styles/grid.py +91 -0
  29. lintro/formatters/styles/html.py +48 -0
  30. lintro/formatters/styles/json.py +61 -0
  31. lintro/formatters/styles/markdown.py +41 -0
  32. lintro/formatters/styles/plain.py +39 -0
  33. lintro/formatters/tools/__init__.py +35 -0
  34. lintro/formatters/tools/darglint_formatter.py +72 -0
  35. lintro/formatters/tools/hadolint_formatter.py +84 -0
  36. lintro/formatters/tools/prettier_formatter.py +76 -0
  37. lintro/formatters/tools/ruff_formatter.py +116 -0
  38. lintro/formatters/tools/yamllint_formatter.py +87 -0
  39. lintro/models/__init__.py +0 -0
  40. lintro/models/core/__init__.py +0 -0
  41. lintro/models/core/tool.py +104 -0
  42. lintro/models/core/tool_config.py +23 -0
  43. lintro/models/core/tool_result.py +39 -0
  44. lintro/parsers/__init__.py +0 -0
  45. lintro/parsers/darglint/__init__.py +0 -0
  46. lintro/parsers/darglint/darglint_issue.py +9 -0
  47. lintro/parsers/darglint/darglint_parser.py +62 -0
  48. lintro/parsers/hadolint/__init__.py +1 -0
  49. lintro/parsers/hadolint/hadolint_issue.py +24 -0
  50. lintro/parsers/hadolint/hadolint_parser.py +65 -0
  51. lintro/parsers/prettier/__init__.py +0 -0
  52. lintro/parsers/prettier/prettier_issue.py +10 -0
  53. lintro/parsers/prettier/prettier_parser.py +60 -0
  54. lintro/parsers/ruff/__init__.py +1 -0
  55. lintro/parsers/ruff/ruff_issue.py +43 -0
  56. lintro/parsers/ruff/ruff_parser.py +89 -0
  57. lintro/parsers/yamllint/__init__.py +0 -0
  58. lintro/parsers/yamllint/yamllint_issue.py +24 -0
  59. lintro/parsers/yamllint/yamllint_parser.py +68 -0
  60. lintro/tools/__init__.py +40 -0
  61. lintro/tools/core/__init__.py +0 -0
  62. lintro/tools/core/tool_base.py +320 -0
  63. lintro/tools/core/tool_manager.py +167 -0
  64. lintro/tools/implementations/__init__.py +0 -0
  65. lintro/tools/implementations/tool_darglint.py +245 -0
  66. lintro/tools/implementations/tool_hadolint.py +302 -0
  67. lintro/tools/implementations/tool_prettier.py +270 -0
  68. lintro/tools/implementations/tool_ruff.py +618 -0
  69. lintro/tools/implementations/tool_yamllint.py +240 -0
  70. lintro/tools/tool_enum.py +17 -0
  71. lintro/utils/__init__.py +0 -0
  72. lintro/utils/ascii_normalize_cli.py +84 -0
  73. lintro/utils/config.py +39 -0
  74. lintro/utils/console_logger.py +783 -0
  75. lintro/utils/formatting.py +173 -0
  76. lintro/utils/output_manager.py +301 -0
  77. lintro/utils/path_utils.py +41 -0
  78. lintro/utils/tool_executor.py +443 -0
  79. lintro/utils/tool_utils.py +431 -0
  80. lintro-0.3.2.dist-info/METADATA +338 -0
  81. lintro-0.3.2.dist-info/RECORD +85 -0
  82. lintro-0.3.2.dist-info/WHEEL +5 -0
  83. lintro-0.3.2.dist-info/entry_points.txt +2 -0
  84. lintro-0.3.2.dist-info/licenses/LICENSE +21 -0
  85. lintro-0.3.2.dist-info/top_level.txt +1 -0
@@ -0,0 +1,431 @@
+"""Tool utilities for handling core operations."""
+
+import fnmatch
+import os
+from collections.abc import Callable
+
+try:
+    from tabulate import tabulate
+
+    TABULATE_AVAILABLE = True
+except ImportError:
+    TABULATE_AVAILABLE = False
+
+from lintro.formatters.tools.darglint_formatter import (
+    DarglintTableDescriptor,
+    format_darglint_issues,
+)
+from lintro.formatters.tools.hadolint_formatter import (
+    HadolintTableDescriptor,
+    format_hadolint_issues,
+)
+from lintro.formatters.tools.prettier_formatter import (
+    PrettierTableDescriptor,
+    format_prettier_issues,
+)
+from lintro.formatters.tools.ruff_formatter import (
+    RuffTableDescriptor,
+    format_ruff_issues,
+)
+from lintro.formatters.tools.yamllint_formatter import (
+    YamllintTableDescriptor,
+    format_yamllint_issues,
+)
+from lintro.parsers.darglint.darglint_parser import parse_darglint_output
+from lintro.parsers.hadolint.hadolint_parser import parse_hadolint_output
+from lintro.parsers.prettier.prettier_issue import PrettierIssue
+from lintro.parsers.prettier.prettier_parser import parse_prettier_output
+from lintro.parsers.ruff.ruff_issue import RuffFormatIssue, RuffIssue
+from lintro.parsers.ruff.ruff_parser import parse_ruff_output
+from lintro.parsers.yamllint.yamllint_parser import parse_yamllint_output
+
+# Constants
+TOOL_TABLE_FORMATTERS: dict[str, tuple] = {
+    "darglint": (DarglintTableDescriptor(), format_darglint_issues),
+    "hadolint": (HadolintTableDescriptor(), format_hadolint_issues),
+    "prettier": (PrettierTableDescriptor(), format_prettier_issues),
+    "ruff": (RuffTableDescriptor(), format_ruff_issues),
+    "yamllint": (YamllintTableDescriptor(), format_yamllint_issues),
+}
+VENV_PATTERNS: list[str] = [
+    "venv",
+    "env",
+    "ENV",
+    ".venv",
+    ".env",
+    "virtualenv",
+    "virtual_env",
+    "virtualenvs",
+    "site-packages",
+    "node_modules",
+]
+
+
+def parse_tool_list(tools_str: str | None) -> list[str]:
+    """Parse a comma-separated list of core names into ToolEnum members.
+
+    Args:
+        tools_str: str | None: Comma-separated string of tool names, or None.
+
+    Returns:
+        list: List of ToolEnum members parsed from the input string.
+
+    Raises:
+        ValueError: If an invalid tool name is provided.
+    """
+    if not tools_str:
+        return []
+    # Import ToolEnum here to avoid circular import at module level
+    from lintro.tools.tool_enum import ToolEnum
+
+    result: list = []
+    for t in tools_str.split(","):
+        t = t.strip()
+        if not t:
+            continue
+        try:
+            result.append(ToolEnum[t.upper()])
+        except KeyError:
+            raise ValueError(f"Unknown core: {t}")
+    return result
+
+
+def parse_tool_options(tool_options_str: str | None) -> dict:
+    """Parse tool-specific options.
+
+    Args:
+        tool_options_str: str | None: Comma-separated string of tool-specific
+            options, or None.
+
+    Returns:
+        dict: Dictionary of parsed tool options.
+    """
+    if not tool_options_str:
+        return {}
+
+    options: dict = {}
+    for opt in tool_options_str.split(","):
+        if ":" in opt:
+            tool_name, tool_opt = opt.split(":", 1)
+            if "=" in tool_opt:
+                opt_name, opt_value = tool_opt.split("=", 1)
+                if tool_name not in options:
+                    options[tool_name] = {}
+                options[tool_name][opt_name] = opt_value
+    return options
+
+
+def should_exclude_path(
+    path: str,
+    exclude_patterns: list[str],
+) -> bool:
+    """Check if a path should be excluded based on patterns.
+
+    Args:
+        path: str: File path to check for exclusion.
+        exclude_patterns: list[str]: List of glob patterns to match against.
+
+    Returns:
+        bool: True if the path should be excluded, False otherwise.
+    """
+    if not exclude_patterns:
+        return False
+
+    # Normalize path separators for cross-platform compatibility
+    normalized_path: str = path.replace("\\", "/")
+
+    for pattern in exclude_patterns:
+        if fnmatch.fnmatch(normalized_path, pattern):
+            return True
+        # Also check if the pattern matches any part of the path
+        path_parts: list[str] = normalized_path.split("/")
+        for part in path_parts:
+            if fnmatch.fnmatch(part, pattern):
+                return True
+    return False
+
+
+def get_table_columns(
+    issues: list[dict[str, str]],
+    tool_name: str,
+    group_by: str | None = None,
+) -> tuple[list[str], list[str]]:
+    """Get table columns and rows for a list of issues.
+
+    Args:
+        issues: list[dict[str, str]]: List of issue dictionaries.
+        tool_name: str: Name of the tool that generated the issues.
+        group_by: str | None: How to group the issues (file, code, none, auto).
+
+    Returns:
+        tuple: (columns, rows) where columns is a list of column names and rows
+            is a list of row data.
+    """
+    if not issues:
+        return [], []
+
+    # Canonical key-to-column mapping used when descriptor columns are known
+    key_mapping = {
+        "file": "File",
+        "line": "Line",
+        "column": "Column",
+        "code": "Code",
+        "message": "Message",
+        "fixable": "Fixable",
+    }
+
+    # Get the appropriate formatter for this tool
+    if tool_name in TOOL_TABLE_FORMATTERS:
+        descriptor, _ = TOOL_TABLE_FORMATTERS[tool_name]
+        expected_columns: list[str] = descriptor.get_columns()
+        # Use expected columns but map available keys
+        columns = expected_columns
+    else:
+        # Fallback: use all unique keys from the first issue
+        if issues:
+            columns = list(issues[0].keys())
+        else:
+            columns = []
+
+    # Convert issues to rows
+    rows: list[list[str]] = []
+    for issue in issues:
+        row: list[str] = []
+        for col in columns:
+            # Try to find the corresponding key in the issue dictionary
+            value = ""
+            for key, mapped_col in key_mapping.items():
+                if mapped_col == col and key in issue:
+                    value = str(issue[key])
+                    break
+            if not value:  # If no mapping found, try direct key match
+                value = str(issue.get(col, ""))
+            row.append(value)
+        rows.append(row)
+
+    return columns, rows
+
+
+def format_as_table(
+    issues: list[dict[str, str]],
+    tool_name: str,
+    group_by: str | None = None,
+) -> str:
+    """Format issues as a table using the appropriate formatter.
+
+    Args:
+        issues: list[dict[str, str]]: List of issue dictionaries.
+        tool_name: str: Name of the tool that generated the issues.
+        group_by: str | None: How to group the issues (file, code, none, auto).
+
+    Returns:
+        str: Formatted table as a string.
+    """
+    if not issues:
+        return "No issues found."
+
+    # Get the appropriate formatter for this tool
+    if tool_name in TOOL_TABLE_FORMATTERS:
+        try:
+            _, formatter_func = TOOL_TABLE_FORMATTERS[tool_name]
+            # Try to use the formatter, but it might expect specific issue objects
+            result = formatter_func(issues=issues, format="grid")
+            if result:  # If formatter worked, return the result
+                return result
+        except (TypeError, AttributeError):
+            # Formatter failed, fall back to tabulate
+            pass
+
+    # Fallback: use tabulate if available
+    if TABULATE_AVAILABLE:
+        columns, rows = get_table_columns(
+            issues=issues,
+            tool_name=tool_name,
+            group_by=group_by,
+        )
+        return tabulate(tabular_data=rows, headers=columns, tablefmt="grid")
+    else:
+        # Simple text format
+        columns, rows = get_table_columns(
+            issues=issues,
+            tool_name=tool_name,
+            group_by=group_by,
+        )
+        if not columns:
+            return "No issues found."
+        header: str = " | ".join(columns)
+        separator: str = "-" * len(header)
+        lines: list[str] = [header, separator]
+        for row in rows:
+            lines.append(" | ".join(str(cell) for cell in row))
+        return "\n".join(lines)
+
+
+def format_tool_output(
+    tool_name: str,
+    output: str,
+    group_by: str = "auto",
+    output_format: str = "grid",
+    issues: list[object] | None = None,
+) -> str:
+    """Format tool output using the specified format.
+
+    Args:
+        tool_name: str: Name of the tool that generated the output.
+        output: str: Raw output from the tool.
+        group_by: str: How to group issues (file, code, none, auto).
+        output_format: str: Output format (plain, grid, markdown, html, json, csv).
+        issues: list[object] | None: List of parsed issue objects (optional).
+
+    Returns:
+        str: Formatted output string.
+    """
+    # If parsed issues are provided, prefer them regardless of raw output
+    if issues and tool_name in TOOL_TABLE_FORMATTERS:
+        # Fixability predicates per tool
+        def _is_fixable_predicate(tool: str) -> Callable[[object], bool] | None:
+            if tool == "ruff":
+                return lambda i: isinstance(i, RuffFormatIssue) or (
+                    isinstance(i, RuffIssue) and getattr(i, "fixable", False)
+                )
+            if tool == "prettier":
+                return lambda i: isinstance(i, PrettierIssue) or True
+            return None
+
+        is_fixable = _is_fixable_predicate(tool_name)
+
+        if output_format != "json" and is_fixable is not None and TABULATE_AVAILABLE:
+            descriptor, _ = TOOL_TABLE_FORMATTERS[tool_name]
+
+            fixable_issues = [i for i in issues if is_fixable(i)]
+            non_fixable_issues = [i for i in issues if not is_fixable(i)]
+
+            sections: list[str] = []
+            if fixable_issues:
+                cols_f = descriptor.get_columns()
+                rows_f = descriptor.get_rows(fixable_issues)
+                table_f = tabulate(
+                    tabular_data=rows_f,
+                    headers=cols_f,
+                    tablefmt="grid",
+                    stralign="left",
+                    disable_numparse=True,
+                )
+                sections.append("Auto-fixable issues\n" + table_f)
+            if non_fixable_issues:
+                cols_u = descriptor.get_columns()
+                rows_u = descriptor.get_rows(non_fixable_issues)
+                table_u = tabulate(
+                    tabular_data=rows_u,
+                    headers=cols_u,
+                    tablefmt="grid",
+                    stralign="left",
+                    disable_numparse=True,
+                )
+                sections.append("Not auto-fixable issues\n" + table_u)
+            if sections:
+                return "\n\n".join(sections)
+
+        # Fallback to tool-specific formatter on provided issues
+        _, formatter_func = TOOL_TABLE_FORMATTERS[tool_name]
+        return formatter_func(issues=issues, format=output_format)
+
+    if not output or not output.strip():
+        return "No issues found."
+
+    # If we have parsed issues, prefer centralized split-by-fixability when
+    # a predicate is known for this tool (non-JSON formats only). Otherwise
+    # fall back to the tool-specific formatter.
+
+    # Otherwise, try to parse the output and format it
+    parsed_issues: list = []
+    if tool_name == "ruff":
+        parsed_issues = parse_ruff_output(output=output)
+    elif tool_name == "prettier":
+        parsed_issues = parse_prettier_output(output=output)
+    elif tool_name == "darglint":
+        parsed_issues = parse_darglint_output(output=output)
+    elif tool_name == "hadolint":
+        parsed_issues = parse_hadolint_output(output=output)
+    elif tool_name == "yamllint":
+        parsed_issues = parse_yamllint_output(output=output)
+
+    if parsed_issues and tool_name in TOOL_TABLE_FORMATTERS:
+        _, formatter_func = TOOL_TABLE_FORMATTERS[tool_name]
+        return formatter_func(issues=parsed_issues, format=output_format)
+
+    # Fallback: return the raw output
+    return output
+
+
+def walk_files_with_excludes(
+    paths: list[str],
+    file_patterns: list[str],
+    exclude_patterns: list[str],
+    include_venv: bool = False,
+) -> list[str]:
+    """Walk through directories and find files matching patterns, excluding
+    specified patterns.
+
+    Args:
+        paths: list[str]: List of file or directory paths to search.
+        file_patterns: list[str]: List of file patterns to include (e.g.,
+            ["*.py", "*.js"]).
+        exclude_patterns: list[str]: List of patterns to exclude (e.g.,
+            ["__pycache__", "*.pyc"]).
+        include_venv: bool: Whether to include virtual environment directories.
+
+    Returns:
+        list[str]: List of file paths that match the patterns and are not excluded.
+    """
+
+    all_files: list[str] = []
+
+    for path in paths:
+        if os.path.isfile(path):
+            # Single file - check if the filename matches any file pattern
+            filename = os.path.basename(path)
+            for pattern in file_patterns:
+                if fnmatch.fnmatch(filename, pattern):
+                    all_files.append(path)
+                    break
+        elif os.path.isdir(path):
+            # Directory - walk through it
+            for root, dirs, files in os.walk(path):
+                # Filter out virtual environment directories unless include_venv is True
+                if not include_venv:
+                    dirs[:] = [d for d in dirs if not _is_venv_directory(d)]
+
+                # Check each file against the patterns
+                for file in files:
+                    file_path: str = os.path.join(root, file)
+                    rel_path: str = os.path.relpath(file_path, path)
+
+                    # Check if file matches any file pattern
+                    matches_pattern: bool = False
+                    for pattern in file_patterns:
+                        if fnmatch.fnmatch(file, pattern):
+                            matches_pattern = True
+                            break
+
+                    if matches_pattern:
+                        # Check if file should be excluded
+                        if not should_exclude_path(
+                            path=rel_path,
+                            exclude_patterns=exclude_patterns,
+                        ):
+                            all_files.append(file_path)
+
+    return sorted(all_files)
+
+
+def _is_venv_directory(dirname: str) -> bool:
+    """Check if a directory name indicates a virtual environment.
+
+    Args:
+        dirname: str: Directory name to check.
+
+    Returns:
+        bool: True if the directory appears to be a virtual environment.
+    """
+    return dirname in VENV_PATTERNS
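
For orientation, here is a minimal usage sketch (not taken from the package's own documentation) exercising two of the pure helpers added in this file. It assumes lintro 0.3.2 is installed so that the module is importable as lintro.utils.tool_utils.

from lintro.utils.tool_utils import parse_tool_options, should_exclude_path

# parse_tool_options groups comma-separated "tool:option=value" pairs per tool.
opts = parse_tool_options("ruff:line-length=88,ruff:select=E501,yamllint:strict=true")
assert opts == {
    "ruff": {"line-length": "88", "select": "E501"},
    "yamllint": {"strict": "true"},
}

# should_exclude_path matches each glob against the full path and every path component.
assert should_exclude_path("src/app/__pycache__/mod.pyc", ["__pycache__", "*.pyc"])
assert not should_exclude_path("src/app/mod.py", ["__pycache__", "*.pyc"])

Note that entries without a "tool:" prefix or without an "=" are silently dropped by parse_tool_options rather than raising an error.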