python-code-quality 0.1.4__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
py_cq/localtypes.py ADDED
@@ -0,0 +1,135 @@
+ """Utility classes for representing and aggregating results from static-analysis tools.
+ 
+ This module defines dataclasses that capture tool configuration (`ToolConfig`), raw execution output (`RawResult`), parsed metrics (`ToolResult`), and a consolidated view of all tool results (`CombinedToolResults`). It also provides an abstract `AbstractParser` that concrete parsers should subclass to convert a `RawResult` into a `ToolResult`. Together these components enable parsing, combining, and serialising analysis metrics for downstream reporting and analysis."""
+ 
+ from abc import ABC, abstractmethod
+ from collections.abc import Callable
+ from dataclasses import dataclass, field
+ from typing import Any
+ 
+ 
+ @dataclass
+ class ToolConfig:
+     """Represents the configuration for an analysis tool, including its name, command, parser class, context path, priority, and thresholds for warnings and errors."""
+ 
+     name: str  # e.g., "pytest", "coverage", "pydocstyle"
+     command: str  # The command to execute (can include placeholders)
+     parser_class: Callable  # Parser class used to parse this tool's output
+     context_path: str = ""  # Path to project or file
+     priority: int = 5  # 1=critical (compilation), 5=low (style)
+     warning_threshold: float = 0.7  # Yellow warning if below this
+     error_threshold: float = 0.5  # Red error if below this
+     run_in_target_env: bool = False  # If True, run in target project's env via uv
+     extra_deps: list[str] = field(default_factory=list)  # Extra deps to inject via uv --with
+ 
+ 
+ @dataclass
+ class RawResult:
+     """Represents the raw output from a tool execution.
+ 
+     Instances store the unprocessed data returned by a tool and can be
+     converted to a plain dictionary using :meth:`to_dict`."""
+ 
+     tool_name: str = ""
+     command: str = ""
+     stdout: str = ""
+     stderr: str = ""
+     return_code: int = 0
+     timestamp: str = ""  # For tracking when the analysis ran
+ 
+     def to_dict(self):
+         """Returns a dictionary containing the tool name, command, stdout, stderr, return code, and timestamp."""
+         return {
+             "tool_name": self.tool_name,
+             "command": self.command,
+             "stdout": self.stdout,
+             "stderr": self.stderr,
+             "return_code": self.return_code,
+             "timestamp": self.timestamp,
+         }
+ 
+ 
+ @dataclass
+ class ToolResult:
+     """Represents a parsed metric from a tool run.
+ 
+     This dataclass stores information about a metric extracted from a tool
+     execution, ensuring that the `details` attribute is always a dictionary.
+     It provides a `to_dict` method for convenient serialization of the metric
+     data into a plain dictionary."""
+ 
+     metrics: dict[str, float] = field(default_factory=dict)
+     details: dict[str, Any] = field(
+         default_factory=dict
+     )  # Additional details about the metric
+     raw: RawResult = field(default_factory=RawResult)
+     duration_s: float = 0.0
+ 
+     def __post_init__(self):
+         """Ensures that the `details` and `metrics` attributes are dictionaries, initializing them to empty dictionaries if they are not."""
+         if not isinstance(self.details, dict):
+             self.details = {}
+         if not isinstance(self.metrics, dict):
+             self.metrics = {}
+ 
+     def to_dict(self) -> dict:
+         """Returns a dictionary containing the metrics, details, and the raw data serialized via its own `to_dict` method."""
+         return {
+             "metrics": self.metrics,
+             "details": self.details,
+             "raw": self.raw.to_dict(),
+             "duration_s": self.duration_s,
+         }
+ 
+ 
+ @dataclass
+ class CombinedToolResults:
+     """Aggregates results from multiple tools, stores the associated path, and calculates an overall score by averaging the mean metric values of each ``ToolResult``. ``ToolResult`` objects without metrics are excluded from the average, and the score defaults to ``0.0`` when no result carries metrics."""
+ 
+     def __init__(self, path: str, tool_results: list[ToolResult]):
+         """Initializes a CombinedToolResults instance.
+ 
+         Stores the given path and list of ToolResult objects, and computes an overall
+         score by averaging the mean metric values of each ToolResult. ToolResults
+         without metrics are excluded from the average. If no result carries metrics,
+         the score defaults to 0.0.
+ 
+         Args:
+             path (str): Path associated with the results.
+             tool_results (list[ToolResult]): List of ToolResult objects."""
+         self.tool_results = tool_results
+         self.path = path
+         scored = [tr for tr in tool_results if tr.metrics]
+         self.score = (
+             sum(sum(tr.metrics.values()) / len(tr.metrics) for tr in scored) / len(scored)
+             if scored
+             else 0.0
+         )
+ 
+     score: float = 0.0
+     path: str = ""
+ 
+     def to_dict(self) -> dict:
+         """Returns a dictionary containing the path, overall score, and each ToolResult serialized."""
+         return {
+             "metrics": [tool_result.to_dict() for tool_result in self.tool_results],
+             "score": self.score,
+             "path": self.path,
+         }
+ 
+ 
+ class AbstractParser(ABC):
+     """Base class for parsers that transform raw tool output into structured `ToolResult` objects.
+ 
+     Subclasses must implement `parse` to convert a `RawResult` into a `ToolResult`. The optional `format_llm_message` can be overridden to supply a richer, tool-specific defect description for a parsed result."""
+ 
+     @abstractmethod
+     def parse(self, raw_result: RawResult) -> ToolResult:
+         """Converts raw tool output into a structured ToolResult."""
+         pass
+ 
+     def format_llm_message(self, tr: ToolResult) -> str:
+ """Return a single-defect description for LLM consumption.
129
+
130
+ Default implementation reports the worst metric by name and score.
131
+ Parsers with richer details should override this."""
+         if tr.metrics:
+             metric_name, value = next(iter(tr.metrics.items()))
+             return f"**{metric_name}** score: {value:.3f}"
+         return "No details available"
py_cq/main.py ADDED
@@ -0,0 +1,12 @@
+ """This module provides the entry point for the application. It defines a `main` function that invokes the command-line application defined in `py_cq.cli`. The module can be executed directly to run the application."""
+ 
+ from py_cq.cli import app
+ 
+ 
+ def main():
+     """Runs the application."""
+     app()
+ 
+ 
+ if __name__ == "__main__":
+     main()
@@ -0,0 +1,14 @@
+ """Utilities for consolidating static-analysis metrics.
+ 
+ This module provides a single function, :func:`aggregate_metrics`, which takes a filesystem path and a list of
+ :class:`ToolResult` objects produced by various static-analysis tools. It merges the metrics from each tool into
+ a :class:`CombinedToolResults` instance, which also derives an overall score, and returns that unified
+ representation. The resulting object can be consumed by reporting tools or CI pipelines to present a single view
+ of all analysis results for a given file or directory."""
+ 
+ from py_cq.localtypes import CombinedToolResults, ToolResult
+ 
+ 
+ def aggregate_metrics(path: str, metrics: list[ToolResult]) -> CombinedToolResults:
+     """Returns a CombinedToolResults instance aggregating the given metrics for the specified path."""
+     return CombinedToolResults(path=path, tool_results=metrics)
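As a quick check of the averaging rule this function inherits from `CombinedToolResults`: a result with metrics {0.8} contributes 0.8, a result with {0.5, 0.7} contributes its mean 0.6, and the overall score is their average 0.7. A small sketch, using the constructor directly since `aggregate_metrics` is a thin wrapper around it and this file's module path is not shown in the diff:

from py_cq.localtypes import CombinedToolResults, ToolResult

results = [
    ToolResult(metrics={"coverage": 0.8}),
    ToolResult(metrics={"simplicity": 0.5, "security": 0.7}),
]
combined = CombinedToolResults(path="src/", tool_results=results)
print(round(combined.score, 3))  # 0.7 == (0.8 + mean(0.5, 0.7)) / 2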
File without changes
@@ -0,0 +1,52 @@
+ """Parses output from bandit security linter into a standardized ToolResult.
+ 
+ Bandit is invoked with ``-f json``, producing a JSON blob on stdout.
+ The parser extracts per-file violations, applies severity weighting
+ (HIGH=5, MEDIUM=2, LOW=1), and converts the weighted count into a
+ logistic-variant score stored under the ``security`` metric key.
+ """
+ 
+ import json
+ 
+ from py_cq.localtypes import AbstractParser, RawResult, ToolResult
+ from py_cq.parsers.common import score_logistic_variant
+ 
+ _SEVERITY_WEIGHT = {"HIGH": 5, "MEDIUM": 2, "LOW": 1}
+ 
+ 
+ class BanditParser(AbstractParser):
+     """Parses raw JSON output from ``bandit -f json`` into a ToolResult."""
+ 
+     def parse(self, raw_result: RawResult) -> ToolResult:
+         try:
+             data = json.loads(raw_result.stdout)
+         except (json.JSONDecodeError, ValueError):
+             return ToolResult(raw=raw_result, metrics={"security": 1.0})
+ 
+         files: dict[str, list] = {}
+         weighted = 0
+         for issue in data.get("results", []):
+             path = issue.get("filename", "").replace("\\", "/")
+             severity = issue.get("issue_severity", "LOW")
+             files.setdefault(path, []).append({
+                 "line": issue.get("line_number", 0),
+                 "code": issue.get("test_id", ""),
+                 "severity": severity,
+                 "confidence": issue.get("issue_confidence", ""),
+                 "message": issue.get("issue_text", ""),
+             })
+             weighted += _SEVERITY_WEIGHT.get(severity, 1)
+ 
+         score = score_logistic_variant(weighted, scale_factor=10)
+         return ToolResult(raw=raw_result, metrics={"security": score}, details=files)
+ 
+     def format_llm_message(self, tr: ToolResult) -> str:
+         if not tr.details:
+             return "bandit reported issues (no details available)"
+         file, issues = next(iter(tr.details.items()))
+         issue = issues[0]
+         line = issue.get("line", "?")
+         code = issue.get("code", "")
+         severity = issue.get("severity", "")
+         message = issue.get("message", "")
+         return f"`{file}:{line}` — **{code}** [{severity}]: {message}"
@@ -0,0 +1,87 @@
+ """Utility functions for normalising numeric values and scoring error magnitudes.
+ 
+ This module provides small helpers that are often used when working with
+ performance metrics or error scores:
+ 
+ * :func:`read_source_lines` - Reads a short snippet of source code starting at
+   a given 1-based line number, for use in diagnostic messages.
+ * :func:`inv_normalize` - Inversely normalises a value relative to a maximum
+   reference, yielding a float in the interval [0, 1].
+ * :func:`score_logistic_variant` - Maps an error magnitude to a bounded score
+   using a logistic-style curve, with optional parameters controlling the scale
+   and steepness of the transition.
+ 
+ The numeric helpers return a float and can be used directly in downstream analytics,
+ visualisation or decision-making pipelines."""
+ 
+ 
+ 
+ def read_source_lines(file_path: str, line: int, count: int = 5) -> str:
+     """Return up to `count` source lines starting at the given 1-based line number."""
+     from pathlib import Path
+     try:
+         all_lines = Path(file_path).read_text(encoding="utf-8").splitlines()
+         start = max(0, line - 1)
+         return "\n".join(all_lines[start : start + count])
+     except OSError:
+         return ""
+ 
+ 
+ def inv_normalize(value: float, max_value: float) -> float:
+     """Returns the inverse normalized value of `value` relative to `max_value`."""
+     return (max_value - min(value, max_value)) / max_value
+ 
+ 
+ def score_logistic_variant(
+     errors, scale_factor: float = 30, steepness: float = 2
+ ) -> float:
+     """Calculate a logistic-variant score from an error value.
+ 
+     The score is always in the range ``[0.0, 1.0]`` and decreases monotonically
+     as the magnitude of the error increases. Negative errors are treated as
+     zero. A special case occurs when ``scale_factor`` is ``0``: the method
+     returns ``1.0`` only when the error is exactly zero; otherwise it returns
+     ``0.0``.
+ 
+     The logistic function is computed as::
+ 
+         1 / (1 + (errors / scale_factor) ** steepness)
+ 
+     To avoid numerical overflow, the intermediate term is capped at
+     ``float('inf')`` when ``errors / scale_factor`` exceeds
+     ``709 / steepness`` (the largest value that can be exponentiated
+     without raising an :class:`OverflowError`).
+ 
+     Args:
+         errors (float): The error magnitude to score. Negative values are
+             treated as zero.
+         scale_factor (float, optional): Scaling factor applied to the error.
+             Defaults to ``30``. When ``0``, the special case described above
+             applies.
+         steepness (float, optional): Exponent controlling the steepness of
+             the logistic curve. Defaults to ``2``.
+ 
+     Returns:
+         float: A score between ``0.0`` and ``1.0`` representing the logistic
+             mapping of the input error. The function safely handles large
+             error values by capping the intermediate calculation to infinity.
+ 
+     Example:
+ >>> score_logistic_variant(5, scale_factor=10, steepness=2)
69
+ 0.9090909090909091
70
+ >>> score_logistic_variant(-3)
71
+ 1.0
72
+ >>> score_logistic_variant(10, scale_factor=0)
73
+ 0.0"""
+     if errors < 0:
+         errors = 0
+     if scale_factor == 0:
+         return 1.0 if errors == 0 else 0.0
+     try:
+         # Handle case where errors/scale_factor is very large, to avoid overflow
+         base = errors / scale_factor
+         if base > 709 / steepness:  # exp(709) is near max float
+             term = float("inf")
+         else:
+             term = base**steepness
+     except OverflowError:  # pragma: no cover
+         return 0.0  # Score becomes 0 if term is too large
+     return 1.0 / (1.0 + term)
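To make the curve concrete: with the defaults (scale_factor=30, steepness=2), an error count equal to the scale factor scores 0.5, half the scale factor scores 0.8, and the score keeps falling roughly with the square of the error beyond that. A few spot checks against the implementation above; the import path matches the one used by the parser modules in this release:

from py_cq.parsers.common import score_logistic_variant

assert score_logistic_variant(0) == 1.0                    # no errors, perfect score
assert score_logistic_variant(15) == 0.8                   # (15/30)**2 = 0.25, so 1/1.25
assert score_logistic_variant(30) == 0.5                   # errors == scale_factor
assert score_logistic_variant(5, scale_factor=10) == 0.8   # the docstring example
assert score_logistic_variant(10, scale_factor=0) == 0.0   # degenerate scale handled explicitly
assert round(score_logistic_variant(12, 30), 3) == 0.862   # the ComplexityParser call pattern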
@@ -0,0 +1,134 @@
+ """This module defines the :class:`CompileParser`, a concrete subclass of
+ :class:`AbstractParser`. The parser translates raw compiler output into a
+ structured :class:`ToolResult`, extracting diagnostics, computing a
+ compile score, and providing concise help messages for any failures."""
+ 
+ import logging
+ 
+ from py_cq.localtypes import AbstractParser, RawResult, ToolResult
+ from py_cq.parsers.common import read_source_lines, score_logistic_variant
+ 
+ log = logging.getLogger("cq")
+ 
+ 
+ class CompileParser(AbstractParser):
+     """Parses raw compiler output into a structured ToolResult.
+ 
+     The `CompileParser` implements the `AbstractParser` interface, converting
+     `RawResult` objects into `ToolResult` instances that include diagnostics,
+     metrics, and a mapping of failed files. It also supplies a human-readable
+     help string summarizing any compilation errors for a given `ToolResult`."""
+ 
+     def parse(self, raw_result: RawResult) -> ToolResult:
+         """Parses compiler output into a structured ``ToolResult``.
+ 
+         The method scans the ``stdout`` of a ``RawResult`` object for compilation
+         events and error messages. For each file that emits an error, it extracts
+         the line number, source snippet, error type, and help text, normalizes the
+         file path, and stores this information in a dictionary keyed by file path.
+         It then computes a failure ratio (failed files ÷ total compilations) and
+         derives a compile score via ``score_logistic_variant``. The original
+         ``stdout`` is cleaned of ``Listing`` lines and back-slash path separators
+         are replaced with forward slashes. A ``ToolResult`` containing the raw
+         result, a compile metric, and, if any, a mapping of failed files is
+         returned.
+ 
+         Args:
+             raw_result (RawResult): Raw compiler output, typically containing a
+                 ``stdout`` attribute.
+ 
+         Returns:
+             ToolResult: A structured result containing diagnostics, a compile
+                 metric, and a mapping of failed files (if any).
+ 
+         Example:
+ >>> parser = CompileParser()
46
+ >>> raw = RawResult(stdout=(
47
+ ... 'Compiling a.c\\\\n'
48
+ ... '*** File "a.c" line 10, column 5:\\\\n'
49
+ ... ' error: unknown type name \\\\'foo\\\\'\\\\n'
50
+ ... '\\\\n'))
51
+ >>> result = parser.parse(raw)
52
+ >>> result.metrics['compile']
53
+ 0.5
54
+ >>> result.details['failed_files']['a.c']['type']
55
+ 'error'"""
+ 
+         compilations = 0
+         failed_files: dict[str, dict] = {}
+         current_error = None
+         # Process stdout first for successful compilations
+         if raw_result.stdout:
+             for line in raw_result.stdout.splitlines():
+                 if line.startswith("Compiling "):
+                     compilations += 1
+                 elif line.startswith("*** File "):
+                     # This indicates a compilation error
+                     file_path = line.split('"')[1]
+                     current_error = {"file": file_path, "error": line}
+                 elif current_error and line.strip():
+                     # Append additional error context
+                     current_error["error"] += "\n" + line
+                 elif line.startswith("Listing "):
+                     # Skip directory listings
+                     continue
+                 elif current_error and (not line.strip()):
+                     # Empty line ends the error block
+                     # Parse error details from the error block
+                     error_lines = current_error["error"].splitlines()
+                     log.debug("Compile error lines: %s", error_lines)
+                     error_info = {}
+                     # Extract line number if present
+                     if "line " in error_lines[0]:
+                         error_info["line"] = int(
+                             error_lines[0].split("line ")[1].split(",")[0]
+                         )
+                     # Get source code context if available
+                     if len(error_lines) > 1:
+                         error_info["src"] = error_lines[1].strip()
+                     if len(error_lines) > 3:
+                         if "Error:" in error_lines[3]:
+                             error_parts = error_lines[3].split(":")
+                             error_info["type"] = (
+                                 error_parts[0].strip().split()[-1]
+                             )  # Gets "SyntaxError"
+                             error_info["help"] = ",".join(
+                                 error_parts[1:]
+                             ).strip()  # Gets help message
+                         else:
+                             error_info["type"] = "Unknown"
+                             error_info["help"] = "\n".join(error_lines[2:]).strip()
+                     file_path = current_error["file"].replace("\\", "/")
+                     failed_files[file_path] = error_info
+                     current_error = None
+         failure_ratio = len(failed_files) / compilations if compilations > 0 else 0.0
+         score = score_logistic_variant(failure_ratio, scale_factor=0.25)
+         raw_result.stdout = "\n".join(
+             [
+                 line.replace("\\\\", "/")
+                 for line in raw_result.stdout.splitlines()
+                 if not line.startswith("Listing")
+             ]
+         )
+         tr = ToolResult(raw=raw_result, metrics={"compile": score})
+         if failed_files:
+             tr.details["failed_files"] = failed_files
+         return tr
+ 
+     def format_llm_message(self, tr: ToolResult) -> str:
+         """Return the first compilation failure as a defect description."""
+         failed = tr.details.get("failed_files", {})
+         if not failed:
+             return "Compilation failed (no details available)"
+         file, info = next(iter(failed.items()))
+         line = info.get("line", "?")
+         typ = info.get("type", "Error")
+         help_msg = info.get("help", "")
+         if isinstance(line, int):
+             context_start = max(1, line - 3)
+             raw_lines = read_source_lines(file, context_start, count=8).splitlines()
+             src = (
+                 "\n".join(
+                     f"{context_start + i}: {rline}"
+                     for i, rline in enumerate(raw_lines)
+                 )
+                 if raw_lines
+                 else info.get("src", "")
+             )
+         else:
+             src = info.get("src", "")
+         code_block = f"\n```python\n{src}\n```" if src else ""
+         return f"`{file}:{line}` — **{typ}**: {help_msg}{code_block}"
@@ -0,0 +1,86 @@
+ """Provides a `ComplexityParser` that converts raw complexity-analysis output into structured `ToolResult` objects for downstream use."""
+ 
+ import json
+ 
+ from py_cq.localtypes import AbstractParser, RawResult, ToolResult
+ from py_cq.parsers.common import score_logistic_variant
+ 
+ 
+ class ComplexityParser(AbstractParser):
+ """Parse raw output from a complexity analysis tool into structured results.
11
+
12
+ This parser accepts a :class:`~tools.core.RawResult` containing the raw
13
+ ``stdout`` of a static-analysis or profiling tool. It validates the
14
+ JSON payload, extracts per-file and per-function metrics, and returns a
15
+ :class:`~tools.core.ToolResult` that holds the parsed data, a
16
+ per-item details dictionary, and overall summary metrics such as the
17
+ overall simplicity score.
18
+
19
+ Example
20
+ -------
21
+ >>> parser = ComplexityParser()
22
+ >>> raw = RawResult(stdout='{"main.py":[{"name":"foo","complexity":12,"rank":"B"}]}',
23
+ ... return_code=0)
24
+ >>> result = parser.parse(raw)
25
+ >>> result.metrics['simplicity']
26
+ 0.4"""
+ 
+     def parse(self, raw_result: RawResult) -> ToolResult:
+         """Parse raw tool output into a structured :class:`~py_cq.localtypes.ToolResult`.
+ 
+         The method accepts a :class:`~py_cq.localtypes.RawResult` that contains the raw
+         ``stdout`` from a complexity analysis tool. The ``stdout`` is expected to
+         be a JSON string mapping file names to lists of function descriptors.
+         Each descriptor should at least contain a ``name`` and a ``complexity``
+         value, and may optionally include a ``rank``. The parser converts each
+         function into a *simplicity* score using the logistic variant
+         (`score_logistic_variant`). The overall simplicity score is the mean of
+         all function scores. The resulting :class:`~py_cq.localtypes.ToolResult`
+         holds the original raw result, a ``details`` dictionary keyed by file
+         and function names (with simplicity and rank), and a ``metrics``
+         dictionary that contains the overall simplicity value.
+ 
+         Args:
+             raw_result (RawResult):
+                 The raw result from a complexity analysis tool. It must expose a
+                 ``stdout`` attribute containing a JSON string that maps file
+                 names to lists of function descriptors.
+ 
+         Returns:
+             ToolResult: A structured result that includes the original raw result,
+                 per-file/function details with simplicity scores and ranks, and a
+                 metrics dictionary that holds the overall simplicity score.
+ 
+         Raises:
+             json.JSONDecodeError: If ``raw_result.stdout`` cannot be parsed as
+                 JSON.
+ 
+         Example:
+             >>> raw = RawResult(stdout='{"main.py": [{"name": "foo", "complexity": 12, "rank": "B"}]}', return_code=0)
+             >>> parser = ComplexityParser()
+             >>> result = parser.parse(raw)
+             >>> round(result.metrics["simplicity"], 3)
+             0.862"""
+         tr = ToolResult(raw=raw_result)
+         data = json.loads(raw_result.stdout)
+         score = 0
+         num_items = 0
+         max_complexity = 30
+         for file, functions in data.items():
+             file_name = file.replace("\\", "/")
+             if file_name not in tr.details:
+                 tr.details[file_name] = {}
+             for function in functions:
+                 num_items += 1
+                 function_score = score_logistic_variant(
+                     function.get("complexity", max_complexity), max_complexity
+                 )
+                 score += function_score
+                 tr.details[file_name][function["name"]] = {
+                     "simplicity": function_score,
+                     "rank": function.get("rank", "F"),
+                 }
+         tr.metrics["simplicity"] = score / num_items if num_items > 0 else 0.0
+         return tr
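A quick usage sketch for this parser. The JSON shape mirrors the docstring example; which complexity tool produces it is not stated in this diff, and the parser module's import path is an assumption:

import json

from py_cq.localtypes import RawResult
from py_cq.parsers.complexity import ComplexityParser  # assumed module path

data = {
    "pkg\\app.py": [
        {"name": "simple", "complexity": 3, "rank": "A"},
        {"name": "gnarly", "complexity": 30, "rank": "E"},
    ]
}
tr = ComplexityParser().parse(RawResult(stdout=json.dumps(data), return_code=0))
print(round(tr.metrics["simplicity"], 3))          # 0.745: mean of ~0.990 and 0.5
print(tr.details["pkg/app.py"]["gnarly"]["rank"])  # E (backslashes in paths are normalised)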
@@ -0,0 +1,88 @@
+ """Parses raw coverage tool output into a standardized `ToolResult` for consistent analysis across different coverage utilities.
+ The module defines `CoverageParser`, a concrete implementation of `AbstractParser`, which extracts overall and per-file coverage metrics from a `RawResult` object and normalises the data format for downstream processing."""
+ 
+ import logging
+ 
+ from py_cq.localtypes import AbstractParser, RawResult, ToolResult
+ 
+ log = logging.getLogger("cq")
+ 
+ 
+ class CoverageParser(AbstractParser):
+ """Parses raw coverage output into structured ToolResult instances.
13
+ Extends AbstractParser, extracting overall coverage percentages, per-file coverage values, normalising file paths, and preserving the tool's return code."""
14
+
15
+     def parse(self, raw_result: RawResult) -> ToolResult:
+         """Parse raw coverage output into a :class:`ToolResult`.
+ 
+         Given a :class:`RawResult` containing the stdout of a coverage tool, the
+         method extracts every line that ends with a percent sign. Each such line
+         is expected to follow the format::
+ 
+             <file> <statements> <missing> <coverage>%
+ 
+         The coverage percentage is converted to a fraction (e.g. 90% → 0.9) and
+         stored in ``metrics['coverage']`` for the overall ``TOTAL`` line, while
+         each per-file entry in ``details`` is a dictionary holding the coverage
+         fraction and the number of missing statements, keyed by the file path
+         normalised to use forward slashes.
+ 
+         Args:
+             raw_result (RawResult): The raw output from a coverage tool.
+ 
+         Returns:
+             ToolResult: A structured result containing the overall coverage
+                 metric and per-file coverage details.
+ 
+         Example:
+             >>> parser = CoverageParser()
+             >>> raw = RawResult(
+             ...     stdout='src/main.py 100 10 90%\\nTOTAL 200 20 90%',
+             ...     return_code=0)
+             >>> result = parser.parse(raw)
+             >>> result.metrics['coverage']
+             0.9
+             >>> result.details['src/main.py']['coverage']
+             0.9"""
+         tr = ToolResult(raw=raw_result)
+         lines = raw_result.stdout.splitlines()
+         coverage_lines = [line for line in lines if line.endswith("%")]
+         details = {}
+         for line in coverage_lines:
+             parts = line.split()
+             if len(parts) >= 2:
+                 file_name = parts[0]
+                 try:
+                     coverage_percentage = float(parts[-1].rstrip('%')) / 100.0
+                 except ValueError:
+                     log.warning("Error parsing coverage percentage from line: %s", line)
+                     continue
+                 if file_name == "TOTAL":
+                     tr.metrics["coverage"] = coverage_percentage
+                 else:
+                     try:
+                         missing = int(parts[2]) if len(parts) >= 4 else None
+                     except (ValueError, IndexError):
+                         missing = None
+                     details[file_name.replace("\\", "/")] = {
+                         "coverage": coverage_percentage,
+                         "missing": missing,
+                     }
+         tr.details = details
+         return tr
+ 
+     def format_llm_message(self, tr: ToolResult) -> str:
+         """Return the files with lowest coverage as a defect description."""
+         score = tr.metrics.get("coverage", 0)
+         uncovered = sorted(
+             [(f, d) for f, d in tr.details.items() if isinstance(d, dict) and d.get("missing")],
+             key=lambda x: x[1]["coverage"],
+         )[:5]
+         if not uncovered:
+             return f"**coverage** score: {score:.3f}"
+         lines = [f"**coverage** score: {score:.3f} — files with lowest coverage:"]
+         for path, data in uncovered:
+             pct = data["coverage"]
+             miss = data["missing"]
+             lines.append(f"- `{path}`: {pct:.0%} ({miss} uncovered statements)")
+         return "\n".join(lines)