pipeguard-cli 0.1.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- pipeguard/__init__.py +5 -0
- pipeguard/_version.py +34 -0
- pipeguard/cli.py +92 -0
- pipeguard/output/__init__.py +0 -0
- pipeguard/output/autofix.py +37 -0
- pipeguard/output/formatter.py +133 -0
- pipeguard/scanner/__init__.py +3 -0
- pipeguard/scanner/base.py +24 -0
- pipeguard/scanner/github_actions/__init__.py +0 -0
- pipeguard/scanner/github_actions/action_inventory.py +59 -0
- pipeguard/scanner/github_actions/actionlint_runner.py +43 -0
- pipeguard/scanner/github_actions/cve_check.py +88 -0
- pipeguard/scanner/github_actions/cve_db.json +110 -0
- pipeguard/scanner/github_actions/permissions.py +235 -0
- pipeguard/scanner/github_actions/pull_request_target.py +108 -0
- pipeguard/scanner/github_actions/secrets_flow.py +155 -0
- pipeguard/scanner/github_actions/sha_pinning.py +90 -0
- pipeguard/scanner/github_actions/supply_chain.py +103 -0
- pipeguard_cli-0.1.0.dist-info/METADATA +277 -0
- pipeguard_cli-0.1.0.dist-info/RECORD +23 -0
- pipeguard_cli-0.1.0.dist-info/WHEEL +4 -0
- pipeguard_cli-0.1.0.dist-info/entry_points.txt +2 -0
- pipeguard_cli-0.1.0.dist-info/licenses/LICENSE +178 -0
pipeguard/__init__.py
ADDED
pipeguard/_version.py
ADDED
|
@@ -0,0 +1,34 @@
|
|
|
1
|
+
# file generated by setuptools-scm
|
|
2
|
+
# don't change, don't track in version control
|
|
3
|
+
|
|
4
|
+
__all__ = [
|
|
5
|
+
"__version__",
|
|
6
|
+
"__version_tuple__",
|
|
7
|
+
"version",
|
|
8
|
+
"version_tuple",
|
|
9
|
+
"__commit_id__",
|
|
10
|
+
"commit_id",
|
|
11
|
+
]
|
|
12
|
+
|
|
13
|
+
TYPE_CHECKING = False
|
|
14
|
+
if TYPE_CHECKING:
|
|
15
|
+
from typing import Tuple
|
|
16
|
+
from typing import Union
|
|
17
|
+
|
|
18
|
+
VERSION_TUPLE = Tuple[Union[int, str], ...]
|
|
19
|
+
COMMIT_ID = Union[str, None]
|
|
20
|
+
else:
|
|
21
|
+
VERSION_TUPLE = object
|
|
22
|
+
COMMIT_ID = object
|
|
23
|
+
|
|
24
|
+
version: str
|
|
25
|
+
__version__: str
|
|
26
|
+
__version_tuple__: VERSION_TUPLE
|
|
27
|
+
version_tuple: VERSION_TUPLE
|
|
28
|
+
commit_id: COMMIT_ID
|
|
29
|
+
__commit_id__: COMMIT_ID
|
|
30
|
+
|
|
31
|
+
__version__ = version = '0.1.0'
|
|
32
|
+
__version_tuple__ = version_tuple = (0, 1, 0)
|
|
33
|
+
|
|
34
|
+
__commit_id__ = commit_id = None
|
pipeguard/cli.py
ADDED
|
@@ -0,0 +1,92 @@
|
|
|
1
|
+
"""Entry point: pipeguard scan"""
|
|
2
|
+
|
|
3
|
+
from __future__ import annotations
|
|
4
|
+
|
|
5
|
+
import sys
|
|
6
|
+
from pathlib import Path
|
|
7
|
+
|
|
8
|
+
import click
|
|
9
|
+
|
|
10
|
+
from pipeguard import __version__
|
|
11
|
+
from pipeguard.output.formatter import Formatter, OutputFormat
|
|
12
|
+
from pipeguard.scanner.base import Finding
|
|
13
|
+
from pipeguard.scanner.github_actions.action_inventory import check_action_inventory
|
|
14
|
+
from pipeguard.scanner.github_actions.actionlint_runner import run_actionlint
|
|
15
|
+
from pipeguard.scanner.github_actions.cve_check import check_cve
|
|
16
|
+
from pipeguard.scanner.github_actions.permissions import check_permissions
|
|
17
|
+
from pipeguard.scanner.github_actions.pull_request_target import check_pull_request_target
|
|
18
|
+
from pipeguard.scanner.github_actions.secrets_flow import check_secrets_flow
|
|
19
|
+
from pipeguard.scanner.github_actions.sha_pinning import check_sha_pinning
|
|
20
|
+
from pipeguard.scanner.github_actions.supply_chain import check_supply_chain
|
|
21
|
+
|
|
22
|
+
_WORKFLOW_GLOB = ("*.yml", "*.yaml")
|
|
23
|
+
|
|
24
|
+
|
|
25
|
+
def _collect_workflows(path: str) -> list[Path]:
|
|
26
|
+
"""Return workflow files from a file path or directory."""
|
|
27
|
+
p = Path(path)
|
|
28
|
+
if p.is_file():
|
|
29
|
+
return [p]
|
|
30
|
+
files: list[Path] = []
|
|
31
|
+
for pattern in _WORKFLOW_GLOB:
|
|
32
|
+
files.extend(sorted(p.rglob(pattern)))
|
|
33
|
+
return files
|
|
34
|
+
|
|
35
|
+
|
|
36
|
+
def _scan_file(workflow: Path) -> list[Finding]:
    """Run every registered check against *workflow* and collect all findings."""
    # Checks run in a fixed order so output is stable across invocations.
    checks = (
        run_actionlint,
        check_sha_pinning,
        check_secrets_flow,
        check_supply_chain,
        check_permissions,
        check_pull_request_target,
        check_cve,
        check_action_inventory,
    )
    target = str(workflow)
    collected: list[Finding] = []
    for check in checks:
        collected.extend(check(target))
    return collected
|
|
47
|
+
|
|
48
|
+
|
|
49
|
+
# Top-level click command group; subcommands (e.g. `scan`) attach via
# @main.command().  `--version` is provided by click's version_option.
@click.group()
@click.version_option(__version__, prog_name="pipeguard")
def main() -> None:
    """PipeGuard — catch GitHub Actions security issues before they reach your runners."""
|
|
53
|
+
|
|
54
|
+
|
|
55
|
+
@main.command()
@click.argument("path", default=".github/workflows", type=click.Path(exists=True))
@click.option(
    "--format",
    "output_format",
    type=click.Choice(["terminal", "json", "sarif"]),
    default="terminal",
    show_default=True,
    help="Output format.",
)
@click.option("--fix", is_flag=True, help="Generate auto-fix suggestions.")
def scan(path: str, output_format: str, fix: bool) -> None:
    """Scan a workflow FILE or DIRECTORY (default: .github/workflows).

    Exits 0 when nothing (or only info-level findings) is found, 1 when any
    error- or warning-level finding exists — suitable for CI gating.
    """
    workflows = _collect_workflows(path)

    if not workflows:
        click.echo(f"[pipeguard] No workflow files found in '{path}'.", err=True)
        sys.exit(0)

    fmt = Formatter(OutputFormat(output_format))
    all_findings: list[Finding] = []

    for workflow in workflows:
        findings = _scan_file(workflow)
        all_findings.extend(findings)
        fmt.render(findings, str(workflow))

        # BUG FIX: --fix was previously parsed but never acted upon.
        if fix:
            # Function-scope import keeps autofix (and its deps) off the
            # startup path when --fix is not requested.
            from pipeguard.output.autofix import generate_fixes

            patch = generate_fixes(findings, str(workflow))
            if patch:
                click.echo(patch)

    # Per-severity totals only make sense when more than one file was scanned.
    if len(workflows) > 1:
        errors = sum(1 for f in all_findings if f.severity == "error")
        warnings = sum(1 for f in all_findings if f.severity == "warning")
        infos = sum(1 for f in all_findings if f.severity == "info")
        click.echo(
            f"\nScanned {len(workflows)} file(s) — "
            f"{errors} error(s), {warnings} warning(s), {infos} info(s) total."
        )

    # Non-zero exit for CI gating when anything actionable was found.
    if any(f.severity in ("error", "warning") for f in all_findings):
        sys.exit(1)
|
|
File without changes
|
|
@@ -0,0 +1,37 @@
|
|
|
1
|
+
"""Generates auto-fix suggestions for findings (e.g. SHA-pinning patches)."""
|
|
2
|
+
|
|
3
|
+
from __future__ import annotations
|
|
4
|
+
|
|
5
|
+
from pathlib import Path
|
|
6
|
+
from typing import TYPE_CHECKING
|
|
7
|
+
|
|
8
|
+
if TYPE_CHECKING:
|
|
9
|
+
from pipeguard.scanner.base import Finding
|
|
10
|
+
|
|
11
|
+
|
|
12
|
+
def generate_fixes(findings: list[Finding], workflow_path: str) -> str:
    """Return a unified-diff-style patch for all fixable findings.

    Currently supports: sha-pinning
    """
    source = Path(workflow_path).read_text()
    rewritten = source

    for finding in findings:
        if finding.rule == "sha-pinning" and finding.fix_suggestion:
            # Placeholder: real implementation would resolve the SHA via GitHub API.
            pass

    # Nothing was rewritten — no patch to emit.
    if rewritten == source:
        return ""

    # Emit the change as a unified diff against the original file.
    import difflib

    return "".join(
        difflib.unified_diff(
            source.splitlines(keepends=True),
            rewritten.splitlines(keepends=True),
            fromfile=f"a/{workflow_path}",
            tofile=f"b/{workflow_path}",
        )
    )
|
|
@@ -0,0 +1,133 @@
|
|
|
1
|
+
"""Renders findings in terminal, JSON, and SARIF formats."""
|
|
2
|
+
|
|
3
|
+
from __future__ import annotations
|
|
4
|
+
|
|
5
|
+
import json
|
|
6
|
+
from enum import StrEnum
|
|
7
|
+
from typing import TYPE_CHECKING
|
|
8
|
+
|
|
9
|
+
from rich import box
|
|
10
|
+
from rich.console import Console
|
|
11
|
+
from rich.table import Table
|
|
12
|
+
|
|
13
|
+
if TYPE_CHECKING:
|
|
14
|
+
from pipeguard.scanner.base import Finding
|
|
15
|
+
|
|
16
|
+
|
|
17
|
+
class OutputFormat(StrEnum):
    """Supported output renderers; values match the CLI ``--format`` choices."""

    TERMINAL = "terminal"
    JSON = "json"
    SARIF = "sarif"
|
|
21
|
+
|
|
22
|
+
|
|
23
|
+
class Formatter:
|
|
24
|
+
def __init__(self, fmt: OutputFormat = OutputFormat.TERMINAL) -> None:
|
|
25
|
+
self.fmt = fmt
|
|
26
|
+
self._console = Console()
|
|
27
|
+
|
|
28
|
+
def render(self, findings: list[Finding], workflow_path: str) -> None:
|
|
29
|
+
match self.fmt:
|
|
30
|
+
case OutputFormat.TERMINAL:
|
|
31
|
+
self._render_terminal(findings, workflow_path)
|
|
32
|
+
case OutputFormat.JSON:
|
|
33
|
+
self._render_json(findings)
|
|
34
|
+
case OutputFormat.SARIF:
|
|
35
|
+
self._render_sarif(findings, workflow_path)
|
|
36
|
+
|
|
37
|
+
# ------------------------------------------------------------------
|
|
38
|
+
# Terminal
|
|
39
|
+
# ------------------------------------------------------------------
|
|
40
|
+
|
|
41
|
+
def _render_terminal(self, findings: list[Finding], workflow_path: str) -> None:
|
|
42
|
+
self._console.print(f"\n[bold]Scanning:[/bold] {workflow_path}")
|
|
43
|
+
if not findings:
|
|
44
|
+
self._console.print("[green] ✓ No issues found[/green]")
|
|
45
|
+
return
|
|
46
|
+
|
|
47
|
+
table = Table(box=box.ROUNDED, show_lines=True, expand=False)
|
|
48
|
+
table.add_column("Severity", style="bold", width=9, no_wrap=True)
|
|
49
|
+
table.add_column("Rule", style="cyan", width=22, no_wrap=True)
|
|
50
|
+
table.add_column("Location", width=26, no_wrap=True)
|
|
51
|
+
table.add_column("Message", max_width=60)
|
|
52
|
+
|
|
53
|
+
for f in findings:
|
|
54
|
+
sev_style = {"error": "red", "warning": "yellow", "info": "blue"}.get(
|
|
55
|
+
f.severity, "white"
|
|
56
|
+
)
|
|
57
|
+
table.add_row(
|
|
58
|
+
f"[{sev_style}]{f.severity}[/{sev_style}]",
|
|
59
|
+
f.rule,
|
|
60
|
+
f"{f.file}:{f.line}",
|
|
61
|
+
f.message + (f"\n[dim]Fix: {f.fix_suggestion}[/dim]" if f.fix_suggestion else ""),
|
|
62
|
+
)
|
|
63
|
+
|
|
64
|
+
self._console.print(table)
|
|
65
|
+
self._console.print(
|
|
66
|
+
f"\n[bold]{'[red]' if any(f.severity == 'error' for f in findings) else '[yellow]'}"
|
|
67
|
+
f"{len(findings)} issue(s) found.[/bold]"
|
|
68
|
+
)
|
|
69
|
+
|
|
70
|
+
# ------------------------------------------------------------------
|
|
71
|
+
# JSON
|
|
72
|
+
# ------------------------------------------------------------------
|
|
73
|
+
|
|
74
|
+
def _render_json(self, findings: list[Finding]) -> None:
|
|
75
|
+
output = [
|
|
76
|
+
{
|
|
77
|
+
"rule": f.rule,
|
|
78
|
+
"severity": f.severity,
|
|
79
|
+
"message": f.message,
|
|
80
|
+
"file": f.file,
|
|
81
|
+
"line": f.line,
|
|
82
|
+
"col": f.col,
|
|
83
|
+
"fix_suggestion": f.fix_suggestion,
|
|
84
|
+
}
|
|
85
|
+
for f in findings
|
|
86
|
+
]
|
|
87
|
+
print(json.dumps(output, indent=2))
|
|
88
|
+
|
|
89
|
+
# ------------------------------------------------------------------
|
|
90
|
+
# SARIF 2.1.0
|
|
91
|
+
# ------------------------------------------------------------------
|
|
92
|
+
|
|
93
|
+
def _render_sarif(self, findings: list[Finding], workflow_path: str) -> None:
|
|
94
|
+
results = []
|
|
95
|
+
rules: dict[str, dict[str, object]] = {}
|
|
96
|
+
|
|
97
|
+
for f in findings:
|
|
98
|
+
rules[f.rule] = {
|
|
99
|
+
"id": f.rule,
|
|
100
|
+
"shortDescription": {"text": f.rule},
|
|
101
|
+
}
|
|
102
|
+
results.append(
|
|
103
|
+
{
|
|
104
|
+
"ruleId": f.rule,
|
|
105
|
+
"level": "error" if f.severity == "error" else "warning",
|
|
106
|
+
"message": {"text": f.message},
|
|
107
|
+
"locations": [
|
|
108
|
+
{
|
|
109
|
+
"physicalLocation": {
|
|
110
|
+
"artifactLocation": {"uri": f.file},
|
|
111
|
+
"region": {"startLine": f.line, "startColumn": f.col},
|
|
112
|
+
}
|
|
113
|
+
}
|
|
114
|
+
],
|
|
115
|
+
}
|
|
116
|
+
)
|
|
117
|
+
|
|
118
|
+
sarif = {
|
|
119
|
+
"version": "2.1.0",
|
|
120
|
+
"$schema": "https://json.schemastore.org/sarif-2.1.0.json",
|
|
121
|
+
"runs": [
|
|
122
|
+
{
|
|
123
|
+
"tool": {
|
|
124
|
+
"driver": {
|
|
125
|
+
"name": "pipeguard",
|
|
126
|
+
"rules": list(rules.values()),
|
|
127
|
+
}
|
|
128
|
+
},
|
|
129
|
+
"results": results,
|
|
130
|
+
}
|
|
131
|
+
],
|
|
132
|
+
}
|
|
133
|
+
print(json.dumps(sarif, indent=2))
|
|
@@ -0,0 +1,24 @@
|
|
|
1
|
+
"""Base types shared across all scanner modules and platforms."""
|
|
2
|
+
|
|
3
|
+
from __future__ import annotations
|
|
4
|
+
|
|
5
|
+
from abc import ABC, abstractmethod
|
|
6
|
+
from dataclasses import dataclass
|
|
7
|
+
|
|
8
|
+
|
|
9
|
+
@dataclass
class Finding:
    """A single issue located in a workflow file by one of the scanners."""

    rule: str  # stable rule identifier, e.g. "sha-pinning", "action-inventory"
    message: str  # human-readable description of the issue
    file: str  # path of the workflow file the issue was found in
    line: int  # 1-based line number (0 when unknown)
    col: int  # column number (0 when unknown)
    severity: str = "error"  # "error" | "warning" | "info"
    fix_suggestion: str | None = None  # optional remediation hint shown to the user


class BaseScanner(ABC):
    """Abstract interface every scanner implementation conforms to."""

    @abstractmethod
    def check(self, workflow_path: str) -> list[Finding]:
        """Run this scanner against a single workflow file."""
        ...
|
|
18
|
+
|
|
19
|
+
|
|
20
|
+
class BaseScanner(ABC):
|
|
21
|
+
@abstractmethod
|
|
22
|
+
def check(self, workflow_path: str) -> list[Finding]:
|
|
23
|
+
"""Run this scanner against a single workflow file."""
|
|
24
|
+
...
|
|
File without changes
|
|
@@ -0,0 +1,59 @@
|
|
|
1
|
+
"""Lists all third-party Actions used in a workflow as an inventory."""
|
|
2
|
+
|
|
3
|
+
from __future__ import annotations
|
|
4
|
+
|
|
5
|
+
import re
|
|
6
|
+
from collections import defaultdict
|
|
7
|
+
from pathlib import Path
|
|
8
|
+
|
|
9
|
+
import yaml
|
|
10
|
+
|
|
11
|
+
from pipeguard.scanner.base import Finding
|
|
12
|
+
|
|
13
|
+
_USES_RE = re.compile(r"^(?P<action>[^@]+)@(?P<ref>.+)$")
|
|
14
|
+
|
|
15
|
+
|
|
16
|
+
def check_action_inventory(workflow_path: str) -> list[Finding]:
    """Return one info-level finding per unique third-party action used.

    Each finding lists every ref the action is pinned to and how many times
    it occurs, giving a quick supply-chain inventory of the workflow.
    """
    # Workflow files are UTF-8; relying on the platform's locale encoding
    # (e.g. cp1252 on Windows) could raise UnicodeDecodeError on non-ASCII
    # content, so the encoding is pinned explicitly.
    text = Path(workflow_path).read_text(encoding="utf-8")
    data = yaml.safe_load(text)
    lines = text.splitlines()

    # action_slug -> list of (ref, line_no)
    inventory: dict[str, list[tuple[str, int]]] = defaultdict(list)

    jobs = data.get("jobs", {}) if isinstance(data, dict) else {}
    for _job_id, job in jobs.items():
        steps = job.get("steps", []) if isinstance(job, dict) else []
        for step in steps:
            uses = step.get("uses", "") if isinstance(step, dict) else ""
            if not uses or uses.startswith("./"):
                continue  # local (./) actions are first-party; skip them
            m = _USES_RE.match(uses)
            if not m:
                continue  # no "@ref" part; nothing to inventory
            action, ref = m.group("action"), m.group("ref")
            # Best-effort source location: first line containing the exact
            # "uses" string (0 when not found).
            line_no = next(
                (i + 1 for i, line in enumerate(lines) if uses in line), 0
            )
            inventory[action].append((ref, line_no))

    findings: list[Finding] = []
    for action, occurrences in sorted(inventory.items()):
        refs = sorted({ref for ref, _ in occurrences})
        first_line = occurrences[0][1]
        findings.append(
            Finding(
                rule="action-inventory",
                message=(
                    f"Action '{action}' used with ref(s): "
                    + ", ".join(f"'{r}'" for r in refs)
                    + f" ({len(occurrences)} occurrence(s))."
                ),
                file=workflow_path,
                line=first_line,
                col=0,
                severity="info",
            )
        )
    return findings
|
|
@@ -0,0 +1,43 @@
|
|
|
1
|
+
"""Runs actionlint as a subprocess and wraps its output as PipeGuard findings."""
|
|
2
|
+
|
|
3
|
+
from __future__ import annotations
|
|
4
|
+
|
|
5
|
+
import json
|
|
6
|
+
import subprocess
|
|
7
|
+
|
|
8
|
+
from pipeguard.scanner.base import Finding
|
|
9
|
+
|
|
10
|
+
|
|
11
|
+
def run_actionlint(workflow_path: str) -> list[Finding]:
    """Run actionlint on *workflow_path* and return findings.

    Returns an empty list if actionlint is not installed or produces output
    that cannot be parsed — fail loud via a warning, but do not block the
    other checks.
    """
    try:
        result = subprocess.run(
            ["actionlint", "-format", "{{json .}}", workflow_path],
            capture_output=True,
            text=True,
        )
    except FileNotFoundError:
        print("[pipeguard] WARNING: actionlint not found. Install it for full analysis.")
        return []

    if not result.stdout.strip():
        return []

    # BUG FIX: actionlint can print non-JSON text to stdout (usage/fatal
    # errors); previously json.loads raised JSONDecodeError and aborted the
    # entire scan instead of just skipping this check.
    try:
        raw: list[dict[str, int | str]] = json.loads(result.stdout)
    except json.JSONDecodeError:
        print("[pipeguard] WARNING: could not parse actionlint output; skipping this check.")
        return []

    findings: list[Finding] = []
    for item in raw:
        findings.append(
            Finding(
                rule=str(item.get("kind", "actionlint")),
                message=str(item.get("message", "")),
                file=str(item.get("filepath", workflow_path)),
                line=int(item.get("line", 0)),
                col=int(item.get("column", 0)),
                severity="error",
            )
        )
    return findings
|
|
@@ -0,0 +1,88 @@
|
|
|
1
|
+
"""Checks used Actions against a local database of known critical CVEs."""
|
|
2
|
+
|
|
3
|
+
from __future__ import annotations
|
|
4
|
+
|
|
5
|
+
import json
|
|
6
|
+
import re
|
|
7
|
+
from pathlib import Path
|
|
8
|
+
from typing import TypedDict
|
|
9
|
+
|
|
10
|
+
import yaml
|
|
11
|
+
|
|
12
|
+
from pipeguard.scanner.base import Finding
|
|
13
|
+
|
|
14
|
+
_USES_RE = re.compile(r"^(?P<action>[^@]+)@(?P<ref>.+)$")
|
|
15
|
+
_SHA_RE = re.compile(r"^[0-9a-f]{40}$")
|
|
16
|
+
|
|
17
|
+
_CVE_DB_PATH = Path(__file__).parent / "cve_db.json"
|
|
18
|
+
|
|
19
|
+
|
|
20
|
+
class CveRecord(TypedDict):
    """One entry of the bundled CVE database (``cve_db.json``)."""

    action: str  # action slug, e.g. "tj-actions/changed-files"
    cve_id: str  # e.g. "CVE-2025-30066"
    description: str  # human-readable summary included in the finding message
    affected_refs: list[str]  # "all_tags" | specific tag/SHA
    advisory_url: str  # link to the upstream advisory
|
|
26
|
+
|
|
27
|
+
|
|
28
|
+
def _load_cve_db() -> dict[str, list[CveRecord]]:
    """Load CVE database from the bundled JSON file, keyed by action (lowercase)."""
    grouped: dict[str, list[CveRecord]] = {}
    for entry in json.loads(_CVE_DB_PATH.read_text()):
        # Lowercase the slug so lookups are case-insensitive.
        grouped.setdefault(entry["action"].lower(), []).append(entry)
    return grouped
|
|
36
|
+
|
|
37
|
+
|
|
38
|
+
def check_cve(workflow_path: str) -> list[Finding]:
    """Return findings for actions that match known CVEs in the local database."""
    db = _load_cve_db()

    source = Path(workflow_path).read_text()
    parsed = yaml.safe_load(source)
    source_lines = source.splitlines()

    findings: list[Finding] = []
    jobs = parsed.get("jobs", {}) if isinstance(parsed, dict) else {}
    for job in jobs.values():
        steps = job.get("steps", []) if isinstance(job, dict) else []
        for step in steps:
            uses = step.get("uses", "") if isinstance(step, dict) else ""
            if not uses or uses.startswith("./"):
                continue
            match = _USES_RE.match(uses)
            if match is None:
                continue
            action = match.group("action")
            ref = match.group("ref")

            for record in db.get(action.lower(), []):
                # A non-SHA ref (tag/branch) matches records flagged "all_tags".
                ref_is_tag = _SHA_RE.match(ref) is None
                affected = record["affected_refs"]
                if not (("all_tags" in affected and ref_is_tag) or ref in affected):
                    continue

                # Best-effort location: first line containing the "uses" string.
                line_no = next(
                    (idx + 1 for idx, text in enumerate(source_lines) if uses in text), 0
                )
                cve_id = record["cve_id"]
                findings.append(
                    Finding(
                        rule=f"cve-{cve_id.lower()}",
                        message=(
                            f"Action '{action}@{ref}' is affected by {cve_id}: "
                            f"{record['description']}"
                        ),
                        file=workflow_path,
                        line=line_no,
                        col=0,
                        severity="error",
                        fix_suggestion=(
                            f"Immediately pin '{action}' to a verified safe SHA. "
                            f"See {record['advisory_url']}"
                        ),
                    )
                )
    return findings
|
|
@@ -0,0 +1,110 @@
|
|
|
1
|
+
[
|
|
2
|
+
{
|
|
3
|
+
"action": "tj-actions/changed-files",
|
|
4
|
+
"cve_id": "CVE-2025-30066",
|
|
5
|
+
"description": "tj-actions/changed-files was compromised in a supply-chain attack. All tag-based refs are potentially affected \u2014 pin to a known-good SHA.",
|
|
6
|
+
"affected_refs": [
|
|
7
|
+
"all_tags"
|
|
8
|
+
],
|
|
9
|
+
"advisory_url": "https://www.cve.org/CVERecord?id=CVE-2025-30066"
|
|
10
|
+
},
|
|
11
|
+
{
|
|
12
|
+
"action": "reviewdog/action-setup",
|
|
13
|
+
"cve_id": "CVE-2025-30154",
|
|
14
|
+
"description": "reviewdog/action-setup was compromised in the same supply-chain campaign as tj-actions/changed-files. Pin to a vetted SHA.",
|
|
15
|
+
"affected_refs": [
|
|
16
|
+
"all_tags"
|
|
17
|
+
],
|
|
18
|
+
"advisory_url": "https://www.cve.org/CVERecord?id=CVE-2025-30154"
|
|
19
|
+
},
|
|
20
|
+
{
|
|
21
|
+
"action": "tj-actions/branch-names",
|
|
22
|
+
"cve_id": "CVE-2023-49291",
|
|
23
|
+
"description": "tj-actions/branch-names improperly references github.event.pull_request.head.ref and github.head_ref in a run step, allowing a specially crafted branch name to execute arbitrary code and steal secrets or abuse GITHUB_TOKEN permissions.",
|
|
24
|
+
"affected_refs": [
|
|
25
|
+
"all_tags"
|
|
26
|
+
],
|
|
27
|
+
"advisory_url": "https://www.cve.org/CVERecord?id=CVE-2023-49291"
|
|
28
|
+
},
|
|
29
|
+
{
|
|
30
|
+
"action": "PENDING_REVIEW",
|
|
31
|
+
"cve_id": "CVE-2023-24538",
|
|
32
|
+
"description": "Templates do not properly consider backticks (`) as Javascript string delimiters, and do not escape them as expected. Backticks are used, since ES6, for JS template literals. If a template contains a Go template action within a Javascript template literal, the contents of the action can be used to terminate the literal, injecting arbitrary Javascript code into the Go template. As ES6 template literals are rather complex, and themselves can do string interpolation, the decision was made to simply disallow Go template actions from being used inside of them (e.g. \"var a = {{.}}\"), since there is no obviously safe way to allow this behavior. This takes the same approach as github.com/google/safehtml. With fix, Template.Parse returns an Error when it encounters templates like this, with an ErrorCode of value 12. This ErrorCode is currently unexported, but will be exported in the release of Go 1.21. Users who rely on the previous behavior can re-enable it using the GODEBUG flag jstmpllitinterp=1, with the caveat that backticks will now be escaped. This should be used with caution.",
|
|
33
|
+
"affected_refs": [
|
|
34
|
+
"all_tags"
|
|
35
|
+
],
|
|
36
|
+
"advisory_url": "https://www.cve.org/CVERecord?id=CVE-2023-24538"
|
|
37
|
+
},
|
|
38
|
+
{
|
|
39
|
+
"action": "PENDING_REVIEW",
|
|
40
|
+
"cve_id": "CVE-2023-29453",
|
|
41
|
+
"description": "Templates do not properly consider backticks (`) as Javascript string delimiters, and do not escape them as expected. Backticks are used, since ES6, for JS template literals. If a template contains a Go template action within a Javascript template literal, the contents of the action can be used to terminate the literal, injecting arbitrary Javascript code into the Go template. As ES6 template literals are rather complex, and themselves can do string interpolation, the decision was made to simply disallow Go template actions from being used inside of them (e.g., \"var a = {{.}}\"), since there is no obviously safe way to allow this behavior. This takes the same approach as github.com/google/safehtml. With fix, Template. Parse returns an Error when it encounters templates like this, with an ErrorCode of value 12. This ErrorCode is currently unexported but will be exported in the release of Go 1.21. Users who rely on the previous behavior can re-enable it using the GODEBUG flag jstmpllitinterp=1, with the caveat that backticks will now be escaped. This should be used with caution.",
|
|
42
|
+
"affected_refs": [
|
|
43
|
+
"all_tags"
|
|
44
|
+
],
|
|
45
|
+
"advisory_url": "https://www.cve.org/CVERecord?id=CVE-2023-29453"
|
|
46
|
+
},
|
|
47
|
+
{
|
|
48
|
+
"action": "PENDING_REVIEW",
|
|
49
|
+
"cve_id": "CVE-2024-1355",
|
|
50
|
+
"description": "A command injection vulnerability was identified in GitHub Enterprise Server that allowed an attacker with an editor role in the Management Console to gain admin SSH access to the appliance via the actions-console\u00a0docker container while setting a service URL. Exploitation of this vulnerability required access to the GitHub Enterprise Server instance and access to the Management Console with the editor role. This vulnerability affected all versions of GitHub Enterprise Server prior to 3.12 and was fixed in versions 3.11.5, 3.10.7, 3.9.10, and 3.8.15. This vulnerability was reported via the GitHub Bug Bounty program.\n",
|
|
51
|
+
"affected_refs": [
|
|
52
|
+
"all_tags"
|
|
53
|
+
],
|
|
54
|
+
"advisory_url": "https://www.cve.org/CVERecord?id=CVE-2024-1355"
|
|
55
|
+
},
|
|
56
|
+
{
|
|
57
|
+
"action": "PENDING_REVIEW",
|
|
58
|
+
"cve_id": "CVE-2024-39700",
|
|
59
|
+
"description": "JupyterLab extension template is a `copier` template for JupyterLab extensions. Repositories created using this template with `test` option include `update-integration-tests.yml` workflow which has an RCE vulnerability. Extension authors hosting their code on GitHub are urged to upgrade the template to the latest version. Users who made changes to `update-integration-tests.yml`, accept overwriting of this file and re-apply your changes later. Users may wish to temporarily disable GitHub Actions while working on the upgrade. We recommend rebasing all open pull requests from untrusted users as actions may run using the version from the `main` branch at the time when the pull request was created. Users who are upgrading from template version prior to 4.3.0 may wish to leave out proposed changes to the release workflow for now as it requires additional configuration.",
|
|
60
|
+
"affected_refs": [
|
|
61
|
+
"all_tags"
|
|
62
|
+
],
|
|
63
|
+
"advisory_url": "https://www.cve.org/CVERecord?id=CVE-2024-39700"
|
|
64
|
+
},
|
|
65
|
+
{
|
|
66
|
+
"action": "PENDING_REVIEW",
|
|
67
|
+
"cve_id": "CVE-2024-52009",
|
|
68
|
+
"description": "Atlantis is a self-hosted golang application that listens for Terraform pull request events via webhooks. Atlantis logs contains GitHub credentials (tokens `ghs_...`) when they are rotated. This enables an attacker able to read these logs to impersonate Atlantis application and to perform actions on GitHub. When Atlantis is used to administer a GitHub organization, this enables getting administration privileges on the organization. This was reported in #4060 and fixed in #4667 . The fix was included in Atlantis v0.30.0. All users are advised to upgrade. There are no known workarounds for this vulnerability.",
|
|
69
|
+
"affected_refs": [
|
|
70
|
+
"all_tags"
|
|
71
|
+
],
|
|
72
|
+
"advisory_url": "https://www.cve.org/CVERecord?id=CVE-2024-52009"
|
|
73
|
+
},
|
|
74
|
+
{
|
|
75
|
+
"action": "PENDING_REVIEW",
|
|
76
|
+
"cve_id": "CVE-2025-58371",
|
|
77
|
+
"description": "Roo Code is an AI-powered autonomous coding agent that lives in users' editors. In versions 3.26.6 and below, a Github workflow used unsanitized pull request metadata in a privileged context, allowing an attacker to craft malicious input and achieve Remote Code Execution (RCE) on the Actions runner. The workflow runs with broad permissions and access to repository secrets. It is possible for an attacker to execute arbitrary commands on the runner, push or modify code in the repository, access secrets, and create malicious releases or packages, resulting in a complete compromise of the repository and its associated services. This is fixed in version 3.26.7.",
|
|
78
|
+
"affected_refs": [
|
|
79
|
+
"all_tags"
|
|
80
|
+
],
|
|
81
|
+
"advisory_url": "https://www.cve.org/CVERecord?id=CVE-2025-58371"
|
|
82
|
+
},
|
|
83
|
+
{
|
|
84
|
+
"action": "PENDING_REVIEW",
|
|
85
|
+
"cve_id": "CVE-2025-11892",
|
|
86
|
+
"description": "An improper neutralization of input vulnerability was identified in GitHub Enterprise Server that allows DOM-based cross-site scripting via Issues search label filter that could lead to privilege escalation and unauthorized workflow triggers. Successful exploitation requires an attacker to have access to the target GitHub Enterprise Server instance and to entice a user, while operating in sudo mode, to click on a crafted malicious link to perform actions that require elevated privileges.\u00a0This vulnerability affected all versions of GitHub Enterprise Server prior to\u00a03.18.1, 3.17.7, 3.16.10, 3.15.14, 3.14.19.",
|
|
87
|
+
"affected_refs": [
|
|
88
|
+
"all_tags"
|
|
89
|
+
],
|
|
90
|
+
"advisory_url": "https://www.cve.org/CVERecord?id=CVE-2025-11892"
|
|
91
|
+
},
|
|
92
|
+
{
|
|
93
|
+
"action": "PENDING_REVIEW",
|
|
94
|
+
"cve_id": "CVE-2025-67727",
|
|
95
|
+
"description": "Parse Server is an open source backend that can be deployed to any infrastructure that runs Node.js. In versions prior to 8.6.0-alpha.2, a GitHub CI workflow is triggered in a way that grants the GitHub Actions workflow elevated permissions, giving it access to GitHub secrets and write permissions which are defined in the workflow. Code from a fork or lifecycle scripts is potentially included. Only the repository's CI/CD infrastructure is affected, including any public GitHub forks with GitHub Actions enabled. This issue is fixed version 8.6.0-alpha.2 and commits 6b9f896 and e3d27fe.",
|
|
96
|
+
"affected_refs": [
|
|
97
|
+
"all_tags"
|
|
98
|
+
],
|
|
99
|
+
"advisory_url": "https://www.cve.org/CVERecord?id=CVE-2025-67727"
|
|
100
|
+
},
|
|
101
|
+
{
|
|
102
|
+
"action": "PENDING_REVIEW",
|
|
103
|
+
"cve_id": "CVE-2026-0573",
|
|
104
|
+
"description": "An URL redirection vulnerability was identified in GitHub Enterprise Server that allowed attacker-controlled redirects to leak sensitive authorization tokens. The repository_pages API insecurely followed HTTP redirects when fetching artifact URLs, preserving the authorization header containing a privileged JWT. An authenticated user could redirect these requests to an attacker-controlled domain, exfiltrate the Actions.ManageOrgs JWT, and leverage it for potential remote code execution. Attackers would require access to the target GitHub Enterprise Server instance and the ability to exploit a legacy redirect to an attacker-controlled domain. This vulnerability affected all versions of GitHub Enterprise Server prior to 3.19 and was fixed in versions 3.19.2, 3.18.4, 3.17.10, 3.16.13, 3.15.17, and 3.14.22. This vulnerability was reported via the GitHub Bug Bounty program.",
|
|
105
|
+
"affected_refs": [
|
|
106
|
+
"all_tags"
|
|
107
|
+
],
|
|
108
|
+
"advisory_url": "https://www.cve.org/CVERecord?id=CVE-2026-0573"
|
|
109
|
+
}
|
|
110
|
+
]
|