cisco-ai-skill-scanner 1.0.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- cisco_ai_skill_scanner-1.0.0.dist-info/METADATA +253 -0
- cisco_ai_skill_scanner-1.0.0.dist-info/RECORD +100 -0
- cisco_ai_skill_scanner-1.0.0.dist-info/WHEEL +4 -0
- cisco_ai_skill_scanner-1.0.0.dist-info/entry_points.txt +4 -0
- cisco_ai_skill_scanner-1.0.0.dist-info/licenses/LICENSE +17 -0
- skillanalyzer/__init__.py +45 -0
- skillanalyzer/_version.py +34 -0
- skillanalyzer/api/__init__.py +25 -0
- skillanalyzer/api/api.py +34 -0
- skillanalyzer/api/api_cli.py +78 -0
- skillanalyzer/api/api_server.py +634 -0
- skillanalyzer/api/router.py +527 -0
- skillanalyzer/cli/__init__.py +25 -0
- skillanalyzer/cli/cli.py +816 -0
- skillanalyzer/config/__init__.py +26 -0
- skillanalyzer/config/config.py +149 -0
- skillanalyzer/config/config_parser.py +122 -0
- skillanalyzer/config/constants.py +85 -0
- skillanalyzer/core/__init__.py +24 -0
- skillanalyzer/core/analyzers/__init__.py +75 -0
- skillanalyzer/core/analyzers/aidefense_analyzer.py +872 -0
- skillanalyzer/core/analyzers/base.py +53 -0
- skillanalyzer/core/analyzers/behavioral/__init__.py +30 -0
- skillanalyzer/core/analyzers/behavioral/alignment/__init__.py +45 -0
- skillanalyzer/core/analyzers/behavioral/alignment/alignment_llm_client.py +240 -0
- skillanalyzer/core/analyzers/behavioral/alignment/alignment_orchestrator.py +216 -0
- skillanalyzer/core/analyzers/behavioral/alignment/alignment_prompt_builder.py +422 -0
- skillanalyzer/core/analyzers/behavioral/alignment/alignment_response_validator.py +136 -0
- skillanalyzer/core/analyzers/behavioral/alignment/threat_vulnerability_classifier.py +198 -0
- skillanalyzer/core/analyzers/behavioral_analyzer.py +453 -0
- skillanalyzer/core/analyzers/cross_skill_analyzer.py +490 -0
- skillanalyzer/core/analyzers/llm_analyzer.py +440 -0
- skillanalyzer/core/analyzers/llm_prompt_builder.py +270 -0
- skillanalyzer/core/analyzers/llm_provider_config.py +215 -0
- skillanalyzer/core/analyzers/llm_request_handler.py +284 -0
- skillanalyzer/core/analyzers/llm_response_parser.py +81 -0
- skillanalyzer/core/analyzers/meta_analyzer.py +845 -0
- skillanalyzer/core/analyzers/static.py +1105 -0
- skillanalyzer/core/analyzers/trigger_analyzer.py +341 -0
- skillanalyzer/core/analyzers/virustotal_analyzer.py +463 -0
- skillanalyzer/core/exceptions.py +77 -0
- skillanalyzer/core/loader.py +377 -0
- skillanalyzer/core/models.py +300 -0
- skillanalyzer/core/reporters/__init__.py +26 -0
- skillanalyzer/core/reporters/json_reporter.py +65 -0
- skillanalyzer/core/reporters/markdown_reporter.py +209 -0
- skillanalyzer/core/reporters/sarif_reporter.py +246 -0
- skillanalyzer/core/reporters/table_reporter.py +195 -0
- skillanalyzer/core/rules/__init__.py +19 -0
- skillanalyzer/core/rules/patterns.py +165 -0
- skillanalyzer/core/rules/yara_scanner.py +157 -0
- skillanalyzer/core/scanner.py +437 -0
- skillanalyzer/core/static_analysis/__init__.py +27 -0
- skillanalyzer/core/static_analysis/cfg/__init__.py +21 -0
- skillanalyzer/core/static_analysis/cfg/builder.py +439 -0
- skillanalyzer/core/static_analysis/context_extractor.py +742 -0
- skillanalyzer/core/static_analysis/dataflow/__init__.py +25 -0
- skillanalyzer/core/static_analysis/dataflow/forward_analysis.py +715 -0
- skillanalyzer/core/static_analysis/interprocedural/__init__.py +21 -0
- skillanalyzer/core/static_analysis/interprocedural/call_graph_analyzer.py +406 -0
- skillanalyzer/core/static_analysis/interprocedural/cross_file_analyzer.py +190 -0
- skillanalyzer/core/static_analysis/parser/__init__.py +21 -0
- skillanalyzer/core/static_analysis/parser/python_parser.py +380 -0
- skillanalyzer/core/static_analysis/semantic/__init__.py +28 -0
- skillanalyzer/core/static_analysis/semantic/name_resolver.py +206 -0
- skillanalyzer/core/static_analysis/semantic/type_analyzer.py +200 -0
- skillanalyzer/core/static_analysis/taint/__init__.py +21 -0
- skillanalyzer/core/static_analysis/taint/tracker.py +252 -0
- skillanalyzer/core/static_analysis/types/__init__.py +36 -0
- skillanalyzer/data/__init__.py +30 -0
- skillanalyzer/data/prompts/boilerplate_protection_rule_prompt.md +26 -0
- skillanalyzer/data/prompts/code_alignment_threat_analysis_prompt.md +901 -0
- skillanalyzer/data/prompts/llm_response_schema.json +71 -0
- skillanalyzer/data/prompts/skill_meta_analysis_prompt.md +303 -0
- skillanalyzer/data/prompts/skill_threat_analysis_prompt.md +263 -0
- skillanalyzer/data/prompts/unified_response_schema.md +97 -0
- skillanalyzer/data/rules/signatures.yaml +440 -0
- skillanalyzer/data/yara_rules/autonomy_abuse.yara +66 -0
- skillanalyzer/data/yara_rules/code_execution.yara +61 -0
- skillanalyzer/data/yara_rules/coercive_injection.yara +115 -0
- skillanalyzer/data/yara_rules/command_injection.yara +54 -0
- skillanalyzer/data/yara_rules/credential_harvesting.yara +115 -0
- skillanalyzer/data/yara_rules/prompt_injection.yara +71 -0
- skillanalyzer/data/yara_rules/script_injection.yara +83 -0
- skillanalyzer/data/yara_rules/skill_discovery_abuse.yara +57 -0
- skillanalyzer/data/yara_rules/sql_injection.yara +73 -0
- skillanalyzer/data/yara_rules/system_manipulation.yara +65 -0
- skillanalyzer/data/yara_rules/tool_chaining_abuse.yara +60 -0
- skillanalyzer/data/yara_rules/transitive_trust_abuse.yara +73 -0
- skillanalyzer/data/yara_rules/unicode_steganography.yara +65 -0
- skillanalyzer/hooks/__init__.py +21 -0
- skillanalyzer/hooks/pre_commit.py +450 -0
- skillanalyzer/threats/__init__.py +25 -0
- skillanalyzer/threats/threats.py +480 -0
- skillanalyzer/utils/__init__.py +28 -0
- skillanalyzer/utils/command_utils.py +129 -0
- skillanalyzer/utils/di_container.py +154 -0
- skillanalyzer/utils/file_utils.py +86 -0
- skillanalyzer/utils/logging_config.py +96 -0
- skillanalyzer/utils/logging_utils.py +71 -0
|
@@ -0,0 +1,65 @@
|
|
|
1
|
+
# Copyright 2026 Cisco Systems, Inc.
|
|
2
|
+
#
|
|
3
|
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
4
|
+
# you may not use this file except in compliance with the License.
|
|
5
|
+
# You may obtain a copy of the License at
|
|
6
|
+
#
|
|
7
|
+
# http://www.apache.org/licenses/LICENSE-2.0
|
|
8
|
+
#
|
|
9
|
+
# Unless required by applicable law or agreed to in writing, software
|
|
10
|
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
11
|
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
12
|
+
# See the License for the specific language governing permissions and
|
|
13
|
+
# limitations under the License.
|
|
14
|
+
#
|
|
15
|
+
# SPDX-License-Identifier: Apache-2.0
|
|
16
|
+
|
|
17
|
+
"""
|
|
18
|
+
JSON format reporter for scan results.
|
|
19
|
+
"""
|
|
20
|
+
|
|
21
|
+
import json
|
|
22
|
+
|
|
23
|
+
from ...core.models import Report, ScanResult
|
|
24
|
+
|
|
25
|
+
|
|
26
|
+
class JSONReporter:
    """Serializes scan results to JSON strings and files."""

    def __init__(self, pretty: bool = True):
        """
        Initialize the JSON reporter.

        Args:
            pretty: When True, emit indented, human-readable JSON.
        """
        self.pretty = pretty

    def generate_report(self, data: ScanResult | Report) -> str:
        """
        Render *data* as a JSON string.

        Args:
            data: ScanResult or Report object

        Returns:
            JSON string
        """
        payload = data.to_dict()
        # indent=None is json.dumps' compact default; default=str stringifies
        # any non-JSON-native values (e.g. datetimes) in the report dict.
        indent = 2 if self.pretty else None
        return json.dumps(payload, indent=indent, default=str)

    def save_report(self, data: ScanResult | Report, output_path: str):
        """
        Write the JSON report to a file.

        Args:
            data: ScanResult or Report object
            output_path: Path to save file
        """
        serialized = self.generate_report(data)
        with open(output_path, "w", encoding="utf-8") as handle:
            handle.write(serialized)
|
|
@@ -0,0 +1,209 @@
|
|
|
1
|
+
# Copyright 2026 Cisco Systems, Inc.
|
|
2
|
+
#
|
|
3
|
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
4
|
+
# you may not use this file except in compliance with the License.
|
|
5
|
+
# You may obtain a copy of the License at
|
|
6
|
+
#
|
|
7
|
+
# http://www.apache.org/licenses/LICENSE-2.0
|
|
8
|
+
#
|
|
9
|
+
# Unless required by applicable law or agreed to in writing, software
|
|
10
|
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
11
|
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
12
|
+
# See the License for the specific language governing permissions and
|
|
13
|
+
# limitations under the License.
|
|
14
|
+
#
|
|
15
|
+
# SPDX-License-Identifier: Apache-2.0
|
|
16
|
+
|
|
17
|
+
"""
|
|
18
|
+
Markdown format reporter for scan results.
|
|
19
|
+
"""
|
|
20
|
+
|
|
21
|
+
from ...core.models import Finding, Report, ScanResult, Severity
|
|
22
|
+
|
|
23
|
+
|
|
24
|
+
class MarkdownReporter:
    """Renders scan results as Markdown documents."""

    def __init__(self, detailed: bool = True):
        """
        Initialize the Markdown reporter.

        Args:
            detailed: If True, include full finding details (snippets, remediation)
        """
        self.detailed = detailed

    def generate_report(self, data: ScanResult | Report) -> str:
        """
        Generate a Markdown report for a single scan or a multi-skill report.

        Args:
            data: ScanResult or Report object

        Returns:
            Markdown string
        """
        if isinstance(data, ScanResult):
            return self._generate_scan_result_report(data)
        return self._generate_multi_skill_report(data)

    def _generate_scan_result_report(self, result: ScanResult) -> str:
        """Render the report body for one scanned skill."""
        status = "[OK] SAFE" if result.is_safe else "[FAIL] ISSUES FOUND"

        out = [
            "# Claude Skill Security Scan Report",
            "",
            f"**Skill:** {result.skill_name}",
            f"**Directory:** {result.skill_directory}",
            f"**Status:** {status}",
            f"**Max Severity:** {result.max_severity.value}",
            f"**Scan Duration:** {result.scan_duration_seconds:.2f}s",
            f"**Timestamp:** {result.timestamp.isoformat()}",
            "",
            "## Summary",
            "",
            f"- **Total Findings:** {len(result.findings)}",
        ]
        # Severity tallies, most urgent first.
        for label, sev in (
            ("Critical", Severity.CRITICAL),
            ("High", Severity.HIGH),
            ("Medium", Severity.MEDIUM),
            ("Low", Severity.LOW),
            ("Info", Severity.INFO),
        ):
            out.append(f"- **{label}:** {len(result.get_findings_by_severity(sev))}")
        out.append("")

        if result.findings:
            out += ["## Findings", ""]
            # One section per severity level that actually has findings.
            for sev in (Severity.CRITICAL, Severity.HIGH, Severity.MEDIUM, Severity.LOW, Severity.INFO):
                grouped = result.get_findings_by_severity(sev)
                if not grouped:
                    continue
                out += [f"### {sev.value} Severity", ""]
                for item in grouped:
                    out += self._format_finding(item)
                    out.append("")
        else:
            out += [
                "## [OK] No Issues Found",
                "",
                "This skill passed all security checks.",
                "",
            ]

        out += ["## Analyzers", "", "The following analyzers were used:", ""]
        out += [f"- {name}" for name in result.analyzers_used]
        out.append("")

        return "\n".join(out)

    def _generate_multi_skill_report(self, report: Report) -> str:
        """Render the aggregate report covering every scanned skill."""
        out = [
            "# Claude Skills Security Scan Report",
            "",
            f"**Timestamp:** {report.timestamp.isoformat()}",
            "",
            "## Summary",
            "",
            f"- **Total Skills Scanned:** {report.total_skills_scanned}",
            f"- **Safe Skills:** {report.safe_count}",
            f"- **Total Findings:** {report.total_findings}",
            "",
            "### Findings by Severity",
            "",
            f"- **Critical:** {report.critical_count}",
            f"- **High:** {report.high_count}",
            f"- **Medium:** {report.medium_count}",
            f"- **Low:** {report.low_count}",
            f"- **Info:** {report.info_count}",
            "",
            "## Skill Results",
            "",
        ]

        for result in report.scan_results:
            marker = "[OK]" if result.is_safe else "[FAIL]"
            out += [
                f"### {marker} {result.skill_name}",
                "",
                f"- **Max Severity:** {result.max_severity.value}",
                f"- **Findings:** {len(result.findings)}",
                f"- **Directory:** {result.skill_directory}",
                "",
            ]
            if self.detailed and result.findings:
                for item in result.findings:
                    out += self._format_finding(item, indent=1)
                    out.append("")

        return "\n".join(out)

    def _format_finding(self, finding: Finding, indent: int = 0) -> list:
        """Format one finding as a list of markdown lines, indented *indent* levels."""
        pad = "  " * indent
        # Bracketed tag shown in the finding heading.
        tags = {
            Severity.CRITICAL: "[CRITICAL]",
            Severity.HIGH: "[HIGH]",
            Severity.MEDIUM: "[MEDIUM]",
            Severity.LOW: "[LOW]",
            Severity.INFO: "[INFO]",
        }
        tag = tags.get(finding.severity, "[INFO]")

        out = [
            f"{pad}#### {tag} {finding.title}",
            f"{pad}",
            f"{pad}**Severity:** {finding.severity.value}",
            f"{pad}**Category:** {finding.category.value}",
            f"{pad}**Rule ID:** {finding.rule_id}",
        ]

        if finding.file_path:
            where = f"{finding.file_path}"
            if finding.line_number:
                where = f"{where}:{finding.line_number}"
            out.append(f"{pad}**Location:** {where}")

        out.append(f"{pad}")
        out.append(f"{pad}**Description:** {finding.description}")

        if self.detailed:
            if finding.snippet:
                out += [
                    f"{pad}",
                    f"{pad}**Code Snippet:**",
                    f"{pad}```",
                    f"{pad}{finding.snippet}",
                    f"{pad}```",
                ]
            if finding.remediation:
                out += [f"{pad}", f"{pad}**Remediation:** {finding.remediation}"]

        return out

    def save_report(self, data: ScanResult | Report, output_path: str):
        """
        Write the Markdown report to a file.

        Args:
            data: ScanResult or Report object
            output_path: Path to save file
        """
        rendered = self.generate_report(data)
        with open(output_path, "w", encoding="utf-8") as handle:
            handle.write(rendered)
|
|
@@ -0,0 +1,246 @@
|
|
|
1
|
+
# Copyright 2026 Cisco Systems, Inc.
|
|
2
|
+
#
|
|
3
|
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
4
|
+
# you may not use this file except in compliance with the License.
|
|
5
|
+
# You may obtain a copy of the License at
|
|
6
|
+
#
|
|
7
|
+
# http://www.apache.org/licenses/LICENSE-2.0
|
|
8
|
+
#
|
|
9
|
+
# Unless required by applicable law or agreed to in writing, software
|
|
10
|
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
11
|
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
12
|
+
# See the License for the specific language governing permissions and
|
|
13
|
+
# limitations under the License.
|
|
14
|
+
#
|
|
15
|
+
# SPDX-License-Identifier: Apache-2.0
|
|
16
|
+
|
|
17
|
+
"""
|
|
18
|
+
SARIF format reporter for GitHub Code Scanning integration.
|
|
19
|
+
|
|
20
|
+
Implements SARIF 2.1.0 specification for security scan results.
|
|
21
|
+
https://docs.oasis-open.org/sarif/sarif/v2.1.0/sarif-v2.1.0.html
|
|
22
|
+
"""
|
|
23
|
+
|
|
24
|
+
import json
|
|
25
|
+
from typing import Any
|
|
26
|
+
|
|
27
|
+
from ...core.models import Finding, Report, ScanResult, Severity
|
|
28
|
+
|
|
29
|
+
|
|
30
|
+
class SARIFReporter:
    """Generates SARIF 2.1.0 format reports for GitHub Code Scanning."""

    SARIF_VERSION = "2.1.0"
    SARIF_SCHEMA = "https://raw.githubusercontent.com/oasis-tcs/sarif-spec/master/Schemata/sarif-schema-2.1.0.json"

    # Map severity to SARIF levels
    SEVERITY_TO_LEVEL = {
        Severity.CRITICAL: "error",
        Severity.HIGH: "error",
        Severity.MEDIUM: "warning",
        Severity.LOW: "note",
        Severity.INFO: "note",
        Severity.SAFE: "none",
    }

    def __init__(self, tool_name: str = "skill-analyzer", tool_version: str = "1.0.0"):
        """
        Initialize the SARIF reporter.

        Args:
            tool_name: Name of the scanning tool
            tool_version: Version of the scanning tool
        """
        self.tool_name = tool_name
        self.tool_version = tool_version

    def generate_report(self, data: ScanResult | Report) -> str:
        """
        Serialize *data* as a SARIF 2.1.0 JSON string.

        Args:
            data: ScanResult or Report object

        Returns:
            SARIF JSON string
        """
        builder = (
            self._generate_from_scan_result
            if isinstance(data, ScanResult)
            else self._generate_from_report
        )
        return json.dumps(builder(data), indent=2, default=str)

    def _wrap_run(
        self, rules: list[dict[str, Any]], results: list[dict[str, Any]], end_time_utc: str
    ) -> dict[str, Any]:
        """Assemble the top-level SARIF document around a single run."""
        return {
            "$schema": self.SARIF_SCHEMA,
            "version": self.SARIF_VERSION,
            "runs": [
                {
                    "tool": self._create_tool_component(rules),
                    "results": results,
                    "invocations": [
                        {
                            "executionSuccessful": True,
                            "endTimeUtc": end_time_utc,
                        }
                    ],
                }
            ],
        }

    def _generate_from_scan_result(self, result: ScanResult) -> dict[str, Any]:
        """Build a SARIF document from a single ScanResult."""
        return self._wrap_run(
            self._extract_rules(result.findings),
            self._convert_findings(result.findings, result.skill_directory),
            result.timestamp.isoformat() + "Z",
        )

    def _generate_from_report(self, report: Report) -> dict[str, Any]:
        """Build one SARIF run aggregating every scan result in *report*."""
        # Rules are deduplicated across all skills' findings.
        combined: list = []
        for scan in report.scan_results:
            combined.extend(scan.findings)
        rules = self._extract_rules(combined)

        # Results keep each skill's own directory as the artifact base.
        merged: list[dict[str, Any]] = []
        for scan in report.scan_results:
            merged.extend(self._convert_findings(scan.findings, scan.skill_directory))

        return self._wrap_run(rules, merged, report.timestamp.isoformat() + "Z")

    def _create_tool_component(self, rules: list[dict[str, Any]]) -> dict[str, Any]:
        """Create the SARIF tool component carrying the rule catalog."""
        return {
            "driver": {
                "name": self.tool_name,
                "version": self.tool_version,
                "informationUri": "https://github.com/anthropics/skill-analyzer",
                "rules": rules,
            }
        }

    def _extract_rules(self, findings: list[Finding]) -> list[dict[str, Any]]:
        """Collect one SARIF rule descriptor per distinct rule_id, in first-seen order."""
        catalog: dict[str, dict[str, Any]] = {}

        for finding in findings:
            if finding.rule_id in catalog:
                continue

            descriptor: dict[str, Any] = {
                "id": finding.rule_id,
                "name": finding.rule_id.replace("_", " ").title(),
                "shortDescription": {
                    "text": finding.title,
                },
                "fullDescription": {
                    "text": finding.description,
                },
                "defaultConfiguration": {
                    "level": self.SEVERITY_TO_LEVEL.get(finding.severity, "warning"),
                },
                "properties": {
                    "category": finding.category.value,
                    "severity": finding.severity.value,
                    "tags": [finding.category.value, "security"],
                },
            }

            if finding.remediation:
                descriptor["help"] = {
                    "text": finding.remediation,
                    "markdown": f"**Remediation**: {finding.remediation}",
                }

            catalog[finding.rule_id] = descriptor

        return list(catalog.values())

    def _convert_findings(self, findings: list[Finding], base_path: str) -> list[dict[str, Any]]:
        """Convert findings into SARIF result entries.

        NOTE(review): *base_path* is accepted for interface compatibility but the
        emitted URIs come from each finding's own file_path.
        """
        entries: list[dict[str, Any]] = []

        for finding in findings:
            entry: dict[str, Any] = {
                "ruleId": finding.rule_id,
                "level": self.SEVERITY_TO_LEVEL.get(finding.severity, "warning"),
                "message": {
                    "text": finding.description,
                },
                "properties": {
                    "category": finding.category.value,
                    "severity": finding.severity.value,
                },
            }

            # Physical location, when the finding points at a file.
            if finding.file_path:
                physical: dict[str, Any] = {
                    "artifactLocation": {
                        "uri": finding.file_path,
                        "uriBaseId": "%SRCROOT%",
                    },
                }
                if finding.line_number:
                    region: dict[str, Any] = {"startLine": finding.line_number}
                    if finding.snippet:
                        region["snippet"] = {"text": finding.snippet}
                    physical["region"] = region
                entry["locations"] = [{"physicalLocation": physical}]

            if finding.remediation:
                entry["fixes"] = [
                    {
                        "description": {
                            "text": finding.remediation,
                        }
                    }
                ]

            # Fingerprint lets downstream tooling deduplicate results across runs.
            entry["fingerprints"] = {
                "primaryLocationLineHash": finding.id,
            }

            entries.append(entry)

        return entries

    def save_report(self, data: ScanResult | Report, output_path: str):
        """
        Write the SARIF report to a file.

        Args:
            data: ScanResult or Report object
            output_path: Path to save file
        """
        rendered = self.generate_report(data)
        with open(output_path, "w", encoding="utf-8") as handle:
            handle.write(rendered)
|