cisco-ai-skill-scanner 1.0.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- cisco_ai_skill_scanner-1.0.0.dist-info/METADATA +253 -0
- cisco_ai_skill_scanner-1.0.0.dist-info/RECORD +100 -0
- cisco_ai_skill_scanner-1.0.0.dist-info/WHEEL +4 -0
- cisco_ai_skill_scanner-1.0.0.dist-info/entry_points.txt +4 -0
- cisco_ai_skill_scanner-1.0.0.dist-info/licenses/LICENSE +17 -0
- skillanalyzer/__init__.py +45 -0
- skillanalyzer/_version.py +34 -0
- skillanalyzer/api/__init__.py +25 -0
- skillanalyzer/api/api.py +34 -0
- skillanalyzer/api/api_cli.py +78 -0
- skillanalyzer/api/api_server.py +634 -0
- skillanalyzer/api/router.py +527 -0
- skillanalyzer/cli/__init__.py +25 -0
- skillanalyzer/cli/cli.py +816 -0
- skillanalyzer/config/__init__.py +26 -0
- skillanalyzer/config/config.py +149 -0
- skillanalyzer/config/config_parser.py +122 -0
- skillanalyzer/config/constants.py +85 -0
- skillanalyzer/core/__init__.py +24 -0
- skillanalyzer/core/analyzers/__init__.py +75 -0
- skillanalyzer/core/analyzers/aidefense_analyzer.py +872 -0
- skillanalyzer/core/analyzers/base.py +53 -0
- skillanalyzer/core/analyzers/behavioral/__init__.py +30 -0
- skillanalyzer/core/analyzers/behavioral/alignment/__init__.py +45 -0
- skillanalyzer/core/analyzers/behavioral/alignment/alignment_llm_client.py +240 -0
- skillanalyzer/core/analyzers/behavioral/alignment/alignment_orchestrator.py +216 -0
- skillanalyzer/core/analyzers/behavioral/alignment/alignment_prompt_builder.py +422 -0
- skillanalyzer/core/analyzers/behavioral/alignment/alignment_response_validator.py +136 -0
- skillanalyzer/core/analyzers/behavioral/alignment/threat_vulnerability_classifier.py +198 -0
- skillanalyzer/core/analyzers/behavioral_analyzer.py +453 -0
- skillanalyzer/core/analyzers/cross_skill_analyzer.py +490 -0
- skillanalyzer/core/analyzers/llm_analyzer.py +440 -0
- skillanalyzer/core/analyzers/llm_prompt_builder.py +270 -0
- skillanalyzer/core/analyzers/llm_provider_config.py +215 -0
- skillanalyzer/core/analyzers/llm_request_handler.py +284 -0
- skillanalyzer/core/analyzers/llm_response_parser.py +81 -0
- skillanalyzer/core/analyzers/meta_analyzer.py +845 -0
- skillanalyzer/core/analyzers/static.py +1105 -0
- skillanalyzer/core/analyzers/trigger_analyzer.py +341 -0
- skillanalyzer/core/analyzers/virustotal_analyzer.py +463 -0
- skillanalyzer/core/exceptions.py +77 -0
- skillanalyzer/core/loader.py +377 -0
- skillanalyzer/core/models.py +300 -0
- skillanalyzer/core/reporters/__init__.py +26 -0
- skillanalyzer/core/reporters/json_reporter.py +65 -0
- skillanalyzer/core/reporters/markdown_reporter.py +209 -0
- skillanalyzer/core/reporters/sarif_reporter.py +246 -0
- skillanalyzer/core/reporters/table_reporter.py +195 -0
- skillanalyzer/core/rules/__init__.py +19 -0
- skillanalyzer/core/rules/patterns.py +165 -0
- skillanalyzer/core/rules/yara_scanner.py +157 -0
- skillanalyzer/core/scanner.py +437 -0
- skillanalyzer/core/static_analysis/__init__.py +27 -0
- skillanalyzer/core/static_analysis/cfg/__init__.py +21 -0
- skillanalyzer/core/static_analysis/cfg/builder.py +439 -0
- skillanalyzer/core/static_analysis/context_extractor.py +742 -0
- skillanalyzer/core/static_analysis/dataflow/__init__.py +25 -0
- skillanalyzer/core/static_analysis/dataflow/forward_analysis.py +715 -0
- skillanalyzer/core/static_analysis/interprocedural/__init__.py +21 -0
- skillanalyzer/core/static_analysis/interprocedural/call_graph_analyzer.py +406 -0
- skillanalyzer/core/static_analysis/interprocedural/cross_file_analyzer.py +190 -0
- skillanalyzer/core/static_analysis/parser/__init__.py +21 -0
- skillanalyzer/core/static_analysis/parser/python_parser.py +380 -0
- skillanalyzer/core/static_analysis/semantic/__init__.py +28 -0
- skillanalyzer/core/static_analysis/semantic/name_resolver.py +206 -0
- skillanalyzer/core/static_analysis/semantic/type_analyzer.py +200 -0
- skillanalyzer/core/static_analysis/taint/__init__.py +21 -0
- skillanalyzer/core/static_analysis/taint/tracker.py +252 -0
- skillanalyzer/core/static_analysis/types/__init__.py +36 -0
- skillanalyzer/data/__init__.py +30 -0
- skillanalyzer/data/prompts/boilerplate_protection_rule_prompt.md +26 -0
- skillanalyzer/data/prompts/code_alignment_threat_analysis_prompt.md +901 -0
- skillanalyzer/data/prompts/llm_response_schema.json +71 -0
- skillanalyzer/data/prompts/skill_meta_analysis_prompt.md +303 -0
- skillanalyzer/data/prompts/skill_threat_analysis_prompt.md +263 -0
- skillanalyzer/data/prompts/unified_response_schema.md +97 -0
- skillanalyzer/data/rules/signatures.yaml +440 -0
- skillanalyzer/data/yara_rules/autonomy_abuse.yara +66 -0
- skillanalyzer/data/yara_rules/code_execution.yara +61 -0
- skillanalyzer/data/yara_rules/coercive_injection.yara +115 -0
- skillanalyzer/data/yara_rules/command_injection.yara +54 -0
- skillanalyzer/data/yara_rules/credential_harvesting.yara +115 -0
- skillanalyzer/data/yara_rules/prompt_injection.yara +71 -0
- skillanalyzer/data/yara_rules/script_injection.yara +83 -0
- skillanalyzer/data/yara_rules/skill_discovery_abuse.yara +57 -0
- skillanalyzer/data/yara_rules/sql_injection.yara +73 -0
- skillanalyzer/data/yara_rules/system_manipulation.yara +65 -0
- skillanalyzer/data/yara_rules/tool_chaining_abuse.yara +60 -0
- skillanalyzer/data/yara_rules/transitive_trust_abuse.yara +73 -0
- skillanalyzer/data/yara_rules/unicode_steganography.yara +65 -0
- skillanalyzer/hooks/__init__.py +21 -0
- skillanalyzer/hooks/pre_commit.py +450 -0
- skillanalyzer/threats/__init__.py +25 -0
- skillanalyzer/threats/threats.py +480 -0
- skillanalyzer/utils/__init__.py +28 -0
- skillanalyzer/utils/command_utils.py +129 -0
- skillanalyzer/utils/di_container.py +154 -0
- skillanalyzer/utils/file_utils.py +86 -0
- skillanalyzer/utils/logging_config.py +96 -0
- skillanalyzer/utils/logging_utils.py +71 -0
skillanalyzer/cli/cli.py
ADDED
|
@@ -0,0 +1,816 @@
|
|
|
1
|
+
# Copyright 2026 Cisco Systems, Inc.
|
|
2
|
+
#
|
|
3
|
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
4
|
+
# you may not use this file except in compliance with the License.
|
|
5
|
+
# You may obtain a copy of the License at
|
|
6
|
+
#
|
|
7
|
+
# http://www.apache.org/licenses/LICENSE-2.0
|
|
8
|
+
#
|
|
9
|
+
# Unless required by applicable law or agreed to in writing, software
|
|
10
|
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
11
|
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
12
|
+
# See the License for the specific language governing permissions and
|
|
13
|
+
# limitations under the License.
|
|
14
|
+
#
|
|
15
|
+
# SPDX-License-Identifier: Apache-2.0
|
|
16
|
+
|
|
17
|
+
"""
|
|
18
|
+
Command-line interface for the Claude Skill Analyzer.
|
|
19
|
+
"""
|
|
20
|
+
|
|
21
|
+
import argparse
|
|
22
|
+
import asyncio
|
|
23
|
+
import os
|
|
24
|
+
import sys
|
|
25
|
+
from pathlib import Path
|
|
26
|
+
|
|
27
|
+
from ..core.analyzers.behavioral_analyzer import BehavioralAnalyzer
|
|
28
|
+
from ..core.analyzers.static import StaticAnalyzer
|
|
29
|
+
from ..core.reporters.json_reporter import JSONReporter
|
|
30
|
+
from ..core.reporters.sarif_reporter import SARIFReporter
|
|
31
|
+
from ..core.scanner import SkillScanner
|
|
32
|
+
|
|
33
|
+
# Optional LLM analyzer
|
|
34
|
+
try:
|
|
35
|
+
from ..core.analyzers.llm_analyzer import LLMAnalyzer
|
|
36
|
+
|
|
37
|
+
LLM_AVAILABLE = True
|
|
38
|
+
except (ImportError, ModuleNotFoundError):
|
|
39
|
+
LLM_AVAILABLE = False
|
|
40
|
+
LLMAnalyzer = None
|
|
41
|
+
|
|
42
|
+
# Optional Meta analyzer
|
|
43
|
+
try:
|
|
44
|
+
from ..core.analyzers.meta_analyzer import MetaAnalyzer, apply_meta_analysis_to_results
|
|
45
|
+
|
|
46
|
+
META_AVAILABLE = True
|
|
47
|
+
except (ImportError, ModuleNotFoundError):
|
|
48
|
+
META_AVAILABLE = False
|
|
49
|
+
MetaAnalyzer = None
|
|
50
|
+
apply_meta_analysis_to_results = None
|
|
51
|
+
|
|
52
|
+
from ..core.loader import SkillLoadError
|
|
53
|
+
from ..core.reporters.markdown_reporter import MarkdownReporter
|
|
54
|
+
from ..core.reporters.table_reporter import TableReporter
|
|
55
|
+
|
|
56
|
+
|
|
57
|
+
def scan_command(args):
    """Handle the scan command for a single skill.

    Builds an analyzer pipeline from the parsed CLI ``args`` (the static
    analyzer is always included; behavioral, LLM, VirusTotal, AI Defense and
    trigger analyzers are opt-in), scans ``args.skill_directory``, optionally
    applies a second-pass meta-analysis to the findings, and writes the report
    in ``args.format`` to stdout or to ``args.output``.

    Args:
        args: argparse.Namespace from the ``scan`` subparser. Read attributes
            include ``skill_directory``, ``format``, ``output``, ``compact``,
            ``detailed``, ``fail_on_findings`` and the opt-in analyzer flags
            (``use_behavioral``, ``use_llm``, ``use_virustotal``,
            ``use_aidefense``, ``use_trigger``, ``enable_meta``, plus the
            related API-key/URL options).

    Returns:
        int: process exit code — 0 on success, 1 when the directory is
        missing, loading/scanning fails, or ``args.fail_on_findings`` is set
        and the scan result is not safe.
    """
    skill_dir = Path(args.skill_directory)

    if not skill_dir.exists():
        print(f"Error: Directory does not exist: {skill_dir}", file=sys.stderr)
        return 1

    # Create scanner with configured analyzers; the static analyzer is always on.
    analyzers = [StaticAnalyzer()]

    # Helper to print status messages - go to stderr when JSON output to avoid breaking parsing
    is_json_output = getattr(args, "format", "summary") == "json"

    def status_print(msg: str) -> None:
        # Status chatter must not corrupt machine-readable stdout.
        if is_json_output:
            print(msg, file=sys.stderr)
        else:
            print(msg)

    # Add behavioral analyzer if requested.
    # Analyzer init failures below are deliberately non-fatal: warn and continue
    # with whatever analyzers did initialize.
    if hasattr(args, "use_behavioral") and args.use_behavioral:
        try:
            behavioral_analyzer = BehavioralAnalyzer(use_static_analysis=True)
            analyzers.append(behavioral_analyzer)
            status_print("Using behavioral analyzer (static dataflow analysis)")
        except Exception as e:
            print(f"Warning: Could not initialize behavioral analyzer: {e}", file=sys.stderr)

    # Add LLM analyzer if requested and available
    if hasattr(args, "use_llm") and args.use_llm:
        if not LLM_AVAILABLE:
            print("Warning: LLM analyzer requested but dependencies not installed.", file=sys.stderr)
            print("Install with: pip install anthropic openai", file=sys.stderr)
        else:
            try:
                # Get API key and model from environment
                # Use SKILL_SCANNER_* env vars only (no provider-specific fallbacks)
                api_key = os.getenv("SKILL_SCANNER_LLM_API_KEY")
                model = os.getenv("SKILL_SCANNER_LLM_MODEL") or "claude-3-5-sonnet-20241022"
                base_url = os.getenv("SKILL_SCANNER_LLM_BASE_URL")
                api_version = os.getenv("SKILL_SCANNER_LLM_API_VERSION")

                llm_analyzer = LLMAnalyzer(
                    model=model,
                    api_key=api_key,
                    base_url=base_url,
                    api_version=api_version,
                )
                analyzers.append(llm_analyzer)
                status_print(f"Using LLM analyzer with model: {model}")
            except Exception as e:
                print(f"Warning: Could not initialize LLM analyzer: {e}", file=sys.stderr)

    # Add VirusTotal analyzer if requested
    if hasattr(args, "use_virustotal") and args.use_virustotal:
        vt_api_key = args.vt_api_key or os.getenv("VIRUSTOTAL_API_KEY")
        if not vt_api_key:
            print("Warning: VirusTotal requested but no API key provided.", file=sys.stderr)
            print("Set VIRUSTOTAL_API_KEY environment variable or use --vt-api-key", file=sys.stderr)
        else:
            try:
                # Imported lazily so the optional dependency is only needed when used.
                from ..core.analyzers.virustotal_analyzer import VirusTotalAnalyzer

                vt_upload = getattr(args, "vt_upload_files", False)
                vt_analyzer = VirusTotalAnalyzer(api_key=vt_api_key, enabled=True, upload_files=vt_upload)
                analyzers.append(vt_analyzer)
                mode = "with file uploads" if vt_upload else "hash-only mode"
                status_print(f"Using VirusTotal binary file scanner ({mode})")
            except Exception as e:
                print(f"Warning: Could not initialize VirusTotal analyzer: {e}", file=sys.stderr)

    # Add AI Defense analyzer if requested
    if hasattr(args, "use_aidefense") and args.use_aidefense:
        aidefense_api_key = getattr(args, "aidefense_api_key", None) or os.getenv("AI_DEFENSE_API_KEY")
        if not aidefense_api_key:
            print("Warning: AI Defense requested but no API key provided.", file=sys.stderr)
            print("Set AI_DEFENSE_API_KEY environment variable or use --aidefense-api-key", file=sys.stderr)
        else:
            try:
                from ..core.analyzers.aidefense_analyzer import AIDefenseAnalyzer

                aidefense_api_url = getattr(args, "aidefense_api_url", None) or os.getenv("AI_DEFENSE_API_URL")
                aidefense_analyzer = AIDefenseAnalyzer(api_key=aidefense_api_key, api_url=aidefense_api_url)
                analyzers.append(aidefense_analyzer)
                status_print("Using AI Defense analyzer")
            except Exception as e:
                print(f"Warning: Could not initialize AI Defense analyzer: {e}", file=sys.stderr)

    # Add Trigger analyzer if requested
    if hasattr(args, "use_trigger") and args.use_trigger:
        try:
            from ..core.analyzers.trigger_analyzer import TriggerAnalyzer

            trigger_analyzer = TriggerAnalyzer()
            analyzers.append(trigger_analyzer)
            status_print("Using Trigger analyzer (description specificity analysis)")
        except Exception as e:
            print(f"Warning: Could not initialize Trigger analyzer: {e}", file=sys.stderr)

    # Initialize meta-analyzer if requested (second-pass review of findings).
    meta_analyzer = None
    enable_meta = hasattr(args, "enable_meta") and args.enable_meta
    if enable_meta:
        if not META_AVAILABLE:
            print("Warning: Meta-analyzer requested but dependencies not installed.", file=sys.stderr)
            print("Install with: pip install litellm", file=sys.stderr)
        elif len(analyzers) < 2:
            print("Warning: Meta-analysis requires at least 2 analyzers. Skipping meta-analysis.", file=sys.stderr)
        else:
            try:
                # Use SKILL_SCANNER_* env vars only (no provider-specific fallbacks)
                # Priority: meta-specific > scanner-wide
                meta_api_key = os.getenv("SKILL_SCANNER_META_LLM_API_KEY") or os.getenv("SKILL_SCANNER_LLM_API_KEY")
                meta_model = os.getenv("SKILL_SCANNER_META_LLM_MODEL") or os.getenv("SKILL_SCANNER_LLM_MODEL")
                meta_base_url = os.getenv("SKILL_SCANNER_META_LLM_BASE_URL") or os.getenv("SKILL_SCANNER_LLM_BASE_URL")
                meta_api_version = os.getenv("SKILL_SCANNER_META_LLM_API_VERSION") or os.getenv(
                    "SKILL_SCANNER_LLM_API_VERSION"
                )
                meta_analyzer = MetaAnalyzer(
                    model=meta_model,
                    api_key=meta_api_key,
                    base_url=meta_base_url,
                    api_version=meta_api_version,
                )
                status_print("Using Meta-Analyzer for false positive filtering and finding prioritization")
            except Exception as e:
                print(f"Warning: Could not initialize Meta-Analyzer: {e}", file=sys.stderr)

    scanner = SkillScanner(analyzers=analyzers)

    try:
        # Scan the skill
        result = scanner.scan_skill(skill_dir)

        # Run meta-analysis if enabled and we have findings
        if meta_analyzer and result.findings:
            status_print("Running meta-analysis to filter false positives...")
            try:
                # Load the skill for context
                skill = scanner.loader.load_skill(skill_dir)

                # Run meta-analysis asynchronously
                meta_result = asyncio.run(
                    meta_analyzer.analyze_with_findings(
                        skill=skill,
                        findings=result.findings,
                        analyzers_used=result.analyzers_used,
                    )
                )

                # Apply meta-analysis results
                filtered_findings = apply_meta_analysis_to_results(
                    original_findings=result.findings,
                    meta_result=meta_result,
                    skill=skill,
                )

                # Update result with filtered findings
                original_count = len(result.findings)
                result.findings = filtered_findings
                result.analyzers_used.append("meta_analyzer")

                # False positives = original findings minus the surviving non-meta
                # findings; meta-authored findings count as newly detected threats.
                fp_count = original_count - len([f for f in filtered_findings if f.analyzer != "meta"])
                new_count = len([f for f in filtered_findings if f.analyzer == "meta"])
                status_print(
                    f"Meta-analysis complete: {fp_count} false positives filtered, {new_count} new threats detected"
                )

            except Exception as e:
                # Meta-analysis is best-effort: fall back to the unfiltered findings.
                print(f"Warning: Meta-analysis failed: {e}", file=sys.stderr)
                print("Continuing with original findings.", file=sys.stderr)

        # Generate report based on format
        if args.format == "json":
            reporter = JSONReporter(pretty=not args.compact)
            output = reporter.generate_report(result)
        elif args.format == "markdown":
            reporter = MarkdownReporter(detailed=args.detailed)
            output = reporter.generate_report(result)
        elif args.format == "table":
            reporter = TableReporter()
            output = reporter.generate_report(result)
        elif args.format == "sarif":
            reporter = SARIFReporter()
            output = reporter.generate_report(result)
        else:  # summary
            output = generate_summary(result)

        # Output
        if args.output:
            with open(args.output, "w", encoding="utf-8") as f:
                f.write(output)
            print(f"Report saved to: {args.output}")
        else:
            print(output)

        # Exit with error code if critical/high issues found
        if not result.is_safe and args.fail_on_findings:
            return 1

        return 0

    except SkillLoadError as e:
        print(f"Error loading skill: {e}", file=sys.stderr)
        return 1
    except Exception as e:
        print(f"Unexpected error: {e}", file=sys.stderr)
        return 1
|
|
266
|
+
|
|
267
|
+
|
|
268
|
+
def scan_all_command(args):
    """Handle the scan-all command for multiple skills.

    Mirrors :func:`scan_command` but scans every skill found under
    ``args.skills_directory`` (optionally recursively and with cross-skill
    overlap checking), runs the optional meta-analysis per skill, recomputes
    the aggregate report counters, and emits one combined report.

    Args:
        args: argparse.Namespace from the ``scan-all`` subparser. Read
            attributes include ``skills_directory``, ``recursive``,
            ``check_overlap``, ``format``, ``output``, ``compact``,
            ``detailed``, ``fail_on_findings`` and the same opt-in analyzer
            flags as the single-skill scan.

    Returns:
        int: process exit code — 0 on success, 1 when the directory is
        missing, no skills are found, an unexpected error occurs, or
        ``args.fail_on_findings`` is set and any critical/high findings
        remain.
    """
    skills_dir = Path(args.skills_directory)

    if not skills_dir.exists():
        print(f"Error: Directory does not exist: {skills_dir}", file=sys.stderr)
        return 1

    # Create scanner with configured analyzers; the static analyzer is always on.
    analyzers = [StaticAnalyzer()]

    # Helper to print status messages - go to stderr when JSON output to avoid breaking parsing
    is_json_output = getattr(args, "format", "summary") == "json"

    def status_print(msg: str) -> None:
        # Status chatter must not corrupt machine-readable stdout.
        if is_json_output:
            print(msg, file=sys.stderr)
        else:
            print(msg)

    # Add behavioral analyzer if requested
    if hasattr(args, "use_behavioral") and args.use_behavioral:
        try:
            behavioral_analyzer = BehavioralAnalyzer(use_static_analysis=True)
            analyzers.append(behavioral_analyzer)
            status_print("Using behavioral analyzer (static dataflow analysis)")
        except Exception as e:
            print(f"Warning: Could not initialize behavioral analyzer: {e}", file=sys.stderr)

    # Add LLM analyzer if requested
    # NOTE(review): unlike scan_command, no warning is printed here when
    # --use-llm is given but LLM_AVAILABLE is False — the analyzer is
    # silently skipped.
    if hasattr(args, "use_llm") and args.use_llm and LLM_AVAILABLE:
        try:
            # Use SKILL_SCANNER_* env vars only (no provider-specific fallbacks)
            api_key = os.getenv("SKILL_SCANNER_LLM_API_KEY")
            model = os.getenv("SKILL_SCANNER_LLM_MODEL") or "claude-3-5-sonnet-20241022"
            base_url = os.getenv("SKILL_SCANNER_LLM_BASE_URL")
            api_version = os.getenv("SKILL_SCANNER_LLM_API_VERSION")

            llm_analyzer = LLMAnalyzer(
                model=model,
                api_key=api_key,
                base_url=base_url,
                api_version=api_version,
            )
            analyzers.append(llm_analyzer)
            status_print(f"Using LLM analyzer with model: {model}")
        except Exception as e:
            print(f"Warning: Could not initialize LLM analyzer: {e}", file=sys.stderr)

    # Add VirusTotal analyzer if requested
    if hasattr(args, "use_virustotal") and args.use_virustotal:
        vt_api_key = args.vt_api_key or os.getenv("VIRUSTOTAL_API_KEY")
        vt_upload = getattr(args, "vt_upload_files", False)
        if not vt_api_key:
            print("Warning: VirusTotal requested but no API key provided.", file=sys.stderr)
            print("Set VIRUSTOTAL_API_KEY environment variable or use --vt-api-key", file=sys.stderr)
        else:
            try:
                # Imported lazily so the optional dependency is only needed when used.
                from ..core.analyzers.virustotal_analyzer import VirusTotalAnalyzer

                vt_analyzer = VirusTotalAnalyzer(api_key=vt_api_key, enabled=True, upload_files=vt_upload)
                analyzers.append(vt_analyzer)
                mode = "with file uploads" if vt_upload else "hash-only mode"
                status_print(f"Using VirusTotal binary file scanner ({mode})")
            except Exception as e:
                print(f"Warning: Could not initialize VirusTotal analyzer: {e}", file=sys.stderr)

    # Add AI Defense analyzer if requested
    if hasattr(args, "use_aidefense") and args.use_aidefense:
        aidefense_api_key = getattr(args, "aidefense_api_key", None) or os.getenv("AI_DEFENSE_API_KEY")
        if not aidefense_api_key:
            print("Warning: AI Defense requested but no API key provided.", file=sys.stderr)
            print("Set AI_DEFENSE_API_KEY environment variable or use --aidefense-api-key", file=sys.stderr)
        else:
            try:
                from ..core.analyzers.aidefense_analyzer import AIDefenseAnalyzer

                aidefense_api_url = getattr(args, "aidefense_api_url", None) or os.getenv("AI_DEFENSE_API_URL")
                aidefense_analyzer = AIDefenseAnalyzer(api_key=aidefense_api_key, api_url=aidefense_api_url)
                analyzers.append(aidefense_analyzer)
                status_print("Using AI Defense analyzer")
            except Exception as e:
                print(f"Warning: Could not initialize AI Defense analyzer: {e}", file=sys.stderr)

    # Add Trigger analyzer if requested
    if hasattr(args, "use_trigger") and args.use_trigger:
        try:
            from ..core.analyzers.trigger_analyzer import TriggerAnalyzer

            trigger_analyzer = TriggerAnalyzer()
            analyzers.append(trigger_analyzer)
            status_print("Using Trigger analyzer (description specificity analysis)")
        except Exception as e:
            print(f"Warning: Could not initialize Trigger analyzer: {e}", file=sys.stderr)

    # Initialize meta-analyzer if requested (second-pass review of findings).
    meta_analyzer = None
    enable_meta = hasattr(args, "enable_meta") and args.enable_meta
    if enable_meta:
        if not META_AVAILABLE:
            print("Warning: Meta-analyzer requested but dependencies not installed.", file=sys.stderr)
            print("Install with: pip install litellm", file=sys.stderr)
        elif len(analyzers) < 2:
            print("Warning: Meta-analysis requires at least 2 analyzers. Skipping meta-analysis.", file=sys.stderr)
        else:
            try:
                # Use SKILL_SCANNER_* env vars only (no provider-specific fallbacks)
                # Priority: meta-specific > scanner-wide
                meta_api_key = os.getenv("SKILL_SCANNER_META_LLM_API_KEY") or os.getenv("SKILL_SCANNER_LLM_API_KEY")
                meta_model = os.getenv("SKILL_SCANNER_META_LLM_MODEL") or os.getenv("SKILL_SCANNER_LLM_MODEL")
                meta_base_url = os.getenv("SKILL_SCANNER_META_LLM_BASE_URL") or os.getenv("SKILL_SCANNER_LLM_BASE_URL")
                meta_api_version = os.getenv("SKILL_SCANNER_META_LLM_API_VERSION") or os.getenv(
                    "SKILL_SCANNER_LLM_API_VERSION"
                )
                meta_analyzer = MetaAnalyzer(
                    model=meta_model,
                    api_key=meta_api_key,
                    base_url=meta_base_url,
                    api_version=meta_api_version,
                )
                status_print("Using Meta-Analyzer for false positive filtering and finding prioritization")
            except Exception as e:
                print(f"Warning: Could not initialize Meta-Analyzer: {e}", file=sys.stderr)

    scanner = SkillScanner(analyzers=analyzers)

    try:
        # Scan all skills
        check_overlap = hasattr(args, "check_overlap") and args.check_overlap
        report = scanner.scan_directory(skills_dir, recursive=args.recursive, check_overlap=check_overlap)

        if report.total_skills_scanned == 0:
            print("No skills found to scan.", file=sys.stderr)
            return 1

        # Run meta-analysis on each skill's results if enabled
        if meta_analyzer:
            status_print("Running meta-analysis on scan results...")
            total_fp_filtered = 0
            total_new_threats = 0

            for result in report.scan_results:
                if result.findings:
                    try:
                        # Load the skill for context
                        skill_dir = Path(result.skill_directory)
                        skill = scanner.loader.load_skill(skill_dir)

                        # Run meta-analysis asynchronously
                        meta_result = asyncio.run(
                            meta_analyzer.analyze_with_findings(
                                skill=skill,
                                findings=result.findings,
                                analyzers_used=result.analyzers_used,
                            )
                        )

                        # Apply meta-analysis results
                        original_count = len(result.findings)
                        filtered_findings = apply_meta_analysis_to_results(
                            original_findings=result.findings,
                            meta_result=meta_result,
                            skill=skill,
                        )

                        # Track statistics: dropped non-meta findings count as
                        # false positives; meta-authored findings are new threats.
                        fp_count = original_count - len([f for f in filtered_findings if f.analyzer != "meta"])
                        new_count = len([f for f in filtered_findings if f.analyzer == "meta"])
                        total_fp_filtered += fp_count
                        total_new_threats += new_count

                        # Update result
                        result.findings = filtered_findings
                        result.analyzers_used.append("meta_analyzer")

                    except Exception as e:
                        # Best-effort per skill: keep this skill's original findings.
                        print(f"Warning: Meta-analysis failed for {result.skill_name}: {e}", file=sys.stderr)

            status_print(
                f"Meta-analysis complete: {total_fp_filtered} total false positives filtered, {total_new_threats} new threats detected"
            )

            # Recalculate report totals (findings may have been added/removed above)
            report.total_findings = sum(len(r.findings) for r in report.scan_results)
            report.critical_count = sum(
                1 for r in report.scan_results for f in r.findings if f.severity.value == "CRITICAL"
            )
            report.high_count = sum(1 for r in report.scan_results for f in r.findings if f.severity.value == "HIGH")
            report.medium_count = sum(
                1 for r in report.scan_results for f in r.findings if f.severity.value == "MEDIUM"
            )
            report.low_count = sum(1 for r in report.scan_results for f in r.findings if f.severity.value == "LOW")
            report.info_count = sum(1 for r in report.scan_results for f in r.findings if f.severity.value == "INFO")
            report.safe_count = sum(1 for r in report.scan_results if r.is_safe)

        # Generate report based on format
        if args.format == "json":
            reporter = JSONReporter(pretty=not args.compact)
            output = reporter.generate_report(report)
        elif args.format == "markdown":
            reporter = MarkdownReporter(detailed=args.detailed)
            output = reporter.generate_report(report)
        elif args.format == "table":
            reporter = TableReporter()
            output = reporter.generate_report(report)
        elif args.format == "sarif":
            reporter = SARIFReporter()
            output = reporter.generate_report(report)
        else:  # summary
            output = generate_multi_skill_summary(report)

        # Output
        if args.output:
            with open(args.output, "w", encoding="utf-8") as f:
                f.write(output)
            print(f"Report saved to: {args.output}")
        else:
            print(output)

        # Exit with error code if any skills have issues
        if args.fail_on_findings and (report.critical_count > 0 or report.high_count > 0):
            return 1

        return 0

    except Exception as e:
        print(f"Unexpected error: {e}", file=sys.stderr)
        return 1
|
|
496
|
+
|
|
497
|
+
|
|
498
|
+
def list_analyzers_command(args):
    """Handle the list-analyzers command.

    Prints a catalogue of every analyzer the scanner knows about — including
    whether the optional LLM- and meta-analyzers are installed — followed by
    a note on planned analyzers, then returns 0.
    """
    lines = []

    lines.append("Available Analyzers:")
    lines.append("")

    lines.extend(
        [
            "1. static_analyzer (Default)",
            " - Pattern-based detection using YAML + YARA rules",
            " - Scans SKILL.md instructions and scripts",
            " - Detects 80+ security patterns across 12+ threat categories",
            "",
        ]
    )

    lines.extend(
        [
            "2. behavioral_analyzer [OK] Available",
            " - Static dataflow analysis (AST + taint tracking)",
            " - Tracks data from sources to sinks without execution",
            " - Detects multi-file exfiltration chains",
            " - Cross-file correlation analysis",
            " - Usage: --use-behavioral",
            "",
        ]
    )

    lines.extend(
        [
            "3. virustotal_analyzer [OK] Available (optional)",
            " - Scans binary files (images, PDFs, archives) using VirusTotal",
            " - Hash-based malware detection via VirusTotal API",
            " - Excludes code files (.py, .js, .md, etc.)",
            " - Requires VirusTotal API key",
            " - Usage: --use-virustotal --vt-api-key YOUR_KEY",
            "",
        ]
    )

    lines.extend(
        [
            "4. aidefense_analyzer [OK] Available (optional)",
            " - Enterprise-grade threat detection via Cisco AI Defense API",
            " - Analyzes prompts, instructions, markdown, and code files",
            " - Detects prompt injection, data exfiltration, tool poisoning",
            " - Requires Cisco AI Defense API key",
            " - Usage: --use-aidefense --aidefense-api-key YOUR_KEY",
            "",
        ]
    )

    # The LLM analyzer entry depends on whether its optional deps imported.
    if LLM_AVAILABLE:
        lines.extend(
            [
                "5. llm_analyzer [OK] Available",
                " - Semantic analysis using LLMs as judges",
                " - Context-aware threat detection",
                " - Understands code intent beyond patterns",
                " - Usage: --use-llm",
                "",
            ]
        )
    else:
        lines.extend(
            [
                "5. llm_analyzer [WARNING] Not installed",
                " - Install with: pip install litellm anthropic openai",
                "",
            ]
        )

    lines.extend(
        [
            "6. trigger_analyzer [OK] Available",
            " - Detects overly generic skill descriptions",
            " - Identifies trigger hijacking risks",
            " - Checks description specificity and keyword baiting",
            " - Usage: --use-trigger",
            "",
        ]
    )

    # Same pattern for the optional meta-analyzer.
    if META_AVAILABLE:
        lines.extend(
            [
                "7. meta_analyzer [OK] Available",
                " - Second-pass LLM analysis on findings from other analyzers",
                " - Filters false positives using contextual understanding",
                " - Prioritizes findings by actual exploitability",
                " - Detects threats other analyzers missed",
                " - Usage: --enable-meta (requires 2+ analyzers)",
                "",
            ]
        )
    else:
        lines.extend(
            [
                "7. meta_analyzer [WARNING] Not installed",
                " - Install with: pip install litellm",
                "",
            ]
        )

    lines.extend(
        [
            "Future Analyzers (not yet implemented):",
            " - policy_checker: Organization-specific policy validation",
            " - runtime_monitor: Live execution monitoring (sandbox)",
            "",
        ]
    )

    # One write instead of many small prints; output is byte-identical.
    print("\n".join(lines))
    return 0
|
|
569
|
+
|
|
570
|
+
|
|
571
|
+
def validate_rules_command(args):
    """Handle the validate-rules command."""
    from ..core.rules.patterns import RuleLoader

    try:
        # Honor an explicit rules file when supplied; fall back to defaults.
        rule_loader = RuleLoader(Path(args.rules_file)) if args.rules_file else RuleLoader()
        loaded = rule_loader.load_rules()

        print(f"[OK] Successfully loaded {len(loaded)} rules")
        print("")
        print("Rules by category:")

        # Per-category breakdown for a quick sanity check of the rule set.
        for cat, cat_rules in rule_loader.rules_by_category.items():
            print(f" - {cat.value}: {len(cat_rules)} rules")

        return 0

    except Exception as exc:
        # Report the failure on stderr and signal a non-zero exit status.
        print(f"[FAIL] Error validating rules: {exc}", file=sys.stderr)
        return 1
|
|
595
|
+
|
|
596
|
+
|
|
597
|
+
def generate_summary(result) -> str:
    """Generate a simple summary output."""
    separator = "=" * 60
    # Header and overall scan status.
    lines = [
        separator,
        f"Skill: {result.skill_name}",
        separator,
        f"Status: {'[OK] SAFE' if result.is_safe else '[FAIL] ISSUES FOUND'}",
        f"Max Severity: {result.max_severity.value}",
        f"Total Findings: {len(result.findings)}",
        f"Scan Duration: {result.scan_duration_seconds:.2f}s",
        "",
    ]

    if result.findings:
        # Imported lazily: only needed when there is something to break down.
        from ..core.models import Severity

        lines.append("Findings Summary:")
        lines.append(f" Critical: {len(result.get_findings_by_severity(Severity.CRITICAL))}")
        lines.append(f" High: {len(result.get_findings_by_severity(Severity.HIGH))}")
        lines.append(f" Medium: {len(result.get_findings_by_severity(Severity.MEDIUM))}")
        lines.append(f" Low: {len(result.get_findings_by_severity(Severity.LOW))}")
        lines.append(f" Info: {len(result.get_findings_by_severity(Severity.INFO))}")

    return "\n".join(lines)
|
|
620
|
+
|
|
621
|
+
|
|
622
|
+
def generate_multi_skill_summary(report) -> str:
    """Generate a simple summary for multiple skills."""
    bar = "=" * 60
    # Aggregate counts across every scanned skill.
    out = [
        bar,
        "Claude Skills Security Scan Report",
        bar,
        f"Skills Scanned: {report.total_skills_scanned}",
        f"Safe Skills: {report.safe_count}",
        f"Total Findings: {report.total_findings}",
        "",
        "Findings by Severity:",
        f" Critical: {report.critical_count}",
        f" High: {report.high_count}",
        f" Medium: {report.medium_count}",
        f" Low: {report.low_count}",
        f" Info: {report.info_count}",
        "",
        "Individual Skills:",
    ]

    # One status line per skill so problem packages are easy to spot.
    for item in report.scan_results:
        marker = "[OK]" if item.is_safe else "[FAIL]"
        out.append(f" {marker} {item.skill_name} - {len(item.findings)} findings ({item.max_severity.value})")

    return "\n".join(out)
|
|
646
|
+
|
|
647
|
+
|
|
648
|
+
def main():
|
|
649
|
+
"""Main CLI entry point."""
|
|
650
|
+
parser = argparse.ArgumentParser(
|
|
651
|
+
description="Claude Skill Analyzer - Security scanner for Claude Skills packages",
|
|
652
|
+
formatter_class=argparse.RawDescriptionHelpFormatter,
|
|
653
|
+
epilog="""
|
|
654
|
+
Examples:
|
|
655
|
+
# Scan a single skill
|
|
656
|
+
skill-analyzer scan /path/to/skill
|
|
657
|
+
|
|
658
|
+
# Scan with behavioral analysis (dataflow tracking)
|
|
659
|
+
skill-analyzer scan /path/to/skill --use-behavioral
|
|
660
|
+
|
|
661
|
+
# Scan with all engines (static + behavioral + LLM)
|
|
662
|
+
skill-analyzer scan /path/to/skill --use-behavioral --use-llm
|
|
663
|
+
|
|
664
|
+
# Scan with JSON output
|
|
665
|
+
skill-analyzer scan /path/to/skill --format json
|
|
666
|
+
|
|
667
|
+
# Scan all skills in a directory
|
|
668
|
+
skill-analyzer scan-all /path/to/skills
|
|
669
|
+
|
|
670
|
+
# Scan recursively with all engines
|
|
671
|
+
skill-analyzer scan-all /path/to/skills --recursive --use-behavioral --use-llm
|
|
672
|
+
|
|
673
|
+
# List available analyzers
|
|
674
|
+
skill-analyzer list-analyzers
|
|
675
|
+
|
|
676
|
+
# Validate rule signatures
|
|
677
|
+
skill-analyzer validate-rules
|
|
678
|
+
""",
|
|
679
|
+
)
|
|
680
|
+
|
|
681
|
+
subparsers = parser.add_subparsers(dest="command", help="Command to execute")
|
|
682
|
+
|
|
683
|
+
# Scan command
|
|
684
|
+
scan_parser = subparsers.add_parser("scan", help="Scan a single skill package")
|
|
685
|
+
scan_parser.add_argument("skill_directory", help="Path to skill directory")
|
|
686
|
+
scan_parser.add_argument(
|
|
687
|
+
"--format",
|
|
688
|
+
choices=["summary", "json", "markdown", "table", "sarif"],
|
|
689
|
+
default="summary",
|
|
690
|
+
help="Output format (default: summary). Use 'sarif' for GitHub Code Scanning integration.",
|
|
691
|
+
)
|
|
692
|
+
scan_parser.add_argument("--output", "-o", help="Output file path")
|
|
693
|
+
scan_parser.add_argument("--detailed", action="store_true", help="Include detailed findings")
|
|
694
|
+
scan_parser.add_argument("--compact", action="store_true", help="Compact JSON output")
|
|
695
|
+
scan_parser.add_argument(
|
|
696
|
+
"--fail-on-findings", action="store_true", help="Exit with error code if critical/high findings exist"
|
|
697
|
+
)
|
|
698
|
+
scan_parser.add_argument("--use-behavioral", action="store_true", help="Enable behavioral dataflow analysis")
|
|
699
|
+
scan_parser.add_argument(
|
|
700
|
+
"--use-llm", action="store_true", help="Enable LLM-based semantic analysis (requires API key)"
|
|
701
|
+
)
|
|
702
|
+
scan_parser.add_argument(
|
|
703
|
+
"--use-virustotal", action="store_true", help="Enable VirusTotal binary file scanning (requires API key)"
|
|
704
|
+
)
|
|
705
|
+
scan_parser.add_argument("--vt-api-key", help="VirusTotal API key (or set VIRUSTOTAL_API_KEY environment variable)")
|
|
706
|
+
scan_parser.add_argument(
|
|
707
|
+
"--vt-upload-files",
|
|
708
|
+
action="store_true",
|
|
709
|
+
help="Upload unknown files to VirusTotal (default: hash-only lookup for privacy)",
|
|
710
|
+
)
|
|
711
|
+
scan_parser.add_argument(
|
|
712
|
+
"--use-aidefense", action="store_true", help="Enable AI Defense analyzer (requires API key)"
|
|
713
|
+
)
|
|
714
|
+
scan_parser.add_argument(
|
|
715
|
+
"--aidefense-api-key", help="AI Defense API key (or set AI_DEFENSE_API_KEY environment variable)"
|
|
716
|
+
)
|
|
717
|
+
scan_parser.add_argument("--aidefense-api-url", help="AI Defense API URL (optional, defaults to US region)")
|
|
718
|
+
scan_parser.add_argument(
|
|
719
|
+
"--llm-provider", choices=["anthropic", "openai"], default="anthropic", help="LLM provider (default: anthropic)"
|
|
720
|
+
)
|
|
721
|
+
scan_parser.add_argument(
|
|
722
|
+
"--use-trigger",
|
|
723
|
+
action="store_true",
|
|
724
|
+
help="Enable trigger specificity analysis (detects overly generic descriptions)",
|
|
725
|
+
)
|
|
726
|
+
scan_parser.add_argument(
|
|
727
|
+
"--enable-meta",
|
|
728
|
+
action="store_true",
|
|
729
|
+
help="Enable meta-analysis for false positive filtering and finding prioritization (requires 2+ analyzers including LLM)",
|
|
730
|
+
)
|
|
731
|
+
|
|
732
|
+
# Scan-all command
|
|
733
|
+
scan_all_parser = subparsers.add_parser("scan-all", help="Scan multiple skill packages")
|
|
734
|
+
scan_all_parser.add_argument("skills_directory", help="Directory containing skills")
|
|
735
|
+
scan_all_parser.add_argument("--recursive", "-r", action="store_true", help="Recursively search for skills")
|
|
736
|
+
scan_all_parser.add_argument(
|
|
737
|
+
"--format",
|
|
738
|
+
choices=["summary", "json", "markdown", "table", "sarif"],
|
|
739
|
+
default="summary",
|
|
740
|
+
help="Output format (default: summary). Use 'sarif' for GitHub Code Scanning integration.",
|
|
741
|
+
)
|
|
742
|
+
scan_all_parser.add_argument("--output", "-o", help="Output file path")
|
|
743
|
+
scan_all_parser.add_argument("--detailed", action="store_true", help="Include detailed findings")
|
|
744
|
+
scan_all_parser.add_argument("--compact", action="store_true", help="Compact JSON output")
|
|
745
|
+
scan_all_parser.add_argument(
|
|
746
|
+
"--fail-on-findings", action="store_true", help="Exit with error code if any critical/high findings exist"
|
|
747
|
+
)
|
|
748
|
+
scan_all_parser.add_argument("--use-behavioral", action="store_true", help="Enable behavioral dataflow analysis")
|
|
749
|
+
scan_all_parser.add_argument(
|
|
750
|
+
"--use-llm", action="store_true", help="Enable LLM-based semantic analysis (requires API key)"
|
|
751
|
+
)
|
|
752
|
+
scan_all_parser.add_argument(
|
|
753
|
+
"--use-virustotal", action="store_true", help="Enable VirusTotal binary file scanning (requires API key)"
|
|
754
|
+
)
|
|
755
|
+
scan_all_parser.add_argument(
|
|
756
|
+
"--vt-api-key", help="VirusTotal API key (or set VIRUSTOTAL_API_KEY environment variable)"
|
|
757
|
+
)
|
|
758
|
+
scan_all_parser.add_argument(
|
|
759
|
+
"--vt-upload-files",
|
|
760
|
+
action="store_true",
|
|
761
|
+
help="Upload unknown files to VirusTotal (default: hash-only lookup for privacy)",
|
|
762
|
+
)
|
|
763
|
+
scan_all_parser.add_argument(
|
|
764
|
+
"--use-aidefense", action="store_true", help="Enable AI Defense analyzer (requires API key)"
|
|
765
|
+
)
|
|
766
|
+
scan_all_parser.add_argument(
|
|
767
|
+
"--aidefense-api-key", help="AI Defense API key (or set AI_DEFENSE_API_KEY environment variable)"
|
|
768
|
+
)
|
|
769
|
+
scan_all_parser.add_argument("--aidefense-api-url", help="AI Defense API URL (optional, defaults to US region)")
|
|
770
|
+
scan_all_parser.add_argument(
|
|
771
|
+
"--llm-provider", choices=["anthropic", "openai"], default="anthropic", help="LLM provider (default: anthropic)"
|
|
772
|
+
)
|
|
773
|
+
scan_all_parser.add_argument(
|
|
774
|
+
"--use-trigger",
|
|
775
|
+
action="store_true",
|
|
776
|
+
help="Enable trigger specificity analysis (detects overly generic descriptions)",
|
|
777
|
+
)
|
|
778
|
+
scan_all_parser.add_argument(
|
|
779
|
+
"--check-overlap", action="store_true", help="Enable cross-skill description overlap detection"
|
|
780
|
+
)
|
|
781
|
+
scan_all_parser.add_argument(
|
|
782
|
+
"--enable-meta",
|
|
783
|
+
action="store_true",
|
|
784
|
+
help="Enable meta-analysis for false positive filtering and finding prioritization (requires 2+ analyzers including LLM)",
|
|
785
|
+
)
|
|
786
|
+
|
|
787
|
+
# List analyzers command
|
|
788
|
+
subparsers.add_parser("list-analyzers", help="List available analyzers")
|
|
789
|
+
|
|
790
|
+
# Validate rules command
|
|
791
|
+
validate_parser = subparsers.add_parser("validate-rules", help="Validate rule signatures")
|
|
792
|
+
validate_parser.add_argument("--rules-file", help="Path to custom rules file")
|
|
793
|
+
|
|
794
|
+
# Parse arguments
|
|
795
|
+
args = parser.parse_args()
|
|
796
|
+
|
|
797
|
+
if not args.command:
|
|
798
|
+
parser.print_help()
|
|
799
|
+
return 1
|
|
800
|
+
|
|
801
|
+
# Execute command
|
|
802
|
+
if args.command == "scan":
|
|
803
|
+
return scan_command(args)
|
|
804
|
+
elif args.command == "scan-all":
|
|
805
|
+
return scan_all_command(args)
|
|
806
|
+
elif args.command == "list-analyzers":
|
|
807
|
+
return list_analyzers_command(args)
|
|
808
|
+
elif args.command == "validate-rules":
|
|
809
|
+
return validate_rules_command(args)
|
|
810
|
+
else:
|
|
811
|
+
parser.print_help()
|
|
812
|
+
return 1
|
|
813
|
+
|
|
814
|
+
|
|
815
|
+
# Script entry point: propagate main()'s return value as the process exit code.
if __name__ == "__main__":
    sys.exit(main())
|