anais-apk-forensic 1.0.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/LICENSE +21 -0
- package/README.md +249 -0
- package/anais.sh +669 -0
- package/analysis_tools/__pycache__/apk_basic_info.cpython-313.pyc +0 -0
- package/analysis_tools/__pycache__/apk_basic_info.cpython-314.pyc +0 -0
- package/analysis_tools/__pycache__/check_zip_encryption.cpython-313.pyc +0 -0
- package/analysis_tools/__pycache__/check_zip_encryption.cpython-314.pyc +0 -0
- package/analysis_tools/__pycache__/detect_obfuscation.cpython-313.pyc +0 -0
- package/analysis_tools/__pycache__/detect_obfuscation.cpython-314.pyc +0 -0
- package/analysis_tools/__pycache__/dex_payload_hunter.cpython-314.pyc +0 -0
- package/analysis_tools/__pycache__/entropy_analyzer.cpython-314.pyc +0 -0
- package/analysis_tools/__pycache__/error_logger.cpython-313.pyc +0 -0
- package/analysis_tools/__pycache__/error_logger.cpython-314.pyc +0 -0
- package/analysis_tools/__pycache__/find_encrypted_payload.cpython-314.pyc +0 -0
- package/analysis_tools/__pycache__/fix_apk_headers.cpython-313.pyc +0 -0
- package/analysis_tools/__pycache__/fix_apk_headers.cpython-314.pyc +0 -0
- package/analysis_tools/__pycache__/manifest_analyzer.cpython-313.pyc +0 -0
- package/analysis_tools/__pycache__/manifest_analyzer.cpython-314.pyc +0 -0
- package/analysis_tools/__pycache__/network_analyzer.cpython-313.pyc +0 -0
- package/analysis_tools/__pycache__/network_analyzer.cpython-314.pyc +0 -0
- package/analysis_tools/__pycache__/report_generator.cpython-313.pyc +0 -0
- package/analysis_tools/__pycache__/report_generator.cpython-314.pyc +0 -0
- package/analysis_tools/__pycache__/report_generator_modular.cpython-314.pyc +0 -0
- package/analysis_tools/__pycache__/sast_scanner.cpython-313.pyc +0 -0
- package/analysis_tools/__pycache__/sast_scanner.cpython-314.pyc +0 -0
- package/analysis_tools/__pycache__/so_string_analyzer.cpython-314.pyc +0 -0
- package/analysis_tools/__pycache__/yara_enhanced_analyzer.cpython-314.pyc +0 -0
- package/analysis_tools/__pycache__/yara_results_processor.cpython-314.pyc +0 -0
- package/analysis_tools/apk_basic_info.py +85 -0
- package/analysis_tools/check_zip_encryption.py +142 -0
- package/analysis_tools/detect_obfuscation.py +650 -0
- package/analysis_tools/dex_payload_hunter.py +734 -0
- package/analysis_tools/entropy_analyzer.py +335 -0
- package/analysis_tools/error_logger.py +75 -0
- package/analysis_tools/find_encrypted_payload.py +485 -0
- package/analysis_tools/fix_apk_headers.py +154 -0
- package/analysis_tools/manifest_analyzer.py +214 -0
- package/analysis_tools/network_analyzer.py +287 -0
- package/analysis_tools/report_generator.py +506 -0
- package/analysis_tools/report_generator_modular.py +885 -0
- package/analysis_tools/sast_scanner.py +412 -0
- package/analysis_tools/so_string_analyzer.py +406 -0
- package/analysis_tools/yara_enhanced_analyzer.py +330 -0
- package/analysis_tools/yara_results_processor.py +368 -0
- package/analyzer_config.json +113 -0
- package/apkid/__init__.py +32 -0
- package/apkid/__pycache__/__init__.cpython-313.pyc +0 -0
- package/apkid/__pycache__/__init__.cpython-314.pyc +0 -0
- package/apkid/__pycache__/apkid.cpython-313.pyc +0 -0
- package/apkid/__pycache__/apkid.cpython-314.pyc +0 -0
- package/apkid/__pycache__/main.cpython-313.pyc +0 -0
- package/apkid/__pycache__/main.cpython-314.pyc +0 -0
- package/apkid/__pycache__/output.cpython-313.pyc +0 -0
- package/apkid/__pycache__/rules.cpython-313.pyc +0 -0
- package/apkid/apkid.py +266 -0
- package/apkid/main.py +98 -0
- package/apkid/output.py +177 -0
- package/apkid/rules/apk/common.yara +68 -0
- package/apkid/rules/apk/obfuscators.yara +118 -0
- package/apkid/rules/apk/packers.yara +1197 -0
- package/apkid/rules/apk/protectors.yara +301 -0
- package/apkid/rules/dex/abnormal.yara +104 -0
- package/apkid/rules/dex/anti-vm.yara +568 -0
- package/apkid/rules/dex/common.yara +60 -0
- package/apkid/rules/dex/compilers.yara +434 -0
- package/apkid/rules/dex/obfuscators.yara +602 -0
- package/apkid/rules/dex/packers.yara +761 -0
- package/apkid/rules/dex/protectors.yara +520 -0
- package/apkid/rules/dll/common.yara +38 -0
- package/apkid/rules/dll/obfuscators.yara +43 -0
- package/apkid/rules/elf/anti-vm.yara +43 -0
- package/apkid/rules/elf/common.yara +54 -0
- package/apkid/rules/elf/obfuscators.yara +991 -0
- package/apkid/rules/elf/packers.yara +1128 -0
- package/apkid/rules/elf/protectors.yara +794 -0
- package/apkid/rules/res/common.yara +43 -0
- package/apkid/rules/res/obfuscators.yara +46 -0
- package/apkid/rules/res/protectors.yara +46 -0
- package/apkid/rules.py +77 -0
- package/bin/anais +3 -0
- package/dist/cli.js +82 -0
- package/dist/index.js +123 -0
- package/dist/types/index.js +2 -0
- package/dist/utils/index.js +21 -0
- package/dist/utils/output.js +44 -0
- package/dist/utils/paths.js +107 -0
- package/docs/ARCHITECTURE.txt +353 -0
- package/docs/Workflow and Reference.md +445 -0
- package/package.json +70 -0
- package/rules/yara_general_rules.yar +323 -0
- package/scripts/dynamic_analysis_helper.sh +334 -0
- package/scripts/frida/dpt_dex_dumper.js +145 -0
- package/scripts/frida/frida_dex_dump.js +145 -0
- package/scripts/frida/frida_hooks.js +437 -0
- package/scripts/frida/frida_websocket_extractor.js +154 -0
- package/scripts/setup.sh +206 -0
- package/scripts/validate_framework.sh +224 -0
- package/src/cli.ts +91 -0
- package/src/index.ts +123 -0
- package/src/types/index.ts +44 -0
- package/src/utils/index.ts +6 -0
- package/src/utils/output.ts +50 -0
- package/src/utils/paths.ts +72 -0
- package/tsconfig.json +14 -0
|
@@ -0,0 +1,330 @@
|
|
|
1
|
+
#!/usr/bin/env python3
|
|
2
|
+
"""
|
|
3
|
+
Enhanced YARA Results Analyzer
|
|
4
|
+
Provides detailed analysis, categorization, and rule tracking of YARA scan results
|
|
5
|
+
"""
|
|
6
|
+
|
|
7
|
+
import os
|
|
8
|
+
import sys
|
|
9
|
+
import json
|
|
10
|
+
import re
|
|
11
|
+
from datetime import datetime
|
|
12
|
+
|
|
13
|
+
|
|
14
|
+
def analyze_yara_results(temp_dir, output_file):
    """Enhanced YARA results analyzer with categorization and severity scoring.

    Reads the per-scan YARA output files found under *temp_dir*
    (yara_apk_results.txt, yara_decompiled_results.txt,
    yara_jadx_results.txt), categorizes each match, computes summary
    statistics, writes the aggregated JSON report to *output_file*,
    and returns the results dict.
    """

    results = {
        'summary': {
            'total_matches': 0,
            'critical_findings': 0,
            'high_severity': 0,
            'medium_severity': 0,
            'low_severity': 0,
            'files_scanned': 0,
            'total_rules_checked': 0,
            'rules_matched': 0
        },
        'findings': [],
        'categories': {},
        'rules_checked': [],
        'scan_details': {
            'apk_scan': {'files': [], 'matches': 0},
            'decompiled_scan': {'files': [], 'matches': 0},
            'jadx_scan': {'files': [], 'matches': 0}
        }
    }

    # Locate the YARA rules file so we can report which rules were checked.
    # Bug fix: the rules file ships under rules/ in the package layout
    # (rules/yara_general_rules.yar); keep the old project-root location
    # as a fallback for backward compatibility.
    script_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
    for candidate in (
        os.path.join(script_dir, 'rules', 'yara_general_rules.yar'),
        os.path.join(script_dir, 'yara_general_rules.yar'),
    ):
        if os.path.exists(candidate):
            extract_rules_info(candidate, results)
            break

    # Parse results from the three scan passes; missing files are skipped.
    yara_files = [
        ('apk', f'{temp_dir}/yara_apk_results.txt'),
        ('decompiled', f'{temp_dir}/yara_decompiled_results.txt'),
        ('jadx', f'{temp_dir}/yara_jadx_results.txt')
    ]

    for scan_type, yara_file in yara_files:
        if os.path.exists(yara_file):
            parse_yara_output(yara_file, scan_type, results)

    # Calculate summary statistics.
    findings = results['findings']
    results['summary']['total_matches'] = len(findings)
    results['summary']['rules_matched'] = len(set(f['rule_name'] for f in findings))
    results['summary']['critical_findings'] = sum(1 for f in findings if f['severity'] == 'CRITICAL')
    results['summary']['high_severity'] = sum(1 for f in findings if f['severity'] == 'HIGH')
    results['summary']['medium_severity'] = sum(1 for f in findings if f['severity'] == 'MEDIUM')
    results['summary']['low_severity'] = sum(1 for f in findings if f['severity'] == 'LOW')

    # Mark which of the checked rules actually fired, and how often.
    matched_rules = set(f['rule_name'] for f in findings)
    for rule in results['rules_checked']:
        rule['matched'] = rule['name'] in matched_rules
        rule['match_count'] = sum(1 for f in findings if f['rule_name'] == rule['name'])

    # Sort findings by severity; .get() keeps an unexpected severity value
    # from raising KeyError (it simply sorts last).
    severity_order = {'CRITICAL': 0, 'HIGH': 1, 'MEDIUM': 2, 'LOW': 3}
    findings.sort(key=lambda x: (severity_order.get(x['severity'], len(severity_order)), x['rule_name']))

    # Persist the aggregated report.
    with open(output_file, 'w') as f:
        json.dump(results, f, indent=2)

    return results
|
|
79
|
+
|
|
80
|
+
|
|
81
|
+
def extract_rules_info(yara_rules_file, results):
    """Populate results['rules_checked'] with every rule found in the .yar file."""
    try:
        with open(yara_rules_file, 'r', encoding='utf-8', errors='ignore') as fh:
            content = fh.read()

        # Lightweight regex parse: rule header, then an optional meta section
        # ending at the strings:/condition: keyword.
        rule_pattern = r'rule\s+(\w+)\s*(?:\:\s*[\w\s]+)?\s*\{'
        meta_pattern = r'meta:\s*((?:.*?\n)*?)\s*(?:strings:|condition:)'

        total_rules = 0
        for match in re.finditer(rule_pattern, content):
            name = match.group(1)
            total_rules += 1

            # Inspect the next 1000 characters for a meta "description" entry;
            # otherwise fall back to the built-in description table.
            window = content[match.start():match.start() + 1000]
            description = get_description(name)
            meta = re.search(meta_pattern, window, re.DOTALL)
            if meta:
                found = re.search(r'description\s*=\s*"([^"]+)"', meta.group(1))
                if found:
                    description = found.group(1)

            results['rules_checked'].append({
                'name': name,
                'category': get_category(name),
                'severity': get_severity(name),
                'description': description,
                'matched': False,
                'match_count': 0
            })

        results['summary']['total_rules_checked'] = total_rules

    except Exception as e:
        print(f"Error extracting rules info: {e}", file=sys.stderr)
|
|
125
|
+
|
|
126
|
+
|
|
127
|
+
def parse_yara_output(yara_file, scan_type, results):
    """Parse one YARA output file and record its findings in *results*.

    Expected format (yara CLI):
        rule_name file_path
        0xoffset:$string_id: matched_content   (only present with the -s flag)
    """
    try:
        with open(yara_file, 'r', encoding='utf-8', errors='ignore') as fh:
            text = fh.read()

        if not text.strip():
            return  # empty scan output: nothing matched

        scan_detail = results['scan_details'][f'{scan_type}_scan']

        rule, path, strings = None, None, []

        for line in text.strip().split('\n'):
            if not line.strip():
                continue

            if line.startswith('0x'):
                # String-detail line, e.g. "0x239319a:$lib_dpt: libdpt"
                pieces = line.split(':', 2)
                if len(pieces) >= 2:
                    strings.append({
                        'offset': pieces[0],
                        'identifier': pieces[1],
                        # Limit content length
                        'content': (pieces[2].strip() if len(pieces) > 2 else '')[:100]
                    })
                continue

            # Header line ("rule_name file_path"): flush the previous match
            # before starting a new one.
            if rule and path:
                save_finding(rule, path, scan_type, strings, results, scan_detail)
                strings = []

            tokens = line.split(maxsplit=1)
            if tokens:
                rule = tokens[0]
                path = tokens[1] if len(tokens) > 1 else 'unknown'

        # Flush the final pending match.
        if rule and path:
            save_finding(rule, path, scan_type, strings, results, scan_detail)

    except Exception as e:
        print(f"Error parsing YARA output {yara_file}: {e}", file=sys.stderr)
|
|
186
|
+
|
|
187
|
+
|
|
188
|
+
def save_finding(rule_name, file_path, scan_type, matched_strings, results, scan_detail):
    """Record a single YARA rule match in *results* and per-scan *scan_detail*.

    Appends a finding dict (severity/category/description/recommendation are
    derived from the rule name), updates per-scan counters, and groups the
    finding under its category.
    """
    if file_path not in scan_detail['files']:
        scan_detail['files'].append(file_path)
        # Bug fix: files_scanned used to be incremented once per finding,
        # which made it always equal total_matches. Count each distinct
        # file only once, when it is first seen for this scan pass.
        results['summary']['files_scanned'] += 1
    scan_detail['matches'] += 1

    finding = {
        'rule_name': rule_name,
        'file': file_path,
        'scan_type': scan_type,
        'matched_strings': matched_strings.copy(),
        'string_count': len(matched_strings),
        'severity': get_severity(rule_name),
        'category': get_category(rule_name),
        'description': get_description(rule_name),
        'recommendation': get_recommendation(rule_name)
    }

    results['findings'].append(finding)

    # Group findings by category for the report.
    category = finding['category']
    if category not in results['categories']:
        results['categories'][category] = []
    results['categories'][category].append(finding)
|
|
214
|
+
|
|
215
|
+
|
|
216
|
+
def get_severity(rule_name):
    """Map a YARA rule name to a severity bucket via keyword heuristics."""
    name = rule_name.lower()

    # Tiers are checked top-down; the first keyword hit wins.
    keyword_tiers = (
        ('CRITICAL', ('ransom', 'banker', 'trojan', 'rootkit', 'backdoor', 'keylogger')),
        ('HIGH', ('obfuscation', 'packer', 'encrypted', 'suspicious', 'malicious', 'exploit')),
        ('MEDIUM', ('webview', 'permission', 'crypto', 'network', 'url', 'reflection')),
    )
    for severity, keywords in keyword_tiers:
        if any(kw in name for kw in keywords):
            return severity

    return 'LOW'
|
|
233
|
+
|
|
234
|
+
|
|
235
|
+
def get_category(rule_name):
    """Assign a reporting category based on keywords in the rule name."""
    name = rule_name.lower()

    # Order matters: the first bucket with a keyword hit wins.
    buckets = (
        ('Obfuscation', ('obfuscation', 'packer', 'dexprotector', 'jiagu', 'bangcle', 'dpt', 'encrypted')),
        ('Malware', ('trojan', 'banker', 'ransomware', 'spyware', 'adware', 'malware')),
        ('Network', ('url', 'network', 'http', 'socket', 'c2', 'command')),
        ('Permissions', ('permission', 'privilege', 'root', 'admin')),
        ('Crypto', ('crypto', 'encryption', 'cipher', 'aes', 'rsa', 'base64')),
        ('Suspicious', ('suspicious', 'backdoor', 'exploit', 'vulnerability', 'reflection')),
        ('Anti-Analysis', ('anti_debug', 'anti_vm', 'emulator', 'frida', 'xposed')),
    )
    for label, keywords in buckets:
        if any(kw in name for kw in keywords):
            return label

    return 'Other'
|
|
254
|
+
|
|
255
|
+
|
|
256
|
+
def get_description(rule_name):
    """Return a human-readable description for a known rule-name pattern."""
    known = {
        'dexprotector': 'DexProtector commercial obfuscator detected - code is heavily protected',
        'jiagu': 'Qihoo 360 Jiagu packer detected - indicates protected/encrypted DEX',
        'bangcle': 'Bangcle/SecShell packer detected - DEX is encrypted',
        'dpt_shell': 'DPT-Shell packer detected - advanced protection mechanism',
        'encrypted_dex': 'Encrypted DEX payload detected - real code hidden',
        'suspicious_url': 'Suspicious or hardcoded URL found',
        'root_detection': 'Root/jailbreak detection mechanism found',
        'anti_debug': 'Anti-debugging technique detected',
        'webview_javascript': 'JavaScript interface exposed in WebView - potential XSS',
        'dynamic_code_loading': 'Dynamic code loading detected - may load malicious code at runtime',
        'reflection_usage': 'Heavy reflection usage - may hide malicious behavior',
        'native_hooks': 'Native hooking detected - may intercept system calls',
        'obfuscated_strings': 'Obfuscated strings detected - hiding malicious content',
        'command_execution': 'Command execution capability detected',
        'file_encryption': 'File encryption capability detected',
        'sms_interception': 'SMS interception detected - privacy risk',
        'call_interception': 'Call recording/interception detected',
        'data_exfiltration': 'Data exfiltration pattern detected',
        'credential_theft': 'Credential stealing pattern found'
    }

    lowered = rule_name.lower()
    for needle, text in known.items():
        if needle in lowered:
            return text

    # Unknown rule: fall back to a generic description carrying the rule name.
    return f'Security finding: {rule_name}'
|
|
285
|
+
|
|
286
|
+
|
|
287
|
+
def get_recommendation(rule_name):
    """Suggest an analyst next step for a matched rule."""
    name = rule_name.lower()

    # Ordered playbook: the first keyword group that matches wins.
    playbook = (
        (('packer', 'obfuscation', 'encrypted'),
         'Use dynamic analysis with Frida to unpack at runtime and dump DEX from memory'),
        (('trojan', 'malware'),
         'Quarantine immediately - manual review required before execution'),
        (('suspicious', 'backdoor'),
         'Investigate code context and network behavior - may be false positive'),
        (('anti',),
         'Bypass detection in controlled environment to continue analysis'),
        (('url', 'network'),
         'Monitor network traffic and validate all endpoints'),
    )
    for keywords, advice in playbook:
        if any(kw in name for kw in keywords):
            return advice

    return 'Review code manually and assess risk in context'
|
|
307
|
+
|
|
308
|
+
|
|
309
|
+
def main():
    """CLI entry point: analyze YARA results in <temp_dir>, write JSON report."""
    if len(sys.argv) < 3:
        print("Usage: yara_enhanced_analyzer.py <temp_dir> <output_file>")
        sys.exit(1)

    temp_dir, output_file = sys.argv[1], sys.argv[2]

    summary = analyze_yara_results(temp_dir, output_file)['summary']

    print(f"\n✓ Enhanced YARA analysis complete")
    print(f" Rules Checked: {summary['total_rules_checked']}")
    print(f" Rules Matched: {summary['rules_matched']}")
    print(f" Total Matches: {summary['total_matches']}")
    print(f" Critical: {summary['critical_findings']}")
    print(f" High: {summary['high_severity']}")
    print(f" Medium: {summary['medium_severity']}")
    print(f" Low: {summary['low_severity']}")


if __name__ == '__main__':
    main()
|