Kea2-python 1.0.6b0 (py3-none-any.whl)
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of Kea2-python has been flagged as potentially problematic.
- kea2/__init__.py +3 -0
- kea2/absDriver.py +56 -0
- kea2/adbUtils.py +554 -0
- kea2/assets/config_version.json +16 -0
- kea2/assets/fastbot-thirdpart.jar +0 -0
- kea2/assets/fastbot_configs/abl.strings +2 -0
- kea2/assets/fastbot_configs/awl.strings +3 -0
- kea2/assets/fastbot_configs/max.config +7 -0
- kea2/assets/fastbot_configs/max.fuzzing.strings +699 -0
- kea2/assets/fastbot_configs/max.schema.strings +1 -0
- kea2/assets/fastbot_configs/max.strings +3 -0
- kea2/assets/fastbot_configs/max.tree.pruning +27 -0
- kea2/assets/fastbot_configs/teardown.py +18 -0
- kea2/assets/fastbot_configs/widget.block.py +38 -0
- kea2/assets/fastbot_libs/arm64-v8a/libfastbot_native.so +0 -0
- kea2/assets/fastbot_libs/armeabi-v7a/libfastbot_native.so +0 -0
- kea2/assets/fastbot_libs/x86/libfastbot_native.so +0 -0
- kea2/assets/fastbot_libs/x86_64/libfastbot_native.so +0 -0
- kea2/assets/framework.jar +0 -0
- kea2/assets/kea2-thirdpart.jar +0 -0
- kea2/assets/monkeyq.jar +0 -0
- kea2/assets/quicktest.py +126 -0
- kea2/cli.py +320 -0
- kea2/fastbotManager.py +267 -0
- kea2/fastbotx/ActivityTimes.py +52 -0
- kea2/fastbotx/ReuseEntry.py +74 -0
- kea2/fastbotx/ReuseModel.py +63 -0
- kea2/fastbotx/__init__.py +7 -0
- kea2/fbm_parser.py +871 -0
- kea2/fs_lock.py +131 -0
- kea2/kea2_api.py +166 -0
- kea2/keaUtils.py +1112 -0
- kea2/kea_launcher.py +319 -0
- kea2/logWatcher.py +92 -0
- kea2/mixin.py +22 -0
- kea2/report/__init__.py +0 -0
- kea2/report/bug_report_generator.py +793 -0
- kea2/report/mixin.py +482 -0
- kea2/report/report_merger.py +797 -0
- kea2/report/templates/bug_report_template.html +3876 -0
- kea2/report/templates/merged_bug_report_template.html +3333 -0
- kea2/report/utils.py +10 -0
- kea2/resultSyncer.py +65 -0
- kea2/u2Driver.py +610 -0
- kea2/utils.py +184 -0
- kea2/version_manager.py +102 -0
- kea2_python-1.0.6b0.dist-info/METADATA +447 -0
- kea2_python-1.0.6b0.dist-info/RECORD +52 -0
- kea2_python-1.0.6b0.dist-info/WHEEL +5 -0
- kea2_python-1.0.6b0.dist-info/entry_points.txt +2 -0
- kea2_python-1.0.6b0.dist-info/licenses/LICENSE +16 -0
- kea2_python-1.0.6b0.dist-info/top_level.txt +1 -0
kea2/report/report_merger.py
@@ -0,0 +1,797 @@
+import json
+import os
+import re
+from datetime import datetime
+from pathlib import Path
+from typing import Dict, List, Optional, Tuple, Union
+from collections import defaultdict
+
+from ..utils import getLogger, catchException
+
+logger = getLogger(__name__)
+
+
+class TestReportMerger:
+    """
+    Merge multiple test result directories into a single combined dataset
+    Only processes result_*.json and coverage.log files for the simplified template
+    """
+
+    def __init__(self):
+        self.merged_data = {}
+        self.result_dirs = []
+        self._package_name: Optional[str] = None
+
+    @catchException("Error merging reports")
+    def merge_reports(self, result_paths: List[Union[str, Path]], output_dir: Optional[Union[str, Path]] = None) -> Optional[Path]:
+        """
+        Merge multiple test result directories
+
+        Args:
+            result_paths: List of paths to test result directories (res_* directories)
+            output_dir: Output directory for merged data (optional)
+
+        Returns:
+            Path to the merged data directory, or None if validation fails
+        """
+        # Convert paths and validate
+        self.result_dirs = [Path(p).resolve() for p in result_paths]
+        self._package_name = None
+
+        package_name, fatal_error = self._determine_package_name()
+        if fatal_error:
+            logger.error("Aborting merge because package validation failed.")
+            return None
+        self._package_name = package_name
+
+        # Setup output directory
+        timestamp = datetime.now().strftime("%Y%m%d%H_%M%S")
+        if output_dir is None:
+            output_dir = Path.cwd() / f"merged_report_{timestamp}"
+        else:
+            output_dir = Path(output_dir).resolve() / f"merged_report_{timestamp}"
+
+        output_dir.mkdir(parents=True, exist_ok=True)
+
+        logger.debug(f"Merging {len(self.result_dirs)} test result directories...")
+
+        # Merge different types of data
+        merged_property_stats, property_source_mapping = self._merge_property_results(output_dir)
+        merged_coverage_data = self._merge_coverage_data()
+        merged_crash_anr_data = self._merge_crash_dump_data(output_dir)
+
+        # Calculate final statistics
+        final_data = self._calculate_final_statistics(merged_property_stats, merged_coverage_data, merged_crash_anr_data, property_source_mapping)
+
+        # Add merge information to final data
+        final_data['merge_info'] = {
+            'merge_timestamp': datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
+            'source_count': len(self.result_dirs),
+            'source_directories': [str(Path(d).name) for d in self.result_dirs],
+            'package_name': self._package_name or ""
+        }
+
+        # Generate HTML report (now includes merge info)
+        report_file = self._generate_html_report(final_data, output_dir)
+
+        logger.debug(f"Reports generated successfully in: {output_dir}")
+        return report_file
+
+    def _determine_package_name(self) -> Tuple[Optional[str], bool]:
+        """
+        Ensure all reports belong to the same application and return the shared package name.
+
+        Returns:
+            tuple: (package_name, fatal_error)
+                package_name: shared package name if determined, otherwise None
+                fatal_error: True if validation should stop the merge
+        """
+        if not self.result_dirs:
+            logger.error("No result directories provided for merge.")
+            return None, True
+
+        known_package: Optional[str] = None
+
+        for result_dir in self.result_dirs:
+            package_name, fatal_error = self._extract_package_name(result_dir)
+            if fatal_error:
+                return None, True
+            if package_name is None:
+                continue
+
+            if known_package is None:
+                known_package = package_name
+            elif package_name != known_package:
+                logger.error(
+                    f"Cannot merge reports generated for different applications: "
+                    f"{result_dir.name} uses package '{package_name}' while others use '{known_package}'."
+                )
+                return None, True
+
+        if known_package:
+            logger.debug(f"Validated application package for merge: {known_package}")
+        else:
+            logger.warning("No package information found in provided report directories. Proceeding without package validation.")
+        return known_package, False
+
+    def _extract_package_name(self, result_dir: Path) -> Tuple[Optional[str], bool]:
+        """
+        Extract the application package name from a report directory.
+        """
+        config_path = result_dir / "bug_report_config.json"
+        if not config_path.exists():
+            logger.warning(f"Skipping package validation for {result_dir}: bug_report_config.json not found.")
+            return None, False
+
+        try:
+            with open(config_path, "r", encoding="utf-8") as config_file:
+                config_data = json.load(config_file)
+        except Exception as exc:
+            logger.error(f"Failed to load bug_report_config.json from {result_dir}: {exc}")
+            return None, True
+        package_names = config_data.get("packageNames")
+        if isinstance(package_names, str):
+            package_name = package_names.strip()
+            if not package_name:
+                logger.error(f"Package name is empty in bug_report_config.json for {result_dir}")
+                return None, True
+            return package_name, False
+
+        if isinstance(package_names, list):
+            valid_names = [pkg.strip() for pkg in package_names if pkg and pkg.strip()]
+            if not valid_names:
+                logger.error(f"No valid packageNames found in bug_report_config.json for {result_dir}")
+                return None, True
+            if len(valid_names) > 1:
+                logger.error(f"Multiple packageNames found in {config_path}, only single package is supported.")
+                return None, True
+            return valid_names[0], False
+
+        logger.error(f"packageNames format is invalid in {config_path}")
+        return None, True
+
+    def _merge_property_results(self, output_dir: Path = None) -> Tuple[Dict[str, Dict], Dict[str, List[Dict]]]:
+        """
+        Merge property test results from all directories
+
+        Args:
+            output_dir: The output directory where the merged report will be saved (for calculating relative paths)
+
+        Returns:
+            Tuple of (merged_property_results, property_source_mapping)
+            - merged_property_results: Merged property execution results
+            - property_source_mapping: Maps property names to list of source directory info with fail/error
+              Each entry contains: {'dir_name': str, 'report_path': str}
+        """
+        merged_results = defaultdict(lambda: {
+            "precond_satisfied": 0,
+            "executed": 0,
+            "fail": 0,
+            "error": 0
+        })
+
+        # Track which directories have fail/error for each property
+        property_source_mapping = defaultdict(list)
+
+        for result_dir in self.result_dirs:
+            result_files = list(result_dir.glob("result_*.json"))
+            html_files = list(result_dir.glob("*.html"))
+            if not result_files:
+                logger.warning(f"No result file found in {result_dir}")
+                continue
+            if not html_files:
+                logger.warning(f"No html file found in {result_dir}")
+                continue
+
+            result_file = result_files[0]  # Take the first (should be only one)
+            html_file = html_files[0]
+            dir_name = result_dir.name  # Get the directory name (e.g., res_2025072011_5048015228)
+
+            # Find the HTML report file in the result directory
+            html_report_path = None
+
+            # Calculate relative path from output_dir to the HTML file
+            try:
+                html_report_path = os.path.relpath(html_file.resolve(), output_dir.resolve())
+            except ValueError:
+                # If on different drives (Windows), use absolute path as fallback
+                html_report_path = str(html_file.resolve())
+
+            with open(result_file, 'r', encoding='utf-8') as f:
+                test_results = json.load(f)
+
+            # Merge results for each property
+            for prop_name, prop_result in test_results.items():
+                for key in ["precond_satisfied", "executed", "fail", "error"]:
+                    merged_results[prop_name][key] += prop_result.get(key, 0)
+
+                # Track source directories for properties with fail/error
+                if prop_result.get('fail', 0) > 0 or prop_result.get('error', 0) > 0:
+                    # Check if this directory is already in the mapping
+                    existing_dirs = [item['dir_name'] for item in property_source_mapping[prop_name]]
+                    if dir_name not in existing_dirs:
+                        property_source_mapping[prop_name].append({
+                            'dir_name': dir_name,
+                            'report_path': html_report_path
+                        })
+
+            logger.debug(f"Merged results from: {result_file}")
+
+        return dict(merged_results), dict(property_source_mapping)
+
+    def _merge_coverage_data(self) -> Dict:
+        """
+        Merge coverage data from all directories
+
+        Returns:
+            Final merged coverage information
+        """
+        all_activities = set()
+        tested_activities = set()
+        activity_counts = defaultdict(int)
+        total_steps = 0
+
+        for result_dir in self.result_dirs:
+            # Find coverage log file
+            output_dirs = list(result_dir.glob("output_*"))
+            if not output_dirs:
+                logger.warning(f"No output directory found in {result_dir}")
+                continue
+
+            coverage_file = output_dirs[0] / "coverage.log"
+            if not coverage_file.exists():
+                logger.warning(f"No coverage.log found in {output_dirs[0]}")
+                continue
+
+            # Read the last line of coverage.log to get final state
+            last_coverage = None
+            with open(coverage_file, 'r', encoding='utf-8') as f:
+                for line in f:
+                    if line.strip():
+                        last_coverage = json.loads(line)
+
+            if last_coverage:
+                # Collect all activities
+                all_activities.update(last_coverage.get("totalActivities", []))
+                tested_activities.update(last_coverage.get("testedActivities", []))
+
+                # Update activity counts (take maximum)
+                for activity, count in last_coverage.get("activityCountHistory", {}).items():
+                    activity_counts[activity] += count
+
+                # Add steps count
+                total_steps += last_coverage.get("stepsCount", 0)
+
+            logger.debug(f"Merged coverage data from: {coverage_file}")
+
+        # Calculate final coverage percentage (rounded to 2 decimal places)
+        coverage_percent = round((len(tested_activities) / len(all_activities) * 100), 2) if all_activities else 0.00
+
+        return {
+            "coverage_percent": coverage_percent,
+            "total_activities": list(all_activities),
+            "tested_activities": list(tested_activities),
+            "total_activities_count": len(all_activities),
+            "tested_activities_count": len(tested_activities),
+            "activity_count_history": dict(activity_counts),
+            "total_steps": total_steps
+        }
+
+    def _merge_crash_dump_data(self, output_dir: Path = None) -> Dict:
+        """
+        Merge crash and ANR data from all directories
+
+        Returns:
+            Dict containing merged crash and ANR events
+        """
+        all_crash_events = []
+        all_anr_events = []
+
+        for result_dir in self.result_dirs:
+            dir_name = result_dir.name
+
+            # Locate corresponding HTML report for hyperlinking
+            html_report_path = None
+            html_files = list(result_dir.glob("*.html"))
+            if not html_files:
+                continue
+            html_file = html_files[0]
+            try:
+                html_report_path = os.path.relpath(html_file.resolve(), output_dir.resolve())
+            except ValueError:
+                html_report_path = str(html_file.resolve())
+
+            # Find crash dump log file
+            output_dirs = list(result_dir.glob("output_*"))
+            if not output_dirs:
+                continue
+
+            crash_dump_file = output_dirs[0] / "crash-dump.log"
+            if not crash_dump_file.exists():
+                logger.debug(f"No crash-dump.log found in {output_dirs[0]}")
+                continue
+
+            try:
+                # Parse crash and ANR events from this file
+                crash_events, anr_events = self._parse_crash_dump_file(crash_dump_file)
+
+                for crash in crash_events:
+                    crash["source_directory"] = dir_name
+                    crash["report_path"] = html_report_path
+
+                for anr in anr_events:
+                    anr["source_directory"] = dir_name
+                    anr["report_path"] = html_report_path
+
+                all_crash_events.extend(crash_events)
+                all_anr_events.extend(anr_events)
+
+                logger.debug(f"Merged {len(crash_events)} crash events and {len(anr_events)} ANR events from: {crash_dump_file}")
+
+            except Exception as e:
+                logger.error(f"Error reading crash dump file {crash_dump_file}: {e}")
+                continue
+
+        # Deduplicate events based on content and timestamp
+        unique_crash_events = self._deduplicate_crash_events(all_crash_events)
+        unique_anr_events = self._deduplicate_anr_events(all_anr_events)
+
+        logger.debug(f"Total unique crash events: {len(unique_crash_events)}, ANR events: {len(unique_anr_events)}")
+
+        return {
+            "crash_events": unique_crash_events,
+            "anr_events": unique_anr_events,
+            "total_crash_count": len(unique_crash_events),
+            "total_anr_count": len(unique_anr_events)
+        }
+
+    @catchException("Error parsing crash-dump.log")
+    def _parse_crash_dump_file(self, crash_dump_file: Path) -> Tuple[List[Dict], List[Dict]]:
+        """
+        Parse crash and ANR events from crash-dump.log file
+
+        Args:
+            crash_dump_file: Path to crash-dump.log file
+
+        Returns:
+            tuple: (crash_events, anr_events) - Lists of crash and ANR event dictionaries
+        """
+        crash_events = []
+        anr_events = []
+
+        with open(crash_dump_file, "r", encoding="utf-8") as f:
+            content = f.read()
+
+        # Parse crash events
+        crash_events = self._parse_crash_events(content)
+        # Parse ANR events
+        anr_events = self._parse_anr_events(content)
+
+        return crash_events, anr_events
+
+    def _parse_crash_events(self, content: str) -> List[Dict]:
+        """
+        Parse crash events from crash-dump.log content
+
+        Args:
+            content: Content of crash-dump.log file
+
+        Returns:
+            List[Dict]: List of crash event dictionaries
+        """
+        crash_events = []
+
+        # Pattern to match crash blocks
+        crash_pattern = r'(\d{14})\ncrash:\n(.*?)\n// crash end'
+
+        for match in re.finditer(crash_pattern, content, re.DOTALL):
+            timestamp_str = match.group(1)
+            crash_content = match.group(2)
+
+            # Parse timestamp (format: YYYYMMDDHHMMSS)
+            try:
+                timestamp = datetime.strptime(timestamp_str, "%Y%m%d%H%M%S")
+                formatted_time = timestamp.strftime("%Y-%m-%d %H:%M:%S")
+            except ValueError:
+                formatted_time = timestamp_str
+
+            # Extract crash information
+            crash_info = self._extract_crash_info(crash_content)
+
+            crash_event = {
+                "time": formatted_time,
+                "exception_type": crash_info.get("exception_type", "Unknown"),
+                "process": crash_info.get("process", "Unknown"),
+                "stack_trace": crash_info.get("stack_trace", "")
+            }
+
+            crash_events.append(crash_event)
+
+        return crash_events
+
+    def _parse_anr_events(self, content: str) -> List[Dict]:
+        """
+        Parse ANR events from crash-dump.log content
+
+        Args:
+            content: Content of crash-dump.log file
+
+        Returns:
+            List[Dict]: List of ANR event dictionaries
+        """
+        anr_events = []
+
+        # Pattern to match ANR blocks
+        anr_pattern = r'(\d{14})\nanr:\n(.*?)\nanr end'
+
+        for match in re.finditer(anr_pattern, content, re.DOTALL):
+            timestamp_str = match.group(1)
+            anr_content = match.group(2)
+
+            # Parse timestamp (format: YYYYMMDDHHMMSS)
+            try:
+                timestamp = datetime.strptime(timestamp_str, "%Y%m%d%H%M%S")
+                formatted_time = timestamp.strftime("%Y-%m-%d %H:%M:%S")
+            except ValueError:
+                formatted_time = timestamp_str
+
+            # Extract ANR information
+            anr_info = self._extract_anr_info(anr_content)
+
+            anr_event = {
+                "time": formatted_time,
+                "reason": anr_info.get("reason", "Unknown"),
+                "process": anr_info.get("process", "Unknown"),
+                "trace": anr_info.get("trace", "")
+            }
+
+            anr_events.append(anr_event)
+
+        return anr_events
+
+    def _extract_crash_info(self, crash_content: str) -> Dict:
+        """
+        Extract crash information from crash content
+
+        Args:
+            crash_content: Content of a single crash block
+
+        Returns:
+            Dict: Extracted crash information
+        """
+        crash_info = {
+            "exception_type": "Unknown",
+            "process": "Unknown",
+            "stack_trace": ""
+        }
+
+        lines = crash_content.strip().split('\n')
+
+        for line in lines:
+            line = line.strip()
+
+            # Extract PID from CRASH line
+            if line.startswith("// CRASH:"):
+                # Pattern: // CRASH: process_name (pid xxxx) (dump time: ...)
+                pid_match = re.search(r'\(pid\s+(\d+)\)', line)
+                if pid_match:
+                    crash_info["process"] = pid_match.group(1)
+
+            # Extract exception type from Long Msg line
+            elif line.startswith("// Long Msg:"):
+                # Pattern: // Long Msg: ExceptionType: message
+                exception_match = re.search(r'// Long Msg:\s+([^:]+)', line)
+                if exception_match:
+                    crash_info["exception_type"] = exception_match.group(1).strip()
+
+        # Extract full stack trace (all lines starting with //)
+        stack_lines = []
+        for line in lines:
+            if line.startswith("//"):
+                # Remove the "// " prefix for cleaner display
+                clean_line = line[3:] if line.startswith("// ") else line[2:]
+                stack_lines.append(clean_line)
+
+        crash_info["stack_trace"] = '\n'.join(stack_lines)
+
+        return crash_info
+
+    def _extract_anr_info(self, anr_content: str) -> Dict:
+        """
+        Extract ANR information from ANR content
+
+        Args:
+            anr_content: Content of a single ANR block
+
+        Returns:
+            Dict: Extracted ANR information
+        """
+        anr_info = {
+            "reason": "Unknown",
+            "process": "Unknown",
+            "trace": ""
+        }
+
+        lines = anr_content.strip().split('\n')
+
+        for line in lines:
+            line = line.strip()
+
+            # Extract PID from ANR line
+            if line.startswith("// ANR:"):
+                # Pattern: // ANR: process_name (pid xxxx) (dump time: ...)
+                pid_match = re.search(r'\(pid\s+(\d+)\)', line)
+                if pid_match:
+                    anr_info["process"] = pid_match.group(1)
+
+            # Extract reason from Reason line
+            elif line.startswith("Reason:"):
+                # Pattern: Reason: Input dispatching timed out (...)
+                reason_match = re.search(r'Reason:\s+(.+)', line)
+                if reason_match:
+                    full_reason = reason_match.group(1).strip()
+                    # Simplify the reason by extracting the main part before parentheses
+                    simplified_reason = self._simplify_anr_reason(full_reason)
+                    anr_info["reason"] = simplified_reason
+
+        # Store the full ANR trace content
+        anr_info["trace"] = anr_content
+
+        return anr_info
+
+    def _simplify_anr_reason(self, full_reason: str) -> str:
+        """
+        Simplify ANR reason by extracting the main part
+
+        Args:
+            full_reason: Full ANR reason string
+
+        Returns:
+            str: Simplified ANR reason
+        """
+        # Common ANR reason patterns to simplify
+        simplification_patterns = [
+            # Input dispatching timed out (details...) -> Input dispatching timed out
+            (r'^(Input dispatching timed out)\s*\(.*\).*$', r'\1'),
+            # Broadcast of Intent (details...) -> Broadcast timeout
+            (r'^Broadcast of Intent.*$', 'Broadcast timeout'),
+            # Service timeout -> Service timeout
+            (r'^Service.*timeout.*$', 'Service timeout'),
+            # ContentProvider timeout -> ContentProvider timeout
+            (r'^ContentProvider.*timeout.*$', 'ContentProvider timeout'),
+        ]
+
+        # Apply simplification patterns
+        for pattern, replacement in simplification_patterns:
+            match = re.match(pattern, full_reason, re.IGNORECASE)
+            if match:
+                if callable(replacement):
+                    return replacement(match)
+                elif '\\1' in replacement:
+                    return re.sub(pattern, replacement, full_reason, flags=re.IGNORECASE)
+                else:
+                    return replacement
+
+        # If no pattern matches, try to extract the part before the first parenthesis
+        paren_match = re.match(r'^([^(]+)', full_reason)
+        if paren_match:
+            simplified = paren_match.group(1).strip()
+            # Remove trailing punctuation
+            simplified = re.sub(r'[.,;:]+$', '', simplified)
+            return simplified
+
+        # If all else fails, return the original but truncated
+        return full_reason[:50] + "..." if len(full_reason) > 50 else full_reason
+
+    def _deduplicate_crash_events(self, crash_events: List[Dict]) -> List[Dict]:
+        """
+        Deduplicate crash events based on exception type and stack trace
+
+        Args:
+            crash_events: List of crash events
+
+        Returns:
+            List[Dict]: Deduplicated crash events
+        """
+        seen_crashes = set()
+        unique_crashes = []
+
+        for crash in crash_events:
+            # Create a hash key based on exception type and first few lines of stack trace
+            exception_type = crash.get("exception_type", "")
+            stack_trace = crash.get("stack_trace", "")
+
+            # Use first 3 lines of stack trace for deduplication
+            stack_lines = stack_trace.split('\n')[:3]
+            crash_key = (
+                exception_type,
+                '\n'.join(stack_lines),
+                crash.get("source_directory", "")
+            )
+
+            if crash_key not in seen_crashes:
+                seen_crashes.add(crash_key)
+                unique_crashes.append(crash)
+
+        return unique_crashes
+
+    def _deduplicate_anr_events(self, anr_events: List[Dict]) -> List[Dict]:
+        """
+        Deduplicate ANR events based on reason and process
+
+        Args:
+            anr_events: List of ANR events
+
+        Returns:
+            List[Dict]: Deduplicated ANR events
+        """
+        seen_anrs = set()
+        unique_anrs = []
+
+        for anr in anr_events:
+            # Create a hash key based on reason and process
+            reason = anr.get("reason", "")
+            process = anr.get("process", "")
+            anr_key = (reason, process, anr.get("source_directory", ""))
+
+            if anr_key not in seen_anrs:
+                seen_anrs.add(anr_key)
+                unique_anrs.append(anr)
+
+        return unique_anrs
+
+    def _calculate_final_statistics(self, property_stats: Dict, coverage_data: Dict, crash_anr_data: Dict = None, property_source_mapping: Dict = None) -> Dict:
+        """
+        Calculate final statistics for template rendering
+
+        Note: Total bugs count only includes property test failures/errors,
+        not crashes or ANRs (which are tracked separately)
+
+        Args:
+            property_stats: Merged property statistics
+            coverage_data: Merged coverage data
+            crash_anr_data: Merged crash and ANR data (optional)
+            property_source_mapping: Maps property names to source directories with fail/error (optional)
+
+        Returns:
+            Complete data for template rendering
+        """
+        # Calculate bug count from property failures
+        property_bugs_found = sum(1 for result in property_stats.values()
+                                  if result.get('fail', 0) > 0 or result.get('error', 0) > 0)
+
+        # Calculate property counts
+        all_properties_count = len(property_stats)
+        executed_properties_count = sum(1 for result in property_stats.values()
+                                        if result.get('executed', 0) > 0)
+
+        # Initialize crash/ANR data
+        crash_events = []
+        anr_events = []
+        total_crash_count = 0
+        total_anr_count = 0
+
+        if crash_anr_data:
+            crash_events = crash_anr_data.get('crash_events', [])
+            anr_events = crash_anr_data.get('anr_events', [])
+            total_crash_count = crash_anr_data.get('total_crash_count', 0)
+            total_anr_count = crash_anr_data.get('total_anr_count', 0)
+
+        # Calculate total bugs found (only property bugs, not including crashes/ANRs)
+        total_bugs_found = property_bugs_found
+
+        # Prepare enhanced property statistics with derived metrics
+        processed_property_stats = {}
+        property_stats_summary = {
+            "total_properties": 0,
+            "total_precond_satisfied": 0,
+            "total_executed": 0,
+            "total_passes": 0,
+            "total_fails": 0,
+            "total_errors": 0,
+            "total_not_executed": 0,
+        }
+
+        for prop_name, stats in property_stats.items():
+            precond_satisfied = stats.get("precond_satisfied", 0)
+            total_executions = stats.get("executed", 0)
+            fail_count = stats.get("fail", 0)
+            error_count = stats.get("error", 0)
+
+            pass_count = max(total_executions - fail_count - error_count, 0)
+            not_executed_count = max(precond_satisfied - total_executions, 0)
+
+            processed_property_stats[prop_name] = {
+                **stats,
+                "executed_total": total_executions,
+                "pass_count": pass_count,
+                "not_executed": not_executed_count,
+            }
+
+            property_stats_summary["total_properties"] += 1
+            property_stats_summary["total_precond_satisfied"] += precond_satisfied
+            property_stats_summary["total_executed"] += total_executions
+            property_stats_summary["total_passes"] += pass_count
+            property_stats_summary["total_fails"] += fail_count
+            property_stats_summary["total_errors"] += error_count
+            property_stats_summary["total_not_executed"] += not_executed_count
+
+        # Prepare final data
+        final_data = {
+            'timestamp': datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
+            'bugs_found': total_bugs_found,
+            'property_bugs_found': property_bugs_found,
+            'all_properties_count': all_properties_count,
+            'executed_properties_count': executed_properties_count,
+            'property_stats': processed_property_stats,
+            'property_stats_summary': property_stats_summary,
+            'property_source_mapping': property_source_mapping or {},
+            'crash_events': crash_events,
+            'anr_events': anr_events,
+            'total_crash_count': total_crash_count,
+            'total_anr_count': total_anr_count,
+            **coverage_data  # Include all coverage data
+        }
+
+        return final_data
+
+    def get_merge_summary(self) -> Dict:
+        """
+        Get summary of the merge operation
+
+        Returns:
+            Dictionary containing merge summary information
+        """
+        if not self.result_dirs:
+            return {}
+
+        summary = {
+            "merged_directories": len(self.result_dirs),
+            "source_paths": [str(p) for p in self.result_dirs],
+            "merge_timestamp": datetime.now().isoformat()
+        }
+        if self._package_name:
+            summary["package_name"] = self._package_name
+        return summary
+
+    @catchException("Error generating HTML report")
+    def _generate_html_report(self, data: Dict, output_dir: Path) -> Path:
+        """
+        Generate HTML report using the merged template
+
+        Args:
+            data: Final merged data
+            output_dir: Output directory
+
+        Returns:
+            Path to the generated HTML report
+        """
+        from jinja2 import Environment, FileSystemLoader, PackageLoader, select_autoescape
+
+        # Set up Jinja2 environment
+        try:
+            jinja_env = Environment(
+                loader=PackageLoader("kea2.report", "templates"),
+                autoescape=select_autoescape(['html', 'xml'])
+            )
+        except (ImportError, ValueError):
+            # Fallback to file system loader
+            current_dir = Path(__file__).parent
+            templates_dir = current_dir / "templates"
+
+            jinja_env = Environment(
+                loader=FileSystemLoader(templates_dir),
+                autoescape=select_autoescape(['html', 'xml'])
+            )
+
+        # Render template
+        template = jinja_env.get_template("merged_bug_report_template.html")
+        html_content = template.render(**data)
+
+        # Save HTML report
+        report_file = output_dir / "merged_report.html"
+        with open(report_file, 'w', encoding='utf-8') as f:
+            f.write(html_content)
+
+        logger.debug(f"HTML report generated: {report_file}")
+        return report_file