Kea2-python 1.1.0b1 (kea2_python-1.1.0b1-py3-none-any.whl)

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (49)
  1. kea2/__init__.py +8 -0
  2. kea2/absDriver.py +56 -0
  3. kea2/adbUtils.py +554 -0
  4. kea2/assets/config_version.json +16 -0
  5. kea2/assets/fastbot-thirdpart.jar +0 -0
  6. kea2/assets/fastbot_configs/abl.strings +2 -0
  7. kea2/assets/fastbot_configs/awl.strings +3 -0
  8. kea2/assets/fastbot_configs/max.config +7 -0
  9. kea2/assets/fastbot_configs/max.fuzzing.strings +699 -0
  10. kea2/assets/fastbot_configs/max.schema.strings +1 -0
  11. kea2/assets/fastbot_configs/max.strings +3 -0
  12. kea2/assets/fastbot_configs/max.tree.pruning +27 -0
  13. kea2/assets/fastbot_configs/teardown.py +18 -0
  14. kea2/assets/fastbot_configs/widget.block.py +38 -0
  15. kea2/assets/fastbot_libs/arm64-v8a/libfastbot_native.so +0 -0
  16. kea2/assets/fastbot_libs/armeabi-v7a/libfastbot_native.so +0 -0
  17. kea2/assets/fastbot_libs/x86/libfastbot_native.so +0 -0
  18. kea2/assets/fastbot_libs/x86_64/libfastbot_native.so +0 -0
  19. kea2/assets/framework.jar +0 -0
  20. kea2/assets/kea2-thirdpart.jar +0 -0
  21. kea2/assets/monkeyq.jar +0 -0
  22. kea2/assets/quicktest.py +126 -0
  23. kea2/cli.py +216 -0
  24. kea2/fastbotManager.py +269 -0
  25. kea2/kea2_api.py +166 -0
  26. kea2/keaUtils.py +926 -0
  27. kea2/kea_launcher.py +299 -0
  28. kea2/logWatcher.py +92 -0
  29. kea2/mixin.py +0 -0
  30. kea2/report/__init__.py +0 -0
  31. kea2/report/bug_report_generator.py +879 -0
  32. kea2/report/mixin.py +496 -0
  33. kea2/report/report_merger.py +1066 -0
  34. kea2/report/templates/bug_report_template.html +4028 -0
  35. kea2/report/templates/merged_bug_report_template.html +3602 -0
  36. kea2/report/utils.py +10 -0
  37. kea2/result.py +257 -0
  38. kea2/resultSyncer.py +65 -0
  39. kea2/state.py +22 -0
  40. kea2/typedefs.py +32 -0
  41. kea2/u2Driver.py +612 -0
  42. kea2/utils.py +192 -0
  43. kea2/version_manager.py +102 -0
  44. kea2_python-1.1.0b1.dist-info/METADATA +447 -0
  45. kea2_python-1.1.0b1.dist-info/RECORD +49 -0
  46. kea2_python-1.1.0b1.dist-info/WHEEL +5 -0
  47. kea2_python-1.1.0b1.dist-info/entry_points.txt +2 -0
  48. kea2_python-1.1.0b1.dist-info/licenses/LICENSE +16 -0
  49. kea2_python-1.1.0b1.dist-info/top_level.txt +1 -0
kea2/report/report_merger.py
@@ -0,0 +1,1066 @@
1
+ import json
2
+ import os
3
+ import re
4
+ from datetime import datetime
5
+ from pathlib import Path
6
+ from typing import Dict, List, Optional, Tuple, Union
7
+ from collections import defaultdict
8
+
9
+ from ..utils import getLogger, catchException
10
+
11
+ logger = getLogger(__name__)
12
+
13
+
14
+ class TestReportMerger:
15
+ """
16
+ Merge multiple test result directories into a single combined dataset
17
+ Processes result_*.json, coverage.log, steps.log, crash-dump.log and bug_report_config.json files to build the merged report
18
+ """
19
+
20
+ def __init__(self):
21
+ self.merged_data = {}
22
+ self.result_dirs = []
23
+ self._package_name: Optional[str] = None
24
+
25
+ @catchException("Error merging reports")
26
+ def merge_reports(self, result_paths: List[Union[str, Path]], output_dir: Optional[Union[str, Path]] = None) -> Optional[Path]:
27
+ """
28
+ Merge multiple test result directories
29
+
30
+ Args:
31
+ result_paths: List of paths to test result directories (res_* directories)
32
+ output_dir: Output directory for merged data (optional)
33
+
34
+ Returns:
35
+ Path to the generated merged HTML report, or None if validation fails
36
+ """
37
+ # Convert paths and validate
38
+ self.result_dirs = [Path(p).resolve() for p in result_paths]
39
+ self._package_name = None
40
+
41
+ package_name, fatal_error = self._determine_package_name()
42
+ if fatal_error:
43
+ logger.error("Aborting merge because package validation failed.")
44
+ return None
45
+ self._package_name = package_name
46
+
47
+ # Setup output directory
48
+ timestamp = datetime.now().strftime("%Y%m%d%H_%M%S")
49
+ if output_dir is None:
50
+ output_dir = Path.cwd() / f"merged_report_{timestamp}"
51
+ else:
52
+ output_dir = Path(output_dir).resolve() / f"merged_report_{timestamp}"
53
+
54
+ output_dir.mkdir(parents=True, exist_ok=True)
55
+
56
+ logger.debug(f"Merging {len(self.result_dirs)} test result directories...")
57
+
58
+ # Merge different types of data
59
+ merged_property_stats, property_source_mapping = self._merge_property_results(output_dir)
60
+ property_kinds = self._collect_property_kinds()
61
+ source_summaries = self._collect_source_summaries()
62
+ merged_coverage_data = self._merge_coverage_data()
63
+ merged_crash_anr_data = self._merge_crash_dump_data(output_dir)
64
+
65
+ # Calculate final statistics
66
+ final_data = self._calculate_final_statistics(
67
+ merged_property_stats,
68
+ merged_coverage_data,
69
+ merged_crash_anr_data,
70
+ property_source_mapping,
71
+ property_kinds,
72
+ )
73
+
74
+ # Add merge information to final data
75
+ final_data['merge_info'] = {
76
+ 'merge_timestamp': datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
77
+ 'source_count': len(self.result_dirs),
78
+ 'source_directories': [str(Path(d).name) for d in self.result_dirs],
79
+ 'source_summaries': source_summaries,
80
+ 'package_name': self._package_name or ""
81
+ }
82
+
83
+ # Generate HTML report (now includes merge info)
84
+ report_file = self._generate_html_report(final_data, output_dir)
85
+
86
+ logger.debug(f"Reports generated successfully in: {output_dir}")
87
+ return report_file
88
+
89
+ def _collect_source_summaries(self) -> List[Dict]:
90
+ """
91
+ Collect summary statistics for each source result directory.
92
+ """
93
+ summaries = []
94
+
95
+ for result_dir in self.result_dirs:
96
+ summaries.append(self._build_source_summary(result_dir))
97
+
98
+ return summaries
99
+
100
+ def _build_source_summary(self, result_dir: Path) -> Dict:
101
+ """
102
+ Build summary statistics for a single result directory.
103
+ """
104
+ summary = {
105
+ "dir_name": result_dir.name,
106
+ "property_violations": 0,
107
+ "invariant_violations": 0,
108
+ "total_testing_time": "00:00:00",
109
+ "executed_events": 0,
110
+ "coverage_percent": "0.00%",
111
+ "executed_properties": "0/0",
112
+ "crash_count": 0,
113
+ "anr_count": 0,
114
+ }
115
+
116
+ result_files = list(result_dir.glob("result_*.json"))
117
+ if not result_files:
118
+ return summary
119
+
120
+ output_dirs = list(result_dir.glob("output_*"))
121
+ output_dir = output_dirs[0] if output_dirs else None
122
+
123
+ property_kinds = self._collect_property_kinds_for_dir(result_dir)
124
+
125
+ try:
126
+ with open(result_files[0], "r", encoding="utf-8") as f:
127
+ property_stats = json.load(f)
128
+ except Exception as exc:
129
+ logger.warning(f"Failed to read {result_files[0]}: {exc}")
130
+ return summary
131
+
132
+ all_properties_count = 0
133
+ executed_properties_count = 0
134
+
135
+ for prop_name, stats in property_stats.items():
136
+ fail_count = stats.get("fail", 0)
137
+ error_count = stats.get("error", 0)
138
+ total_executions = stats.get("executed", 0)
139
+ kind = property_kinds.get(prop_name, "unknown")
140
+ if kind not in {"property", "invariant"}:
141
+ kind = "unknown"
142
+
143
+ if kind != "invariant":
144
+ all_properties_count += 1
145
+ if total_executions > 0:
146
+ executed_properties_count += 1
147
+ if fail_count > 0 or error_count > 0:
148
+ summary["property_violations"] += 1
149
+ elif fail_count > 0 or error_count > 0:
150
+ summary["invariant_violations"] += 1
151
+
152
+ summary["executed_properties"] = f"{executed_properties_count}/{all_properties_count}"
153
+
154
+ if output_dir:
155
+ steps_log = output_dir / "steps.log"
156
+ if steps_log.exists():
157
+ executed_events, total_time = self._extract_steps_log_summary(steps_log)
158
+ summary["executed_events"] = executed_events
159
+ summary["total_testing_time"] = total_time
160
+
161
+ coverage_file = output_dir / "coverage.log"
162
+ if coverage_file.exists():
163
+ coverage_percent = self._extract_coverage_percent(coverage_file)
164
+ summary["coverage_percent"] = f"{coverage_percent:.2f}%"
165
+
166
+ crash_dump_file = output_dir / "crash-dump.log"
167
+ if crash_dump_file.exists():
168
+ parsed_events = self._parse_crash_dump_file(crash_dump_file)
169
+ if parsed_events:
170
+ crash_events, anr_events = parsed_events
171
+ summary["crash_count"] = len(crash_events)
172
+ summary["anr_count"] = len(anr_events)
173
+
174
+ return summary
175
+
176
+ def _collect_property_kinds_for_dir(self, result_dir: Path) -> Dict[str, str]:
177
+ """
178
+ Collect property kind metadata from a single result directory.
179
+ """
180
+ property_kinds: Dict[str, str] = {}
181
+
182
+ result_files = list(result_dir.glob("result_*.json"))
183
+ if not result_files:
184
+ return property_kinds
185
+
186
+ try:
187
+ with open(result_files[0], "r", encoding="utf-8") as f:
188
+ result_data = json.load(f)
189
+
190
+ for prop_name, stats in result_data.items():
191
+ kind = stats.get("kind", "")
192
+ if not prop_name or not isinstance(kind, str) or not kind:
193
+ continue
194
+
195
+ normalized_kind = kind.strip().lower()
196
+ if not normalized_kind:
197
+ continue
198
+
199
+ property_kinds.setdefault(prop_name, normalized_kind)
200
+ except Exception as exc:
201
+ logger.warning(f"Failed to parse {result_files[0]}: {exc}")
202
+
203
+ return property_kinds
204
+
205
+ def _extract_steps_log_summary(self, steps_log: Path) -> Tuple[int, str]:
206
+ """
207
+ Extract executed events count and total testing time from steps.log.
208
+ """
209
+ executed_events = 0
210
+ first_step_time = None
211
+ last_step_time = None
212
+
213
+ with open(steps_log, "r", encoding="utf-8") as f:
214
+ for line in f:
215
+ line = line.strip()
216
+ if not line:
217
+ continue
218
+ try:
219
+ step_data = json.loads(line)
220
+ except json.JSONDecodeError:
221
+ continue
222
+
223
+ step_type = step_data.get("Type", "")
224
+ if step_type in {"Monkey", "Fuzz"}:
225
+ executed_events += 1
226
+
227
+ step_time = step_data.get("Time")
228
+ if step_time:
229
+ if first_step_time is None:
230
+ first_step_time = step_time
231
+ last_step_time = step_time
232
+
233
+ total_testing_time = "00:00:00"
234
+ if first_step_time and last_step_time:
235
+ def _get_datetime(raw_datetime: str) -> datetime:
236
+ return datetime.strptime(raw_datetime, r"%Y-%m-%d %H:%M:%S.%f")
237
+
238
+ time_delta = _get_datetime(last_step_time) - _get_datetime(first_step_time)
239
+ total_seconds = int(time_delta.total_seconds())
240
+ hours, remainder = divmod(total_seconds, 3600)
241
+ minutes, seconds = divmod(remainder, 60)
242
+ total_testing_time = f"{hours:02d}:{minutes:02d}:{seconds:02d}"
243
+
244
+ return executed_events, total_testing_time
245
+
246
+ def _extract_coverage_percent(self, coverage_file: Path) -> float:
247
+ """
248
+ Extract final coverage percent from coverage.log.
249
+ """
250
+ last_coverage = None
251
+ with open(coverage_file, "r", encoding="utf-8") as f:
252
+ for line in f:
253
+ if line.strip():
254
+ last_coverage = json.loads(line)
255
+ if last_coverage and "coverage" in last_coverage:
256
+ return round(float(last_coverage.get("coverage", 0)), 2)
257
+ return 0.0
258
+
259
+ def _collect_property_kinds(self) -> Dict[str, str]:
260
+ """
261
+ Collect property kind metadata from result_*.json files across result directories.
262
+
263
+ Returns:
264
+ Dict[str, str]: Mapping of property name to kind (property/invariant)
265
+ """
266
+ property_kinds: Dict[str, str] = {}
267
+
268
+ for result_dir in self.result_dirs:
269
+ result_files = list(result_dir.glob("result_*.json"))
270
+ if not result_files:
271
+ continue
272
+
273
+ try:
274
+ with open(result_files[0], "r", encoding="utf-8") as f:
275
+ result_data = json.load(f)
276
+
277
+ for prop_name, stats in result_data.items():
278
+ kind = stats.get("kind", "")
279
+ if not prop_name or not isinstance(kind, str) or not kind:
280
+ continue
281
+
282
+ normalized_kind = kind.strip().lower()
283
+ if not normalized_kind:
284
+ continue
285
+
286
+ existing = property_kinds.get(prop_name)
287
+ if existing and existing != normalized_kind:
288
+ logger.debug(
289
+ "Property kind conflict for %s: %s vs %s",
290
+ prop_name,
291
+ existing,
292
+ normalized_kind,
293
+ )
294
+ continue
295
+
296
+ property_kinds.setdefault(prop_name, normalized_kind)
297
+ except Exception as exc:
298
+ logger.warning(f"Failed to parse {result_files[0]}: {exc}")
299
+ continue
300
+
301
+ return property_kinds
302
+
303
+ def _determine_package_name(self) -> Tuple[Optional[str], bool]:
304
+ """
305
+ Ensure all reports belong to the same application and return the shared package name.
306
+
307
+ Returns:
308
+ tuple: (package_name, fatal_error)
309
+ package_name: shared package name if determined, otherwise None
310
+ fatal_error: True if validation should stop the merge
311
+ """
312
+ if not self.result_dirs:
313
+ logger.error("No result directories provided for merge.")
314
+ return None, True
315
+
316
+ known_package: Optional[str] = None
317
+
318
+ for result_dir in self.result_dirs:
319
+ package_name, fatal_error = self._extract_package_name(result_dir)
320
+ if fatal_error:
321
+ return None, True
322
+ if package_name is None:
323
+ continue
324
+
325
+ if known_package is None:
326
+ known_package = package_name
327
+ elif package_name != known_package:
328
+ logger.error(
329
+ f"Cannot merge reports generated for different applications: "
330
+ f"{result_dir.name} uses package '{package_name}' while others use '{known_package}'."
331
+ )
332
+ return None, True
333
+
334
+ if known_package:
335
+ logger.debug(f"Validated application package for merge: {known_package}")
336
+ else:
337
+ logger.warning("No package information found in provided report directories. Proceeding without package validation.")
338
+ return known_package, False
339
+
340
+ def _extract_package_name(self, result_dir: Path) -> Tuple[Optional[str], bool]:
341
+ """
342
+ Extract the application package name from a report directory.
343
+ """
344
+ config_path = result_dir / "bug_report_config.json"
345
+ if not config_path.exists():
346
+ logger.warning(f"Skipping package validation for {result_dir}: bug_report_config.json not found.")
347
+ return None, False
348
+
349
+ try:
350
+ with open(config_path, "r", encoding="utf-8") as config_file:
351
+ config_data = json.load(config_file)
352
+ except Exception as exc:
353
+ logger.error(f"Failed to load bug_report_config.json from {result_dir}: {exc}")
354
+ return None, True
355
+ package_names = config_data.get("packageNames")
356
+ if isinstance(package_names, str):
357
+ package_name = package_names.strip()
358
+ if not package_name:
359
+ logger.error(f"Package name is empty in bug_report_config.json for {result_dir}")
360
+ return None, True
361
+ return package_name, False
362
+
363
+ if isinstance(package_names, list):
364
+ valid_names = [pkg.strip() for pkg in package_names if pkg and pkg.strip()]
365
+ if not valid_names:
366
+ logger.error(f"No valid packageNames found in bug_report_config.json for {result_dir}")
367
+ return None, True
368
+ if len(valid_names) > 1:
369
+ logger.error(f"Multiple packageNames found in {config_path}, only single package is supported.")
370
+ return None, True
371
+ return valid_names[0], False
372
+
373
+ logger.error(f"packageNames format is invalid in {config_path}")
374
+ return None, True
375
+
376
+ def _merge_property_results(self, output_dir: Optional[Path] = None) -> Tuple[Dict[str, Dict], Dict[str, List[Dict]]]:
377
+ """
378
+ Merge property test results from all directories
379
+
380
+ Args:
381
+ output_dir: The output directory where the merged report will be saved (for calculating relative paths)
382
+
383
+ Returns:
384
+ Tuple of (merged_property_results, property_source_mapping)
385
+ - merged_property_results: Merged property execution results
386
+ - property_source_mapping: Maps property names to list of source directory info with fail/error
387
+ Each entry contains: {'dir_name': str, 'report_path': str}
388
+ """
389
+ merged_results = defaultdict(lambda: {
390
+ "precond_satisfied": 0,
391
+ "executed": 0,
392
+ "fail": 0,
393
+ "error": 0
394
+ })
395
+
396
+ # Track which directories have fail/error for each property
397
+ property_source_mapping = defaultdict(list)
398
+
399
+ for result_dir in self.result_dirs:
400
+ result_files = list(result_dir.glob("result_*.json"))
401
+ html_files = list(result_dir.glob("*.html"))
402
+ if not result_files:
403
+ logger.warning(f"No result file found in {result_dir}")
404
+ continue
405
+ if not html_files:
406
+ logger.warning(f"No html file found in {result_dir}")
407
+ continue
408
+
409
+ result_file = result_files[0] # Take the first (should be only one)
410
+ html_file = html_files[0]
411
+ dir_name = result_dir.name # Get the directory name (e.g., res_2025072011_5048015228)
412
+
413
+ # Find the HTML report file in the result directory
414
+ html_report_path = None
415
+
416
+ # Calculate relative path from output_dir to the HTML file
417
+ try:
418
+ html_report_path = os.path.relpath(html_file.resolve(), output_dir.resolve())
419
+ except ValueError:
420
+ # If on different drives (Windows), use absolute path as fallback
421
+ html_report_path = str(html_file.resolve())
422
+
423
+ with open(result_file, 'r', encoding='utf-8') as f:
424
+ test_results = json.load(f)
425
+
426
+ # Merge results for each property
427
+ for prop_name, prop_result in test_results.items():
428
+ for key in ["precond_satisfied", "executed", "fail", "error"]:
429
+ merged_results[prop_name][key] += prop_result.get(key, 0)
430
+
431
+ # Track source directories for properties with fail/error
432
+ if prop_result.get('fail', 0) > 0 or prop_result.get('error', 0) > 0:
433
+ # Check if this directory is already in the mapping
434
+ existing_dirs = [item['dir_name'] for item in property_source_mapping[prop_name]]
435
+ if dir_name not in existing_dirs:
436
+ property_source_mapping[prop_name].append({
437
+ 'dir_name': dir_name,
438
+ 'report_path': html_report_path
439
+ })
440
+
441
+ logger.debug(f"Merged results from: {result_file}")
442
+
443
+ return dict(merged_results), dict(property_source_mapping)
444
+
445
+ def _merge_coverage_data(self) -> Dict:
446
+ """
447
+ Merge coverage data from all directories
448
+
449
+ Returns:
450
+ Final merged coverage information
451
+ """
452
+ all_activities = set()
453
+ tested_activities = set()
454
+ activity_counts = defaultdict(int)
455
+ total_steps = 0
456
+
457
+ for result_dir in self.result_dirs:
458
+ # Find coverage log file
459
+ output_dirs = list(result_dir.glob("output_*"))
460
+ if not output_dirs:
461
+ logger.warning(f"No output directory found in {result_dir}")
462
+ continue
463
+
464
+ coverage_file = output_dirs[0] / "coverage.log"
465
+ if not coverage_file.exists():
466
+ logger.warning(f"No coverage.log found in {output_dirs[0]}")
467
+ continue
468
+
469
+ # Read the last line of coverage.log to get final state
470
+ last_coverage = None
471
+ with open(coverage_file, 'r', encoding='utf-8') as f:
472
+ for line in f:
473
+ if line.strip():
474
+ last_coverage = json.loads(line)
475
+
476
+ if last_coverage:
477
+ # Collect all activities
478
+ all_activities.update(last_coverage.get("totalActivities", []))
479
+ tested_activities.update(last_coverage.get("testedActivities", []))
480
+
481
+ # Accumulate activity counts across runs
482
+ for activity, count in last_coverage.get("activityCountHistory", {}).items():
483
+ activity_counts[activity] += count
484
+
485
+ # Add steps count
486
+ total_steps += last_coverage.get("stepsCount", 0)
487
+
488
+ logger.debug(f"Merged coverage data from: {coverage_file}")
489
+
490
+ # Calculate final coverage percentage (rounded to 2 decimal places)
491
+ coverage_percent = round((len(tested_activities) / len(all_activities) * 100), 2) if all_activities else 0.00
492
+
493
+ return {
494
+ "coverage_percent": coverage_percent,
495
+ "total_activities": list(all_activities),
496
+ "tested_activities": list(tested_activities),
497
+ "total_activities_count": len(all_activities),
498
+ "tested_activities_count": len(tested_activities),
499
+ "activity_count_history": dict(activity_counts),
500
+ "total_steps": total_steps
501
+ }
502
+
503
+ def _merge_crash_dump_data(self, output_dir: Optional[Path] = None) -> Dict:
504
+ """
505
+ Merge crash and ANR data from all directories
506
+
507
+ Returns:
508
+ Dict containing merged crash and ANR events
509
+ """
510
+ all_crash_events = []
511
+ all_anr_events = []
512
+
513
+ for result_dir in self.result_dirs:
514
+ dir_name = result_dir.name
515
+
516
+ # Locate corresponding HTML report for hyperlinking
517
+ html_report_path = None
518
+ html_files = list(result_dir.glob("*.html"))
519
+ if not html_files:
520
+ continue
521
+ html_file = html_files[0]
522
+ try:
523
+ html_report_path = os.path.relpath(html_file.resolve(), output_dir.resolve())
524
+ except ValueError:
525
+ html_report_path = str(html_file.resolve())
526
+
527
+ # Find crash dump log file
528
+ output_dirs = list(result_dir.glob("output_*"))
529
+ if not output_dirs:
530
+ continue
531
+
532
+ crash_dump_file = output_dirs[0] / "crash-dump.log"
533
+ if not crash_dump_file.exists():
534
+ logger.debug(f"No crash-dump.log found in {output_dirs[0]}")
535
+ continue
536
+
537
+ try:
538
+ # Parse crash and ANR events from this file
539
+ crash_events, anr_events = self._parse_crash_dump_file(crash_dump_file)
540
+
541
+ for crash in crash_events:
542
+ crash["source_directory"] = dir_name
543
+ crash["report_path"] = html_report_path
544
+
545
+ for anr in anr_events:
546
+ anr["source_directory"] = dir_name
547
+ anr["report_path"] = html_report_path
548
+
549
+ all_crash_events.extend(crash_events)
550
+ all_anr_events.extend(anr_events)
551
+
552
+ logger.debug(f"Merged {len(crash_events)} crash events and {len(anr_events)} ANR events from: {crash_dump_file}")
553
+
554
+ except Exception as e:
555
+ logger.error(f"Error reading crash dump file {crash_dump_file}: {e}")
556
+ continue
557
+
558
+ # Deduplicate events based on content and timestamp
559
+ unique_crash_events = self._deduplicate_crash_events(all_crash_events)
560
+ unique_anr_events = self._deduplicate_anr_events(all_anr_events)
561
+
562
+ logger.debug(f"Total unique crash events: {len(unique_crash_events)}, ANR events: {len(unique_anr_events)}")
563
+
564
+ return {
565
+ "crash_events": unique_crash_events,
566
+ "anr_events": unique_anr_events,
567
+ "total_crash_count": len(unique_crash_events),
568
+ "total_anr_count": len(unique_anr_events)
569
+ }
570
+
571
+ @catchException("Error parsing crash-dump.log")
572
+ def _parse_crash_dump_file(self, crash_dump_file: Path) -> Tuple[List[Dict], List[Dict]]:
573
+ """
574
+ Parse crash and ANR events from crash-dump.log file
575
+
576
+ Args:
577
+ crash_dump_file: Path to crash-dump.log file
578
+
579
+ Returns:
580
+ tuple: (crash_events, anr_events) - Lists of crash and ANR event dictionaries
581
+ """
582
+ crash_events = []
583
+ anr_events = []
584
+
585
+ with open(crash_dump_file, "r", encoding="utf-8") as f:
586
+ content = f.read()
587
+
588
+ # Parse crash events
589
+ crash_events = self._parse_crash_events(content)
590
+ # Parse ANR events
591
+ anr_events = self._parse_anr_events(content)
592
+
593
+ return crash_events, anr_events
594
+
595
+ def _parse_crash_events(self, content: str) -> List[Dict]:
596
+ """
597
+ Parse crash events from crash-dump.log content
598
+
599
+ Args:
600
+ content: Content of crash-dump.log file
601
+
602
+ Returns:
603
+ List[Dict]: List of crash event dictionaries
604
+ """
605
+ crash_events = []
606
+
607
+ # Pattern to match crash blocks
608
+ crash_pattern = r'(\d{14})\ncrash:\n(.*?)\n// crash end'
609
+
610
+ for match in re.finditer(crash_pattern, content, re.DOTALL):
611
+ timestamp_str = match.group(1)
612
+ crash_content = match.group(2)
613
+
614
+ # Parse timestamp (format: YYYYMMDDHHMMSS)
615
+ try:
616
+ timestamp = datetime.strptime(timestamp_str, "%Y%m%d%H%M%S")
617
+ formatted_time = timestamp.strftime("%Y-%m-%d %H:%M:%S")
618
+ except ValueError:
619
+ formatted_time = timestamp_str
620
+
621
+ # Extract crash information
622
+ crash_info = self._extract_crash_info(crash_content)
623
+
624
+ crash_event = {
625
+ "time": formatted_time,
626
+ "exception_type": crash_info.get("exception_type", "Unknown"),
627
+ "process": crash_info.get("process", "Unknown"),
628
+ "stack_trace": crash_info.get("stack_trace", "")
629
+ }
630
+
631
+ crash_events.append(crash_event)
632
+
633
+ return crash_events
634
+
635
+ def _parse_anr_events(self, content: str) -> List[Dict]:
636
+ """
637
+ Parse ANR events from crash-dump.log content
638
+
639
+ Args:
640
+ content: Content of crash-dump.log file
641
+
642
+ Returns:
643
+ List[Dict]: List of ANR event dictionaries
644
+ """
645
+ anr_events = []
646
+
647
+ # Pattern to match ANR blocks
648
+ anr_pattern = r'(\d{14})\nanr:\n(.*?)\nanr end'
649
+
650
+ for match in re.finditer(anr_pattern, content, re.DOTALL):
651
+ timestamp_str = match.group(1)
652
+ anr_content = match.group(2)
653
+
654
+ # Parse timestamp (format: YYYYMMDDHHMMSS)
655
+ try:
656
+ timestamp = datetime.strptime(timestamp_str, "%Y%m%d%H%M%S")
657
+ formatted_time = timestamp.strftime("%Y-%m-%d %H:%M:%S")
658
+ except ValueError:
659
+ formatted_time = timestamp_str
660
+
661
+ # Extract ANR information
662
+ anr_info = self._extract_anr_info(anr_content)
663
+
664
+ anr_event = {
665
+ "time": formatted_time,
666
+ "reason": anr_info.get("reason", "Unknown"),
667
+ "process": anr_info.get("process", "Unknown"),
668
+ "trace": anr_info.get("trace", "")
669
+ }
670
+
671
+ anr_events.append(anr_event)
672
+
673
+ return anr_events
674
+
675
+ def _extract_crash_info(self, crash_content: str) -> Dict:
676
+ """
677
+ Extract crash information from crash content
678
+
679
+ Args:
680
+ crash_content: Content of a single crash block
681
+
682
+ Returns:
683
+ Dict: Extracted crash information
684
+ """
685
+ crash_info = {
686
+ "exception_type": "Unknown",
687
+ "process": "Unknown",
688
+ "stack_trace": ""
689
+ }
690
+
691
+ lines = crash_content.strip().split('\n')
692
+
693
+ for line in lines:
694
+ line = line.strip()
695
+
696
+ # Extract PID from CRASH line
697
+ if line.startswith("// CRASH:"):
698
+ # Pattern: // CRASH: process_name (pid xxxx) (dump time: ...)
699
+ pid_match = re.search(r'\(pid\s+(\d+)\)', line)
700
+ if pid_match:
701
+ crash_info["process"] = pid_match.group(1)
702
+
703
+ # Extract exception type from Long Msg line
704
+ elif line.startswith("// Long Msg:"):
705
+ # Pattern: // Long Msg: ExceptionType: message
706
+ exception_match = re.search(r'// Long Msg:\s+([^:]+)', line)
707
+ if exception_match:
708
+ crash_info["exception_type"] = exception_match.group(1).strip()
709
+
710
+ # Extract full stack trace (all lines starting with //)
711
+ stack_lines = []
712
+ for line in lines:
713
+ if line.startswith("//"):
714
+ # Remove the "// " prefix for cleaner display
715
+ clean_line = line[3:] if line.startswith("// ") else line[2:]
716
+ stack_lines.append(clean_line)
717
+
718
+ crash_info["stack_trace"] = '\n'.join(stack_lines)
719
+
720
+ return crash_info
721
+
722
+ def _extract_anr_info(self, anr_content: str) -> Dict:
723
+ """
724
+ Extract ANR information from ANR content
725
+
726
+ Args:
727
+ anr_content: Content of a single ANR block
728
+
729
+ Returns:
730
+ Dict: Extracted ANR information
731
+ """
732
+ anr_info = {
733
+ "reason": "Unknown",
734
+ "process": "Unknown",
735
+ "trace": ""
736
+ }
737
+
738
+ lines = anr_content.strip().split('\n')
739
+
740
+ for line in lines:
741
+ line = line.strip()
742
+
743
+ # Extract PID from ANR line
744
+ if line.startswith("// ANR:"):
745
+ # Pattern: // ANR: process_name (pid xxxx) (dump time: ...)
746
+ pid_match = re.search(r'\(pid\s+(\d+)\)', line)
747
+ if pid_match:
748
+ anr_info["process"] = pid_match.group(1)
749
+
750
+ # Extract reason from Reason line
751
+ elif line.startswith("Reason:"):
752
+ # Pattern: Reason: Input dispatching timed out (...)
753
+ reason_match = re.search(r'Reason:\s+(.+)', line)
754
+ if reason_match:
755
+ full_reason = reason_match.group(1).strip()
756
+ # Simplify the reason by extracting the main part before parentheses
757
+ simplified_reason = self._simplify_anr_reason(full_reason)
758
+ anr_info["reason"] = simplified_reason
759
+
760
+ # Store the full ANR trace content
761
+ anr_info["trace"] = anr_content
762
+
763
+ return anr_info
764
+
765
+ def _simplify_anr_reason(self, full_reason: str) -> str:
766
+ """
767
+ Simplify ANR reason by extracting the main part
768
+
769
+ Args:
770
+ full_reason: Full ANR reason string
771
+
772
+ Returns:
773
+ str: Simplified ANR reason
774
+ """
775
+ # Common ANR reason patterns to simplify
776
+ simplification_patterns = [
777
+ # Input dispatching timed out (details...) -> Input dispatching timed out
778
+ (r'^(Input dispatching timed out)\s*\(.*\).*$', r'\1'),
779
+ # Broadcast of Intent (details...) -> Broadcast timeout
780
+ (r'^Broadcast of Intent.*$', 'Broadcast timeout'),
781
+ # Service timeout -> Service timeout
782
+ (r'^Service.*timeout.*$', 'Service timeout'),
783
+ # ContentProvider timeout -> ContentProvider timeout
784
+ (r'^ContentProvider.*timeout.*$', 'ContentProvider timeout'),
785
+ ]
786
+
787
+ # Apply simplification patterns
788
+ for pattern, replacement in simplification_patterns:
789
+ match = re.match(pattern, full_reason, re.IGNORECASE)
790
+ if match:
791
+ if callable(replacement):
792
+ return replacement(match)
793
+ elif '\\1' in replacement:
794
+ return re.sub(pattern, replacement, full_reason, flags=re.IGNORECASE)
795
+ else:
796
+ return replacement
797
+
798
+ # If no pattern matches, try to extract the part before the first parenthesis
799
+ paren_match = re.match(r'^([^(]+)', full_reason)
800
+ if paren_match:
801
+ simplified = paren_match.group(1).strip()
802
+ # Remove trailing punctuation
803
+ simplified = re.sub(r'[.,;:]+$', '', simplified)
804
+ return simplified
805
+
806
+ # If all else fails, return the original but truncated
807
+ return full_reason[:50] + "..." if len(full_reason) > 50 else full_reason
808
+
809
+ def _deduplicate_crash_events(self, crash_events: List[Dict]) -> List[Dict]:
810
+ """
811
+ Deduplicate crash events based on exception type, stack trace, and source directory
812
+
813
+ Args:
814
+ crash_events: List of crash events
815
+
816
+ Returns:
817
+ List[Dict]: Deduplicated crash events
818
+ """
819
+ seen_crashes = set()
820
+ unique_crashes = []
821
+
822
+ for crash in crash_events:
823
+ # Create a dedup key from the exception type, the first few stack trace lines, and the source directory
824
+ exception_type = crash.get("exception_type", "")
825
+ stack_trace = crash.get("stack_trace", "")
826
+
827
+ # Use first 3 lines of stack trace for deduplication
828
+ stack_lines = stack_trace.split('\n')[:3]
829
+ crash_key = (
830
+ exception_type,
831
+ '\n'.join(stack_lines),
832
+ crash.get("source_directory", "")
833
+ )
834
+
835
+ if crash_key not in seen_crashes:
836
+ seen_crashes.add(crash_key)
837
+ unique_crashes.append(crash)
838
+
839
+ return unique_crashes
840
+
841
+ def _deduplicate_anr_events(self, anr_events: List[Dict]) -> List[Dict]:
842
+ """
843
+ Deduplicate ANR events based on reason, process, and source directory
844
+
845
+ Args:
846
+ anr_events: List of ANR events
847
+
848
+ Returns:
849
+ List[Dict]: Deduplicated ANR events
850
+ """
851
+ seen_anrs = set()
852
+ unique_anrs = []
853
+
854
+ for anr in anr_events:
855
+ # Create a dedup key from the reason, process, and source directory
856
+ reason = anr.get("reason", "")
857
+ process = anr.get("process", "")
858
+ anr_key = (reason, process, anr.get("source_directory", ""))
859
+
860
+ if anr_key not in seen_anrs:
861
+ seen_anrs.add(anr_key)
862
+ unique_anrs.append(anr)
863
+
864
+ return unique_anrs
865
+
866
+ def _calculate_final_statistics(
867
+ self,
868
+ property_stats: Dict,
869
+ coverage_data: Dict,
870
+ crash_anr_data: Optional[Dict] = None,
871
+ property_source_mapping: Optional[Dict] = None,
872
+ property_kinds: Optional[Dict[str, str]] = None,
873
+ ) -> Dict:
874
+ """
875
+ Calculate final statistics for template rendering
876
+
877
+ Note: the total bug count includes only property test failures/errors,
878
+ not crashes or ANRs (which are tracked separately)
879
+
880
+ Args:
881
+ property_stats: Merged property statistics
882
+ coverage_data: Merged coverage data
883
+ crash_anr_data: Merged crash and ANR data (optional)
884
+ property_source_mapping: Maps property names to source directories with fail/error (optional)
+ property_kinds: Maps property names to their kind, "property" or "invariant" (optional)
885
+
886
+ Returns:
887
+ Complete data for template rendering
888
+ """
889
+ # Calculate bug count from property failures (exclude invariants)
890
+ property_bugs_found = 0
891
+ invariant_violations_count = 0
892
+
893
+ # Calculate property counts (exclude invariants from summary counts)
894
+ all_properties_count = 0
895
+ executed_properties_count = 0
896
+
897
+ # Initialize crash/ANR data
898
+ crash_events = []
899
+ anr_events = []
900
+ total_crash_count = 0
901
+ total_anr_count = 0
902
+
903
+ if crash_anr_data:
904
+ crash_events = crash_anr_data.get('crash_events', [])
905
+ anr_events = crash_anr_data.get('anr_events', [])
906
+ total_crash_count = crash_anr_data.get('total_crash_count', 0)
907
+ total_anr_count = crash_anr_data.get('total_anr_count', 0)
908
+
909
+ # Prepare enhanced property statistics with derived metrics
910
+ processed_property_stats = {}
911
+ property_stats_summary = {
912
+ "total_properties": 0,
913
+ "total_precond_satisfied": 0,
914
+ "total_executed": 0,
915
+ "total_passes": 0,
916
+ "total_fails": 0,
917
+ "total_errors": 0,
918
+ "total_not_executed": 0,
919
+ }
920
+
921
+ property_kind_summary = {
922
+ "all": 0,
923
+ "property": 0,
924
+ "invariant": 0,
925
+ "unknown": 0,
926
+ }
927
+ property_source_kind_summary = {
928
+ "all": 0,
929
+ "property": 0,
930
+ "invariant": 0,
931
+ "unknown": 0,
932
+ }
933
+
934
+ for prop_name, stats in property_stats.items():
935
+ precond_satisfied = stats.get("precond_satisfied", 0)
936
+ total_executions = stats.get("executed", 0)
937
+ fail_count = stats.get("fail", 0)
938
+ error_count = stats.get("error", 0)
939
+
940
+ pass_count = max(total_executions - fail_count - error_count, 0)
941
+ not_executed_count = max(precond_satisfied - total_executions, 0)
942
+ kind = (property_kinds or {}).get(prop_name, "unknown")
943
+ if kind not in {"property", "invariant"}:
944
+ kind = "unknown"
945
+
946
+ processed_property_stats[prop_name] = {
947
+ **stats,
948
+ "executed_total": total_executions,
949
+ "pass_count": pass_count,
950
+ "not_executed": not_executed_count,
951
+ "kind": kind,
952
+ }
953
+
954
+ if kind != "invariant":
955
+ all_properties_count += 1
956
+ if total_executions > 0:
957
+ executed_properties_count += 1
958
+ if fail_count > 0 or error_count > 0:
959
+ property_bugs_found += 1
960
+ elif fail_count > 0 or error_count > 0:
961
+ invariant_violations_count += 1
962
+
963
+ property_stats_summary["total_properties"] += 1
964
+ property_stats_summary["total_precond_satisfied"] += precond_satisfied
965
+ property_stats_summary["total_executed"] += total_executions
966
+ property_stats_summary["total_passes"] += pass_count
967
+ property_stats_summary["total_fails"] += fail_count
968
+ property_stats_summary["total_errors"] += error_count
969
+ property_stats_summary["total_not_executed"] += not_executed_count
970
+
971
+ property_kind_summary["all"] += 1
972
+ property_kind_summary[kind] += 1
973
+
974
+ if property_source_mapping:
975
+ for prop_name in property_source_mapping.keys():
976
+ kind = (property_kinds or {}).get(prop_name, "unknown")
977
+ if kind not in {"property", "invariant"}:
978
+ kind = "unknown"
979
+ property_source_kind_summary["all"] += 1
980
+ property_source_kind_summary[kind] += 1
981
+
982
+ # Calculate total bugs found (only property bugs, not including crashes/ANRs)
983
+ total_bugs_found = property_bugs_found
984
+
985
+ # Prepare final data
986
+ final_data = {
987
+ 'timestamp': datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
988
+ 'bugs_found': total_bugs_found,
989
+ 'invariant_violations_count': invariant_violations_count,
990
+ 'property_bugs_found': property_bugs_found,
991
+ 'all_properties_count': all_properties_count,
992
+ 'executed_properties_count': executed_properties_count,
993
+ 'property_stats': processed_property_stats,
994
+ 'property_stats_summary': property_stats_summary,
995
+ 'property_kind_summary': property_kind_summary,
996
+ 'property_source_kind_summary': property_source_kind_summary,
997
+ 'property_source_mapping': property_source_mapping or {},
998
+ 'crash_events': crash_events,
999
+ 'anr_events': anr_events,
1000
+ 'total_crash_count': total_crash_count,
1001
+ 'total_anr_count': total_anr_count,
1002
+ **coverage_data # Include all coverage data
1003
+ }
1004
+
1005
+ return final_data
1006
+
1007
+ def get_merge_summary(self) -> Dict:
1008
+ """
1009
+ Get summary of the merge operation
1010
+
1011
+ Returns:
1012
+ Dictionary containing merge summary information
1013
+ """
1014
+ if not self.result_dirs:
1015
+ return {}
1016
+
1017
+ summary = {
1018
+ "merged_directories": len(self.result_dirs),
1019
+ "source_paths": [str(p) for p in self.result_dirs],
1020
+ "merge_timestamp": datetime.now().isoformat()
1021
+ }
1022
+ if self._package_name:
1023
+ summary["package_name"] = self._package_name
1024
+ return summary
1025
+
1026
+ @catchException("Error generating HTML report")
1027
+ def _generate_html_report(self, data: Dict, output_dir: Path) -> Path:
1028
+ """
1029
+ Generate HTML report using the merged template
1030
+
1031
+ Args:
1032
+ data: Final merged data
1033
+ output_dir: Output directory
1034
+
1035
+ Returns:
1036
+ Path to the generated HTML report
1037
+ """
1038
+ from jinja2 import Environment, FileSystemLoader, PackageLoader, select_autoescape
1039
+
1040
+ # Set up Jinja2 environment
1041
+ try:
1042
+ jinja_env = Environment(
1043
+ loader=PackageLoader("kea2.report", "templates"),
1044
+ autoescape=select_autoescape(['html', 'xml'])
1045
+ )
1046
+ except (ImportError, ValueError):
1047
+ # Fallback to file system loader
1048
+ current_dir = Path(__file__).parent
1049
+ templates_dir = current_dir / "templates"
1050
+
1051
+ jinja_env = Environment(
1052
+ loader=FileSystemLoader(templates_dir),
1053
+ autoescape=select_autoescape(['html', 'xml'])
1054
+ )
1055
+
1056
+ # Render template
1057
+ template = jinja_env.get_template("merged_bug_report_template.html")
1058
+ html_content = template.render(**data)
1059
+
1060
+ # Save HTML report
1061
+ report_file = output_dir / "merged_report.html"
1062
+ with open(report_file, 'w', encoding='utf-8') as f:
1063
+ f.write(html_content)
1064
+
1065
+ logger.debug(f"HTML report generated: {report_file}")
1066
+ return report_file
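
For orientation, here is a minimal usage sketch of the merger added above. It is an illustration only, assuming the on-disk layout the code expects: each res_* directory contains a result_*.json file, a bug_report_config.json, a per-run HTML report, and an output_*/ folder with coverage.log, steps.log and crash-dump.log. The directory names res_run1 and res_run2 are hypothetical.

from pathlib import Path
from kea2.report.report_merger import TestReportMerger

# Hypothetical result directories produced by two separate Kea2 runs.
result_dirs = [Path("res_run1"), Path("res_run2")]

merger = TestReportMerger()
# merge_reports returns the path to the merged HTML report, written under a
# timestamped merged_report_* directory, or None if package validation fails.
report_path = merger.merge_reports(result_dirs, output_dir=Path("."))
if report_path is not None:
    print(f"Merged report: {report_path}")
    print(merger.get_merge_summary())

Note that the merged HTML links back to each per-run report via relative paths computed against the merged output directory, so the source res_* directories should stay where they were when the merge ran.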