Kea2-python 0.3.1__py3-none-any.whl → 0.3.3__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of Kea2-python might be problematic.
- kea2/bug_report_generator.py +103 -75
- kea2/keaUtils.py +11 -6
- kea2/report_merger.py +29 -16
- kea2/templates/bug_report_template.html +746 -408
- kea2/templates/merged_bug_report_template.html +1082 -380
- {kea2_python-0.3.1.dist-info → kea2_python-0.3.3.dist-info}/METADATA +74 -8
- {kea2_python-0.3.1.dist-info → kea2_python-0.3.3.dist-info}/RECORD +11 -11
- {kea2_python-0.3.1.dist-info → kea2_python-0.3.3.dist-info}/WHEEL +0 -0
- {kea2_python-0.3.1.dist-info → kea2_python-0.3.3.dist-info}/entry_points.txt +0 -0
- {kea2_python-0.3.1.dist-info → kea2_python-0.3.3.dist-info}/licenses/LICENSE +0 -0
- {kea2_python-0.3.1.dist-info → kea2_python-0.3.3.dist-info}/top_level.txt +0 -0
kea2/bug_report_generator.py
CHANGED
```diff
@@ -9,7 +9,7 @@ from concurrent.futures import ThreadPoolExecutor
 
 from PIL import Image, ImageDraw, ImageFont
 from jinja2 import Environment, FileSystemLoader, select_autoescape, PackageLoader
-from kea2.utils import getLogger
+from kea2.utils import getLogger, catchException
 
 logger = getLogger(__name__)
 
@@ -388,6 +388,10 @@ class BugReportGenerator:
         data["all_properties_count"] = len(self.test_result)
         data["executed_properties_count"] = sum(1 for result in self.test_result.values() if result.get("executed", 0) > 0)
 
+        # Calculate detailed property statistics for table headers
+        property_stats_summary = self._calculate_property_stats_summary(self.test_result)
+        data["property_stats_summary"] = property_stats_summary
+
         # Process coverage data
         data["coverage_trend"] = self.cov_trend
 
@@ -421,27 +425,24 @@ class BugReportGenerator:
             step_data["Info"] = json.loads(step_data["Info"])
         return step_data
 
+    @catchException("Error when marking screenshot")
     def _mark_screenshot(self, step_data: StepData):
-        [4 removed lines; content not shown in this view]
-            return
-
-        if step_type == "Monkey":
-            act = step_data["Info"].get("act")
-            pos = step_data["Info"].get("pos")
-            if act in ["CLICK", "LONG_CLICK"] or act.startswith("SCROLL"):
-                self._mark_screenshot_interaction(step_type, screenshot_name, act, pos)
-
-        elif step_type == "Script":
-            act = step_data["Info"].get("method")
-            pos = step_data["Info"].get("params")
-            if act in ["click", "setText", "swipe"]:
-                self._mark_screenshot_interaction(step_type, screenshot_name, act, pos)
+        step_type = step_data["Type"]
+        screenshot_name = step_data["Screenshot"]
+        if not screenshot_name:
+            return
 
-        [2 removed lines; content not shown in this view]
+        if step_type == "Monkey":
+            act = step_data["Info"].get("act")
+            pos = step_data["Info"].get("pos")
+            if act in ["CLICK", "LONG_CLICK"] or act.startswith("SCROLL"):
+                self._mark_screenshot_interaction(step_type, screenshot_name, act, pos)
+
+        elif step_type == "Script":
+            act = step_data["Info"].get("method")
+            pos = step_data["Info"].get("params")
+            if act in ["click", "setText", "swipe"]:
+                self._mark_screenshot_interaction(step_type, screenshot_name, act, pos)
 
 
     def _mark_screenshot_interaction(self, step_type: str, screenshot_name: str, action_type: str, position: Union[List, Tuple]) -> bool:
@@ -528,65 +529,62 @@
             img.save(screenshot_path)
             return True
 
+    @catchException("Error rendering template")
     def _generate_html_report(self, data: ReportData):
         """
         Generate HTML format bug report
         """
-        [41 removed lines; content not shown in this view]
-        # Check if template exists, if not create it
-        template_path = Path(__file__).parent / "templates" / "bug_report_template.html"
-        if not template_path.exists():
-            logger.warning("Template file does not exist, creating default template...")
+        # Format timestamp for display
+        timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
+
+        # Ensure coverage_trend has data
+        if not data["coverage_trend"]:
+            logger.warning("No coverage trend data")
+            # Use the same field names as in coverage.log file
+            data["coverage_trend"] = [{"stepsCount": 0, "coverage": 0, "testedActivitiesCount": 0}]
+
+        # Convert coverage_trend to JSON string, ensuring all data points are included
+        coverage_trend_json = json.dumps(data["coverage_trend"])
+        logger.debug(f"Number of coverage trend data points: {len(data['coverage_trend'])}")
+
+        # Prepare template data
+        template_data = {
+            'timestamp': timestamp,
+            'bugs_found': data["bugs_found"],
+            'total_testing_time': data["total_testing_time"],
+            'executed_events': data["executed_events"],
+            'coverage_percent': round(data["coverage"], 2),
+            'total_activities_count': data["total_activities_count"],
+            'tested_activities_count': data["tested_activities_count"],
+            'tested_activities': data["tested_activities"],
+            'total_activities': data["total_activities"],
+            'all_properties_count': data["all_properties_count"],
+            'executed_properties_count': data["executed_properties_count"],
+            'items_per_page': 10,  # Items to display per page
+            'screenshots': self.screenshots,
+            'property_violations': data["property_violations"],
+            'property_stats': data["property_stats"],
+            'property_error_details': data["property_error_details"],
+            'coverage_data': coverage_trend_json,
+            'take_screenshots': self.take_screenshots,  # Pass screenshot setting to template
+            'property_execution_trend': data["property_execution_trend"],
+            'property_execution_data': json.dumps(data["property_execution_trend"]),
+            'activity_count_history': data["activity_count_history"],
+            'crash_events': data["crash_events"],
+            'anr_events': data["anr_events"],
+            'property_stats_summary': data["property_stats_summary"]
+        }
 
-        [3 removed lines; content not shown in this view]
+        # Check if template exists, if not create it
+        template_path = Path(__file__).parent / "templates" / "bug_report_template.html"
+        if not template_path.exists():
+            logger.warning("Template file does not exist, creating default template...")
 
-
+        # Use Jinja2 to render template
+        template = self.jinja_env.get_template("bug_report_template.html")
+        html_content = template.render(**template_data)
 
-
-            logger.error(f"Error rendering template: {e}")
-            raise
+        return html_content
 
     def _add_screenshot_info(self, step_data: StepData, step_index: int, data: Dict):
         """
@@ -697,9 +695,7 @@ class BugReportGenerator:
             data["property_violations"].append({
                 "index": index,
                 "property_name": property_name,
-                "
-                "interaction_pages": [start_step, end_step],
-                "postcondition_page": end_step
+                "interaction_pages": [start_step, end_step]
             })
             index += 1
 
@@ -831,6 +827,38 @@ class BugReportGenerator:
 
         return property_execution_trend
 
+    def _calculate_property_stats_summary(self, test_result: TestResult) -> Dict[str, int]:
+        """
+        Calculate summary statistics for property checking table headers
+
+        Args:
+            test_result: Test result data containing property statistics
+
+        Returns:
+            Dict: Summary statistics for each column
+        """
+        stats_summary = {
+            "total_properties": 0,
+            "total_precond_satisfied": 0,
+            "total_executed": 0,
+            "total_fails": 0,
+            "total_errors": 0,
+            "properties_with_errors": 0
+        }
+
+        for property_name, result in test_result.items():
+            stats_summary["total_properties"] += 1
+            stats_summary["total_precond_satisfied"] += result.get("precond_satisfied", 0)
+            stats_summary["total_executed"] += result.get("executed", 0)
+            stats_summary["total_fails"] += result.get("fail", 0)
+            stats_summary["total_errors"] += result.get("error", 0)
+
+            # Count properties that have errors or fails
+            if result.get("fail", 0) > 0 or result.get("error", 0) > 0:
+                stats_summary["properties_with_errors"] += 1
+
+        return stats_summary
+
     def _load_crash_dump_data(self) -> Tuple[List[Dict], List[Dict]]:
         """
         Load crash and ANR events from crash-dump.log file
```
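The notable change in this file is that the inline error handling in `_mark_screenshot` and `_generate_html_report` (the removed `logger.error(...)` / `raise` lines) is replaced by a `catchException` decorator imported from `kea2.utils`. That module is not part of this diff, so the following is only a minimal sketch of what such a decorator could look like, assuming it logs the supplied message and suppresses the exception:

```python
# Hypothetical sketch of a catchException-style decorator. kea2/utils.py is not
# shown in this diff, so the real signature and behavior may differ.
import functools
import logging

logger = logging.getLogger(__name__)


def catchException(message: str):
    def decorator(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            try:
                return func(*args, **kwargs)
            except Exception:
                # Assumption: the error is logged with the given message and swallowed,
                # mirroring the removed per-method logger.error(...) handling.
                logger.exception(message)
                return None
        return wrapper
    return decorator
```

Under that assumption, `@catchException("Error rendering template")` centralizes the error logging that each method previously did by hand.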
kea2/keaUtils.py
CHANGED
```diff
@@ -198,6 +198,9 @@ class PropStatistic:
     executed: int = 0
     fail: int = 0
     error: int = 0
+
+
+PBTTestResult = NewType("PBTTestResult", Dict[PropName, PropStatistic])
 
 
 PropertyExecutionInfoStore = NewType("PropertyExecutionInfoStore", Deque["PropertyExecutionInfo"])
@@ -209,11 +212,6 @@ class PropertyExecutionInfo:
     tb: str
 
 
-class PBTTestResult(dict):
-    def __getitem__(self, key) -> PropStatistic:
-        return super().__getitem__(key)
-
-
 def getFullPropName(testCase: TestCase):
     return ".".join([
         testCase.__module__,
@@ -277,10 +275,15 @@ class JsonResult(TextTestResult):
         self.lastExecutedInfo.state = "pass"
 
         self.executionInfoStore.append(self.lastExecutedInfo)
-
 
     def getExcuted(self, test: TestCase):
         return self.res[getFullPropName(test)].executed
+
+    def logSummary(self):
+        fails = sum(_.fail for _ in self.res.values())
+        errors = sum(_.error for _ in self.res.values())
+
+        logger.info(f"[Property Exectution Summary] Errors:{errors}, Fails:{fails}")
 
 
 class KeaTestRunner(TextTestRunner):
@@ -465,6 +468,8 @@ class KeaTestRunner(TextTestRunner):
         else:
             self.stream.write("\n")
         self.stream.flush()
+
+        result.logSummary()
         return result
 
     @property
```
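The typing change above drops the `PBTTestResult(dict)` subclass in favor of a `NewType` alias over `Dict[PropName, PropStatistic]`, and the new `JsonResult.logSummary` folds the per-property counters into two totals that the runner now logs after each run. A small self-contained illustration of that aggregation, with stand-in property names and counts:

```python
# Stand-in data shaped like PBTTestResult (a plain dict of PropStatistic values);
# the property names and numbers here are invented for illustration only.
from dataclasses import dataclass


@dataclass
class PropStatistic:
    precond_satisfied: int = 0
    executed: int = 0
    fail: int = 0
    error: int = 0


res = {
    "mytest.MyProperties.test_prop_a": PropStatistic(precond_satisfied=9, executed=8, fail=1),
    "mytest.MyProperties.test_prop_b": PropStatistic(precond_satisfied=5, executed=5, error=2),
}

# Same aggregation as the logSummary() method added in the diff above
fails = sum(stat.fail for stat in res.values())
errors = sum(stat.error for stat in res.values())
print(f"[Property Exectution Summary] Errors:{errors}, Fails:{fails}")
# [Property Exectution Summary] Errors:2, Fails:1
```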
kea2/report_merger.py
CHANGED
```diff
@@ -3,7 +3,7 @@ import os
 import re
 from datetime import datetime
 from pathlib import Path
-from typing import Dict, List, Optional, Union
+from typing import Dict, List, Optional, Tuple, Union
 from collections import defaultdict
 
 from kea2.utils import getLogger
@@ -49,12 +49,12 @@ class TestReportMerger:
         logger.debug(f"Merging {len(self.result_dirs)} test result directories...")
 
         # Merge different types of data
-        merged_property_stats = self._merge_property_results()
+        merged_property_stats, property_source_mapping = self._merge_property_results()
         merged_coverage_data = self._merge_coverage_data()
         merged_crash_anr_data = self._merge_crash_dump_data()
 
         # Calculate final statistics
-        final_data = self._calculate_final_statistics(merged_property_stats, merged_coverage_data, merged_crash_anr_data)
+        final_data = self._calculate_final_statistics(merged_property_stats, merged_coverage_data, merged_crash_anr_data, property_source_mapping)
 
         # Add merge information to final data
         final_data['merge_info'] = {
@@ -86,12 +86,14 @@
 
         logger.debug(f"Validated result directory: {result_dir}")
 
-    def _merge_property_results(self) -> Dict[str, Dict]:
+    def _merge_property_results(self) -> Tuple[Dict[str, Dict], Dict[str, List[str]]]:
         """
         Merge property test results from all directories
-
+
         Returns:
-
+            Tuple of (merged_property_results, property_source_mapping)
+            - merged_property_results: Merged property execution results
+            - property_source_mapping: Maps property names to list of source directories with fail/error
         """
         merged_results = defaultdict(lambda: {
             "precond_satisfied": 0,
@@ -99,31 +101,40 @@
             "fail": 0,
             "error": 0
         })
-
+
+        # Track which directories have fail/error for each property
+        property_source_mapping = defaultdict(list)
+
         for result_dir in self.result_dirs:
             result_files = list(result_dir.glob("result_*.json"))
             if not result_files:
                 logger.warning(f"No result file found in {result_dir}")
                 continue
-
+
             result_file = result_files[0]  # Take the first (should be only one)
-
+            dir_name = result_dir.name  # Get the directory name (e.g., res_2025072011_5048015228)
+
             try:
                 with open(result_file, 'r', encoding='utf-8') as f:
                     test_results = json.load(f)
-
+
                 # Merge results for each property
                 for prop_name, prop_result in test_results.items():
                     for key in ["precond_satisfied", "executed", "fail", "error"]:
                         merged_results[prop_name][key] += prop_result.get(key, 0)
-
+
+                    # Track source directories for properties with fail/error
+                    if prop_result.get('fail', 0) > 0 or prop_result.get('error', 0) > 0:
+                        if dir_name not in property_source_mapping[prop_name]:
+                            property_source_mapping[prop_name].append(dir_name)
+
                 logger.debug(f"Merged results from: {result_file}")
-
+
             except Exception as e:
                 logger.error(f"Error reading result file {result_file}: {e}")
                 continue
-
-        return dict(merged_results)
+
+        return dict(merged_results), dict(property_source_mapping)
 
     def _merge_coverage_data(self) -> Dict:
         """
@@ -234,7 +245,7 @@
             "total_anr_count": len(unique_anr_events)
         }
 
-    def _parse_crash_dump_file(self, crash_dump_file: Path) ->
+    def _parse_crash_dump_file(self, crash_dump_file: Path) -> Tuple[List[Dict], List[Dict]]:
         """
         Parse crash and ANR events from crash-dump.log file
 
@@ -529,7 +540,7 @@
 
         return unique_anrs
 
-    def _calculate_final_statistics(self, property_stats: Dict, coverage_data: Dict, crash_anr_data: Dict = None) -> Dict:
+    def _calculate_final_statistics(self, property_stats: Dict, coverage_data: Dict, crash_anr_data: Dict = None, property_source_mapping: Dict = None) -> Dict:
         """
         Calculate final statistics for template rendering
 
@@ -540,6 +551,7 @@
             property_stats: Merged property statistics
             coverage_data: Merged coverage data
             crash_anr_data: Merged crash and ANR data (optional)
+            property_source_mapping: Maps property names to source directories with fail/error (optional)
 
         Returns:
             Complete data for template rendering
@@ -576,6 +588,7 @@
             'all_properties_count': all_properties_count,
             'executed_properties_count': executed_properties_count,
             'property_stats': property_stats,
+            'property_source_mapping': property_source_mapping or {},
             'crash_events': crash_events,
             'anr_events': anr_events,
             'total_crash_count': total_crash_count,
```