Kea2-python 0.3.6__py3-none-any.whl → 1.0.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: the registry flags this version of Kea2-python as possibly problematic.
- kea2/__init__.py +1 -1
- kea2/assets/config_version.json +16 -0
- kea2/assets/fastbot_configs/teardown.py +18 -0
- kea2/assets/monkeyq.jar +0 -0
- kea2/assets/quicktest.py +21 -2
- kea2/bug_report_generator.py +114 -34
- kea2/cli.py +19 -9
- kea2/fastbotManager.py +20 -4
- kea2/keaUtils.py +360 -110
- kea2/kea_launcher.py +61 -23
- kea2/mixin.py +22 -0
- kea2/report_merger.py +107 -42
- kea2/resultSyncer.py +1 -1
- kea2/templates/bug_report_template.html +139 -13
- kea2/templates/merged_bug_report_template.html +3293 -3213
- kea2/u2Driver.py +18 -8
- kea2/utils.py +60 -14
- kea2/version_manager.py +101 -0
- {kea2_python-0.3.6.dist-info → kea2_python-1.0.0.dist-info}/METADATA +63 -15
- {kea2_python-0.3.6.dist-info → kea2_python-1.0.0.dist-info}/RECORD +24 -20
- {kea2_python-0.3.6.dist-info → kea2_python-1.0.0.dist-info}/WHEEL +0 -0
- {kea2_python-0.3.6.dist-info → kea2_python-1.0.0.dist-info}/entry_points.txt +0 -0
- {kea2_python-0.3.6.dist-info → kea2_python-1.0.0.dist-info}/licenses/LICENSE +0 -0
- {kea2_python-0.3.6.dist-info → kea2_python-1.0.0.dist-info}/top_level.txt +0 -0
kea2/kea_launcher.py
CHANGED
@@ -2,6 +2,9 @@ import sys
 import argparse
 import unittest
 from typing import List
+import os
+
+

 def _set_runner_parser(subparsers: "argparse._SubParsersAction[argparse.ArgumentParser]"):
     parser = subparsers.add_parser("run", help="run kea2")
@@ -122,6 +125,15 @@ def _set_runner_parser(subparsers: "argparse._SubParsersAction[argparse.ArgumentParser]"):
         help="Take screenshots for every step.",
     )

+    parser.add_argument(
+        "--pre-failure-screenshots",
+        dest="pre_failure_screenshots",
+        type=int,
+        required=False,
+        default=0,
+        help="Dump n screenshots before failure. 0 means take screenshots for every step.",
+    )
+
     parser.add_argument(
         "--act-whitelist-file",
         dest="act_whitelist_file",
@@ -141,15 +153,19 @@ def _set_runner_parser(subparsers: "argparse._SubParsersAction[argparse.ArgumentParser]"):
     parser.add_argument(
         "extra",
         nargs=argparse.REMAINDER,
-        help="Extra args for
+        help="Extra args (e.g. propertytest & --). See docs (https://github.com/ecnusse/Kea2/blob/main/docs/manual_en.md) for details.",
     )


-def
+def extra_args_info_logger(args):
     if args.agent == "native":
         print("[Warning] Property not availble in native agent.", flush=True)
     if args.unittest_args:
         print("Captured unittest args:", args.unittest_args, flush=True)
+    if args.propertytest_args:
+        print("Captured propertytest args:", args.propertytest_args, flush=True)
+    if args.extra:
+        print("Captured extra args (Will be appended to fastbot launcher):", args.extra, flush=True)


 def driver_info_logger(args):
@@ -170,6 +186,8 @@ def driver_info_logger(args):
     print(" log_stamp:", args.log_stamp, flush=True)
     if args.take_screenshots:
         print(" take_screenshots:", args.take_screenshots, flush=True)
+    if args.pre_failure_screenshots:
+        print(" pre_failure_screenshots:", args.pre_failure_screenshots, flush=True)
     if args.max_step:
         print(" max_step:", args.max_step, flush=True)

@@ -184,30 +202,44 @@ def parse_args(argv: List):


 def _sanitize_args(args):
+    args.mode = None
+    args.propertytest_args = None
     if args.agent == "u2" and not args.driver_name:
         if args.extra == []:
             args.driver_name = "d"
         else:
             raise ValueError("--driver-name should be specified when customizing script in --agent u2")
-
-
-
-
-
-
-
+
+    extra_args = {
+        "unittest": [],
+        "propertytest": [],
+        "extra": []
+    }
+
+    for i in range(len(args.extra)):
+        if args.extra[i] == "unittest":
+            current = "unittest"
+        elif args.extra[i] == "propertytest":
+            current = "propertytest"
+        elif args.extra[i] == "--":
+            current = "extra"
+        else:
+            extra_args[current].append(args.extra[i])
+    setattr(args, "unittest_args", [])
+    setattr(args, "propertytest_args", [])
+    args.unittest_args = extra_args["unittest"]
+    args.propertytest_args = extra_args["propertytest"]
+    args.extra = extra_args["extra"]
+

 def run(args=None):
     if args is None:
         args = parse_args(sys.argv[1:])
     _sanitize_args(args)
     driver_info_logger(args)
-
-    if args.extra:
-        print("[Warning] Captured extra args:", args.extra, flush=True)
-        print("The extra args will be passed into fastbot launcher.", flush=True)
+    extra_args_info_logger(args)

-    from kea2 import KeaTestRunner, Options
+    from kea2 import KeaTestRunner, Options, HybridTestRunner
     from kea2.u2Driver import U2Driver
     options = Options(
         agent=args.agent,
@@ -219,20 +251,26 @@
         running_mins=args.running_minutes,
         maxStep=args.max_step,
         throttle=args.throttle_ms,
+        output_dir=args.output_dir,
         log_stamp=args.log_stamp,
         profile_period=args.profile_period,
         take_screenshots=args.take_screenshots,
+        pre_failure_screenshots=args.pre_failure_screenshots,
         device_output_root=args.device_output_root,
         act_whitelist_file=args.act_whitelist_file,
         act_blacklist_file=args.act_blacklist_file,
+        propertytest_args=args.propertytest_args,
+        unittest_args=args.unittest_args,
         extra_args=args.extra,
     )
-
-
-
-
-
-
-
-
-
+
+    is_hybrid_test = True if options.unittest_args else False
+    if is_hybrid_test:
+        HybridTestRunner.setOptions(options)
+        testRunner = HybridTestRunner
+        argv = ["python3 -m unittest"] + options.unittest_args
+    if not is_hybrid_test or options.agent == "u2":
+        KeaTestRunner.setOptions(options)
+        testRunner = KeaTestRunner
+        argv = ["python3 -m unittest"] + options.propertytest_args
+    unittest.main(module=None, argv=argv, testRunner=testRunner)
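
The rewritten _sanitize_args is the heart of this change: argparse's REMAINDER tokens are routed into three buckets by the sentinel tokens "unittest", "propertytest", and "--". A minimal, runnable sketch of that routing (the helper name and sample tokens are ours, not from the package):

# Sketch of the token routing performed by _sanitize_args.
# As in the diff, the first token is assumed to be one of the
# sentinels; a leading ordinary token would leave `current` unset.
def split_extra(extra):
    buckets = {"unittest": [], "propertytest": [], "extra": []}
    current = None
    for token in extra:
        if token == "unittest":
            current = "unittest"
        elif token == "propertytest":
            current = "propertytest"
        elif token == "--":
            current = "extra"
        else:
            buckets[current].append(token)
    return buckets

print(split_extra(["propertytest", "-k", "test_login", "--", "--throttle", "200"]))
# {'unittest': [], 'propertytest': ['-k', 'test_login'], 'extra': ['--throttle', '200']}

Everything after "--" is handed to the fastbot launcher untouched, which is why run() later prints it as "extra args (Will be appended to fastbot launcher)".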
kea2/mixin.py
ADDED
@@ -0,0 +1,22 @@
+from unittest import TextTestResult, TestCase
+
+
+class BetterConsoleLogExtensionMixin:
+    def __init__(self, stream, descriptions, verbosity):
+        super().__init__(stream, descriptions, verbosity)
+        self.showAll = True
+
+    def getDescription(self: "TextTestResult", test: "TestCase"):
+        doc_first_line = test.shortDescription()
+        if self.descriptions and doc_first_line:
+            doc_first_line = "# " + doc_first_line
+            return '\n'.join((str(test), doc_first_line))
+        else:
+            return str(test)
+
+    def startTest(self: "TextTestResult", test):
+        if self.showAll:
+            self.stream.write("[INFO] Start executing property: ")
+            self.stream.writeln(self.getDescription(test))
+            self.stream.flush()
+            self._newline = True
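
The mixin overrides getDescription and startTest from unittest.TextTestResult, so it is meant to sit before a result class in the MRO, letting super().__init__ reach the real base. A minimal composition sketch (the PropertyTestResult name and runner wiring are illustrative, not taken from the package):

import unittest
from kea2.mixin import BetterConsoleLogExtensionMixin

class PropertyTestResult(BetterConsoleLogExtensionMixin, unittest.TextTestResult):
    """TextTestResult that announces each property before it runs."""
    pass

# runner = unittest.TextTestRunner(resultclass=PropertyTestResult, verbosity=2)
# runner.run(suite)  # each test now starts with "[INFO] Start executing property: ..."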
kea2/report_merger.py
CHANGED
@@ -35,7 +35,6 @@ class TestReportMerger:
         try:
             # Convert paths and validate
             self.result_dirs = [Path(p).resolve() for p in result_paths]
-            self._validate_result_dirs()

             # Setup output directory
             timestamp = datetime.now().strftime("%Y%m%d%H_%M%S")
@@ -45,13 +44,13 @@
             output_dir = Path(output_dir).resolve() / f"merged_report_{timestamp}"

             output_dir.mkdir(parents=True, exist_ok=True)
-
+
             logger.debug(f"Merging {len(self.result_dirs)} test result directories...")
-
+
             # Merge different types of data
-            merged_property_stats, property_source_mapping = self._merge_property_results()
+            merged_property_stats, property_source_mapping = self._merge_property_results(output_dir)
             merged_coverage_data = self._merge_coverage_data()
-            merged_crash_anr_data = self._merge_crash_dump_data()
+            merged_crash_anr_data = self._merge_crash_dump_data(output_dir)

             # Calculate final statistics
             final_data = self._calculate_final_statistics(merged_property_stats, merged_coverage_data, merged_crash_anr_data, property_source_mapping)
@@ -73,27 +72,18 @@
             logger.error(f"Error merging test reports: {e}")
             raise

-    def _validate_result_dirs(self):
-        """Validate that all result directories exist and contain required files"""
-        for result_dir in self.result_dirs:
-            if not result_dir.exists():
-                raise FileNotFoundError(f"Result directory does not exist: {result_dir}")
-
-            # Check for required files pattern
-            result_files = list(result_dir.glob("result_*.json"))
-            if not result_files:
-                raise FileNotFoundError(f"No result_*.json file found in: {result_dir}")
-
-            logger.debug(f"Validated result directory: {result_dir}")
-
-    def _merge_property_results(self) -> Tuple[Dict[str, Dict], Dict[str, List[str]]]:
+    def _merge_property_results(self, output_dir: Path = None) -> Tuple[Dict[str, Dict], Dict[str, List[Dict]]]:
         """
         Merge property test results from all directories

+        Args:
+            output_dir: The output directory where the merged report will be saved (for calculating relative paths)
+
         Returns:
             Tuple of (merged_property_results, property_source_mapping)
             - merged_property_results: Merged property execution results
-            - property_source_mapping: Maps property names to list of source
+            - property_source_mapping: Maps property names to list of source directory info with fail/error
+              Each entry contains: {'dir_name': str, 'report_path': str}
         """
         merged_results = defaultdict(lambda: {
             "precond_satisfied": 0,
@@ -107,32 +97,47 @@

         for result_dir in self.result_dirs:
             result_files = list(result_dir.glob("result_*.json"))
+            html_files = list(result_dir.glob("*.html"))
             if not result_files:
                 logger.warning(f"No result file found in {result_dir}")
                 continue
+            if not html_files:
+                logger.warning(f"No html file found in {result_dir}")
+                continue

             result_file = result_files[0]  # Take the first (should be only one)
+            html_file = html_files[0]
             dir_name = result_dir.name  # Get the directory name (e.g., res_2025072011_5048015228)

+            # Find the HTML report file in the result directory
+            html_report_path = None
+
+            # Calculate relative path from output_dir to the HTML file
             try:
-
-
+                html_report_path = os.path.relpath(html_file.resolve(), output_dir.resolve())
+            except ValueError:
+                # If on different drives (Windows), use absolute path as fallback
+                html_report_path = str(html_file.resolve())

-
-
-                for key in ["precond_satisfied", "executed", "fail", "error"]:
-                    merged_results[prop_name][key] += prop_result.get(key, 0)
+            with open(result_file, 'r', encoding='utf-8') as f:
+                test_results = json.load(f)

-
-
-
-
+            # Merge results for each property
+            for prop_name, prop_result in test_results.items():
+                for key in ["precond_satisfied", "executed", "fail", "error"]:
+                    merged_results[prop_name][key] += prop_result.get(key, 0)

-
+                # Track source directories for properties with fail/error
+                if prop_result.get('fail', 0) > 0 or prop_result.get('error', 0) > 0:
+                    # Check if this directory is already in the mapping
+                    existing_dirs = [item['dir_name'] for item in property_source_mapping[prop_name]]
+                    if dir_name not in existing_dirs:
+                        property_source_mapping[prop_name].append({
+                            'dir_name': dir_name,
+                            'report_path': html_report_path
+                        })

-
-                logger.error(f"Error reading result file {result_file}: {e}")
-                continue
+            logger.debug(f"Merged results from: {result_file}")

         return dict(merged_results), dict(property_source_mapping)

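Both _merge_property_results and _merge_crash_dump_data (below) now take output_dir so the merged report can hyperlink back to each run's own HTML report. The relative-path logic with its Windows fallback, as a standalone sketch (the helper name is ours):

import os
from pathlib import Path

def report_link(html_file: Path, output_dir: Path) -> str:
    """Relative path from the merged report directory to a per-run HTML report."""
    try:
        # os.path.relpath raises ValueError on Windows when the two paths
        # live on different drives; fall back to the absolute path then.
        return os.path.relpath(html_file.resolve(), output_dir.resolve())
    except ValueError:
        return str(html_file.resolve())
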
@@ -194,11 +199,12 @@
             "total_activities": list(all_activities),
             "tested_activities": list(tested_activities),
             "total_activities_count": len(all_activities),
+            "tested_activities_count": len(tested_activities),
             "activity_count_history": dict(activity_counts),
             "total_steps": total_steps
         }

-    def _merge_crash_dump_data(self) -> Dict:
+    def _merge_crash_dump_data(self, output_dir: Path = None) -> Dict:
         """
         Merge crash and ANR data from all directories

@@ -209,10 +215,22 @@
         all_anr_events = []

         for result_dir in self.result_dirs:
+            dir_name = result_dir.name
+
+            # Locate corresponding HTML report for hyperlinking
+            html_report_path = None
+            html_files = list(result_dir.glob("*.html"))
+            if not html_files:
+                continue
+            html_file = html_files[0]
+            try:
+                html_report_path = os.path.relpath(html_file.resolve(), output_dir.resolve())
+            except ValueError:
+                html_report_path = str(html_file.resolve())
+
             # Find crash dump log file
             output_dirs = list(result_dir.glob("output_*"))
             if not output_dirs:
-                logger.warning(f"No output directory found in {result_dir}")
                 continue

             crash_dump_file = output_dirs[0] / "crash-dump.log"
@@ -223,6 +241,15 @@
             try:
                 # Parse crash and ANR events from this file
                 crash_events, anr_events = self._parse_crash_dump_file(crash_dump_file)
+
+                for crash in crash_events:
+                    crash["source_directory"] = dir_name
+                    crash["report_path"] = html_report_path
+
+                for anr in anr_events:
+                    anr["source_directory"] = dir_name
+                    anr["report_path"] = html_report_path
+
                 all_crash_events.extend(crash_events)
                 all_anr_events.extend(anr_events)

@@ -507,7 +534,11 @@
             # Use first 3 lines of stack trace for deduplication
             stack_lines = stack_trace.split('\n')[:3]
-            crash_key = (
+            crash_key = (
+                exception_type,
+                '\n'.join(stack_lines),
+                crash.get("source_directory", "")
+            )

             if crash_key not in seen_crashes:
                 seen_crashes.add(crash_key)
@@ -532,7 +563,7 @@
             # Create a hash key based on reason and process
             reason = anr.get("reason", "")
             process = anr.get("process", "")
-            anr_key = (reason, process)
+            anr_key = (reason, process, anr.get("source_directory", ""))

             if anr_key not in seen_anrs:
                 seen_anrs.add(anr_key)
@@ -580,6 +611,42 @@
         # Calculate total bugs found (only property bugs, not including crashes/ANRs)
         total_bugs_found = property_bugs_found

+        # Prepare enhanced property statistics with derived metrics
+        processed_property_stats = {}
+        property_stats_summary = {
+            "total_properties": 0,
+            "total_precond_satisfied": 0,
+            "total_executed": 0,
+            "total_passes": 0,
+            "total_fails": 0,
+            "total_errors": 0,
+            "total_not_executed": 0,
+        }
+
+        for prop_name, stats in property_stats.items():
+            precond_satisfied = stats.get("precond_satisfied", 0)
+            total_executions = stats.get("executed", 0)
+            fail_count = stats.get("fail", 0)
+            error_count = stats.get("error", 0)
+
+            pass_count = max(total_executions - fail_count - error_count, 0)
+            not_executed_count = max(precond_satisfied - total_executions, 0)
+
+            processed_property_stats[prop_name] = {
+                **stats,
+                "executed_total": total_executions,
+                "pass_count": pass_count,
+                "not_executed": not_executed_count,
+            }
+
+            property_stats_summary["total_properties"] += 1
+            property_stats_summary["total_precond_satisfied"] += precond_satisfied
+            property_stats_summary["total_executed"] += total_executions
+            property_stats_summary["total_passes"] += pass_count
+            property_stats_summary["total_fails"] += fail_count
+            property_stats_summary["total_errors"] += error_count
+            property_stats_summary["total_not_executed"] += not_executed_count
+
         # Prepare final data
         final_data = {
             'timestamp': datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
@@ -587,7 +654,8 @@
             'property_bugs_found': property_bugs_found,
             'all_properties_count': all_properties_count,
             'executed_properties_count': executed_properties_count,
-            'property_stats':
+            'property_stats': processed_property_stats,
+            'property_stats_summary': property_stats_summary,
             'property_source_mapping': property_source_mapping or {},
             'crash_events': crash_events,
             'anr_events': anr_events,
@@ -659,6 +727,3 @@
         except Exception as e:
             logger.error(f"Error generating HTML report: {e}")
             raise
-
-
-
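
The new property_stats_summary block derives its per-property metrics from the merged counters: passes are executions minus fails and errors, and not-executed is preconditions satisfied minus executions, both clamped at zero. A worked example with made-up counts:

# Hypothetical merged counters for one property:
stats = {"precond_satisfied": 12, "executed": 10, "fail": 2, "error": 1}

pass_count = max(stats["executed"] - stats["fail"] - stats["error"], 0)  # 10 - 2 - 1 = 7
not_executed = max(stats["precond_satisfied"] - stats["executed"], 0)    # 12 - 10 = 2

Note also that the crash and ANR dedup keys now include source_directory, so identical crashes from different runs stay as separate entries instead of collapsing into one.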
kea2/resultSyncer.py
CHANGED
@@ -13,7 +13,7 @@ class ResultSyncer:

     def __init__(self, device_output_dir, options: "Options"):
         self.device_output_dir = device_output_dir
-        self.output_dir =
+        self.output_dir = options.output_dir / Path(device_output_dir).name
         self.running = False
         self.thread = None
         self.sync_event = threading.Event()
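
The syncer now nests each device's results under the configurable options.output_dir, keyed by the basename of the on-device output directory. An illustration with hypothetical paths:

from pathlib import Path

output_dir = Path("results")                      # stands in for options.output_dir
device_output_dir = "/sdcard/fastbot/output_123"  # hypothetical on-device path

print(output_dir / Path(device_output_dir).name)  # results/output_123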