Kea2-python 1.1.0b1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- kea2/__init__.py +8 -0
- kea2/absDriver.py +56 -0
- kea2/adbUtils.py +554 -0
- kea2/assets/config_version.json +16 -0
- kea2/assets/fastbot-thirdpart.jar +0 -0
- kea2/assets/fastbot_configs/abl.strings +2 -0
- kea2/assets/fastbot_configs/awl.strings +3 -0
- kea2/assets/fastbot_configs/max.config +7 -0
- kea2/assets/fastbot_configs/max.fuzzing.strings +699 -0
- kea2/assets/fastbot_configs/max.schema.strings +1 -0
- kea2/assets/fastbot_configs/max.strings +3 -0
- kea2/assets/fastbot_configs/max.tree.pruning +27 -0
- kea2/assets/fastbot_configs/teardown.py +18 -0
- kea2/assets/fastbot_configs/widget.block.py +38 -0
- kea2/assets/fastbot_libs/arm64-v8a/libfastbot_native.so +0 -0
- kea2/assets/fastbot_libs/armeabi-v7a/libfastbot_native.so +0 -0
- kea2/assets/fastbot_libs/x86/libfastbot_native.so +0 -0
- kea2/assets/fastbot_libs/x86_64/libfastbot_native.so +0 -0
- kea2/assets/framework.jar +0 -0
- kea2/assets/kea2-thirdpart.jar +0 -0
- kea2/assets/monkeyq.jar +0 -0
- kea2/assets/quicktest.py +126 -0
- kea2/cli.py +216 -0
- kea2/fastbotManager.py +269 -0
- kea2/kea2_api.py +166 -0
- kea2/keaUtils.py +926 -0
- kea2/kea_launcher.py +299 -0
- kea2/logWatcher.py +92 -0
- kea2/mixin.py +0 -0
- kea2/report/__init__.py +0 -0
- kea2/report/bug_report_generator.py +879 -0
- kea2/report/mixin.py +496 -0
- kea2/report/report_merger.py +1066 -0
- kea2/report/templates/bug_report_template.html +4028 -0
- kea2/report/templates/merged_bug_report_template.html +3602 -0
- kea2/report/utils.py +10 -0
- kea2/result.py +257 -0
- kea2/resultSyncer.py +65 -0
- kea2/state.py +22 -0
- kea2/typedefs.py +32 -0
- kea2/u2Driver.py +612 -0
- kea2/utils.py +192 -0
- kea2/version_manager.py +102 -0
- kea2_python-1.1.0b1.dist-info/METADATA +447 -0
- kea2_python-1.1.0b1.dist-info/RECORD +49 -0
- kea2_python-1.1.0b1.dist-info/WHEEL +5 -0
- kea2_python-1.1.0b1.dist-info/entry_points.txt +2 -0
- kea2_python-1.1.0b1.dist-info/licenses/LICENSE +16 -0
- kea2_python-1.1.0b1.dist-info/top_level.txt +1 -0
kea2/keaUtils.py
ADDED
|
@@ -0,0 +1,926 @@
|
|
|
1
|
+
import random
|
|
2
|
+
import warnings
|
|
3
|
+
import types
|
|
4
|
+
import traceback
|
|
5
|
+
import json
|
|
6
|
+
import os
|
|
7
|
+
import functools
|
|
8
|
+
|
|
9
|
+
from collections import deque
|
|
10
|
+
from copy import deepcopy
|
|
11
|
+
from pathlib import Path
|
|
12
|
+
from time import perf_counter, sleep
|
|
13
|
+
from typing import Callable, Any, Deque, Dict, List, Literal, NewType, Tuple, Union
|
|
14
|
+
from contextvars import ContextVar
|
|
15
|
+
from unittest import TextTestRunner, TestLoader, TestSuite, TestCase
|
|
16
|
+
from unittest import registerResult, TextTestResult, SkipTest
|
|
17
|
+
from unittest import main as unittest_main
|
|
18
|
+
from dataclasses import dataclass, asdict
|
|
19
|
+
from datetime import datetime
|
|
20
|
+
from fnmatch import fnmatchcase
|
|
21
|
+
|
|
22
|
+
import uiautomator2 as u2
|
|
23
|
+
|
|
24
|
+
|
|
25
|
+
from .typedefs import PRECONDITIONS_MARKER, PROB_MARKER, MAX_TRIES_MARKER, INTERRUPTABLE_MARKER
|
|
26
|
+
from .typedefs import PropertyStore
|
|
27
|
+
from .absDriver import AbstractDriver
|
|
28
|
+
from .report.bug_report_generator import BugReportGenerator
|
|
29
|
+
from .resultSyncer import ResultSyncer
|
|
30
|
+
from .logWatcher import LogWatcher
|
|
31
|
+
from .utils import TimeStamp, catchException, getProjectRoot, getLogger, loadFuncsFromFile, timer, getClassName, getFullPropName
|
|
32
|
+
from .u2Driver import StaticU2UiObject, StaticXpathObject, U2Driver, U2StaticDevice
|
|
33
|
+
from .fastbotManager import FastbotManager
|
|
34
|
+
from .adbUtils import ADBDevice
|
|
35
|
+
from .state import invariant, INVARIANT_MARKER
|
|
36
|
+
from .result import KeaJsonResult, KeaTextTestResult
|
|
37
|
+
|
|
38
|
+
# Module-level logger for this module (configured via the project's getLogger helper).
logger = getLogger(__name__)
# Context flag used elsewhere in the package; default False.
# NOTE(review): presumably True while a hybrid (unittest -> kea2) session is active — confirm against kea_launcher.
hybrid_mode = ContextVar("hybrid_mode", default=False)


# Module-level output-file naming state. These are assigned by
# Options.__post_init__ / Options.set_stamp and later re-anchored under the
# output directory by KeaTestRunner._setOuputDir.
STAMP: str               # tag embedded in every output file name
LOGFILE: str             # fastbot log file name (later a full Path)
RESFILE: str             # JSON result file name (later a full Path)
PROP_EXEC_RESFILE: str   # property execution info file name (later a full Path)
|
|
46
|
+
|
|
47
|
+
|
|
48
|
+
def precondition(precond: Callable[[Any], bool]) -> Callable:
    """The decorator @precondition.

    @precondition specifies when the property could be executed.
    A property may carry several preconditions; each use of @precondition
    appends one more to the tuple stored on the decorated function.
    """
    def accept(f):
        # Accumulate rather than overwrite, so stacked decorators all survive.
        existing = getattr(f, PRECONDITIONS_MARKER, tuple())
        setattr(f, PRECONDITIONS_MARKER, (*existing, precond))
        return f

    return accept
|
|
60
|
+
|
|
61
|
+
|
|
62
|
+
def prob(p: float):
    """The decorator @prob.

    @prob specifies the probability of executing a property once its
    preconditions are satisfied.

    :param p: execution probability; must satisfy 0 < p <= 1.
    :raises ValueError: if p is outside the half-open interval (0, 1].
    """
    p = float(p)
    if not 0 < p <= 1.0:
        # Fixed typo/grammar in the original message
        # ("The propbability should between 0 and 1").
        raise ValueError("The probability should be between 0 and 1")

    def accept(f):
        setattr(f, PROB_MARKER, p)
        return f

    return accept
|
|
76
|
+
|
|
77
|
+
|
|
78
|
+
def max_tries(n: int):
    """The decorator @max_tries.

    @max_tries specifies the maximum number of times a property may be executed.

    :param n: positive integer execution budget.
    :raises ValueError: if n is not a positive integer.
    """
    n = int(n)
    if not n > 0:
        # Fixed typo in the original message ("maxium").
        raise ValueError("The maximum tries should be a positive integer.")

    def accept(f):
        setattr(f, MAX_TRIES_MARKER, n)
        return f

    return accept
|
|
92
|
+
|
|
93
|
+
|
|
94
|
+
def interruptable(strategy='default'):
    """The decorator @interruptable.

    Marks a property as interruptable — fuzzing may be injected while the
    property's statements run; *strategy* selects the injection strategy.
    """

    def decorator(func):
        setattr(func, INTERRUPTABLE_MARKER, True)
        # Literal attribute name, so plain assignment is equivalent to setattr.
        func.strategy = strategy
        return func
    return decorator
|
|
105
|
+
|
|
106
|
+
|
|
107
|
+
@dataclass
class Options:
    """
    Kea and Fastbot configurations.

    Constructing an Options instance has module-level side effects: it sets the
    global STAMP / LOGFILE / RESFILE / PROP_EXEC_RESFILE names, validates the
    installed packages and writes the bug-report config (see __post_init__).
    """
    # the driver_name used inside scripts (if self.d, then "d")
    driverName: str = None
    # the driver class (only U2Driver available now)
    Driver: AbstractDriver = U2Driver
    # list of package names; specifies the apps under test
    packageNames: List[str] = None
    # target device serial
    serial: str = None
    # target device transport_id
    transport_id: str = None
    # test agent. "native" for stage 1 and "u2" for stage 1~3
    agent: Literal["u2", "native"] = "u2"
    # max step count in exploration (available in stage 2~3)
    maxStep: Union[str, float] = float("inf")
    # time (minutes) for exploration
    running_mins: int = 10
    # time (ms) to wait between steps when exploring the app
    throttle: int = 200
    # the output dir for saving logs and results
    output_dir: str = "output"
    # the stamp for log file and result file; default: current time stamp
    log_stamp: str = None
    # profiling period (in steps) for collecting the coverage result
    profile_period: int = 25
    # take screenshots for every step
    take_screenshots: bool = False
    # dump n screenshots before a failure (0 means keep screenshots for every step)
    pre_failure_screenshots: int = 0
    # dump n screenshots after a failure; must be <= pre_failure_screenshots
    post_failure_screenshots: int = 0
    # the root of the output dir on the device
    device_output_root: str = "/sdcard"
    # the debug mode (controls logging level)
    debug: bool = False
    # Activity whitelist file
    act_whitelist_file: str = None
    # Activity blacklist file
    act_blacklist_file: str = None
    # propertytest sub-command args (e.g. discover -s xxx -p xxx)
    propertytest_args: List[str] = None
    # period (N steps) to restart the app under test
    restart_app_period: int = None
    # unittest sub-command args (Feat 4)
    unittest_args: List[str] = None
    # extra args passed directly to fastbot
    extra_args: List[str] = None

    def __setattr__(self, name, value):
        # Silently drop assignments of None so an unset option never clobbers
        # an existing/default value.
        # NOTE(review): this also makes it impossible to reset a field back to
        # None after construction — confirm this is intended.
        if value is None:
            return
        super().__setattr__(name, value)

    def __post_init__(self):
        """Validate options and initialize module-level output-file state."""
        import logging
        logging.basicConfig(level=logging.DEBUG if self.debug else logging.INFO)

        # Bind the target device to the driver before anything talks to adb.
        if self.Driver:
            self._set_driver()

        # Initialize the module-wide STAMP used to name all output files.
        global STAMP
        STAMP = self.log_stamp if self.log_stamp else TimeStamp().getTimeStamp()

        self._sanitize_stamp()

        # Results live under <output_dir>/res_<STAMP>.
        self.output_dir = Path(self.output_dir).absolute() / f"res_{STAMP}"
        self.set_stamp()

        self._sanitize_args()

        _check_package_installation(self.packageNames)
        _save_bug_report_configs(self)

    def set_stamp(self, stamp: str = None):
        """Derive the global log/result file names from STAMP (or *stamp*)."""
        global STAMP, LOGFILE, RESFILE, PROP_EXEC_RESFILE
        if stamp:
            STAMP = stamp

        LOGFILE = f"fastbot_{STAMP}.log"
        RESFILE = f"result_{STAMP}.json"
        PROP_EXEC_RESFILE = f"property_exec_info_{STAMP}.json"

    def _sanitize_stamp(self):
        """Reject stamps containing characters illegal in file names."""
        global STAMP
        illegal_chars = ['/', '\\', ':', '*', '?', '"', '<', '>', '|', '\n', '\r', '\t', '\0']
        for char in illegal_chars:
            if char in STAMP:
                raise ValueError(
                    f"char: `{char}` is illegal in --log-stamp. current stamp: {STAMP}"
                )

    def _sanitize_args(self):
        """Cross-validate and normalize numeric/flag options; raises ValueError."""
        if not self.take_screenshots and self.pre_failure_screenshots > 0:
            raise ValueError("--pre-failure-screenshots should be 0 when --take-screenshots is not set.")

        if self.pre_failure_screenshots < self.post_failure_screenshots:
            raise ValueError("--post-failure-screenshots should be smaller than --pre-failure-screenshots.")

        self.profile_period = int(self.profile_period)
        if self.profile_period < 1:
            raise ValueError("--profile-period should be greater than 0")

        self.throttle = int(self.throttle)
        if self.throttle < 0:
            raise ValueError("--throttle should be greater than or equal to 0")

        # u2 agent injects the driver into scripts by attribute name, so the
        # name is mandatory in that mode.
        if self.agent == 'u2' and self.driverName == None:
            raise ValueError("--driver-name should be specified when customizing script in --agent u2")

    def _set_driver(self):
        """Point both the Driver class and ADBDevice at the selected device."""
        target_device = dict()
        if self.serial:
            target_device["serial"] = self.serial
        if self.transport_id:
            target_device["transport_id"] = self.transport_id
        self.Driver.setDevice(target_device)
        ADBDevice.setDevice(self.serial, self.transport_id)

    def getKeaTestOptions(self, hybrid_test_count: int) -> "Options":
        """Get the KeaTestOptions for a hybrid test run when switching from
        unittest to kea2 test.

        :param hybrid_test_count: the count of hybrid test runs (used to make
            the per-run stamp unique).
        :raises RuntimeError: if this Options has no unittest_args to derive from.
        """
        if not self.unittest_args:
            raise RuntimeError("unittest_args is None. Cannot get KeaTestOptions from it")

        opts = deepcopy(self)

        time_stamp = TimeStamp().getTimeStamp()
        hybrid_test_stamp = f"{time_stamp}_hybrid_{hybrid_test_count}"

        # Nest the hybrid run's results under the parent run's output dir.
        opts.output_dir = self.output_dir / f"res_{hybrid_test_stamp}"

        opts.set_stamp(hybrid_test_stamp)
        opts.unittest_args = []
        return opts
|
|
246
|
+
|
|
247
|
+
|
|
248
|
+
def _check_package_installation(packageNames):
    """Abort (log + ValueError) if any app under test is not installed."""
    installed = set(ADBDevice().list_packages())

    missing = next((pkg for pkg in packageNames if pkg not in installed), None)
    if missing is not None:
        logger.error(f"package {missing} not installed. Abort.")
        raise ValueError(f"{missing} not installed")
|
|
255
|
+
|
|
256
|
+
|
|
257
|
+
def _save_bug_report_configs(options: Options):
    """Persist the subset of options the bug-report generator needs as JSON."""
    out_dir = options.output_dir
    out_dir.mkdir(parents=True, exist_ok=True)
    payload = {
        "driverName": options.driverName,
        "packageNames": options.packageNames,
        "take_screenshots": options.take_screenshots,
        "pre_failure_screenshots": options.pre_failure_screenshots,
        "post_failure_screenshots": options.post_failure_screenshots,
        "device_output_root": options.device_output_root,
        # fall back to the process-wide STAMP when no explicit stamp was given
        "log_stamp": options.log_stamp if options.log_stamp else STAMP,
        "test_time": datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
    }
    with open(out_dir / "bug_report_config.json", "w", encoding="utf-8") as fp:
        json.dump(payload, fp, indent=4)
|
|
272
|
+
|
|
273
|
+
|
|
274
|
+
class KeaOptionSetter:
    """Mixin holding the shared Options instance used by the Kea test machinery."""

    options: Options = None

    @classmethod
    def setOptions(cls, options: Options):
        """Validate and store the runner options on the class.

        :raises ValueError: if packageNames is not a non-empty list.
        """
        # BUG FIX: the original condition was
        #   `not isinstance(options.packageNames, list) and len(...) > 0`
        # which (a) let an empty list through, and (b) raised TypeError on None
        # instead of the intended ValueError. The intent per the message is a
        # non-empty list of package names.
        if not (isinstance(options.packageNames, list) and len(options.packageNames) > 0):
            raise ValueError("packageNames should be given in a list.")
        # Drivers are meaningless in native mode; drop them with a warning
        # (fixed "runing" typo in the message).
        if options.Driver is not None and options.agent == "native":
            logger.warning("[Warning] Can not use any Driver when running native mode.")
            options.Driver = None
        cls.options = options
|
|
285
|
+
|
|
286
|
+
|
|
287
|
+
class KeaTestSuite(TestSuite):
    """A TestSuite that forwards Kea decorator metadata from the test method
    onto the TestCase instance when a test is added."""

    def addTest(self, test):
        # Copy preconds / prob / max_tries / interruptable / invariant markers
        # from the bound method to the instance so the runner can read them
        # without re-resolving the method.
        if isinstance(test, TestCase):
            method = getattr(test, test._testMethodName)
            _missing = object()
            for marker in {PRECONDITIONS_MARKER, INVARIANT_MARKER, PROB_MARKER, MAX_TRIES_MARKER, INTERRUPTABLE_MARKER}:
                value = getattr(method, marker, _missing)
                if value is not _missing:
                    setattr(test, marker, value)
        return super().addTest(test)
|
|
297
|
+
|
|
298
|
+
|
|
299
|
+
class KeaTestLoader(TestLoader):
    """TestLoader specialized for property-based tests.

    An empty testMethodPrefix means every attribute name is a candidate; the
    real filtering happens in getTestCaseNames based on the Kea markers.
    """
    testMethodPrefix = ""
    suiteClass = KeaTestSuite

    def loadTestsFromTestCase(self, testCaseClass):
        # Remove the setUp and tearDown functions in PBT: properties must run
        # against whatever UI state exploration left behind, so per-test
        # fixtures are neutralized by patching no-op methods onto the class.
        def setUp(self): ...
        def tearDown(self): ...
        testCaseClass.setUp = types.MethodType(setUp, testCaseClass)
        testCaseClass.tearDown = types.MethodType(tearDown, testCaseClass)
        return super().loadTestsFromTestCase(testCaseClass)

    def getTestCaseNames(self, testCaseClass):
        """Return a sorted sequence of method names found within testCaseClass
        """
        def shouldIncludeMethod(attrname: str):
            # testMethodPrefix is "" so this never filters; kept for parity
            # with the base-class contract.
            if not attrname.startswith(self.testMethodPrefix):
                return False
            testFunc = getattr(testCaseClass, attrname)
            if not callable(testFunc):
                return False
            # exclude the test methods that are not properties.
            # `^` binds tighter than `not`, so this keeps a method only when it
            # has EXACTLY ONE of the two markers (property XOR invariant).
            if not hasattr(testFunc, PRECONDITIONS_MARKER) ^ hasattr(testFunc, INVARIANT_MARKER):
                return False
            fullName = f'%s.%s' % (getClassName(testCaseClass), attrname)
            self.__log_loading_info(testFunc, fullName)
            # honor -k style name patterns when provided
            return self.testNamePatterns is None or \
                any(fnmatchcase(fullName, pattern) for pattern in self.testNamePatterns)
        testFnNames = list(filter(shouldIncludeMethod, dir(testCaseClass)))
        if self.sortTestMethodsUsing:
            testFnNames.sort(key=functools.cmp_to_key(self.sortTestMethodsUsing))
        return testFnNames

    def __log_loading_info(self, testFunc: Callable, fullName: str):
        # Announce each discovered property/invariant on stdout.
        if hasattr(testFunc, PRECONDITIONS_MARKER):
            print(f"[INFO] Load property: {fullName}", flush=True)
        if hasattr(testFunc, INVARIANT_MARKER):
            print(f"[INFO] Load invariant: {fullName}", flush=True)
|
|
337
|
+
|
|
338
|
+
|
|
339
|
+
# Module-level loader singleton shared by the CLI / runner entry points.
keaTestLoader = KeaTestLoader()
|
|
340
|
+
|
|
341
|
+
|
|
342
|
+
class SetUpClassExtension:
    """Mixin that runs each TestCase class's setUpClass at most once per process."""

    # repr() keys of classes whose setUpClass has already been executed
    _setup = set()

    def setUpClass(self: "KeaTestRunner", test: TestCase):
        """Inject the script driver onto *test*'s class and call its setUpClass once.

        Errors raised by a user setUpClass are logged but not propagated, so one
        broken fixture cannot abort the whole exploration run.
        """
        testClass = test.__class__
        key = repr(testClass)  # compute once instead of twice
        if key not in self._setup:  # idiom fix: `x not in s` over `not x in s`
            self._setup.add(key)
            # Dependency injection: expose the u2 proxy driver on the class
            # under the user-chosen attribute name.
            script_driver = U2Driver.getScriptDriver(mode="proxy")
            setattr(testClass, self.options.driverName, script_driver)
            try:
                testClass.setUpClass()
            except Exception:
                logger.error(f"Error when executing {getClassName(testClass)}.setUpClass")
                # traceback is already imported at module level; the redundant
                # local `import traceback` was removed.
                traceback.print_exc()
|
|
358
|
+
|
|
359
|
+
|
|
360
|
+
class KeaTestRunner(TextTestRunner, KeaOptionSetter, SetUpClassExtension):
|
|
361
|
+
|
|
362
|
+
resultclass: KeaJsonResult
|
|
363
|
+
allProperties: PropertyStore
|
|
364
|
+
allInvariants: PropertyStore
|
|
365
|
+
_block_funcs: Dict[Literal["widgets", "trees"], List[Callable]] = None
|
|
366
|
+
|
|
367
|
+
def _setOuputDir(self):
|
|
368
|
+
output_dir = self.options.output_dir
|
|
369
|
+
output_dir.mkdir(parents=True, exist_ok=True)
|
|
370
|
+
global LOGFILE, RESFILE, PROP_EXEC_RESFILE
|
|
371
|
+
LOGFILE = output_dir / Path(LOGFILE)
|
|
372
|
+
RESFILE = output_dir / Path(RESFILE)
|
|
373
|
+
PROP_EXEC_RESFILE = output_dir / Path(PROP_EXEC_RESFILE)
|
|
374
|
+
logger.info(f"Log file: {LOGFILE}")
|
|
375
|
+
logger.info(f"Result file: {RESFILE}")
|
|
376
|
+
logger.info(f"Property execution info file: {PROP_EXEC_RESFILE}")
|
|
377
|
+
|
|
378
|
+
    def run(self, test):
        """Main property-based-testing loop.

        Starts Fastbot, then alternates monkey exploration steps with invariant
        checks and (when preconditions hold) property executions, until the
        step budget (--max-step) or time budget (--running-minutes) is spent.
        Returns the populated KeaJsonResult.
        """
        self.validateAndCollectProperties(test)

        if len(self.allProperties) == 0:
            logger.warning("No property has been found.")

        self._setOuputDir()

        # Setup JsonResult: wire the collected properties/invariants and the
        # output files into the result class before instantiating it.
        KeaJsonResult.setProperties(self.allProperties)
        KeaJsonResult.setInvariants(self.allInvariants)
        KeaJsonResult.setOutputFile(result_file=RESFILE, property_exec_result_file=PROP_EXEC_RESFILE)
        self.resultclass = KeaJsonResult
        result: KeaJsonResult = self._makeResult()
        registerResult(result)

        result.failfast = self.failfast
        result.buffer = self.buffer
        result.tb_locals = self.tb_locals

        with warnings.catch_warnings():
            fb = FastbotManager(self.options, LOGFILE)
            fb.start()

            log_watcher = LogWatcher(LOGFILE)

            if self.options.agent == "u2":
                # initialize the result.json file
                result.flushResult()
                # setUp for the u2 driver
                self.scriptDriver = U2Driver.getScriptDriver(mode="proxy")

                # Run each test class's setUpClass once (NB: rebinds the local
                # name `test`, shadowing the suite parameter — intentional here
                # since the suite is no longer needed).
                for test in {**self.allProperties, **self.allInvariants}.values():
                    self.setUpClass(test)

            fb.check_alive()
            fb.init(options=self.options, stamp=STAMP)

            resultSyncer = ResultSyncer(fb.device_output_dir, self.options)
            resultSyncer.run()
            start_time = perf_counter()
            fb_is_running = True
            self.stepsCount = 0

            while self.stepsCount < self.options.maxStep:
                logger.info(f"[Property based testing] [New Iteration] Elapsed: {perf_counter()-start_time:.1f}s")
                if self.shouldStop(start_time):
                    logger.info("Exploration time up (--running-minutes).")
                    break

                # Periodically kill the apps under test (--restart-app-period);
                # this consumes one step and skips the rest of the iteration.
                if self.options.restart_app_period and self.stepsCount and self.stepsCount % self.options.restart_app_period == 0:
                    self.stepsCount += 1
                    logger.info(f"Sending monkeyEvent {self._monkey_event_count}")
                    logger.info("Kill all test apps to restart the app under test.")
                    for app in self.options.packageNames:
                        logger.info(f"Stopping app: {app}")
                        self.scriptDriver.app_stop(app)
                    sleep(3)
                    fb.sendInfo("kill_apps")
                    continue

                try:
                    # determine whether to stepMonkey (normal step) or dumpHierarchy (after executing a property)
                    # stepMonkey will change the ui state and return the new ui hierarchy
                    # dumpHierarchy will just return the current ui hierarchy
                    # this is to avoid losing the ui state after executing a property
                    xml_raw: str = ""
                    if fb.executed_prop:
                        fb.executed_prop = False
                        xml_raw = fb.dumpHierarchy()
                    else:
                        self.stepsCount += 1
                        logger.info(f"Sending monkeyEvent {self._monkey_event_count}")
                        xml_raw = fb.stepMonkey(self._monkeyStepInfo)
                # If the connection is refused, fastbot might have stopped running
                except u2.HTTPError:
                    logger.info("Connection refused by remote.")
                    # If fastbot has exited normally, end the testing process
                    if fb.get_return_code() == 0:
                        logger.info("Exploration times up (--running-minutes).")
                        fb_is_running = False
                        break
                    else:
                        import traceback
                        traceback.print_exc()
                        raise RuntimeError("Fastbot Aborted.")

                if not xml_raw:
                    logger.warning("Empty ui hierarchy returned. Skip this step.")
                    continue

                result.setCurrentStepsCount(self.stepsCount)

                # check all invariants against the freshly dumped hierarchy
                staticCheckerDriver = U2Driver.getStaticChecker(hierarchy=xml_raw)
                if self.allInvariants:
                    print(f"[INFO] Checking {len(self.allInvariants)} invariants...", flush=True)
                    for _, test in self.allInvariants.items():
                        setattr(test, self.options.driverName, staticCheckerDriver)
                        try:
                            test(result)
                        finally:
                            result.printError(test)
                            result.updateExecutionInfo(test)
                            # forward failures/errors to the fastbot-side log
                            if result.lastInvariantInfo.state in {"fail", "error"}:
                                fb.logScript(result.lastInvariantInfo)

                # Trigger the result syncer to get the coverage result periodically (set by profile_period)
                if self.options.profile_period and self.stepsCount % self.options.profile_period == 0:
                    resultSyncer.sync_event.set()

                # get the checkable properties
                checkableProperties = self.getCheckableProperties(xml_raw, result, staticCheckerDriver)

                if not checkableProperties:
                    continue

                self.scriptDriver = U2Driver.getScriptDriver(mode="proxy")

                # randomly select a property to execute
                propertyName = random.choice(checkableProperties)
                test = self.allProperties[propertyName]
                result.addExcutedProperty(test, self.stepsCount)
                fb.logScript(result.lastPropertyInfo)
                # Dependency injection: bind the live script driver for execution
                setattr(test, self.options.driverName, self.scriptDriver)
                try:
                    test(result)
                finally:
                    result.printError(test)
                    result.updateExecutionInfo(test)
                    fb.logScript(result.lastPropertyInfo)
                # Next iteration dumps the hierarchy instead of stepping, so the
                # post-property UI state is observed rather than skipped.
                fb.executed_prop = True
                result.flushResult()

            if fb_is_running:
                fb.stopMonkey()
            result.flushResult()
            resultSyncer.close()

            fb.join()
            print(f"Finish sending monkey events.", flush=True)
            log_watcher.close()

            result.logSummary()

        if self.options.agent == "u2":
            self._generate_bug_report()

        self.tearDown()
        return result
|
|
530
|
+
|
|
531
|
+
def shouldStop(self, start_time):
|
|
532
|
+
if self.options.running_mins is None:
|
|
533
|
+
return False
|
|
534
|
+
return (perf_counter() - start_time) >= self.options.running_mins * 60
|
|
535
|
+
|
|
536
|
+
@property
|
|
537
|
+
def _monkeyStepInfo(self):
|
|
538
|
+
r = self._get_block_widgets()
|
|
539
|
+
r["steps_count"] = self.stepsCount
|
|
540
|
+
return r
|
|
541
|
+
|
|
542
|
+
@property
|
|
543
|
+
def _monkey_event_count(self):
|
|
544
|
+
return f"({self.stepsCount} / {self.options.maxStep})" if self.options.maxStep != float("inf") else f"({self.stepsCount})"
|
|
545
|
+
|
|
546
|
+
def _get_block_widgets(self):
|
|
547
|
+
block_dict = self._getBlockedWidgets()
|
|
548
|
+
block_widgets: List[str] = block_dict['widgets']
|
|
549
|
+
block_trees: List[str] = block_dict['trees']
|
|
550
|
+
logger.debug(f"Blocking widgets: {block_widgets}")
|
|
551
|
+
logger.debug(f"Blocking trees: {block_trees}")
|
|
552
|
+
return {
|
|
553
|
+
"block_widgets": block_widgets,
|
|
554
|
+
"block_trees": block_trees
|
|
555
|
+
}
|
|
556
|
+
|
|
557
|
+
def getCheckableProperties(self, xml_raw: str, result: KeaJsonResult, staticCheckerDriver: U2StaticDevice) -> List:
|
|
558
|
+
# Get the precondition satisfied properties
|
|
559
|
+
precondSatisfiedProperties = list()
|
|
560
|
+
for propName, test in self.allProperties.items():
|
|
561
|
+
valid = True
|
|
562
|
+
property = getattr(test, test._testMethodName)
|
|
563
|
+
# check if all preconds passed
|
|
564
|
+
for precond in property.preconds:
|
|
565
|
+
# Dependency injection. Static driver checker for precond
|
|
566
|
+
setattr(test, self.options.driverName, staticCheckerDriver)
|
|
567
|
+
# excecute the precondition
|
|
568
|
+
try:
|
|
569
|
+
if not precond(test):
|
|
570
|
+
valid = False
|
|
571
|
+
break
|
|
572
|
+
except u2.UiObjectNotFoundError as e:
|
|
573
|
+
valid = False
|
|
574
|
+
break
|
|
575
|
+
except Exception as e:
|
|
576
|
+
logger.error(f"Error when checking precond: {propName}")
|
|
577
|
+
traceback.print_exc()
|
|
578
|
+
valid = False
|
|
579
|
+
break
|
|
580
|
+
# if all the precond passed. make it the candidate prop.
|
|
581
|
+
if valid:
|
|
582
|
+
result.addPropertyPrecondSatisfied(test)
|
|
583
|
+
precondSatisfiedProperties.append(propName)
|
|
584
|
+
|
|
585
|
+
# get the checkable properties
|
|
586
|
+
checkableProperties = []
|
|
587
|
+
u = random.random() # sample the execution probability threshold u ~ U(0, 1)
|
|
588
|
+
for propName in precondSatisfiedProperties:
|
|
589
|
+
test = self.allProperties[propName]
|
|
590
|
+
p = getattr(test, PROB_MARKER, 1)
|
|
591
|
+
max_tries = getattr(test, MAX_TRIES_MARKER, float("inf"))
|
|
592
|
+
# filter the properties according to the given u
|
|
593
|
+
if p < u:
|
|
594
|
+
print(f"{propName} will not execute due to probability (@prob). Skip.", flush=True)
|
|
595
|
+
continue
|
|
596
|
+
# filter the property reached max_tries
|
|
597
|
+
if result.getExcutedProperty(test) >= max_tries:
|
|
598
|
+
print(f"{propName} has reached its max_tries {max_tries} (@max_tries). Skip.", flush=True)
|
|
599
|
+
continue
|
|
600
|
+
checkableProperties.append(propName)
|
|
601
|
+
|
|
602
|
+
# log the checkable properties information
|
|
603
|
+
if len(checkableProperties) > 0:
|
|
604
|
+
print(f"[INFO] {len(checkableProperties)} Checkable properties:", flush=True)
|
|
605
|
+
print("\n".join([f' - {_}' for _ in checkableProperties]), flush=True)
|
|
606
|
+
else:
|
|
607
|
+
print(f"[INFO] {len(checkableProperties)} Checkable property.", flush=True)
|
|
608
|
+
|
|
609
|
+
return checkableProperties
|
|
610
|
+
|
|
611
|
+
def validateAndCollectProperties(self, test: TestSuite):
|
|
612
|
+
""" validate and collect all the properties to prepare for PBT
|
|
613
|
+
:Why validate here?:
|
|
614
|
+
Because some properties may not be importable due to ImportError (e.g., missing dependencies
|
|
615
|
+
or syntax errors). We need to validate them before PBT to avoid runtime errors.
|
|
616
|
+
"""
|
|
617
|
+
self.allProperties = dict()
|
|
618
|
+
self.allInvariants = dict()
|
|
619
|
+
|
|
620
|
+
def iter_tests(suite):
|
|
621
|
+
for test in suite:
|
|
622
|
+
if isinstance(test, TestSuite):
|
|
623
|
+
yield from iter_tests(test)
|
|
624
|
+
else:
|
|
625
|
+
yield test
|
|
626
|
+
# Traverse the TestCase to get all properties
|
|
627
|
+
_result = TextTestResult(self.stream, self.descriptions, self.verbosity)
|
|
628
|
+
for t in iter_tests(test):
|
|
629
|
+
# Find all the _FailedTest (Caused by ImportError) and directly run it to report errors
|
|
630
|
+
if type(t).__name__ == "_FailedTest":
|
|
631
|
+
t(_result)
|
|
632
|
+
continue
|
|
633
|
+
if hasattr(t, PRECONDITIONS_MARKER):
|
|
634
|
+
self.allProperties[getFullPropName(t)] = t
|
|
635
|
+
if hasattr(t, INVARIANT_MARKER):
|
|
636
|
+
self.allInvariants[getFullPropName(t)] = t
|
|
637
|
+
# Print errors caused by ImportError
|
|
638
|
+
_result.printErrors()
|
|
639
|
+
|
|
640
|
+
@property
|
|
641
|
+
def _blockWidgetFuncs(self):
|
|
642
|
+
"""
|
|
643
|
+
load and process blocking functions from widget.block.py configuration file.
|
|
644
|
+
|
|
645
|
+
Returns:
|
|
646
|
+
dict: A dictionary containing two lists:
|
|
647
|
+
- 'widgets': List of functions that block individual widgets
|
|
648
|
+
- 'trees': List of functions that block widget trees
|
|
649
|
+
"""
|
|
650
|
+
if self._block_funcs is None:
|
|
651
|
+
self._block_funcs = {"widgets": list(), "trees": list()}
|
|
652
|
+
root_dir = getProjectRoot()
|
|
653
|
+
if root_dir is None or not os.path.exists(
|
|
654
|
+
file_block_widgets := root_dir / "configs" / "widget.block.py"
|
|
655
|
+
):
|
|
656
|
+
print(f"[WARNING] widget.block.py not find", flush=True)
|
|
657
|
+
|
|
658
|
+
def __get_block_widgets_module():
|
|
659
|
+
import importlib.util
|
|
660
|
+
module_name = "block_widgets"
|
|
661
|
+
spec = importlib.util.spec_from_file_location(module_name, file_block_widgets)
|
|
662
|
+
mod = importlib.util.module_from_spec(spec)
|
|
663
|
+
spec.loader.exec_module(mod)
|
|
664
|
+
return mod
|
|
665
|
+
|
|
666
|
+
mod = __get_block_widgets_module()
|
|
667
|
+
|
|
668
|
+
import inspect
|
|
669
|
+
for func_name, func in inspect.getmembers(mod, inspect.isfunction):
|
|
670
|
+
if func_name == "global_block_widgets":
|
|
671
|
+
self._block_funcs["widgets"].append(func)
|
|
672
|
+
setattr(func, PRECONDITIONS_MARKER, (lambda d: True,))
|
|
673
|
+
continue
|
|
674
|
+
if func_name == "global_block_tree":
|
|
675
|
+
self._block_funcs["trees"].append(func)
|
|
676
|
+
setattr(func, PRECONDITIONS_MARKER, (lambda d: True,))
|
|
677
|
+
continue
|
|
678
|
+
if func_name.startswith("block_") and not func_name.startswith("block_tree_"):
|
|
679
|
+
if getattr(func, PRECONDITIONS_MARKER, None) is None:
|
|
680
|
+
logger.warning(f"No precondition in block widget function: {func_name}. Default globally active.")
|
|
681
|
+
setattr(func, PRECONDITIONS_MARKER, (lambda d: True,))
|
|
682
|
+
self._block_funcs["widgets"].append(func)
|
|
683
|
+
continue
|
|
684
|
+
if func_name.startswith("block_tree_"):
|
|
685
|
+
if getattr(func, PRECONDITIONS_MARKER, None) is None:
|
|
686
|
+
logger.warning(f"No precondition in block tree function: {func_name}. Default globally active.")
|
|
687
|
+
setattr(func, PRECONDITIONS_MARKER, (lambda d: True,))
|
|
688
|
+
self._block_funcs["trees"].append(func)
|
|
689
|
+
|
|
690
|
+
return self._block_funcs
|
|
691
|
+
|
|
692
|
+
    def _getBlockedWidgets(self):
        """
        Executes all blocking functions to get lists of widgets and trees to be blocked during testing.

        Each registered block function is run only when all of its preconditions
        (attached via PRECONDITIONS_MARKER) pass against a live script driver.
        The widgets it returns are converted to XPath strings via the static
        checker objects.

        Returns:
            dict: A dictionary containing:
                - 'widgets': List of XPath strings for individual widgets to block
                - 'trees': List of XPath strings for widget trees to block
                  (any XPath present in both is kept only in 'trees')
        """
        def _get_xpath_widgets(func):
            # Run one block function and collect the XPaths of the widgets it
            # returns. Returns an empty set on any failure (best-effort).
            blocked_set = set()
            script_driver = self.options.Driver.getScriptDriver()
            preconds = getattr(func, PRECONDITIONS_MARKER, [])

            def preconds_pass(preconds):
                # All preconditions must hold; a missing UI object simply means
                # "not applicable right now", so it is not treated as an error.
                try:
                    return all(precond(script_driver) for precond in preconds)
                except u2.UiObjectNotFoundError as e:
                    return False
                except Exception as e:
                    logger.error(f"Error processing precond. Check if precond: {e}")
                    traceback.print_exc()
                    return False

            if preconds_pass(preconds):
                try:
                    # The block function receives a static checker, so selectors
                    # are recorded rather than executed against the device.
                    _widgets = func(U2Driver.getStaticChecker())
                    # Normalize: a function may return a single widget or a list.
                    _widgets = _widgets if isinstance(_widgets, list) else [_widgets]
                    for w in _widgets:
                        if isinstance(w, (StaticU2UiObject, StaticXpathObject)):
                            xpath = w.selector_to_xpath(w.selector)
                            # '//error' is the converter's sentinel for a selector
                            # it could not translate — skip it silently here.
                            if xpath != '//error':
                                blocked_set.add(xpath)
                        else:
                            logger.error(f"block widget defined in {func.__name__} Not supported.")
                except Exception as e:
                    # Best-effort: a broken block function must not abort the run.
                    logger.error(f"Error processing blocked widgets in: {func}")
                    logger.error(e)
                    traceback.print_exc()
            return blocked_set

        result = {
            "widgets": set(),
            "trees": set()
        }

        # NOTE(review): self._blockWidgetFuncs is not defined in this view; the
        # visible loader populates self._block_funcs — presumably
        # _blockWidgetFuncs is a property wrapping it. Confirm the attribute name.
        for func in self._blockWidgetFuncs["widgets"]:
            widgets = _get_xpath_widgets(func)
            result["widgets"].update(widgets)

        for func in self._blockWidgetFuncs["trees"]:
            trees = _get_xpath_widgets(func)
            result["trees"].update(trees)

        # An XPath blocked as a whole tree subsumes the single-widget block,
        # so remove the overlap from 'widgets'.
        result["widgets"] = list(result["widgets"] - result["trees"])
        result["trees"] = list(result["trees"])

        return result
|
|
750
|
+
|
|
751
|
+
@timer(r"Generating bug report cost %cost_time seconds.")
|
|
752
|
+
@catchException("Error when generating bug report")
|
|
753
|
+
def _generate_bug_report(self):
|
|
754
|
+
logger.info("Generating bug report")
|
|
755
|
+
BugReportGenerator(self.options.output_dir).generate_report()
|
|
756
|
+
|
|
757
|
+
def tearDown(self):
|
|
758
|
+
"""tearDown method. Cleanup the env.
|
|
759
|
+
"""
|
|
760
|
+
if self.options.Driver:
|
|
761
|
+
self.options.Driver.tearDown()
|
|
762
|
+
|
|
763
|
+
    def __del__(self):
        """Finalizer: best-effort cleanup of the env via tearDown().

        Any exception is deliberately swallowed — raising from __del__ only
        produces "Exception ignored" noise during garbage collection or
        interpreter shutdown.
        """
        try:
            self.tearDown()
        except Exception:
            # Ignore exceptions in __del__ to avoid "Exception ignored" warnings
            pass
|
|
771
|
+
|
|
772
|
+
|
|
773
|
+
class HybridTestRunner(TextTestRunner, KeaOptionSetter):
    """unittest runner for hybrid mode: runs scripted TestCases and, after each
    interruptable case that passed, launches a fastbot-driven Kea2 test, then
    merges all per-run reports into one.
    """

    # testMethodName -> TestCase instance (the Tuple[TestCase, bool] annotation
    # does not match how run()/collectAllTestCases() actually store entries —
    # the bool travels as the 'isInterruptable' attribute on the case instead).
    allTestCases: Dict[str, Tuple[TestCase, bool]]
    _common_teardown_func = None
    resultclass = KeaTextTestResult

    def __init__(self, stream = None, descriptions = True, verbosity = 1, failfast = False, buffer = False, resultclass = None, warnings = None, *, tb_locals = False):
        # Same signature as TextTestRunner; additionally flips the process-wide
        # hybrid flag so kea2_breakpoint() knows to hand over to Kea2.
        super().__init__(stream, descriptions, verbosity, failfast, buffer, resultclass, warnings, tb_locals=tb_locals)
        hybrid_mode.set(True)
        # Output dirs of each fastbot sub-run, collected for the final merge.
        self.hybrid_report_dirs = []

    def run(self, test):
        """Run all collected TestCases; after each passing interruptable case,
        launch a Kea2/fastbot session with per-session options.

        Mirrors unittest.TextTestRunner.run's warning handling, then drives the
        cases manually instead of calling the suite directly.
        """
        self.allTestCases = dict()
        self.collectAllTestCases(test)
        if len(self.allTestCases) == 0:
            logger.warning("[Warning] No test case has been found.")

        result: KeaTextTestResult = self._makeResult()
        registerResult(result)
        result.failfast = self.failfast
        result.buffer = self.buffer
        result.tb_locals = self.tb_locals
        with warnings.catch_warnings():
            if self.warnings:
                # if self.warnings is set, use it to filter all the warnings
                warnings.simplefilter(self.warnings)
                # if the filter is 'default' or 'always', special-case the
                # warnings from the deprecated unittest methods to show them
                # no more than once per module, because they can be fairly
                # noisy. The -Wd and -Wa flags can be used to bypass this
                # only when self.warnings is None.
                if self.warnings in ["default", "always"]:
                    warnings.filterwarnings(
                        "module",
                        category=DeprecationWarning,
                        message=r"Please use assert\w+ instead.",
                    )

            hybrid_test_count = 0
            # NOTE(review): the loop variable 'test' shadows the 'test' parameter
            # (the suite); intentional-looking but worth confirming.
            for testCaseName, test in self.allTestCases.items():
                test, isInterruptable = test, getattr(test, "isInterruptable", False)

                # Dependency Injection. driver when doing scripts
                self.scriptDriver = U2Driver.getScriptDriver(mode="direct")
                setattr(test, self.options.driverName, self.scriptDriver)
                logger.info("Executing unittest testCase %s." % testCaseName)

                try:
                    # _common_setUp/_common_tearDown are the user hooks from
                    # teardown.py, bound in collectAllTestCases().
                    test._common_setUp()
                    ret: KeaTextTestResult = test(result)
                    if ret.wasFail:
                        logger.error(f"Fail when running test.")
                    if isInterruptable and not ret.wasFail:
                        logger.info(f"Launch fastbot after interruptable script.")
                        hybrid_test_count += 1
                        hybrid_test_options = self.options.getKeaTestOptions(hybrid_test_count)

                        # Track the sub-report directory for later merging
                        self.hybrid_report_dirs.append(hybrid_test_options.output_dir)

                        # Re-enter unittest programmatically with the Kea2 runner;
                        # exit=False keeps control in this loop.
                        argv = ["python3 -m unittest"] + hybrid_test_options.propertytest_args
                        KeaTestRunner.setOptions(hybrid_test_options)
                        unittest_main(module=None, argv=argv, testRunner=KeaTestRunner, testLoader=keaTestLoader, exit=False)
                finally:
                    test._common_tearDown()
            result.printErrors()

        # Auto-merge all hybrid test reports after all tests complete
        if len(self.hybrid_report_dirs) > 0:
            self._merge_hybrid_reports()

        return result

    def _merge_hybrid_reports(self):
        """
        Merge all hybrid test reports into a single merged report.

        Best-effort: failures are logged and swallowed so the overall run
        result is never affected by a merge problem.
        """
        try:
            # Imported lazily to avoid the report-merger dependency unless needed.
            from kea2.report.report_merger import TestReportMerger

            if len(self.hybrid_report_dirs) < 2:
                logger.info("Only one hybrid test report generated, skipping merge.")
                return

            main_output_dir = self.options.output_dir

            merger = TestReportMerger()
            # NOTE(review): merged_dir and merge_summary are computed but unused;
            # possibly intended for logging.
            merged_dir = merger.merge_reports(
                result_paths=self.hybrid_report_dirs,
                output_dir=main_output_dir
            )

            merge_summary = merger.get_merge_summary()
        except Exception as e:
            logger.error(f"Error merging hybrid test reports: {e}")

    def collectAllTestCases(self, test: TestSuite):
        """collect all the properties to prepare for PBT

        Flattens the suite, replaces each case's setUp/tearDown with no-ops,
        binds the shared setUp/tearDown from configs/teardown.py as
        _common_setUp/_common_tearDown, and records interruptability.

        Raises:
            ValueError: if teardown.py does not define setUp or tearDown.
        """

        def iter_tests(suite):
            # Depth-first flatten of nested TestSuites into TestCases.
            for test in suite:
                if isinstance(test, TestSuite):
                    yield from iter_tests(test)
                else:
                    yield test

        funcs = loadFuncsFromFile(getProjectRoot() / "configs" / "teardown.py")
        setUp = funcs.get("setUp", None)
        tearDown = funcs.get("tearDown", None)
        if setUp is None:
            raise ValueError("setUp function not found in teardown.py.")
        if tearDown is None:
            raise ValueError("tearDown function not found in teardown.py.")

        # Traverse the TestCase to get all properties
        for t in iter_tests(test):

            def dummy(self): ...
            # remove the hook func in its TestCase
            t.setUp = types.MethodType(dummy, t)
            t.tearDown = types.MethodType(dummy, t)
            t._common_setUp = types.MethodType(setUp, t)
            t._common_tearDown = types.MethodType(tearDown, t)

            # check if it's interruptable (reflection)
            testMethodName = t._testMethodName
            testMethod = getattr(t, testMethodName)
            isInterruptable = hasattr(testMethod, INTERRUPTABLE_MARKER)

            # save it into allTestCases, if interruptable, mark as true
            # NOTE(review): keyed by method name only — two cases with the same
            # method name in different classes would overwrite each other.
            setattr(t, "isInterruptable", isInterruptable)
            self.allTestCases[testMethodName] = t
            logger.info(f"Load TestCase: {getFullPropName(t)} , interruptable: {t.isInterruptable}")

    def __del__(self):
        """Finalizer: best-effort driver cleanup.

        hasattr/truthiness guards keep this safe even when __init__ never ran
        or options were never set; exceptions are swallowed to avoid
        "Exception ignored" warnings at interpreter shutdown.
        """
        try:
            if hasattr(self, 'options') and self.options and self.options.Driver:
                self.options.Driver.tearDown()
        except Exception:
            # Ignore exceptions in __del__ to avoid "Exception ignored" warnings
            pass
|
|
918
|
+
|
|
919
|
+
|
|
920
|
+
def kea2_breakpoint():
    """kea2 entrance. Call this function in TestCase.

    In HybridTest mode, Kea2 takes over at this breakpoint by skipping the
    remainder of the test; a normal unittest launch is unaffected.
    """
    if not hybrid_mode.get():
        return
    raise SkipTest("Skip the test after the breakpoint and run kea2 in hybrid mode.")
|