Kea2-python 1.0.6b0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of Kea2-python might be problematic.
- kea2/__init__.py +3 -0
- kea2/absDriver.py +56 -0
- kea2/adbUtils.py +554 -0
- kea2/assets/config_version.json +16 -0
- kea2/assets/fastbot-thirdpart.jar +0 -0
- kea2/assets/fastbot_configs/abl.strings +2 -0
- kea2/assets/fastbot_configs/awl.strings +3 -0
- kea2/assets/fastbot_configs/max.config +7 -0
- kea2/assets/fastbot_configs/max.fuzzing.strings +699 -0
- kea2/assets/fastbot_configs/max.schema.strings +1 -0
- kea2/assets/fastbot_configs/max.strings +3 -0
- kea2/assets/fastbot_configs/max.tree.pruning +27 -0
- kea2/assets/fastbot_configs/teardown.py +18 -0
- kea2/assets/fastbot_configs/widget.block.py +38 -0
- kea2/assets/fastbot_libs/arm64-v8a/libfastbot_native.so +0 -0
- kea2/assets/fastbot_libs/armeabi-v7a/libfastbot_native.so +0 -0
- kea2/assets/fastbot_libs/x86/libfastbot_native.so +0 -0
- kea2/assets/fastbot_libs/x86_64/libfastbot_native.so +0 -0
- kea2/assets/framework.jar +0 -0
- kea2/assets/kea2-thirdpart.jar +0 -0
- kea2/assets/monkeyq.jar +0 -0
- kea2/assets/quicktest.py +126 -0
- kea2/cli.py +320 -0
- kea2/fastbotManager.py +267 -0
- kea2/fastbotx/ActivityTimes.py +52 -0
- kea2/fastbotx/ReuseEntry.py +74 -0
- kea2/fastbotx/ReuseModel.py +63 -0
- kea2/fastbotx/__init__.py +7 -0
- kea2/fbm_parser.py +871 -0
- kea2/fs_lock.py +131 -0
- kea2/kea2_api.py +166 -0
- kea2/keaUtils.py +1112 -0
- kea2/kea_launcher.py +319 -0
- kea2/logWatcher.py +92 -0
- kea2/mixin.py +22 -0
- kea2/report/__init__.py +0 -0
- kea2/report/bug_report_generator.py +793 -0
- kea2/report/mixin.py +482 -0
- kea2/report/report_merger.py +797 -0
- kea2/report/templates/bug_report_template.html +3876 -0
- kea2/report/templates/merged_bug_report_template.html +3333 -0
- kea2/report/utils.py +10 -0
- kea2/resultSyncer.py +65 -0
- kea2/u2Driver.py +610 -0
- kea2/utils.py +184 -0
- kea2/version_manager.py +102 -0
- kea2_python-1.0.6b0.dist-info/METADATA +447 -0
- kea2_python-1.0.6b0.dist-info/RECORD +52 -0
- kea2_python-1.0.6b0.dist-info/WHEEL +5 -0
- kea2_python-1.0.6b0.dist-info/entry_points.txt +2 -0
- kea2_python-1.0.6b0.dist-info/licenses/LICENSE +16 -0
- kea2_python-1.0.6b0.dist-info/top_level.txt +1 -0
kea2/keaUtils.py
ADDED
@@ -0,0 +1,1112 @@
import random
import warnings
import types
import traceback
import json
import os

from collections import deque
from copy import deepcopy
from pathlib import Path
from time import perf_counter, sleep
from typing import Callable, Any, Deque, Dict, List, Literal, NewType, Tuple, Union
from contextvars import ContextVar
from unittest import TextTestRunner, registerResult, TestSuite, TestCase, TextTestResult, defaultTestLoader, SkipTest
from unittest import main as unittest_main
from dataclasses import dataclass, asdict
from datetime import datetime

import uiautomator2 as u2

from .absDriver import AbstractDriver
from .report.bug_report_generator import BugReportGenerator
from .resultSyncer import ResultSyncer
from .logWatcher import LogWatcher
from .utils import TimeStamp, catchException, getProjectRoot, getLogger, loadFuncsFromFile, timer
from .u2Driver import StaticU2UiObject, StaticXpathObject, U2Driver
from .fastbotManager import FastbotManager
from .adbUtils import ADBDevice
from .mixin import BetterConsoleLogExtensionMixin


hybrid_mode = ContextVar("hybrid_mode", default=False)


PRECONDITIONS_MARKER = "preconds"
PROB_MARKER = "prob"
MAX_TRIES_MARKER = "max_tries"
INTERRUPTABLE_MARKER = "interruptable"

logger = getLogger(__name__)


# Class Typing
PropName = NewType("PropName", str)
PropertyStore = NewType("PropertyStore", Dict[PropName, TestCase])


STAMP: str
LOGFILE: str
RESFILE: str
PROP_EXEC_RESFILE: str
def precondition(precond: Callable[[Any], bool]) -> Callable:
    """the decorator @precondition

    @precondition specifies when the property can be executed.
    A property can have multiple preconditions, each specified by @precondition.
    """
    def accept(f):
        preconds = getattr(f, PRECONDITIONS_MARKER, tuple())
        setattr(f, PRECONDITIONS_MARKER, preconds + (precond,))
        return f

    return accept


def prob(p: float):
    """the decorator @prob

    @prob specifies the probability of executing a property when its precondition is satisfied.
    """
    p = float(p)
    if not 0 < p <= 1.0:
        raise ValueError("The probability should be between 0 and 1")

    def accept(f):
        setattr(f, PROB_MARKER, p)
        return f

    return accept


def max_tries(n: int):
    """the decorator @max_tries

    @max_tries specifies the maximum number of times a property may be executed.
    """
    n = int(n)
    if not n > 0:
        raise ValueError("The maximum tries should be a positive integer.")

    def accept(f):
        setattr(f, MAX_TRIES_MARKER, n)
        return f

    return accept


def interruptable(strategy='default'):
    """the decorator @interruptable

    @interruptable marks a test method as interruptable: in hybrid mode,
    fuzzing is launched after the marked script finishes successfully.
    """
    def decorator(func):
        setattr(func, INTERRUPTABLE_MARKER, True)
        setattr(func, 'strategy', strategy)
        return func
    return decorator
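
# Illustrative usage sketch (not part of the module): how the decorators above
# combine on a property. The class and selectors are hypothetical; `self.d`
# assumes Options(driverName="d").
#
#     class SearchProps(TestCase):
#         @prob(0.7)
#         @max_tries(50)
#         @precondition(lambda self: self.d(resourceId="search_box").exists)
#         def test_search_box_clears(self):
#             self.d(resourceId="search_box").click()
#             assert self.d(text="Clear").exists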


@dataclass
class Options:
    """
    Kea and Fastbot configurations
    """
    # the driver name used in scripts (if self.d, then "d")
    driverName: str = None
    # the driver (only U2Driver available now)
    Driver: AbstractDriver = U2Driver
    # list of package names. Specify the apps under test
    packageNames: List[str] = None
    # target device
    serial: str = None
    # target device with transport_id
    transport_id: str = None
    # test agent. "native" for stage 1 and "u2" for stage 1~3
    agent: Literal["u2", "native"] = "u2"
    # max steps in exploration (available in stage 2~3)
    maxStep: Union[str, float] = float("inf")
    # time (minutes) for exploration
    running_mins: int = 10
    # time (ms) to wait between steps when exploring the app
    throttle: int = 200
    # the output dir for saving logs and results
    output_dir: str = "output"
    # the stamp for the log file and result file, default: current time stamp
    log_stamp: str = None
    # the profiling period (in steps) for collecting the coverage result
    profile_period: int = 25
    # take screenshots for every step
    take_screenshots: bool = False
    # screenshots before failure (dump n screenshots before failure; 0 means take screenshots for every step)
    pre_failure_screenshots: int = 0
    # screenshots after failure (dump n screenshots after failure; should not be greater than pre_failure_screenshots)
    post_failure_screenshots: int = 0
    # the root of the output dir on the device
    device_output_root: str = "/sdcard"
    # the debug mode
    debug: bool = False
    # activity whitelist file
    act_whitelist_file: str = None
    # activity blacklist file
    act_blacklist_file: str = None
    # propertytest sub-command args (e.g. discover -s xxx -p xxx)
    propertytest_args: List[str] = None
    # period (N steps) to restart the app under test
    restart_app_period: int = None
    # unittest sub-command args (Feat 4)
    unittest_args: List[str] = None
    # extra args (passed directly to fastbot)
    extra_args: List[str] = None
    # whether to pull the device FBM and merge it into the PC after the test finishes
    upload_fbm: bool = False
    # whether to pull device FBM(s) at start, merge with the PC FBM and push the merged model back to the device
    download_fbm: bool = False

    def __setattr__(self, name, value):
        # ignore None assignments so unset CLI values don't overwrite the dataclass defaults
        if value is None:
            return
        super().__setattr__(name, value)

    def __post_init__(self):
        import logging
        logging.basicConfig(level=logging.DEBUG if self.debug else logging.INFO)

        if self.Driver:
            self._set_driver()

        global STAMP
        STAMP = self.log_stamp if self.log_stamp else TimeStamp().getTimeStamp()

        self._sanitize_stamp()

        self.output_dir = Path(self.output_dir).absolute() / f"res_{STAMP}"
        self.set_stamp()

        self._sanitize_args()

        _check_package_installation(self.packageNames)
        _save_bug_report_configs(self)

    def set_stamp(self, stamp: str = None):
        global STAMP, LOGFILE, RESFILE, PROP_EXEC_RESFILE
        if stamp:
            STAMP = stamp

        LOGFILE = f"fastbot_{STAMP}.log"
        RESFILE = f"result_{STAMP}.json"
        PROP_EXEC_RESFILE = f"property_exec_info_{STAMP}.json"

    def _sanitize_stamp(self):
        global STAMP
        illegal_chars = ['/', '\\', ':', '*', '?', '"', '<', '>', '|', '\n', '\r', '\t', '\0']
        for char in illegal_chars:
            if char in STAMP:
                raise ValueError(
                    f"char: `{char}` is illegal in --log-stamp. current stamp: {STAMP}"
                )

    def _sanitize_args(self):
        if not self.take_screenshots and self.pre_failure_screenshots > 0:
            raise ValueError("--pre-failure-screenshots should be 0 when --take-screenshots is not set.")

        if self.pre_failure_screenshots < self.post_failure_screenshots:
            raise ValueError("--post-failure-screenshots should not be greater than --pre-failure-screenshots.")

        self.profile_period = int(self.profile_period)
        if self.profile_period < 1:
            raise ValueError("--profile-period should be greater than 0")

        self.throttle = int(self.throttle)
        if self.throttle < 0:
            raise ValueError("--throttle should be greater than or equal to 0")

        if self.agent == 'u2' and self.driverName is None:
            raise ValueError("--driver-name should be specified when customizing scripts with --agent u2")

    def _set_driver(self):
        target_device = dict()
        if self.serial:
            target_device["serial"] = self.serial
        if self.transport_id:
            target_device["transport_id"] = self.transport_id
        self.Driver.setDevice(target_device)
        ADBDevice.setDevice(self.serial, self.transport_id)

    def getKeaTestOptions(self, hybrid_test_count: int) -> "Options":
        """Get the KeaTestOptions for a hybrid test run when switching from unittest to Kea2 test.

        hybrid_test_count: the count of hybrid test runs
        """
        if not self.unittest_args:
            raise RuntimeError("unittest_args is None. Cannot get KeaTestOptions from it")

        opts = deepcopy(self)

        time_stamp = TimeStamp().getTimeStamp()
        hybrid_test_stamp = f"{time_stamp}_hybrid_{hybrid_test_count}"

        opts.output_dir = self.output_dir / f"res_{hybrid_test_stamp}"

        opts.set_stamp(hybrid_test_stamp)
        opts.unittest_args = []
        return opts
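
# Illustrative construction sketch (hypothetical values, not part of the module):
#
#     options = Options(
#         driverName="d",
#         packageNames=["com.example.app"],
#         agent="u2",
#         running_mins=15,
#         throttle=300,
#         take_screenshots=True,
#         pre_failure_screenshots=5,
#         post_failure_screenshots=2,
#     )
#     KeaTestRunner.setOptions(options)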


def _check_package_installation(packageNames):
    installed_packages = set(ADBDevice().list_packages())

    for package in packageNames:
        if package not in installed_packages:
            logger.error(f"package {package} not installed. Abort.")
            raise ValueError(f"{package} not installed")


def _save_bug_report_configs(options: Options):
    output_dir = options.output_dir
    output_dir.mkdir(parents=True, exist_ok=True)
    configs = {
        "driverName": options.driverName,
        "packageNames": options.packageNames,
        "take_screenshots": options.take_screenshots,
        "pre_failure_screenshots": options.pre_failure_screenshots,
        "post_failure_screenshots": options.post_failure_screenshots,
        "device_output_root": options.device_output_root,
        "log_stamp": options.log_stamp if options.log_stamp else STAMP,
        "test_time": datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
    }
    with open(output_dir / "bug_report_config.json", "w", encoding="utf-8") as fp:
        json.dump(configs, fp, indent=4)


@dataclass
class PropStatistic:
    precond_satisfied: int = 0
    executed: int = 0
    fail: int = 0
    error: int = 0


PBTTestResult = NewType("PBTTestResult", Dict[PropName, PropStatistic])


PropertyExecutionInfoStore = NewType("PropertyExecutionInfoStore", Deque["PropertyExecutionInfo"])


@dataclass
class PropertyExecutionInfo:
    startStepsCount: int
    propName: PropName
    state: Literal["start", "pass", "fail", "error"]
    tb: str


def getFullPropName(testCase: TestCase):
    return ".".join([
        testCase.__module__,
        testCase.__class__.__name__,
        testCase._testMethodName
    ])


class JsonResult(BetterConsoleLogExtensionMixin, TextTestResult):

    res: PBTTestResult
    lastExecutedInfo: PropertyExecutionInfo
    executionInfoStore: PropertyExecutionInfoStore = deque()

    def __init__(self, stream, descriptions, verbosity):
        super().__init__(stream, descriptions, verbosity)
        self.showAll = True

    @classmethod
    def setProperties(cls, allProperties: Dict):
        cls.res = dict()
        for testCase in allProperties.values():
            cls.res[getFullPropName(testCase)] = PropStatistic()

    def flushResult(self):
        global RESFILE, PROP_EXEC_RESFILE
        json_res = dict()
        for propName, propStatistic in self.res.items():
            json_res[propName] = asdict(propStatistic)
        with open(RESFILE, "w", encoding="utf-8") as fp:
            json.dump(json_res, fp, indent=4)

        while self.executionInfoStore:
            execInfo = self.executionInfoStore.popleft()
            with open(PROP_EXEC_RESFILE, "a", encoding="utf-8") as fp:
                fp.write(f"{json.dumps(asdict(execInfo))}\n")

    def addExcuted(self, test: TestCase, stepsCount: int):
        self.res[getFullPropName(test)].executed += 1

        self.lastExecutedInfo = PropertyExecutionInfo(
            propName=getFullPropName(test),
            state="start",
            tb="",
            startStepsCount=stepsCount
        )

    def addPrecondSatisfied(self, test: TestCase):
        self.res[getFullPropName(test)].precond_satisfied += 1

    def addFailure(self, test, err):
        super().addFailure(test, err)
        self.res[getFullPropName(test)].fail += 1
        self.lastExecutedInfo.state = "fail"
        self.lastExecutedInfo.tb = self._exc_info_to_string(err, test)

    def addError(self, test, err):
        super().addError(test, err)
        self.res[getFullPropName(test)].error += 1
        self.lastExecutedInfo.state = "error"
        self.lastExecutedInfo.tb = self._exc_info_to_string(err, test)

    def updateExectedInfo(self):
        if self.lastExecutedInfo.state == "start":
            self.lastExecutedInfo.state = "pass"

        self.executionInfoStore.append(self.lastExecutedInfo)

    def getExcuted(self, test: TestCase):
        return self.res[getFullPropName(test)].executed

    def printError(self, test):
        if self.lastExecutedInfo.state in ["fail", "error"]:
            flavour = self.lastExecutedInfo.state.upper()
            self.stream.writeln("")
            self.stream.writeln(self.separator1)
            self.stream.writeln("%s: %s" % (flavour, self.getDescription(test)))
            self.stream.writeln(self.separator2)
            self.stream.writeln("%s" % self.lastExecutedInfo.tb)
            self.stream.writeln(self.separator1)
            self.stream.flush()

    def logSummary(self):
        fails = sum(_.fail for _ in self.res.values())
        errors = sum(_.error for _ in self.res.values())

        logger.info(f"[Property Execution Summary] Errors:{errors}, Fails:{fails}")


class KeaOptionSetter:
    options: Options = None

    @classmethod
    def setOptions(cls, options: Options):
        # packageNames must be a non-empty list
        if not (isinstance(options.packageNames, list) and len(options.packageNames) > 0):
            raise ValueError("packageNames should be given in a non-empty list.")
        if options.Driver is not None and options.agent == "native":
            logger.warning("[Warning] Cannot use any Driver when running in native mode.")
            options.Driver = None
        cls.options = options


class KeaTestRunner(TextTestRunner, KeaOptionSetter):

    resultclass: JsonResult
    allProperties: PropertyStore
    _block_funcs: Dict[Literal["widgets", "trees"], List[Callable]] = None

    def _setOutputDir(self):
        output_dir = self.options.output_dir
        output_dir.mkdir(parents=True, exist_ok=True)
        global LOGFILE, RESFILE, PROP_EXEC_RESFILE
        LOGFILE = output_dir / Path(LOGFILE)
        RESFILE = output_dir / Path(RESFILE)
        PROP_EXEC_RESFILE = output_dir / Path(PROP_EXEC_RESFILE)
        logger.info(f"Log file: {LOGFILE}")
        logger.info(f"Result file: {RESFILE}")
        logger.info(f"Property execution info file: {PROP_EXEC_RESFILE}")

    def run(self, test):

        # take device-side snapshots once at the beginning of the run
        try:
            self._copy_fbm()
        except Exception as e:
            logger.debug(f"Initial device snapshot failed: {e}")

        self.allProperties = dict()
        self.collectAllProperties(test)

        if len(self.allProperties) == 0:
            logger.warning("[Warning] No property has been found.")

        self._setOutputDir()

        JsonResult.setProperties(self.allProperties)
        self.resultclass = JsonResult

        result: JsonResult = self._makeResult()
        registerResult(result)
        result.failfast = self.failfast
        result.buffer = self.buffer
        result.tb_locals = self.tb_locals

        with warnings.catch_warnings():
            if self.warnings:
                # if self.warnings is set, use it to filter all the warnings
                warnings.simplefilter(self.warnings)
                # if the filter is 'default' or 'always', special-case the
                # warnings from the deprecated unittest methods to show them
                # no more than once per module, because they can be fairly
                # noisy. The -Wd and -Wa flags can be used to bypass this
                # only when self.warnings is None.
                if self.warnings in ["default", "always"]:
                    warnings.filterwarnings(
                        "module",
                        category=DeprecationWarning,
                        message=r"Please use assert\w+ instead.",
                    )

            fb = FastbotManager(self.options, LOGFILE)
            fb.start()

            log_watcher = LogWatcher(LOGFILE)

            if self.options.agent == "u2":
                # initialize the result.json file
                result.flushResult()
                # set up the u2 driver
                self.scriptDriver = U2Driver.getScriptDriver(mode="proxy")
                fb.check_alive()

            fb.init(options=self.options, stamp=STAMP)

            resultSyncer = ResultSyncer(fb.device_output_dir, self.options)
            resultSyncer.run()
            start_time = perf_counter()
            fb_is_running = True
            self.stepsCount = 0

            while self.stepsCount < self.options.maxStep:
                if self.shouldStop(start_time):
                    logger.info("Exploration time up (--running-minutes).")
                    break

                if self.options.restart_app_period and self.stepsCount and self.stepsCount % self.options.restart_app_period == 0:
                    self.stepsCount += 1
                    logger.info(f"Sending monkeyEvent {self._monkey_event_count}")
                    logger.info("Kill all test apps to restart the app under test.")
                    for app in self.options.packageNames:
                        logger.info(f"Stopping app: {app}")
                        self.scriptDriver.app_stop(app)
                    sleep(3)
                    fb.sendInfo("kill_apps")
                    continue

                try:
                    if fb.executed_prop:
                        fb.executed_prop = False
                        xml_raw = fb.dumpHierarchy()
                    else:
                        self.stepsCount += 1
                        logger.info(f"Sending monkeyEvent {self._monkey_event_count}")
                        xml_raw = fb.stepMonkey(self._monkeyStepInfo)
                    propsSatisfiedPrecond = self.getValidProperties(xml_raw, result)
                except u2.HTTPError:
                    logger.info("Connection refused by remote.")
                    if fb.get_return_code() == 0:
                        logger.info("Exploration time up (--running-minutes).")
                        fb_is_running = False
                        break
                    raise RuntimeError("Fastbot Aborted.")

                if self.options.profile_period and self.stepsCount % self.options.profile_period == 0:
                    resultSyncer.sync_event.set()

                # go to the next round if no precondition is satisfied
                if len(propsSatisfiedPrecond) == 0:
                    continue

                # get the random probability p
                p = random.random()
                propsNameFilteredByP = []
                # filter the properties according to the given p
                for propName, test in propsSatisfiedPrecond.items():
                    result.addPrecondSatisfied(test)
                    if getattr(test, PROB_MARKER, 1) >= p:
                        propsNameFilteredByP.append(propName)

                if len(propsNameFilteredByP) == 0:
                    print("Not executed any property due to probability.", flush=True)
                    continue

                execPropName = random.choice(propsNameFilteredByP)
                test = propsSatisfiedPrecond[execPropName]
                # dependency injection: inject the script driver when running scripts
                self.scriptDriver = U2Driver.getScriptDriver(mode="proxy")

                setattr(test, self.options.driverName, self.scriptDriver)

                result.addExcuted(test, self.stepsCount)
                fb.logScript(result.lastExecutedInfo)
                try:
                    test(result)
                finally:
                    result.printError(test)

                result.updateExectedInfo()
                fb.logScript(result.lastExecutedInfo)
                fb.executed_prop = True
                result.flushResult()

            if fb_is_running:
                fb.stopMonkey()
            result.flushResult()
            resultSyncer.close()

            fb.join()
            print("Finish sending monkey events.", flush=True)
            log_watcher.close()

        # after run: compute per-device deltas and merge into the PC core
        try:
            self._finalize_and_merge_deltas()
        except Exception as e:
            logger.debug(f"Finalize delta merge failed: {e}")

        result.logSummary()

        if self.options.agent == "u2":
            self._generate_bug_report()

        # self._upload_fbm()

        self.tearDown()
        return result

    def shouldStop(self, start_time):
        if self.options.running_mins is None:
            return False
        return (perf_counter() - start_time) >= self.options.running_mins * 60

    @property
    def _monkeyStepInfo(self):
        r = self._get_block_widgets()
        r["steps_count"] = self.stepsCount
        return r

    @property
    def _monkey_event_count(self):
        return f"({self.stepsCount} / {self.options.maxStep})" if self.options.maxStep != float("inf") else f"({self.stepsCount})"

    def _get_block_widgets(self):
        block_dict = self._getBlockedWidgets()
        block_widgets: List[str] = block_dict['widgets']
        block_trees: List[str] = block_dict['trees']
        logger.debug(f"Blocking widgets: {block_widgets}")
        logger.debug(f"Blocking trees: {block_trees}")
        return {
            "block_widgets": block_widgets,
            "block_trees": block_trees
        }

    def getValidProperties(self, xml_raw: str, result: JsonResult) -> PropertyStore:

        staticCheckerDriver = U2Driver.getStaticChecker(hierarchy=xml_raw)

        validProps: PropertyStore = dict()
        for propName, test in self.allProperties.items():
            valid = True
            prop = getattr(test, propName)
            p = getattr(prop, PROB_MARKER, 1)
            setattr(test, PROB_MARKER, p)
            # check if all the preconditions pass
            for precond in prop.preconds:
                # dependency injection: static checker driver for the precondition
                setattr(test, self.options.driverName, staticCheckerDriver)
                # execute the precondition
                try:
                    if not precond(test):
                        valid = False
                        break
                except u2.UiObjectNotFoundError:
                    valid = False
                    break
                except Exception:
                    logger.error(f"Error when checking precond: {getFullPropName(test)}")
                    traceback.print_exc()
                    valid = False
                    break
            # if all the preconditions passed, make it a candidate property
            if valid:
                if result.getExcuted(test) >= getattr(prop, MAX_TRIES_MARKER, float("inf")):
                    print(f"{getFullPropName(test)} has reached its max_tries. Skip.", flush=True)
                    continue
                validProps[propName] = test

        staticCheckerDriver.clear_cache()

        print(f"{len(validProps)} precond satisfied.", flush=True)
        if len(validProps) > 0:
            print("[INFO] Valid properties:", flush=True)
            print("\n".join([f' - {getFullPropName(p)}' for p in validProps.values()]), flush=True)
        return validProps

    def collectAllProperties(self, test: TestSuite):
        """collect all the properties to prepare for PBT
        """

        def remove_setUp(testCase: TestCase):
            """remove the setUp function in PBT
            """
            def setUp(self): ...
            testCase.setUp = types.MethodType(setUp, testCase)

        def remove_tearDown(testCase: TestCase):
            """remove the tearDown function in PBT
            """
            def tearDown(self): ...
            testCase.tearDown = types.MethodType(tearDown, testCase)

        def iter_tests(suite):
            for test in suite:
                if isinstance(test, TestSuite):
                    yield from iter_tests(test)
                else:
                    yield test

        # traverse the TestSuite to get all properties
        _result = TextTestResult(self.stream, self.descriptions, self.verbosity)
        for t in iter_tests(test):
            # find all the _FailedTest instances (caused by ImportError) and run them directly to report errors
            if type(t).__name__ == "_FailedTest":
                t(_result)
                continue
            testMethodName = t._testMethodName
            # get the test method and check whether it is a property
            testMethod = getattr(t, testMethodName)
            if hasattr(testMethod, PRECONDITIONS_MARKER):
                # remove the hook functions in its TestCase
                remove_setUp(t)
                remove_tearDown(t)
                # save it into allProperties for PBT
                self.allProperties[testMethodName] = t
                print(f"[INFO] Load property: {getFullPropName(t)}", flush=True)
        # print errors caused by ImportError
        _result.printErrors()

    @property
    def _blockWidgetFuncs(self):
        """
        Load and process blocking functions from the widget.block.py configuration file.

        Returns:
            dict: A dictionary containing two lists:
                - 'widgets': list of functions that block individual widgets
                - 'trees': list of functions that block widget trees
        """
        if self._block_funcs is None:
            self._block_funcs = {"widgets": list(), "trees": list()}
            root_dir = getProjectRoot()
            if root_dir is None or not os.path.exists(
                file_block_widgets := root_dir / "configs" / "widget.block.py"
            ):
                print("[WARNING] widget.block.py not found", flush=True)
                # no config file: return the empty function lists instead of trying to load it
                return self._block_funcs

            def __get_block_widgets_module():
                import importlib.util
                module_name = "block_widgets"
                spec = importlib.util.spec_from_file_location(module_name, file_block_widgets)
                mod = importlib.util.module_from_spec(spec)
                spec.loader.exec_module(mod)
                return mod

            mod = __get_block_widgets_module()

            import inspect
            for func_name, func in inspect.getmembers(mod, inspect.isfunction):
                if func_name == "global_block_widgets":
                    self._block_funcs["widgets"].append(func)
                    setattr(func, PRECONDITIONS_MARKER, (lambda d: True,))
                    continue
                if func_name == "global_block_tree":
                    self._block_funcs["trees"].append(func)
                    setattr(func, PRECONDITIONS_MARKER, (lambda d: True,))
                    continue
                if func_name.startswith("block_") and not func_name.startswith("block_tree_"):
                    if getattr(func, PRECONDITIONS_MARKER, None) is None:
                        logger.warning(f"No precondition in block widget function: {func_name}. Default globally active.")
                        setattr(func, PRECONDITIONS_MARKER, (lambda d: True,))
                    self._block_funcs["widgets"].append(func)
                    continue
                if func_name.startswith("block_tree_"):
                    if getattr(func, PRECONDITIONS_MARKER, None) is None:
                        logger.warning(f"No precondition in block tree function: {func_name}. Default globally active.")
                        setattr(func, PRECONDITIONS_MARKER, (lambda d: True,))
                    self._block_funcs["trees"].append(func)

        return self._block_funcs
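
    # Illustrative widget.block.py sketch (hypothetical config, matching the
    # naming rules above; `d` is the checker driver passed to each function):
    #
    #     def global_block_widgets(d):
    #         # always block the logout button
    #         return d(text="Logout")
    #
    #     @precondition(lambda d: d(text="Settings").exists)
    #     def block_advanced_entry(d):
    #         return d(text="Advanced")
    #
    #     def block_tree_ad_banner(d):
    #         return d(resourceId="ad_container")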

    def _getBlockedWidgets(self):
        """
        Execute all blocking functions to get the lists of widgets and trees to block during testing.

        Returns:
            dict: A dictionary containing:
                - 'widgets': list of XPath strings for individual widgets to block
                - 'trees': list of XPath strings for widget trees to block
        """
        def _get_xpath_widgets(func):
            blocked_set = set()
            script_driver = self.options.Driver.getScriptDriver()
            preconds = getattr(func, PRECONDITIONS_MARKER, [])

            def preconds_pass(preconds):
                try:
                    return all(precond(script_driver) for precond in preconds)
                except u2.UiObjectNotFoundError:
                    return False
                except Exception as e:
                    logger.error(f"Error processing precondition: {e}")
                    traceback.print_exc()
                    return False

            if preconds_pass(preconds):
                try:
                    _widgets = func(U2Driver.getStaticChecker())
                    _widgets = _widgets if isinstance(_widgets, list) else [_widgets]
                    for w in _widgets:
                        if isinstance(w, (StaticU2UiObject, StaticXpathObject)):
                            xpath = w.selector_to_xpath(w.selector)
                            if xpath != '//error':
                                blocked_set.add(xpath)
                        else:
                            logger.error(f"block widget defined in {func.__name__} not supported.")
                except Exception as e:
                    logger.error(f"Error processing blocked widgets in: {func}")
                    logger.error(e)
                    traceback.print_exc()
            return blocked_set

        result = {
            "widgets": set(),
            "trees": set()
        }

        for func in self._blockWidgetFuncs["widgets"]:
            widgets = _get_xpath_widgets(func)
            result["widgets"].update(widgets)

        for func in self._blockWidgetFuncs["trees"]:
            trees = _get_xpath_widgets(func)
            result["trees"].update(trees)

        # a widget blocked as part of a tree doesn't need to be blocked individually
        result["widgets"] = list(result["widgets"] - result["trees"])
        result["trees"] = list(result["trees"])

        return result

    @timer(r"Generating bug report cost %cost_time seconds.")
    @catchException("Error when generating bug report")
    def _generate_bug_report(self):
        logger.info("Generating bug report")
        BugReportGenerator(self.options.output_dir).generate_report()

    def tearDown(self):
        """tearDown method. Clean up the env.
        """
        if self.options.Driver:
            self.options.Driver.tearDown()

    def __del__(self):
        """Destructor. Clean up the env.
        """
        try:
            self.tearDown()
        except Exception:
            # ignore exceptions in __del__ to avoid "Exception ignored" warnings
            pass

    def _finalize_and_merge_deltas(self):
        """Pull device FBMs, compute deltas (snapshot -> current) and merge the deltas into the PC core FBM.

        This function iterates over the configured packages and uses pull_and_merge_to_pc, which
        already implements snapshot-aware delta merging when a snapshot is present on the device.
        """
        try:
            from kea2.fbm_parser import FBMMerger
        except Exception as e:
            logger.debug(f"FBM merger unavailable for finalize: {e}")
            return

        merger = FBMMerger()
        pkgs = getattr(self.options, 'packageNames', []) or []
        for pkg in pkgs:
            try:
                logger.info(f"Finalizing FBM delta for package: {pkg}")
                ok = merger.pull_and_merge_to_pc(pkg, device=self.options.serial,
                                                 transport_id=self.options.transport_id)
                if ok:
                    logger.info(f"Delta merge completed for package: {pkg}")
                else:
                    logger.debug(f"Delta merge reported failure for package: {pkg}")
            except Exception as e:
                logger.debug(f"Error finalizing delta for {pkg}: {e}")

    def _copy_fbm(self):
        """If options.download_fbm is True, create an on-device snapshot for each package by copying
        `/sdcard/fastbot_{pkg}.fbm` -> `/sdcard/fastbot_{pkg}.snapshot.fbm` using `adb shell cp`.

        Behavior:
        - Intended to run only if options.download_fbm is True (the guard is currently commented out below).
        - Tries `adb shell cp` up to `max_retries` times with backoff. Does NOT perform pull/push.
        - Logs per-package success/failure and does not raise, to avoid blocking startup.
        """
        # if not getattr(self.options, 'download_fbm', False):
        #     return

        try:
            from kea2.adbUtils import adb_shell
        except Exception:
            try:
                from adbUtils import adb_shell  # type: ignore
            except Exception as e:
                print(f"ADB utilities not available for creating device snapshot: {e}", flush=True)
                return

        import time
        import random

        pkgs = getattr(self.options, 'packageNames', []) or []
        for pkg in pkgs:
            src = f"/sdcard/fastbot_{pkg}.fbm"
            dst = f"/sdcard/fastbot_{pkg}.snapshot.fbm"

            # first check whether the source FBM exists on the device; if not, skip this package
            try:
                # use a single-string shell command so adb runs: adb -s <dev> shell "test -f <src> && echo OK || echo NO"
                check_src = adb_shell([f'test -f "{src}" && echo OK || echo NO'], device=self.options.serial, transport_id=self.options.transport_id)
                if not (isinstance(check_src, str) and "OK" in check_src):
                    print(f"Source FBM not found on device for package {pkg}: {src}. Skipping snapshot creation.", flush=True)
                    continue
            except Exception as e:
                print(f"Failed to verify source FBM existence for {pkg}: {e}. Skipping.", flush=True)
                continue

            max_retries = 3
            success = False
            for attempt in range(1, max_retries + 1):
                try:
                    print(f"Attempt {attempt}: creating device snapshot: cp {src} {dst}", flush=True)
                    adb_shell(["cp", src, dst], device=self.options.serial, transport_id=self.options.transport_id)

                    try:
                        # verify the snapshot exists on the device using a single-string command
                        # (matches: adb shell "test -f ... && echo OK || echo NO")
                        verify = adb_shell([f'test -f "{dst}" && echo OK || echo NO'], device=self.options.serial, transport_id=self.options.transport_id)
                        if isinstance(verify, str) and "OK" in verify:
                            print(f"Snapshot created on device for package {pkg}: {dst}", flush=True)
                            success = True
                            break
                        else:
                            print(f"Snapshot verify failed on attempt {attempt} for {pkg}: {verify}", flush=True)
                    except Exception as ve:
                        print(f"Verification command failed after cp attempt {attempt} for {pkg}: {ve}", flush=True)
                except Exception as e:
                    print(f"adb shell cp attempt {attempt} failed for {pkg}: {e}", flush=True)

                # backoff before the next attempt
                sleep_time = min(5.0, 0.5 * (2 ** (attempt - 1))) + random.uniform(0, 0.1)
                time.sleep(sleep_time)

            if not success:
                print(f"Giving up creating snapshot on device for {pkg} after {max_retries} attempts", flush=True)


class KeaTextTestResult(BetterConsoleLogExtensionMixin, TextTestResult):

    # default in case no add* hook ran before wasFail is read
    _wasFail = False

    @property
    def wasFail(self):
        return self._wasFail

    def addError(self, test, err):
        self._wasFail = True
        return super().addError(test, err)

    def addFailure(self, test, err):
        self._wasFail = True
        return super().addFailure(test, err)

    def addSuccess(self, test):
        self._wasFail = False
        return super().addSuccess(test)

    def addSkip(self, test, reason):
        self._wasFail = False
        return super().addSkip(test, reason)

    def addExpectedFailure(self, test, err):
        self._wasFail = False
        return super().addExpectedFailure(test, err)

    def addUnexpectedSuccess(self, test):
        self._wasFail = False
        return super().addUnexpectedSuccess(test)


class HybridTestRunner(TextTestRunner, KeaOptionSetter):

    allTestCases: Dict[str, TestCase]
    _common_teardown_func = None
    resultclass = KeaTextTestResult

    def __init__(self, stream=None, descriptions=True, verbosity=1, failfast=False, buffer=False, resultclass=None, warnings=None, *, tb_locals=False):
        super().__init__(stream, descriptions, verbosity, failfast, buffer, resultclass, warnings, tb_locals=tb_locals)
        hybrid_mode.set(True)
        self.hybrid_report_dirs = []

    def run(self, test):

        self.allTestCases = dict()
        self.collectAllTestCases(test)
        if len(self.allTestCases) == 0:
            logger.warning("[Warning] No test case has been found.")

        result: KeaTextTestResult = self._makeResult()
        registerResult(result)
        result.failfast = self.failfast
        result.buffer = self.buffer
        result.tb_locals = self.tb_locals
        with warnings.catch_warnings():
            if self.warnings:
                # if self.warnings is set, use it to filter all the warnings
                warnings.simplefilter(self.warnings)
                # if the filter is 'default' or 'always', special-case the
                # warnings from the deprecated unittest methods to show them
                # no more than once per module, because they can be fairly
                # noisy. The -Wd and -Wa flags can be used to bypass this
                # only when self.warnings is None.
                if self.warnings in ["default", "always"]:
                    warnings.filterwarnings(
                        "module",
                        category=DeprecationWarning,
                        message=r"Please use assert\w+ instead.",
                    )

            hybrid_test_count = 0
            for testCaseName, test in self.allTestCases.items():
                isInterruptable = getattr(test, "isInterruptable", False)

                # dependency injection: inject the script driver when running scripts
                self.scriptDriver = U2Driver.getScriptDriver(mode="direct")
                setattr(test, self.options.driverName, self.scriptDriver)
                logger.info("Executing unittest testCase %s." % testCaseName)

                try:
                    test._common_setUp()
                    ret: KeaTextTestResult = test(result)
                    if ret.wasFail:
                        logger.error("Failure when running test.")
                    if isInterruptable and not ret.wasFail:
                        logger.info("Launch fastbot after interruptable script.")
                        hybrid_test_count += 1
                        hybrid_test_options = self.options.getKeaTestOptions(hybrid_test_count)

                        # track the sub-report directory for later merging
                        self.hybrid_report_dirs.append(hybrid_test_options.output_dir)

                        argv = ["python3 -m unittest"] + hybrid_test_options.propertytest_args
                        KeaTestRunner.setOptions(hybrid_test_options)
                        unittest_main(module=None, argv=argv, testRunner=KeaTestRunner, exit=False)

                finally:
                    test._common_tearDown()
            result.printErrors()

        # auto-merge all hybrid test reports after all tests complete
        if len(self.hybrid_report_dirs) > 0:
            self._merge_hybrid_reports()

        return result

    def _merge_hybrid_reports(self):
        """
        Merge all hybrid test reports into a single merged report
        """
        try:
            from kea2.report.report_merger import TestReportMerger

            if len(self.hybrid_report_dirs) < 2:
                logger.info("Only one hybrid test report generated, skipping merge.")
                return

            main_output_dir = self.options.output_dir

            merger = TestReportMerger()
            merged_dir = merger.merge_reports(
                result_paths=self.hybrid_report_dirs,
                output_dir=main_output_dir
            )

            merge_summary = merger.get_merge_summary()
        except Exception as e:
            logger.error(f"Error merging hybrid test reports: {e}")

    def collectAllTestCases(self, test: TestSuite):
        """collect all the test cases to prepare for hybrid testing
        """

        def iter_tests(suite):
            for test in suite:
                if isinstance(test, TestSuite):
                    yield from iter_tests(test)
                else:
                    yield test

        funcs = loadFuncsFromFile(getProjectRoot() / "configs" / "teardown.py")
        setUp = funcs.get("setUp", None)
        tearDown = funcs.get("tearDown", None)
        if setUp is None:
            raise ValueError("setUp function not found in teardown.py.")
        if tearDown is None:
            raise ValueError("tearDown function not found in teardown.py.")

        # traverse the TestSuite to get all test cases
        for t in iter_tests(test):

            def dummy(self): ...
            # remove the hook functions in its TestCase and install the common hooks
            t.setUp = types.MethodType(dummy, t)
            t.tearDown = types.MethodType(dummy, t)
            t._common_setUp = types.MethodType(setUp, t)
            t._common_tearDown = types.MethodType(tearDown, t)

            # check if it's interruptable (reflection)
            testMethodName = t._testMethodName
            testMethod = getattr(t, testMethodName)
            isInterruptable = hasattr(testMethod, INTERRUPTABLE_MARKER)

            # save it into allTestCases; if interruptable, mark it as true
            setattr(t, "isInterruptable", isInterruptable)
            self.allTestCases[testMethodName] = t
            logger.info(f"Load TestCase: {getFullPropName(t)} , interruptable: {t.isInterruptable}")

    def __del__(self):
        """Destructor. Clean up the env.
        """
        try:
            if hasattr(self, 'options') and self.options and self.options.Driver:
                self.options.Driver.tearDown()
        except Exception:
            # ignore exceptions in __del__ to avoid "Exception ignored" warnings
            pass


def kea2_breakpoint():
    """Kea2 entry point. Call this function inside a TestCase.

    In hybrid mode, Kea2 automatically switches to the Kea2 test at the kea2_breakpoint.
    A normal unittest launch is not affected.
    """
    if hybrid_mode.get():
        raise SkipTest("Skip the test after the breakpoint and run kea2 in hybrid mode.")
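
# Illustrative hybrid-mode sketch (hypothetical test, not part of the module):
# after kea2_breakpoint(), HybridTestRunner skips the rest of the method and
# launches a Kea2 exploration round; a plain unittest launch runs it end to end.
# `self.d` assumes Options(driverName="d").
#
#     class LoginFlow(TestCase):
#         @interruptable()
#         def test_login_then_explore(self):
#             self.d(resourceId="username").set_text("alice")
#             self.d(resourceId="login").click()
#             kea2_breakpoint()
#             # unreached in hybrid mode; runs in a normal unittest launch
#             assert self.d(text="Welcome").exists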