Kea2-python 0.3.5__py3-none-any.whl → 1.0.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of Kea2-python might be problematic; see the release details below.

kea2/keaUtils.py CHANGED
@@ -1,11 +1,13 @@
1
1
  from collections import deque
2
+ from copy import deepcopy
2
3
  import json
3
4
  import os
4
5
  from pathlib import Path
5
6
  import traceback
6
- import time
7
- from typing import Callable, Any, Deque, Dict, List, Literal, NewType, Union
8
- from unittest import TextTestRunner, registerResult, TestSuite, TestCase, TextTestResult
7
+ from typing import Callable, Any, Deque, Dict, List, Literal, NewType, Tuple, Union
8
+ from contextvars import ContextVar
9
+ from unittest import TextTestRunner, registerResult, TestSuite, TestCase, TextTestResult, defaultTestLoader, SkipTest
10
+ from unittest import main as unittest_main
9
11
  import random
10
12
  import warnings
11
13
  from dataclasses import dataclass, asdict
@@ -14,16 +16,22 @@ from functools import wraps
14
16
  from kea2.bug_report_generator import BugReportGenerator
15
17
  from kea2.resultSyncer import ResultSyncer
16
18
  from kea2.logWatcher import LogWatcher
17
- from kea2.utils import TimeStamp, catchException, getProjectRoot, getLogger, timer
18
- from kea2.u2Driver import StaticU2UiObject, StaticXpathUiObject
19
+ from kea2.utils import TimeStamp, catchException, getProjectRoot, getLogger, loadFuncsFromFile, timer
20
+ from kea2.u2Driver import StaticU2UiObject, StaticXpathUiObject, U2Driver
19
21
  from kea2.fastbotManager import FastbotManager
20
22
  from kea2.adbUtils import ADBDevice
23
+ from kea2.mixin import BetterConsoleLogExtensionMixin
21
24
  import uiautomator2 as u2
22
25
  import types
23
26
 
27
+
28
+ hybrid_mode = ContextVar("hybrid_mode", default=False)
29
+
30
+
24
31
  PRECONDITIONS_MARKER = "preconds"
25
- PROP_MARKER = "prop"
32
+ PROB_MARKER = "prob"
26
33
  MAX_TRIES_MARKER = "max_tries"
34
+ INTERRUPTABLE_MARKER = "interruptable"
27
35
 
28
36
  logger = getLogger(__name__)
29
37
 
@@ -38,6 +46,7 @@ LOGFILE: str
38
46
  RESFILE: str
39
47
  PROP_EXEC_RESFILE: str
40
48
 
49
+
41
50
  def precondition(precond: Callable[[Any], bool]) -> Callable:
42
51
  """the decorator @precondition
43
52
 
@@ -45,18 +54,13 @@ def precondition(precond: Callable[[Any], bool]) -> Callable:
45
54
  A property could have multiple preconditions, each of which is specified by @precondition.
46
55
  """
47
56
  def accept(f):
48
- @wraps(f)
49
- def precondition_wrapper(*args, **kwargs):
50
- return f(*args, **kwargs)
51
-
52
57
  preconds = getattr(f, PRECONDITIONS_MARKER, tuple())
53
-
54
- setattr(precondition_wrapper, PRECONDITIONS_MARKER, preconds + (precond,))
55
-
56
- return precondition_wrapper
58
+ setattr(f, PRECONDITIONS_MARKER, preconds + (precond,))
59
+ return f
57
60
 
58
61
  return accept
59
62
 
63
+
60
64
  def prob(p: float):
61
65
  """the decorator @prob
62
66
 
@@ -65,14 +69,10 @@ def prob(p: float):
65
69
  p = float(p)
66
70
  if not 0 < p <= 1.0:
67
71
  raise ValueError("The propbability should between 0 and 1")
68
- def accept(f):
69
- @wraps(f)
70
- def precondition_wrapper(*args, **kwargs):
71
- return f(*args, **kwargs)
72
72
 
73
- setattr(precondition_wrapper, PROP_MARKER, p)
74
-
75
- return precondition_wrapper
73
+ def accept(f):
74
+ setattr(f, PROB_MARKER, p)
75
+ return f
76
76
 
77
77
  return accept
78
78
 
@@ -85,16 +85,25 @@ def max_tries(n: int):
85
85
  n = int(n)
86
86
  if not n > 0:
87
87
  raise ValueError("The maxium tries should be a positive integer.")
88
+
88
89
  def accept(f):
89
- @wraps(f)
90
- def precondition_wrapper(*args, **kwargs):
91
- return f(*args, **kwargs)
90
+ setattr(f, MAX_TRIES_MARKER, n)
91
+ return f
92
92
 
93
- setattr(precondition_wrapper, MAX_TRIES_MARKER, n)
93
+ return accept
94
94
 
95
- return precondition_wrapper
96
95
 
97
- return accept
96
+ def interruptable(strategy='default'):
97
+ """the decorator @interruptable
98
+
99
+ @interruptable specify the propbability of **fuzzing** when calling every line of code in a property.
100
+ """
101
+
102
+ def decorator(func):
103
+ setattr(func, INTERRUPTABLE_MARKER, True)
104
+ setattr(func, 'strategy', strategy)
105
+ return func
106
+ return decorator
98
107
 
99
108
 
100
109
  @dataclass
@@ -103,11 +112,11 @@ class Options:
103
112
  Kea and Fastbot configurations
104
113
  """
105
114
  # the driver_name in script (if self.d, then d.)
106
- driverName: str
115
+ driverName: str = None
107
116
  # the driver (only U2Driver available now)
108
- Driver: AbstractDriver
117
+ Driver: AbstractDriver = None
109
118
  # list of package names. Specify the apps under test
110
- packageNames: List[str]
119
+ packageNames: List[str] = None
111
120
  # target device
112
121
  serial: str = None
113
122
  # target device with transport_id
@@ -128,6 +137,8 @@ class Options:
128
137
  profile_period: int = 25
129
138
  # take screenshots for every step
130
139
  take_screenshots: bool = False
140
+ # Screenshots before failure (Dump n screenshots before failure. 0 means take screenshots for every step)
141
+ pre_failure_screenshots: int = 0
131
142
  # The root of output dir on device
132
143
  device_output_root: str = "/sdcard"
133
144
  # the debug mode
@@ -136,42 +147,60 @@ class Options:
136
147
  act_whitelist_file: str = None
137
148
  # Activity BlackList File
138
149
  act_blacklist_file: str = None
150
+ # Feat4. propertytest args(eg. discover -s xxx -p xxx)
151
+ propertytest_args: str = None
152
+ # Feat4. unittest args(eg. -v -s xxx -p xxx)
153
+ unittest_args: List[str] = None
154
+ # Extra args
155
+ extra_args: List[str] = None
139
156
 
140
157
  def __setattr__(self, name, value):
141
158
  if value is None:
142
159
  return
143
160
  super().__setattr__(name, value)
144
-
161
+
145
162
  def __post_init__(self):
146
163
  import logging
147
164
  logging.basicConfig(level=logging.DEBUG if self.debug else logging.INFO)
148
-
165
+
149
166
  if self.Driver:
150
- target_device = dict()
151
- if self.serial:
152
- target_device["serial"] = self.serial
153
- if self.transport_id:
154
- target_device["transport_id"] = self.transport_id
155
- self.Driver.setDevice(target_device)
156
- ADBDevice.setDevice(self.serial, self.transport_id)
157
-
158
- global LOGFILE, RESFILE, PROP_EXEC_RESFILE, STAMP
167
+ self._set_driver()
168
+
159
169
  if self.log_stamp:
160
- illegal_chars = ['/', '\\', ':', '*', '?', '"', '<', '>', '|', '\n', '\r', '\t', '\0']
161
- for char in illegal_chars:
162
- if char in self.log_stamp:
163
- raise ValueError(
164
- f"char: `{char}` is illegal in --log-stamp. current stamp: {self.log_stamp}"
165
- )
166
- STAMP = self.log_stamp
167
-
168
- self.log_stamp = STAMP
169
-
170
+ self._sanitize_custom_stamp()
171
+
172
+ global STAMP
170
173
  self.output_dir = Path(self.output_dir).absolute() / f"res_{STAMP}"
174
+ self.set_stamp()
175
+
176
+ self._sanitize_args()
177
+
178
+ _check_package_installation(self.packageNames)
179
+ _save_bug_report_configs(self)
180
+
181
+ def set_stamp(self, stamp: str = None):
182
+ global STAMP, LOGFILE, RESFILE, PROP_EXEC_RESFILE
183
+ if stamp:
184
+ STAMP = stamp
185
+
171
186
  LOGFILE = f"fastbot_{STAMP}.log"
172
187
  RESFILE = f"result_{STAMP}.json"
173
188
  PROP_EXEC_RESFILE = f"property_exec_info_{STAMP}.json"
174
189
 
190
+ def _sanitize_custom_stamp(self):
191
+ global STAMP
192
+ illegal_chars = ['/', '\\', ':', '*', '?', '"', '<', '>', '|', '\n', '\r', '\t', '\0']
193
+ for char in illegal_chars:
194
+ if char in self.log_stamp:
195
+ raise ValueError(
196
+ f"char: `{char}` is illegal in --log-stamp. current stamp: {self.log_stamp}"
197
+ )
198
+ STAMP = self.log_stamp
199
+
200
+ def _sanitize_args(self):
201
+ if not self.take_screenshots and self.pre_failure_screenshots > 0:
202
+ raise ValueError("--screenshots-before-error should be 0 when --take-screenshots is not set.")
203
+
175
204
  self.profile_period = int(self.profile_period)
176
205
  if self.profile_period < 1:
177
206
  raise ValueError("--profile-period should be greater than 0")
@@ -180,7 +209,35 @@ class Options:
180
209
  if self.throttle < 0:
181
210
  raise ValueError("--throttle should be greater than or equal to 0")
182
211
 
183
- _check_package_installation(self.packageNames)
212
+ if self.agent == 'u2' and self.driverName == None:
213
+ raise ValueError("--driver-name should be specified when customizing script in --agent u2")
214
+
215
+ def _set_driver(self):
216
+ target_device = dict()
217
+ if self.serial:
218
+ target_device["serial"] = self.serial
219
+ if self.transport_id:
220
+ target_device["transport_id"] = self.transport_id
221
+ self.Driver.setDevice(target_device)
222
+ ADBDevice.setDevice(self.serial, self.transport_id)
223
+
224
+ def getKeaTestOptions(self, hybrid_test_count: int) -> "Options":
225
+ """ Get the KeaTestOptions for hybrid test run when switching from unittest to kea2 test.
226
+ hybrid_test_count: the count of hybrid test runs
227
+ """
228
+ if not self.unittest_args:
229
+ raise RuntimeError("unittest_args is None. Cannot get KeaTestOptions from it")
230
+
231
+ opts = deepcopy(self)
232
+
233
+ time_stamp = TimeStamp().getTimeStamp()
234
+ hybrid_test_stamp = f"{time_stamp}_hybrid_{hybrid_test_count}"
235
+
236
+ opts.output_dir = self.output_dir / f"res_{hybrid_test_stamp}"
237
+
238
+ opts.set_stamp(hybrid_test_stamp)
239
+ opts.unittest_args = []
240
+ return opts
184
241
 
185
242
 
186
243
  def _check_package_installation(packageNames):
@@ -192,6 +249,20 @@ def _check_package_installation(packageNames):
192
249
  raise ValueError("package not installed")
193
250
 
194
251
 
252
+ def _save_bug_report_configs(options: Options):
253
+ output_dir = options.output_dir
254
+ output_dir.mkdir(parents=True, exist_ok=True)
255
+ configs = {
256
+ "driverName": options.driverName,
257
+ "packageNames": options.packageNames,
258
+ "take_screenshots": options.take_screenshots,
259
+ "pre_failure_screenshots": options.pre_failure_screenshots,
260
+ "device_output_root": options.device_output_root,
261
+ }
262
+ with open(output_dir / "bug_report_config.json", "w", encoding="utf-8") as fp:
263
+ json.dump(configs, fp, indent=4)
264
+
265
+
195
266
  @dataclass
196
267
  class PropStatistic:
197
268
  precond_satisfied: int = 0
@@ -220,12 +291,16 @@ def getFullPropName(testCase: TestCase):
220
291
  ])
221
292
 
222
293
 
223
- class JsonResult(TextTestResult):
294
+ class JsonResult(BetterConsoleLogExtensionMixin, TextTestResult):
224
295
 
225
296
  res: PBTTestResult
226
297
  lastExecutedInfo: PropertyExecutionInfo
227
298
  executionInfoStore: PropertyExecutionInfoStore = deque()
228
299
 
300
+ def __init__(self, stream, descriptions, verbosity):
301
+ super().__init__(stream, descriptions, verbosity)
302
+ self.showAll = True
303
+
229
304
  @classmethod
230
305
  def setProperties(cls, allProperties: Dict):
231
306
  cls.res = dict()
@@ -279,6 +354,17 @@ class JsonResult(TextTestResult):
279
354
  def getExcuted(self, test: TestCase):
280
355
  return self.res[getFullPropName(test)].executed
281
356
 
357
+ def printError(self, test):
358
+ if self.lastExecutedInfo.state in ["fail", "error"]:
359
+ flavour = self.lastExecutedInfo.state.upper()
360
+ self.stream.writeln("")
361
+ self.stream.writeln(self.separator1)
362
+ self.stream.writeln("%s: %s" % (flavour, self.getDescription(test)))
363
+ self.stream.writeln(self.separator2)
364
+ self.stream.writeln("%s" % self.lastExecutedInfo.tb)
365
+ self.stream.writeln(self.separator1)
366
+ self.stream.flush()
367
+
282
368
  def logSummary(self):
283
369
  fails = sum(_.fail for _ in self.res.values())
284
370
  errors = sum(_.error for _ in self.res.values())
@@ -286,12 +372,8 @@ class JsonResult(TextTestResult):
286
372
  logger.info(f"[Property Exectution Summary] Errors:{errors}, Fails:{fails}")
287
373
 
288
374
 
289
- class KeaTestRunner(TextTestRunner):
290
-
291
- resultclass: JsonResult
292
- allProperties: PropertyStore
375
+ class KeaOptionSetter:
293
376
  options: Options = None
294
- _block_funcs: Dict[Literal["widgets", "trees"], List[Callable]] = None
295
377
 
296
378
  @classmethod
297
379
  def setOptions(cls, options: Options):
@@ -301,9 +383,16 @@ class KeaTestRunner(TextTestRunner):
301
383
  logger.warning("[Warning] Can not use any Driver when runing native mode.")
302
384
  options.Driver = None
303
385
  cls.options = options
386
+
387
+
388
+ class KeaTestRunner(TextTestRunner, KeaOptionSetter):
389
+
390
+ resultclass: JsonResult
391
+ allProperties: PropertyStore
392
+ _block_funcs: Dict[Literal["widgets", "trees"], List[Callable]] = None
304
393
 
305
394
  def _setOuputDir(self):
306
- output_dir = Path(self.options.output_dir).absolute()
395
+ output_dir = self.options.output_dir
307
396
  output_dir.mkdir(parents=True, exist_ok=True)
308
397
  global LOGFILE, RESFILE, PROP_EXEC_RESFILE
309
398
  LOGFILE = output_dir / Path(LOGFILE)
@@ -357,7 +446,7 @@ class KeaTestRunner(TextTestRunner):
357
446
  # initialize the result.json file
358
447
  result.flushResult()
359
448
  # setUp for the u2 driver
360
- self.scriptDriver = self.options.Driver.getScriptDriver()
449
+ self.scriptDriver = U2Driver.getScriptDriver(mode="proxy")
361
450
  fb.check_alive()
362
451
 
363
452
  fb.init(options=self.options, stamp=STAMP)
@@ -369,15 +458,18 @@ class KeaTestRunner(TextTestRunner):
369
458
  self.stepsCount = 0
370
459
  while self.stepsCount < self.options.maxStep:
371
460
 
372
- self.stepsCount += 1
373
- logger.info("Sending monkeyEvent {}".format(
374
- f"({self.stepsCount} / {self.options.maxStep})" if self.options.maxStep != float("inf")
375
- else f"({self.stepsCount})"
376
- )
377
- )
378
-
379
461
  try:
380
- xml_raw = fb.stepMonkey(self._monkeyStepInfo)
462
+ if fb.executed_prop:
463
+ fb.executed_prop = False
464
+ xml_raw = fb.dumpHierarchy()
465
+ else:
466
+ self.stepsCount += 1
467
+ logger.info("Sending monkeyEvent {}".format(
468
+ f"({self.stepsCount} / {self.options.maxStep})" if self.options.maxStep != float("inf")
469
+ else f"({self.stepsCount})"
470
+ )
471
+ )
472
+ xml_raw = fb.stepMonkey(self._monkeyStepInfo)
381
473
  propsSatisfiedPrecond = self.getValidProperties(xml_raw, result)
382
474
  except u2.HTTPError:
383
475
  logger.info("Connection refused by remote.")
@@ -400,7 +492,7 @@ class KeaTestRunner(TextTestRunner):
400
492
  # filter the properties according to the given p
401
493
  for propName, test in propsSatisfiedPrecond.items():
402
494
  result.addPrecondSatisfied(test)
403
- if getattr(test, "p", 1) >= p:
495
+ if getattr(test, PROB_MARKER, 1) >= p:
404
496
  propsNameFilteredByP.append(propName)
405
497
 
406
498
  if len(propsNameFilteredByP) == 0:
@@ -410,19 +502,20 @@ class KeaTestRunner(TextTestRunner):
410
502
  execPropName = random.choice(propsNameFilteredByP)
411
503
  test = propsSatisfiedPrecond[execPropName]
412
504
  # Dependency Injection. driver when doing scripts
413
- self.scriptDriver = self.options.Driver.getScriptDriver()
505
+ self.scriptDriver = U2Driver.getScriptDriver(mode="proxy")
506
+
414
507
  setattr(test, self.options.driverName, self.scriptDriver)
415
- print("execute property %s." % execPropName, flush=True)
416
508
 
417
509
  result.addExcuted(test, self.stepsCount)
418
510
  fb.logScript(result.lastExecutedInfo)
419
511
  try:
420
512
  test(result)
421
513
  finally:
422
- result.printErrors()
514
+ result.printError(test)
423
515
 
424
516
  result.updateExectedInfo()
425
517
  fb.logScript(result.lastExecutedInfo)
518
+ fb.executed_prop = True
426
519
  result.flushResult()
427
520
 
428
521
  if not end_by_remote:
@@ -433,41 +526,6 @@ class KeaTestRunner(TextTestRunner):
433
526
  fb.join()
434
527
  print(f"Finish sending monkey events.", flush=True)
435
528
  log_watcher.close()
436
-
437
- # Source code from unittest Runner
438
- # process the result
439
- expectedFails = unexpectedSuccesses = skipped = 0
440
- try:
441
- results = map(
442
- len,
443
- (result.expectedFailures, result.unexpectedSuccesses, result.skipped),
444
- )
445
- except AttributeError:
446
- pass
447
- else:
448
- expectedFails, unexpectedSuccesses, skipped = results
449
-
450
- infos = []
451
- if not result.wasSuccessful():
452
- self.stream.write("FAILED")
453
- failed, errored = len(result.failures), len(result.errors)
454
- if failed:
455
- infos.append("failures=%d" % failed)
456
- if errored:
457
- infos.append("errors=%d" % errored)
458
- else:
459
- self.stream.write("OK")
460
- if skipped:
461
- infos.append("skipped=%d" % skipped)
462
- if expectedFails:
463
- infos.append("expected failures=%d" % expectedFails)
464
- if unexpectedSuccesses:
465
- infos.append("unexpected successes=%d" % unexpectedSuccesses)
466
- if infos:
467
- self.stream.writeln(" (%s)" % (", ".join(infos),))
468
- else:
469
- self.stream.write("\n")
470
- self.stream.flush()
471
529
 
472
530
  result.logSummary()
473
531
  return result
@@ -491,12 +549,14 @@ class KeaTestRunner(TextTestRunner):
491
549
 
492
550
  def getValidProperties(self, xml_raw: str, result: JsonResult) -> PropertyStore:
493
551
 
494
- staticCheckerDriver = self.options.Driver.getStaticChecker(hierarchy=xml_raw)
552
+ staticCheckerDriver = U2Driver.getStaticChecker(hierarchy=xml_raw)
495
553
 
496
554
  validProps: PropertyStore = dict()
497
555
  for propName, test in self.allProperties.items():
498
556
  valid = True
499
557
  prop = getattr(test, propName)
558
+ p = getattr(prop, PROB_MARKER, 1)
559
+ setattr(test, PROB_MARKER, p)
500
560
  # check if all preconds passed
501
561
  for precond in prop.preconds:
502
562
  # Dependency injection. Static driver checker for precond
@@ -551,7 +611,12 @@ class KeaTestRunner(TextTestRunner):
551
611
  yield test
552
612
 
553
613
  # Traverse the TestCase to get all properties
614
+ _result = TextTestResult(self.stream, self.descriptions, self.verbosity)
554
615
  for t in iter_tests(test):
616
+ # Find all the _FailedTest (Caused by ImportError) and directly run it to report errors
617
+ if type(t).__name__ == "_FailedTest":
618
+ t(_result)
619
+ continue
555
620
  testMethodName = t._testMethodName
556
621
  # get the test method name and check if it's a property
557
622
  testMethod = getattr(t, testMethodName)
@@ -562,6 +627,8 @@ class KeaTestRunner(TextTestRunner):
562
627
  # save it into allProperties for PBT
563
628
  self.allProperties[testMethodName] = t
564
629
  print(f"[INFO] Load property: {getFullPropName(t)}", flush=True)
630
+ # Print errors caused by ImportError
631
+ _result.printErrors()
565
632
 
566
633
  @property
567
634
  def _blockWidgetFuncs(self):
@@ -642,7 +709,7 @@ class KeaTestRunner(TextTestRunner):
642
709
 
643
710
  if preconds_pass(preconds):
644
711
  try:
645
- _widgets = func(self.options.Driver.getStaticChecker())
712
+ _widgets = func(U2Driver.getStaticChecker())
646
713
  _widgets = _widgets if isinstance(_widgets, list) else [_widgets]
647
714
  for w in _widgets:
648
715
  if isinstance(w, (StaticU2UiObject, StaticXpathUiObject)):
@@ -688,4 +755,189 @@ class KeaTestRunner(TextTestRunner):
688
755
  if self.options.Driver:
689
756
  self.options.Driver.tearDown()
690
757
 
691
- self._generate_bug_report()
758
+ if self.options.agent == "u2":
759
+ self._generate_bug_report()
760
+
761
+
762
+ class KeaTextTestResult(BetterConsoleLogExtensionMixin, TextTestResult):
763
+
764
+ @property
765
+ def wasFail(self):
766
+ return self._wasFail
767
+
768
+ def addError(self, test, err):
769
+ self._wasFail = True
770
+ return super().addError(test, err)
771
+
772
+ def addFailure(self, test, err):
773
+ self._wasFail = True
774
+ return super().addFailure(test, err)
775
+
776
+ def addSuccess(self, test):
777
+ self._wasFail = False
778
+ return super().addSuccess(test)
779
+
780
+ def addSkip(self, test, reason):
781
+ self._wasFail = False
782
+ return super().addSkip(test, reason)
783
+
784
+ def addExpectedFailure(self, test, err):
785
+ self._wasFail = False
786
+ return super().addExpectedFailure(test, err)
787
+
788
+ def addUnexpectedSuccess(self, test):
789
+ self._wasFail = False
790
+ return super().addUnexpectedSuccess(test)
791
+
792
+
793
+ class HybridTestRunner(TextTestRunner, KeaOptionSetter):
794
+
795
+ allTestCases: Dict[str, Tuple[TestCase, bool]]
796
+ _common_teardown_func = None
797
+ resultclass = KeaTextTestResult
798
+
799
+ def __init__(self, stream = None, descriptions = True, verbosity = 1, failfast = False, buffer = False, resultclass = None, warnings = None, *, tb_locals = False):
800
+ super().__init__(stream, descriptions, verbosity, failfast, buffer, resultclass, warnings, tb_locals=tb_locals)
801
+ hybrid_mode.set(True)
802
+ self.hybrid_report_dirs = []
803
+
804
+ def run(self, test):
805
+
806
+ self.allTestCases = dict()
807
+ self.collectAllTestCases(test)
808
+ if len(self.allTestCases) == 0:
809
+ logger.warning("[Warning] No test case has been found.")
810
+
811
+ result: KeaTextTestResult = self._makeResult()
812
+ registerResult(result)
813
+ result.failfast = self.failfast
814
+ result.buffer = self.buffer
815
+ result.tb_locals = self.tb_locals
816
+ with warnings.catch_warnings():
817
+ if self.warnings:
818
+ # if self.warnings is set, use it to filter all the warnings
819
+ warnings.simplefilter(self.warnings)
820
+ # if the filter is 'default' or 'always', special-case the
821
+ # warnings from the deprecated unittest methods to show them
822
+ # no more than once per module, because they can be fairly
823
+ # noisy. The -Wd and -Wa flags can be used to bypass this
824
+ # only when self.warnings is None.
825
+ if self.warnings in ["default", "always"]:
826
+ warnings.filterwarnings(
827
+ "module",
828
+ category=DeprecationWarning,
829
+ message=r"Please use assert\w+ instead.",
830
+ )
831
+
832
+ hybrid_test_count = 0
833
+ for testCaseName, test in self.allTestCases.items():
834
+ test, isInterruptable = test, getattr(test, "isInterruptable", False)
835
+
836
+ # Dependency Injection. driver when doing scripts
837
+ self.scriptDriver = U2Driver.getScriptDriver(mode="direct")
838
+ setattr(test, self.options.driverName, self.scriptDriver)
839
+ logger.info("Executing unittest testCase %s." % testCaseName)
840
+
841
+ try:
842
+ test._common_setUp()
843
+ ret: KeaTextTestResult = test(result)
844
+ if ret.wasFail:
845
+ logger.error(f"Fail when running test.")
846
+ if isInterruptable and not ret.wasFail:
847
+ logger.info(f"Launch fastbot after interruptable script.")
848
+ hybrid_test_count += 1
849
+ hybrid_test_options = self.options.getKeaTestOptions(hybrid_test_count)
850
+
851
+ # Track the sub-report directory for later merging
852
+ self.hybrid_report_dirs.append(hybrid_test_options.output_dir)
853
+
854
+ argv = ["python3 -m unittest"] + hybrid_test_options.propertytest_args
855
+ KeaTestRunner.setOptions(hybrid_test_options)
856
+ unittest_main(module=None, argv=argv, testRunner=KeaTestRunner, exit=False)
857
+
858
+ finally:
859
+ test._common_tearDown()
860
+ result.printErrors()
861
+
862
+ # Auto-merge all hybrid test reports after all tests complete
863
+ if len(self.hybrid_report_dirs) > 0:
864
+ self._merge_hybrid_reports()
865
+
866
+ return result
867
+
868
+ def _merge_hybrid_reports(self):
869
+ """
870
+ Merge all hybrid test reports into a single merged report
871
+ """
872
+ try:
873
+ from kea2.report_merger import TestReportMerger
874
+
875
+ if len(self.hybrid_report_dirs) < 2:
876
+ logger.info("Only one hybrid test report generated, skipping merge.")
877
+ return
878
+
879
+ main_output_dir = self.options.output_dir
880
+
881
+ merger = TestReportMerger()
882
+ merged_dir = merger.merge_reports(
883
+ result_paths=self.hybrid_report_dirs,
884
+ output_dir=main_output_dir
885
+ )
886
+
887
+ merge_summary = merger.get_merge_summary()
888
+ except Exception as e:
889
+ logger.error(f"Error merging hybrid test reports: {e}")
890
+
891
+ def collectAllTestCases(self, test: TestSuite):
892
+ """collect all the properties to prepare for PBT
893
+ """
894
+
895
+ def iter_tests(suite):
896
+ for test in suite:
897
+ if isinstance(test, TestSuite):
898
+ yield from iter_tests(test)
899
+ else:
900
+ yield test
901
+
902
+ funcs = loadFuncsFromFile(getProjectRoot() / "configs" / "teardown.py")
903
+ setUp = funcs.get("setUp", None)
904
+ tearDown = funcs.get("tearDown", None)
905
+ if setUp is None:
906
+ raise ValueError("setUp function not found in teardown.py.")
907
+ if tearDown is None:
908
+ raise ValueError("tearDown function not found in teardown.py.")
909
+
910
+ # Traverse the TestCase to get all properties
911
+ for t in iter_tests(test):
912
+
913
+ def dummy(self): ...
914
+ # remove the hook func in its TestCase
915
+ t.setUp = types.MethodType(dummy, t)
916
+ t.tearDown = types.MethodType(dummy, t)
917
+ t._common_setUp = types.MethodType(setUp, t)
918
+ t._common_tearDown = types.MethodType(tearDown, t)
919
+
920
+ # check if it's interruptable (reflection)
921
+ testMethodName = t._testMethodName
922
+ testMethod = getattr(t, testMethodName)
923
+ isInterruptable = hasattr(testMethod, INTERRUPTABLE_MARKER)
924
+
925
+ # save it into allTestCases, if interruptable, mark as true
926
+ setattr(t, "isInterruptable", isInterruptable)
927
+ self.allTestCases[testMethodName] = t
928
+ logger.info(f"Load TestCase: {getFullPropName(t)} , interruptable: {t.isInterruptable}")
929
+
930
+ def __del__(self):
931
+ """tearDown method. Cleanup the env.
932
+ """
933
+ if self.options.Driver:
934
+ self.options.Driver.tearDown()
935
+
936
+
937
+ def kea2_breakpoint():
938
+ """kea2 entrance. Call this function in TestCase.
939
+ Kea2 will automatically switch to Kea2 Test in kea2_breakpoint in HybridTest mode.
940
+ The normal launch in unittest will not be affected.
941
+ """
942
+ if hybrid_mode.get():
943
+ raise SkipTest("Skip the test after the breakpoint and run kea2 in hybrid mode.")