Kea2-python 0.3.6__py3-none-any.whl → 1.0.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of Kea2-python might be problematic. See the registry's advisory page for more details.

kea2/keaUtils.py CHANGED
@@ -1,11 +1,13 @@
1
1
  from collections import deque
2
+ from copy import deepcopy
2
3
  import json
3
4
  import os
4
5
  from pathlib import Path
5
6
  import traceback
6
- import time
7
- from typing import Callable, Any, Deque, Dict, List, Literal, NewType, Union
8
- from unittest import TextTestRunner, registerResult, TestSuite, TestCase, TextTestResult
7
+ from typing import Callable, Any, Deque, Dict, List, Literal, NewType, Tuple, Union
8
+ from contextvars import ContextVar
9
+ from unittest import TextTestRunner, registerResult, TestSuite, TestCase, TextTestResult, defaultTestLoader, SkipTest
10
+ from unittest import main as unittest_main
9
11
  import random
10
12
  import warnings
11
13
  from dataclasses import dataclass, asdict
@@ -14,16 +16,22 @@ from functools import wraps
14
16
  from kea2.bug_report_generator import BugReportGenerator
15
17
  from kea2.resultSyncer import ResultSyncer
16
18
  from kea2.logWatcher import LogWatcher
17
- from kea2.utils import TimeStamp, catchException, getProjectRoot, getLogger, timer
18
- from kea2.u2Driver import StaticU2UiObject, StaticXpathUiObject
19
+ from kea2.utils import TimeStamp, catchException, getProjectRoot, getLogger, loadFuncsFromFile, timer
20
+ from kea2.u2Driver import StaticU2UiObject, StaticXpathUiObject, U2Driver
19
21
  from kea2.fastbotManager import FastbotManager
20
22
  from kea2.adbUtils import ADBDevice
23
+ from kea2.mixin import BetterConsoleLogExtensionMixin
21
24
  import uiautomator2 as u2
22
25
  import types
23
26
 
27
+
28
+ hybrid_mode = ContextVar("hybrid_mode", default=False)
29
+
30
+
24
31
  PRECONDITIONS_MARKER = "preconds"
25
- PROP_MARKER = "prop"
32
+ PROB_MARKER = "prob"
26
33
  MAX_TRIES_MARKER = "max_tries"
34
+ INTERRUPTABLE_MARKER = "interruptable"
27
35
 
28
36
  logger = getLogger(__name__)
29
37
 
@@ -38,6 +46,7 @@ LOGFILE: str
38
46
  RESFILE: str
39
47
  PROP_EXEC_RESFILE: str
40
48
 
49
+
41
50
  def precondition(precond: Callable[[Any], bool]) -> Callable:
42
51
  """the decorator @precondition
43
52
 
@@ -45,18 +54,13 @@ def precondition(precond: Callable[[Any], bool]) -> Callable:
45
54
  A property could have multiple preconditions, each of which is specified by @precondition.
46
55
  """
47
56
  def accept(f):
48
- @wraps(f)
49
- def precondition_wrapper(*args, **kwargs):
50
- return f(*args, **kwargs)
51
-
52
57
  preconds = getattr(f, PRECONDITIONS_MARKER, tuple())
53
-
54
- setattr(precondition_wrapper, PRECONDITIONS_MARKER, preconds + (precond,))
55
-
56
- return precondition_wrapper
58
+ setattr(f, PRECONDITIONS_MARKER, preconds + (precond,))
59
+ return f
57
60
 
58
61
  return accept
59
62
 
63
+
60
64
  def prob(p: float):
61
65
  """the decorator @prob
62
66
 
@@ -65,14 +69,10 @@ def prob(p: float):
65
69
  p = float(p)
66
70
  if not 0 < p <= 1.0:
67
71
  raise ValueError("The probability should be between 0 and 1")
68
- def accept(f):
69
- @wraps(f)
70
- def precondition_wrapper(*args, **kwargs):
71
- return f(*args, **kwargs)
72
72
 
73
- setattr(precondition_wrapper, PROP_MARKER, p)
74
-
75
- return precondition_wrapper
73
+ def accept(f):
74
+ setattr(f, PROB_MARKER, p)
75
+ return f
76
76
 
77
77
  return accept
78
78
 
@@ -85,16 +85,25 @@ def max_tries(n: int):
85
85
  n = int(n)
86
86
  if not n > 0:
87
87
  raise ValueError("The maximum number of tries should be a positive integer.")
88
+
88
89
  def accept(f):
89
- @wraps(f)
90
- def precondition_wrapper(*args, **kwargs):
91
- return f(*args, **kwargs)
90
+ setattr(f, MAX_TRIES_MARKER, n)
91
+ return f
92
92
 
93
- setattr(precondition_wrapper, MAX_TRIES_MARKER, n)
93
+ return accept
94
94
 
95
- return precondition_wrapper
96
95
 
97
- return accept
96
+ def interruptable(strategy='default'):
97
+ """the decorator @interruptable
98
+
99
+ @interruptable marks a test case as interruptable, so that fuzzing can be launched after the script completes (the `strategy` argument selects how).
100
+ """
101
+
102
+ def decorator(func):
103
+ setattr(func, INTERRUPTABLE_MARKER, True)
104
+ setattr(func, 'strategy', strategy)
105
+ return func
106
+ return decorator
98
107
 
99
108
 
100
109
  @dataclass
@@ -103,11 +112,11 @@ class Options:
103
112
  Kea and Fastbot configurations
104
113
  """
105
114
  # the driver_name in script (if self.d, then d.)
106
- driverName: str
115
+ driverName: str = None
107
116
  # the driver (only U2Driver available now)
108
- Driver: AbstractDriver
117
+ Driver: AbstractDriver = None
109
118
  # list of package names. Specify the apps under test
110
- packageNames: List[str]
119
+ packageNames: List[str] = None
111
120
  # target device
112
121
  serial: str = None
113
122
  # target device with transport_id
@@ -128,6 +137,8 @@ class Options:
128
137
  profile_period: int = 25
129
138
  # take screenshots for every step
130
139
  take_screenshots: bool = False
140
+ # Screenshots before failure (Dump n screenshots before failure. 0 means take screenshots for every step)
141
+ pre_failure_screenshots: int = 0
131
142
  # The root of output dir on device
132
143
  device_output_root: str = "/sdcard"
133
144
  # the debug mode
@@ -136,6 +147,10 @@ class Options:
136
147
  act_whitelist_file: str = None
137
148
  # Activity BlackList File
138
149
  act_blacklist_file: str = None
150
+ # Feat4. propertytest args(eg. discover -s xxx -p xxx)
151
+ propertytest_args: str = None
152
+ # Feat4. unittest args(eg. -v -s xxx -p xxx)
153
+ unittest_args: List[str] = None
139
154
  # Extra args
140
155
  extra_args: List[str] = None
141
156
 
@@ -143,37 +158,49 @@ class Options:
143
158
  if value is None:
144
159
  return
145
160
  super().__setattr__(name, value)
146
-
161
+
147
162
  def __post_init__(self):
148
163
  import logging
149
164
  logging.basicConfig(level=logging.DEBUG if self.debug else logging.INFO)
150
-
165
+
151
166
  if self.Driver:
152
- target_device = dict()
153
- if self.serial:
154
- target_device["serial"] = self.serial
155
- if self.transport_id:
156
- target_device["transport_id"] = self.transport_id
157
- self.Driver.setDevice(target_device)
158
- ADBDevice.setDevice(self.serial, self.transport_id)
159
-
160
- global LOGFILE, RESFILE, PROP_EXEC_RESFILE, STAMP
167
+ self._set_driver()
168
+
161
169
  if self.log_stamp:
162
- illegal_chars = ['/', '\\', ':', '*', '?', '"', '<', '>', '|', '\n', '\r', '\t', '\0']
163
- for char in illegal_chars:
164
- if char in self.log_stamp:
165
- raise ValueError(
166
- f"char: `{char}` is illegal in --log-stamp. current stamp: {self.log_stamp}"
167
- )
168
- STAMP = self.log_stamp
169
-
170
- self.log_stamp = STAMP
171
-
170
+ self._sanitize_custom_stamp()
171
+
172
+ global STAMP
172
173
  self.output_dir = Path(self.output_dir).absolute() / f"res_{STAMP}"
174
+ self.set_stamp()
175
+
176
+ self._sanitize_args()
177
+
178
+ _check_package_installation(self.packageNames)
179
+ _save_bug_report_configs(self)
180
+
181
+ def set_stamp(self, stamp: str = None):
182
+ global STAMP, LOGFILE, RESFILE, PROP_EXEC_RESFILE
183
+ if stamp:
184
+ STAMP = stamp
185
+
173
186
  LOGFILE = f"fastbot_{STAMP}.log"
174
187
  RESFILE = f"result_{STAMP}.json"
175
188
  PROP_EXEC_RESFILE = f"property_exec_info_{STAMP}.json"
176
189
 
190
+ def _sanitize_custom_stamp(self):
191
+ global STAMP
192
+ illegal_chars = ['/', '\\', ':', '*', '?', '"', '<', '>', '|', '\n', '\r', '\t', '\0']
193
+ for char in illegal_chars:
194
+ if char in self.log_stamp:
195
+ raise ValueError(
196
+ f"char: `{char}` is illegal in --log-stamp. current stamp: {self.log_stamp}"
197
+ )
198
+ STAMP = self.log_stamp
199
+
200
+ def _sanitize_args(self):
201
+ if not self.take_screenshots and self.pre_failure_screenshots > 0:
202
+ raise ValueError("--screenshots-before-error should be 0 when --take-screenshots is not set.")
203
+
177
204
  self.profile_period = int(self.profile_period)
178
205
  if self.profile_period < 1:
179
206
  raise ValueError("--profile-period should be greater than 0")
@@ -182,7 +209,35 @@ class Options:
182
209
  if self.throttle < 0:
183
210
  raise ValueError("--throttle should be greater than or equal to 0")
184
211
 
185
- _check_package_installation(self.packageNames)
212
+ if self.agent == 'u2' and self.driverName == None:
213
+ raise ValueError("--driver-name should be specified when customizing script in --agent u2")
214
+
215
+ def _set_driver(self):
216
+ target_device = dict()
217
+ if self.serial:
218
+ target_device["serial"] = self.serial
219
+ if self.transport_id:
220
+ target_device["transport_id"] = self.transport_id
221
+ self.Driver.setDevice(target_device)
222
+ ADBDevice.setDevice(self.serial, self.transport_id)
223
+
224
+ def getKeaTestOptions(self, hybrid_test_count: int) -> "Options":
225
+ """ Get the KeaTestOptions for hybrid test run when switching from unittest to kea2 test.
226
+ hybrid_test_count: the count of hybrid test runs
227
+ """
228
+ if not self.unittest_args:
229
+ raise RuntimeError("unittest_args is None. Cannot get KeaTestOptions from it")
230
+
231
+ opts = deepcopy(self)
232
+
233
+ time_stamp = TimeStamp().getTimeStamp()
234
+ hybrid_test_stamp = f"{time_stamp}_hybrid_{hybrid_test_count}"
235
+
236
+ opts.output_dir = self.output_dir / f"res_{hybrid_test_stamp}"
237
+
238
+ opts.set_stamp(hybrid_test_stamp)
239
+ opts.unittest_args = []
240
+ return opts
186
241
 
187
242
 
188
243
  def _check_package_installation(packageNames):
@@ -194,6 +249,20 @@ def _check_package_installation(packageNames):
194
249
  raise ValueError("package not installed")
195
250
 
196
251
 
252
+ def _save_bug_report_configs(options: Options):
253
+ output_dir = options.output_dir
254
+ output_dir.mkdir(parents=True, exist_ok=True)
255
+ configs = {
256
+ "driverName": options.driverName,
257
+ "packageNames": options.packageNames,
258
+ "take_screenshots": options.take_screenshots,
259
+ "pre_failure_screenshots": options.pre_failure_screenshots,
260
+ "device_output_root": options.device_output_root,
261
+ }
262
+ with open(output_dir / "bug_report_config.json", "w", encoding="utf-8") as fp:
263
+ json.dump(configs, fp, indent=4)
264
+
265
+
197
266
  @dataclass
198
267
  class PropStatistic:
199
268
  precond_satisfied: int = 0
@@ -222,12 +291,16 @@ def getFullPropName(testCase: TestCase):
222
291
  ])
223
292
 
224
293
 
225
- class JsonResult(TextTestResult):
294
+ class JsonResult(BetterConsoleLogExtensionMixin, TextTestResult):
226
295
 
227
296
  res: PBTTestResult
228
297
  lastExecutedInfo: PropertyExecutionInfo
229
298
  executionInfoStore: PropertyExecutionInfoStore = deque()
230
299
 
300
+ def __init__(self, stream, descriptions, verbosity):
301
+ super().__init__(stream, descriptions, verbosity)
302
+ self.showAll = True
303
+
231
304
  @classmethod
232
305
  def setProperties(cls, allProperties: Dict):
233
306
  cls.res = dict()
@@ -281,6 +354,17 @@ class JsonResult(TextTestResult):
281
354
  def getExcuted(self, test: TestCase):
282
355
  return self.res[getFullPropName(test)].executed
283
356
 
357
+ def printError(self, test):
358
+ if self.lastExecutedInfo.state in ["fail", "error"]:
359
+ flavour = self.lastExecutedInfo.state.upper()
360
+ self.stream.writeln("")
361
+ self.stream.writeln(self.separator1)
362
+ self.stream.writeln("%s: %s" % (flavour, self.getDescription(test)))
363
+ self.stream.writeln(self.separator2)
364
+ self.stream.writeln("%s" % self.lastExecutedInfo.tb)
365
+ self.stream.writeln(self.separator1)
366
+ self.stream.flush()
367
+
284
368
  def logSummary(self):
285
369
  fails = sum(_.fail for _ in self.res.values())
286
370
  errors = sum(_.error for _ in self.res.values())
@@ -288,12 +372,8 @@ class JsonResult(TextTestResult):
288
372
  logger.info(f"[Property Execution Summary] Errors:{errors}, Fails:{fails}")
289
373
 
290
374
 
291
- class KeaTestRunner(TextTestRunner):
292
-
293
- resultclass: JsonResult
294
- allProperties: PropertyStore
375
+ class KeaOptionSetter:
295
376
  options: Options = None
296
- _block_funcs: Dict[Literal["widgets", "trees"], List[Callable]] = None
297
377
 
298
378
  @classmethod
299
379
  def setOptions(cls, options: Options):
@@ -303,9 +383,16 @@ class KeaTestRunner(TextTestRunner):
303
383
  logger.warning("[Warning] Can not use any Driver when runing native mode.")
304
384
  options.Driver = None
305
385
  cls.options = options
386
+
387
+
388
+ class KeaTestRunner(TextTestRunner, KeaOptionSetter):
389
+
390
+ resultclass: JsonResult
391
+ allProperties: PropertyStore
392
+ _block_funcs: Dict[Literal["widgets", "trees"], List[Callable]] = None
306
393
 
307
394
  def _setOuputDir(self):
308
- output_dir = Path(self.options.output_dir).absolute()
395
+ output_dir = self.options.output_dir
309
396
  output_dir.mkdir(parents=True, exist_ok=True)
310
397
  global LOGFILE, RESFILE, PROP_EXEC_RESFILE
311
398
  LOGFILE = output_dir / Path(LOGFILE)
@@ -359,7 +446,7 @@ class KeaTestRunner(TextTestRunner):
359
446
  # initialize the result.json file
360
447
  result.flushResult()
361
448
  # setUp for the u2 driver
362
- self.scriptDriver = self.options.Driver.getScriptDriver()
449
+ self.scriptDriver = U2Driver.getScriptDriver(mode="proxy")
363
450
  fb.check_alive()
364
451
 
365
452
  fb.init(options=self.options, stamp=STAMP)
@@ -371,15 +458,18 @@ class KeaTestRunner(TextTestRunner):
371
458
  self.stepsCount = 0
372
459
  while self.stepsCount < self.options.maxStep:
373
460
 
374
- self.stepsCount += 1
375
- logger.info("Sending monkeyEvent {}".format(
376
- f"({self.stepsCount} / {self.options.maxStep})" if self.options.maxStep != float("inf")
377
- else f"({self.stepsCount})"
378
- )
379
- )
380
-
381
461
  try:
382
- xml_raw = fb.stepMonkey(self._monkeyStepInfo)
462
+ if fb.executed_prop:
463
+ fb.executed_prop = False
464
+ xml_raw = fb.dumpHierarchy()
465
+ else:
466
+ self.stepsCount += 1
467
+ logger.info("Sending monkeyEvent {}".format(
468
+ f"({self.stepsCount} / {self.options.maxStep})" if self.options.maxStep != float("inf")
469
+ else f"({self.stepsCount})"
470
+ )
471
+ )
472
+ xml_raw = fb.stepMonkey(self._monkeyStepInfo)
383
473
  propsSatisfiedPrecond = self.getValidProperties(xml_raw, result)
384
474
  except u2.HTTPError:
385
475
  logger.info("Connection refused by remote.")
@@ -402,7 +492,7 @@ class KeaTestRunner(TextTestRunner):
402
492
  # filter the properties according to the given p
403
493
  for propName, test in propsSatisfiedPrecond.items():
404
494
  result.addPrecondSatisfied(test)
405
- if getattr(test, "p", 1) >= p:
495
+ if getattr(test, PROB_MARKER, 1) >= p:
406
496
  propsNameFilteredByP.append(propName)
407
497
 
408
498
  if len(propsNameFilteredByP) == 0:
@@ -412,19 +502,20 @@ class KeaTestRunner(TextTestRunner):
412
502
  execPropName = random.choice(propsNameFilteredByP)
413
503
  test = propsSatisfiedPrecond[execPropName]
414
504
  # Dependency Injection. driver when doing scripts
415
- self.scriptDriver = self.options.Driver.getScriptDriver()
505
+ self.scriptDriver = U2Driver.getScriptDriver(mode="proxy")
506
+
416
507
  setattr(test, self.options.driverName, self.scriptDriver)
417
- print("execute property %s." % execPropName, flush=True)
418
508
 
419
509
  result.addExcuted(test, self.stepsCount)
420
510
  fb.logScript(result.lastExecutedInfo)
421
511
  try:
422
512
  test(result)
423
513
  finally:
424
- result.printErrors()
514
+ result.printError(test)
425
515
 
426
516
  result.updateExectedInfo()
427
517
  fb.logScript(result.lastExecutedInfo)
518
+ fb.executed_prop = True
428
519
  result.flushResult()
429
520
 
430
521
  if not end_by_remote:
@@ -435,41 +526,6 @@ class KeaTestRunner(TextTestRunner):
435
526
  fb.join()
436
527
  print(f"Finish sending monkey events.", flush=True)
437
528
  log_watcher.close()
438
-
439
- # Source code from unittest Runner
440
- # process the result
441
- expectedFails = unexpectedSuccesses = skipped = 0
442
- try:
443
- results = map(
444
- len,
445
- (result.expectedFailures, result.unexpectedSuccesses, result.skipped),
446
- )
447
- except AttributeError:
448
- pass
449
- else:
450
- expectedFails, unexpectedSuccesses, skipped = results
451
-
452
- infos = []
453
- if not result.wasSuccessful():
454
- self.stream.write("FAILED")
455
- failed, errored = len(result.failures), len(result.errors)
456
- if failed:
457
- infos.append("failures=%d" % failed)
458
- if errored:
459
- infos.append("errors=%d" % errored)
460
- else:
461
- self.stream.write("OK")
462
- if skipped:
463
- infos.append("skipped=%d" % skipped)
464
- if expectedFails:
465
- infos.append("expected failures=%d" % expectedFails)
466
- if unexpectedSuccesses:
467
- infos.append("unexpected successes=%d" % unexpectedSuccesses)
468
- if infos:
469
- self.stream.writeln(" (%s)" % (", ".join(infos),))
470
- else:
471
- self.stream.write("\n")
472
- self.stream.flush()
473
529
 
474
530
  result.logSummary()
475
531
  return result
@@ -493,12 +549,14 @@ class KeaTestRunner(TextTestRunner):
493
549
 
494
550
  def getValidProperties(self, xml_raw: str, result: JsonResult) -> PropertyStore:
495
551
 
496
- staticCheckerDriver = self.options.Driver.getStaticChecker(hierarchy=xml_raw)
552
+ staticCheckerDriver = U2Driver.getStaticChecker(hierarchy=xml_raw)
497
553
 
498
554
  validProps: PropertyStore = dict()
499
555
  for propName, test in self.allProperties.items():
500
556
  valid = True
501
557
  prop = getattr(test, propName)
558
+ p = getattr(prop, PROB_MARKER, 1)
559
+ setattr(test, PROB_MARKER, p)
502
560
  # check if all preconds passed
503
561
  for precond in prop.preconds:
504
562
  # Dependency injection. Static driver checker for precond
@@ -553,7 +611,12 @@ class KeaTestRunner(TextTestRunner):
553
611
  yield test
554
612
 
555
613
  # Traverse the TestCase to get all properties
614
+ _result = TextTestResult(self.stream, self.descriptions, self.verbosity)
556
615
  for t in iter_tests(test):
616
+ # Find all the _FailedTest (Caused by ImportError) and directly run it to report errors
617
+ if type(t).__name__ == "_FailedTest":
618
+ t(_result)
619
+ continue
557
620
  testMethodName = t._testMethodName
558
621
  # get the test method name and check if it's a property
559
622
  testMethod = getattr(t, testMethodName)
@@ -564,6 +627,8 @@ class KeaTestRunner(TextTestRunner):
564
627
  # save it into allProperties for PBT
565
628
  self.allProperties[testMethodName] = t
566
629
  print(f"[INFO] Load property: {getFullPropName(t)}", flush=True)
630
+ # Print errors caused by ImportError
631
+ _result.printErrors()
567
632
 
568
633
  @property
569
634
  def _blockWidgetFuncs(self):
@@ -644,7 +709,7 @@ class KeaTestRunner(TextTestRunner):
644
709
 
645
710
  if preconds_pass(preconds):
646
711
  try:
647
- _widgets = func(self.options.Driver.getStaticChecker())
712
+ _widgets = func(U2Driver.getStaticChecker())
648
713
  _widgets = _widgets if isinstance(_widgets, list) else [_widgets]
649
714
  for w in _widgets:
650
715
  if isinstance(w, (StaticU2UiObject, StaticXpathUiObject)):
@@ -690,4 +755,189 @@ class KeaTestRunner(TextTestRunner):
690
755
  if self.options.Driver:
691
756
  self.options.Driver.tearDown()
692
757
 
693
- self._generate_bug_report()
758
+ if self.options.agent == "u2":
759
+ self._generate_bug_report()
760
+
761
+
762
+ class KeaTextTestResult(BetterConsoleLogExtensionMixin, TextTestResult):
763
+
764
+ @property
765
+ def wasFail(self):
766
+ return self._wasFail
767
+
768
+ def addError(self, test, err):
769
+ self._wasFail = True
770
+ return super().addError(test, err)
771
+
772
+ def addFailure(self, test, err):
773
+ self._wasFail = True
774
+ return super().addFailure(test, err)
775
+
776
+ def addSuccess(self, test):
777
+ self._wasFail = False
778
+ return super().addSuccess(test)
779
+
780
+ def addSkip(self, test, reason):
781
+ self._wasFail = False
782
+ return super().addSkip(test, reason)
783
+
784
+ def addExpectedFailure(self, test, err):
785
+ self._wasFail = False
786
+ return super().addExpectedFailure(test, err)
787
+
788
+ def addUnexpectedSuccess(self, test):
789
+ self._wasFail = False
790
+ return super().addUnexpectedSuccess(test)
791
+
792
+
793
+ class HybridTestRunner(TextTestRunner, KeaOptionSetter):
794
+
795
+ allTestCases: Dict[str, Tuple[TestCase, bool]]
796
+ _common_teardown_func = None
797
+ resultclass = KeaTextTestResult
798
+
799
+ def __init__(self, stream = None, descriptions = True, verbosity = 1, failfast = False, buffer = False, resultclass = None, warnings = None, *, tb_locals = False):
800
+ super().__init__(stream, descriptions, verbosity, failfast, buffer, resultclass, warnings, tb_locals=tb_locals)
801
+ hybrid_mode.set(True)
802
+ self.hybrid_report_dirs = []
803
+
804
+ def run(self, test):
805
+
806
+ self.allTestCases = dict()
807
+ self.collectAllTestCases(test)
808
+ if len(self.allTestCases) == 0:
809
+ logger.warning("[Warning] No test case has been found.")
810
+
811
+ result: KeaTextTestResult = self._makeResult()
812
+ registerResult(result)
813
+ result.failfast = self.failfast
814
+ result.buffer = self.buffer
815
+ result.tb_locals = self.tb_locals
816
+ with warnings.catch_warnings():
817
+ if self.warnings:
818
+ # if self.warnings is set, use it to filter all the warnings
819
+ warnings.simplefilter(self.warnings)
820
+ # if the filter is 'default' or 'always', special-case the
821
+ # warnings from the deprecated unittest methods to show them
822
+ # no more than once per module, because they can be fairly
823
+ # noisy. The -Wd and -Wa flags can be used to bypass this
824
+ # only when self.warnings is None.
825
+ if self.warnings in ["default", "always"]:
826
+ warnings.filterwarnings(
827
+ "module",
828
+ category=DeprecationWarning,
829
+ message=r"Please use assert\w+ instead.",
830
+ )
831
+
832
+ hybrid_test_count = 0
833
+ for testCaseName, test in self.allTestCases.items():
834
+ test, isInterruptable = test, getattr(test, "isInterruptable", False)
835
+
836
+ # Dependency Injection. driver when doing scripts
837
+ self.scriptDriver = U2Driver.getScriptDriver(mode="direct")
838
+ setattr(test, self.options.driverName, self.scriptDriver)
839
+ logger.info("Executing unittest testCase %s." % testCaseName)
840
+
841
+ try:
842
+ test._common_setUp()
843
+ ret: KeaTextTestResult = test(result)
844
+ if ret.wasFail:
845
+ logger.error(f"Fail when running test.")
846
+ if isInterruptable and not ret.wasFail:
847
+ logger.info(f"Launch fastbot after interruptable script.")
848
+ hybrid_test_count += 1
849
+ hybrid_test_options = self.options.getKeaTestOptions(hybrid_test_count)
850
+
851
+ # Track the sub-report directory for later merging
852
+ self.hybrid_report_dirs.append(hybrid_test_options.output_dir)
853
+
854
+ argv = ["python3 -m unittest"] + hybrid_test_options.propertytest_args
855
+ KeaTestRunner.setOptions(hybrid_test_options)
856
+ unittest_main(module=None, argv=argv, testRunner=KeaTestRunner, exit=False)
857
+
858
+ finally:
859
+ test._common_tearDown()
860
+ result.printErrors()
861
+
862
+ # Auto-merge all hybrid test reports after all tests complete
863
+ if len(self.hybrid_report_dirs) > 0:
864
+ self._merge_hybrid_reports()
865
+
866
+ return result
867
+
868
+ def _merge_hybrid_reports(self):
869
+ """
870
+ Merge all hybrid test reports into a single merged report
871
+ """
872
+ try:
873
+ from kea2.report_merger import TestReportMerger
874
+
875
+ if len(self.hybrid_report_dirs) < 2:
876
+ logger.info("Only one hybrid test report generated, skipping merge.")
877
+ return
878
+
879
+ main_output_dir = self.options.output_dir
880
+
881
+ merger = TestReportMerger()
882
+ merged_dir = merger.merge_reports(
883
+ result_paths=self.hybrid_report_dirs,
884
+ output_dir=main_output_dir
885
+ )
886
+
887
+ merge_summary = merger.get_merge_summary()
888
+ except Exception as e:
889
+ logger.error(f"Error merging hybrid test reports: {e}")
890
+
891
+ def collectAllTestCases(self, test: TestSuite):
892
+ """collect all the properties to prepare for PBT
893
+ """
894
+
895
+ def iter_tests(suite):
896
+ for test in suite:
897
+ if isinstance(test, TestSuite):
898
+ yield from iter_tests(test)
899
+ else:
900
+ yield test
901
+
902
+ funcs = loadFuncsFromFile(getProjectRoot() / "configs" / "teardown.py")
903
+ setUp = funcs.get("setUp", None)
904
+ tearDown = funcs.get("tearDown", None)
905
+ if setUp is None:
906
+ raise ValueError("setUp function not found in teardown.py.")
907
+ if tearDown is None:
908
+ raise ValueError("tearDown function not found in teardown.py.")
909
+
910
+ # Traverse the TestCase to get all properties
911
+ for t in iter_tests(test):
912
+
913
+ def dummy(self): ...
914
+ # remove the hook func in its TestCase
915
+ t.setUp = types.MethodType(dummy, t)
916
+ t.tearDown = types.MethodType(dummy, t)
917
+ t._common_setUp = types.MethodType(setUp, t)
918
+ t._common_tearDown = types.MethodType(tearDown, t)
919
+
920
+ # check if it's interruptable (reflection)
921
+ testMethodName = t._testMethodName
922
+ testMethod = getattr(t, testMethodName)
923
+ isInterruptable = hasattr(testMethod, INTERRUPTABLE_MARKER)
924
+
925
+ # save it into allTestCases, if interruptable, mark as true
926
+ setattr(t, "isInterruptable", isInterruptable)
927
+ self.allTestCases[testMethodName] = t
928
+ logger.info(f"Load TestCase: {getFullPropName(t)} , interruptable: {t.isInterruptable}")
929
+
930
+ def __del__(self):
931
+ """tearDown method. Cleanup the env.
932
+ """
933
+ if self.options.Driver:
934
+ self.options.Driver.tearDown()
935
+
936
+
937
+ def kea2_breakpoint():
938
+ """kea2 entrance. Call this function in TestCase.
939
+ Kea2 will automatically switch to Kea2 Test in kea2_breakpoint in HybridTest mode.
940
+ The normal launch in unittest will not be affected.
941
+ """
942
+ if hybrid_mode.get():
943
+ raise SkipTest("Skip the test after the breakpoint and run kea2 in hybrid mode.")