orionis-0.591.0-py3-none-any.whl → orionis-0.592.0-py3-none-any.whl
This diff shows the contents of publicly available package versions as they were released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their respective public registries.
- orionis/metadata/framework.py +1 -1
- orionis/test/contracts/unit_test.py +0 -81
- orionis/test/core/unit_test.py +732 -602
- orionis/test/output/printer.py +173 -118
- orionis/test/records/logs.py +223 -83
- orionis/test/view/render.py +45 -17
- {orionis-0.591.0.dist-info → orionis-0.592.0.dist-info}/METADATA +1 -1
- {orionis-0.591.0.dist-info → orionis-0.592.0.dist-info}/RECORD +11 -11
- {orionis-0.591.0.dist-info → orionis-0.592.0.dist-info}/WHEEL +0 -0
- {orionis-0.591.0.dist-info → orionis-0.592.0.dist-info}/licenses/LICENCE +0 -0
- {orionis-0.591.0.dist-info → orionis-0.592.0.dist-info}/top_level.txt +0 -0
orionis/test/core/unit_test.py
CHANGED
@@ -6,7 +6,6 @@ import time
|
|
6
6
|
import traceback
|
7
7
|
import unittest
|
8
8
|
from concurrent.futures import ThreadPoolExecutor, as_completed
|
9
|
-
from contextlib import redirect_stdout, redirect_stderr
|
10
9
|
from datetime import datetime
|
11
10
|
from importlib import import_module
|
12
11
|
from os import walk
|
@@ -42,6 +41,7 @@ from orionis.test.validators import (
|
|
42
41
|
ValidWorkers,
|
43
42
|
)
|
44
43
|
from orionis.test.view.render import TestingResultRender
|
44
|
+
import inspect
|
45
45
|
|
46
46
|
class UnitTest(IUnitTest):
|
47
47
|
"""
|
@@ -104,18 +104,24 @@ class UnitTest(IUnitTest):
|
|
104
104
|
|
105
105
|
# Initialize the test suite to hold discovered tests
|
106
106
|
self.__suite = unittest.TestSuite()
|
107
|
+
self.__flatten_test_suite: Optional[List[unittest.TestCase]] = None
|
107
108
|
|
108
109
|
# List to store imported test modules
|
109
|
-
self.
|
110
|
+
self.__imported_modules: List = []
|
110
111
|
|
111
|
-
#
|
112
|
-
self.
|
112
|
+
# Sets to track discovered test cases, modules, and IDs
|
113
|
+
self.__discovered_test_cases: set = set()
|
114
|
+
self.__discovered_test_modules: set = set()
|
115
|
+
self.__discovered_test_ids: set = set()
|
113
116
|
|
114
117
|
# Variable to store the result summary after test execution
|
115
118
|
self.__result: Optional[Dict[str, Any]] = None
|
116
119
|
|
117
|
-
#
|
118
|
-
self.
|
120
|
+
# Define keywords to detect debugging or dump calls in test code
|
121
|
+
self.__debbug_keywords: list = ['self.dd', 'self.dump']
|
122
|
+
|
123
|
+
# Use live console output during test execution
|
124
|
+
self.__live_console: bool = True
|
119
125
|
|
120
126
|
# Load and set internal paths for test discovery and result storage
|
121
127
|
self.__loadPaths()
|
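The constructor now caches a flattened copy of the suite and records discovered test classes, modules, and IDs in separate sets so duplicates are ignored. A standalone sketch of that set-based bookkeeping (the `Demo` class is invented for the example and is not Orionis code):

```python
import unittest

class Demo(unittest.TestCase):
    def test_one(self):
        self.assertTrue(True)

    def test_two(self):
        self.assertTrue(True)

discovered_cases, discovered_modules, discovered_ids = set(), set(), set()

for case in (Demo("test_one"), Demo("test_two")):
    discovered_cases.add(case.__class__)     # unique TestCase classes
    discovered_modules.add(case.__module__)  # unique module names
    discovered_ids.add(case.id())            # unique fully qualified test IDs

print(len(discovered_cases), len(discovered_modules), len(discovered_ids))  # 1 1 2
```

Keying the sets on the class, the module name, and the test ID gives cheap de-duplication without affecting the order of the suite itself.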
@@ -129,27 +135,6 @@ class UnitTest(IUnitTest):
|
|
129
135
|
# Discover and load all test cases from the imported modules into the suite
|
130
136
|
self.__loadTests()
|
131
137
|
|
132
|
-
def __loadOutputBuffer(
|
133
|
-
self
|
134
|
-
) -> None:
|
135
|
-
"""
|
136
|
-
Load the output buffer from the last test execution.
|
137
|
-
|
138
|
-
This method retrieves the output buffer containing standard output generated during
|
139
|
-
the last test run. It stores the output as a string in an internal attribute for later access.
|
140
|
-
|
141
|
-
Parameters
|
142
|
-
----------
|
143
|
-
None
|
144
|
-
|
145
|
-
Returns
|
146
|
-
-------
|
147
|
-
None
|
148
|
-
This method does not return a value. It sets the internal output buffer attribute.
|
149
|
-
"""
|
150
|
-
self.__output_buffer = None
|
151
|
-
self.__error_buffer = None
|
152
|
-
|
153
138
|
def __loadPaths(
|
154
139
|
self
|
155
140
|
) -> None:
|
@@ -176,8 +161,8 @@ class UnitTest(IUnitTest):
|
|
176
161
|
"""
|
177
162
|
|
178
163
|
# Get the base test path and project root path from the application
|
179
|
-
self.__test_path = ValidBasePath(self.__app.path('tests'))
|
180
|
-
self.__root_path = ValidBasePath(self.__app.path('root'))
|
164
|
+
self.__test_path: Path = ValidBasePath(self.__app.path('tests'))
|
165
|
+
self.__root_path: Path = ValidBasePath(self.__app.path('root'))
|
181
166
|
|
182
167
|
# Compute the base path for test discovery, relative to the project root
|
183
168
|
# Remove the root path prefix and leading slash
|
@@ -323,7 +308,7 @@ class UnitTest(IUnitTest):
|
|
323
308
|
-------
|
324
309
|
None
|
325
310
|
This method does not return any value. It updates the internal state of the UnitTest instance by extending
|
326
|
-
the `self.
|
311
|
+
the `self.__imported_modules` list with the discovered and imported module objects.
|
327
312
|
|
328
313
|
Raises
|
329
314
|
------
|
@@ -337,7 +322,8 @@ class UnitTest(IUnitTest):
|
|
337
322
|
- Updates the internal module list for subsequent test discovery.
|
338
323
|
"""
|
339
324
|
|
340
|
-
|
325
|
+
# Use a set to avoid duplicate module imports
|
326
|
+
modules = set()
|
341
327
|
|
342
328
|
# If folder_path is '*', discover all modules matching the pattern in the test directory
|
343
329
|
if self.__folder_path == '*':
|
@@ -355,7 +341,248 @@ class UnitTest(IUnitTest):
|
|
355
341
|
modules.update(list_modules)
|
356
342
|
|
357
343
|
# Extend the internal module list with the sorted discovered modules
|
358
|
-
self.
|
344
|
+
self.__imported_modules.extend(modules)
|
345
|
+
|
346
|
+
def __listMatchingModules(
|
347
|
+
self,
|
348
|
+
root_path: Path,
|
349
|
+
test_path: Path,
|
350
|
+
custom_path: Path,
|
351
|
+
pattern_file: str
|
352
|
+
) -> List[str]:
|
353
|
+
"""
|
354
|
+
Discover and import Python modules containing test files that match a given filename pattern within a specified directory.
|
355
|
+
|
356
|
+
This method recursively searches for Python files in the directory specified by `test_path / custom_path` that match the provided
|
357
|
+
filename pattern. For each matching file, it constructs the module's fully qualified name relative to the project root, imports
|
358
|
+
the module using `importlib.import_module`, and adds it to a set to avoid duplicates. The method returns a list of imported module objects.
|
359
|
+
|
360
|
+
Parameters
|
361
|
+
----------
|
362
|
+
root_path : Path
|
363
|
+
The root directory of the project, used to calculate the relative module path.
|
364
|
+
test_path : Path
|
365
|
+
The base directory where tests are located.
|
366
|
+
custom_path : Path
|
367
|
+
The subdirectory within `test_path` to search for matching test files.
|
368
|
+
pattern_file : str
|
369
|
+
The filename pattern to match (supports '*' and '?' wildcards).
|
370
|
+
|
371
|
+
Returns
|
372
|
+
-------
|
373
|
+
List[module]
|
374
|
+
A list of imported Python module objects corresponding to test files that match the pattern.
|
375
|
+
|
376
|
+
Notes
|
377
|
+
-----
|
378
|
+
- Only files ending with `.py` are considered as Python modules.
|
379
|
+
- Duplicate modules are avoided by using a set.
|
380
|
+
- The module name is constructed by converting the relative path to dot notation.
|
381
|
+
- If the relative path is '.', only the module name is used.
|
382
|
+
- The method imports modules dynamically and returns them as objects.
|
383
|
+
"""
|
384
|
+
|
385
|
+
# Compile the filename pattern into a regular expression for matching.
|
386
|
+
regex = re.compile('^' + pattern_file.replace('*', '.*').replace('?', '.') + '$')
|
387
|
+
|
388
|
+
# Use a set to avoid duplicate module imports.
|
389
|
+
matched_folders = set()
|
390
|
+
|
391
|
+
# Walk through all files in the target directory.
|
392
|
+
for root, _, files in walk(str(test_path / custom_path) if custom_path else str(test_path)):
|
393
|
+
|
394
|
+
# Iterate through each file in the current directory
|
395
|
+
for file in files:
|
396
|
+
|
397
|
+
# Check if the file matches the pattern and is a Python file.
|
398
|
+
if regex.fullmatch(file) and file.endswith('.py'):
|
399
|
+
|
400
|
+
# Calculate the relative path from the root, convert to module notation.
|
401
|
+
ralative_path = str(Path(root).relative_to(root_path)).replace(os.sep, '.')
|
402
|
+
|
403
|
+
# Remove '.py' extension.
|
404
|
+
module_name = file[:-3]
|
405
|
+
|
406
|
+
# Build the full module name.
|
407
|
+
full_module = f"{ralative_path}.{module_name}" if ralative_path != '.' else module_name
|
408
|
+
|
409
|
+
# Import the module and add to the set.
|
410
|
+
matched_folders.add(import_module(ValidModuleName(full_module)))
|
411
|
+
|
412
|
+
# Return the list of imported module objects.
|
413
|
+
return list(matched_folders)
|
414
|
+
|
415
|
+
def __raiseIsFailedTest(
|
416
|
+
self,
|
417
|
+
test_case: unittest.TestCase
|
418
|
+
) -> None:
|
419
|
+
"""
|
420
|
+
Raises an error if the provided test case represents a failed import.
|
421
|
+
|
422
|
+
This method checks whether the given test case is an instance of a failed import
|
423
|
+
(typically indicated by the class name '_FailedTest'). If so, it extracts the error
|
424
|
+
details from the test case and raises an `OrionisTestValueError` with a descriptive
|
425
|
+
message, including the test case ID and error information. This helps to surface
|
426
|
+
import errors or missing dependencies during test discovery.
|
427
|
+
|
428
|
+
Parameters
|
429
|
+
----------
|
430
|
+
test_case : unittest.TestCase
|
431
|
+
The test case to check for failed import status.
|
432
|
+
|
433
|
+
Returns
|
434
|
+
-------
|
435
|
+
None
|
436
|
+
This method does not return a value. If the test case is a failed import,
|
437
|
+
an exception is raised.
|
438
|
+
|
439
|
+
Raises
|
440
|
+
------
|
441
|
+
OrionisTestValueError
|
442
|
+
If the test case is a failed import, with details about the failure.
|
443
|
+
|
444
|
+
Notes
|
445
|
+
-----
|
446
|
+
- The error message is extracted from the `_exception` attribute if present,
|
447
|
+
otherwise from the `_outcome.errors` or the string representation of the test case.
|
448
|
+
- This method is typically used during test discovery to halt execution and
|
449
|
+
provide immediate feedback about import failures.
|
450
|
+
"""
|
451
|
+
|
452
|
+
# Check if the test case is a failed import by its class name
|
453
|
+
if test_case.__class__.__name__ == "_FailedTest":
|
454
|
+
error_message = ""
|
455
|
+
|
456
|
+
# Try to extract the error message from known attributes
|
457
|
+
if hasattr(test_case, "_exception"):
|
458
|
+
error_message = str(test_case._exception)
|
459
|
+
elif hasattr(test_case, "_outcome") and hasattr(test_case._outcome, "errors"):
|
460
|
+
error_message = str(test_case._outcome.errors)
|
461
|
+
else:
|
462
|
+
error_message = str(test_case)
|
463
|
+
|
464
|
+
# Raise a value error with detailed information about the failure
|
465
|
+
raise OrionisTestValueError(
|
466
|
+
f"Failed to import test module: {test_case.id()}.\n"
|
467
|
+
f"Error details: {error_message}\n"
|
468
|
+
"Please check for import errors or missing dependencies."
|
469
|
+
)
|
470
|
+
|
471
|
+
def __raiseIfNotFoundTestMethod(
|
472
|
+
self,
|
473
|
+
test_case: unittest.TestCase
|
474
|
+
) -> None:
|
475
|
+
"""
|
476
|
+
Raises an error if the provided test case does not have a valid test method.
|
477
|
+
|
478
|
+
This method uses reflection to check whether the given `unittest.TestCase` instance
|
479
|
+
contains a valid test method. It retrieves the method name from the test case and
|
480
|
+
verifies that the method exists in the test case's class. If the method is missing
|
481
|
+
or invalid, an `OrionisTestValueError` is raised with a descriptive message.
|
482
|
+
|
483
|
+
Parameters
|
484
|
+
----------
|
485
|
+
test_case : unittest.TestCase
|
486
|
+
The test case instance to validate.
|
487
|
+
|
488
|
+
Returns
|
489
|
+
-------
|
490
|
+
None
|
491
|
+
This method does not return any value. If the test case is invalid, an exception is raised.
|
492
|
+
|
493
|
+
Raises
|
494
|
+
------
|
495
|
+
OrionisTestValueError
|
496
|
+
If the test case does not have a valid test method.
|
497
|
+
|
498
|
+
Notes
|
499
|
+
-----
|
500
|
+
- Uses `ReflectionInstance` to retrieve the test method name.
|
501
|
+
- Checks for both missing method names and missing attributes in the test case class.
|
502
|
+
- Provides detailed error information including test case ID, class name, and module name.
|
503
|
+
"""
|
504
|
+
|
505
|
+
# Use reflection to get the test method name
|
506
|
+
rf_instance = ReflectionInstance(test_case)
|
507
|
+
method_name = rf_instance.getAttribute("_testMethodName")
|
508
|
+
|
509
|
+
# Check for missing or invalid test method
|
510
|
+
if not method_name or not hasattr(test_case.__class__, method_name):
|
511
|
+
class_name = test_case.__class__.__name__
|
512
|
+
module_name = getattr(test_case, "__module__", "unknown")
|
513
|
+
|
514
|
+
# Raise an error with detailed information
|
515
|
+
raise OrionisTestValueError(
|
516
|
+
f"Test case '{test_case.id()}' in class '{class_name}' (module '{module_name}') "
|
517
|
+
f"does not have a valid test method '{method_name}'. "
|
518
|
+
"Please ensure the test case is correctly defined and contains valid test methods."
|
519
|
+
)
|
520
|
+
|
521
|
+
def __isDecoratedMethod(
|
522
|
+
self,
|
523
|
+
test_case: unittest.TestCase
|
524
|
+
) -> bool:
|
525
|
+
"""
|
526
|
+
Determines whether the test method of a given test case is decorated (i.e., wrapped by one or more Python decorators).
|
527
|
+
|
528
|
+
This method inspects the test method associated with the provided `unittest.TestCase` instance to detect the presence of decorators.
|
529
|
+
It traverses the decorator chain by following the `__wrapped__` attribute, which is set by Python's `functools.wraps` or similar mechanisms.
|
530
|
+
Decorators are identified by the existence of the `__wrapped__` attribute, and their names are collected from the `__qualname__` or `__name__` attributes.
|
531
|
+
|
532
|
+
Parameters
|
533
|
+
----------
|
534
|
+
test_case : unittest.TestCase
|
535
|
+
The test case instance whose test method will be checked for decorators.
|
536
|
+
|
537
|
+
Returns
|
538
|
+
-------
|
539
|
+
bool
|
540
|
+
True if the test method has one or more decorators applied (i.e., if any decorators are found in the chain).
|
541
|
+
False if the test method is not decorated or if no test method is found.
|
542
|
+
|
543
|
+
Notes
|
544
|
+
-----
|
545
|
+
- The method checks for decorators by traversing the `__wrapped__` attribute chain.
|
546
|
+
- Decorator names are collected for informational purposes but are not returned.
|
547
|
+
- If the test method is not decorated, or if no test method is found, the method returns False.
|
548
|
+
- This method does not modify the test case or its method; it only inspects for decoration.
|
549
|
+
"""
|
550
|
+
|
551
|
+
# Retrieve the test method from the test case's class using the test method name
|
552
|
+
test_method = getattr(test_case.__class__, getattr(test_case, "_testMethodName"), None)
|
553
|
+
|
554
|
+
# List to store decorator names found during traversal
|
555
|
+
decorators = []
|
556
|
+
|
557
|
+
# Check if the method has the __wrapped__ attribute, indicating it is decorated
|
558
|
+
if hasattr(test_method, '__wrapped__'):
|
559
|
+
|
560
|
+
# Start with the outermost decorated method
|
561
|
+
original = test_method
|
562
|
+
|
563
|
+
# Traverse the decorator chain by following __wrapped__ attributes
|
564
|
+
while hasattr(original, '__wrapped__'):
|
565
|
+
|
566
|
+
# Collect decorator name information for tracking purposes
|
567
|
+
if hasattr(original, '__qualname__'):
|
568
|
+
|
569
|
+
# Prefer __qualname__ for detailed naming information
|
570
|
+
decorators.append(original.__qualname__)
|
571
|
+
|
572
|
+
elif hasattr(original, '__name__'):
|
573
|
+
|
574
|
+
# Fall back to __name__ if __qualname__ is not available
|
575
|
+
decorators.append(original.__name__)
|
576
|
+
|
577
|
+
# Move to the next level in the decorator chain
|
578
|
+
original = original.__wrapped__
|
579
|
+
|
580
|
+
# Return True if any decorators were found during the traversal
|
581
|
+
if decorators:
|
582
|
+
return True
|
583
|
+
|
584
|
+
# Return False if no decorators are found or if the method is not decorated
|
585
|
+
return False
|
359
586
|
|
360
587
|
def __loadTests(
|
361
588
|
self
|
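The helpers added above do three things: `__listMatchingModules` turns a shell-style filename pattern into a regular expression (the standard library's `fnmatch.translate` performs an equivalent conversion), `__raiseIsFailedTest` surfaces unittest's `_FailedTest` placeholders as explicit errors, and `__isDecoratedMethod` walks the `__wrapped__` chain to spot decorated test methods. The sketch below, with an invented `Demo` case and `sample_decorator`, shows why the `__wrapped__` check works: `functools.wraps` records the original callable on the wrapper.

```python
import functools
import unittest

def sample_decorator(func):
    @functools.wraps(func)  # functools.wraps sets wrapper.__wrapped__ = func
    def wrapper(*args, **kwargs):
        return func(*args, **kwargs)
    return wrapper

class Demo(unittest.TestCase):
    @sample_decorator
    def test_decorated(self):
        self.assertTrue(True)

    def test_plain(self):
        self.assertTrue(True)

def is_decorated(test_case: unittest.TestCase) -> bool:
    # Look up the method on the class; the wrapper exposes __wrapped__ if decorated.
    method = getattr(test_case.__class__, test_case._testMethodName, None)
    return hasattr(method, "__wrapped__")

print(is_decorated(Demo("test_decorated")))  # True
print(is_decorated(Demo("test_plain")))      # False
```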
@@ -368,9 +595,15 @@ class UnitTest(IUnitTest):
|
|
368
595
|
and adds the discovered tests to the main test suite. It also tracks the number of discovered
|
369
596
|
tests per module and raises detailed errors for import failures or missing tests.
|
370
597
|
|
598
|
+
Parameters
|
599
|
+
----------
|
600
|
+
None
|
601
|
+
|
371
602
|
Returns
|
372
603
|
-------
|
373
604
|
None
|
605
|
+
This method does not return any value. It updates the internal test suite and
|
606
|
+
discovered tests metadata.
|
374
607
|
|
375
608
|
Raises
|
376
609
|
------
|
@@ -386,60 +619,77 @@ class UnitTest(IUnitTest):
|
|
386
619
|
"""
|
387
620
|
try:
|
388
621
|
|
389
|
-
#
|
390
|
-
|
391
|
-
|
392
|
-
|
393
|
-
|
394
|
-
|
395
|
-
|
396
|
-
|
397
|
-
|
398
|
-
|
399
|
-
for
|
400
|
-
|
401
|
-
|
402
|
-
|
403
|
-
|
404
|
-
|
405
|
-
|
406
|
-
|
407
|
-
|
622
|
+
# Lists to categorize tests with and without debugger calls
|
623
|
+
normal_tests = []
|
624
|
+
debug_tests = []
|
625
|
+
|
626
|
+
# Use a progress bar to indicate module loading status
|
627
|
+
with self.__printer.progressBar() as progress:
|
628
|
+
|
629
|
+
# Set total steps for the progress bar
|
630
|
+
steps = len(self.__imported_modules) + 1
|
631
|
+
|
632
|
+
# Add a task to the progress bar for loading modules
|
633
|
+
task = progress.add_task("Loading test modules...", total=steps)
|
634
|
+
|
635
|
+
# Print a newline for better console formatting
|
636
|
+
self.__printer.line(1)
|
637
|
+
|
638
|
+
# Iterate through all imported test modules
|
639
|
+
for test_module in self.__imported_modules:
|
640
|
+
|
641
|
+
# Load all tests from the current module using the unittest loader
|
642
|
+
module_suite = self.__loader.loadTestsFromModule(test_module)
|
643
|
+
|
644
|
+
# Flatten the suite to get individual test cases
|
645
|
+
flat_tests = self.__flattenTestSuite(module_suite)
|
646
|
+
|
647
|
+
# Iterate through each test case
|
648
|
+
for test in flat_tests:
|
649
|
+
|
650
|
+
# Raise an error if the test case is a failed import
|
651
|
+
self.__raiseIsFailedTest(test)
|
652
|
+
|
653
|
+
# Raise an error if the test case does not have a valid test method
|
654
|
+
self.__raiseIfNotFoundTestMethod(test)
|
655
|
+
|
656
|
+
# Add the test case to the discovered tests list
|
657
|
+
self.__discovered_test_cases.add(test.__class__)
|
658
|
+
|
659
|
+
# Track the module name of the discovered test case
|
660
|
+
self.__discovered_test_modules.add(test.__module__)
|
661
|
+
|
662
|
+
# Track the test ID of the discovered test case
|
663
|
+
self.__discovered_test_ids.add(test.id())
|
664
|
+
|
665
|
+
# Categorize and resolve test dependencies efficiently
|
666
|
+
target_list = debug_tests if self.__withDebugger(test) else normal_tests
|
667
|
+
resolved_test = test
|
668
|
+
if not self.__isDecoratedMethod(test):
|
669
|
+
resolved_test = self.__resolveTestDependencies(test)
|
670
|
+
target_list.append(resolved_test)
|
671
|
+
|
672
|
+
# If no tests are found, raise an error
|
673
|
+
if not flat_tests:
|
408
674
|
raise OrionisTestValueError(
|
409
|
-
f"
|
410
|
-
|
411
|
-
"Please check for import errors or missing dependencies."
|
675
|
+
f"No tests found in module '{test_module.__name__}'. "
|
676
|
+
"Please ensure that the module contains valid unittest.TestCase classes with test methods."
|
412
677
|
)
|
413
678
|
|
414
|
-
|
415
|
-
|
416
|
-
|
417
|
-
# If a test name pattern is provided, filter tests by name
|
418
|
-
if self.__test_name_pattern:
|
419
|
-
valid_suite = self.__filterTestsByName(
|
420
|
-
suite=valid_suite,
|
421
|
-
pattern=self.__test_name_pattern
|
422
|
-
)
|
679
|
+
# Update the progress bar after processing each module
|
680
|
+
progress.advance(task, advance=1)
|
423
681
|
|
424
|
-
#
|
425
|
-
|
426
|
-
raise OrionisTestValueError(
|
427
|
-
f"No tests found in module '{test_module.__name__}' matching file pattern '{self.__pattern}'"
|
428
|
-
+ (f", test name pattern '{self.__test_name_pattern}'" if self.__test_name_pattern else "")
|
429
|
-
+ ". Please check your patterns and test files."
|
430
|
-
)
|
682
|
+
# Add debug tests first
|
683
|
+
self.__suite.addTests(debug_tests)
|
431
684
|
|
432
|
-
#
|
433
|
-
self.__suite.addTests(
|
685
|
+
# Then add normal tests
|
686
|
+
self.__suite.addTests(normal_tests)
|
434
687
|
|
435
|
-
#
|
436
|
-
|
688
|
+
# Flatten the entire suite for easier access later
|
689
|
+
self.__flatten_test_suite = self.__flattenTestSuite(self.__suite)
|
437
690
|
|
438
|
-
#
|
439
|
-
|
440
|
-
"module": test_module.__name__,
|
441
|
-
"test_count": test_count,
|
442
|
-
})
|
691
|
+
# Finalize the progress bar
|
692
|
+
progress.update(task, completed=steps)
|
443
693
|
|
444
694
|
except ImportError as e:
|
445
695
|
|
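The reworked `__loadTests` loads each imported module through the standard `unittest` loader, flattens the nested suite into individual cases, validates them, and then queues tests that contain debug calls ahead of the rest. A minimal, self-contained sketch of the load-and-flatten step (the `ExampleTest` class is made up for the example):

```python
import unittest

class ExampleTest(unittest.TestCase):
    def test_addition(self):
        self.assertEqual(1 + 1, 2)

def flatten(suite):
    # Yield individual TestCase instances from an arbitrarily nested TestSuite.
    for item in suite:
        if isinstance(item, unittest.TestSuite):
            yield from flatten(item)
        else:
            yield item

loader = unittest.TestLoader()
suite = loader.loadTestsFromTestCase(ExampleTest)
print([case.id() for case in flatten(suite)])
# e.g. ['__main__.ExampleTest.test_addition'] when run as a script
```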
@@ -457,6 +707,70 @@ class UnitTest(IUnitTest):
|
|
457
707
|
"Ensure that the test files are valid and that there are no syntax errors or missing dependencies."
|
458
708
|
)
|
459
709
|
|
710
|
+
def __withDebugger(
|
711
|
+
self,
|
712
|
+
test_case: unittest.TestCase
|
713
|
+
) -> bool:
|
714
|
+
"""
|
715
|
+
Check if the given test case contains any debugging or dump calls.
|
716
|
+
|
717
|
+
This method inspects the source code of the provided test case to determine
|
718
|
+
whether it contains any lines that invoke debugging or dump functions, as
|
719
|
+
specified by the internal `__debbug_keywords` list (e.g., 'self.dd', 'self.dump').
|
720
|
+
It ignores commented lines and only considers actual code statements.
|
721
|
+
|
722
|
+
Parameters
|
723
|
+
----------
|
724
|
+
test_case : unittest.TestCase
|
725
|
+
The test case instance whose source code will be inspected.
|
726
|
+
|
727
|
+
Returns
|
728
|
+
-------
|
729
|
+
bool
|
730
|
+
True if any debug or dump keyword is found in the test case source code,
|
731
|
+
or if the internal debug flag (`__debbug`) is set. False otherwise.
|
732
|
+
|
733
|
+
Notes
|
734
|
+
-----
|
735
|
+
- The method uses reflection to retrieve the source code of the test case.
|
736
|
+
- Lines that are commented out are skipped during inspection.
|
737
|
+
- If an error occurs during source code retrieval or inspection, the method returns False.
|
738
|
+
"""
|
739
|
+
|
740
|
+
try:
|
741
|
+
|
742
|
+
# Retrieve the source code of the test case using reflection
|
743
|
+
method_name = getattr(test_case, "_testMethodName", None)
|
744
|
+
|
745
|
+
# If a method name is found, proceed to inspect its source code
|
746
|
+
if method_name:
|
747
|
+
|
748
|
+
# Get the source code of the specific test method
|
749
|
+
source = inspect.getsource(getattr(test_case, method_name))
|
750
|
+
|
751
|
+
# Check each line of the source code
|
752
|
+
for line in source.splitlines():
|
753
|
+
|
754
|
+
# Strip leading and trailing whitespace from the line
|
755
|
+
stripped = line.strip()
|
756
|
+
|
757
|
+
# Skip lines that are commented out
|
758
|
+
if stripped.startswith('#') or re.match(r'^\s*#', line):
|
759
|
+
continue
|
760
|
+
|
761
|
+
# If any debug keyword is present in the line, return True
|
762
|
+
if any(keyword in line for keyword in self.__debbug_keywords):
|
763
|
+
self.__live_console = False if self.__live_console is True else self.__live_console
|
764
|
+
return True
|
765
|
+
|
766
|
+
except Exception:
|
767
|
+
|
768
|
+
# If any error occurs during inspection, return False
|
769
|
+
return False
|
770
|
+
|
771
|
+
# No debug keywords found; return False
|
772
|
+
return False
|
773
|
+
|
460
774
|
def run(
|
461
775
|
self,
|
462
776
|
performance_counter: IPerformanceCounter
|
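`__withDebugger` decides whether live console output should be disabled by reading the test method's source with `inspect.getsource` and scanning non-comment lines for the dump keywords shown in the diff (`self.dd`, `self.dump`). A rough standalone sketch of the same idea; the function name and error handling here are illustrative rather than the package's API:

```python
import inspect
import unittest

DEBUG_KEYWORDS = ("self.dd", "self.dump")

def uses_debug_calls(test_case: unittest.TestCase) -> bool:
    method = getattr(test_case, test_case._testMethodName)
    try:
        source = inspect.getsource(method)
    except (OSError, TypeError):
        return False  # source not available (e.g. dynamically generated tests)
    for line in source.splitlines():
        stripped = line.strip()
        if stripped.startswith("#"):
            continue  # ignore commented-out calls
        if any(keyword in stripped for keyword in DEBUG_KEYWORDS):
            return True
    return False
```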
@@ -479,7 +793,7 @@ class UnitTest(IUnitTest):
|
|
479
793
|
performance_counter.start()
|
480
794
|
|
481
795
|
# Length of all tests in the suite
|
482
|
-
total_tests =
|
796
|
+
total_tests = self.getTestCount()
|
483
797
|
|
484
798
|
# If no tests are found, print a message and return early
|
485
799
|
if total_tests == 0:
|
@@ -493,21 +807,16 @@ class UnitTest(IUnitTest):
|
|
493
807
|
)
|
494
808
|
|
495
809
|
# Execute the test suite and capture result, output, and error buffers
|
496
|
-
result
|
497
|
-
|
498
|
-
|
810
|
+
result = self.__printer.executePanel(
|
811
|
+
func=self.__runSuite,
|
812
|
+
live_console=self.__live_console
|
499
813
|
)
|
500
814
|
|
501
|
-
# Store the captured output and error buffers as strings
|
502
|
-
self.__output_buffer = output_buffer.getvalue()
|
503
|
-
self.__error_buffer = error_buffer.getvalue()
|
504
|
-
|
505
815
|
# Calculate execution time in milliseconds
|
506
816
|
performance_counter.stop()
|
507
|
-
execution_time = performance_counter.getSeconds()
|
508
817
|
|
509
818
|
# Generate a summary of the test results
|
510
|
-
summary = self.__generateSummary(result,
|
819
|
+
summary = self.__generateSummary(result, performance_counter.getSeconds())
|
511
820
|
|
512
821
|
# Display the test results using the printer
|
513
822
|
self.__printer.displayResults(summary=summary)
|
@@ -523,296 +832,192 @@ class UnitTest(IUnitTest):
|
|
523
832
|
return summary
|
524
833
|
|
525
834
|
def __flattenTestSuite(
|
526
|
-
self,
|
527
|
-
suite: unittest.TestSuite
|
528
|
-
) -> List[unittest.TestCase]:
|
529
|
-
"""
|
530
|
-
Recursively flattens a unittest.TestSuite into a list of unique unittest.TestCase instances.
|
531
|
-
|
532
|
-
Parameters
|
533
|
-
----------
|
534
|
-
suite : unittest.TestSuite
|
535
|
-
The test suite to be flattened.
|
536
|
-
|
537
|
-
Returns
|
538
|
-
-------
|
539
|
-
List[unittest.TestCase]
|
540
|
-
A flat list containing unique unittest.TestCase instances extracted from the suite.
|
541
|
-
|
542
|
-
Notes
|
543
|
-
-----
|
544
|
-
Test uniqueness is determined by a shortened test identifier (the last two components of the test id).
|
545
|
-
This helps avoid duplicate test cases in the returned list.
|
546
|
-
"""
|
547
|
-
|
548
|
-
# Initialize an empty list to hold unique test cases and a set to track seen test IDs
|
549
|
-
tests = []
|
550
|
-
seen_ids = set()
|
551
|
-
|
552
|
-
# Recursive function to flatten the test suite
|
553
|
-
def _flatten(item):
|
554
|
-
if isinstance(item, unittest.TestSuite):
|
555
|
-
for sub_item in item:
|
556
|
-
_flatten(sub_item)
|
557
|
-
elif hasattr(item, "id"):
|
558
|
-
test_id = item.id()
|
559
|
-
|
560
|
-
# Use the last two components of the test id for uniqueness
|
561
|
-
parts = test_id.split('.')
|
562
|
-
if len(parts) >= 2:
|
563
|
-
short_id = '.'.join(parts[-2:])
|
564
|
-
else:
|
565
|
-
short_id = test_id
|
566
|
-
if short_id not in seen_ids:
|
567
|
-
seen_ids.add(short_id)
|
568
|
-
tests.append(item)
|
569
|
-
|
570
|
-
# Start the flattening process
|
571
|
-
_flatten(suite)
|
572
|
-
return tests
|
573
|
-
|
574
|
-
def __runSuite(
|
575
|
-
self
|
576
|
-
) -> Tuple[unittest.TestResult, io.StringIO, io.StringIO]:
|
577
|
-
"""
|
578
|
-
Executes the test suite according to the configured execution mode, capturing both standard output and error streams.
|
579
|
-
|
580
|
-
Returns
|
581
|
-
-------
|
582
|
-
tuple
|
583
|
-
result : unittest.TestResult
|
584
|
-
The result object containing the outcomes of the executed tests.
|
585
|
-
output_buffer : io.StringIO
|
586
|
-
Buffer capturing the standard output generated during test execution.
|
587
|
-
error_buffer : io.StringIO
|
588
|
-
Buffer capturing the standard error generated during test execution.
|
589
|
-
"""
|
590
|
-
|
591
|
-
# Initialize output and error buffers to capture test execution output
|
592
|
-
output_buffer = io.StringIO()
|
593
|
-
error_buffer = io.StringIO()
|
594
|
-
|
595
|
-
# Run tests in parallel mode using multiple workers
|
596
|
-
if self.__execution_mode == ExecutionMode.PARALLEL.value:
|
597
|
-
result = self.__runTestsInParallel(
|
598
|
-
output_buffer,
|
599
|
-
error_buffer
|
600
|
-
)
|
601
|
-
|
602
|
-
# Run tests sequentially
|
603
|
-
else:
|
604
|
-
result = self.__runTestsSequentially(
|
605
|
-
output_buffer,
|
606
|
-
error_buffer
|
607
|
-
)
|
608
|
-
|
609
|
-
# Return the result, output, and error buffers
|
610
|
-
return result, output_buffer, error_buffer
|
611
|
-
|
612
|
-
def __isFailedImport(
|
613
|
-
self,
|
614
|
-
test_case: unittest.TestCase
|
615
|
-
) -> bool:
|
616
|
-
"""
|
617
|
-
Check if the given test case is a failed import.
|
618
|
-
|
619
|
-
Parameters
|
620
|
-
----------
|
621
|
-
test_case : unittest.TestCase
|
622
|
-
The test case to check.
|
623
|
-
|
624
|
-
Returns
|
625
|
-
-------
|
626
|
-
bool
|
627
|
-
True if the test case is a failed import, False otherwise.
|
628
|
-
"""
|
629
|
-
|
630
|
-
return test_case.__class__.__name__ == "_FailedTest"
|
631
|
-
|
632
|
-
def __notFoundTestMethod(
|
633
|
-
self,
|
634
|
-
test_case: unittest.TestCase
|
635
|
-
) -> bool:
|
835
|
+
self,
|
836
|
+
suite: unittest.TestSuite
|
837
|
+
) -> List[unittest.TestCase]:
|
636
838
|
"""
|
637
|
-
|
839
|
+
Recursively flatten a unittest.TestSuite into a list of unique unittest.TestCase instances.
|
840
|
+
|
841
|
+
This method traverses the given test suite, recursively extracting all individual test cases,
|
842
|
+
while preserving their order and ensuring uniqueness by test ID. If a test name pattern is configured,
|
843
|
+
only test cases whose IDs match the regular expression are included.
|
638
844
|
|
639
845
|
Parameters
|
640
846
|
----------
|
641
|
-
|
642
|
-
The test
|
847
|
+
suite : unittest.TestSuite
|
848
|
+
The test suite to flatten.
|
643
849
|
|
644
850
|
Returns
|
645
851
|
-------
|
646
|
-
|
647
|
-
|
852
|
+
List[unittest.TestCase]
|
853
|
+
List of unique test case instances contained in the suite, optionally filtered by name pattern.
|
854
|
+
|
855
|
+
Raises
|
856
|
+
------
|
857
|
+
OrionisTestValueError
|
858
|
+
If the configured test name pattern is not a valid regular expression.
|
859
|
+
|
860
|
+
Notes
|
861
|
+
-----
|
862
|
+
- The returned list preserves the order in which test cases appear in the suite.
|
863
|
+
- If a test name pattern is set, only test cases matching the pattern are included.
|
864
|
+
- Uniqueness is enforced by test ID.
|
648
865
|
"""
|
866
|
+
# Determine if test name pattern filtering is enabled
|
867
|
+
regex = None
|
868
|
+
if self.__test_name_pattern:
|
869
|
+
try:
|
870
|
+
regex = re.compile(self.__test_name_pattern)
|
871
|
+
except re.error as e:
|
872
|
+
raise OrionisTestValueError(
|
873
|
+
f"The provided test name pattern is invalid: '{self.__test_name_pattern}'. "
|
874
|
+
f"Regular expression compilation error: {str(e)}. "
|
875
|
+
"Please check the pattern syntax and try again."
|
876
|
+
)
|
649
877
|
|
650
|
-
# Use
|
651
|
-
|
652
|
-
|
878
|
+
# Use an ordered dict to preserve order and uniqueness by test id
|
879
|
+
tests = {}
|
880
|
+
|
881
|
+
def _flatten(item):
|
882
|
+
if isinstance(item, unittest.TestSuite):
|
883
|
+
for sub_item in item:
|
884
|
+
_flatten(sub_item)
|
885
|
+
elif isinstance(item, unittest.TestCase):
|
886
|
+
test_id = item.id() if hasattr(item, "id") else None
|
887
|
+
if test_id and test_id not in tests:
|
888
|
+
if regex:
|
889
|
+
if regex.search(test_id):
|
890
|
+
tests[test_id] = item
|
891
|
+
else:
|
892
|
+
tests[test_id] = item
|
653
893
|
|
654
|
-
|
655
|
-
return
|
894
|
+
_flatten(suite)
|
895
|
+
return list(tests.values())
|
656
896
|
|
657
|
-
def
|
658
|
-
self
|
659
|
-
|
660
|
-
) -> bool:
|
897
|
+
def __runSuite(
|
898
|
+
self
|
899
|
+
) -> unittest.TestResult:
|
661
900
|
"""
|
662
|
-
|
901
|
+
Executes the test suite according to the configured execution mode, capturing both standard output and error streams.
|
663
902
|
|
664
|
-
This method
|
665
|
-
|
666
|
-
following the `__wrapped__` attribute to identify the presence of any decorators.
|
667
|
-
Decorated methods typically have a `__wrapped__` attribute that points to the
|
668
|
-
original unwrapped function.
|
903
|
+
This method determines whether to run the test suite sequentially or in parallel based on the configured execution mode.
|
904
|
+
It delegates execution to either `__runTestsSequentially` or `__runTestsInParallel`, and returns the aggregated test result.
|
669
905
|
|
670
906
|
Parameters
|
671
907
|
----------
|
672
|
-
|
673
|
-
The test case instance whose test method will be examined for decorators.
|
908
|
+
None
|
674
909
|
|
675
910
|
Returns
|
676
911
|
-------
|
677
|
-
|
678
|
-
|
679
|
-
|
912
|
+
unittest.TestResult
|
913
|
+
The aggregated result object containing the outcomes of all executed test cases, including
|
914
|
+
detailed per-test results, aggregated statistics, and error information.
|
680
915
|
|
681
916
|
Notes
|
682
917
|
-----
|
683
|
-
|
684
|
-
|
685
|
-
|
686
|
-
the method returns True.
|
918
|
+
- If the execution mode is set to parallel, tests are run concurrently using multiple workers.
|
919
|
+
- If the execution mode is sequential, tests are run one after another.
|
920
|
+
- The returned result object contains all test outcomes, including successes, failures, errors, skips, and custom metadata.
|
687
921
|
"""
|
688
922
|
|
689
|
-
#
|
690
|
-
|
691
|
-
|
692
|
-
|
693
|
-
decorators = []
|
694
|
-
|
695
|
-
# Check if the method has the __wrapped__ attribute, indicating it's decorated
|
696
|
-
if hasattr(test_method, '__wrapped__'):
|
697
|
-
# Start with the outermost decorated method
|
698
|
-
original = test_method
|
699
|
-
|
700
|
-
# Traverse the decorator chain by following __wrapped__ attributes
|
701
|
-
while hasattr(original, '__wrapped__'):
|
702
|
-
# Collect decorator name information for tracking purposes
|
703
|
-
if hasattr(original, '__qualname__'):
|
704
|
-
# Prefer __qualname__ as it provides more detailed naming information
|
705
|
-
decorators.append(original.__qualname__)
|
706
|
-
elif hasattr(original, '__name__'):
|
707
|
-
# Fall back to __name__ if __qualname__ is not available
|
708
|
-
decorators.append(original.__name__)
|
709
|
-
|
710
|
-
# Move to the next level in the decorator chain
|
711
|
-
original = original.__wrapped__
|
923
|
+
# Run tests in parallel mode using multiple workers if configured
|
924
|
+
if self.__execution_mode == ExecutionMode.PARALLEL.value:
|
925
|
+
# Execute tests concurrently and aggregate results
|
926
|
+
result = self.__runTestsInParallel()
|
712
927
|
|
713
|
-
#
|
714
|
-
|
715
|
-
|
928
|
+
# Otherwise, run tests sequentially
|
929
|
+
else:
|
930
|
+
# Execute tests one by one and aggregate results
|
931
|
+
result = self.__runTestsSequentially()
|
716
932
|
|
717
|
-
# Return
|
718
|
-
return
|
933
|
+
# Return the aggregated test result object
|
934
|
+
return result
|
719
935
|
|
720
|
-
def
|
721
|
-
self
|
936
|
+
def __resolveTestDependencies(
|
937
|
+
self,
|
938
|
+
test_case: unittest.TestCase
|
722
939
|
) -> unittest.TestSuite:
|
723
940
|
"""
|
724
|
-
|
941
|
+
Inject dependencies into a single test case if required, returning a TestSuite containing the resolved test case.
|
725
942
|
|
726
|
-
This method
|
727
|
-
|
728
|
-
|
729
|
-
|
943
|
+
This method uses reflection to inspect the test method's dependencies. If all dependencies are resolved,
|
944
|
+
it injects them using the application's resolver. If there are unresolved dependencies, the original test case
|
945
|
+
is returned as-is. Decorated methods and failed imports are also returned without modification. The returned
|
946
|
+
TestSuite contains the test case with dependencies injected if applicable.
|
947
|
+
|
948
|
+
Parameters
|
949
|
+
----------
|
950
|
+
test_case : unittest.TestCase
|
951
|
+
The test case instance to resolve dependencies for.
|
730
952
|
|
731
953
|
Returns
|
732
954
|
-------
|
733
955
|
unittest.TestSuite
|
734
|
-
A
|
956
|
+
A TestSuite containing the test case with dependencies injected if required.
|
957
|
+
If dependency injection is not possible or fails, the original test case is returned as-is within the suite.
|
735
958
|
|
736
959
|
Raises
|
737
960
|
------
|
738
961
|
OrionisTestValueError
|
739
|
-
If
|
740
|
-
"""
|
741
|
-
|
742
|
-
# Create a new TestSuite to hold the resolved test cases
|
743
|
-
flattened_suite = unittest.TestSuite()
|
962
|
+
If the test method has unresolved dependencies.
|
744
963
|
|
745
|
-
|
746
|
-
|
964
|
+
Notes
|
965
|
+
-----
|
966
|
+
- Uses reflection to determine method dependencies.
|
967
|
+
- If dependencies are resolved, injects them into the test method.
|
968
|
+
- If dependencies are unresolved or an error occurs, the original test case is returned.
|
969
|
+
- The returned value is always a unittest.TestSuite containing the test case (with or without injected dependencies).
|
970
|
+
"""
|
747
971
|
|
748
|
-
|
749
|
-
|
750
|
-
flattened_suite.addTest(test_case)
|
751
|
-
continue
|
972
|
+
# Create a new TestSuite to hold the resolved test case
|
973
|
+
suite = unittest.TestSuite()
|
752
974
|
|
753
|
-
|
754
|
-
if self.__notFoundTestMethod(test_case):
|
755
|
-
flattened_suite.addTest(test_case)
|
756
|
-
continue
|
975
|
+
try:
|
757
976
|
|
758
|
-
#
|
759
|
-
|
760
|
-
flattened_suite.addTest(test_case)
|
761
|
-
continue
|
977
|
+
# Get the reflection instance for the test case
|
978
|
+
rf_instance = ReflectionInstance(test_case)
|
762
979
|
|
763
|
-
|
980
|
+
# Get the test method name
|
981
|
+
method_name = getattr(test_case, "_testMethodName", None)
|
764
982
|
|
765
|
-
|
766
|
-
|
767
|
-
dependencies = rf_instance.getMethodDependencies(
|
768
|
-
method_name=getattr(test_case, "_testMethodName")
|
769
|
-
)
|
983
|
+
# Get method dependencies (resolved and unresolved)
|
984
|
+
dependencies = rf_instance.getMethodDependencies(method_name)
|
770
985
|
|
771
|
-
|
772
|
-
|
773
|
-
|
774
|
-
continue
|
986
|
+
# If there are unresolved dependencies, return the original test case as-is
|
987
|
+
if dependencies.unresolved:
|
988
|
+
return test_case
|
775
989
|
|
776
|
-
|
777
|
-
|
778
|
-
raise OrionisTestValueError(
|
779
|
-
f"Test method '{getattr(test_case, "_testMethodName")}' in class '{test_case.__class__.__name__}' has unresolved dependencies: {dependencies.unresolved}. "
|
780
|
-
"Please ensure all dependencies are correctly defined and available."
|
781
|
-
)
|
990
|
+
# If there are resolved dependencies, inject them into the test method
|
991
|
+
if dependencies.resolved:
|
782
992
|
|
783
|
-
# Get the
|
993
|
+
# Get the test class and original method
|
784
994
|
test_class = rf_instance.getClass()
|
785
|
-
original_method = getattr(test_class,
|
995
|
+
original_method = getattr(test_class, method_name)
|
786
996
|
|
787
|
-
# Resolve
|
788
|
-
|
789
|
-
rf_instance.getClassName(),
|
790
|
-
dependencies
|
791
|
-
)
|
997
|
+
# Resolve dependencies using the application container
|
998
|
+
resolved_args = self.__app.resolveDependencyArguments(rf_instance.getClassName(), dependencies)
|
792
999
|
|
793
|
-
#
|
794
|
-
def
|
795
|
-
|
796
|
-
return original_test(self_instance, **resolved_args)
|
797
|
-
return wrapper
|
1000
|
+
# Define a wrapper function to inject dependencies
|
1001
|
+
def wrapper(self_instance):
|
1002
|
+
return original_method(self_instance, **resolved_args)
|
798
1003
|
|
799
1004
|
# Bind the wrapped method to the test case instance
|
800
|
-
|
801
|
-
|
802
|
-
setattr(test_case, getattr(test_case, "_testMethodName"), bound_method)
|
803
|
-
flattened_suite.addTest(test_case)
|
1005
|
+
bound_method = wrapper.__get__(test_case, test_case.__class__)
|
1006
|
+
setattr(test_case, method_name, bound_method)
|
804
1007
|
|
805
|
-
|
1008
|
+
# Add the test case to the suite (with injected dependencies if applicable)
|
1009
|
+
suite.addTest(test_case)
|
806
1010
|
|
807
|
-
|
808
|
-
|
1011
|
+
# Return the TestSuite containing the resolved test case
|
1012
|
+
return suite
|
809
1013
|
|
810
|
-
|
1014
|
+
except Exception as e:
|
1015
|
+
|
1016
|
+
# On any error, return the original test case without injection
|
1017
|
+
return test_case
|
811
1018
|
|
812
1019
|
def __runTestsSequentially(
|
813
|
-
self
|
814
|
-
output_buffer: io.StringIO,
|
815
|
-
error_buffer: io.StringIO
|
1020
|
+
self
|
816
1021
|
) -> unittest.TestResult:
|
817
1022
|
"""
|
818
1023
|
Executes all test cases in the test suite sequentially, capturing standard output and error streams.
|
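`__resolveTestDependencies` injects container-resolved arguments by wrapping the original test method and binding the wrapper to the specific instance through the descriptor protocol. The standalone sketch below reproduces just that binding trick; `FakeService` and the hard-coded `resolved_args` stand in for whatever the application container would actually resolve.

```python
import io
import unittest

class FakeService:
    def value(self):
        return 42

class Demo(unittest.TestCase):
    def test_needs_service(self, service=None):
        self.assertEqual(service.value(), 42)

case = Demo("test_needs_service")
original = Demo.test_needs_service
resolved_args = {"service": FakeService()}  # stand-in for container-resolved dependencies

def wrapper(self_instance):
    # Call the original test method with the resolved arguments injected.
    return original(self_instance, **resolved_args)

# Functions are descriptors: __get__ binds the wrapper to this specific instance,
# and setting it as an instance attribute shadows the class-level test method.
setattr(case, "test_needs_service", wrapper.__get__(case, Demo))

result = unittest.TextTestRunner(stream=io.StringIO()).run(unittest.TestSuite([case]))
print(result.wasSuccessful())  # True
```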
@@ -842,27 +1047,20 @@ class UnitTest(IUnitTest):
|
|
842
1047
|
"""
|
843
1048
|
|
844
1049
|
# Initialize output and error buffers to capture test execution output
|
845
|
-
result = None
|
1050
|
+
result: unittest.TestResult = None
|
846
1051
|
|
847
1052
|
# Iterate through all resolved test cases in the suite
|
848
|
-
for case in self.
|
1053
|
+
for case in self.__flatten_test_suite:
|
849
1054
|
|
850
|
-
|
851
|
-
|
852
|
-
|
853
|
-
|
854
|
-
)
|
1055
|
+
runner = unittest.TextTestRunner(
|
1056
|
+
stream=io.StringIO(),
|
1057
|
+
verbosity=self.__verbosity,
|
1058
|
+
failfast=self.__fail_fast,
|
1059
|
+
resultclass=self.__customResultClass()
|
1060
|
+
)
|
855
1061
|
|
856
|
-
#
|
857
|
-
|
858
|
-
runner = unittest.TextTestRunner(
|
859
|
-
stream=output_buffer,
|
860
|
-
verbosity=self.__verbosity,
|
861
|
-
failfast=self.__fail_fast,
|
862
|
-
resultclass=self.__customResultClass()
|
863
|
-
)
|
864
|
-
# Run the current test case and obtain the result
|
865
|
-
single_result: IOrionisTestResult = runner.run(unittest.TestSuite([case]))
|
1062
|
+
# Run the current test case and obtain the result
|
1063
|
+
single_result: IOrionisTestResult = runner.run(unittest.TestSuite([case]))
|
866
1064
|
|
867
1065
|
# Print the result of the current test case using the printer
|
868
1066
|
self.__printer.unittestResult(single_result.test_results[0])
|
@@ -877,41 +1075,32 @@ class UnitTest(IUnitTest):
|
|
877
1075
|
return result
|
878
1076
|
|
879
1077
|
def __runTestsInParallel(
|
880
|
-
self
|
881
|
-
output_buffer: io.StringIO,
|
882
|
-
error_buffer: io.StringIO
|
1078
|
+
self
|
883
1079
|
) -> unittest.TestResult:
|
884
1080
|
"""
|
885
1081
|
Executes all test cases in the test suite concurrently using a thread pool and aggregates their results.
|
886
1082
|
|
887
1083
|
Parameters
|
888
1084
|
----------
|
889
|
-
|
890
|
-
Buffer to capture the standard output generated during test execution.
|
891
|
-
error_buffer : io.StringIO
|
892
|
-
Buffer to capture the standard error generated during test execution.
|
1085
|
+
None
|
893
1086
|
|
894
1087
|
Returns
|
895
1088
|
-------
|
896
1089
|
unittest.TestResult
|
897
|
-
|
1090
|
+
A combined `unittest.TestResult` object containing the outcomes of all executed test cases.
|
1091
|
+
This includes detailed per-test results, aggregated statistics, error information, and custom metadata.
|
898
1092
|
|
899
1093
|
Notes
|
900
1094
|
-----
|
901
|
-
Each test case is executed in a separate thread using
|
902
|
-
Results from all threads are merged into a single result object.
|
903
|
-
Output and error streams are redirected for
|
904
|
-
If fail-fast is enabled, execution stops as soon as a failure is detected.
|
1095
|
+
- Each test case is executed in a separate thread using `ThreadPoolExecutor`.
|
1096
|
+
- Results from all threads are merged into a single aggregated result object.
|
1097
|
+
- Output and error streams are redirected for each test case.
|
1098
|
+
- If fail-fast is enabled, execution stops as soon as a failure is detected and remaining tests are cancelled.
|
1099
|
+
- The returned result object contains all test outcomes, including successes, failures, errors, skips, and custom metadata.
|
905
1100
|
"""
|
906
1101
|
|
907
|
-
#
|
908
|
-
|
909
|
-
|
910
|
-
# Get the custom result class for enhanced test tracking
|
911
|
-
result_class = self.__customResultClass()
|
912
|
-
|
913
|
-
# Create a combined result object to aggregate all individual test results
|
914
|
-
combined_result = result_class(io.StringIO(), descriptions=True, verbosity=self.__verbosity)
|
1102
|
+
# Initialize the aggregated result object
|
1103
|
+
result: unittest.TestResult = None
|
915
1104
|
|
916
1105
|
# Define a function to run a single test case and return its result
|
917
1106
|
def run_single_test(test):
|
@@ -919,36 +1108,40 @@ class UnitTest(IUnitTest):
|
|
919
1108
|
stream=io.StringIO(),
|
920
1109
|
verbosity=self.__verbosity,
|
921
1110
|
failfast=False,
|
922
|
-
resultclass=
|
1111
|
+
resultclass=self.__customResultClass()
|
923
1112
|
)
|
924
1113
|
return runner.run(unittest.TestSuite([test]))
|
925
1114
|
|
926
|
-
#
|
927
|
-
with
|
1115
|
+
# Create a thread pool with the configured number of workers
|
1116
|
+
with ThreadPoolExecutor(max_workers=self.__max_workers) as executor:
|
928
1117
|
|
929
|
-
#
|
930
|
-
|
1118
|
+
# Submit all test cases to the thread pool for execution
|
1119
|
+
futures = [executor.submit(run_single_test, test) for test in self.__flatten_test_suite]
|
931
1120
|
|
932
|
-
|
933
|
-
|
1121
|
+
# As each test completes, merge its result into the combined result
|
1122
|
+
for future in as_completed(futures):
|
934
1123
|
|
935
|
-
#
|
936
|
-
|
937
|
-
test_result = future.result()
|
938
|
-
self.__mergeTestResults(combined_result, test_result)
|
1124
|
+
# Get the result of the completed test case
|
1125
|
+
single_result: IOrionisTestResult = future.result()
|
939
1126
|
|
940
|
-
|
941
|
-
|
942
|
-
|
943
|
-
f.cancel()
|
944
|
-
break
|
1127
|
+
# Print the result of the current test case using the printer
|
1128
|
+
# Ensure print goes to the real stdout even inside redirected context
|
1129
|
+
self.__printer.unittestResult(single_result.test_results[0])
|
945
1130
|
|
946
|
-
|
947
|
-
|
948
|
-
|
1131
|
+
# Merge the result of the current test case into the aggregated result
|
1132
|
+
if result is None:
|
1133
|
+
result = single_result
|
1134
|
+
else:
|
1135
|
+
self.__mergeTestResults(result, single_result)
|
1136
|
+
|
1137
|
+
# If fail-fast is enabled and a failure occurs, cancel remaining tests
|
1138
|
+
if self.__fail_fast and not result.wasSuccessful():
|
1139
|
+
for f in futures:
|
1140
|
+
f.cancel()
|
1141
|
+
break
|
949
1142
|
|
950
1143
|
# Return the aggregated result containing all test outcomes
|
951
|
-
return
|
1144
|
+
return result
|
952
1145
|
|
953
1146
|
def __mergeTestResults(
|
954
1147
|
self,
|
@@ -956,47 +1149,51 @@ class UnitTest(IUnitTest):
|
|
956
1149
|
individual_result: unittest.TestResult
|
957
1150
|
) -> None:
|
958
1151
|
"""
|
959
|
-
Merge the results of two unittest.TestResult objects into a single result.
|
1152
|
+
Merge the results of two unittest.TestResult objects into a single aggregated result.
|
1153
|
+
|
1154
|
+
This method updates the `combined_result` in place by aggregating test statistics and detailed results
|
1155
|
+
from `individual_result`. It ensures that all test outcomes, including failures, errors, skipped tests,
|
1156
|
+
expected failures, unexpected successes, and custom test result entries, are merged for comprehensive reporting.
|
960
1157
|
|
961
1158
|
Parameters
|
962
1159
|
----------
|
963
1160
|
combined_result : unittest.TestResult
|
964
|
-
The
|
1161
|
+
The result object to be updated with merged statistics and details.
|
965
1162
|
individual_result : unittest.TestResult
|
966
|
-
The
|
1163
|
+
The result object whose statistics and details will be merged into `combined_result`.
|
967
1164
|
|
968
1165
|
Returns
|
969
1166
|
-------
|
970
1167
|
None
|
971
|
-
This method does not return
|
1168
|
+
This method does not return any value. The `combined_result` is updated in place with merged data.
|
972
1169
|
|
973
1170
|
Notes
|
974
1171
|
-----
|
975
|
-
|
976
|
-
|
977
|
-
|
978
|
-
|
1172
|
+
- Increments the total number of tests run.
|
1173
|
+
- Extends lists of failures, errors, skipped tests, expected failures, and unexpected successes.
|
1174
|
+
- If present, merges custom `test_results` entries for detailed per-test reporting.
|
1175
|
+
- This method is used to aggregate results from parallel or sequential test execution.
|
979
1176
|
"""
|
980
1177
|
|
981
1178
|
# Increment the total number of tests run
|
982
1179
|
combined_result.testsRun += individual_result.testsRun
|
983
1180
|
|
984
|
-
#
|
1181
|
+
# Merge failures from the individual result
|
985
1182
|
combined_result.failures.extend(individual_result.failures)
|
986
1183
|
|
987
|
-
#
|
1184
|
+
# Merge errors from the individual result
|
988
1185
|
combined_result.errors.extend(individual_result.errors)
|
989
1186
|
|
990
|
-
#
|
1187
|
+
# Merge skipped tests from the individual result
|
991
1188
|
combined_result.skipped.extend(individual_result.skipped)
|
992
1189
|
|
993
|
-
#
|
1190
|
+
# Merge expected failures from the individual result
|
994
1191
|
combined_result.expectedFailures.extend(individual_result.expectedFailures)
|
995
1192
|
|
996
|
-
#
|
1193
|
+
# Merge unexpected successes from the individual result
|
997
1194
|
combined_result.unexpectedSuccesses.extend(individual_result.unexpectedSuccesses)
|
998
1195
|
|
999
|
-
#
|
1196
|
+
# Merge custom detailed test results if available
|
1000
1197
|
if hasattr(individual_result, 'test_results'):
|
1001
1198
|
if not hasattr(combined_result, 'test_results'):
|
1002
1199
|
combined_result.test_results = []
|
@@ -1022,7 +1219,7 @@ class UnitTest(IUnitTest):
|
|
1022
1219
|
includes execution time, error details, and test metadata, which are stored
|
1023
1220
|
in a list of TestResult objects for later reporting and analysis.
|
1024
1221
|
"""
|
1025
|
-
this = self
|
1222
|
+
this: "UnitTest" = self
|
1026
1223
|
|
1027
1224
|
class OrionisTestResult(unittest.TextTestResult):
|
1028
1225
|
|
@@ -1191,26 +1388,45 @@ class UnitTest(IUnitTest):
|
|
1191
1388
|
execution_time: float
|
1192
1389
|
) -> Dict[str, Any]:
|
1193
1390
|
"""
|
1194
|
-
|
1195
|
-
|
1196
|
-
|
1391
|
+
Generate a summary dictionary of the test suite execution.
|
1392
|
+
|
1393
|
+
This method aggregates statistics, timing, and detailed results for each test case in the suite.
|
1394
|
+
It optionally persists the summary and/or generates a web report if configured in the test manager.
|
1197
1395
|
|
1198
1396
|
Parameters
|
1199
1397
|
----------
|
1200
1398
|
result : unittest.TestResult
|
1201
|
-
The result object containing details of the test execution.
|
1399
|
+
The result object containing details of the test execution, including per-test outcomes.
|
1202
1400
|
execution_time : float
|
1203
1401
|
The total execution time of the test suite in seconds.
|
1204
1402
|
|
1205
1403
|
Returns
|
1206
1404
|
-------
|
1207
|
-
|
1208
|
-
|
1405
|
+
Dict[str, Any]
|
1406
|
+
Dictionary containing:
|
1407
|
+
- total_tests: int
|
1408
|
+
Total number of tests executed.
|
1409
|
+
- passed: int
|
1410
|
+
Number of tests that passed.
|
1411
|
+
- failed: int
|
1412
|
+
Number of tests that failed.
|
1413
|
+
- errors: int
|
1414
|
+
Number of tests that raised errors.
|
1415
|
+
- skipped: int
|
1416
|
+
Number of tests that were skipped.
|
1417
|
+
- total_time: float
|
1418
|
+
Total execution time in seconds.
|
1419
|
+
- success_rate: float
|
1420
|
+
Percentage of tests that passed.
|
1421
|
+
- test_details: List[dict]
|
1422
|
+
List of dictionaries with per-test details (ID, class, method, status, timing, error info, traceback, etc.).
|
1423
|
+
- timestamp: str
|
1424
|
+
ISO-formatted timestamp of when the summary was generated.
|
1209
1425
|
|
1210
1426
|
Notes
|
1211
1427
|
-----
|
1212
|
-
- If persistence is enabled, the summary is saved to storage.
|
1213
|
-
- If web reporting is enabled, a web report is generated.
|
1428
|
+
- If persistence is enabled, the summary is saved to storage using the configured driver.
|
1429
|
+
- If web reporting is enabled, a web report is generated and a link is printed.
|
1214
1430
|
- The summary includes per-test details, overall statistics, and a timestamp.
|
1215
1431
|
"""
|
1216
1432
|
|
@@ -1219,7 +1435,7 @@ class UnitTest(IUnitTest):
|
|
1219
1435
|
for test_result in result.test_results:
|
1220
1436
|
rst: TestResult = test_result
|
1221
1437
|
|
1222
|
-
#
|
1438
|
+
# Extract traceback frames from the exception, if available
|
1223
1439
|
traceback_frames = []
|
1224
1440
|
if rst.exception and rst.exception.__traceback__:
|
1225
1441
|
tb = traceback.extract_tb(rst.exception.__traceback__)
|
@@ -1231,6 +1447,7 @@ class UnitTest(IUnitTest):
|
|
1231
1447
|
'code': frame.line
|
1232
1448
|
})
|
1233
1449
|
|
1450
|
+
# Build the per-test detail dictionary
|
1234
1451
|
test_details.append({
|
1235
1452
|
'id': rst.id,
|
1236
1453
|
'class': rst.class_name,
|
@@ -1271,7 +1488,7 @@ class UnitTest(IUnitTest):
|
|
1271
1488
|
if self.__web_report:
|
1272
1489
|
self.__handleWebReport(self.__result)
|
1273
1490
|
|
1274
|
-
# Return the summary dictionary
|
1491
|
+
# Return the summary dictionary containing all test statistics and details
|
1275
1492
|
return self.__result
|
1276
1493
|
|
1277
1494
|
def __handleWebReport(
|
@@ -1284,31 +1501,31 @@ class UnitTest(IUnitTest):
|
|
1284
1501
|
Parameters
|
1285
1502
|
----------
|
1286
1503
|
summary : dict
|
1287
|
-
|
1504
|
+
Dictionary containing the summary of test results to be used for web report generation.
|
1288
1505
|
|
1289
1506
|
Returns
|
1290
1507
|
-------
|
1291
1508
|
None
|
1509
|
+
This method does not return any value. It generates a web report and prints a link to it.
|
1292
1510
|
|
1293
1511
|
Notes
|
1294
1512
|
-----
|
1295
|
-
This method creates a web-based report for the given test results summary.
|
1296
|
-
It
|
1297
|
-
the
|
1298
|
-
|
1299
|
-
web report using the printer.
|
1513
|
+
This method creates a web-based report for the given test results summary using the `TestingResultRender` class.
|
1514
|
+
It passes the storage path, the summary result, and a persistence flag (True if persistence is enabled and the driver is set to 'sqlite').
|
1515
|
+
After rendering the report, it prints a link to the generated web report using the internal printer.
|
1516
|
+
The report is persisted only if configured to do so.
|
1300
1517
|
"""
|
1301
1518
|
|
1302
|
-
# Create a TestingResultRender instance
|
1303
|
-
#
|
1304
|
-
|
1305
|
-
|
1306
|
-
|
1307
|
-
persist=self.__persistent and self.__persistent_driver ==
|
1519
|
+
# Create a TestingResultRender instance to generate the web report.
|
1520
|
+
# The 'persist' flag is True only if persistence is enabled and the driver is 'sqlite'.
|
1521
|
+
html_report = TestingResultRender(
|
1522
|
+
result = summary,
|
1523
|
+
storage_path = self.__storage,
|
1524
|
+
persist = self.__persistent and self.__persistent_driver == PersistentDrivers.SQLITE.value
|
1308
1525
|
)
|
1309
1526
|
|
1310
|
-
# Print the link to the generated web report
|
1311
|
-
self.__printer.linkWebReport(
|
1527
|
+
# Print the link to the generated web report using the printer.
|
1528
|
+
self.__printer.linkWebReport(html_report.render())
|
1312
1529
|
|
1313
1530
|
def __handlePersistResults(
|
1314
1531
|
self,
|
@@ -1320,7 +1537,12 @@ class UnitTest(IUnitTest):
|
|
1320
1537
|
Parameters
|
1321
1538
|
----------
|
1322
1539
|
summary : dict
|
1323
|
-
|
1540
|
+
Dictionary containing the test results and metadata to be persisted.
|
1541
|
+
|
1542
|
+
Returns
|
1543
|
+
-------
|
1544
|
+
None
|
1545
|
+
This method does not return any value. It performs persistence operations as a side effect.
|
1324
1546
|
|
1325
1547
|
Raises
|
1326
1548
|
------
|
@@ -1331,200 +1553,158 @@ class UnitTest(IUnitTest):
|
|
1331
1553
|
|
1332
1554
|
Notes
|
1333
1555
|
-----
|
1334
|
-
This method
|
1335
|
-
If the driver is set to 'sqlite', the summary is stored in a SQLite database using the TestLogs class.
|
1336
|
-
If the driver is set to 'json', the summary is saved as a JSON file in the specified storage directory,
|
1337
|
-
|
1338
|
-
and handles any errors that may
|
1556
|
+
This method saves the test results summary according to the configured persistence driver.
|
1557
|
+
- If the driver is set to 'sqlite', the summary is stored in a SQLite database using the TestLogs class.
|
1558
|
+
- If the driver is set to 'json', the summary is saved as a JSON file in the specified storage directory,
|
1559
|
+
with a filename based on the current timestamp.
|
1560
|
+
The method ensures that the target directory exists before writing files, and handles any errors that may
|
1561
|
+
occur during file or database operations.
|
1339
1562
|
"""
|
1563
|
+
|
1340
1564
|
try:
|
1341
1565
|
|
1342
|
-
#
|
1566
|
+
# Persist results using SQLite database if configured
|
1343
1567
|
if self.__persistent_driver == PersistentDrivers.SQLITE.value:
|
1344
|
-
|
1345
|
-
history.create(summary)
|
1568
|
+
TestLogs(self.__storage).create(summary)
|
1346
1569
|
|
1347
|
-
#
|
1570
|
+
# Persist results as a JSON file if configured
|
1348
1571
|
elif self.__persistent_driver == PersistentDrivers.JSON.value:
|
1349
|
-
timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
|
1350
|
-
log_path = Path(self.__storage) / f"{timestamp}_test_results.json"
|
1351
1572
|
|
1352
|
-
#
|
1573
|
+
# Generate a unique filename based on the current timestamp
|
1574
|
+
timestamp = str(int(datetime.now().timestamp()))
|
1575
|
+
log_path = Path(self.__storage) / f"{timestamp}.json"
|
1576
|
+
|
1577
|
+
# Ensure the parent directory exists before writing the file
|
1353
1578
|
log_path.parent.mkdir(parents=True, exist_ok=True)
|
1354
1579
|
|
1355
|
-
# Write the summary to the JSON file
|
1580
|
+
# Write the summary dictionary to the JSON file
|
1356
1581
|
with open(log_path, 'w', encoding='utf-8') as log:
|
1357
1582
|
json.dump(summary, log, indent=4)
|
1583
|
+
|
1358
1584
|
except OSError as e:
|
1359
1585
|
|
1360
1586
|
# Raise an error if directory creation or file writing fails
|
1361
|
-
raise OSError(
|
1587
|
+
raise OSError(
|
1588
|
+
f"Failed to create directories or write the test results file: {str(e)}. "
|
1589
|
+
"Please check the storage path permissions and ensure there is enough disk space."
|
1590
|
+
)
|
1591
|
+
|
1362
1592
|
except Exception as e:
|
1363
1593
|
|
1364
1594
|
# Raise a persistence error for any other exceptions
|
1365
|
-
raise OrionisTestPersistenceError(
|
1595
|
+
raise OrionisTestPersistenceError(
|
1596
|
+
f"An unexpected error occurred while persisting test results: {str(e)}. "
|
1597
|
+
"Please verify the persistence configuration and check for possible issues with the storage backend."
|
1598
|
+
)
|
1366
1599
|
|
1367
|
-
def
|
1368
|
-
self
|
1369
|
-
|
1370
|
-
pattern: str
|
1371
|
-
) -> unittest.TestSuite:
|
1600
|
+
def getDiscoveredTestCases(
|
1601
|
+
self
|
1602
|
+
) -> List[unittest.TestCase]:
|
1372
1603
|
"""
|
1373
|
-
|
1604
|
+
Return a list of all discovered test case classes in the test suite.
|
1374
1605
|
|
1375
|
-
|
1376
|
-
|
1377
|
-
|
1378
|
-
The test suite containing the tests to be filtered.
|
1379
|
-
pattern : str
|
1380
|
-
Regular expression pattern to match against test names (test IDs).
|
1606
|
+
This method provides access to all unique test case classes that have been discovered
|
1607
|
+
during test suite initialization and loading. It does not execute any tests, but simply
|
1608
|
+
reports the discovered test case classes.
|
1381
1609
|
|
1382
1610
|
Returns
|
1383
1611
|
-------
|
1384
|
-
unittest.
|
1385
|
-
A
|
1386
|
-
|
1387
|
-
Raises
|
1388
|
-
------
|
1389
|
-
OrionisTestValueError
|
1390
|
-
If the provided pattern is not a valid regular expression.
|
1612
|
+
List[unittest.TestCase]
|
1613
|
+
A list of unique `unittest.TestCase` classes that have been discovered in the suite.
|
1391
1614
|
|
1392
1615
|
Notes
|
1393
1616
|
-----
|
1394
|
-
|
1395
|
-
|
1396
|
-
|
1617
|
+
- The returned list contains the test case classes, not instances or names.
|
1618
|
+
- The classes are derived from the `__class__` attribute of each discovered test case.
|
1619
|
+
- This method is useful for introspection or reporting purposes.
|
1397
1620
|
"""
|
1398
1621
|
|
1399
|
-
#
|
1400
|
-
|
1401
|
-
|
1402
|
-
try:
|
1403
|
-
|
1404
|
-
# Compile the provided regular expression pattern
|
1405
|
-
regex = re.compile(pattern)
|
1406
|
-
|
1407
|
-
except re.error as e:
|
1408
|
-
|
1409
|
-
# Raise a value error if the regex is invalid
|
1410
|
-
raise OrionisTestValueError(
|
1411
|
-
f"The provided test name pattern is invalid: '{pattern}'. "
|
1412
|
-
f"Regular expression compilation error: {str(e)}. "
|
1413
|
-
"Please check the pattern syntax and try again."
|
1414
|
-
)
|
1415
|
-
|
1416
|
-
# Iterate through all test cases in the flattened suite
|
1417
|
-
for test in self.__flattenTestSuite(suite):
|
1418
|
-
|
1419
|
-
# Add the test to the filtered suite if its ID matches the regex
|
1420
|
-
if regex.search(test.id()):
|
1421
|
-
filtered_suite.addTest(test)
|
1622
|
+
# Return all unique discovered test case classes as a list
|
1623
|
+
return list(self.__discovered_test_cases)
|
1422
1624
|
|
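`getDiscoveredTestCases()` only exposes metadata gathered while the suite was loaded. The stdlib-only sketch below shows how unique test case classes can be collected from a suite via each test's `__class__` attribute, which is the behaviour the docstring describes; the example test class is made up and no Orionis discovery machinery is involved.

```python
# Illustration with plain unittest; ExampleTest is a made-up test case.
import unittest

class ExampleTest(unittest.TestCase):
    def test_one(self):
        self.assertTrue(True)

    def test_two(self):
        self.assertEqual(1 + 1, 2)

suite = unittest.TestLoader().loadTestsFromTestCase(ExampleTest)

# Collect the unique TestCase classes, analogous to what the runner stores.
discovered_classes = {test.__class__ for test in suite}
print(list(discovered_classes))   # [<class '__main__.ExampleTest'>]
```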
1423
|
-
|
1424
|
-
|
1425
|
-
|
1426
|
-
def __listMatchingModules(
|
1427
|
-
self,
|
1428
|
-
root_path: Path,
|
1429
|
-
test_path: Path,
|
1430
|
-
custom_path: Path,
|
1431
|
-
pattern_file: str
|
1432
|
-
) -> List[str]:
|
1625
|
+
def getDiscoveredModules(
|
1626
|
+
self
|
1627
|
+
) -> List:
|
1433
1628
|
"""
|
1434
|
-
|
1629
|
+
Return a list of all discovered test module names in the test suite.
|
1435
1630
|
|
1436
|
-
This method
|
1437
|
-
|
1438
|
-
the
|
1631
|
+
This method provides access to all unique test modules that have been discovered
|
1632
|
+
during test suite initialization and loading. It does not execute any tests, but simply
|
1633
|
+
reports the discovered module names.
|
1439
1634
|
|
1440
1635
|
Parameters
|
1441
1636
|
----------
|
1442
|
-
|
1443
|
-
The root directory of the project, used to calculate the relative module path.
|
1444
|
-
test_path : Path
|
1445
|
-
The base directory where tests are located.
|
1446
|
-
custom_path : Path
|
1447
|
-
The subdirectory within `test_path` to search for matching test files.
|
1448
|
-
pattern_file : str
|
1449
|
-
The filename pattern to match (supports '*' and '?' wildcards).
|
1637
|
+
None
|
1450
1638
|
|
1451
1639
|
Returns
|
1452
1640
|
-------
|
1453
|
-
List[
|
1454
|
-
A list of
|
1641
|
+
List[str]
|
1642
|
+
A list of unique module names (as strings) that have been discovered in the suite.
|
1455
1643
|
|
1456
1644
|
Notes
|
1457
1645
|
-----
|
1458
|
-
-
|
1459
|
-
-
|
1460
|
-
-
|
1461
|
-
- If the relative path is '.', only the module name is used.
|
1462
|
-
- The method imports modules dynamically and returns them as objects.
|
1646
|
+
- The returned list contains the module names, not module objects.
|
1647
|
+
- The module names are derived from the `__module__` attribute of each discovered test case.
|
1648
|
+
- This method is useful for introspection or reporting purposes.
|
1463
1649
|
"""
|
1464
1650
|
|
1465
|
-
#
|
1466
|
-
|
1467
|
-
|
1468
|
-
# Use a set to avoid duplicate module imports.
|
1469
|
-
matched_folders = set()
|
1470
|
-
|
1471
|
-
# Walk through all files in the target directory.
|
1472
|
-
for root, _, files in walk(str(test_path / custom_path) if custom_path else str(test_path)):
|
1473
|
-
for file in files:
|
1474
|
-
|
1475
|
-
# Check if the file matches the pattern and is a Python file.
|
1476
|
-
if regex.fullmatch(file) and file.endswith('.py'):
|
1477
|
-
|
1478
|
-
# Calculate the relative path from the root, convert to module notation.
|
1479
|
-
ralative_path = str(Path(root).relative_to(root_path)).replace(os.sep, '.')
|
1480
|
-
module_name = file[:-3] # Remove '.py' extension.
|
1481
|
-
|
1482
|
-
# Build the full module name.
|
1483
|
-
full_module = f"{ralative_path}.{module_name}" if ralative_path != '.' else module_name
|
1484
|
-
|
1485
|
-
# Import the module and add to the set.
|
1486
|
-
matched_folders.add(import_module(ValidModuleName(full_module)))
|
1487
|
-
|
1488
|
-
# Return the list of imported module objects.
|
1489
|
-
return list(matched_folders)
|
1651
|
+
# Return all unique discovered test module names as a list
|
1652
|
+
return list(self.__discovered_test_modules)
|
1490
1653
|
|
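Module names are gathered the same way, from the `__module__` attribute of each discovered test case. A short stdlib illustration (the test class is again hypothetical):

```python
import unittest

class ExampleTest(unittest.TestCase):   # hypothetical test case
    def test_ok(self):
        self.assertTrue(True)

suite = unittest.TestLoader().loadTestsFromTestCase(ExampleTest)

# Unique module names, analogous to what getDiscoveredModules() reports.
modules = {type(test).__module__ for test in suite}
print(list(modules))   # ['__main__'] when run as a script
```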
1491
|
-
def
|
1654
|
+
def getTestIds(
|
1492
1655
|
self
|
1493
1656
|
) -> List[str]:
|
1494
1657
|
"""
|
1495
|
-
|
1658
|
+
Return a list of all unique test IDs discovered in the test suite.
|
1659
|
+
|
1660
|
+
This method provides access to the unique identifiers (IDs) of all test cases
|
1661
|
+
that have been discovered and loaded into the suite. The IDs are collected from
|
1662
|
+
each `unittest.TestCase` instance during test discovery and are returned as a list
|
1663
|
+
of strings. This is useful for introspection, reporting, or filtering purposes.
|
1664
|
+
|
1665
|
+
Parameters
|
1666
|
+
----------
|
1667
|
+
None
|
1496
1668
|
|
1497
1669
|
Returns
|
1498
1670
|
-------
|
1499
|
-
|
1500
|
-
|
1671
|
+
List[str]
|
1672
|
+
A list of strings, where each string is the unique ID of a discovered test case.
|
1673
|
+
The IDs are generated by the `id()` method of each `unittest.TestCase` instance.
|
1674
|
+
|
1675
|
+
Notes
|
1676
|
+
-----
|
1677
|
+
- The returned list contains only unique test IDs.
|
1678
|
+
- This method does not execute any tests; it only reports the discovered IDs.
|
1679
|
+
- The IDs typically include the module, class, and method name for each test case.
|
1501
1680
|
"""
|
1502
|
-
|
1681
|
+
|
1682
|
+
# Return all unique discovered test IDs as a list
|
1683
|
+
return list(self.__discovered_test_ids)
|
1503
1684
|
|
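As the docstring notes, the collected IDs follow unittest's `module.Class.method` convention. A minimal stdlib example of the kind of `id()` values that `getTestIds()` returns (the test class is made up):

```python
import unittest

class ExampleTest(unittest.TestCase):   # hypothetical test case
    def test_addition(self):
        self.assertEqual(2 + 2, 4)

suite = unittest.TestLoader().loadTestsFromTestCase(ExampleTest)

# Each TestCase instance exposes a unique ID of the form "<module>.<Class>.<method>".
test_ids = [test.id() for test in suite]
print(test_ids)   # ['__main__.ExampleTest.test_addition'] when run as a script
```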
1504
1685
|
def getTestCount(
|
1505
1686
|
self
|
1506
1687
|
) -> int:
|
1507
1688
|
"""
|
1508
|
-
|
1689
|
+
Return the total number of individual test cases discovered in the test suite.
|
1690
|
+
|
1691
|
+
This method calculates and returns the total number of test cases that have been
|
1692
|
+
discovered and loaded into the suite, including all modules and filtered tests.
|
1693
|
+
It uses the internal metadata collected during test discovery to provide an accurate count.
|
1509
1694
|
|
1510
1695
|
Returns
|
1511
1696
|
-------
|
1512
1697
|
int
|
1513
|
-
|
1514
|
-
"""
|
1515
|
-
return len(list(self.__flattenTestSuite(self.__suite)))
|
1698
|
+
The total number of individual test cases discovered and loaded in the suite.
|
1516
1699
|
|
1517
|
-
|
1518
|
-
|
1519
|
-
|
1700
|
+
Notes
|
1701
|
+
-----
|
1702
|
+
- The count reflects all tests after applying any name pattern or folder filtering.
|
1703
|
+
- This method does not execute any tests; it only reports the discovered count.
|
1520
1704
|
"""
|
1521
|
-
Clear all tests from the current test suite.
|
1522
1705
|
|
1523
|
-
|
1524
|
-
|
1525
|
-
None
|
1526
|
-
"""
|
1527
|
-
self.__suite = unittest.TestSuite()
|
1706
|
+
# Return the number of unique test IDs discovered across all modules
|
1707
|
+
return len(self.__discovered_test_ids)
|
1528
1708
|
|
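`getTestCount()` is simply the size of the unique-ID set built during discovery. Continuing the same stdlib illustration:

```python
import unittest

class ExampleTest(unittest.TestCase):   # hypothetical test case
    def test_one(self):
        self.assertTrue(True)

    def test_two(self):
        self.assertFalse(False)

suite = unittest.TestLoader().loadTestsFromTestCase(ExampleTest)

# De-duplicated IDs; their count matches what getTestCount() would report.
unique_ids = {test.id() for test in suite}
print(len(unique_ids))   # 2
```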
1529
1709
|
def getResult(
|
1530
1710
|
self
|
@@ -1537,54 +1717,4 @@ class UnitTest(IUnitTest):
|
|
1537
1717
|
dict
|
1538
1718
|
Result of the executed test suite.
|
1539
1719
|
"""
|
1540
|
-
return self.__result
|
1541
|
-
|
1542
|
-
def getOutputBuffer(
|
1543
|
-
self
|
1544
|
-
) -> int:
|
1545
|
-
"""
|
1546
|
-
Get the output buffer used for capturing test results.
|
1547
|
-
|
1548
|
-
Returns
|
1549
|
-
-------
|
1550
|
-
int
|
1551
|
-
Output buffer containing the results of the test execution.
|
1552
|
-
"""
|
1553
|
-
return self.__output_buffer
|
1554
|
-
|
1555
|
-
def printOutputBuffer(
|
1556
|
-
self
|
1557
|
-
) -> None:
|
1558
|
-
"""
|
1559
|
-
Print the contents of the output buffer to the console.
|
1560
|
-
|
1561
|
-
Returns
|
1562
|
-
-------
|
1563
|
-
None
|
1564
|
-
"""
|
1565
|
-
self.__printer.print(self.__output_buffer)
|
1566
|
-
|
1567
|
-
def getErrorBuffer(
|
1568
|
-
self
|
1569
|
-
) -> int:
|
1570
|
-
"""
|
1571
|
-
Get the error buffer used for capturing test errors.
|
1572
|
-
|
1573
|
-
Returns
|
1574
|
-
-------
|
1575
|
-
int
|
1576
|
-
Error buffer containing errors encountered during test execution.
|
1577
|
-
"""
|
1578
|
-
return self.__error_buffer
|
1579
|
-
|
1580
|
-
def printErrorBuffer(
|
1581
|
-
self
|
1582
|
-
) -> None:
|
1583
|
-
"""
|
1584
|
-
Print the contents of the error buffer to the console.
|
1585
|
-
|
1586
|
-
Returns
|
1587
|
-
-------
|
1588
|
-
None
|
1589
|
-
"""
|
1590
|
-
self.__printer.print(self.__error_buffer)
|
1720
|
+
return self.__result
|