orionis 0.314.0__py3-none-any.whl → 0.316.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- orionis/metadata/framework.py +1 -1
- orionis/test/contracts/printer.py +188 -0
- orionis/test/contracts/test_unit.py +42 -16
- orionis/test/output/printer.py +448 -0
- orionis/test/suite/test_unit.py +574 -659
- {orionis-0.314.0.dist-info → orionis-0.316.0.dist-info}/METADATA +1 -1
- {orionis-0.314.0.dist-info → orionis-0.316.0.dist-info}/RECORD +12 -10
- tests/testing/test_testing_unit.py +3 -8
- {orionis-0.314.0.dist-info → orionis-0.316.0.dist-info}/WHEEL +0 -0
- {orionis-0.314.0.dist-info → orionis-0.316.0.dist-info}/licenses/LICENCE +0 -0
- {orionis-0.314.0.dist-info → orionis-0.316.0.dist-info}/top_level.txt +0 -0
- {orionis-0.314.0.dist-info → orionis-0.316.0.dist-info}/zip-safe +0 -0
orionis/test/suite/test_unit.py
CHANGED
@@ -11,13 +11,7 @@ from contextlib import redirect_stdout, redirect_stderr
 from datetime import datetime
 from pathlib import Path
 from typing import Any, Dict, List, Optional, Tuple
-from rich.console import Console as RichConsole
-from rich.live import Live
-from rich.panel import Panel
-from rich.syntax import Syntax
-from rich.table import Table
-from rich.text import Text
-from orionis.console.output.console import Console
+from orionis.services.system.workers import Workers
 from orionis.test.entities.test_result import TestResult
 from orionis.test.enums.test_mode import ExecutionMode
 from orionis.test.enums.test_status import TestStatus
@@ -26,484 +20,575 @@ from orionis.test.exceptions.test_persistence_error import OrionisTestPersistenceError
 from orionis.test.exceptions.test_value_error import OrionisTestValueError
 from orionis.test.logs.history import TestHistory
 from orionis.test.contracts.test_unit import IUnitTest
+from orionis.test.output.printer import TestPrinter
 from orionis.test.view.render import TestingResultRender
 
 class UnitTest(IUnitTest):
     """
-    UnitTest
-
-
-
-
-
-
-
-
-
-    test_results : list of TestResult
-        A list to store the results of executed tests.
-    start_time : float
-        The start time of the test execution.
-    print_result : bool
-        Flag to determine whether to print test results.
-    verbosity : int
-        The verbosity level for test output.
-    execution_mode : str
-        The mode of test execution (e.g., 'SEQUENTIAL' or 'PARALLEL').
-    max_workers : int
-        The maximum number of workers for parallel execution.
-    fail_fast : bool
-        Flag to stop execution on the first failure.
-    rich_console : RichConsole
-        Console for rich text output.
-    orionis_console : Console
-        Console for standard output.
-    discovered_tests : list
-        A list to store discovered test cases.
-    width_output_component : int
-        The width of the table for displaying results.
-    throw_exception : bool
-        Flag to determine whether to throw exceptions on test failures.
-    persistent : bool
-        Flag to determine whether to persist test results in a database.
-    base_path : str
-        The base directory for test discovery and persistence.
+    Orionis UnitTest
+
+    The main class of the Orionis framework for advanced unit test management.
+
+    This class provides a comprehensive solution for discovering, executing, and reporting unit tests in a flexible and configurable way, surpassing the usual limitations of traditional frameworks.
+    It supports sequential or parallel execution, filtering by name or tags, and detailed result capture, including timings, errors, and tracebacks.
+
+    It includes persistence options in multiple formats (SQLite or JSON) and generates rich reports both in the console and on the web.
+    Its intuitive interface and high degree of customization make it easy to integrate into CI/CD pipelines and adapt to the specific needs of any project.
+
+    This is an especially suitable choice for those seeking greater robustness, traceability, and visibility in their automated testing processes, offering advantages often missing from other alternatives.
     """
 
     def __init__(self) -> None:
         """
-
+        Initializes the test suite configuration and supporting components.
 
         Parameters
         ----------
-
-            The instance of the UnitTest class.
+        None
 
         Attributes
         ----------
-        loader : unittest.TestLoader
-            The test loader used to discover tests.
-        suite : unittest.TestSuite
-            The test suite to hold the discovered tests.
-        test_results : list of TestResult
-            A list to store the results of executed tests.
-        start_time : float
-            The start time of the test execution.
-        print_result : bool
-            Flag to determine whether to print test results.
         verbosity : int
-
+            Level of verbosity for test output.
         execution_mode : str
-
+            Mode in which tests are executed.
         max_workers : int
-
+            Maximum number of worker threads/processes.
         fail_fast : bool
-
-
-
-        orionis_console : Console
-            Console for standard output.
-        discovered_tests : list
-            A list to store discovered test cases.
-        width_output_component : int
-            The width of the table for displaying results.
+            Whether to stop on the first test failure.
+        print_result : bool
+            Whether to print test results to the console.
         throw_exception : bool
-
+            Whether to raise exceptions on test failures.
         persistent : bool
-
+            Whether to use persistent storage for test results.
+        persistent_driver : str
+            Driver used for persistent storage.
+        web_report : bool
+            Whether to generate a web-based report.
+        full_path : Optional[str]
+            Full path for test discovery.
+        folder_path : str
+            Folder path for test discovery.
         base_path : str
-
+            Base path for test discovery.
+        pattern : str
+            Pattern to match test files.
+        test_name_pattern : Optional[str]
+            Pattern to match test names.
+        tags : Optional[List[str]]
+            Tags to filter tests.
+        module_name : str
+            Name of the module for test discovery.
+        loader : unittest.TestLoader
+            Loader for discovering tests.
+        suite : unittest.TestSuite
+            Test suite to hold discovered tests.
+        discovered_tests : list
+            List of discovered tests.
+        printer : TestPrinter
+            Utility for printing test results to the console.
+        __output_buffer
+            Buffer for capturing standard output during tests.
+        __error_buffer
+            Buffer for capturing error output during tests.
+        __result
+            Result of the test execution.
         """
+
+        # Values for configuration
+        self.verbosity: int
+        self.execution_mode: str
+        self.max_workers: int
+        self.fail_fast: bool
+        self.print_result: bool
+        self.throw_exception: bool
+        self.persistent: bool
+        self.persistent_driver: str
+        self.web_report: bool
+
+        # Values for discovering tests in folders
+        self.full_path: Optional[str]
+        self.folder_path: str
+        self.base_path: str
+        self.pattern: str
+        self.test_name_pattern: Optional[str]
+        self.tags: Optional[List[str]]
+
+        # Values for discovering tests in modules
+        self.module_name: str
+        self.test_name_pattern: Optional[str]
+
+        # Initialize the test loader and suite
         self.loader = unittest.TestLoader()
         self.suite = unittest.TestSuite()
-        self.test_results: List[TestResult] = []
-        self.start_time: float = 0.0
-        self.print_result: bool = True
-        self.verbosity: int = 2
-        self.execution_mode: str = ExecutionMode.SEQUENTIAL.value
-        self.max_workers: int = 4
-        self.fail_fast: bool = False
-        self.rich_console = RichConsole()
-        self.orionis_console = Console()
         self.discovered_tests: List = []
-
-
-        self.
-
-
-        self.base_path: str = "tests"
-        self.withliveconsole: bool = True
+
+        # Initialize the class for printing in the console
+        self.printer = TestPrinter()
+
+        # Variables for capturing output and error streams
         self.__output_buffer = None
         self.__error_buffer = None
+
+        # Result of the test execution
         self.__result = None
 
     def configure(
         self,
         *,
-        verbosity: int =
-        execution_mode: str | ExecutionMode =
-        max_workers: int =
-        fail_fast: bool =
-        print_result: bool =
+        verbosity: int = 2,
+        execution_mode: str | ExecutionMode = ExecutionMode.SEQUENTIAL,
+        max_workers: int = Workers().calculate(),
+        fail_fast: bool = False,
+        print_result: bool = True,
         throw_exception: bool = False,
         persistent: bool = False,
         persistent_driver: str = 'sqlite',
         web_report: bool = False
     ) -> 'UnitTest':
         """
-
+        Configure the UnitTest instance with various execution and reporting options.
 
         Parameters
         ----------
         verbosity : int, optional
-
+            Level of output verbosity.
         execution_mode : str or ExecutionMode, optional
-
+            Test execution mode.
         max_workers : int, optional
-
+            Maximum number of worker threads/processes for parallel execution. Must be a positive integer.
         fail_fast : bool, optional
-
+            If True, stop execution on first failure.
         print_result : bool, optional
-
-        throw_exception : bool,
-
-        persistent : bool,
-
-        persistent_driver : str,
-
+            If True, print test results to the console.
+        throw_exception : bool, default: False
+            If True, raise exceptions on test failures.
+        persistent : bool, default: False
+            If True, enable persistent storage of test results.
+        persistent_driver : str, default: 'sqlite'
+            Backend for persistent storage. Must be 'sqlite' or 'json'.
+        web_report : bool, default: False
+            If True, enable web-based reporting.
 
         Returns
        -------
         UnitTest
             The configured UnitTest instance.
+
+        Raises
+        ------
+        OrionisTestValueError
+            If any parameter value is invalid.
         """
+
+        # Validate and set verbosity
         if verbosity is not None:
-
+            if isinstance(verbosity, int) and verbosity in [0, 1, 2]:
+                self.verbosity = verbosity
+            else:
+                raise OrionisTestValueError("Verbosity must be an integer: 0 (quiet), 1 (default), or 2 (verbose).")
 
+        # Validate and set execution mode
         if execution_mode is not None and isinstance(execution_mode, ExecutionMode):
             self.execution_mode = execution_mode.value
         else:
-
+            if isinstance(execution_mode, str) and execution_mode in [ExecutionMode.SEQUENTIAL.value, ExecutionMode.PARALLEL.value]:
+                self.execution_mode = execution_mode
+            else:
+                raise OrionisTestValueError("Execution mode must be 'SEQUENTIAL' or 'PARALLEL'.")
 
+        # Validate and set max_workers
         if max_workers is not None:
-
+            if isinstance(max_workers, int) and max_workers > 0:
+                self.max_workers = max_workers
+            else:
+                raise OrionisTestValueError("Max workers must be a positive integer.")
 
+        # Validate and set other parameters
         if fail_fast is not None:
-
+            if isinstance(fail_fast, bool):
+                self.fail_fast = fail_fast
+            else:
+                raise OrionisTestValueError("Fail fast must be a boolean value.")
 
+        # Validate and set print_result
         if print_result is not None:
-
+            if isinstance(print_result, bool):
+                self.print_result = print_result
+            else:
+                raise OrionisTestValueError("Print result must be a boolean value.")
 
+        # Validate and set throw_exception
         if throw_exception is not None:
-
+            if isinstance(throw_exception, bool):
+                self.throw_exception = throw_exception
+            else:
+                raise OrionisTestValueError("Throw exception must be a boolean value.")
 
+        # Validate and set persistent and persistent_driver
         if persistent is not None:
-
+            if isinstance(persistent, bool):
+                self.persistent = persistent
+            else:
+                raise OrionisTestValueError("Persistent must be a boolean value.")
 
+        # Validate and set persistent_driver
         if persistent_driver is not None:
-
+            if isinstance(persistent_driver, str) and persistent_driver in ['sqlite', 'json']:
+                self.persistent_driver = persistent_driver
+            else:
+                raise OrionisTestValueError("Persistent driver must be 'sqlite' or 'json'.")
 
+        # Validate and set web_report
         if web_report is not None:
-
+            if isinstance(web_report, bool):
+                self.web_report = web_report
+            else:
+                raise OrionisTestValueError("Web report must be a boolean value.")
 
+        # Return the configured instance
         return self
 
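For orientation, a minimal sketch of how the reworked `configure()` API reads in 0.316.0, using only the parameter names and defaults visible in the signature above. The import path for `UnitTest` is assumed from this file's location, and note that the `max_workers` default moves from a hard-coded `4` to a value computed by `Workers().calculate()`.

```python
# Hypothetical usage sketch; import path assumed from orionis/test/suite/test_unit.py.
from orionis.test.suite.test_unit import UnitTest
from orionis.test.enums.test_mode import ExecutionMode

ut = UnitTest().configure(
    verbosity=2,                            # must be 0, 1, or 2
    execution_mode=ExecutionMode.PARALLEL,  # enum, or the strings 'SEQUENTIAL'/'PARALLEL'
    max_workers=4,                          # default is now Workers().calculate()
    fail_fast=False,
    print_result=True,
    throw_exception=False,
    persistent=True,
    persistent_driver='json',               # only 'sqlite' or 'json' are accepted
    web_report=False,
)
```

Every argument is now validated eagerly; an out-of-range value raises `OrionisTestValueError` instead of failing later during the run.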
     def discoverTestsInFolder(
         self,
         *,
-        folder_path: str,
         base_path: str = "tests",
+        folder_path: str,
         pattern: str = "test_*.py",
         test_name_pattern: Optional[str] = None,
         tags: Optional[List[str]] = None
     ) -> 'UnitTest':
         """
+        Discover and add unit tests from a specified folder to the test suite.
+
+        Searches for test files in the given folder path, optionally filtering by file name pattern,
+        test name pattern, and tags. Discovered tests are added to the suite, and information about
+        the discovery is recorded.
+
         Parameters
         ----------
-        folder_path : str
-            The relative path to the folder containing the tests.
         base_path : str, optional
-            The base directory
+            The base directory to search for tests. Defaults to "tests".
+        folder_path : str
+            The relative path to the folder containing test files.
         pattern : str, optional
-            The
-        test_name_pattern : str
+            The file name pattern to match test files. Defaults to "test_*.py".
+        test_name_pattern : Optional[str], optional
             A pattern to filter test names. Defaults to None.
-        tags :
+        tags : Optional[List[str]], optional
             A list of tags to filter tests. Defaults to None.
 
         Returns
         -------
         UnitTest
-            The current instance
+            The current instance with discovered tests added to the suite.
 
         Raises
         ------
         OrionisTestValueError
-            If the
-
-        Notes
-        -----
-        This method updates the internal test suite with the discovered tests and tracks the number of tests found.
+            If any argument is invalid, the folder does not exist, no tests are found,
+            or if there are import or discovery errors.
         """
+
+        # Validate folder_path
+        if folder_path is None or not isinstance(folder_path, str):
+            raise OrionisTestValueError(
+                f"Invalid folder_path: Expected a non-empty string, got '{folder_path}' ({type(folder_path).__name__})."
+            )
+        self.folder_path = folder_path
+
+        # Validate base_path and set value
+        if base_path is None or not isinstance(base_path, str):
+            raise OrionisTestValueError(
+                f"Invalid base_path: Expected a non-empty string, got '{base_path}' ({type(base_path).__name__})."
+            )
+        self.base_path = base_path
+
+        # Validate pattern
+        if pattern is None or not isinstance(pattern, str):
+            raise OrionisTestValueError(
+                f"Invalid pattern: Expected a non-empty string, got '{pattern}' ({type(pattern).__name__})."
+            )
+        self.pattern = pattern
+
+        # Validate test_name_pattern
+        if test_name_pattern is not None:
+            if not isinstance(test_name_pattern, str):
+                raise OrionisTestValueError(
+                    f"Invalid test_name_pattern: Expected a string, got '{test_name_pattern}' ({type(test_name_pattern).__name__})."
+                )
+            self.test_name_pattern = test_name_pattern
+
+        # Validate tags
+        if tags is not None:
+            if not isinstance(tags, list) or not all(isinstance(tag, str) for tag in tags):
+                raise OrionisTestValueError(
+                    f"Invalid tags: Expected a list of strings, got '{tags}' ({type(tags).__name__})."
+                )
+            self.tags = tags
+
+        # Try to discover tests in the specified folder
         try:
-            self.base_path = base_path
 
-
+            # Ensure the folder path is absolute
+            full_path = Path(self.base_path) / self.folder_path
             if not full_path.exists():
-                raise OrionisTestValueError(
+                raise OrionisTestValueError(
+                    f"Test folder not found at the specified path: '{full_path}'. "
+                    "Please verify that the path is correct and the folder exists."
+                )
+            self.full_path = str(full_path.resolve())
 
+            # Discover tests using the unittest TestLoader
             tests = self.loader.discover(
                 start_dir=str(full_path),
                 pattern=pattern,
                 top_level_dir=None
             )
 
+            # If name pattern is provided, filter tests by name
             if test_name_pattern:
-                tests = self.
+                tests = self.__filterTestsByName(
+                    suite=tests,
+                    pattern=test_name_pattern
+                )
 
+            # If tags are provided, filter tests by tags
             if tags:
-                tests = self.
+                tests = self.__filterTestsByTags(
+                    suite=tests,
+                    tags=tags
+                )
 
+            # If no tests are found, raise an error
             if not list(tests):
-                raise OrionisTestValueError(
+                raise OrionisTestValueError(
+                    f"No tests were found in the path '{full_path}' matching the file pattern '{pattern}'"
+                    + (f" and the test name pattern '{test_name_pattern}'" if test_name_pattern else "")
+                    + (f" and the tags {tags}" if tags else "") +
+                    ".\nPlease ensure that test files exist and that the patterns and tags are correct."
+                )
 
+            # Add discovered tests to the suite
             self.suite.addTests(tests)
 
-
+            # Count the number of tests discovered
+            # Using __flattenTestSuite to ensure we count all individual test cases
+            test_count = len(list(self.__flattenTestSuite(tests)))
+
+            # Append the discovered tests information
             self.discovered_tests.append({
                 "folder": str(full_path),
                 "test_count": test_count,
             })
 
+            # Rereturn the current instance
             return self
 
         except ImportError as e:
-
+
+            # Raise a specific error if the import fails
+            raise OrionisTestValueError(
+                f"Error importing tests from path '{full_path}': {str(e)}.\n"
+                "Please verify that the directory and test modules are accessible and correct."
+            )
         except Exception as e:
-            raise OrionisTestValueError(f"Unexpected error discovering tests: {str(e)}")
 
-
+            # Raise a general error for unexpected issues
+            raise OrionisTestValueError(
+                f"Unexpected error while discovering tests in '{full_path}': {str(e)}.\n"
+                "Ensure that the test files are valid and that there are no syntax errors or missing dependencies."
+            )
+
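Continuing the sketch above, folder discovery is now fully validated and keyword-only. A hedged example, with the folder layout purely illustrative:

```python
# Hypothetical layout: tests/testing/test_*.py
ut.discoverTestsInFolder(
    base_path="tests",        # root directory (default "tests")
    folder_path="testing",    # folder under base_path holding the test files
    pattern="test_*.py",      # file-name glob passed to unittest's discover()
    test_name_pattern=None,   # optional regex matched against test ids
    tags=None,                # optional list of tags to keep
)
```

Discovery failures no longer surface as bare `ImportError`s; both import and unexpected errors are rewrapped as `OrionisTestValueError` with the offending path in the message.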
+    def discoverTestsInModule(
+        self,
+        *,
+        module_name: str,
+        test_name_pattern: Optional[str] = None
+    ) -> 'UnitTest':
         """
-
+        Discover and add unit tests from a specified module to the test suite.
 
         Parameters
         ----------
         module_name : str
-
-        test_name_pattern : str, optional
-
+            The name of the module from which to discover tests. Must be a non-empty string.
+        test_name_pattern : Optional[str], optional
+            A pattern to filter test names. If provided, only tests matching this pattern will be included.
 
         Returns
         -------
         UnitTest
-            The current instance
+            The current instance with the discovered tests added to the suite.
 
-
-
+        Raises
+        ------
         OrionisTestValueError
-            If the
+            If the module_name is invalid, the test_name_pattern is invalid, the module cannot be imported,
+            or any unexpected error occurs during test discovery.
+
+        Notes
+        -----
+        - The method validates the input parameters before attempting to discover tests.
+        - If a test_name_pattern is provided, only tests matching the pattern are included.
+        - Information about the discovered tests is appended to the 'discovered_tests' attribute.
         """
+
+        # Validate module_name
+        if not module_name or not isinstance(module_name, str):
+            raise OrionisTestValueError(
+                f"Invalid module_name: Expected a non-empty string, got '{module_name}' ({type(module_name).__name__})."
+            )
+        self.module_name = module_name
+
+        # Validate test_name_pattern
+        if test_name_pattern is not None and not isinstance(test_name_pattern, str):
+            raise OrionisTestValueError(
+                f"Invalid test_name_pattern: Expected a string, got '{test_name_pattern}' ({type(test_name_pattern).__name__})."
+            )
+        self.test_name_pattern = test_name_pattern
+
+        # Try to load tests from the specified module
         try:
 
-            tests
+            # Load the tests from the specified module
+            tests = self.loader.loadTestsFromName(
+                name=module_name
+            )
 
+            # If test_name_pattern provided
             if test_name_pattern:
-                tests = self.
+                tests = self.__filterTestsByName(
+                    suite=tests,
+                    pattern=test_name_pattern
+                )
 
+            # Add the discovered tests to the suite
             self.suite.addTests(tests)
 
-
+            # Count the number of tests discovered
+            test_count = len(list(self.__flattenTestSuite(tests)))
+
+            # Append the discovered tests information
             self.discovered_tests.append({
                 "module": module_name,
                 "test_count": test_count,
             })
 
+            # Return the current instance
             return self
-        except ImportError as e:
-            raise OrionisTestValueError(f"Error importing module '{module_name}': {str(e)}")
 
-
-        """
-        Prints a formatted message indicating the start of the test suite execution.
-
-        Parameters
-        ----------
-        self : UnitTest
-            The instance of the UnitTest class.
-
-        Notes
-        -----
-        This method displays details about the test suite, including the total number of tests,
-        the execution mode (parallel or sequential), and the start time. The message is styled
-        and displayed using the `rich` library.
+        except ImportError as e:
 
-
-
-
-
-        suite : unittest.TestSuite
-            The test suite containing the tests to be executed.
-        max_workers : int
-            The number of workers used in parallel execution mode.
-        execution_mode : str
-            The mode of execution ('SEQUENTIAL' or 'PARALLEL').
-        orionis_console : Console
-            The console object for handling standard output.
-        rich_console : RichConsole
-            The rich console object for styled output.
-        width_output_component : int
-            The calculated width of the message panel for formatting.
-        """
-        if self.print_result:
-            test_count = len(list(self._flattenTestSuite(self.suite)))
-            mode_text = f"[stat]Parallel with {self.max_workers} workers[/stat]" if self.execution_mode == ExecutionMode.PARALLEL.value else "Sequential"
-            textlines = [
-                f"[bold]Total Tests:[/bold] [dim]{test_count}[/dim]",
-                f"[bold]Mode:[/bold] [dim]{mode_text}[/dim]",
-                f"[bold]Started at:[/bold] [dim]{datetime.now().strftime('%Y-%m-%d %H:%M:%S')}[/dim]"
-            ]
-
-            self.orionis_console.newLine()
-            self.rich_console.print(
-                Panel(
-                    str('\n').join(textlines),
-                    border_style="blue",
-                    title="🧪 Orionis Framework - Component Test Suite",
-                    title_align="center",
-                    width=self.width_output_component,
-                    padding=(0, 1)
-                )
+            # Raise a specific error if the import fails
+            raise OrionisTestValueError(
+                f"Error importing tests from module '{module_name}': {str(e)}.\n"
+                "Please verify that the module exists, is accessible, and contains valid test cases."
             )
-
-
-    def run(self, print_result: bool = None, throw_exception: bool = None) -> Dict[str, Any]:
-        """
-        Executes the test suite and processes the results.
-
-        Parameters
-        ----------
-        print_result : bool, optional
-            If provided, overrides the instance's `print_result` attribute to determine whether to print results.
-        throw_exception : bool, optional
-            If True, raises an exception if any test failures or errors are detected.
-
-        Returns
-        -------
-        dict
-            A summary of the test execution, including details such as execution time, results, and timestamp.
-
-        Raises
-        ------
-        OrionisTestFailureException
-            If `throw_exception` is True and there are test failures or errors.
-        """
+        except Exception as e:
 
-
-
-
-
-
+            # Raise a general error for unexpected issues
+            raise OrionisTestValueError(
+                f"Unexpected error while discovering tests in module '{module_name}': {str(e)}.\n"
+                "Ensure that the module name is correct, the test methods are valid, and there are no syntax errors or missing dependencies."
+            )
 
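The module-based variant mirrors the folder API. A short sketch, with the dotted module path hypothetical:

```python
ut.discoverTestsInModule(
    module_name="tests.testing.test_testing_unit",  # illustrative dotted path
    test_name_pattern=r"test_.*",                   # optional regex filter
)
```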
-
-        self
+    def run(
+        self
+    ) -> Dict[str, Any]:
 
         # Start the timer and print the start message
-
-
-
-
-
-
-
-
-        running_panel = Panel(
-            message,
-            border_style="yellow",
-            title="In Progress",
-            title_align="left",
-            width=self.width_output_component,
-            padding=(1, 2)
-        )
+        start_time = time.time()
+
+        # Print the start message
+        self.printer.startMessage(
+            print_result=self.print_result,
+            length_tests=len(list(self.__flattenTestSuite(self.suite))),
+            execution_mode=self.execution_mode,
+            max_workers=self.max_workers
+        )
 
-
-
-
-
-
-
-            result, output_buffer, error_buffer = self._runSuite()
-        else:
-            # If not printing results, run the suite without live console
-            result, output_buffer, error_buffer = self._runSuite()
+        # Execute the test suite and capture the results
+        result, output_buffer, error_buffer = self.printer.executePanel(
+            print_result=self.print_result,
+            flatten_test_suite= self.__flattenTestSuite(self.suite),
+            callable=self.__runSuite
+        )
 
         # Save Outputs
         self.__output_buffer = output_buffer.getvalue()
         self.__error_buffer = error_buffer.getvalue()
 
         # Process results
-        execution_time = time.time() -
-        summary = self.
+        execution_time = time.time() - start_time
+        summary = self.__generateSummary(result, execution_time)
 
         # Print captured output
-
-        self.
+        self.printer.displayResults(
+            print_result=self.print_result,
+            summary=summary
+        )
 
         # Print Execution Time
         if not result.wasSuccessful() and self.throw_exception:
             raise OrionisTestFailureException(result)
 
+        # Print the final summary message
+        self.printer.finishMessage(
+            print_result=self.print_result,
+            summary=summary
+        )
+
         # Return the summary of the test results
-        self.__result = summary
         return summary
 
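`run()` no longer accepts `print_result`/`throw_exception` overrides; both now live exclusively in `configure()`. A sketch of the call and of the summary keys that `__generateSummary()` is shown producing later in this diff:

```python
summary = ut.run()

# Keys visible in this diff's __generateSummary():
print(summary["total_tests"], summary["passed"], summary["failed"],
      summary["errors"], summary["skipped"])
print(f"{summary['total_time']:.2f}s, {summary['success_rate']:.2f}% success")
```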
-    def
+    def __flattenTestSuite(
+        self,
+        suite: unittest.TestSuite
+    ) -> List[unittest.TestCase]:
         """
-
+        Recursively flattens a nested unittest.TestSuite into a list of unique unittest.TestCase instances.
+
+        Parameters
+        ----------
+        suite : unittest.TestSuite
+            The test suite to flatten, which may contain nested suites or test cases.
 
         Returns
         -------
-
-
-        """
-        if self.withliveconsole:
-
-            try:
-
-                # Flatten the test suite to get all test cases
-                for test_case in self._flattenTestSuite(self.suite):
-
-                    # Get the source code of the test case class
-                    source = inspect.getsource(test_case.__class__)
-
-                    # Only match if the keyword is not inside a comment
-                    for keyword in ('self.dd', 'self.dump'):
-
-                        # Find all lines containing the keyword
-                        for line in source.splitlines():
-                            if keyword in line:
-
-                                # Remove leading/trailing whitespace
-                                stripped = line.strip()
-
-                                # Ignore lines that start with '#' (comments)
-                                if not stripped.startswith('#') and not re.match(r'^\s*#', line):
-                                    self.withliveconsole = False
-                                    break
+        List[unittest.TestCase]
+            A list containing all unique TestCase instances extracted from the suite.
 
-
-
-
+        Notes
+        -----
+        This method traverses the given TestSuite recursively, collecting all TestCase instances
+        and ensuring that each test appears only once in the resulting list.
+        """
+        tests = []
+        seen_ids = set()
 
-
-
-
+        def _flatten(item):
+            if isinstance(item, unittest.TestSuite):
+                for sub_item in item:
+                    _flatten(sub_item)
+            elif hasattr(item, "id"):
+                test_id = item.id()
+                parts = test_id.split('.')
+                if len(parts) >= 2:
+                    short_id = '.'.join(parts[-2:])
+                else:
+                    short_id = test_id
+                if short_id not in seen_ids:
+                    seen_ids.add(short_id)
+                    tests.append(item)
 
-
-
+        _flatten(suite)
+        return tests
 
|
-
def
|
589
|
+
def __runSuite(
|
590
|
+
self
|
591
|
+
):
|
507
592
|
"""
|
508
593
|
Run the test suite according to the selected execution mode (parallel or sequential),
|
509
594
|
capturing standard output and error streams during execution.
|
@@ -525,14 +610,29 @@ class UnitTest(IUnitTest):
 
         # Execute tests based on selected mode
         if self.execution_mode == ExecutionMode.PARALLEL.value:
-
+
+            # Run tests in parallel
+            result = self.__runTestsInParallel(
+                output_buffer,
+                error_buffer
+            )
+
         else:
-
+
+            # Run tests sequentially
+            result = self.__runTestsSequentially(
+                output_buffer,
+                error_buffer
+            )
 
         # Return the result along with captured output and error streams
         return result, output_buffer, error_buffer
 
-    def
+    def __runTestsSequentially(
+        self,
+        output_buffer: io.StringIO,
+        error_buffer: io.StringIO
+    ) -> unittest.TestResult:
         """
         Executes the test suite sequentially, capturing the output and error streams.
 
|
|
549
649
|
The result of the test suite execution, containing information about
|
550
650
|
passed, failed, and skipped tests.
|
551
651
|
"""
|
652
|
+
|
653
|
+
# Flatten the suite to avoid duplicate tests
|
654
|
+
flattened_suite = unittest.TestSuite(self.__flattenTestSuite(self.suite))
|
655
|
+
|
656
|
+
# Create a custom result class to capture detailed test results
|
552
657
|
with redirect_stdout(output_buffer), redirect_stderr(error_buffer):
|
553
658
|
runner = unittest.TextTestRunner(
|
554
659
|
stream=output_buffer,
|
555
660
|
verbosity=self.verbosity,
|
556
661
|
failfast=self.fail_fast,
|
557
|
-
resultclass=self.
|
662
|
+
resultclass=self.__customResultClass()
|
558
663
|
)
|
559
|
-
result = runner.run(
|
664
|
+
result = runner.run(flattened_suite)
|
560
665
|
|
666
|
+
# Return the result object containing test outcomes
|
561
667
|
return result
|
562
668
|
|
563
|
-
def
|
669
|
+
def __runTestsInParallel(
|
670
|
+
self,
|
671
|
+
output_buffer: io.StringIO,
|
672
|
+
error_buffer: io.StringIO
|
673
|
+
) -> unittest.TestResult:
|
564
674
|
"""
|
565
675
|
Runs all test cases in the provided test suite concurrently using a thread pool,
|
566
676
|
aggregating the results into a single result object. Standard output and error
|
@@ -585,10 +695,10 @@ class UnitTest(IUnitTest):
         """
 
         # Flatten the test suite to get individual test cases
-        test_cases = list(self.
+        test_cases = list(self.__flattenTestSuite(self.suite))
 
         # Create a custom result instance to collect all results
-        result_class = self.
+        result_class = self.__customResultClass()
         combined_result = result_class(io.StringIO(), descriptions=True, verbosity=self.verbosity)
 
         # Helper function to run a single test and return its result.
@@ -602,22 +712,34 @@ class UnitTest(IUnitTest):
             )
             return runner.run(unittest.TestSuite([test]))
 
+        # Use ThreadPoolExecutor to run tests concurrently
         with redirect_stdout(output_buffer), redirect_stderr(error_buffer):
+
+            # Create a ThreadPoolExecutor to run tests in parallel
             with ThreadPoolExecutor(max_workers=self.max_workers) as executor:
+
+                # Submit all test cases to the executor
                 futures = [executor.submit(run_single_test, test) for test in test_cases]
 
+                # Process the results as they complete
                 for future in as_completed(futures):
                     test_result = future.result()
-                    self.
+                    self.__mergeTestResults(combined_result, test_result)
 
+                    # If fail_fast is enabled and a test failed, cancel remaining futures
                     if self.fail_fast and not combined_result.wasSuccessful():
                         for f in futures:
                             f.cancel()
                         break
 
+        # Return the combined result object
         return combined_result
 
|
-
def
|
738
|
+
def __mergeTestResults(
|
739
|
+
self,
|
740
|
+
combined_result: unittest.TestResult,
|
741
|
+
individual_result: unittest.TestResult
|
742
|
+
) -> None:
|
621
743
|
"""
|
622
744
|
Merge the results of two unittest.TestResult objects.
|
623
745
|
|
@@ -637,6 +759,8 @@ class UnitTest(IUnitTest):
         -------
         None
         """
+
+        # Update the combined result with counts and lists from the individual result
         combined_result.testsRun += individual_result.testsRun
         combined_result.failures.extend(individual_result.failures)
         combined_result.errors.extend(individual_result.errors)
@@ -650,24 +774,26 @@ class UnitTest(IUnitTest):
             combined_result.test_results = []
         combined_result.test_results.extend(individual_result.test_results)
 
-    def
+    def __customResultClass(
+        self
+    ) -> type:
         """
         Creates a custom test result class for enhanced test tracking.
-        This method dynamically generates an `
+        This method dynamically generates an `OrionisTestResult` class that extends
         `unittest.TextTestResult`. The custom class provides advanced functionality for
         tracking test execution details, including timings, statuses, and error information.
 
         Returns
         -------
         type
-            A dynamically created class `
+            A dynamically created class `OrionisTestResult` that overrides methods to handle
             test results, including success, failure, error, and skipped tests. The class
             collects detailed information about each test, such as execution time, error
             messages, traceback, and file path.
 
         Notes
         -----
-        The `
+        The `OrionisTestResult` class includes the following method overrides:
         The method uses the `this` reference to access the outer class's methods, such as
         `_extractErrorInfo`, for extracting and formatting error information.
         """
@@ -676,7 +802,7 @@ class UnitTest(IUnitTest):
         this = self
 
         # Define the custom test result class
-        class
+        class OrionisTestResult(unittest.TextTestResult):
             def __init__(self, *args, **kwargs):
                 super().__init__(*args, **kwargs)
                 self.test_results = []
@@ -769,10 +895,65 @@ class UnitTest(IUnitTest):
                 )
             )
 
-        # Return the dynamically created
-        return
+        # Return the dynamically created OrionisTestResult class
+        return OrionisTestResult
+
+    def _extractErrorInfo(
+        self,
+        traceback_str: str
+    ) -> Tuple[Optional[str], Optional[str]]:
+        """
+        Extract error information from a traceback string.
+        This method processes a traceback string to extract the file path of the Python file where the error occurred and
+        cleans up the traceback by removing framework internals and irrelevant noise.
+
+        Parameters
+        ----------
+        traceback_str : str
+            The traceback string to process.
+
+        Returns
+        -------
+        Tuple[Optional[str], Optional[str]]
+            A tuple containing:
+
+        Notes
+        -----
+        Framework internals and lines containing 'unittest/', 'lib/python', or 'site-packages' are removed from the traceback.
+        The cleaned traceback starts from the first occurrence of the test file path.
+        """
+        # Extract file path
+        file_matches = re.findall(r'File ["\'](.*?.py)["\']', traceback_str)
+        file_path = file_matches[-1] if file_matches else None
+
+        # Clean up traceback by removing framework internals and noise
+        tb_lines = traceback_str.split('\n')
+        clean_lines = []
+        relevant_lines_started = False
+
+        # Iterate through each line in the traceback
+        for line in tb_lines:
+
+            # Skip framework internal lines
+            if any(s in line for s in ['unittest/', 'lib/python', 'site-packages']):
+                continue
+
+            # Start capturing when we hit the test file
+            if file_path and file_path in line and not relevant_lines_started:
+                relevant_lines_started = True
+
+            if relevant_lines_started:
+                clean_lines.append(line)
+
+        clean_tb = str('\n').join(clean_lines) if clean_lines else traceback_str
+
+        return file_path, clean_tb
 
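A hedged sketch of `_extractErrorInfo()` on a representative traceback (the file names are invented):

```python
tb = '''Traceback (most recent call last):
  File "/usr/lib/python3.12/unittest/case.py", line 58, in testPartExecutor
    yield
  File "tests/testing/test_example.py", line 12, in test_addition
    self.assertEqual(2 + 2, 5)
AssertionError: 4 != 5
'''

file_path, clean_tb = ut._extractErrorInfo(tb)
# file_path -> "tests/testing/test_example.py"
# clean_tb  -> only the frames from the test file onward; the
#              "lib/python"/unittest internals are filtered out.
```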
-    def
+    def __generateSummary(
+        self,
+        result: unittest.TestResult,
+        execution_time: float
+    ) -> Dict[str, Any]:
         """
         Generate a summary of the test results, including statistics and details for each test.
 
@@ -842,7 +1023,7 @@ class UnitTest(IUnitTest):
         success_rate = (passed / result.testsRun * 100) if result.testsRun > 0 else 100.0
 
         # Create a summary report
-
+        self.__result = {
             "total_tests": result.testsRun,
             "passed": passed,
             "failed": len(result.failures),
@@ -856,33 +1037,19 @@ class UnitTest(IUnitTest):
 
         # Handle persistence of the report
         if self.persistent:
-            self.
+            self.__handlePersistResults(self.__result)
 
         # Handle Web Report Rendering
         if self.web_report:
-
-            # Generate the web report and get the path
-            path = self._webReport(report)
-
-            # Elegant invitation to view the results, with underlined path
-            invite_text = Text("Test results saved. ", style="green")
-            invite_text.append("View report: ", style="bold green")
-            invite_text.append(str(path), style="underline blue")
-            self.rich_console.print(invite_text)
+            self.__handleWebReport(self.__result)
 
         # Return the summary
-        return {
-            "total_tests": result.testsRun,
-            "passed": passed,
-            "failed": len(result.failures),
-            "errors": len(result.errors),
-            "skipped": len(result.skipped),
-            "total_time": float(execution_time),
-            "success_rate": success_rate,
-            "test_details": test_details
-        }
+        return self.__result
 
-    def
+    def __handleWebReport(
+        self,
+        summary: Dict[str, Any]
+    ) -> None:
         """
         Generates a web report for the test results summary.
 
@@ -903,6 +1070,7 @@ class UnitTest(IUnitTest):
         - If persistence is enabled and the driver is 'sqlite', the report is marked as persistent.
         - Returns the path to the generated report for further use.
         """
+
         # Determine the absolute path for storing results
         project = os.path.basename(os.getcwd())
         storage_path = os.path.abspath(os.path.join(os.getcwd(), self.base_path))
@@ -918,10 +1086,13 @@ class UnitTest(IUnitTest):
             persist=self.persistent and self.persistent_driver == 'sqlite'
         )
 
-        # Render the report and
-
+        # Render the report and print the web report link
+        self.printer.linkWebReport(render.render())
 
-    def
+    def __handlePersistResults(
+        self,
+        summary: Dict[str, Any]
+    ) -> None:
         """
         Persist the test results summary using the configured persistent driver.
 
@@ -945,6 +1116,7 @@ class UnitTest(IUnitTest):
         """
 
         try:
+
             # Determine the absolute path for storing results
             project = os.getcwd().split(os.sep)[-1]
             storage_path = None
@@ -977,64 +1149,22 @@ class UnitTest(IUnitTest):
             # Write the summary to the JSON file
             with open(log_path, 'w', encoding='utf-8') as log:
                 json.dump(summary, log, indent=4)
+
         except OSError as e:
-            raise OSError(f"Error creating directories or writing files: {str(e)}")
-        except Exception as e:
-            raise OrionisTestPersistenceError(f"Error persisting test results: {str(e)}")
 
-
-        """
-        Prints a summary table of test results using the Rich library.
+            # Raise an OSError if there is an issue with file or directory operations
+            raise OSError(f"Error creating directories or writing files: {str(e)}")
 
-        Parameters
-        ----------
-        summary : dict
-            Dictionary with the test summary data. Must contain the following keys:
-            total_tests : int
-                Total number of tests executed.
-            passed : int
-                Number of tests that passed.
-            failed : int
-                Number of tests that failed.
-            errors : int
-                Number of tests that had errors.
-            skipped : int
-                Number of tests that were skipped.
-            total_time : float
-                Total duration of the test execution in seconds.
-            success_rate : float
-                Percentage of tests that passed.
+        except Exception as e:
 
-        Returns
-        -------
-        None
-        """
-        table = Table(
-            show_header=True,
-            header_style="bold white",
-            width=self.width_output_component,
-            border_style="blue"
-        )
-        table.add_column("Total", justify="center")
-        table.add_column("Passed", justify="center")
-        table.add_column("Failed", justify="center")
-        table.add_column("Errors", justify="center")
-        table.add_column("Skipped", justify="center")
-        table.add_column("Duration", justify="center")
-        table.add_column("Success Rate", justify="center")
-        table.add_row(
-            str(summary["total_tests"]),
-            str(summary["passed"]),
-            str(summary["failed"]),
-            str(summary["errors"]),
-            str(summary["skipped"]),
-            f"{summary['total_time']:.2f}s",
-            f"{summary['success_rate']:.2f}%"
-        )
-        self.rich_console.print(table)
-        self.orionis_console.newLine()
+            # Raise a general exception for any other issues during persistence
+            raise OrionisTestPersistenceError(f"Error persisting test results: {str(e)}")
 
-    def
+    def __filterTestsByName(
+        self,
+        suite: unittest.TestSuite,
+        pattern: str
+    ) -> unittest.TestSuite:
         """
         Filters tests in a given test suite based on a specified name pattern.
         Parameters
@@ -1054,19 +1184,33 @@ class UnitTest(IUnitTest):
         Notes
         -----
         """
+
+        # Initialize an empty TestSuite to hold the filtered tests
         filtered_suite = unittest.TestSuite()
+
+        # Validate the pattern
         try:
             regex = re.compile(pattern)
         except re.error as e:
-            raise OrionisTestValueError(
+            raise OrionisTestValueError(
+                f"The provided test name pattern is invalid: '{pattern}'. "
+                f"Regular expression compilation error: {str(e)}. "
+                "Please check the pattern syntax and try again."
+            )
 
-
+        # Iterate through all tests in the suite and filter by the regex pattern
+        for test in self.__flattenTestSuite(suite):
             if regex.search(test.id()):
                 filtered_suite.addTest(test)
 
+        # Return the filtered suite containing only tests that match the pattern
         return filtered_suite
 
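Since the pattern is applied with `re.search` against full unittest ids, both method-level and class-level filters work. A sketch (test names hypothetical):

```python
# Keep only methods whose name starts with test_config
ut.discoverTestsInFolder(folder_path="testing", test_name_pattern=r"test_config\w*")

# Keep everything on one class, using the Class.method portion of the id
ut.discoverTestsInFolder(folder_path="testing", test_name_pattern=r"TestUnit\.")
```

An invalid regex is rejected up front with `OrionisTestValueError` instead of surfacing as a raw `re.error`.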
-    def
+    def __filterTestsByTags(
+        self,
+        suite: unittest.TestSuite,
+        tags: List[str]
+    ) -> unittest.TestSuite:
         """
         Filter tests in a unittest TestSuite by specified tags.
 
@@ -1091,7 +1235,7 @@ class UnitTest(IUnitTest):
         filtered_suite = unittest.TestSuite()
         tag_set = set(tags)
 
-        for test in self.
+        for test in self.__flattenTestSuite(suite):
 
             # Get test method if this is a TestCase instance
             test_method = getattr(test, test._testMethodName, None)
@@ -1111,250 +1255,9 @@ class UnitTest(IUnitTest):
         # Return the filtered suite containing only tests with matching tags
         return filtered_suite
 
-    def
-
-
-
-        Parameters
-        ----------
-        suite : unittest.TestSuite
-            The test suite to flatten, which may contain nested suites or test cases.
-
-        Returns
-        -------
-        List[unittest.TestCase]
-            A list containing all unique TestCase instances extracted from the suite.
-
-        Notes
-        -----
-        This method traverses the given TestSuite recursively, collecting all TestCase instances
-        and ensuring that each test appears only once in the resulting list.
-        """
-        tests = []
-        seen_ids = set()
-
-        def _flatten(item):
-            if isinstance(item, unittest.TestSuite):
-                for sub_item in item:
-                    _flatten(sub_item)
-            elif hasattr(item, "id"):
-                test_id = item.id()
-                parts = test_id.split('.')
-                if len(parts) >= 2:
-                    short_id = '.'.join(parts[-2:])
-                else:
-                    short_id = test_id
-                if short_id not in seen_ids:
-                    seen_ids.add(short_id)
-                    tests.append(item)
-
-        _flatten(suite)
-        return tests
-
-    def _sanitizeTraceback(self, test_path: str, traceback_test: str) -> str:
-        """
-        Sanitize a traceback string to extract and display the most relevant parts
-        related to a specific test file.
-
-        Parameters
-        ----------
-        test_path : str
-            The file path of the test file being analyzed.
-        traceback_test : str
-            The full traceback string to be sanitized.
-
-        Returns
-        -------
-        str
-            A sanitized traceback string containing only the relevant parts related to the test file.
-            If no relevant parts are found, the full traceback is returned.
-            If the traceback is empty, a default message "No traceback available for this test." is returned.
-        """
-        if not traceback_test:
-            return "No traceback available for this test."
-
-        # Try to extract the test file name
-        file_match = re.search(r'([^/\\]+)\.py', test_path)
-        file_name = file_match.group(1) if file_match else None
-
-        if not file_name:
-            return traceback_test
-
-        # Process traceback to show most relevant parts
-        lines = traceback_test.splitlines()
-        relevant_lines = []
-        found_test_file = False if file_name in traceback_test else True
-
-        for line in lines:
-            if file_name in line and not found_test_file:
-                found_test_file = True
-            if found_test_file:
-                if 'File' in line:
-                    relevant_lines.append(line.strip())
-                elif line.strip() != '':
-                    relevant_lines.append(line)
-
-        # If we didn't find the test file, return the full traceback
-        if not relevant_lines:
-            return traceback_test
-
-        # Remove any lines that are not relevant to the test file
-        return str('\n').join(relevant_lines)
-
-    def _displayResults(self, summary: Dict[str, Any]) -> None:
-        """
-        Display the results of the test execution, including a summary table and detailed
-        information about failed or errored tests grouped by their test classes.
-
-        Parameters
-        ----------
-        summary : dict
-            Dictionary containing the summary of the test execution, including test details,
-            statuses, and execution times.
-
-        Notes
-        -----
-        - Prints a summary table of the test results.
-        - Groups failed and errored tests by their test class and displays them in a structured
-          format using panels.
-        - For each failed or errored test, displays the traceback in a syntax-highlighted panel
-          with additional metadata such as the test method name and execution time.
-        - Uses different icons and border colors to distinguish between failed and errored tests.
-        - Calls a finishing message method after displaying all results.
-        """
-
-        # Print summary table
-        self._printSummaryTable(summary)
-
-        # Group failures and errors by test class
-        failures_by_class = {}
-        for test in summary["test_details"]:
-            if test["status"] in (TestStatus.FAILED.name, TestStatus.ERRORED.name):
-                class_name = test["class"]
-                if class_name not in failures_by_class:
-                    failures_by_class[class_name] = []
-                failures_by_class[class_name].append(test)
-
-        # Display grouped failures
-        for class_name, tests in failures_by_class.items():
-
-            class_panel = Panel.fit(f"[bold]{class_name}[/bold]", border_style="red", padding=(0, 2))
-            self.rich_console.print(class_panel)
-
-            for test in tests:
-                traceback_str = self._sanitizeTraceback(test['file_path'], test['traceback'])
-                syntax = Syntax(
-                    traceback_str,
-                    lexer="python",
-                    line_numbers=False,
-                    background_color="default",
-                    word_wrap=True,
-                    theme="monokai"
-                )
-
-                icon = "❌" if test["status"] == TestStatus.FAILED.name else "💥"
-                border_color = "yellow" if test["status"] == TestStatus.FAILED.name else "red"
-
-                # Ensure execution time is never zero for display purposes
-                if not test['execution_time'] or test['execution_time'] == 0:
-                    test['execution_time'] = 0.001
-
-                panel = Panel(
-                    syntax,
-                    title=f"{icon} {test['method']}",
-                    subtitle=f"Duration: {test['execution_time']:.3f}s",
-                    border_style=border_color,
-                    title_align="left",
-                    padding=(1, 1),
-                    subtitle_align="right",
-                    width=self.width_output_component
-                )
-                self.rich_console.print(panel)
-                self.orionis_console.newLine()
-
-        self._finishMessage(summary)
-
-    def _extractErrorInfo(self, traceback_str: str) -> Tuple[Optional[str], Optional[str]]:
-        """
-        Extract error information from a traceback string.
-        This method processes a traceback string to extract the file path of the Python file where the error occurred and
-        cleans up the traceback by removing framework internals and irrelevant noise.
-
-        Parameters
-        ----------
-        traceback_str : str
-            The traceback string to process.
-
-        Returns
-        -------
-        Tuple[Optional[str], Optional[str]]
-            A tuple containing:
-
-        Notes
-        -----
-        Framework internals and lines containing 'unittest/', 'lib/python', or 'site-packages' are removed from the traceback.
-        The cleaned traceback starts from the first occurrence of the test file path.
-        """
-        # Extract file path
-        file_matches = re.findall(r'File ["\'](.*?.py)["\']', traceback_str)
-        file_path = file_matches[-1] if file_matches else None
-
-        # Clean up traceback by removing framework internals and noise
-        tb_lines = traceback_str.split('\n')
-        clean_lines = []
-        relevant_lines_started = False
-
-        for line in tb_lines:
-            # Skip framework internal lines
-            if any(s in line for s in ['unittest/', 'lib/python', 'site-packages']):
-                continue
-
-            # Start capturing when we hit the test file
-            if file_path and file_path in line and not relevant_lines_started:
-                relevant_lines_started = True
-
-            if relevant_lines_started:
-                clean_lines.append(line)
-
-        clean_tb = str('\n').join(clean_lines) if clean_lines else traceback_str
-
-        return file_path, clean_tb
-
-    def _finishMessage(self, summary: Dict[str, Any]) -> None:
-        """
-        Display a summary message for the test suite execution.
-
-        Parameters
-        ----------
-        summary : dict
-            Dictionary containing the test suite summary, including keys such as
-            'failed', 'errors', and 'total_time'.
-
-        Notes
-        -----
-        - If `self.print_result` is False, the method returns without displaying anything.
-        - Shows a status icon (✅ for success, ❌ for failure) based on the presence of
-          failures or errors in the test suite.
-        - Formats and prints the message within a styled panel using the `rich` library.
-        """
-        if not self.print_result:
-            return
-
-        status_icon = "✅" if (summary['failed'] + summary['errors']) == 0 else "❌"
-        msg = f"Test suite completed in {summary['total_time']:.2f} seconds"
-        self.rich_console.print(
-            Panel(
-                msg,
-                border_style="blue",
-                title=f"{status_icon} Test Suite Finished",
-                title_align='left',
-                width=self.width_output_component,
-                padding=(0, 1)
-            )
-        )
-        self.rich_console.print()
-
-    def getTestNames(self) -> List[str]:
+    def getTestNames(
+        self
+    ) -> List[str]:
         """
         Get a list of test names (unique identifiers) from the test suite.
 
@@ -1363,9 +1266,11 @@ class UnitTest(IUnitTest):
         List[str]
             List of test names (unique identifiers) from the test suite.
         """
-        return [test.id() for test in self._flattenTestSuite(self.suite)]
+        return [test.id() for test in self.__flattenTestSuite(self.suite)]
 
-    def getTestCount(self) -> int:
+    def getTestCount(
+        self
+    ) -> int:
         """
         Returns the total number of test cases in the test suite.
 
@@ -1374,9 +1279,11 @@ class UnitTest(IUnitTest):
         int
             The total number of individual test cases in the suite.
         """
-        return len(list(self._flattenTestSuite(self.suite)))
+        return len(list(self.__flattenTestSuite(self.suite)))
 
-    def clearTests(self) -> None:
+    def clearTests(
+        self
+    ) -> None:
         """
         Clear all tests from the current test suite.
 
@@ -1384,7 +1291,9 @@ class UnitTest(IUnitTest):
         """
         self.suite = unittest.TestSuite()
 
-    def getResult(self) -> dict:
+    def getResult(
+        self
+    ) -> dict:
         """
         Returns the results of the executed test suite.
 
@@ -1395,7 +1304,9 @@ class UnitTest(IUnitTest):
         """
         return self.__result
 
-    def getOutputBuffer(self) -> int:
+    def getOutputBuffer(
+        self
+    ) -> int:
         """
         Returns the output buffer used for capturing test results.
         This method returns the internal output buffer that collects the results of the test execution.
 
@@ -1406,15 +1317,18 @@ class UnitTest(IUnitTest):
         """
         return self.__output_buffer
 
-    def printOutputBuffer(self) -> None:
+    def printOutputBuffer(
+        self
+    ) -> None:
         """
         Prints the contents of the output buffer to the console.
         This method retrieves the output buffer and prints its contents using the rich console.
         """
-
-        print(self.__output_buffer)
+        self.printer.print(self.__output_buffer)
 
-    def getErrorBuffer(self) -> int:
+    def getErrorBuffer(
+        self
+    ) -> int:
         """
         Returns the error buffer used for capturing test errors.
         This method returns the internal error buffer that collects any errors encountered during test execution.
 
@@ -1425,10 +1339,11 @@ class UnitTest(IUnitTest):
         """
         return self.__error_buffer
 
-    def printErrorBuffer(self) -> None:
+    def printErrorBuffer(
+        self
+    ) -> None:
         """
         Prints the contents of the error buffer to the console.
         This method retrieves the error buffer and prints its contents using the rich console.
         """
-
-        print(self.__error_buffer)
+        self.printer.print(self.__error_buffer)
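Taken together, the refactor leaves a compact public surface. A hedged end-to-end sketch of the 0.316.0 flow shown in this diff:

```python
ut = UnitTest().configure(print_result=True, throw_exception=False)
ut.discoverTestsInFolder(folder_path="testing")
summary = ut.run()

names = ut.getTestNames()      # unique unittest ids
count = ut.getTestCount()      # number of individual test cases
assert ut.getResult() == summary
ut.printOutputBuffer()         # replay captured stdout via TestPrinter
```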