orionis 0.313.0__py3-none-any.whl → 0.315.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
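
Reading the hunks below, the headline changes in 0.315.0 are: the `rich`-based console rendering (and its imports) is removed from `UnitTest` and delegated to the new `orionis.test.output.printer.TestPrinter`; the public methods become keyword-only and validate their arguments eagerly; several helpers are renamed to name-mangled private methods; and `configure()` now takes its `max_workers` default from `Workers().calculate()`. A minimal sketch of the new configuration call, inferred from the signatures in this diff (the import path for `UnitTest` itself is an assumption, since the changed file's path is not shown here):

    # Assumed import path for the class changed in this diff
    from orionis.test.suites.test_unit import UnitTest
    from orionis.test.enums.test_mode import ExecutionMode

    unit = UnitTest().configure(
        verbosity=2,                              # validated: must be 0, 1, or 2
        execution_mode=ExecutionMode.SEQUENTIAL,  # or the string 'PARALLEL'
        max_workers=4,                            # validated: positive integer
        fail_fast=False,
        print_result=True,
        throw_exception=False,
        persistent=True,
        persistent_driver='json',                 # validated: 'sqlite' or 'json'
        web_report=False,
    )
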
@@ -11,13 +11,7 @@ from contextlib import redirect_stdout, redirect_stderr
  from datetime import datetime
  from pathlib import Path
  from typing import Any, Dict, List, Optional, Tuple
- from rich.console import Console as RichConsole
- from rich.live import Live
- from rich.panel import Panel
- from rich.syntax import Syntax
- from rich.table import Table
- from rich.text import Text
- from orionis.console.output.console import Console
+ from orionis.services.system.workers import Workers
  from orionis.test.entities.test_result import TestResult
  from orionis.test.enums.test_mode import ExecutionMode
  from orionis.test.enums.test_status import TestStatus
@@ -26,482 +20,575 @@ from orionis.test.exceptions.test_persistence_error import OrionisTestPersistenc
  from orionis.test.exceptions.test_value_error import OrionisTestValueError
  from orionis.test.logs.history import TestHistory
  from orionis.test.contracts.test_unit import IUnitTest
+ from orionis.test.output.printer import TestPrinter
  from orionis.test.view.render import TestingResultRender

  class UnitTest(IUnitTest):
      """
-     UnitTest is a comprehensive testing utility class for discovering, configuring, and executing unit tests.
-
-     This class supports both sequential and parallel test execution, customizable verbosity, fail-fast behavior,
-     and rich output formatting using the `rich` library.
-
-     Attributes
-     ----------
-     loader : unittest.TestLoader
-         The test loader used to discover and load tests.
-     suite : unittest.TestSuite
-         The test suite containing the discovered tests.
-     test_results : list of TestResult
-         A list to store the results of executed tests.
-     start_time : float
-         The start time of the test execution.
-     print_result : bool
-         Flag to determine whether to print test results.
-     verbosity : int
-         The verbosity level for test output.
-     execution_mode : str
-         The mode of test execution (e.g., 'SEQUENTIAL' or 'PARALLEL').
-     max_workers : int
-         The maximum number of workers for parallel execution.
-     fail_fast : bool
-         Flag to stop execution on the first failure.
-     rich_console : RichConsole
-         Console for rich text output.
-     orionis_console : Console
-         Console for standard output.
-     discovered_tests : list
-         A list to store discovered test cases.
-     width_output_component : int
-         The width of the table for displaying results.
-     throw_exception : bool
-         Flag to determine whether to throw exceptions on test failures.
-     persistent : bool
-         Flag to determine whether to persist test results in a database.
-     base_path : str
-         The base directory for test discovery and persistence.
+     Orionis UnitTest
+
+     The main class of the Orionis framework for advanced unit test management.
+
+     This class provides a comprehensive solution for discovering, executing, and reporting unit tests in a flexible and configurable way, surpassing the usual limitations of traditional frameworks.
+     It supports sequential or parallel execution, filtering by name or tags, and detailed result capture, including timings, errors, and tracebacks.
+
+     It includes persistence options in multiple formats (SQLite or JSON) and generates rich reports both in the console and on the web.
+     Its intuitive interface and high degree of customization make it easy to integrate into CI/CD pipelines and adapt to the specific needs of any project.
+
+     This is an especially suitable choice for those seeking greater robustness, traceability, and visibility in their automated testing processes, offering advantages often missing from other alternatives.
      """

      def __init__(self) -> None:
          """
-         Initialize the UnitTest instance with default configurations.
+         Initializes the test suite configuration and supporting components.

          Parameters
          ----------
-         self : UnitTest
-             The instance of the UnitTest class.
+         None

          Attributes
          ----------
-         loader : unittest.TestLoader
-             The test loader used to discover tests.
-         suite : unittest.TestSuite
-             The test suite to hold the discovered tests.
-         test_results : list of TestResult
-             A list to store the results of executed tests.
-         start_time : float
-             The start time of the test execution.
-         print_result : bool
-             Flag to determine whether to print test results.
          verbosity : int
-             The verbosity level for test output.
+             Level of verbosity for test output.
          execution_mode : str
-             The mode of test execution (e.g., 'SEQUENTIAL' or 'PARALLEL').
+             Mode in which tests are executed.
          max_workers : int
-             The maximum number of workers for parallel execution.
+             Maximum number of worker threads/processes.
          fail_fast : bool
-             Flag to stop execution on the first failure.
-         rich_console : RichConsole
-             Console for rich text output.
-         orionis_console : Console
-             Console for standard output.
-         discovered_tests : list
-             A list to store discovered test cases.
-         width_output_component : int
-             The width of the table for displaying results.
+             Whether to stop on the first test failure.
+         print_result : bool
+             Whether to print test results to the console.
          throw_exception : bool
-             Flag to determine whether to throw exceptions on test failures.
+             Whether to raise exceptions on test failures.
          persistent : bool
-             Flag to determine whether to persist test results in a database.
+             Whether to use persistent storage for test results.
+         persistent_driver : str
+             Driver used for persistent storage.
+         web_report : bool
+             Whether to generate a web-based report.
+         full_path : Optional[str]
+             Full path for test discovery.
+         folder_path : str
+             Folder path for test discovery.
          base_path : str
-             The base directory for test discovery and persistence.
+             Base path for test discovery.
+         pattern : str
+             Pattern to match test files.
+         test_name_pattern : Optional[str]
+             Pattern to match test names.
+         tags : Optional[List[str]]
+             Tags to filter tests.
+         module_name : str
+             Name of the module for test discovery.
+         loader : unittest.TestLoader
+             Loader for discovering tests.
+         suite : unittest.TestSuite
+             Test suite to hold discovered tests.
+         discovered_tests : list
+             List of discovered tests.
+         printer : TestPrinter
+             Utility for printing test results to the console.
+         __output_buffer
+             Buffer for capturing standard output during tests.
+         __error_buffer
+             Buffer for capturing error output during tests.
+         __result
+             Result of the test execution.
          """
+
+         # Values for configuration
+         self.verbosity: int
+         self.execution_mode: str
+         self.max_workers: int
+         self.fail_fast: bool
+         self.print_result: bool
+         self.throw_exception: bool
+         self.persistent: bool
+         self.persistent_driver: str
+         self.web_report: bool
+
+         # Values for discovering tests in folders
+         self.full_path: Optional[str]
+         self.folder_path: str
+         self.base_path: str
+         self.pattern: str
+         self.test_name_pattern: Optional[str]
+         self.tags: Optional[List[str]]
+
+         # Values for discovering tests in modules
+         self.module_name: str
+         self.test_name_pattern: Optional[str]
+
+         # Initialize the test loader and suite
          self.loader = unittest.TestLoader()
          self.suite = unittest.TestSuite()
-         self.test_results: List[TestResult] = []
-         self.start_time: float = 0.0
-         self.print_result: bool = True
-         self.verbosity: int = 2
-         self.execution_mode: str = ExecutionMode.SEQUENTIAL.value
-         self.max_workers: int = 4
-         self.fail_fast: bool = False
-         self.rich_console = RichConsole()
-         self.orionis_console = Console()
          self.discovered_tests: List = []
-         self.width_output_component: int = int(self.rich_console.width * 0.75)
-         self.throw_exception: bool = False
-         self.persistent: bool = False
-         self.persistent_driver: str = 'sqlite'
-         self.web_report: bool = False
-         self.base_path: str = "tests"
-         self.withliveconsole: bool = True
+
+         # Initialize the class for printing in the console
+         self.printer = TestPrinter()
+
+         # Variables for capturing output and error streams
          self.__output_buffer = None
          self.__error_buffer = None
+
+         # Result of the test execution
          self.__result = None

      def configure(
          self,
-         verbosity: int = None,
-         execution_mode: str | ExecutionMode = None,
-         max_workers: int = None,
-         fail_fast: bool = None,
-         print_result: bool = None,
+         *,
+         verbosity: int = 2,
+         execution_mode: str | ExecutionMode = ExecutionMode.SEQUENTIAL,
+         max_workers: int = Workers().calculate(),
+         fail_fast: bool = False,
+         print_result: bool = True,
          throw_exception: bool = False,
          persistent: bool = False,
          persistent_driver: str = 'sqlite',
          web_report: bool = False
      ) -> 'UnitTest':
          """
-         Configures the UnitTest instance with the specified parameters.
+         Configure the UnitTest instance with various execution and reporting options.

          Parameters
          ----------
          verbosity : int, optional
-             The verbosity level for test output. If None, the current setting is retained.
+             Level of output verbosity.
          execution_mode : str or ExecutionMode, optional
-             The mode in which the tests will be executed ('SEQUENTIAL' or 'PARALLEL'). If None, the current setting is retained.
+             Test execution mode.
          max_workers : int, optional
-             The maximum number of workers to use for parallel execution. If None, the current setting is retained.
+             Maximum number of worker threads/processes for parallel execution. Must be a positive integer.
          fail_fast : bool, optional
-             Whether to stop execution upon the first failure. If None, the current setting is retained.
+             If True, stop execution on first failure.
          print_result : bool, optional
-             Whether to print the test results after execution. If None, the current setting is retained.
-         throw_exception : bool, optional
-             Whether to throw an exception if any test fails. Defaults to False.
-         persistent : bool, optional
-             Whether to persist the test results in a database. Defaults to False.
-         persistent_driver : str, optional
-             The driver to use for persistent storage. Defaults to 'sqlite'.
+             If True, print test results to the console.
+         throw_exception : bool, default: False
+             If True, raise exceptions on test failures.
+         persistent : bool, default: False
+             If True, enable persistent storage of test results.
+         persistent_driver : str, default: 'sqlite'
+             Backend for persistent storage. Must be 'sqlite' or 'json'.
+         web_report : bool, default: False
+             If True, enable web-based reporting.

          Returns
          -------
          UnitTest
              The configured UnitTest instance.
+
+         Raises
+         ------
+         OrionisTestValueError
+             If any parameter value is invalid.
          """
+
+         # Validate and set verbosity
          if verbosity is not None:
-             self.verbosity = verbosity
+             if isinstance(verbosity, int) and verbosity in [0, 1, 2]:
+                 self.verbosity = verbosity
+             else:
+                 raise OrionisTestValueError("Verbosity must be an integer: 0 (quiet), 1 (default), or 2 (verbose).")

+         # Validate and set execution mode
          if execution_mode is not None and isinstance(execution_mode, ExecutionMode):
              self.execution_mode = execution_mode.value
          else:
-             self.execution_mode = execution_mode
+             if isinstance(execution_mode, str) and execution_mode in [ExecutionMode.SEQUENTIAL.value, ExecutionMode.PARALLEL.value]:
+                 self.execution_mode = execution_mode
+             else:
+                 raise OrionisTestValueError("Execution mode must be 'SEQUENTIAL' or 'PARALLEL'.")

+         # Validate and set max_workers
          if max_workers is not None:
-             self.max_workers = max_workers
+             if isinstance(max_workers, int) and max_workers > 0:
+                 self.max_workers = max_workers
+             else:
+                 raise OrionisTestValueError("Max workers must be a positive integer.")

+         # Validate and set fail_fast
          if fail_fast is not None:
-             self.fail_fast = fail_fast
+             if isinstance(fail_fast, bool):
+                 self.fail_fast = fail_fast
+             else:
+                 raise OrionisTestValueError("Fail fast must be a boolean value.")

+         # Validate and set print_result
          if print_result is not None:
-             self.print_result = print_result
+             if isinstance(print_result, bool):
+                 self.print_result = print_result
+             else:
+                 raise OrionisTestValueError("Print result must be a boolean value.")

+         # Validate and set throw_exception
          if throw_exception is not None:
-             self.throw_exception = throw_exception
+             if isinstance(throw_exception, bool):
+                 self.throw_exception = throw_exception
+             else:
+                 raise OrionisTestValueError("Throw exception must be a boolean value.")

+         # Validate and set persistent
          if persistent is not None:
-             self.persistent = persistent
+             if isinstance(persistent, bool):
+                 self.persistent = persistent
+             else:
+                 raise OrionisTestValueError("Persistent must be a boolean value.")

+         # Validate and set persistent_driver
          if persistent_driver is not None:
-             self.persistent_driver = persistent_driver
+             if isinstance(persistent_driver, str) and persistent_driver in ['sqlite', 'json']:
+                 self.persistent_driver = persistent_driver
+             else:
+                 raise OrionisTestValueError("Persistent driver must be 'sqlite' or 'json'.")

+         # Validate and set web_report
          if web_report is not None:
-             self.web_report = web_report
+             if isinstance(web_report, bool):
+                 self.web_report = web_report
+             else:
+                 raise OrionisTestValueError("Web report must be a boolean value.")

+         # Return the configured instance
          return self
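
Where 0.313.0 silently assigned whatever it received (including `execution_mode=None` falling through the `else` branch), 0.315.0 type-checks every argument and rejects bad values. A short sketch of the new failure mode, using the exception and message text visible in the hunk above:

    from orionis.test.exceptions.test_value_error import OrionisTestValueError

    try:
        unit.configure(verbosity=5)  # not one of 0, 1, 2
    except OrionisTestValueError as exc:
        print(exc)  # Verbosity must be an integer: 0 (quiet), 1 (default), or 2 (verbose).
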

      def discoverTestsInFolder(
          self,
-         folder_path: str,
+         *,
          base_path: str = "tests",
+         folder_path: str,
          pattern: str = "test_*.py",
          test_name_pattern: Optional[str] = None,
          tags: Optional[List[str]] = None
      ) -> 'UnitTest':
          """
+         Discover and add unit tests from a specified folder to the test suite.
+
+         Searches for test files in the given folder path, optionally filtering by file name pattern,
+         test name pattern, and tags. Discovered tests are added to the suite, and information about
+         the discovery is recorded.
+
          Parameters
          ----------
-         folder_path : str
-             The relative path to the folder containing the tests.
          base_path : str, optional
-             The base directory where the test folder is located. Defaults to "tests".
+             The base directory to search for tests. Defaults to "tests".
+         folder_path : str
+             The relative path to the folder containing test files.
          pattern : str, optional
-             The filename pattern to match test files. Defaults to "test_*.py".
-         test_name_pattern : str or None, optional
+             The file name pattern to match test files. Defaults to "test_*.py".
+         test_name_pattern : Optional[str], optional
              A pattern to filter test names. Defaults to None.
-         tags : list of str or None, optional
+         tags : Optional[List[str]], optional
              A list of tags to filter tests. Defaults to None.

          Returns
          -------
          UnitTest
-             The current instance of the UnitTest class with the discovered tests added.
+             The current instance with discovered tests added to the suite.

          Raises
          ------
          OrionisTestValueError
-             If the test folder does not exist, no tests are found, or an error occurs during test discovery.
-
-         Notes
-         -----
-         This method updates the internal test suite with the discovered tests and tracks the number of tests found.
+             If any argument is invalid, the folder does not exist, no tests are found,
+             or if there are import or discovery errors.
          """
+
+         # Validate folder_path
+         if folder_path is None or not isinstance(folder_path, str):
+             raise OrionisTestValueError(
+                 f"Invalid folder_path: Expected a non-empty string, got '{folder_path}' ({type(folder_path).__name__})."
+             )
+         self.folder_path = folder_path
+
+         # Validate base_path and set value
+         if base_path is None or not isinstance(base_path, str):
+             raise OrionisTestValueError(
+                 f"Invalid base_path: Expected a non-empty string, got '{base_path}' ({type(base_path).__name__})."
+             )
+         self.base_path = base_path
+
+         # Validate pattern
+         if pattern is None or not isinstance(pattern, str):
+             raise OrionisTestValueError(
+                 f"Invalid pattern: Expected a non-empty string, got '{pattern}' ({type(pattern).__name__})."
+             )
+         self.pattern = pattern
+
+         # Validate test_name_pattern
+         if test_name_pattern is not None:
+             if not isinstance(test_name_pattern, str):
+                 raise OrionisTestValueError(
+                     f"Invalid test_name_pattern: Expected a string, got '{test_name_pattern}' ({type(test_name_pattern).__name__})."
+                 )
+             self.test_name_pattern = test_name_pattern
+
+         # Validate tags
+         if tags is not None:
+             if not isinstance(tags, list) or not all(isinstance(tag, str) for tag in tags):
+                 raise OrionisTestValueError(
+                     f"Invalid tags: Expected a list of strings, got '{tags}' ({type(tags).__name__})."
+                 )
+             self.tags = tags
+
+         # Try to discover tests in the specified folder
          try:
-             self.base_path = base_path

-             full_path = Path(base_path) / folder_path
+             # Ensure the folder path is absolute
+             full_path = Path(self.base_path) / self.folder_path
              if not full_path.exists():
-                 raise OrionisTestValueError(f"Test folder not found: {full_path}")
+                 raise OrionisTestValueError(
+                     f"Test folder not found at the specified path: '{full_path}'. "
+                     "Please verify that the path is correct and the folder exists."
+                 )
+             self.full_path = str(full_path.resolve())

+             # Discover tests using the unittest TestLoader
              tests = self.loader.discover(
                  start_dir=str(full_path),
                  pattern=pattern,
                  top_level_dir=None
              )

+             # If a name pattern is provided, filter tests by name
              if test_name_pattern:
-                 tests = self._filterTestsByName(tests, test_name_pattern)
+                 tests = self.__filterTestsByName(
+                     suite=tests,
+                     pattern=test_name_pattern
+                 )

+             # If tags are provided, filter tests by tags
              if tags:
-                 tests = self._filterTestsByTags(tests, tags)
+                 tests = self.__filterTestsByTags(
+                     suite=tests,
+                     tags=tags
+                 )

+             # If no tests are found, raise an error
              if not list(tests):
-                 raise OrionisTestValueError(f"No tests found in '{full_path}' matching pattern '{pattern}'")
+                 raise OrionisTestValueError(
+                     f"No tests were found in the path '{full_path}' matching the file pattern '{pattern}'"
+                     + (f" and the test name pattern '{test_name_pattern}'" if test_name_pattern else "")
+                     + (f" and the tags {tags}" if tags else "") +
+                     ".\nPlease ensure that test files exist and that the patterns and tags are correct."
+                 )

+             # Add discovered tests to the suite
              self.suite.addTests(tests)

-             test_count = len(list(self._flattenTestSuite(tests)))
+             # Count the number of tests discovered
+             # Using __flattenTestSuite to ensure we count all individual test cases
+             test_count = len(list(self.__flattenTestSuite(tests)))
+
+             # Append the discovered tests information
              self.discovered_tests.append({
                  "folder": str(full_path),
                  "test_count": test_count,
              })

+             # Return the current instance
              return self

          except ImportError as e:
-             raise OrionisTestValueError(f"Error importing tests from '{full_path}': {str(e)}")
+
+             # Raise a specific error if the import fails
+             raise OrionisTestValueError(
+                 f"Error importing tests from path '{full_path}': {str(e)}.\n"
+                 "Please verify that the directory and test modules are accessible and correct."
+             )
          except Exception as e:
-             raise OrionisTestValueError(f"Unexpected error discovering tests: {str(e)}")

-     def discoverTestsInModule(self, module_name: str, test_name_pattern: Optional[str] = None) -> 'UnitTest':
+             # Raise a general error for unexpected issues
+             raise OrionisTestValueError(
+                 f"Unexpected error while discovering tests in '{full_path}': {str(e)}.\n"
+                 "Ensure that the test files are valid and that there are no syntax errors or missing dependencies."
+             )
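
`folder_path` is now keyword-only and sits after `base_path`, so a positional call that worked in 0.313.0 (`discoverTestsInFolder("unit")`) raises a `TypeError` in 0.315.0. A sketch of the new call shape (folder and pattern values are illustrative):

    unit.discoverTestsInFolder(
        base_path="tests",                # root directory for discovery
        folder_path="unit/services",      # illustrative relative folder
        pattern="test_*.py",
        test_name_pattern="test_http.*",  # optional regex over test ids
        tags=["network"],                 # optional tag filter
    )
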
+
+     def discoverTestsInModule(
+         self,
+         *,
+         module_name: str,
+         test_name_pattern: Optional[str] = None
+     ) -> 'UnitTest':
          """
-         Discovers and loads tests from a specified module, optionally filtering by a test name pattern, and adds them to the test suite.
+         Discover and add unit tests from a specified module to the test suite.

          Parameters
          ----------
          module_name : str
-             Name of the module from which to discover tests.
-         test_name_pattern : str, optional
-             Pattern to filter test names. Only tests matching this pattern will be included. Defaults to None.
+             The name of the module from which to discover tests. Must be a non-empty string.
+         test_name_pattern : Optional[str], optional
+             A pattern to filter test names. If provided, only tests matching this pattern will be included.

          Returns
          -------
          UnitTest
-             The current instance of the UnitTest class, allowing method chaining.
+             The current instance with the discovered tests added to the suite.

-         Exceptions
-         ----------
+         Raises
+         ------
          OrionisTestValueError
-             If the specified module cannot be imported.
+             If the module_name is invalid, the test_name_pattern is invalid, the module cannot be imported,
+             or any unexpected error occurs during test discovery.
+
+         Notes
+         -----
+         - The method validates the input parameters before attempting to discover tests.
+         - If a test_name_pattern is provided, only tests matching the pattern are included.
+         - Information about the discovered tests is appended to the 'discovered_tests' attribute.
          """
+
+         # Validate module_name
+         if not module_name or not isinstance(module_name, str):
+             raise OrionisTestValueError(
+                 f"Invalid module_name: Expected a non-empty string, got '{module_name}' ({type(module_name).__name__})."
+             )
+         self.module_name = module_name
+
+         # Validate test_name_pattern
+         if test_name_pattern is not None and not isinstance(test_name_pattern, str):
+             raise OrionisTestValueError(
+                 f"Invalid test_name_pattern: Expected a string, got '{test_name_pattern}' ({type(test_name_pattern).__name__})."
+             )
+         self.test_name_pattern = test_name_pattern
+
+         # Try to load tests from the specified module
          try:

-             tests = self.loader.loadTestsFromName(module_name)
+             # Load the tests from the specified module
+             tests = self.loader.loadTestsFromName(
+                 name=module_name
+             )

+             # If a test_name_pattern is provided, filter tests by name
              if test_name_pattern:
-                 tests = self._filterTestsByName(tests, test_name_pattern)
+                 tests = self.__filterTestsByName(
+                     suite=tests,
+                     pattern=test_name_pattern
+                 )

+             # Add the discovered tests to the suite
              self.suite.addTests(tests)

-             test_count = len(list(self._flattenTestSuite(tests)))
+             # Count the number of tests discovered
+             test_count = len(list(self.__flattenTestSuite(tests)))
+
+             # Append the discovered tests information
              self.discovered_tests.append({
                  "module": module_name,
                  "test_count": test_count,
              })

+             # Return the current instance
              return self
-         except ImportError as e:
-             raise OrionisTestValueError(f"Error importing module '{module_name}': {str(e)}")
-
-     def _startMessage(self) -> None:
-         """
-         Prints a formatted message indicating the start of the test suite execution.
-
-         Parameters
-         ----------
-         self : UnitTest
-             The instance of the UnitTest class.

-         Notes
-         -----
-         This method displays details about the test suite, including the total number of tests,
-         the execution mode (parallel or sequential), and the start time. The message is styled
-         and displayed using the `rich` library.
+         except ImportError as e:

-         Attributes Used
-         --------------
-         print_result : bool
-             Determines whether the message should be printed.
-         suite : unittest.TestSuite
-             The test suite containing the tests to be executed.
-         max_workers : int
-             The number of workers used in parallel execution mode.
-         execution_mode : str
-             The mode of execution ('SEQUENTIAL' or 'PARALLEL').
-         orionis_console : Console
-             The console object for handling standard output.
-         rich_console : RichConsole
-             The rich console object for styled output.
-         width_output_component : int
-             The calculated width of the message panel for formatting.
-         """
-         if self.print_result:
-             test_count = len(list(self._flattenTestSuite(self.suite)))
-             mode_text = f"[stat]Parallel with {self.max_workers} workers[/stat]" if self.execution_mode == ExecutionMode.PARALLEL.value else "Sequential"
-             textlines = [
-                 f"[bold]Total Tests:[/bold] [dim]{test_count}[/dim]",
-                 f"[bold]Mode:[/bold] [dim]{mode_text}[/dim]",
-                 f"[bold]Started at:[/bold] [dim]{datetime.now().strftime('%Y-%m-%d %H:%M:%S')}[/dim]"
-             ]
-
-             self.orionis_console.newLine()
-             self.rich_console.print(
-                 Panel(
-                     str('\n').join(textlines),
-                     border_style="blue",
-                     title="🧪 Orionis Framework - Component Test Suite",
-                     title_align="center",
-                     width=self.width_output_component,
-                     padding=(0, 1)
-                 )
+             # Raise a specific error if the import fails
+             raise OrionisTestValueError(
+                 f"Error importing tests from module '{module_name}': {str(e)}.\n"
+                 "Please verify that the module exists, is accessible, and contains valid test cases."
              )
-             self.orionis_console.newLine()
-
-     def run(self, print_result: bool = None, throw_exception: bool = None) -> Dict[str, Any]:
-         """
-         Executes the test suite and processes the results.
-
-         Parameters
-         ----------
-         print_result : bool, optional
-             If provided, overrides the instance's `print_result` attribute to determine whether to print results.
-         throw_exception : bool, optional
-             If True, raises an exception if any test failures or errors are detected.
-
-         Returns
-         -------
-         dict
-             A summary of the test execution, including details such as execution time, results, and timestamp.
-
-         Raises
-         ------
-         OrionisTestFailureException
-             If `throw_exception` is True and there are test failures or errors.
-         """
+         except Exception as e:

-         # Check if required print_result and throw_exception
-         if print_result is not None:
-             self.print_result = print_result
-         if throw_exception is not None:
-             self.throw_exception = throw_exception
+             # Raise a general error for unexpected issues
+             raise OrionisTestValueError(
+                 f"Unexpected error while discovering tests in module '{module_name}': {str(e)}.\n"
+                 "Ensure that the module name is correct, the test methods are valid, and there are no syntax errors or missing dependencies."
+             )
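
Module discovery follows the same keyword-only convention and validates both arguments before calling `loadTestsFromName`. A sketch (the module name is illustrative):

    unit.discoverTestsInModule(
        module_name="tests.unit.test_example",  # must be a non-empty string
        test_name_pattern="test_.*",            # optional regex filter
    )
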

-         # Dynamically determine if live console should be enabled based on test code usage
-         self._withLiveConsole()
+     def run(
+         self
+     ) -> Dict[str, Any]:

          # Start the timer and print the start message
-         self.start_time = time.time()
-         self._startMessage()
-
-         # Prepare the running message based on whether live console is enabled
-         if self.print_result:
-             message = "[bold yellow]⏳ Running tests...[/bold yellow]\n"
-             message += "[dim]This may take a few seconds. Please wait...[/dim]" if self.withliveconsole else "[dim]Please wait, results will appear below...[/dim]"
-
-             # Panel for running message
-             running_panel = Panel(
-                 message,
-                 border_style="yellow",
-                 title="In Progress",
-                 title_align="left",
-                 width=self.width_output_component,
-                 padding=(1, 2)
-             )
+         start_time = time.time()
+
+         # Print the start message
+         self.printer.startMessage(
+             print_result=self.print_result,
+             length_tests=len(list(self.__flattenTestSuite(self.suite))),
+             execution_mode=self.execution_mode,
+             max_workers=self.max_workers
+         )

-             # Elegant "running" message using Rich Panel
-             if self.withliveconsole:
-                 with Live(running_panel, console=self.rich_console, refresh_per_second=4, transient=True):
-                     result, output_buffer, error_buffer = self._runSuite()
-             else:
-                 self.rich_console.print(running_panel)
-                 result, output_buffer, error_buffer = self._runSuite()
-         else:
-             # If not printing results, run the suite without live console
-             result, output_buffer, error_buffer = self._runSuite()
+         # Execute the test suite and capture the results
+         result, output_buffer, error_buffer = self.printer.executePanel(
+             print_result=self.print_result,
+             flatten_test_suite=self.__flattenTestSuite(self.suite),
+             callable=self.__runSuite
+         )

          # Save Outputs
          self.__output_buffer = output_buffer.getvalue()
          self.__error_buffer = error_buffer.getvalue()

          # Process results
-         execution_time = time.time() - self.start_time
-         summary = self._generateSummary(result, execution_time)
+         execution_time = time.time() - start_time
+         summary = self.__generateSummary(result, execution_time)

          # Print captured output
-         if self.print_result:
-             self._displayResults(summary)
+         self.printer.displayResults(
+             print_result=self.print_result,
+             summary=summary
+         )

          # Print Execution Time
          if not result.wasSuccessful() and self.throw_exception:
              raise OrionisTestFailureException(result)

+         # Print the final summary message
+         self.printer.finishMessage(
+             print_result=self.print_result,
+             summary=summary
+         )
+
          # Return the summary of the test results
-         self.__result = summary
          return summary
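
Note that `run()` no longer accepts `print_result`/`throw_exception` overrides; both are fixed at `configure()` time, and the summary is cached in `__result` (inside `__generateSummary`, shown further down) for later retrieval via `getResult()`. A sketch of the end-to-end flow using only methods from this diff:

    summary = (
        UnitTest()
        .configure(verbosity=2, throw_exception=False)
        .discoverTestsInFolder(base_path="tests", folder_path="unit")
        .run()
    )
    # Keys taken from the summary dict built in __generateSummary
    print(summary["passed"], "of", summary["total_tests"],
          f"passed ({summary['success_rate']:.1f}%) in {summary['total_time']:.2f}s")
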

-     def _withLiveConsole(self) -> None:
+     def __flattenTestSuite(
+         self,
+         suite: unittest.TestSuite
+     ) -> List[unittest.TestCase]:
          """
-         Determines if the live console should be used based on the presence of debug or dump calls in the test code.
+         Recursively flattens a nested unittest.TestSuite into a list of unique unittest.TestCase instances.
+
+         Parameters
+         ----------
+         suite : unittest.TestSuite
+             The test suite to flatten, which may contain nested suites or test cases.

          Returns
          -------
-         bool
-             True if the live console should be used, False otherwise.
-         """
-         if self.withliveconsole:
-
-             try:
-
-                 # Flatten the test suite to get all test cases
-                 for test_case in self._flattenTestSuite(self.suite):
-
-                     # Get the source code of the test case class
-                     source = inspect.getsource(test_case.__class__)
-
-                     # Only match if the keyword is not inside a comment
-                     for keyword in ('self.dd', 'self.dump'):
-
-                         # Find all lines containing the keyword
-                         for line in source.splitlines():
-                             if keyword in line:
-
-                                 # Remove leading/trailing whitespace
-                                 stripped = line.strip()
-
-                                 # Ignore lines that start with '#' (comments)
-                                 if not stripped.startswith('#') and not re.match(r'^\s*#', line):
-                                     self.withliveconsole = False
-                                     break
+         List[unittest.TestCase]
+             A list containing all unique TestCase instances extracted from the suite.

-                         # If we found a keyword, no need to check further
-                         if not self.withliveconsole:
-                             break
+         Notes
+         -----
+         This method traverses the given TestSuite recursively, collecting all TestCase instances
+         and ensuring that each test appears only once in the resulting list.
+         """
+         tests = []
+         seen_ids = set()

-                     # If we found a keyword in any test case, no need to check further
-                     if not self.withliveconsole:
-                         break
+         def _flatten(item):
+             if isinstance(item, unittest.TestSuite):
+                 for sub_item in item:
+                     _flatten(sub_item)
+             elif hasattr(item, "id"):
+                 test_id = item.id()
+                 parts = test_id.split('.')
+                 if len(parts) >= 2:
+                     short_id = '.'.join(parts[-2:])
+                 else:
+                     short_id = test_id
+                 if short_id not in seen_ids:
+                     seen_ids.add(short_id)
+                     tests.append(item)

-             except Exception:
-                 pass
+         _flatten(suite)
+         return tests
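
The replacement flattener deduplicates by the last two segments of each test id (`TestClass.test_method`) rather than relying on `TestCase` equality as the removed `_flattenTestSuite` did; be aware this also collapses identically named class/method pairs defined in different modules. A standalone sketch of the same traversal:

    import unittest

    def flatten(suite: unittest.TestSuite) -> list:
        tests, seen = [], set()
        def walk(item):
            if isinstance(item, unittest.TestSuite):
                for sub in item:      # recurse into nested suites
                    walk(sub)
            elif hasattr(item, "id"):
                short_id = '.'.join(item.id().split('.')[-2:])
                if short_id not in seen:
                    seen.add(short_id)
                    tests.append(item)
        walk(suite)
        return tests
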

-     def _runSuite(self):
+     def __runSuite(
+         self
+     ):
          """
          Run the test suite according to the selected execution mode (parallel or sequential),
          capturing standard output and error streams during execution.
@@ -523,14 +610,29 @@ class UnitTest(IUnitTest):

          # Execute tests based on selected mode
          if self.execution_mode == ExecutionMode.PARALLEL.value:
-             result = self._runTestsInParallel(output_buffer, error_buffer)
+
+             # Run tests in parallel
+             result = self.__runTestsInParallel(
+                 output_buffer,
+                 error_buffer
+             )
+
          else:
-             result = self._runTestsSequentially(output_buffer, error_buffer)
+
+             # Run tests sequentially
+             result = self.__runTestsSequentially(
+                 output_buffer,
+                 error_buffer
+             )

          # Return the result along with captured output and error streams
          return result, output_buffer, error_buffer

-     def _runTestsSequentially(self, output_buffer: io.StringIO, error_buffer: io.StringIO) -> unittest.TestResult:
+     def __runTestsSequentially(
+         self,
+         output_buffer: io.StringIO,
+         error_buffer: io.StringIO
+     ) -> unittest.TestResult:
          """
          Executes the test suite sequentially, capturing the output and error streams.

@@ -547,18 +649,28 @@ class UnitTest(IUnitTest):
              The result of the test suite execution, containing information about
              passed, failed, and skipped tests.
          """
+
+         # Flatten the suite to avoid duplicate tests
+         flattened_suite = unittest.TestSuite(self.__flattenTestSuite(self.suite))
+
+         # Create a custom result class to capture detailed test results
          with redirect_stdout(output_buffer), redirect_stderr(error_buffer):
              runner = unittest.TextTestRunner(
                  stream=output_buffer,
                  verbosity=self.verbosity,
                  failfast=self.fail_fast,
-                 resultclass=self._createCustomResultClass()
+                 resultclass=self.__customResultClass()
              )
-             result = runner.run(self.suite)
+             result = runner.run(flattened_suite)

+         # Return the result object containing test outcomes
          return result

-     def _runTestsInParallel(self, output_buffer: io.StringIO, error_buffer: io.StringIO) -> unittest.TestResult:
+     def __runTestsInParallel(
+         self,
+         output_buffer: io.StringIO,
+         error_buffer: io.StringIO
+     ) -> unittest.TestResult:
          """
          Runs all test cases in the provided test suite concurrently using a thread pool,
          aggregating the results into a single result object. Standard output and error
@@ -583,10 +695,10 @@ class UnitTest(IUnitTest):
          """

          # Flatten the test suite to get individual test cases
-         test_cases = list(self._flattenTestSuite(self.suite))
+         test_cases = list(self.__flattenTestSuite(self.suite))

          # Create a custom result instance to collect all results
-         result_class = self._createCustomResultClass()
+         result_class = self.__customResultClass()
          combined_result = result_class(io.StringIO(), descriptions=True, verbosity=self.verbosity)

          # Helper function to run a single test and return its result.
@@ -600,22 +712,34 @@ class UnitTest(IUnitTest):
              )
              return runner.run(unittest.TestSuite([test]))

+         # Use ThreadPoolExecutor to run tests concurrently
          with redirect_stdout(output_buffer), redirect_stderr(error_buffer):
+
+             # Create a ThreadPoolExecutor to run tests in parallel
              with ThreadPoolExecutor(max_workers=self.max_workers) as executor:
+
+                 # Submit all test cases to the executor
                  futures = [executor.submit(run_single_test, test) for test in test_cases]

+                 # Process the results as they complete
                  for future in as_completed(futures):
                      test_result = future.result()
-                     self._mergeTestResults(combined_result, test_result)
+                     self.__mergeTestResults(combined_result, test_result)

+                     # If fail_fast is enabled and a test failed, cancel remaining futures
                      if self.fail_fast and not combined_result.wasSuccessful():
                          for f in futures:
                              f.cancel()
                          break

+         # Return the combined result object
          return combined_result
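
Parallel mode submits each flattened test case to a thread pool and merges the per-test results, so test cases must be thread-safe; with `fail_fast`, futures that have not started yet are cancelled, but already-running tests finish. Enabling it is purely a configuration change:

    from orionis.services.system.workers import Workers

    unit.configure(
        execution_mode='PARALLEL',
        max_workers=Workers().calculate(),  # the new default shown earlier in this diff
    )
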

-     def _mergeTestResults(self, combined_result: unittest.TestResult, individual_result: unittest.TestResult) -> None:
+     def __mergeTestResults(
+         self,
+         combined_result: unittest.TestResult,
+         individual_result: unittest.TestResult
+     ) -> None:
          """
          Merge the results of two unittest.TestResult objects.

@@ -635,6 +759,8 @@ class UnitTest(IUnitTest):
          -------
          None
          """
+
+         # Update the combined result with counts and lists from the individual result
          combined_result.testsRun += individual_result.testsRun
          combined_result.failures.extend(individual_result.failures)
          combined_result.errors.extend(individual_result.errors)
@@ -648,24 +774,26 @@ class UnitTest(IUnitTest):
          combined_result.test_results = []
          combined_result.test_results.extend(individual_result.test_results)

-     def _createCustomResultClass(self) -> type:
+     def __customResultClass(
+         self
+     ) -> type:
          """
          Creates a custom test result class for enhanced test tracking.
-         This method dynamically generates an `EnhancedTestResult` class that extends
+         This method dynamically generates an `OrionisTestResult` class that extends
          `unittest.TextTestResult`. The custom class provides advanced functionality for
          tracking test execution details, including timings, statuses, and error information.

          Returns
          -------
          type
-             A dynamically created class `EnhancedTestResult` that overrides methods to handle
+             A dynamically created class `OrionisTestResult` that overrides methods to handle
              test results, including success, failure, error, and skipped tests. The class
              collects detailed information about each test, such as execution time, error
              messages, traceback, and file path.

          Notes
          -----
-         The `EnhancedTestResult` class includes the following method overrides:
+         The `OrionisTestResult` class includes the following method overrides:
          The method uses the `this` reference to access the outer class's methods, such as
          `_extractErrorInfo`, for extracting and formatting error information.
          """
@@ -674,7 +802,7 @@ class UnitTest(IUnitTest):
          this = self

          # Define the custom test result class
-         class EnhancedTestResult(unittest.TextTestResult):
+         class OrionisTestResult(unittest.TextTestResult):
              def __init__(self, *args, **kwargs):
                  super().__init__(*args, **kwargs)
                  self.test_results = []
@@ -767,10 +895,65 @@ class UnitTest(IUnitTest):
                  )
              )

-         # Return the dynamically created EnhancedTestResult class
-         return EnhancedTestResult
+         # Return the dynamically created OrionisTestResult class
+         return OrionisTestResult

-     def _generateSummary(self, result: unittest.TestResult, execution_time: float) -> Dict[str, Any]:
+     def _extractErrorInfo(
+         self,
+         traceback_str: str
+     ) -> Tuple[Optional[str], Optional[str]]:
+         """
+         Extract error information from a traceback string.
+         This method processes a traceback string to extract the file path of the Python file where the error occurred and
+         cleans up the traceback by removing framework internals and irrelevant noise.
+
+         Parameters
+         ----------
+         traceback_str : str
+             The traceback string to process.
+
+         Returns
+         -------
+         Tuple[Optional[str], Optional[str]]
+             A tuple containing the extracted file path (or None) and the cleaned traceback string.
+
+         Notes
+         -----
+         Framework internals and lines containing 'unittest/', 'lib/python', or 'site-packages' are removed from the traceback.
+         The cleaned traceback starts from the first occurrence of the test file path.
+         """
+         # Extract file path
+         file_matches = re.findall(r'File ["\'](.*?.py)["\']', traceback_str)
+         file_path = file_matches[-1] if file_matches else None
+
+         # Clean up traceback by removing framework internals and noise
+         tb_lines = traceback_str.split('\n')
+         clean_lines = []
+         relevant_lines_started = False
+
+         # Iterate through each line in the traceback
+         for line in tb_lines:
+
+             # Skip framework internal lines
+             if any(s in line for s in ['unittest/', 'lib/python', 'site-packages']):
+                 continue
+
+             # Start capturing when we hit the test file
+             if file_path and file_path in line and not relevant_lines_started:
+                 relevant_lines_started = True
+
+             if relevant_lines_started:
+                 clean_lines.append(line)
+
+         clean_tb = str('\n').join(clean_lines) if clean_lines else traceback_str
+
+         return file_path, clean_tb
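
`_extractErrorInfo` keeps its single-underscore name because the dynamically created `OrionisTestResult` reaches it through the `this` closure. A rough illustration of its behavior on a fabricated traceback string:

    tb = (
        "Traceback (most recent call last):\n"
        '  File "/usr/lib/python3.12/unittest/case.py", line 58, in testPartExecutor\n'
        "    yield\n"
        '  File "tests/unit/test_example.py", line 7, in test_sum\n'
        "    self.assertEqual(2 + 2, 5)\n"
        "AssertionError: 4 != 5\n"
    )
    file_path, clean_tb = unit._extractErrorInfo(tb)
    # file_path -> 'tests/unit/test_example.py'
    # clean_tb  -> starts at the test file's frame; the unittest/ frame is dropped
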
+
+     def __generateSummary(
+         self,
+         result: unittest.TestResult,
+         execution_time: float
+     ) -> Dict[str, Any]:
          """
          Generate a summary of the test results, including statistics and details for each test.

@@ -840,7 +1023,7 @@ class UnitTest(IUnitTest):
          success_rate = (passed / result.testsRun * 100) if result.testsRun > 0 else 100.0

          # Create a summary report
-         report = {
+         self.__result = {
              "total_tests": result.testsRun,
              "passed": passed,
              "failed": len(result.failures),
@@ -854,33 +1037,19 @@ class UnitTest(IUnitTest):

          # Handle persistence of the report
          if self.persistent:
-             self._persistTestResults(report)
+             self.__handlePersistResults(self.__result)

          # Handle Web Report Rendering
          if self.web_report:
-
-             # Generate the web report and get the path
-             path = self._webReport(report)
-
-             # Elegant invitation to view the results, with underlined path
-             invite_text = Text("Test results saved. ", style="green")
-             invite_text.append("View report: ", style="bold green")
-             invite_text.append(str(path), style="underline blue")
-             self.rich_console.print(invite_text)
+             self.__handleWebReport(self.__result)

          # Return the summary
-         return {
-             "total_tests": result.testsRun,
-             "passed": passed,
-             "failed": len(result.failures),
-             "errors": len(result.errors),
-             "skipped": len(result.skipped),
-             "total_time": float(execution_time),
-             "success_rate": success_rate,
-             "test_details": test_details
-         }
+         return self.__result

-     def _webReport(self, summary: Dict[str, Any]) -> None:
+     def __handleWebReport(
+         self,
+         summary: Dict[str, Any]
+     ) -> None:
          """
          Generates a web report for the test results summary.

@@ -901,6 +1070,7 @@ class UnitTest(IUnitTest):
          - If persistence is enabled and the driver is 'sqlite', the report is marked as persistent.
          - Returns the path to the generated report for further use.
          """
+
          # Determine the absolute path for storing results
          project = os.path.basename(os.getcwd())
          storage_path = os.path.abspath(os.path.join(os.getcwd(), self.base_path))
@@ -916,10 +1086,13 @@ class UnitTest(IUnitTest):
              persist=self.persistent and self.persistent_driver == 'sqlite'
          )

-         # Render the report and return the path
-         return render.render()
+         # Render the report and print the web report link
+         self.printer.linkWebReport(render.render())

-     def _persistTestResults(self, summary: Dict[str, Any]) -> None:
+     def __handlePersistResults(
+         self,
+         summary: Dict[str, Any]
+     ) -> None:
          """
          Persist the test results summary using the configured persistent driver.

@@ -943,6 +1116,7 @@ class UnitTest(IUnitTest):
          """

          try:
+
              # Determine the absolute path for storing results
              project = os.getcwd().split(os.sep)[-1]
              storage_path = None
@@ -975,64 +1149,22 @@ class UnitTest(IUnitTest):
              # Write the summary to the JSON file
              with open(log_path, 'w', encoding='utf-8') as log:
                  json.dump(summary, log, indent=4)
+
          except OSError as e:
-             raise OSError(f"Error creating directories or writing files: {str(e)}")
-         except Exception as e:
-             raise OrionisTestPersistenceError(f"Error persisting test results: {str(e)}")

-     def _printSummaryTable(self, summary: Dict[str, Any]) -> None:
-         """
-         Prints a summary table of test results using the Rich library.
+             # Raise an OSError if there is an issue with file or directory operations
+             raise OSError(f"Error creating directories or writing files: {str(e)}")

-         Parameters
-         ----------
-         summary : dict
-             Dictionary with the test summary data. Must contain the following keys:
-             total_tests : int
-                 Total number of tests executed.
-             passed : int
-                 Number of tests that passed.
-             failed : int
-                 Number of tests that failed.
-             errors : int
-                 Number of tests that had errors.
-             skipped : int
-                 Number of tests that were skipped.
-             total_time : float
-                 Total duration of the test execution in seconds.
-             success_rate : float
-                 Percentage of tests that passed.
+         except Exception as e:

-         Returns
-         -------
-         None
-         """
-         table = Table(
-             show_header=True,
-             header_style="bold white",
-             width=self.width_output_component,
-             border_style="blue"
-         )
-         table.add_column("Total", justify="center")
-         table.add_column("Passed", justify="center")
-         table.add_column("Failed", justify="center")
-         table.add_column("Errors", justify="center")
-         table.add_column("Skipped", justify="center")
-         table.add_column("Duration", justify="center")
-         table.add_column("Success Rate", justify="center")
-         table.add_row(
-             str(summary["total_tests"]),
-             str(summary["passed"]),
-             str(summary["failed"]),
-             str(summary["errors"]),
-             str(summary["skipped"]),
-             f"{summary['total_time']:.2f}s",
-             f"{summary['success_rate']:.2f}%"
-         )
-         self.rich_console.print(table)
-         self.orionis_console.newLine()
+             # Raise a general exception for any other issues during persistence
+             raise OrionisTestPersistenceError(f"Error persisting test results: {str(e)}")
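
Persistence and web reporting are now routed through `__handlePersistResults` and `__handleWebReport` and are driven entirely by the flags set in `configure()`; with the 'json' driver the summary is written beneath the configured `base_path` (the exact file naming is handled by code outside this hunk). A sketch:

    unit.configure(persistent=True, persistent_driver='json', web_report=True)
    unit.run()  # persists the summary, then prints the report link via TestPrinter
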

-     def _filterTestsByName(self, suite: unittest.TestSuite, pattern: str) -> unittest.TestSuite:
+     def __filterTestsByName(
+         self,
+         suite: unittest.TestSuite,
+         pattern: str
+     ) -> unittest.TestSuite:
          """
          Filters tests in a given test suite based on a specified name pattern.
          Parameters
@@ -1052,19 +1184,33 @@ class UnitTest(IUnitTest):
          Notes
          -----
          """
+
+         # Initialize an empty TestSuite to hold the filtered tests
          filtered_suite = unittest.TestSuite()
+
+         # Validate the pattern
          try:
              regex = re.compile(pattern)
          except re.error as e:
-             raise OrionisTestValueError(f"Invalid test name pattern: {str(e)}")
+             raise OrionisTestValueError(
+                 f"The provided test name pattern is invalid: '{pattern}'. "
+                 f"Regular expression compilation error: {str(e)}. "
+                 "Please check the pattern syntax and try again."
+             )

-         for test in self._flattenTestSuite(suite):
+         # Iterate through all tests in the suite and filter by the regex pattern
+         for test in self.__flattenTestSuite(suite):
              if regex.search(test.id()):
                  filtered_suite.addTest(test)

+         # Return the filtered suite containing only tests that match the pattern
          return filtered_suite
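
Name filtering compiles the pattern with `re` and applies `regex.search` to each full test id, so both substrings and anchored expressions work. For example (ids are illustrative):

    import re

    regex = re.compile("test_http.*")
    regex.search("tests.unit.TestHttpClient.test_http_get")  # match -> test kept
    regex.search("tests.unit.TestCli.test_parse_args")       # None  -> filtered out
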

-     def _filterTestsByTags(self, suite: unittest.TestSuite, tags: List[str]) -> unittest.TestSuite:
+     def __filterTestsByTags(
+         self,
+         suite: unittest.TestSuite,
+         tags: List[str]
+     ) -> unittest.TestSuite:
          """
          Filter tests in a unittest TestSuite by specified tags.

@@ -1089,7 +1235,7 @@ class UnitTest(IUnitTest):
          filtered_suite = unittest.TestSuite()
          tag_set = set(tags)

-         for test in self._flattenTestSuite(suite):
+         for test in self.__flattenTestSuite(suite):

              # Get test method if this is a TestCase instance
              test_method = getattr(test, test._testMethodName, None)
@@ -1109,243 +1255,9 @@ class UnitTest(IUnitTest):
          # Return the filtered suite containing only tests with matching tags
          return filtered_suite

-     def _flattenTestSuite(self, suite: unittest.TestSuite) -> List[unittest.TestCase]:
-         """
-         Recursively flattens a nested unittest.TestSuite into a list of unique unittest.TestCase instances.
-
-         Parameters
-         ----------
-         suite : unittest.TestSuite
-             The test suite to flatten, which may contain nested suites or test cases.
-
-         Returns
-         -------
-         List[unittest.TestCase]
-             A list containing all unique TestCase instances extracted from the suite.
-
-         Notes
-         -----
-         This method traverses the given TestSuite recursively, collecting all TestCase instances
-         and ensuring that each test appears only once in the resulting list.
-         """
-         tests = []
-         seen = set()
-
-         def _flatten(item):
-             if isinstance(item, unittest.TestSuite):
-                 for sub_item in item:
-                     _flatten(sub_item)
-             elif item not in seen:
-                 seen.add(item)
-                 tests.append(item)
-
-         _flatten(suite)
-         return tests
-
-     def _sanitizeTraceback(self, test_path: str, traceback_test: str) -> str:
-         """
-         Sanitize a traceback string to extract and display the most relevant parts
-         related to a specific test file.
-
-         Parameters
-         ----------
-         test_path : str
-             The file path of the test file being analyzed.
-         traceback_test : str
-             The full traceback string to be sanitized.
-
-         Returns
-         -------
-         str
-             A sanitized traceback string containing only the relevant parts related to the test file.
-             If no relevant parts are found, the full traceback is returned.
-             If the traceback is empty, a default message "No traceback available for this test." is returned.
-         """
-         if not traceback_test:
-             return "No traceback available for this test."
-
-         # Try to extract the test file name
-         file_match = re.search(r'([^/\\]+)\.py', test_path)
-         file_name = file_match.group(1) if file_match else None
-
-         if not file_name:
-             return traceback_test
-
-         # Process traceback to show most relevant parts
-         lines = traceback_test.splitlines()
-         relevant_lines = []
-         found_test_file = False if file_name in traceback_test else True
-
-         for line in lines:
-             if file_name in line and not found_test_file:
-                 found_test_file = True
-             if found_test_file:
-                 if 'File' in line:
-                     relevant_lines.append(line.strip())
-                 elif line.strip() != '':
-                     relevant_lines.append(line)
-
-         # If we didn't find the test file, return the full traceback
-         if not relevant_lines:
-             return traceback_test
-
-         # Remove any lines that are not relevant to the test file
-         return str('\n').join(relevant_lines)
-
-     def _displayResults(self, summary: Dict[str, Any]) -> None:
-         """
-         Display the results of the test execution, including a summary table and detailed
-         information about failed or errored tests grouped by their test classes.
-
-         Parameters
-         ----------
-         summary : dict
-             Dictionary containing the summary of the test execution, including test details,
-             statuses, and execution times.
-
-         Notes
-         -----
-         - Prints a summary table of the test results.
-         - Groups failed and errored tests by their test class and displays them in a structured
-           format using panels.
-         - For each failed or errored test, displays the traceback in a syntax-highlighted panel
-           with additional metadata such as the test method name and execution time.
-         - Uses different icons and border colors to distinguish between failed and errored tests.
-         - Calls a finishing message method after displaying all results.
-         """
-
-         # Print summary table
-         self._printSummaryTable(summary)
-
-         # Group failures and errors by test class
-         failures_by_class = {}
-         for test in summary["test_details"]:
-             if test["status"] in (TestStatus.FAILED.name, TestStatus.ERRORED.name):
-                 class_name = test["class"]
-                 if class_name not in failures_by_class:
-                     failures_by_class[class_name] = []
-                 failures_by_class[class_name].append(test)
-
-         # Display grouped failures
-         for class_name, tests in failures_by_class.items():
-
-             class_panel = Panel.fit(f"[bold]{class_name}[/bold]", border_style="red", padding=(0, 2))
-             self.rich_console.print(class_panel)
-
-             for test in tests:
-                 traceback_str = self._sanitizeTraceback(test['file_path'], test['traceback'])
-                 syntax = Syntax(
-                     traceback_str,
-                     lexer="python",
-                     line_numbers=False,
-                     background_color="default",
-                     word_wrap=True,
-                     theme="monokai"
-                 )
-
-                 icon = "❌" if test["status"] == TestStatus.FAILED.name else "💥"
-                 border_color = "yellow" if test["status"] == TestStatus.FAILED.name else "red"
-
-                 # Ensure execution time is never zero for display purposes
-                 if not test['execution_time'] or test['execution_time'] == 0:
-                     test['execution_time'] = 0.001
-
-                 panel = Panel(
-                     syntax,
-                     title=f"{icon} {test['method']}",
-                     subtitle=f"Duration: {test['execution_time']:.3f}s",
-                     border_style=border_color,
-                     title_align="left",
-                     padding=(1, 1),
-                     subtitle_align="right",
-                     width=self.width_output_component
-                 )
-                 self.rich_console.print(panel)
-                 self.orionis_console.newLine()
-
-         self._finishMessage(summary)
-
-     def _extractErrorInfo(self, traceback_str: str) -> Tuple[Optional[str], Optional[str]]:
-         """
-         Extract error information from a traceback string.
-         This method processes a traceback string to extract the file path of the Python file where the error occurred and
-         cleans up the traceback by removing framework internals and irrelevant noise.
-
-         Parameters
-         ----------
-         traceback_str : str
-             The traceback string to process.
-
-         Returns
-         -------
-         Tuple[Optional[str], Optional[str]]
-             A tuple containing:
-
-         Notes
-         -----
-         Framework internals and lines containing 'unittest/', 'lib/python', or 'site-packages' are removed from the traceback.
-         The cleaned traceback starts from the first occurrence of the test file path.
-         """
-         # Extract file path
-         file_matches = re.findall(r'File ["\'](.*?.py)["\']', traceback_str)
-         file_path = file_matches[-1] if file_matches else None
-
-         # Clean up traceback by removing framework internals and noise
-         tb_lines = traceback_str.split('\n')
-         clean_lines = []
-         relevant_lines_started = False
-
-         for line in tb_lines:
-             # Skip framework internal lines
-             if any(s in line for s in ['unittest/', 'lib/python', 'site-packages']):
-                 continue
-
-             # Start capturing when we hit the test file
-             if file_path and file_path in line and not relevant_lines_started:
-                 relevant_lines_started = True
-
-             if relevant_lines_started:
-                 clean_lines.append(line)
-
-         clean_tb = str('\n').join(clean_lines) if clean_lines else traceback_str
-
-         return file_path, clean_tb
-
-     def _finishMessage(self, summary: Dict[str, Any]) -> None:
-         """
-         Display a summary message for the test suite execution.
-
-         Parameters
-         ----------
-         summary : dict
-             Dictionary containing the test suite summary, including keys such as
-             'failed', 'errors', and 'total_time'.
-
-         Notes
-         -----
-         - If `self.print_result` is False, the method returns without displaying anything.
-         - Shows a status icon (✅ for success, ❌ for failure) based on the presence of
-           failures or errors in the test suite.
-         - Formats and prints the message within a styled panel using the `rich` library.
-         """
-         if not self.print_result:
-             return
-
-         status_icon = "✅" if (summary['failed'] + summary['errors']) == 0 else "❌"
-         msg = f"Test suite completed in {summary['total_time']:.2f} seconds"
-         self.rich_console.print(
-             Panel(
-                 msg,
-                 border_style="blue",
-                 title=f"{status_icon} Test Suite Finished",
-                 title_align='left',
-                 width=self.width_output_component,
-                 padding=(0, 1)
-             )
-         )
-         self.rich_console.print()
-
-     def getTestNames(self) -> List[str]:
+     def getTestNames(
+         self
+     ) -> List[str]:
          """
          Get a list of test names (unique identifiers) from the test suite.

@@ -1354,9 +1266,11 @@ class UnitTest(IUnitTest):
          List[str]
              List of test names (unique identifiers) from the test suite.
          """
-         return [test.id() for test in self._flattenTestSuite(self.suite)]
+         return [test.id() for test in self.__flattenTestSuite(self.suite)]

-     def getTestCount(self) -> int:
+     def getTestCount(
+         self
+     ) -> int:
          """
          Returns the total number of test cases in the test suite.

@@ -1365,9 +1279,11 @@ class UnitTest(IUnitTest):
          int
              The total number of individual test cases in the suite.
          """
-         return len(list(self._flattenTestSuite(self.suite)))
+         return len(list(self.__flattenTestSuite(self.suite)))

-     def clearTests(self) -> None:
+     def clearTests(
+         self
+     ) -> None:
          """
          Clear all tests from the current test suite.

@@ -1375,7 +1291,9 @@ class UnitTest(IUnitTest):
          """
          self.suite = unittest.TestSuite()

-     def getResult(self) -> dict:
+     def getResult(
+         self
+     ) -> dict:
          """
          Returns the results of the executed test suite.

@@ -1386,7 +1304,9 @@ class UnitTest(IUnitTest):
          """
          return self.__result

-     def getOutputBuffer(self) -> int:
+     def getOutputBuffer(
+         self
+     ) -> int:
          """
          Returns the output buffer used for capturing test results.
          This method returns the internal output buffer that collects the results of the test execution.
@@ -1397,17 +1317,18 @@ class UnitTest(IUnitTest):
          """
          return self.__output_buffer

-     def printOutputBuffer(self) -> None:
+     def printOutputBuffer(
+         self
+     ) -> None:
          """
          Prints the contents of the output buffer to the console.
          This method retrieves the output buffer and prints its contents using the rich console.
          """
-         if self.__output_buffer:
-             print(self.__output_buffer)
-         else:
-             print("No output buffer available.")
+         self.printer.print(self.__output_buffer)

-     def getErrorBuffer(self) -> int:
+     def getErrorBuffer(
+         self
+     ) -> int:
          """
          Returns the error buffer used for capturing test errors.
          This method returns the internal error buffer that collects any errors encountered during test execution.
@@ -1418,12 +1339,11 @@ class UnitTest(IUnitTest):
          """
          return self.__error_buffer

-     def printErrorBuffer(self) -> None:
+     def printErrorBuffer(
+         self
+     ) -> None:
          """
          Prints the contents of the error buffer to the console.
          This method retrieves the error buffer and prints its contents using the rich console.
          """
-         if self.__error_buffer:
-             print(self.__error_buffer)
-         else:
-             print("No error buffer available.")
+         self.printer.print(self.__error_buffer)