orionis-0.245.0-py3-none-any.whl → orionis-0.247.0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (108)
  1. orionis/framework.py +1 -1
  2. orionis/luminate/config/app/__init__.py +10 -0
  3. orionis/luminate/config/app/entities/app.py +205 -0
  4. orionis/luminate/config/app/enums/ciphers.py +34 -0
  5. orionis/luminate/config/app/enums/environments.py +15 -0
  6. orionis/luminate/config/auth/__init__.py +7 -0
  7. orionis/luminate/config/auth/entities/auth.py +11 -0
  8. orionis/luminate/config/cache/__init__.py +9 -0
  9. orionis/luminate/config/cache/entities/cache.py +58 -0
  10. orionis/luminate/config/cache/entities/file.py +29 -0
  11. orionis/luminate/config/cache/entities/stores.py +35 -0
  12. orionis/luminate/config/cache/enums/drivers.py +12 -0
  13. orionis/luminate/config/contracts/config.py +27 -0
  14. orionis/luminate/config/entities/testing.py +215 -0
  15. orionis/luminate/config/exceptions/integrity_exception.py +30 -0
  16. orionis/luminate/console/dumper/dump_die.py +418 -0
  17. orionis/luminate/contracts/facades/commands/scheduler_facade.py +1 -1
  18. orionis/luminate/facades/files/path_facade.py +1 -1
  19. orionis/luminate/patterns/__init__.py +4 -0
  20. orionis/luminate/patterns/singleton/__init__.py +10 -0
  21. orionis/luminate/patterns/singleton/meta_class.py +56 -0
  22. orionis/luminate/providers/commands/reactor_commands_service_provider.py +3 -3
  23. orionis/luminate/providers/commands/scheduler_provider.py +1 -1
  24. orionis/luminate/providers/config/config_service_provider.py +1 -1
  25. orionis/luminate/providers/environment/environment__service_provider.py +2 -2
  26. orionis/luminate/providers/files/paths_provider.py +1 -1
  27. orionis/luminate/providers/log/log_service_provider.py +2 -2
  28. orionis/luminate/services/environment/__init__.py +10 -0
  29. orionis/luminate/services/environment/contracts/__init__.py +5 -0
  30. orionis/luminate/services/environment/contracts/env.py +93 -0
  31. orionis/luminate/services/environment/dot_env.py +293 -0
  32. orionis/luminate/services/environment/env.py +77 -0
  33. orionis/luminate/services/paths/__init__.py +9 -0
  34. orionis/luminate/services/paths/contracts/__init__.py +0 -0
  35. orionis/luminate/services/paths/contracts/resolver.py +67 -0
  36. orionis/luminate/services/paths/resolver.py +83 -0
  37. orionis/luminate/services/workers/__init__.py +10 -0
  38. orionis/luminate/services/workers/maximum_workers.py +36 -0
  39. orionis/luminate/services_/__init__.py +0 -0
  40. orionis/luminate/services_/commands/__init__.py +0 -0
  41. orionis/luminate/services_/config/__init__.py +0 -0
  42. orionis/luminate/services_/log/__init__.py +0 -0
  43. orionis/luminate/support/introspection/abstracts/entities/__init__.py +0 -0
  44. orionis/luminate/support/introspection/abstracts/entities/abstract_class_attributes.py +11 -0
  45. orionis/luminate/support/introspection/abstracts/reflect_abstract.py +154 -16
  46. orionis/luminate/support/introspection/instances/reflection_instance.py +2 -2
  47. orionis/luminate/test/__init__.py +11 -1
  48. orionis/luminate/test/cases/test_async.py +1 -10
  49. orionis/luminate/test/cases/test_case.py +8 -3
  50. orionis/luminate/test/cases/test_sync.py +1 -0
  51. orionis/luminate/test/core/contracts/test_suite.py +19 -31
  52. orionis/luminate/test/core/contracts/test_unit.py +103 -59
  53. orionis/luminate/test/core/test_suite.py +50 -42
  54. orionis/luminate/test/core/test_unit.py +756 -196
  55. orionis/luminate/test/entities/test_result.py +19 -18
  56. orionis/luminate/test/enums/test_mode.py +16 -0
  57. orionis/luminate/test/exceptions/test_config_exception.py +28 -0
  58. orionis/luminate/test/exceptions/test_exception.py +41 -34
  59. orionis/luminate/test/output/contracts/test_std_out.py +22 -11
  60. orionis/luminate/test/output/test_std_out.py +79 -48
  61. {orionis-0.245.0.dist-info → orionis-0.247.0.dist-info}/METADATA +4 -1
  62. {orionis-0.245.0.dist-info → orionis-0.247.0.dist-info}/RECORD +98 -61
  63. tests/config/__init__.py +0 -0
  64. tests/config/test_app.py +122 -0
  65. tests/config/test_auth.py +21 -0
  66. tests/config/test_cache.py +20 -0
  67. tests/patterns/__init__.py +0 -0
  68. tests/patterns/singleton/__init__.py +0 -0
  69. tests/patterns/singleton/test_singleton.py +18 -0
  70. tests/services/__init__.py +0 -0
  71. tests/services/environment/__init__.py +0 -0
  72. tests/services/environment/test_env.py +33 -0
  73. tests/support/inspection/fakes/fake_reflect_abstract.py +61 -5
  74. tests/support/inspection/test_reflect_abstract.py +62 -1
  75. tests/support/inspection/test_reflect_instance.py +0 -1
  76. orionis/luminate/config/app.py +0 -47
  77. orionis/luminate/config/auth.py +0 -15
  78. orionis/luminate/config/cache.py +0 -51
  79. orionis/luminate/support/environment/contracts/env.py +0 -68
  80. orionis/luminate/support/environment/env.py +0 -138
  81. orionis/luminate/support/environment/functions.py +0 -49
  82. orionis/luminate/support/environment/helper.py +0 -26
  83. orionis/luminate/support/patterns/singleton.py +0 -44
  84. tests/support/environment/test_env.py +0 -91
  85. tests/support/patterns/test_singleton.py +0 -18
  86. /orionis/luminate/{services/commands → config/app/entities}/__init__.py +0 -0
  87. /orionis/luminate/{services/config → config/app/enums}/__init__.py +0 -0
  88. /orionis/luminate/{services/log → config/auth/entities}/__init__.py +0 -0
  89. /orionis/luminate/{support/environment → config/cache/entities}/__init__.py +0 -0
  90. /orionis/luminate/{support/environment/contracts → config/cache/enums}/__init__.py +0 -0
  91. /orionis/luminate/{support/patterns → config/contracts}/__init__.py +0 -0
  92. /orionis/luminate/config/{cors.py → entities/cors.py} +0 -0
  93. /orionis/luminate/config/{database.py → entities/database.py} +0 -0
  94. /orionis/luminate/config/{filesystems.py → entities/filesystems.py} +0 -0
  95. /orionis/luminate/config/{logging.py → entities/logging.py} +0 -0
  96. /orionis/luminate/config/{mail.py → entities/mail.py} +0 -0
  97. /orionis/luminate/config/{queue.py → entities/queue.py} +0 -0
  98. /orionis/luminate/config/{session.py → entities/session.py} +0 -0
  99. {tests/support/environment → orionis/luminate/config/exceptions}/__init__.py +0 -0
  100. {tests/support/patterns → orionis/luminate/console/dumper}/__init__.py +0 -0
  101. /orionis/luminate/{services → services_}/commands/reactor_commands_service.py +0 -0
  102. /orionis/luminate/{services → services_}/commands/scheduler_service.py +0 -0
  103. /orionis/luminate/{services → services_}/config/config_service.py +0 -0
  104. /orionis/luminate/{services → services_}/log/log_service.py +0 -0
  105. {orionis-0.245.0.dist-info → orionis-0.247.0.dist-info}/LICENCE +0 -0
  106. {orionis-0.245.0.dist-info → orionis-0.247.0.dist-info}/WHEEL +0 -0
  107. {orionis-0.245.0.dist-info → orionis-0.247.0.dist-info}/entry_points.txt +0 -0
  108. {orionis-0.245.0.dist-info → orionis-0.247.0.dist-info}/top_level.txt +0 -0
@@ -1,328 +1,888 @@
 import io
 import re
 import time
+import inspect
 import traceback
 import unittest
-from contextlib import redirect_stdout, redirect_stderr
-from dataclasses import asdict
+from pathlib import Path
+from datetime import datetime
 from typing import Any, Dict, List, Optional, Tuple
+from contextlib import redirect_stdout, redirect_stderr
+from concurrent.futures import ThreadPoolExecutor, as_completed
+from rich.console import Console as RichConsole
+from rich.panel import Panel
+from rich.syntax import Syntax
+from rich.table import Table
 from orionis.luminate.console.output.console import Console
 from orionis.luminate.test.core.contracts.test_unit import IUnitTest
-from orionis.luminate.test.exceptions.test_exception import OrionisTestFailureException
 from orionis.luminate.test.entities.test_result import TestResult
+from orionis.luminate.test.enums.test_mode import ExecutionMode
 from orionis.luminate.test.enums.test_status import TestStatus
+from orionis.luminate.test.exceptions.test_exception import OrionisTestFailureException
 
 class UnitTest(IUnitTest):
     """
-    An advanced testing framework for discovering, running, and analyzing unit tests.
-
-    Features include:
-    - Detailed test discovery and filtering
-    - Comprehensive result reporting
-    - Performance timing
-    - Customizable output formatting
-    - Failure analysis
-
-    Attributes
-    ----------
-    loader : unittest.TestLoader
-        Test loader instance for discovering tests
-    suite : unittest.TestSuite
-        Test suite holding discovered tests
-    test_results : List[TestResult]
-        Detailed results of executed tests
-    start_time : float
-        Timestamp when test execution began
+    UnitTest is a comprehensive testing utility class designed to facilitate the discovery, configuration,
+    and execution of unit tests. It provides features for sequential and parallel test execution,
+    customizable verbosity, fail-fast behavior, and rich output formatting using the `rich` library.
+    loader (unittest.TestLoader): The test loader used to discover and load tests.
+    suite (unittest.TestSuite): The test suite containing the discovered tests.
    """
 
     def __init__(self) -> None:
-        """Initialize the testing framework."""
+        """
+        Initializes the test unit with default configurations.
+
+        Attributes:
+            loader (unittest.TestLoader): The test loader used to discover tests.
+            suite (unittest.TestSuite): The test suite to hold the discovered tests.
+            test_results (List[TestResult]): A list to store the results of executed tests.
+            start_time (float): The start time of the test execution.
+            print_result (bool): Flag to determine whether to print test results.
+            verbosity (int): The verbosity level for test output.
+            execution_mode (ExecutionMode): The mode of test execution (e.g., SEQUENTIAL or PARALLEL).
+            max_workers (int): The maximum number of workers for parallel execution.
+            fail_fast (bool): Flag to stop execution on the first failure.
+            rich_console (RichConsole): Console for rich text output.
+            orionis_console (Console): Console for standard output.
+            discovered_tests (List): A list to store discovered test cases.
+            width_table (int): The width of the table for displaying results.
+            throw_exception (bool): Flag to determine whether to throw exceptions on test failures.
+        """
         self.loader = unittest.TestLoader()
         self.suite = unittest.TestSuite()
         self.test_results: List[TestResult] = []
         self.start_time: float = 0.0
-
-    def discoverTestsInFolder(
+        self.print_result: bool = True
+        self.verbosity: int = 2
+        self.execution_mode: ExecutionMode = ExecutionMode.SEQUENTIAL
+        self.max_workers: int = 4
+        self.fail_fast: bool = False
+        self.rich_console = RichConsole()
+        self.orionis_console = Console()
+        self.discovered_tests: List = []
+        self.width_output_component: int = int(self.rich_console.width * 0.75)
+        self.throw_exception: bool = False
+
+    def configure(
         self,
-        folder_path: str,
-        base_path: str = "tests",
-        pattern: str = "test_*.py",
-        test_name_pattern: Optional[str] = None
+        verbosity: int = None,
+        execution_mode: ExecutionMode = None,
+        max_workers: int = None,
+        fail_fast: bool = None,
+        print_result: bool = None,
+        throw_exception: bool = False
     ) -> 'UnitTest':
         """
-        Discover and add tests from a specified folder to the test suite.
+        Configures the UnitTest instance with the specified parameters.
 
-        Parameters
-        ----------
-        folder_path : str
-            Path to the folder containing test files
-        pattern : str, optional
-            Pattern to match test files (default 'test_*.py')
-        test_name_pattern : Optional[str], optional
-            Regex pattern to filter test names
+        Parameters:
+            verbosity (int, optional): The verbosity level for test output. Defaults to None.
+            execution_mode (ExecutionMode, optional): The mode in which the tests will be executed. Defaults to None.
+            max_workers (int, optional): The maximum number of workers to use for parallel execution. Defaults to None.
+            fail_fast (bool, optional): Whether to stop execution upon the first failure. Defaults to None.
+            print_result (bool, optional): Whether to print the test results after execution. Defaults to None.
+
+        Returns:
+            UnitTest: The configured UnitTest instance.
+        """
+        if verbosity is not None:
+            self.verbosity = verbosity
+        if execution_mode is not None:
+            self.execution_mode = execution_mode
+        if max_workers is not None:
+            self.max_workers = max_workers
+        if fail_fast is not None:
+            self.fail_fast = fail_fast
+        if print_result is not None:
+            self.print_result = print_result
+        if throw_exception is not None:
+            self.throw_exception = throw_exception
+        return self
 
-        Raises
-        ------
-        ValueError
-            If the folder is invalid or no tests are found
+    def discoverTestsInFolder(
+        self,
+        folder_path: str,
+        base_path: str = "tests",
+        pattern: str = "test_*.py",
+        test_name_pattern: Optional[str] = None,
+        tags: Optional[List[str]] = None
+    ) -> 'UnitTest':
+        """
+        Discovers and loads unit tests from a specified folder.
+        Args:
+            folder_path (str): The relative path to the folder containing the tests.
+            base_path (str, optional): The base directory where the test folder is located. Defaults to "tests".
+            pattern (str, optional): The filename pattern to match test files. Defaults to "test_*.py".
+            test_name_pattern (Optional[str], optional): A pattern to filter test names. Defaults to None.
+            tags (Optional[List[str]], optional): A list of tags to filter tests. Defaults to None.
+        Returns:
+            UnitTest: The current instance of the UnitTest class with the discovered tests added.
+        Raises:
+            ValueError: If the test folder does not exist, no tests are found, or an error occurs during test discovery.
        """
         try:
+            full_path = Path(base_path) / folder_path
+            if not full_path.exists():
+                raise ValueError(f"Test folder not found: {full_path}")
+
             tests = self.loader.discover(
-                start_dir=f"{base_path}/{folder_path}",
+                start_dir=str(full_path),
                 pattern=pattern,
                 top_level_dir=None
             )
 
             if test_name_pattern:
-                tests = self._filter_tests_by_name(tests, test_name_pattern)
+                tests = self._filterTestsByName(tests, test_name_pattern)
+
+            if tags:
+                tests = self._filterTestsByTags(tests, tags)
 
             if not list(tests):
-                raise ValueError(f"No tests found in '{base_path}/{folder_path}' matching pattern '{pattern}'")
+                raise ValueError(f"No tests found in '{full_path}' matching pattern '{pattern}'")
 
             self.suite.addTests(tests)
 
+            test_count = len(list(self._flattenTestSuite(tests)))
+            self.discovered_tests.append({
+                "folder": str(full_path),
+                "test_count": test_count,
+            })
+
             return self
 
         except ImportError as e:
-            raise ValueError(f"Error importing tests from '{base_path}/{folder_path}': {str(e)}")
+            raise ValueError(f"Error importing tests from '{full_path}': {str(e)}")
         except Exception as e:
             raise ValueError(f"Unexpected error discovering tests: {str(e)}")
 
-    def _filter_tests_by_name(self, suite: unittest.TestSuite, pattern: str) -> unittest.TestSuite:
-        """Filter tests based on a name pattern."""
-        filtered_suite = unittest.TestSuite()
-        regex = re.compile(pattern)
+    def discoverTestsInModule(self, module_name: str, test_name_pattern: Optional[str] = None) -> 'UnitTest':
+        """
+        Discovers and loads tests from a specified module, optionally filtering them
+        by a test name pattern, and adds them to the test suite.
+        Args:
+            module_name (str): The name of the module to discover tests from.
+            test_name_pattern (Optional[str]): A pattern to filter test names. Only
+                tests matching this pattern will be included. Defaults to None.
+        Returns:
+            UnitTest: The current instance of the UnitTest class, allowing method chaining.
+        Raises:
+            ValueError: If the specified module cannot be imported.
+        """
+        try:
 
-        for test in self._flatten_test_suite(suite):
-            if regex.search(test.id()):
-                filtered_suite.addTest(test)
+            tests = self.loader.loadTestsFromName(module_name)
 
-        return filtered_suite
+            if test_name_pattern:
+                tests = self._filterTestsByName(tests, test_name_pattern)
 
-    def _flatten_test_suite(self, suite: unittest.TestSuite) -> List[unittest.TestCase]:
-        """Flatten a test suite into a list of test cases."""
-        tests = []
-        for item in suite:
-            if isinstance(item, unittest.TestSuite):
-                tests.extend(self._flatten_test_suite(item))
-            else:
-                tests.append(item)
-        return tests
+            self.suite.addTests(tests)
 
-    def _extract_error_info(self, traceback_str: str) -> Tuple[Optional[str], Optional[str]]:
-        """
-        Extract file path and clean traceback from error output.
+            test_count = len(list(self._flattenTestSuite(tests)))
+            self.discovered_tests.append({
+                "module": module_name,
+                "test_count": test_count,
+            })
 
-        Parameters
-        ----------
-        traceback_str : str
-            The full traceback string
+            return self
+        except ImportError as e:
+            raise ValueError(f"Error importing module '{module_name}': {str(e)}")
 
-        Returns
-        -------
-        Tuple[Optional[str], Optional[str]]
-            (file_path, clean_traceback)
+    def _startMessage(self) -> None:
         """
-        file_match = re.search(r'File "([^"]+)"', traceback_str)
-        file_path = file_match.group(1) if file_match else None
-
-        # Clean up traceback by removing framework internals
-        tb_lines = traceback_str.split('\n')
-        clean_tb = '\n'.join(line for line in tb_lines if not any(s in line for s in ['unittest/', 'lib/python']))
-
-        return file_path, clean_tb
-
-    def run(self, print_result:bool = True, throw_exception:bool = False) -> Dict[str, Any]:
+        Displays a formatted message indicating the start of the test suite execution.
+        This method prints details about the test suite, including the total number of tests,
+        the execution mode (parallel or sequential), and the start time. The message is styled
+        and displayed using the `rich` library.
+        Attributes:
+            print_result (bool): Determines whether the message should be printed.
+            suite (TestSuite): The test suite containing the tests to be executed.
+            max_workers (int): The number of workers used in parallel execution mode.
+            execution_mode (ExecutionMode): The mode of execution (parallel or sequential).
+            orionis_console (Console): The console object for handling standard output.
+            rich_console (Console): The rich console object for styled output.
+            width_table (int): The calculated width of the message panel for formatting.
+        Raises:
+            AttributeError: If required attributes are not set before calling this method.
        """
-        Execute all tests in the test suite with comprehensive reporting.
-
-        Returns
-        -------
-        Dict[str, Any]
-            Detailed summary of test results including:
-            - total_tests
-            - passed
-            - failed
-            - errors
-            - skipped
-            - total_time
-            - test_details
+        if self.print_result:
+            test_count = len(list(self._flattenTestSuite(self.suite)))
+            mode_text = f"[stat]Parallel with {self.max_workers} workers[/stat]" if self.execution_mode == ExecutionMode.PARALLEL else "Sequential"
+            textlines = [
+                f"[bold]Total Tests:[/bold] [dim]{test_count}[/dim]",
+                f"[bold]Mode:[/bold] [dim]{mode_text}[/dim]",
+                f"[bold]Started at:[/bold] [dim]{datetime.now().strftime('%Y-%m-%d %H:%M:%S')}[/dim]"
+            ]
+
+            self.orionis_console.newLine()
+            self.rich_console.print(
+                Panel(
+                    '\n'.join(textlines),
+                    border_style="blue",
+                    title="🧪 Orionis Framework - Component Test Suite",
+                    title_align="center",
+                    width=self.width_output_component,
+                    padding=(0, 1)
+                )
+            )
+            self.orionis_console.newLine()
 
-        Raises
-        ------
-        OrionisTestFailureException
-            If any tests fail or error occurs
+    def run(self, print_result: bool = None, throw_exception: bool = None) -> Dict[str, Any]:
+        """
+        Executes the test suite and processes the results.
+        Args:
+            print_result (bool, optional): If provided, overrides the instance's
+                `print_result` attribute to determine whether to print the test results.
+            throw_exception (bool, optional): If True, raises an exception if any
+                test failures or errors are detected.
+        Returns:
+            Dict[str, Any]: A summary of the test execution, including details such as
+                execution time, test results, and a timestamp.
+        Raises:
+            OrionisTestFailureException: If `throw_exception` is True and there are
+                test failures or errors.
        """
+        if print_result is not None:
+            self.print_result = print_result
+        if throw_exception is not None:
+            self.throw_exception = throw_exception
+
         self.start_time = time.time()
-        if print_result:
-            Console.newLine()
-            Console.info("🚀 Starting Test Execution...")
-            Console.newLine()
+        self._startMessage()
 
         # Setup output capture
         output_buffer = io.StringIO()
         error_buffer = io.StringIO()
 
-        # Execute tests
+        # Execute tests based on selected mode
+        if self.execution_mode == ExecutionMode.PARALLEL:
+            result = self._runTestsInParallel(output_buffer, error_buffer)
+        else:
+            result = self._runTestsSequentially(output_buffer, error_buffer)
+
+
+        # Process results
+        execution_time = time.time() - self.start_time
+        summary = self._generateSummary(result, execution_time)
+
+        # Print captured output
+        if self.print_result:
+            self._displayResults(summary, result)
+
+        # Generate performance report
+        summary["timestamp"] = datetime.now().isoformat()
+
+        # Print Execution Time
+        if not result.wasSuccessful() and self.throw_exception:
+            raise OrionisTestFailureException(result)
+
+        return summary
+
+    def _runTestsSequentially(self, output_buffer: io.StringIO, error_buffer: io.StringIO) -> unittest.TestResult:
+        """
+        Executes the test suite sequentially, capturing the output and error streams.
+        Args:
+            output_buffer (io.StringIO): A buffer to capture the standard output during test execution.
+            error_buffer (io.StringIO): A buffer to capture the standard error during test execution.
+        Returns:
+            unittest.TestResult: The result of the test suite execution, containing information about
+                passed, failed, and skipped tests.
+        """
         with redirect_stdout(output_buffer), redirect_stderr(error_buffer):
             runner = unittest.TextTestRunner(
                 stream=output_buffer,
-                verbosity=2,
-                resultclass=self._create_custom_result_class()
+                verbosity=self.verbosity,
+                failfast=self.fail_fast,
+                resultclass=self._createCustomResultClass()
             )
             result = runner.run(self.suite)
 
-        # Process results
-        execution_time = time.time() - self.start_time
-        summary = self._generate_summary(result, execution_time)
+        return result
 
-        # Print captured output
-        if print_result:
-            self._display_results(summary, result)
-
-        # if there are any errors or failures, raise an exception
-        if not result.wasSuccessful():
-            if throw_exception:
-                raise OrionisTestFailureException(
-                    f"{summary['failed'] + summary['errors']} test(s) failed"
-                )
+    def _runTestsInParallel(self, output_buffer: io.StringIO, error_buffer: io.StringIO) -> unittest.TestResult:
+        """
+        Execute tests in parallel using a thread pool.
+        This method runs all test cases in the provided test suite concurrently,
+        utilizing a thread pool for parallel execution. It collects and combines
+        the results of all test cases into a single result object.
+        Args:
+            output_buffer (io.StringIO): A buffer to capture standard output during test execution.
+            error_buffer (io.StringIO): A buffer to capture standard error during test execution.
+        Returns:
+            unittest.TestResult: A combined result object containing the outcomes of all executed tests.
+        Notes:
+            - The method uses a custom result class to aggregate test results.
+            - If `fail_fast` is enabled and a test fails, the remaining tests are canceled.
+            - Minimal output is produced for individual test runs during parallel execution.
+        """
+        """Execute tests in parallel with thread pooling."""
+        test_cases = list(self._flattenTestSuite(self.suite))
 
-        # Return summary of results
-        return summary
+        # Create a custom result instance to collect all results
+        result_class = self._createCustomResultClass()
+        combined_result = result_class(io.StringIO(), descriptions=True, verbosity=self.verbosity)
+
+        def run_single_test(test):
+            """Helper function to run a single test and return its result."""
+            runner = unittest.TextTestRunner(
+                stream=io.StringIO(),
+                verbosity=0,  # Minimal output for parallel runs
+                failfast=False,
+                resultclass=result_class
+            )
+            return runner.run(unittest.TestSuite([test]))
+
+        with redirect_stdout(output_buffer), redirect_stderr(error_buffer):
+            with ThreadPoolExecutor(max_workers=self.max_workers) as executor:
+                futures = [executor.submit(run_single_test, test) for test in test_cases]
+
+                for future in as_completed(futures):
+                    test_result = future.result()
+                    self._mergeTestResults(combined_result, test_result)
 
-    def _create_custom_result_class(self) -> type:
-        """Create a custom TestResult class to capture detailed information."""
+                    if self.fail_fast and not combined_result.wasSuccessful():
+                        for f in futures:
+                            f.cancel()
+                        break
 
+        return combined_result
+
+    def _mergeTestResults(self, combined_result: unittest.TestResult, individual_result: unittest.TestResult) -> None:
+        """
+        Merges the results of two unittest.TestResult objects into a combined result.
+        This method updates the combined_result object by adding the test run counts,
+        failures, errors, skipped tests, expected failures, and unexpected successes
+        from the individual_result object. Additionally, it merges any custom test
+        results stored in the 'test_results' attribute, if present.
+        Args:
+            combined_result (unittest.TestResult): The TestResult object to which the
+                results will be merged.
+            individual_result (unittest.TestResult): The TestResult object containing
+                the results to be merged into the combined_result.
+        Returns:
+            None
+        """
+        combined_result.testsRun += individual_result.testsRun
+        combined_result.failures.extend(individual_result.failures)
+        combined_result.errors.extend(individual_result.errors)
+        combined_result.skipped.extend(individual_result.skipped)
+        combined_result.expectedFailures.extend(individual_result.expectedFailures)
+        combined_result.unexpectedSuccesses.extend(individual_result.unexpectedSuccesses)
+
+        # Merge our custom test results
+        if hasattr(individual_result, 'test_results'):
+            if not hasattr(combined_result, 'test_results'):
+                combined_result.test_results = []
+            combined_result.test_results.extend(individual_result.test_results)
+
+    def _createCustomResultClass(self) -> type:
+        """
+        Creates a custom test result class that extends `unittest.TextTestResult` to provide enhanced
+        functionality for tracking test execution details, including timings, statuses, and error information.
+        Returns:
+            type: A dynamically created class `EnhancedTestResult` that overrides methods to handle
+                test results, including success, failure, error, and skipped tests. The class collects
+                detailed information about each test, such as execution time, error messages, traceback,
+                and file path.
+        The `EnhancedTestResult` class includes:
+            - `startTest`: Records the start time of a test.
+            - `stopTest`: Calculates and stores the elapsed time for a test.
+            - `addSuccess`: Logs details of a successful test.
+            - `addFailure`: Logs details of a failed test, including error message and traceback.
+            - `addError`: Logs details of a test that encountered an error, including error message and traceback.
+            - `addSkip`: Logs details of a skipped test, including the reason for skipping.
+        Note:
+            This method uses the `this` reference to access the outer class's methods, such as `_extractErrorInfo`.
+        """
         this = self
-        class OrionisTestResult(unittest.TextTestResult):
+
+        class EnhancedTestResult(unittest.TextTestResult):
             def __init__(self, *args, **kwargs):
                 super().__init__(*args, **kwargs)
                 self.test_results = []
+                self._test_timings = {}
+                self._current_test_start = None
+
+            def startTest(self, test):
+                self._current_test_start = time.time()
+                super().startTest(test)
+
+            def stopTest(self, test):
+                elapsed = time.time() - self._current_test_start
+                self._test_timings[test] = elapsed
+                super().stopTest(test)
 
             def addSuccess(self, test):
                 super().addSuccess(test)
+                elapsed = self._test_timings.get(test, 0.0)
                 self.test_results.append(
                     TestResult(
+                        id=test.id(),
                         name=str(test),
                         status=TestStatus.PASSED,
-                        execution_time=0.0
+                        execution_time=elapsed,
+                        class_name=test.__class__.__name__,
+                        method=getattr(test, "_testMethodName", None),
+                        module=getattr(test, "__module__", None),
+                        file_path=inspect.getfile(test.__class__),
                     )
                 )
 
             def addFailure(self, test, err):
                 super().addFailure(test, err)
+                elapsed = self._test_timings.get(test, 0.0)
                 tb_str = ''.join(traceback.format_exception(*err))
-                file_path, clean_tb = this._extract_error_info(tb_str)
+                file_path, clean_tb = this._extractErrorInfo(tb_str)
                 self.test_results.append(
                     TestResult(
+                        id=test.id(),
                         name=str(test),
                         status=TestStatus.FAILED,
-                        execution_time=0.0,
+                        execution_time=elapsed,
                         error_message=str(err[1]),
                         traceback=clean_tb,
-                        file_path=file_path
+                        class_name=test.__class__.__name__,
+                        method=getattr(test, "_testMethodName", None),
+                        module=getattr(test, "__module__", None),
+                        file_path=inspect.getfile(test.__class__),
                     )
                 )
 
             def addError(self, test, err):
                 super().addError(test, err)
+                elapsed = self._test_timings.get(test, 0.0)
                 tb_str = ''.join(traceback.format_exception(*err))
-                file_path, clean_tb = this._extract_error_info(tb_str)
+                file_path, clean_tb = this._extractErrorInfo(tb_str)
                 self.test_results.append(
                     TestResult(
+                        id=test.id(),
                         name=str(test),
                         status=TestStatus.ERRORED,
-                        execution_time=0.0,
+                        execution_time=elapsed,
                         error_message=str(err[1]),
                         traceback=clean_tb,
-                        file_path=file_path
+                        class_name=test.__class__.__name__,
+                        method=getattr(test, "_testMethodName", None),
+                        module=getattr(test, "__module__", None),
+                        file_path=inspect.getfile(test.__class__),
                     )
                 )
 
             def addSkip(self, test, reason):
                 super().addSkip(test, reason)
+                elapsed = self._test_timings.get(test, 0.0)
                 self.test_results.append(
                     TestResult(
+                        id=test.id(),
                         name=str(test),
                         status=TestStatus.SKIPPED,
-                        execution_time=0.0,
-                        error_message=reason
+                        execution_time=elapsed,
+                        error_message=reason,
+                        class_name=test.__class__.__name__,
+                        method=getattr(test, "_testMethodName", None),
+                        module=getattr(test, "__module__", None),
+                        file_path=inspect.getfile(test.__class__),
                     )
                 )
 
-        return OrionisTestResult
-
-    def _generate_summary(self, result: unittest.TestResult, execution_time: float) -> Dict[str, Any]:
-        """Generate a comprehensive test summary."""
+        return EnhancedTestResult
 
+    def _generateSummary(self, result: unittest.TestResult, execution_time: float) -> Dict[str, Any]:
+        """
+        Generates a summary of the test results, including details about each test,
+        performance data, and overall statistics.
+        Args:
+            result (unittest.TestResult): The result object containing details of the test execution.
+            execution_time (float): The total execution time of the test suite in seconds.
+        Returns:
+            Dict[str, Any]: A dictionary containing the following keys:
+                - "total_tests" (int): The total number of tests executed.
+                - "passed" (int): The number of tests that passed.
+                - "failed" (int): The number of tests that failed.
+                - "errors" (int): The number of tests that encountered errors.
+                - "skipped" (int): The number of tests that were skipped.
+                - "total_time" (float): The total execution time of the test suite.
+                - "success_rate" (float): The percentage of tests that passed.
+                - "test_details" (List[Dict[str, Any]]): A list of dictionaries containing details about each test:
+                    - "id" (str): The unique identifier of the test.
+                    - "class" (str): The class name of the test.
+                    - "method" (str): The method name of the test.
+                    - "status" (str): The status of the test (e.g., "PASSED", "FAILED").
+                    - "execution_time" (float): The execution time of the test in seconds.
+                    - "error_message" (str): The error message if the test failed or errored.
+                    - "traceback" (str): The traceback information if the test failed or errored.
+                    - "file_path" (str): The file path of the test.
+                - "performance_data" (List[Dict[str, float]]): A list containing performance data:
+                    - "duration" (float): The total execution time of the test suite.
+        """
         test_details = []
+        performance_data = []
+
         for test_result in result.test_results:
-            rst:dict = asdict(test_result)
+            rst: TestResult = test_result
             test_details.append({
-                'name': rst.get('name'),
-                'status': rst.get('status').name,
-                'execution_time': float(rst.get('execution_time', 0)),
-                'error_message': rst.get('error_message', None),
-                'traceback': rst.get('traceback', None),
-                'file_path': rst.get('file_path', None)
+                'id': rst.id,
+                'class': rst.class_name,
+                'method': rst.method,
+                'status': rst.status.name,
+                'execution_time': float(rst.execution_time),
+                'error_message': rst.error_message,
+                'traceback': rst.traceback,
+                'file_path': rst.file_path
             })
 
+        performance_data.append({
+            'duration': float(execution_time)
+        })
+
+        passed = result.testsRun - len(result.failures) - len(result.errors) - len(result.skipped)
+        success_rate = (passed / result.testsRun * 100) if result.testsRun > 0 else 100.0
+
         return {
             "total_tests": result.testsRun,
-            "passed": result.testsRun - len(result.failures) - len(result.errors) - len(result.skipped),
+            "passed": passed,
             "failed": len(result.failures),
             "errors": len(result.errors),
             "skipped": len(result.skipped),
-            "total_time": f"{execution_time:.3f} seconds",
-            "success_rate": f"{((result.testsRun - len(result.failures) - len(result.errors)) / result.testsRun * 100):.1f}%",
-            "test_details": test_details
+            "total_time": execution_time,
+            "success_rate": success_rate,
+            "test_details": test_details,
+            "performance_data": performance_data
         }
 
-    def _display_results(self, summary: Dict[str, Any], result: unittest.TestResult) -> None:
-        """Display test results in a formatted manner."""
-        # Summary table
-        Console.table(
-            headers=["Total", "Passed", "Failed", "Errors", "Skipped", "Duration", "Success Rate"],
-            rows=[[
-                summary["total_tests"],
-                summary["passed"],
-                summary["failed"],
-                summary["errors"],
-                summary["skipped"],
-                summary["total_time"],
-                summary["success_rate"]
-            ]]
+    def _printSummaryTable(self, summary: Dict[str, Any]) -> None:
+        """
+        Prints a summary table of test results using the Rich library.
+
+        Args:
+            summary (Dict[str, Any]): A dictionary containing the test summary data.
+            Expected keys in the dictionary:
+                - "total_tests" (int): Total number of tests executed.
+                - "passed" (int): Number of tests that passed.
+                - "failed" (int): Number of tests that failed.
+                - "errors" (int): Number of tests that encountered errors.
+                - "skipped" (int): Number of tests that were skipped.
+                - "total_time" (float): Total duration of the test run in seconds.
+                - "success_rate" (float): Percentage of tests that passed.
+
+        Returns:
+            None
+        """
+        table = Table(
+            show_header=True,
+            header_style="bold white",
+            width=self.width_output_component,
+            border_style="blue"
        )
-        Console.newLine()
-
-        # Detailed failure/error reporting
-        if result.failures or result.errors:
-            Console.textSuccessBold("Test Failures and Errors")
-            for test, traceback_str in result.failures + result.errors:
-                file_path, clean_tb = self._extract_error_info(traceback_str)
-                title = f"❌ {test.id()}" + (f" ({file_path})" if file_path else "")
-                Console.fail(title)
-                Console.write(clean_tb)
-                Console.newLine()
-
-        # Performance highlights
-        if len(self.test_results) > 10:
-            slow_tests = sorted(
-                [r for r in self.test_results if r.status == TestStatus.PASSED],
-                key=lambda x: x.execution_time,
-                reverse=True
-            )[:3]
-            if slow_tests:
-                Console.textSuccessBold("⏱️ Slowest Passing Tests")
-                for test in slow_tests:
-                    Console.warning(f"{test.name}: {test.execution_time:.3f}s")
-
-        # Final status
-        if result.wasSuccessful():
-            Console.success("✅ All tests passed successfully!")
-        else:
-            Console.error(
-                f"❌ {summary['failed'] + summary['errors']} test(s) failed "
-                f"(Success Rate: {summary['success_rate']})"
+        table.add_column("Total", justify="center")
+        table.add_column("Passed", justify="center")
+        table.add_column("Failed", justify="center")
+        table.add_column("Errors", justify="center")
+        table.add_column("Skipped", justify="center")
+        table.add_column("Duration", justify="center")
+        table.add_column("Success Rate", justify="center")
+        table.add_row(
+            str(summary["total_tests"]),
+            str(summary["passed"]),
+            str(summary["failed"]),
+            str(summary["errors"]),
+            str(summary["skipped"]),
+            f"{summary['total_time']:.2f}s",
+            f"{summary['success_rate']:.2f}%"
+        )
+        self.rich_console.print(table)
+        self.orionis_console.newLine()
+
+    def _filterTestsByName(self, suite: unittest.TestSuite, pattern: str) -> unittest.TestSuite:
+        """
+        Filters the tests in a given test suite based on a specified name pattern.
+        Args:
+            suite (unittest.TestSuite): The test suite containing the tests to filter.
+            pattern (str): A regular expression pattern to match test names.
+        Returns:
+            unittest.TestSuite: A new test suite containing only the tests that match the pattern.
+        Raises:
+            ValueError: If the provided pattern is not a valid regular expression.
+        Notes:
+            - The method flattens the input test suite to iterate over individual tests.
+            - A test is included in the filtered suite if its ID matches the provided regex pattern.
+        """
+        filtered_suite = unittest.TestSuite()
+        try:
+            regex = re.compile(pattern)
+        except re.error as e:
+            raise ValueError(f"Invalid test name pattern: {str(e)}")
+
+        for test in self._flattenTestSuite(suite):
+            if regex.search(test.id()):
+                filtered_suite.addTest(test)
+
+        return filtered_suite
+
+    def _filterTestsByTags(self, suite: unittest.TestSuite, tags: List[str]) -> unittest.TestSuite:
+        """
+        Filters a unittest TestSuite to include only tests that match the specified tags.
+        This method iterates through all tests in the provided TestSuite and checks
+        for a `__tags__` attribute either on the test method or the test case class.
+        If any of the specified tags match the tags associated with the test, the test
+        is added to the filtered TestSuite.
+        Args:
+            suite (unittest.TestSuite): The original TestSuite containing all tests.
+            tags (List[str]): A list of tags to filter the tests by.
+        Returns:
+            unittest.TestSuite: A new TestSuite containing only the tests that match
+                the specified tags.
+        """
+        filtered_suite = unittest.TestSuite()
+        tag_set = set(tags)
+
+        for test in self._flattenTestSuite(suite):
+            # Get test method if this is a TestCase instance
+            test_method = getattr(test, test._testMethodName, None)
+
+            # Check for tags attribute on the test method
+            if hasattr(test_method, '__tags__'):
+                method_tags = set(getattr(test_method, '__tags__'))
+                if tag_set.intersection(method_tags):
+                    filtered_suite.addTest(test)
+            # Also check on the test case class
+            elif hasattr(test, '__tags__'):
+                class_tags = set(getattr(test, '__tags__'))
+                if tag_set.intersection(class_tags):
+                    filtered_suite.addTest(test)
+
+        return filtered_suite
+
+    def _flattenTestSuite(self, suite: unittest.TestSuite) -> List[unittest.TestCase]:
+        """
+        Flattens a nested unittest.TestSuite into a list of individual unittest.TestCase instances.
+        This method recursively traverses the given TestSuite, extracting all TestCase instances
+        while avoiding duplicates. It ensures that each TestCase appears only once in the resulting list.
+        Args:
+            suite (unittest.TestSuite): The TestSuite to be flattened.
+        Returns:
+            List[unittest.TestCase]: A list of unique TestCase instances extracted from the TestSuite.
+        """
+        tests = []
+        seen = set()
+
+        def _flatten(item):
+            if isinstance(item, unittest.TestSuite):
+                for sub_item in item:
+                    _flatten(sub_item)
+            elif item not in seen:
+                seen.add(item)
+                tests.append(item)
+
+        _flatten(suite)
+        return tests
+
+    def _sanitizeTraceback(self, test_path: str, traceback_test: str) -> str:
+        """
+        Sanitizes a traceback string to extract and display the most relevant parts
+        related to a specific test file.
+        Args:
+            test_path (str): The file path of the test file being analyzed.
+            traceback_test (str): The full traceback string to be sanitized.
+        Returns:
+            str: A sanitized traceback string containing only the relevant parts
+                related to the test file. If no relevant parts are found, the full
+                traceback is returned. If the traceback is empty, a default message
+                "No traceback available" is returned.
+        """
+        if not traceback_test:
+            return "No traceback available"
+
+        # Try to extract the test file name
+        file_match = re.search(r'([^/\\]+)\.py', test_path)
+        file_name = file_match.group(1) if file_match else None
+
+        if not file_name:
+            return traceback_test
+
+        # Process traceback to show most relevant parts
+        lines = traceback_test.splitlines()
+        relevant_lines = []
+        found_test_file = False if file_name in traceback_test else True
+
+        for line in lines:
+            if file_name in line and not found_test_file:
+                found_test_file = True
+            if found_test_file:
+                if 'File' in line:
+                    relevant_lines.append(line.strip())
+                elif line.strip() != '':
+                    relevant_lines.append(line)
+
+        # If we didn't find the test file, return the full traceback
+        if not relevant_lines:
+            return traceback_test
+
+        return '\n'.join(relevant_lines)
+
+    def _displayResults(self, summary: Dict[str, Any], result: unittest.TestResult) -> None:
+        """
+        Displays the results of the test execution, including a summary table and detailed
+        information about failed or errored tests grouped by their test classes.
+        Args:
+            summary (Dict[str, Any]): A dictionary containing the summary of the test execution,
+                including test details, statuses, and execution times.
+            result (unittest.TestResult): The result object containing information about the
+                test run, including successes, failures, and errors.
+        Behavior:
+            - Prints a summary table of the test results.
+            - Groups failed and errored tests by their test class and displays them in a
+              structured format using panels.
+            - For each failed or errored test, displays the traceback in a syntax-highlighted
+              panel with additional metadata such as the test method name and execution time.
+            - Uses different icons and border colors to distinguish between failed and errored tests.
+            - Calls a finishing message method after displaying all results.
+        """
+        self._printSummaryTable(summary)
+
+        # Group failures and errors by test class
+        failures_by_class = {}
+        for test in summary["test_details"]:
+            if test["status"] in (TestStatus.FAILED.name, TestStatus.ERRORED.name):
+                class_name = test["class"]
+                if class_name not in failures_by_class:
+                    failures_by_class[class_name] = []
+                failures_by_class[class_name].append(test)
+
+        # Display grouped failures
+        for class_name, tests in failures_by_class.items():
+
+            class_panel = Panel.fit(f"[bold]{class_name}[/bold]", border_style="red", padding=(0, 2))
+            self.rich_console.print(class_panel)
+
+            for test in tests:
+                traceback_str = self._sanitizeTraceback(test['file_path'], test['traceback'])
+                syntax = Syntax(
+                    traceback_str,
+                    lexer="python",
+                    line_numbers=False,
+                    background_color="default",
+                    word_wrap=True,
+                    theme="monokai"
+                )
+
+                icon = "❌" if test["status"] == TestStatus.FAILED.name else "💥"
+                border_color = "yellow" if test["status"] == TestStatus.FAILED.name else "red"
+
+                if test['execution_time'] == 0:
+                    test['execution_time'] = 0.001
+
+                panel = Panel(
+                    syntax,
+                    title=f"{icon} {test['method']}",
+                    subtitle=f"Duration: {test['execution_time']:.3f}s",
+                    border_style=border_color,
+                    title_align="left",
+                    padding=(1, 1),
+                    subtitle_align="right",
+                    width=self.width_output_component
+                )
+                self.rich_console.print(panel)
+                self.orionis_console.newLine()
+
+        self._finishMessage(summary)
+
+    def _extractErrorInfo(self, traceback_str: str) -> Tuple[Optional[str], Optional[str]]:
+        """
+        Extracts error information from a traceback string.
+        This method processes a traceback string to extract the file path of the
+        Python file where the error occurred and cleans up the traceback by
+        removing framework internals and irrelevant noise.
+        Args:
+            traceback_str (str): The traceback string to process.
+        Returns:
+            Tuple[Optional[str], Optional[str]]: A tuple containing:
+                - The file path of the Python file where the error occurred, or None if not found.
+                - The cleaned-up traceback string, or the original traceback string if no cleanup was performed.
+        """
+        # Extract file path
+        file_matches = re.findall(r'File ["\'](.*?.py)["\']', traceback_str)
+        file_path = file_matches[-1] if file_matches else None
+
+        # Clean up traceback by removing framework internals and noise
+        tb_lines = traceback_str.split('\n')
+        clean_lines = []
+        relevant_lines_started = False
+
+        for line in tb_lines:
+            # Skip framework internal lines
+            if any(s in line for s in ['unittest/', 'lib/python', 'site-packages']):
+                continue
+
+            # Start capturing when we hit the test file
+            if file_path and file_path in line and not relevant_lines_started:
+                relevant_lines_started = True
+
+            if relevant_lines_started:
+                clean_lines.append(line)
+
+        clean_tb = '\n'.join(clean_lines) if clean_lines else traceback_str
+
+        return file_path, clean_tb
+
+    def _finishMessage(self, summary: Dict[str, Any]) -> None:
+        """
+        Displays a summary message for the test suite execution if result printing is enabled.
+        Args:
+            summary (Dict[str, Any]): A dictionary containing the test suite summary,
+                including keys such as 'failed', 'errors', and 'total_time'.
+        Behavior:
+            - If `self.print_result` is False, the method returns without displaying anything.
+            - Constructs a message indicating the total execution time of the test suite.
+            - Displays a status icon (✅ for success, ❌ for failure) based on the presence of
+              failures or errors in the test suite.
+            - Formats and prints the message within a styled panel using the `rich` library.
+        """
+        if not self.print_result:
+            return
+
+        status_icon = "✅" if (summary['failed'] + summary['errors']) == 0 else "❌"
+        msg = f"Test suite completed in {summary['total_time']:.2f} seconds"
+        self.rich_console.print(
+            Panel(
+                msg,
+                border_style="blue",
+                title=f"{status_icon} Test Suite Finished",
+                title_align='left',
+                width=self.width_output_component,
+                padding=(0, 1)
             )
-            Console.newLine()
+        )
+        self.rich_console.print()
+
+    def getTestNames(self) -> List[str]:
+        """
+        Retrieves a list of test names from the test suite.
+
+        This method flattens the test suite and extracts the unique identifier
+        (`id`) of each test case.
+
+        Returns:
+            List[str]: A list of test names (unique identifiers) from the test suite.
+        """
+        return [test.id() for test in self._flattenTestSuite(self.suite)]
+
+    def getTestCount(self) -> int:
+        """
+        Calculate the total number of tests in the test suite.
+
+        This method flattens the test suite structure and counts the total
+        number of individual test cases.
+
+        Returns:
+            int: The total number of test cases in the test suite.
+        """
+        return len(list(self._flattenTestSuite(self.suite)))
+
+    def clearTests(self) -> None:
+        """
+        Clears the current test suite by reinitializing it to an empty `unittest.TestSuite`.
+
+        This method is used to reset the test suite, removing any previously added tests.
+        """
+        self.suite = unittest.TestSuite()
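
For orientation, below is a minimal usage sketch of the reworked test runner, inferred solely from the signatures visible in this diff (configure, discoverTestsInFolder, run, and the __tags__ lookup in _filterTestsByTags). The import paths mirror the file locations listed above but are not taken from official Orionis documentation, and the folder name and tag are hypothetical examples.

    # Usage sketch only: inferred from this diff, not from Orionis docs.
    from orionis.luminate.test.core.test_unit import UnitTest
    from orionis.luminate.test.enums.test_mode import ExecutionMode

    unit = UnitTest()

    # configure() mutates the instance and returns self, so calls chain fluently.
    unit.configure(
        verbosity=2,
        execution_mode=ExecutionMode.PARALLEL,  # dispatch tests to a thread pool
        max_workers=4,
        fail_fast=False,
        print_result=True,
    )

    # "services/environment" and the "env" tag are hypothetical. Tags are
    # matched against a __tags__ attribute on the test method or on the
    # TestCase class (see _filterTestsByTags above).
    unit.discoverTestsInFolder(
        folder_path="services/environment",
        base_path="tests",
        pattern="test_*.py",
        tags=["env"],
    )

    summary = unit.run(throw_exception=False)
    print(f"{summary['passed']}/{summary['total_tests']} passed "
          f"({summary['success_rate']:.2f}%) in {summary['total_time']:.2f}s")

Note that when throw_exception is enabled, run() raises OrionisTestFailureException on any failure or error instead of returning the summary dictionary.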