orionis 0.245.0__py3-none-any.whl → 0.246.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (37)
  1. orionis/framework.py +1 -1
  2. orionis/luminate/config/contracts/__init__.py +0 -0
  3. orionis/luminate/config/contracts/config.py +27 -0
  4. orionis/luminate/config/entities/__init__.py +0 -0
  5. orionis/luminate/config/entities/testing.py +37 -0
  6. orionis/luminate/support/environment/env.py +1 -0
  7. orionis/luminate/support/introspection/abstracts/entities/__init__.py +0 -0
  8. orionis/luminate/support/introspection/abstracts/entities/abstract_class_attributes.py +11 -0
  9. orionis/luminate/support/introspection/abstracts/reflect_abstract.py +154 -16
  10. orionis/luminate/support/introspection/instances/reflection_instance.py +2 -2
  11. orionis/luminate/test/core/contracts/test_unit.py +100 -60
  12. orionis/luminate/test/core/test_suite.py +52 -45
  13. orionis/luminate/test/core/test_unit.py +774 -197
  14. orionis/luminate/test/entities/test_result.py +6 -2
  15. orionis/luminate/test/enums/test_mode.py +16 -0
  16. orionis/luminate/test/exceptions/test_config_exception.py +28 -0
  17. orionis/luminate/test/exceptions/test_exception.py +40 -33
  18. orionis/luminate/test/output/test_std_out.py +55 -13
  19. {orionis-0.245.0.dist-info → orionis-0.246.0.dist-info}/METADATA +1 -1
  20. {orionis-0.245.0.dist-info → orionis-0.246.0.dist-info}/RECORD +37 -29
  21. tests/support/inspection/fakes/fake_reflect_abstract.py +61 -5
  22. tests/support/inspection/test_reflect_abstract.py +62 -1
  23. tests/support/inspection/test_reflect_instance.py +0 -1
  24. /orionis/luminate/config/{app.py → entities/app.py} +0 -0
  25. /orionis/luminate/config/{auth.py → entities/auth.py} +0 -0
  26. /orionis/luminate/config/{cache.py → entities/cache.py} +0 -0
  27. /orionis/luminate/config/{cors.py → entities/cors.py} +0 -0
  28. /orionis/luminate/config/{database.py → entities/database.py} +0 -0
  29. /orionis/luminate/config/{filesystems.py → entities/filesystems.py} +0 -0
  30. /orionis/luminate/config/{logging.py → entities/logging.py} +0 -0
  31. /orionis/luminate/config/{mail.py → entities/mail.py} +0 -0
  32. /orionis/luminate/config/{queue.py → entities/queue.py} +0 -0
  33. /orionis/luminate/config/{session.py → entities/session.py} +0 -0
  34. {orionis-0.245.0.dist-info → orionis-0.246.0.dist-info}/LICENCE +0 -0
  35. {orionis-0.245.0.dist-info → orionis-0.246.0.dist-info}/WHEEL +0 -0
  36. {orionis-0.245.0.dist-info → orionis-0.246.0.dist-info}/entry_points.txt +0 -0
  37. {orionis-0.245.0.dist-info → orionis-0.246.0.dist-info}/top_level.txt +0 -0
orionis/luminate/test/core/test_unit.py
@@ -1,328 +1,905 @@
  import io
  import re
  import time
+ import inspect
  import traceback
  import unittest
- from contextlib import redirect_stdout, redirect_stderr
- from dataclasses import asdict
+ from pathlib import Path
+ from datetime import datetime
  from typing import Any, Dict, List, Optional, Tuple
+ from contextlib import redirect_stdout, redirect_stderr
+ from concurrent.futures import ThreadPoolExecutor, as_completed
+ from rich.console import Console as RichConsole
+ from rich.panel import Panel
+ from rich.syntax import Syntax
+ from rich.table import Table
  from orionis.luminate.console.output.console import Console
  from orionis.luminate.test.core.contracts.test_unit import IUnitTest
- from orionis.luminate.test.exceptions.test_exception import OrionisTestFailureException
  from orionis.luminate.test.entities.test_result import TestResult
+ from orionis.luminate.test.enums.test_mode import ExecutionMode
  from orionis.luminate.test.enums.test_status import TestStatus
+ from orionis.luminate.test.exceptions.test_exception import OrionisTestFailureException

  class UnitTest(IUnitTest):
      """
-     An advanced testing framework for discovering, running, and analyzing unit tests.
-
-     Features include:
-     - Detailed test discovery and filtering
-     - Comprehensive result reporting
-     - Performance timing
-     - Customizable output formatting
-     - Failure analysis
-
-     Attributes
-     ----------
-     loader : unittest.TestLoader
-         Test loader instance for discovering tests
-     suite : unittest.TestSuite
-         Test suite holding discovered tests
-     test_results : List[TestResult]
-         Detailed results of executed tests
-     start_time : float
-         Timestamp when test execution began
+     UnitTest is a comprehensive testing utility class designed to facilitate the discovery, configuration,
+     and execution of unit tests. It provides features for sequential and parallel test execution,
+     customizable verbosity, fail-fast behavior, and rich output formatting using the `rich` library.
+
+     Attributes:
+         loader (unittest.TestLoader): The test loader used to discover and load tests.
+         suite (unittest.TestSuite): The test suite containing the discovered tests.
+
+     Methods:
+         configure(verbosity, execution_mode, max_workers, fail_fast, print_result):
+             Configures the UnitTest instance with the specified parameters.
+         discoverTestsInFolder(folder_path, base_path, pattern, test_name_pattern, tags):
+             Discovers and loads unit tests from a specified folder.
+         discoverTestsInModule(module_name, test_name_pattern):
+             Discovers and loads tests from a specified module, optionally filtering them by a test name pattern.
+         run(print_result, throw_exception):
+             Executes the test suite and processes the results.
+         getTestNames():
+             Retrieves a list of test names from the test suite.
+         getTestCount():
+             Calculates the total number of tests in the test suite.
+         clearTests():
+             Clears the current test suite.
+
+     Private Methods:
+         _startMessage():
+             Displays a formatted message indicating the start of the test suite execution.
+         _runTestsSequentially(output_buffer, error_buffer):
+             Executes the test suite sequentially, capturing the output and error streams.
+         _runTestsInParallel(output_buffer, error_buffer):
+             Executes tests in parallel using a thread pool.
+         _mergeTestResults(combined_result, individual_result):
+             Merges the results of two TestResult objects into a combined result.
+         _createCustomResultClass():
+             Creates a custom test result class that extends `unittest.TextTestResult` for enhanced functionality.
+         _generateSummary(result, execution_time):
+             Generates a summary of the test results, including details about each test and overall statistics.
+         _printSummaryTable(summary):
+             Prints a summary table of test results using the Rich library.
+         _filterTestsByName(suite, pattern):
+             Filters the tests in a suite based on a name pattern.
+         _filterTestsByTags(suite, tags):
+             Filters the tests in a suite to include only those matching the specified tags.
+         _flattenTestSuite(suite):
+             Flattens a nested test suite into a list of individual test cases.
+         _sanitizeTraceback(test_path, traceback_test):
+             Sanitizes a traceback string to extract and display the most relevant parts related to a specific test file.
+         _displayResults(summary, result):
+             Displays the results of the test execution, including a summary table and detailed information about failed or errored tests.
+         _extractErrorInfo(traceback_str):
+             Extracts error information from a traceback string, including the file path and cleaned-up traceback.
+         _finishMessage(summary):
+             Displays a formatted message indicating the completion of the test suite execution.
      """

      def __init__(self) -> None:
-         """Initialize the testing framework."""
+         """
+         Initializes the test unit with default configurations.
+
+         Attributes:
+             loader (unittest.TestLoader): The test loader used to discover tests.
+             suite (unittest.TestSuite): The test suite to hold the discovered tests.
+             test_results (List[TestResult]): A list to store the results of executed tests.
+             start_time (float): The start time of the test execution.
+             print_result (bool): Flag to determine whether to print test results.
+             verbosity (int): The verbosity level for test output.
+             execution_mode (ExecutionMode): The mode of test execution (e.g., SEQUENTIAL or PARALLEL).
+             max_workers (int): The maximum number of workers for parallel execution.
+             fail_fast (bool): Flag to stop execution on the first failure.
+             rich_console (RichConsole): Console for rich text output.
+             orionis_console (Console): Console for standard output.
+             discovered_tests (List): A list to store discovered test cases.
+             width_table (int): The width of the table for displaying results.
+             throw_exception (bool): Flag to determine whether to throw exceptions on test failures.
+         """
          self.loader = unittest.TestLoader()
          self.suite = unittest.TestSuite()
          self.test_results: List[TestResult] = []
          self.start_time: float = 0.0
-
-     def discoverTestsInFolder(
+         self.print_result: bool = True
+         self.verbosity: int = 2
+         self.execution_mode: ExecutionMode = ExecutionMode.SEQUENTIAL
+         self.max_workers: int = 4
+         self.fail_fast: bool = False
+         self.rich_console = RichConsole()
+         self.orionis_console = Console()
+         self.discovered_tests: List = []
+         self.width_table: int = 0
+         self.throw_exception: bool = False
+
+     def configure(
          self,
-         folder_path: str,
-         base_path: str = "tests",
-         pattern: str = "test_*.py",
-         test_name_pattern: Optional[str] = None
+         verbosity: int = None,
+         execution_mode: ExecutionMode = None,
+         max_workers: int = None,
+         fail_fast: bool = None,
+         print_result: bool = None,
+         throw_exception: bool = False
      ) -> 'UnitTest':
          """
-         Discover and add tests from a specified folder to the test suite.
+         Configures the UnitTest instance with the specified parameters.

-         Parameters
-         ----------
-         folder_path : str
-             Path to the folder containing test files
-         pattern : str, optional
-             Pattern to match test files (default 'test_*.py')
-         test_name_pattern : Optional[str], optional
-             Regex pattern to filter test names
+         Parameters:
+             verbosity (int, optional): The verbosity level for test output. Defaults to None.
+             execution_mode (ExecutionMode, optional): The mode in which the tests will be executed. Defaults to None.
+             max_workers (int, optional): The maximum number of workers to use for parallel execution. Defaults to None.
+             fail_fast (bool, optional): Whether to stop execution upon the first failure. Defaults to None.
+             print_result (bool, optional): Whether to print the test results after execution. Defaults to None.
+
+         Returns:
+             UnitTest: The configured UnitTest instance.
+         """
+         if verbosity is not None:
+             self.verbosity = verbosity
+         if execution_mode is not None:
+             self.execution_mode = execution_mode
+         if max_workers is not None:
+             self.max_workers = max_workers
+         if fail_fast is not None:
+             self.fail_fast = fail_fast
+         if print_result is not None:
+             self.print_result = print_result
+         if throw_exception is not None:
+             self.throw_exception = throw_exception
+         return self

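For orientation, a minimal sketch of how the new fluent `configure()` API chains; the values shown are illustrative, not framework defaults:

    from orionis.luminate.test.core.test_unit import UnitTest
    from orionis.luminate.test.enums.test_mode import ExecutionMode

    # Run in parallel with 8 workers, stop on the first failure,
    # suppress console output, and raise on any failing test.
    tester = UnitTest().configure(
        verbosity=1,
        execution_mode=ExecutionMode.PARALLEL,
        max_workers=8,
        fail_fast=True,
        print_result=False,
        throw_exception=True,
    )

Since each setter only overwrites the stored value when the argument is not None, partial reconfiguration is safe.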
-         Raises
-         ------
-         ValueError
-             If the folder is invalid or no tests are found
+     def discoverTestsInFolder(
+         self,
+         folder_path: str,
+         base_path: str = "tests",
+         pattern: str = "test_*.py",
+         test_name_pattern: Optional[str] = None,
+         tags: Optional[List[str]] = None
+     ) -> 'UnitTest':
+         """
+         Discovers and loads unit tests from a specified folder.
+         Args:
+             folder_path (str): The relative path to the folder containing the tests.
+             base_path (str, optional): The base directory where the test folder is located. Defaults to "tests".
+             pattern (str, optional): The filename pattern to match test files. Defaults to "test_*.py".
+             test_name_pattern (Optional[str], optional): A pattern to filter test names. Defaults to None.
+             tags (Optional[List[str]], optional): A list of tags to filter tests. Defaults to None.
+         Returns:
+             UnitTest: The current instance of the UnitTest class with the discovered tests added.
+         Raises:
+             ValueError: If the test folder does not exist, no tests are found, or an error occurs during test discovery.
          """
          try:
+             full_path = Path(base_path) / folder_path
+             if not full_path.exists():
+                 raise ValueError(f"Test folder not found: {full_path}")
+
              tests = self.loader.discover(
-                 start_dir=f"{base_path}/{folder_path}",
+                 start_dir=str(full_path),
                  pattern=pattern,
                  top_level_dir=None
              )

              if test_name_pattern:
-                 tests = self._filter_tests_by_name(tests, test_name_pattern)
+                 tests = self._filterTestsByName(tests, test_name_pattern)
+
+             if tags:
+                 tests = self._filterTestsByTags(tests, tags)

              if not list(tests):
-                 raise ValueError(f"No tests found in '{base_path}/{folder_path}' matching pattern '{pattern}'")
+                 raise ValueError(f"No tests found in '{full_path}' matching pattern '{pattern}'")

              self.suite.addTests(tests)

+             test_count = len(list(self._flattenTestSuite(tests)))
+             self.discovered_tests.append({
+                 "folder": str(full_path),
+                 "test_count": test_count,
+             })
+
              return self

          except ImportError as e:
-             raise ValueError(f"Error importing tests from '{base_path}/{folder_path}': {str(e)}")
+             raise ValueError(f"Error importing tests from '{full_path}': {str(e)}")
          except Exception as e:
              raise ValueError(f"Unexpected error discovering tests: {str(e)}")

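A usage sketch for folder discovery, assuming a hypothetical tests/unit directory; the folder name, regex, and tag below are illustrative:

    # Resolves Path("tests") / "unit", filters ids by regex, then by tags.
    tester = UnitTest().discoverTestsInFolder(
        folder_path="unit",
        base_path="tests",
        pattern="test_*.py",
        test_name_pattern=r"test_config.*",
        tags=["fast"],
    )

Discovery failures surface uniformly as ValueError, so callers need only one except clause.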
-     def _filter_tests_by_name(self, suite: unittest.TestSuite, pattern: str) -> unittest.TestSuite:
-         """Filter tests based on a name pattern."""
-         filtered_suite = unittest.TestSuite()
-         regex = re.compile(pattern)
+     def discoverTestsInModule(self, module_name: str, test_name_pattern: Optional[str] = None) -> 'UnitTest':
+         """
+         Discovers and loads tests from a specified module, optionally filtering them
+         by a test name pattern, and adds them to the test suite.
+         Args:
+             module_name (str): The name of the module to discover tests from.
+             test_name_pattern (Optional[str]): A pattern to filter test names. Only
+                 tests matching this pattern will be included. Defaults to None.
+         Returns:
+             UnitTest: The current instance of the UnitTest class, allowing method chaining.
+         Raises:
+             ValueError: If the specified module cannot be imported.
+         """
+         try:

-         for test in self._flatten_test_suite(suite):
-             if regex.search(test.id()):
-                 filtered_suite.addTest(test)
+             tests = self.loader.loadTestsFromName(module_name)

-         return filtered_suite
+             if test_name_pattern:
+                 tests = self._filterTestsByName(tests, test_name_pattern)

-     def _flatten_test_suite(self, suite: unittest.TestSuite) -> List[unittest.TestCase]:
-         """Flatten a test suite into a list of test cases."""
-         tests = []
-         for item in suite:
-             if isinstance(item, unittest.TestSuite):
-                 tests.extend(self._flatten_test_suite(item))
-             else:
-                 tests.append(item)
-         return tests
+             self.suite.addTests(tests)

-     def _extract_error_info(self, traceback_str: str) -> Tuple[Optional[str], Optional[str]]:
-         """
-         Extract file path and clean traceback from error output.
+             test_count = len(list(self._flattenTestSuite(tests)))
+             self.discovered_tests.append({
+                 "module": module_name,
+                 "test_count": test_count,
+             })

-         Parameters
-         ----------
-         traceback_str : str
-             The full traceback string
+             return self
+         except ImportError as e:
+             raise ValueError(f"Error importing module '{module_name}': {str(e)}")

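Module discovery delegates to `unittest.TestLoader.loadTestsFromName`, so any dotted name the loader accepts works here; the module name below is hypothetical:

    tester = UnitTest().discoverTestsInModule(
        "tests.unit.test_example",
        test_name_pattern=r"test_parse.*",
    )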
-         Returns
-         -------
-         Tuple[Optional[str], Optional[str]]
-             (file_path, clean_traceback)
+     def _startMessage(self) -> None:
          """
-         file_match = re.search(r'File "([^"]+)"', traceback_str)
-         file_path = file_match.group(1) if file_match else None
-
-         # Clean up traceback by removing framework internals
-         tb_lines = traceback_str.split('\n')
-         clean_tb = '\n'.join(line for line in tb_lines if not any(s in line for s in ['unittest/', 'lib/python']))
-
-         return file_path, clean_tb
-
-     def run(self, print_result:bool = True, throw_exception:bool = False) -> Dict[str, Any]:
+         Displays a formatted message indicating the start of the test suite execution.
+         This method prints details about the test suite, including the total number of tests,
+         the execution mode (parallel or sequential), and the start time. The message is styled
+         and displayed using the `rich` library.
+         Attributes:
+             print_result (bool): Determines whether the message should be printed.
+             suite (TestSuite): The test suite containing the tests to be executed.
+             max_workers (int): The number of workers used in parallel execution mode.
+             execution_mode (ExecutionMode): The mode of execution (parallel or sequential).
+             orionis_console (Console): The console object for handling standard output.
+             rich_console (Console): The rich console object for styled output.
+             width_table (int): The calculated width of the message panel for formatting.
+         Raises:
+             AttributeError: If required attributes are not set before calling this method.
          """
-         Execute all tests in the test suite with comprehensive reporting.
-
-         Returns
-         -------
-         Dict[str, Any]
-             Detailed summary of test results including:
-             - total_tests
-             - passed
-             - failed
-             - errors
-             - skipped
-             - total_time
-             - test_details
-
-         Raises
-         ------
-         OrionisTestFailureException
-             If any tests fail or error occurs
+         if self.print_result:
+             test_count = len(list(self._flattenTestSuite(self.suite)))
+             mode_text = f"[stat]Parallel with {self.max_workers} workers[/stat]" if self.execution_mode == ExecutionMode.PARALLEL else "Sequential"
+             textlines = [
+                 f"[bold]Total Tests:[/bold] [stat]{test_count}[/stat]",
+                 f"[bold]Mode:[/bold] {mode_text}",
+                 f"[bold]Started at:[/bold] [dim]{datetime.now().strftime('%Y-%m-%d %H:%M:%S')}[/dim]"
+             ]
+             text = ' | '.join(textlines).replace('[bold]', '').replace('[/bold]', '').replace('[stat]', '').replace('[/stat]', '').replace('[dim]', '').replace('[/dim]', '')
+             self.width_table = len(text) + 4
+
+             self.orionis_console.newLine()
+             self.rich_console.print(Panel.fit(text, border_style="blue", title="🧪 Orionis Framework - Test Suite", title_align="left"))
+             self.orionis_console.newLine()
+
+     def run(self, print_result: bool = None, throw_exception: bool = False) -> Dict[str, Any]:
+         """
+         Executes the test suite and processes the results.
+         Args:
+             print_result (bool, optional): If provided, overrides the instance's
+                 `print_result` attribute to determine whether to print the test results.
+             throw_exception (bool, optional): If True, raises an exception if any
+                 test failures or errors are detected.
+         Returns:
+             Dict[str, Any]: A summary of the test execution, including details such as
+                 execution time, test results, and a timestamp.
+         Raises:
+             OrionisTestFailureException: If `throw_exception` is True and there are
+                 test failures or errors.
          """
+         if print_result is not None:
+             self.print_result = print_result
+         if throw_exception is not None:
+             self.throw_exception = throw_exception
+
          self.start_time = time.time()
-         if print_result:
-             Console.newLine()
-             Console.info("🚀 Starting Test Execution...")
-             Console.newLine()
+         self._startMessage()

          # Setup output capture
          output_buffer = io.StringIO()
          error_buffer = io.StringIO()

-         # Execute tests
+         # Execute tests based on selected mode
+         if self.execution_mode == ExecutionMode.PARALLEL:
+             result = self._runTestsInParallel(output_buffer, error_buffer)
+         else:
+             result = self._runTestsSequentially(output_buffer, error_buffer)
+
+         # Process results
+         execution_time = time.time() - self.start_time
+         summary = self._generateSummary(result, execution_time)
+
+         # Print captured output
+         if self.print_result:
+             self._displayResults(summary, result)
+
+         # Add a timestamp to the summary
+         summary["timestamp"] = datetime.now().isoformat()
+
+         # Raise if any test failed or errored and throw_exception is set
+         if not result.wasSuccessful() and self.throw_exception:
+             raise OrionisTestFailureException(result)
+
+         return summary
+
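Continuing the discovery sketch, here is how the summary returned by `run()` can be consumed; the keys follow `_generateSummary()` below, with `timestamp` appended afterwards:

    summary = tester.run(print_result=False)

    print(summary["total_tests"], summary["passed"], summary["failed"])
    print(f"{summary['success_rate']:.2f}% in {summary['total_time']:.2f}s")
    print(summary["timestamp"])  # ISO-8601 string added after the summary is built

Note that `total_time` and `success_rate` are now numeric (previously pre-formatted strings), which makes the summary machine-readable.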
+     def _runTestsSequentially(self, output_buffer: io.StringIO, error_buffer: io.StringIO) -> unittest.TestResult:
+         """
+         Executes the test suite sequentially, capturing the output and error streams.
+         Args:
+             output_buffer (io.StringIO): A buffer to capture the standard output during test execution.
+             error_buffer (io.StringIO): A buffer to capture the standard error during test execution.
+         Returns:
+             unittest.TestResult: The result of the test suite execution, containing information about
+                 passed, failed, and skipped tests.
+         """
          with redirect_stdout(output_buffer), redirect_stderr(error_buffer):
              runner = unittest.TextTestRunner(
                  stream=output_buffer,
-                 verbosity=2,
-                 resultclass=self._create_custom_result_class()
+                 verbosity=self.verbosity,
+                 failfast=self.fail_fast,
+                 resultclass=self._createCustomResultClass()
              )
              result = runner.run(self.suite)

-         # Process results
-         execution_time = time.time() - self.start_time
-         summary = self._generate_summary(result, execution_time)
+         return result

-         # Print captured output
-         if print_result:
-             self._display_results(summary, result)
-
-         # if there are any errors or failures, raise an exception
-         if not result.wasSuccessful():
-             if throw_exception:
-                 raise OrionisTestFailureException(
-                     f"{summary['failed'] + summary['errors']} test(s) failed"
-                 )
+     def _runTestsInParallel(self, output_buffer: io.StringIO, error_buffer: io.StringIO) -> unittest.TestResult:
+         """
+         Execute tests in parallel using a thread pool.
+         This method runs all test cases in the provided test suite concurrently,
+         utilizing a thread pool for parallel execution. It collects and combines
+         the results of all test cases into a single result object.
+         Args:
+             output_buffer (io.StringIO): A buffer to capture standard output during test execution.
+             error_buffer (io.StringIO): A buffer to capture standard error during test execution.
+         Returns:
+             unittest.TestResult: A combined result object containing the outcomes of all executed tests.
+         Notes:
+             - The method uses a custom result class to aggregate test results.
+             - If `fail_fast` is enabled and a test fails, the remaining tests are canceled.
+             - Minimal output is produced for individual test runs during parallel execution.
+         """
+         test_cases = list(self._flattenTestSuite(self.suite))

-         # Return summary of results
-         return summary
+         # Create a custom result instance to collect all results
+         result_class = self._createCustomResultClass()
+         combined_result = result_class(io.StringIO(), descriptions=True, verbosity=self.verbosity)
+
+         def run_single_test(test):
+             """Helper function to run a single test and return its result."""
+             runner = unittest.TextTestRunner(
+                 stream=io.StringIO(),
+                 verbosity=0,  # Minimal output for parallel runs
+                 failfast=False,
+                 resultclass=result_class
+             )
+             return runner.run(unittest.TestSuite([test]))
+
+         with redirect_stdout(output_buffer), redirect_stderr(error_buffer):
+             with ThreadPoolExecutor(max_workers=self.max_workers) as executor:
+                 futures = [executor.submit(run_single_test, test) for test in test_cases]
+
+                 for future in as_completed(futures):
+                     test_result = future.result()
+                     self._mergeTestResults(combined_result, test_result)

-     def _create_custom_result_class(self) -> type:
-         """Create a custom TestResult class to capture detailed information."""
+                     if self.fail_fast and not combined_result.wasSuccessful():
+                         for f in futures:
+                             f.cancel()
+                         break

+         return combined_result
+
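The fan-out/fan-in pattern above is plain `concurrent.futures`; a stripped-down, self-contained equivalent using only the standard library (the demo test case is hypothetical):

    import io
    import unittest
    from concurrent.futures import ThreadPoolExecutor, as_completed

    class DemoTest(unittest.TestCase):
        def test_a(self):
            self.assertTrue(True)

        def test_b(self):
            self.assertEqual(1 + 1, 2)

    tests = list(unittest.TestLoader().loadTestsFromTestCase(DemoTest))

    def run_one(test):
        # Each worker runs a single-test suite against a silent runner.
        runner = unittest.TextTestRunner(stream=io.StringIO(), verbosity=0)
        return runner.run(unittest.TestSuite([test]))

    with ThreadPoolExecutor(max_workers=2) as pool:
        futures = [pool.submit(run_one, t) for t in tests]
        total = sum(f.result().testsRun for f in as_completed(futures))

    print(total)  # 2

Two caveats apply equally to the implementation above: unittest fixtures are not inherently thread-safe, and `Future.cancel()` cannot stop an already-running test, so fail-fast only prevents not-yet-started tests from executing.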
+     def _mergeTestResults(self, combined_result: unittest.TestResult, individual_result: unittest.TestResult) -> None:
+         """
+         Merges the results of two unittest.TestResult objects into a combined result.
+         This method updates the combined_result object by adding the test run counts,
+         failures, errors, skipped tests, expected failures, and unexpected successes
+         from the individual_result object. Additionally, it merges any custom test
+         results stored in the 'test_results' attribute, if present.
+         Args:
+             combined_result (unittest.TestResult): The TestResult object to which the
+                 results will be merged.
+             individual_result (unittest.TestResult): The TestResult object containing
+                 the results to be merged into the combined_result.
+         Returns:
+             None
+         """
+         combined_result.testsRun += individual_result.testsRun
+         combined_result.failures.extend(individual_result.failures)
+         combined_result.errors.extend(individual_result.errors)
+         combined_result.skipped.extend(individual_result.skipped)
+         combined_result.expectedFailures.extend(individual_result.expectedFailures)
+         combined_result.unexpectedSuccesses.extend(individual_result.unexpectedSuccesses)
+
+         # Merge our custom test results
+         if hasattr(individual_result, 'test_results'):
+             if not hasattr(combined_result, 'test_results'):
+                 combined_result.test_results = []
+             combined_result.test_results.extend(individual_result.test_results)
+
+     def _createCustomResultClass(self) -> type:
+         """
+         Creates a custom test result class that extends `unittest.TextTestResult` to provide enhanced
+         functionality for tracking test execution details, including timings, statuses, and error information.
+         Returns:
+             type: A dynamically created class `EnhancedTestResult` that overrides methods to handle
+                 test results, including success, failure, error, and skipped tests. The class collects
+                 detailed information about each test, such as execution time, error messages, traceback,
+                 and file path.
+         The `EnhancedTestResult` class includes:
+             - `startTest`: Records the start time of a test.
+             - `stopTest`: Calculates and stores the elapsed time for a test.
+             - `addSuccess`: Logs details of a successful test.
+             - `addFailure`: Logs details of a failed test, including error message and traceback.
+             - `addError`: Logs details of a test that encountered an error, including error message and traceback.
+             - `addSkip`: Logs details of a skipped test, including the reason for skipping.
+         Note:
+             This method uses the `this` reference to access the outer class's methods, such as `_extractErrorInfo`.
+         """
          this = self
-         class OrionisTestResult(unittest.TextTestResult):
+
+         class EnhancedTestResult(unittest.TextTestResult):
              def __init__(self, *args, **kwargs):
                  super().__init__(*args, **kwargs)
                  self.test_results = []
+                 self._test_timings = {}
+                 self._current_test_start = None
+
+             def startTest(self, test):
+                 self._current_test_start = time.time()
+                 super().startTest(test)
+
+             def stopTest(self, test):
+                 elapsed = time.time() - self._current_test_start
+                 self._test_timings[test] = elapsed
+                 super().stopTest(test)

              def addSuccess(self, test):
                  super().addSuccess(test)
+                 elapsed = self._test_timings.get(test, 0.0)
                  self.test_results.append(
                      TestResult(
+                         id=test.id(),
                          name=str(test),
                          status=TestStatus.PASSED,
-                         execution_time=0.0
+                         execution_time=elapsed,
+                         class_name=test.__class__.__name__,
+                         method=getattr(test, "_testMethodName", None),
+                         module=getattr(test, "__module__", None),
+                         file_path=inspect.getfile(test.__class__),
                      )
                  )

              def addFailure(self, test, err):
                  super().addFailure(test, err)
+                 elapsed = self._test_timings.get(test, 0.0)
                  tb_str = ''.join(traceback.format_exception(*err))
-                 file_path, clean_tb = this._extract_error_info(tb_str)
+                 file_path, clean_tb = this._extractErrorInfo(tb_str)
                  self.test_results.append(
                      TestResult(
+                         id=test.id(),
                          name=str(test),
                          status=TestStatus.FAILED,
-                         execution_time=0.0,
+                         execution_time=elapsed,
                          error_message=str(err[1]),
                          traceback=clean_tb,
-                         file_path=file_path
+                         class_name=test.__class__.__name__,
+                         method=getattr(test, "_testMethodName", None),
+                         module=getattr(test, "__module__", None),
+                         file_path=inspect.getfile(test.__class__),
                      )
                  )

              def addError(self, test, err):
                  super().addError(test, err)
+                 elapsed = self._test_timings.get(test, 0.0)
                  tb_str = ''.join(traceback.format_exception(*err))
-                 file_path, clean_tb = this._extract_error_info(tb_str)
+                 file_path, clean_tb = this._extractErrorInfo(tb_str)
                  self.test_results.append(
                      TestResult(
+                         id=test.id(),
                          name=str(test),
                          status=TestStatus.ERRORED,
-                         execution_time=0.0,
+                         execution_time=elapsed,
                          error_message=str(err[1]),
                          traceback=clean_tb,
-                         file_path=file_path
+                         class_name=test.__class__.__name__,
+                         method=getattr(test, "_testMethodName", None),
+                         module=getattr(test, "__module__", None),
+                         file_path=inspect.getfile(test.__class__),
                      )
                  )

              def addSkip(self, test, reason):
                  super().addSkip(test, reason)
+                 elapsed = self._test_timings.get(test, 0.0)
                  self.test_results.append(
                      TestResult(
+                         id=test.id(),
                          name=str(test),
                          status=TestStatus.SKIPPED,
-                         execution_time=0.0,
-                         error_message=reason
+                         execution_time=elapsed,
+                         error_message=reason,
+                         class_name=test.__class__.__name__,
+                         method=getattr(test, "_testMethodName", None),
+                         module=getattr(test, "__module__", None),
+                         file_path=inspect.getfile(test.__class__),
                      )
                  )

-         return OrionisTestResult
-
-     def _generate_summary(self, result: unittest.TestResult, execution_time: float) -> Dict[str, Any]:
-         """Generate a comprehensive test summary."""
+         return EnhancedTestResult

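The result class is generated per call so it can close over `this`, the outer UnitTest instance. A minimal standalone analogue that captures only per-test wall time:

    import time
    import unittest

    class TimedResult(unittest.TextTestResult):
        # Minimal analogue of EnhancedTestResult: record wall time per test id.
        def __init__(self, *args, **kwargs):
            super().__init__(*args, **kwargs)
            self.timings = {}
            self._start = None

        def startTest(self, test):
            self._start = time.time()
            super().startTest(test)

        def stopTest(self, test):
            self.timings[test.id()] = time.time() - self._start
            super().stopTest(test)

    # Pass it to a runner via resultclass:
    # unittest.TextTestRunner(resultclass=TimedResult, verbosity=0)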
+     def _generateSummary(self, result: unittest.TestResult, execution_time: float) -> Dict[str, Any]:
+         """
+         Generates a summary of the test results, including details about each test,
+         performance data, and overall statistics.
+         Args:
+             result (unittest.TestResult): The result object containing details of the test execution.
+             execution_time (float): The total execution time of the test suite in seconds.
+         Returns:
+             Dict[str, Any]: A dictionary containing the following keys:
+                 - "total_tests" (int): The total number of tests executed.
+                 - "passed" (int): The number of tests that passed.
+                 - "failed" (int): The number of tests that failed.
+                 - "errors" (int): The number of tests that encountered errors.
+                 - "skipped" (int): The number of tests that were skipped.
+                 - "total_time" (float): The total execution time of the test suite.
+                 - "success_rate" (float): The percentage of tests that passed.
+                 - "test_details" (List[Dict[str, Any]]): A list of dictionaries containing details about each test:
+                     - "id" (str): The unique identifier of the test.
+                     - "class" (str): The class name of the test.
+                     - "method" (str): The method name of the test.
+                     - "status" (str): The status of the test (e.g., "PASSED", "FAILED").
+                     - "execution_time" (float): The execution time of the test in seconds.
+                     - "error_message" (str): The error message if the test failed or errored.
+                     - "traceback" (str): The traceback information if the test failed or errored.
+                     - "file_path" (str): The file path of the test.
+                 - "performance_data" (List[Dict[str, float]]): A list containing performance data:
+                     - "duration" (float): The total execution time of the test suite.
+         """
          test_details = []
+         performance_data = []
+
          for test_result in result.test_results:
-             rst: dict = asdict(test_result)
+             rst: TestResult = test_result
              test_details.append({
-                 'name': rst.get('name'),
-                 'status': rst.get('status').name,
-                 'execution_time': float(rst.get('execution_time', 0)),
-                 'error_message': rst.get('error_message', None),
-                 'traceback': rst.get('traceback', None),
-                 'file_path': rst.get('file_path', None)
+                 'id': rst.id,
+                 'class': rst.class_name,
+                 'method': rst.method,
+                 'status': rst.status.name,
+                 'execution_time': float(rst.execution_time),
+                 'error_message': rst.error_message,
+                 'traceback': rst.traceback,
+                 'file_path': rst.file_path
              })

+         performance_data.append({
+             'duration': float(execution_time)
+         })
+
+         passed = result.testsRun - len(result.failures) - len(result.errors) - len(result.skipped)
+         success_rate = (passed / result.testsRun * 100) if result.testsRun > 0 else 100.0
+
          return {
              "total_tests": result.testsRun,
-             "passed": result.testsRun - len(result.failures) - len(result.errors) - len(result.skipped),
+             "passed": passed,
              "failed": len(result.failures),
              "errors": len(result.errors),
              "skipped": len(result.skipped),
-             "total_time": f"{execution_time:.3f} seconds",
-             "success_rate": f"{((result.testsRun - len(result.failures) - len(result.errors)) / result.testsRun * 100):.1f}%",
-             "test_details": test_details
+             "total_time": execution_time,
+             "success_rate": success_rate,
+             "test_details": test_details,
+             "performance_data": performance_data
          }

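The summary arithmetic deserves a worked example, since the semantics changed: skipped tests now reduce `passed` (and therefore `success_rate`), whereas the old formula ignored them:

    # 10 tests run: 1 failure, 1 error, 2 skipped
    testsRun, failures, errors, skipped = 10, 1, 1, 2
    passed = testsRun - failures - errors - skipped                # 6
    success_rate = passed / testsRun * 100 if testsRun else 100.0  # 60.0
    # Old formula: (10 - 1 - 1) / 10 * 100 = 80.0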
-     def _display_results(self, summary: Dict[str, Any], result: unittest.TestResult) -> None:
-         """Display test results in a formatted manner."""
-         # Summary table
-         Console.table(
-             headers=["Total", "Passed", "Failed", "Errors", "Skipped", "Duration", "Success Rate"],
-             rows=[[
-                 summary["total_tests"],
-                 summary["passed"],
-                 summary["failed"],
-                 summary["errors"],
-                 summary["skipped"],
-                 summary["total_time"],
-                 summary["success_rate"]
-             ]]
+     def _printSummaryTable(self, summary: Dict[str, Any]) -> None:
+         """
+         Prints a summary table of test results using the Rich library.
+
+         Args:
+             summary (Dict[str, Any]): A dictionary containing the test summary data.
+                 Expected keys in the dictionary:
+                 - "total_tests" (int): Total number of tests executed.
+                 - "passed" (int): Number of tests that passed.
+                 - "failed" (int): Number of tests that failed.
+                 - "errors" (int): Number of tests that encountered errors.
+                 - "skipped" (int): Number of tests that were skipped.
+                 - "total_time" (float): Total duration of the test run in seconds.
+                 - "success_rate" (float): Percentage of tests that passed.
+
+         Returns:
+             None
+         """
+         table = Table(show_header=True, header_style="bold white", width=self.width_table, border_style="blue")
+         table.add_column("Total", justify="center")
+         table.add_column("Passed", justify="center")
+         table.add_column("Failed", justify="center")
+         table.add_column("Errors", justify="center")
+         table.add_column("Skipped", justify="center")
+         table.add_column("Duration", justify="center")
+         table.add_column("Success Rate", justify="center")
+         table.add_row(
+             str(summary["total_tests"]),
+             str(summary["passed"]),
+             str(summary["failed"]),
+             str(summary["errors"]),
+             str(summary["skipped"]),
+             f"{summary['total_time']:.2f}s",
+             f"{summary['success_rate']:.2f}%"
          )
-         Console.newLine()
-
-         # Detailed failure/error reporting
-         if result.failures or result.errors:
-             Console.textSuccessBold("Test Failures and Errors")
-             for test, traceback_str in result.failures + result.errors:
-                 file_path, clean_tb = self._extract_error_info(traceback_str)
-                 title = f"❌ {test.id()}" + (f" ({file_path})" if file_path else "")
-                 Console.fail(title)
-                 Console.write(clean_tb)
-                 Console.newLine()
-
-         # Performance highlights
-         if len(self.test_results) > 10:
-             slow_tests = sorted(
-                 [r for r in self.test_results if r.status == TestStatus.PASSED],
-                 key=lambda x: x.execution_time,
-                 reverse=True
-             )[:3]
-             if slow_tests:
-                 Console.textSuccessBold("⏱️ Slowest Passing Tests")
-                 for test in slow_tests:
-                     Console.warning(f"{test.name}: {test.execution_time:.3f}s")
-
-         # Final status
-         if result.wasSuccessful():
-             Console.success("✅ All tests passed successfully!")
-         else:
-             Console.error(
-                 f"❌ {summary['failed'] + summary['errors']} test(s) failed "
-                 f"(Success Rate: {summary['success_rate']})"
+         self.rich_console.print(table)
+         self.orionis_console.newLine()
+
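The table construction uses Rich's standard Table API; a self-contained sketch with dummy numbers:

    from rich.console import Console
    from rich.table import Table

    table = Table(show_header=True, header_style="bold white", border_style="blue")
    for col in ("Total", "Passed", "Failed", "Errors", "Skipped", "Duration", "Success Rate"):
        table.add_column(col, justify="center")
    table.add_row("10", "6", "1", "1", "2", "0.42s", "60.00%")
    Console().print(table)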
+     def _filterTestsByName(self, suite: unittest.TestSuite, pattern: str) -> unittest.TestSuite:
+         """
+         Filters the tests in a given test suite based on a specified name pattern.
+         Args:
+             suite (unittest.TestSuite): The test suite containing the tests to filter.
+             pattern (str): A regular expression pattern to match test names.
+         Returns:
+             unittest.TestSuite: A new test suite containing only the tests that match the pattern.
+         Raises:
+             ValueError: If the provided pattern is not a valid regular expression.
+         Notes:
+             - The method flattens the input test suite to iterate over individual tests.
+             - A test is included in the filtered suite if its ID matches the provided regex pattern.
+         """
+         filtered_suite = unittest.TestSuite()
+         try:
+             regex = re.compile(pattern)
+         except re.error as e:
+             raise ValueError(f"Invalid test name pattern: {str(e)}")
+
+         for test in self._flattenTestSuite(suite):
+             if regex.search(test.id()):
+                 filtered_suite.addTest(test)
+
+         return filtered_suite
+
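The name filter matches the compiled regex against `test.id()`, which is the dotted `module.Class.method` path, so a pattern can target any segment of it:

    import re

    regex = re.compile(r"test_config.*")
    test_id = "tests.unit.test_app.TestApp.test_config_loads"  # illustrative id
    print(bool(regex.search(test_id)))  # True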
+     def _filterTestsByTags(self, suite: unittest.TestSuite, tags: List[str]) -> unittest.TestSuite:
+         """
+         Filters a unittest TestSuite to include only tests that match the specified tags.
+         This method iterates through all tests in the provided TestSuite and checks
+         for a `__tags__` attribute either on the test method or the test case class.
+         If any of the specified tags match the tags associated with the test, the test
+         is added to the filtered TestSuite.
+         Args:
+             suite (unittest.TestSuite): The original TestSuite containing all tests.
+             tags (List[str]): A list of tags to filter the tests by.
+         Returns:
+             unittest.TestSuite: A new TestSuite containing only the tests that match
+                 the specified tags.
+         """
+         filtered_suite = unittest.TestSuite()
+         tag_set = set(tags)
+
+         for test in self._flattenTestSuite(suite):
+             # Get test method if this is a TestCase instance
+             test_method = getattr(test, test._testMethodName, None)
+
+             # Check for tags attribute on the test method
+             if hasattr(test_method, '__tags__'):
+                 method_tags = set(getattr(test_method, '__tags__'))
+                 if tag_set.intersection(method_tags):
+                     filtered_suite.addTest(test)
+             # Also check on the test case class
+             elif hasattr(test, '__tags__'):
+                 class_tags = set(getattr(test, '__tags__'))
+                 if tag_set.intersection(class_tags):
+                     filtered_suite.addTest(test)
+
+         return filtered_suite
+
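The diff does not ship a decorator for setting `__tags__`, so a hypothetical helper is sketched here to show the attribute contract the filter reads (on the method first, then the class):

    import unittest

    def tag(*names):
        # Hypothetical helper: attach __tags__ for _filterTestsByTags to find.
        def decorator(obj):
            obj.__tags__ = set(names)
            return obj
        return decorator

    class TestMath(unittest.TestCase):
        @tag("fast", "unit")
        def test_add(self):
            self.assertEqual(2 + 2, 4)

With this in place, `discoverTestsInFolder("unit", tags=["fast"])` would keep `test_add` and drop untagged tests.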
+     def _flattenTestSuite(self, suite: unittest.TestSuite) -> List[unittest.TestCase]:
+         """
+         Flattens a nested unittest.TestSuite into a list of individual unittest.TestCase instances.
+         This method recursively traverses the given TestSuite, extracting all TestCase instances
+         while avoiding duplicates. It ensures that each TestCase appears only once in the resulting list.
+         Args:
+             suite (unittest.TestSuite): The TestSuite to be flattened.
+         Returns:
+             List[unittest.TestCase]: A list of unique TestCase instances extracted from the TestSuite.
+         """
+         tests = []
+         seen = set()
+
+         def _flatten(item):
+             if isinstance(item, unittest.TestSuite):
+                 for sub_item in item:
+                     _flatten(sub_item)
+             elif item not in seen:
+                 seen.add(item)
+                 tests.append(item)
+
+         _flatten(suite)
+         return tests
+
+     def _sanitizeTraceback(self, test_path: str, traceback_test: str) -> str:
+         """
+         Sanitizes a traceback string to extract and display the most relevant parts
+         related to a specific test file.
+         Args:
+             test_path (str): The file path of the test file being analyzed.
+             traceback_test (str): The full traceback string to be sanitized.
+         Returns:
+             str: A sanitized traceback string containing only the relevant parts
+                 related to the test file. If no relevant parts are found, the full
+                 traceback is returned. If the traceback is empty, a default message
+                 "No traceback available" is returned.
+         """
+         if not traceback_test:
+             return "No traceback available"
+
+         # Try to extract the test file name
+         file_match = re.search(r'([^/\\]+)\.py', test_path)
+         file_name = file_match.group(1) if file_match else None
+
+         if not file_name:
+             return traceback_test
+
+         # Process traceback to show most relevant parts
+         lines = traceback_test.splitlines()
+         relevant_lines = []
+         found_test_file = False if file_name in traceback_test else True
+
+         for line in lines:
+             if file_name in line and not found_test_file:
+                 found_test_file = True
+             if found_test_file:
+                 if 'File' in line:
+                     relevant_lines.append(line.strip())
+                 elif line.strip() != '':
+                     relevant_lines.append(line)
+
+         # If we didn't find the test file, return the full traceback
+         if not relevant_lines:
+             return traceback_test
+
+         return '\n'.join(relevant_lines)
+
+     def _displayResults(self, summary: Dict[str, Any], result: unittest.TestResult) -> None:
+         """
+         Displays the results of the test execution, including a summary table and detailed
+         information about failed or errored tests grouped by their test classes.
+         Args:
+             summary (Dict[str, Any]): A dictionary containing the summary of the test execution,
+                 including test details, statuses, and execution times.
+             result (unittest.TestResult): The result object containing information about the
+                 test run, including successes, failures, and errors.
+         Behavior:
+             - Prints a summary table of the test results.
+             - Groups failed and errored tests by their test class and displays them in a
+               structured format using panels.
+             - For each failed or errored test, displays the traceback in a syntax-highlighted
+               panel with additional metadata such as the test method name and execution time.
+             - Uses different icons and border colors to distinguish between failed and errored tests.
+             - Calls a finishing message method after displaying all results.
+         """
+         self._printSummaryTable(summary)
+
+         # Group failures and errors by test class
+         failures_by_class = {}
+         for test in summary["test_details"]:
+             if test["status"] in (TestStatus.FAILED.name, TestStatus.ERRORED.name):
+                 class_name = test["class"]
+                 if class_name not in failures_by_class:
+                     failures_by_class[class_name] = []
+                 failures_by_class[class_name].append(test)
+
+         # Display grouped failures
+         for class_name, tests in failures_by_class.items():
+
+             class_panel = Panel.fit(f"[bold]{class_name}[/bold]", border_style="red", padding=(0, 2))
+             self.rich_console.print(class_panel)
+
+             for test in tests:
+                 traceback_str = self._sanitizeTraceback(test['file_path'], test['traceback'])
+                 syntax = Syntax(
+                     traceback_str,
+                     lexer="python",
+                     line_numbers=False,
+                     background_color="default",
+                     word_wrap=True,
+                     theme="monokai"
+                 )
+
+                 icon = "❌" if test["status"] == TestStatus.FAILED.name else "💥"
+                 border_color = "yellow" if test["status"] == TestStatus.FAILED.name else "red"
+
+                 panel = Panel(
+                     syntax,
+                     title=f"{icon} {test['method']}",
+                     subtitle=f"Duration: {test['execution_time']:.3f}s",
+                     border_style=border_color,
+                     title_align="left",
+                     padding=(1, 1),
+                     subtitle_align="right"
+                 )
+                 self.rich_console.print(panel)
+                 self.orionis_console.newLine()
+
+         self._finishMessage(summary)
+
+     def _extractErrorInfo(self, traceback_str: str) -> Tuple[Optional[str], Optional[str]]:
+         """
+         Extracts error information from a traceback string.
+         This method processes a traceback string to extract the file path of the
+         Python file where the error occurred and cleans up the traceback by
+         removing framework internals and irrelevant noise.
+         Args:
+             traceback_str (str): The traceback string to process.
+         Returns:
+             Tuple[Optional[str], Optional[str]]: A tuple containing:
+                 - The file path of the Python file where the error occurred, or None if not found.
+                 - The cleaned-up traceback string, or the original traceback string if no cleanup was performed.
+         """
+         # Extract file path
+         file_matches = re.findall(r'File ["\'](.*?.py)["\']', traceback_str)
+         file_path = file_matches[-1] if file_matches else None
+
+         # Clean up traceback by removing framework internals and noise
+         tb_lines = traceback_str.split('\n')
+         clean_lines = []
+         relevant_lines_started = False
+
+         for line in tb_lines:
+             # Skip framework internal lines
+             if any(s in line for s in ['unittest/', 'lib/python', 'site-packages']):
+                 continue
+
+             # Start capturing when we hit the test file
+             if file_path and file_path in line and not relevant_lines_started:
+                 relevant_lines_started = True
+
+             if relevant_lines_started:
+                 clean_lines.append(line)
+
+         clean_tb = '\n'.join(clean_lines) if clean_lines else traceback_str
+
+         return file_path, clean_tb
+
+     def _finishMessage(self, summary: Dict[str, Any]) -> None:
+         """
+         Displays a summary message for the test suite execution if result printing is enabled.
+         Args:
+             summary (Dict[str, Any]): A dictionary containing the test suite summary,
+                 including keys such as 'failed', 'errors', and 'total_time'.
+         Behavior:
+             - If `self.print_result` is False, the method returns without displaying anything.
+             - Constructs a message indicating the total execution time of the test suite.
+             - Displays a status icon (✅ for success, ❌ for failure) based on the presence of
+               failures or errors in the test suite.
+             - Formats and prints the message within a styled panel using the `rich` library.
+         """
+         if not self.print_result:
+             return
+
+         status_icon = "✅" if (summary['failed'] + summary['errors']) == 0 else "❌"
+         msg = f"Test suite completed in {summary['total_time']:.2f} seconds"
+         len_spaces = max(0, int(self.width_table - len(msg)))
+         spaces = ' ' * (len_spaces - 4)
+         self.rich_console.print(
+             Panel.fit(
+                 f"{msg}{spaces}",
+                 border_style="blue",
+                 title=f"{status_icon} Test Suite Finished",
+                 title_align='left',
+                 padding=(0, 1)
              )
-         Console.newLine()
+         )
+         self.rich_console.print()
+
+     def getTestNames(self) -> List[str]:
+         """
+         Retrieves a list of test names from the test suite.
+
+         This method flattens the test suite and extracts the unique identifier
+         (`id`) of each test case.
+
+         Returns:
+             List[str]: A list of test names (unique identifiers) from the test suite.
+         """
+         return [test.id() for test in self._flattenTestSuite(self.suite)]
+
+     def getTestCount(self) -> int:
+         """
+         Calculate the total number of tests in the test suite.
+
+         This method flattens the test suite structure and counts the total
+         number of individual test cases.
+
+         Returns:
+             int: The total number of test cases in the test suite.
+         """
+         return len(list(self._flattenTestSuite(self.suite)))
+
+     def clearTests(self) -> None:
+         """
+         Clears the current test suite by reinitializing it to an empty `unittest.TestSuite`.
+
+         This method is used to reset the test suite, removing any previously added tests.
+         """
+         self.suite = unittest.TestSuite()
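Putting the new public surface together, an end-to-end sketch with hypothetical paths:

    from orionis.luminate.test.core.test_unit import UnitTest
    from orionis.luminate.test.enums.test_mode import ExecutionMode

    tester = (
        UnitTest()
        .configure(execution_mode=ExecutionMode.SEQUENTIAL, print_result=True)
        .discoverTestsInFolder("unit")
    )
    print(tester.getTestCount())      # number of individual TestCases
    print(tester.getTestNames()[:3])  # first few dotted test ids
    summary = tester.run()
    tester.clearTests()               # resets to an empty TestSuite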