orionis 0.314.0__py3-none-any.whl → 0.315.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,448 @@
+ import re
+ from datetime import datetime
+ from typing import Any, Dict
+ from rich.console import Console
+ from rich.live import Live
+ from rich.panel import Panel
+ from rich.syntax import Syntax
+ from rich.table import Table
+ from rich.text import Text
+ from orionis.services.introspection.instances.reflection_instance import ReflectionInstance
+ from orionis.test.contracts.printer import ITestPrinter
+ from orionis.test.enums.test_status import TestStatus
+
+ class TestPrinter(ITestPrinter):
+
+     def __init__(
+         self
+     ) -> None:
+         self.__rich_console = Console()
+         self.__panel_title: str = "🧪 Orionis Framework - Component Test Suite"
+         self.__panel_width: int = int(self.__rich_console.width * 0.75)
+         self.__debug_keywords: list = ['self.dd', 'self.dump']
+
+     def print(
+         self,
+         value: Any
+     ) -> None:
+         """
+         Prints a value to the console using the rich console.
+         Parameters
+         ----------
+         value : Any
+             The value to be printed. It can be a string, object, or any other type.
+         Notes
+         -----
+         - If the value is a string, it is printed directly.
+         - If the value is a list, each item is printed on a new line.
+         - Any other object is printed via its string representation.
+         """
+         if isinstance(value, str):
+             self.__rich_console.print(value)
+         elif isinstance(value, list):
+             for item in value:
+                 self.__rich_console.print(item)
+         elif isinstance(value, object):
+             self.__rich_console.print(str(value))
+         else:
+             self.__rich_console.print(str(value))
+
+     def startMessage(
+         self,
+         *,
+         print_result: bool,
+         length_tests: int,
+         execution_mode: str,
+         max_workers: int
+     ):
+         """
+         Displays a formatted start message for the test execution session.
+
+         Parameters
+         ----------
+         print_result : bool
+             Whether to print the start message.
+         length_tests : int
+             The total number of tests to be executed.
+         execution_mode : str
+             The mode of execution, either "parallel" or "sequential".
+         max_workers : int
+             The number of worker threads/processes for parallel execution.
+
+         Side Effects
+         ------------
+         Prints a styled panel with test session information to the console if `print_result` is True.
+         """
+         if print_result:
+             mode_text = f"[stat]Parallel with {max_workers} workers[/stat]" if execution_mode == "parallel" else "Sequential"
+             textlines = [
+                 f"[bold]Total Tests:[/bold] [dim]{length_tests}[/dim]",
+                 f"[bold]Mode:[/bold] [dim]{mode_text}[/dim]",
+                 f"[bold]Started at:[/bold] [dim]{datetime.now().strftime('%Y-%m-%d %H:%M:%S')}[/dim]"
+             ]
+
+             self.__rich_console.line(1)
+             self.__rich_console.print(
+                 Panel(
+                     '\n'.join(textlines),
+                     border_style="blue",
+                     title=self.__panel_title,
+                     title_align="center",
+                     width=self.__panel_width,
+                     padding=(0, 1)
+                 )
+             )
+             self.__rich_console.line(1)
+
+     def finishMessage(
+         self,
+         *,
+         print_result: bool,
+         summary: Dict[str, Any]
+     ) -> None:
+         """
+         Display a summary message for the test suite execution.
+
+         Parameters
+         ----------
+         summary : dict
+             Dictionary containing the test suite summary, including keys such as
+             'failed', 'errors', and 'total_time'.
+
+         Notes
+         -----
+         - If `print_result` is False, the method returns without displaying anything.
+         - Shows a status icon (✅ for success, ❌ for failure) based on the presence of
+           failures or errors in the test suite.
+         - Formats and prints the message within a styled panel using the `rich` library.
+         """
+         if print_result:
+             status_icon = "✅" if (summary['failed'] + summary['errors']) == 0 else "❌"
+             msg = f"Test suite completed in {summary['total_time']:.2f} seconds"
+             self.__rich_console.print(
+                 Panel(
+                     msg,
+                     border_style="blue",
+                     title=f"{status_icon} Test Suite Finished",
+                     title_align='left',
+                     width=self.__panel_width,
+                     padding=(0, 1)
+                 )
+             )
+             self.__rich_console.line(1)
+
+     def executePanel(
+         self,
+         *,
+         print_result: bool,
+         flatten_test_suite: list,
+         callable: callable
+     ):
+         """
+         Executes a test suite panel with optional live console output.
+
+         Parameters
+         ----------
+         print_result : bool
+             If True, displays a running message panel while executing the test suite.
+         flatten_test_suite : list
+             The flattened list of test cases or test suite items to be executed.
+         callable : callable
+             The function or method to execute the test suite.
+
+         Returns
+         -------
+         Any
+             The result returned by the provided callable after execution.
+
+         Notes
+         -----
+         This method manages the display of a running message panel using the Rich library,
+         depending on whether debugging is enabled in the test suite and whether results should be printed.
+         If debugging or dump calls are detected in the test code, a live console is used to display
+         real-time updates. Otherwise, a static panel is shown before executing the test suite.
+         """
+
+         # Determines if the live console should be used based on the presence of debug or dump calls in the test code.
+         use_debugger = self.__withDebugger(
+             flatten_test_suite=flatten_test_suite
+         )
+
+         # Prepare the running message based on whether live console is enabled
+         if print_result:
+             message = "[bold yellow]⏳ Running tests...[/bold yellow]\n"
+             message += "[dim]This may take a few seconds. Please wait...[/dim]" if use_debugger else "[dim]Please wait, results will appear below...[/dim]"
+
+             # Panel for running message
+             running_panel = Panel(
+                 message,
+                 border_style="yellow",
+                 title="In Progress",
+                 title_align="left",
+                 width=self.__panel_width,
+                 padding=(1, 2)
+             )
+
+             # Elegant "running" message using Rich Panel
+             if use_debugger:
+                 with Live(running_panel, console=self.__rich_console, refresh_per_second=4, transient=True):
+                     return callable()
+             else:
+                 self.__rich_console.print(running_panel)
+                 return callable()
+         else:
+             # If not printing results, run the suite without live console
+             return callable()
+
+     def linkWebReport(
+         self,
+         path: str
+     ):
+         """
+         Prints an elegant invitation to view the test results, with an underlined path.
+
+         Parameters
+         ----------
+         path : str or Path
+             The path to the test results report.
+         """
+         invite_text = Text("Test results saved. ", style="green")
+         invite_text.append("View report: ", style="bold green")
+         invite_text.append(str(path), style="underline blue")
+         self.__rich_console.print(invite_text)
+
+     def summaryTable(
+         self,
+         summary: Dict[str, Any]
+     ) -> None:
+         """
+         Prints a summary table of test results using the Rich library.
+
+         Parameters
+         ----------
+         summary : dict
+             Dictionary with the test summary data. Must contain the following keys:
+             total_tests : int
+                 Total number of tests executed.
+             passed : int
+                 Number of tests that passed.
+             failed : int
+                 Number of tests that failed.
+             errors : int
+                 Number of tests that had errors.
+             skipped : int
+                 Number of tests that were skipped.
+             total_time : float
+                 Total duration of the test execution in seconds.
+             success_rate : float
+                 Percentage of tests that passed.
+
+         Returns
+         -------
+         None
+         """
+         table = Table(
+             show_header=True,
+             header_style="bold white",
+             width=self.__panel_width,
+             border_style="blue"
+         )
+         table.add_column("Total", justify="center")
+         table.add_column("Passed", justify="center")
+         table.add_column("Failed", justify="center")
+         table.add_column("Errors", justify="center")
+         table.add_column("Skipped", justify="center")
+         table.add_column("Duration", justify="center")
+         table.add_column("Success Rate", justify="center")
+         table.add_row(
+             str(summary["total_tests"]),
+             str(summary["passed"]),
+             str(summary["failed"]),
+             str(summary["errors"]),
+             str(summary["skipped"]),
+             f"{summary['total_time']:.2f}s",
+             f"{summary['success_rate']:.2f}%"
+         )
+         self.__rich_console.print(table)
+         self.__rich_console.line(1)
+
+     def displayResults(
+         self,
+         *,
+         print_result: bool,
+         summary: Dict[str, Any]
+     ) -> None:
+         """
+         Display the results of the test execution, including a summary table and detailed
+         information about failed or errored tests grouped by their test classes.
+
+         Parameters
+         ----------
+         summary : dict
+             Dictionary containing the summary of the test execution, including test details,
+             statuses, and execution times.
+
+         Notes
+         -----
+         - Prints a summary table of the test results.
+         - Groups failed and errored tests by their test class and displays them in a structured
+           format using panels.
+         - For each failed or errored test, displays the traceback in a syntax-highlighted panel
+           with additional metadata such as the test method name and execution time.
+         - Uses different icons and border colors to distinguish between failed and errored tests.
+         - Calls a finishing message method after displaying all results.
+         """
+
+         # If not printing results, return early
+         if not print_result:
+             return
+
+         # Print summary table
+         self.summaryTable(summary)
+
+         # Group failures and errors by test class
+         failures_by_class = {}
+         for test in summary["test_details"]:
+             if test["status"] in (TestStatus.FAILED.name, TestStatus.ERRORED.name):
+                 class_name = test["class"]
+                 if class_name not in failures_by_class:
+                     failures_by_class[class_name] = []
+                 failures_by_class[class_name].append(test)
+
+         # Display grouped failures
+         for class_name, tests in failures_by_class.items():
+
+             class_panel = Panel.fit(f"[bold]{class_name}[/bold]", border_style="red", padding=(0, 2))
+             self.__rich_console.print(class_panel)
+
+             for test in tests:
+                 traceback_str = self.__sanitizeTraceback(test['file_path'], test['traceback'])
+                 syntax = Syntax(
+                     traceback_str,
+                     lexer="python",
+                     line_numbers=False,
+                     background_color="default",
+                     word_wrap=True,
+                     theme="monokai"
+                 )
+
+                 icon = "❌" if test["status"] == TestStatus.FAILED.name else "💥"
+                 border_color = "yellow" if test["status"] == TestStatus.FAILED.name else "red"
+
+                 # Ensure execution time is never zero for display purposes
+                 if not test['execution_time'] or test['execution_time'] == 0:
+                     test['execution_time'] = 0.001
+
+                 panel = Panel(
+                     syntax,
+                     title=f"{icon} {test['method']}",
+                     subtitle=f"Duration: {test['execution_time']:.3f}s",
+                     border_style=border_color,
+                     title_align="left",
+                     padding=(1, 1),
+                     subtitle_align="right",
+                     width=self.__panel_width
+                 )
+                 self.__rich_console.print(panel)
+                 self.__rich_console.line(1)
+
+     def __withDebugger(
+         self,
+         flatten_test_suite: list
+     ) -> bool:
+         """
+         Checks if any test case in the provided flattened test suite uses debugging or dumping methods.
+         This method inspects the source code of each test case to determine if it contains
+         calls to 'self.dd' or 'self.dump' that are not commented out. If such a call is found,
+         the method returns True, indicating that a debugger or dump method is used.
+
+         Parameters
+         ----------
+         flatten_test_suite : list
+             A list of test case instances to inspect.
+
+         Returns
+         -------
+         bool
+             True if any test case uses 'self.dd' or 'self.dump' outside of comments,
+             False otherwise.
+
+         Notes
+         -----
+         Lines that contain the keywords but are commented out (i.e., start with '#') are ignored.
+         If an exception occurs during the inspection process, the method conservatively returns False.
+         """
+
+         try:
+             for test_case in flatten_test_suite:
+                 source = ReflectionInstance(test_case).getSourceCode()
+                 for line in source.splitlines():
+                     stripped = line.strip()
+                     # Ignore commented lines
+                     if stripped.startswith('#'):
+                         continue
+                     # Check for any debug keyword in the line
+                     if any(keyword in line for keyword in self.__debug_keywords):
+                         return True
+             return False
+         except Exception:
+             # If any error occurs, assume debugger is not used
+             return False
+
+     def __sanitizeTraceback(
+         self,
+         test_path: str,
+         traceback_test: str
+     ) -> str:
+         """
+         Sanitize a traceback string to extract and display the most relevant parts
+         related to a specific test file.
+
+         Parameters
+         ----------
+         test_path : str
+             The file path of the test file being analyzed.
+         traceback_test : str
+             The full traceback string to be sanitized.
+
+         Returns
+         -------
+         str
+             A sanitized traceback string containing only the relevant parts related to the test file.
+             If no relevant parts are found, the full traceback is returned.
+             If the traceback is empty, a default message "No traceback available for this test." is returned.
+         """
+
+         # Check if the traceback is empty
+         if not traceback_test:
+             return "No traceback available for this test."
+
+         # Try to extract the test file name
+         file_match = re.search(r'([^/\\]+)\.py', test_path)
+         file_name = file_match.group(1) if file_match else None
+
+         # If we can't find the file name, return the full traceback
+         if not file_name:
+             return traceback_test
+
+         # Process traceback to show most relevant parts
+         lines = traceback_test.splitlines()
+         relevant_lines = []
+         found_test_file = file_name not in traceback_test
+
+         # Iterate through the traceback lines to find relevant parts
+         for line in lines:
+             if file_name in line and not found_test_file:
+                 found_test_file = True
+             if found_test_file:
+                 if 'File' in line:
+                     relevant_lines.append(line.strip())
+                 elif line.strip() != '':
+                     relevant_lines.append(line)
+
+         # If no relevant lines were collected, return the full traceback
+         if not relevant_lines:
+             return traceback_test
+
+         # Join the relevant lines into the sanitized traceback
+         return '\n'.join(relevant_lines)
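
For readers of this diff, a minimal usage sketch of the new TestPrinter class follows. It is illustrative only and not part of the package: the import path and all sample values are assumptions, while the method signatures and the summary keys ('total_tests', 'passed', 'failed', 'errors', 'skipped', 'total_time', 'success_rate', 'test_details') come from the code above.

# Illustrative sketch only -- the import path below is an assumption, not
# something shown in this diff; only the class name, method signatures and
# summary keys are taken from the file added above.
from orionis.test.output.printer import TestPrinter  # hypothetical module path

printer = TestPrinter()

# Announce a sequential run of 10 tests.
printer.startMessage(
    print_result=True,
    length_tests=10,
    execution_mode="sequential",
    max_workers=1,
)

# Run the suite inside the "In Progress" panel; the callable does the real work.
result = printer.executePanel(
    print_result=True,
    flatten_test_suite=[],      # flattened list of test case instances
    callable=lambda: None,      # placeholder for the function that runs the tests
)

# Summary keys as documented in summaryTable() / displayResults(); values are made up.
summary = {
    "total_tests": 10, "passed": 9, "failed": 1, "errors": 0, "skipped": 0,
    "total_time": 1.23, "success_rate": 90.0, "test_details": [],
}
printer.displayResults(print_result=True, summary=summary)
printer.finishMessage(print_result=True, summary=summary)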