eazytester 26.5.0__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- eazytester-26.5.0/PKG-INFO +8 -0
- eazytester-26.5.0/eazytester/__init__.py +0 -0
- eazytester-26.5.0/eazytester/engine.py +140 -0
- eazytester-26.5.0/eazytester.egg-info/PKG-INFO +8 -0
- eazytester-26.5.0/eazytester.egg-info/SOURCES.txt +7 -0
- eazytester-26.5.0/eazytester.egg-info/dependency_links.txt +1 -0
- eazytester-26.5.0/eazytester.egg-info/top_level.txt +1 -0
- eazytester-26.5.0/pyproject.toml +35 -0
- eazytester-26.5.0/setup.cfg +4 -0
|
@@ -0,0 +1,8 @@
|
|
|
1
|
+
Metadata-Version: 2.4
|
|
2
|
+
Name: eazytester
|
|
3
|
+
Version: 26.5.0
|
|
4
|
+
Summary: Utility for running unit tests and logging results in a structured format.
|
|
5
|
+
Author-email: Aric Kraft <kraft.aric@gmail.com>
|
|
6
|
+
Classifier: Programming Language :: Python :: 3
|
|
7
|
+
Classifier: License :: OSI Approved :: MIT License
|
|
8
|
+
Classifier: Operating System :: OS Independent
|
|
File without changes
|
|
@@ -0,0 +1,140 @@
|
|
|
1
|
+
import os
|
|
2
|
+
from datetime import datetime
|
|
3
|
+
from io import StringIO
|
|
4
|
+
from pathlib import Path
|
|
5
|
+
from typing import Optional, Type
|
|
6
|
+
from unittest import TestCase, TestLoader, TestSuite, TextTestRunner
|
|
7
|
+
|
|
8
|
+
|
|
9
|
+
class Engine:
|
|
10
|
+
"""Robust test runner that executes one or more test classes and logs results."""
|
|
11
|
+
|
|
12
|
+
def __init__(
|
|
13
|
+
self, output_file: Optional[str] = None, open_log: bool = True
|
|
14
|
+
) -> None:
|
|
15
|
+
"""
|
|
16
|
+
Initialize the test engine.
|
|
17
|
+
|
|
18
|
+
Args:
|
|
19
|
+
output_file: Path to the log file. Defaults to 'test_results.log'
|
|
20
|
+
open_log: Whether to automatically open the log file after writing.
|
|
21
|
+
Defaults to True.
|
|
22
|
+
"""
|
|
23
|
+
self.output_file = output_file or "test_results.log"
|
|
24
|
+
self.open_log = open_log
|
|
25
|
+
self._test_results = {}
|
|
26
|
+
self._test_suite = None
|
|
27
|
+
|
|
28
|
+
def add_tests(self, *test_classes: Type[TestCase]) -> "Engine":
|
|
29
|
+
"""
|
|
30
|
+
Add one or more test classes to run.
|
|
31
|
+
|
|
32
|
+
Args:
|
|
33
|
+
*test_classes: One or more unittest.TestCase subclasses
|
|
34
|
+
|
|
35
|
+
Returns:
|
|
36
|
+
Engine instance for method chaining
|
|
37
|
+
"""
|
|
38
|
+
if self._test_suite is None:
|
|
39
|
+
self._test_suite = TestSuite()
|
|
40
|
+
|
|
41
|
+
for test_class in test_classes:
|
|
42
|
+
if not issubclass(test_class, TestCase):
|
|
43
|
+
raise ValueError(
|
|
44
|
+
f"{test_class} must be a subclass of unittest.TestCase"
|
|
45
|
+
)
|
|
46
|
+
self._test_suite.addTests(
|
|
47
|
+
TestSuite().loadTestsFromTestCase(test_class)
|
|
48
|
+
)
|
|
49
|
+
|
|
50
|
+
return self
|
|
51
|
+
|
|
52
|
+
def run_tests(self) -> bool:
|
|
53
|
+
"""
|
|
54
|
+
Run all added test classes and log results.
|
|
55
|
+
|
|
56
|
+
Returns:
|
|
57
|
+
True if all tests passed, False otherwise
|
|
58
|
+
"""
|
|
59
|
+
if self._test_suite is None or self._test_suite.countTestCases() == 0:
|
|
60
|
+
raise RuntimeError("No test classes added. Use add_tests() first.")
|
|
61
|
+
|
|
62
|
+
# Capture test output
|
|
63
|
+
stream = StringIO()
|
|
64
|
+
runner = TextTestRunner(stream=stream, verbosity=2)
|
|
65
|
+
|
|
66
|
+
print(f"Running {self._test_suite.countTestCases()} tests...")
|
|
67
|
+
result = runner.run(self._test_suite)
|
|
68
|
+
|
|
69
|
+
# Store results
|
|
70
|
+
self._test_results = {
|
|
71
|
+
"total_tests": result.testsRun,
|
|
72
|
+
"failures": len(result.failures),
|
|
73
|
+
"errors": len(result.errors),
|
|
74
|
+
"skipped": len(result.skipped),
|
|
75
|
+
"success": result.wasSuccessful(),
|
|
76
|
+
"output": stream.getvalue(),
|
|
77
|
+
"failures_detail": result.failures,
|
|
78
|
+
"errors_detail": result.errors,
|
|
79
|
+
}
|
|
80
|
+
|
|
81
|
+
# Log to file
|
|
82
|
+
self._write_log_file()
|
|
83
|
+
|
|
84
|
+
return result.wasSuccessful()
|
|
85
|
+
|
|
86
|
+
def _write_log_file(self) -> None:
|
|
87
|
+
"""Write test results to log file."""
|
|
88
|
+
timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
|
|
89
|
+
|
|
90
|
+
log_content = f"Test Results - {timestamp}\n"
|
|
91
|
+
log_content += "=" * 60 + "\n\n"
|
|
92
|
+
|
|
93
|
+
# Summary
|
|
94
|
+
log_content += "SUMMARY\n"
|
|
95
|
+
log_content += "-" * 60 + "\n"
|
|
96
|
+
log_content += f"Total Tests: {self._test_results['total_tests']}\n"
|
|
97
|
+
log_content += f"Passed: {self._test_results['total_tests'] - self._test_results['failures'] - self._test_results['errors']}\n"
|
|
98
|
+
log_content += f"Failures: {self._test_results['failures']}\n"
|
|
99
|
+
log_content += f"Errors: {self._test_results['errors']}\n"
|
|
100
|
+
log_content += f"Skipped: {self._test_results['skipped']}\n"
|
|
101
|
+
log_content += f"Status: {'✓ PASSED' if self._test_results['success'] else '✗ FAILED'}\n\n"
|
|
102
|
+
|
|
103
|
+
# Detailed output
|
|
104
|
+
log_content += "DETAILED OUTPUT\n"
|
|
105
|
+
log_content += "-" * 60 + "\n"
|
|
106
|
+
log_content += self._test_results["output"] + "\n"
|
|
107
|
+
|
|
108
|
+
# Failures detail
|
|
109
|
+
if self._test_results["failures_detail"]:
|
|
110
|
+
log_content += "\nFAILURES\n"
|
|
111
|
+
log_content += "-" * 60 + "\n"
|
|
112
|
+
for test, traceback in self._test_results["failures_detail"]:
|
|
113
|
+
log_content += f"\n{test}:\n{traceback}\n"
|
|
114
|
+
|
|
115
|
+
# Errors detail
|
|
116
|
+
if self._test_results["errors_detail"]:
|
|
117
|
+
log_content += "\nERRORS\n"
|
|
118
|
+
log_content += "-" * 60 + "\n"
|
|
119
|
+
for test, traceback in self._test_results["errors_detail"]:
|
|
120
|
+
log_content += f"\n{test}:\n{traceback}\n"
|
|
121
|
+
|
|
122
|
+
# Ensure directory exists
|
|
123
|
+
Path(self.output_file).parent.mkdir(parents=True, exist_ok=True)
|
|
124
|
+
|
|
125
|
+
with open(self.output_file, "w") as f:
|
|
126
|
+
f.write(log_content)
|
|
127
|
+
|
|
128
|
+
print(f"Test results logged to: {self.output_file}")
|
|
129
|
+
|
|
130
|
+
if self.open_log:
|
|
131
|
+
os.startfile(self.output_file)
|
|
132
|
+
|
|
133
|
+
def get_results(self) -> dict:
|
|
134
|
+
"""
|
|
135
|
+
Get test results summary.
|
|
136
|
+
|
|
137
|
+
Returns:
|
|
138
|
+
Dictionary containing test results
|
|
139
|
+
"""
|
|
140
|
+
return self._test_results.copy()
|
|
@@ -0,0 +1,8 @@
|
|
|
1
|
+
Metadata-Version: 2.4
|
|
2
|
+
Name: eazytester
|
|
3
|
+
Version: 26.5.0
|
|
4
|
+
Summary: Utility for running unit tests and logging results in a structured format.
|
|
5
|
+
Author-email: Aric Kraft <kraft.aric@gmail.com>
|
|
6
|
+
Classifier: Programming Language :: Python :: 3
|
|
7
|
+
Classifier: License :: OSI Approved :: MIT License
|
|
8
|
+
Classifier: Operating System :: OS Independent
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
eazytester
|
|
@@ -0,0 +1,35 @@
|
|
|
1
|
+
[build-system]
|
|
2
|
+
requires = ["setuptools"]
|
|
3
|
+
build-backend = "setuptools.build_meta"
|
|
4
|
+
|
|
5
|
+
|
|
6
|
+
[project]
|
|
7
|
+
name = "eazytester"
|
|
8
|
+
version = "26.5.0"
|
|
9
|
+
description = "Utility for running unit tests and logging results in a structured format."
|
|
10
|
+
authors = [
|
|
11
|
+
{name="Aric Kraft", email="kraft.aric@gmail.com"},
|
|
12
|
+
]
|
|
13
|
+
dependencies = []
|
|
14
|
+
classifiers=[
|
|
15
|
+
"Programming Language :: Python :: 3",
|
|
16
|
+
"License :: OSI Approved :: MIT License",
|
|
17
|
+
"Operating System :: OS Independent",
|
|
18
|
+
]
|
|
19
|
+
|
|
20
|
+
[tool.setuptools]
|
|
21
|
+
include-package-data = true
|
|
22
|
+
|
|
23
|
+
[tool.setuptools.packages.find]
|
|
24
|
+
include = ["eazytester.*", "eazytester"]
|
|
25
|
+
exclude=["*.tests*"]
|
|
26
|
+
namespaces = false
|
|
27
|
+
|
|
28
|
+
[tool.setuptools.exclude-package-data]
|
|
29
|
+
"*" = ["*.c", "*.h"]
|
|
30
|
+
|
|
31
|
+
|
|
32
|
+
|
|
33
|
+
|
|
34
|
+
|
|
35
|
+
|