pyforge-test 0.1.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- pyforge_test/__init__.py +23 -0
- pyforge_test/__main__.py +8 -0
- pyforge_test/core/__init__.py +8 -0
- pyforge_test/core/collector.py +253 -0
- pyforge_test/core/main.py +227 -0
- pyforge_test/core/py.typed +0 -0
- pyforge_test/core/registry.py +45 -0
- pyforge_test/core/reporter.py +373 -0
- pyforge_test/core/runner.py +222 -0
- pyforge_test-0.1.0.dist-info/METADATA +188 -0
- pyforge_test-0.1.0.dist-info/RECORD +15 -0
- pyforge_test-0.1.0.dist-info/WHEEL +5 -0
- pyforge_test-0.1.0.dist-info/entry_points.txt +2 -0
- pyforge_test-0.1.0.dist-info/licenses/LICENSE +21 -0
- pyforge_test-0.1.0.dist-info/top_level.txt +1 -0
pyforge_test/__init__.py
ADDED
|
@@ -0,0 +1,23 @@
|
|
|
1
|
+
"""PyForge testing framework.
|
|
2
|
+
|
|
3
|
+
A lightweight unit testing framework with simple decorator-based tests.
|
|
4
|
+
"""
|
|
5
|
+
|
|
6
|
+
from .core.collector import (
|
|
7
|
+
BUILTIN_MARKERS,
|
|
8
|
+
test,
|
|
9
|
+
test_marker,
|
|
10
|
+
test_parameterized,
|
|
11
|
+
test_skip,
|
|
12
|
+
test_skipif,
|
|
13
|
+
)
|
|
14
|
+
|
|
15
|
+
# Package version; keep in sync with the distribution metadata (0.1.0 wheel).
__version__ = "0.1.0"
# Public API: the collection decorators and marker table re-exported from
# pyforge_test.core.collector.
__all__ = [
    "BUILTIN_MARKERS",
    "test",
    "test_marker",
    "test_parameterized",
    "test_skip",
    "test_skipif",
]
|
pyforge_test/__main__.py
ADDED
|
@@ -0,0 +1,8 @@
|
|
|
1
|
+
"""PyForge core testing components."""
|
|
2
|
+
|
|
3
|
+
from .collector import test, test_parameterized, test_skip, test_skipif
|
|
4
|
+
from .registry import TESTS
|
|
5
|
+
from .reporter import report
|
|
6
|
+
from .runner import execute
|
|
7
|
+
|
|
8
|
+
__all__ = ["TESTS", "execute", "report", "test", "test_parameterized", "test_skip", "test_skipif"]
|
|
@@ -0,0 +1,253 @@
|
|
|
1
|
+
from collections.abc import Callable
|
|
2
|
+
from typing import Any
|
|
3
|
+
|
|
4
|
+
from .registry import TESTS
|
|
5
|
+
|
|
6
|
+
|
|
7
|
+
# The test function is a decorator that collects test functions
# and adds them to the TESTS list.
def test(function: Callable[..., None]) -> Callable[..., None]:
    """Collect a test function and add it to the TESTS list.

    Args:
        function (Callable[..., None]): The test function to be collected.

    Returns:
        Callable[..., None]: The original test function, unmodified.

    Raises:
        RuntimeError: If validation or collection fails; the underlying
            ValueError is chained as the cause.
    """
    # Resolve a printable name up front so the error messages (including the
    # one built in the except handler) never crash on objects that lack
    # __name__ — the original read function.__name__ before verifying the
    # argument was actually a function.
    name: str = getattr(function, "__name__", repr(function))
    try:
        # Validate callability first: the checks below read attributes that
        # arbitrary non-callable objects may not have.
        if not callable(function):
            raise ValueError(f"Test function '{name}' must be callable.")
        # check if the function is already in the TESTS list to avoid duplicates
        if any(test_dict["function"] == function for test_dict in TESTS):
            raise ValueError(f"Test function '{name}' is already collected.")
        # check if the function name starts with "test_"
        if not name.startswith("test_"):
            raise ValueError(f"Test function '{name}' must start with 'test_'.")
        # The runner invokes test functions with no arguments, so reject any
        # positional parameters up front.
        if function.__code__.co_argcount > 0:
            raise ValueError(f"Test function '{name}' must not have parameters.")
        # Record the source location alongside the function for better debugging.
        TESTS.append(
            {
                "function": function,
                "filename": function.__code__.co_filename,
                "line_number": function.__code__.co_firstlineno,
                "skip_info": None,
                "marker": None,
            }
        )
        return function
    except Exception as e:
        raise RuntimeError(
            f"An error occurred while collecting test function '{name}': {e}"
        ) from e
|
45
|
+
|
|
46
|
+
|
|
47
|
+
# test_cases will be a list of tuples, where each tuple contains expression and expected result.
def test_parameterized(
    test_cases: list[tuple[Any, ...]],
) -> Callable[[Callable[..., None]], Callable[..., None]]:
    """A decorator to parameterize test functions with multiple test cases.

    Args:
        test_cases (list[tuple[Any, ...]]): A list of tuples, where each tuple
            contains the parameters to be passed to the test function.

    Returns:
        Callable[[Callable[..., None]], Callable[..., None]]:
            A decorator that can be applied to a test function.
    """

    def decorator(function: Callable[..., None]) -> Callable[..., None]:
        """Register one zero-argument wrapper per test case.

        Args:
            function (Callable[..., None]): The test function to be decorated.

        Returns:
            Callable[..., None]: The original function; registration happens
            via the generated wrappers, not the function itself.

        Raises:
            RuntimeError: If validation or collection fails; the underlying
                ValueError is chained as the cause.
        """
        # Printable name that is safe even for objects without __name__.
        name: str = getattr(function, "__name__", repr(function))
        try:
            # Callability first, so attribute access below cannot crash.
            if not callable(function):
                raise ValueError(f"Test function '{name}' must be callable.")
            if not name.startswith("test_"):
                raise ValueError(f"Test function '{name}' must start with 'test_'.")
            # BUG FIX: the registry stores the generated wrappers (named
            # '<name>_<index>'), never `function` itself, so the original
            # check `function in TESTS` was dead code and re-applying the
            # decorator silently registered duplicates. Compare against the
            # names the wrappers would receive as well.
            generated_names = {f"{name}_{index}" for index in range(len(test_cases))}
            if any(
                test_dict["function"] == function
                or test_dict["function"].__name__ in generated_names
                for test_dict in TESTS
            ):
                raise ValueError(f"Test function '{name}' is already collected.")
            # Register one wrapper per case, keeping the original function's
            # source location for debugging.
            for case_index, case in enumerate(test_cases):
                # Bind the current case as a default argument so each wrapper
                # captures its own tuple (avoids the late-binding-closure trap).
                def make_test(c: tuple[Any, ...] = case) -> None:
                    function(*c)

                make_test.__name__ = f"{name}_{case_index}"
                # Keep __qualname__ consistent so tracebacks show the
                # per-case name rather than the enclosing 'make_test'.
                make_test.__qualname__ = make_test.__name__

                TESTS.append(
                    {
                        "function": make_test,
                        "filename": function.__code__.co_filename,
                        "line_number": function.__code__.co_firstlineno,
                        "skip_info": None,
                        "marker": None,
                    }
                )
            return function
        except Exception as e:
            raise RuntimeError(
                f"An error occurred while collecting parameterized test function "
                f"'{name}': {e}"
            ) from e

    return decorator
|
107
|
+
|
|
108
|
+
|
|
109
|
+
# The test_skipif function is a decorator that allows skipping a test function based on a condition.
def test_skipif(
    condition: bool, reason: str
) -> Callable[[Callable[..., None]], Callable[..., None]]:
    """A decorator to skip a test function if a certain condition is met.

    Args:
        condition (bool): The condition that determines whether the test should be skipped.
        reason (str): The reason for skipping the test, which will be displayed in the report.

    Returns:
        Callable[[Callable[..., None]], Callable[..., None]]:
            A decorator that can be applied to a test function.
    """

    def decorator(function: Callable[..., None]) -> Callable[..., None]:
        """Register the test together with its skip information.

        Args:
            function (Callable[..., None]): The test function to be decorated.

        Returns:
            Callable[..., None]: The original function, unmodified.

        Raises:
            RuntimeError: If validation or collection fails; the underlying
                ValueError is chained as the cause.
        """
        # Resolve a printable name first so error messages (including the one
        # in the except handler) cannot crash on objects lacking __name__.
        name: str = getattr(function, "__name__", repr(function))
        try:
            # Callability first, before any further attribute access.
            if not callable(function):
                raise ValueError(f"Test function '{name}' must be callable.")
            # Check if the function is already in the TESTS list to avoid duplicates
            if any(test_dict["function"] == function for test_dict in TESTS):
                raise ValueError(f"Test function '{name}' is already collected.")
            # Check if the function name starts with "test_"
            if not name.startswith("test_"):
                raise ValueError(f"Test function '{name}' must start with 'test_'.")
            # Register with skip information; the runner decides at execution
            # time whether to honor it.
            TESTS.append(
                {
                    "function": function,
                    "filename": function.__code__.co_filename,
                    "line_number": function.__code__.co_firstlineno,
                    "skip_info": {"skip": condition, "reason": reason},
                    "marker": None,
                }
            )
            return function
        except Exception as e:
            raise RuntimeError(
                f"An error occurred while collecting skipif test function "
                f"'{name}': {e}"
            ) from e

    return decorator
|
163
|
+
|
|
164
|
+
|
|
165
|
+
# The test_skip function is a decorator that allows skipping a test function unconditionally.
def test_skip(reason: str) -> Callable[[Callable[..., None]], Callable[..., None]]:
    """A decorator to skip a test function unconditionally.

    Args:
        reason (str): The reason for skipping the test, which will be displayed in the report.

    Returns:
        Callable[[Callable[..., None]], Callable[..., None]]:
            A decorator that can be applied to a test function.
    """
    # An unconditional skip is simply a conditional skip whose condition is
    # always true; delegate directly so the registration logic lives in one
    # place instead of wrapping test_skipif in an extra pass-through layer.
    return test_skipif(True, reason)
|
189
|
+
|
|
190
|
+
|
|
191
|
+
# Built-in markers
# Maps marker name -> human-readable description. test_marker() accepts only
# keys of this mapping; anything else is rejected with a ValueError.
BUILTIN_MARKERS = {
    "slow": "Mark test as slow-running",
    "integration": "Mark test as an integration test (requires external resources)",
}
|
|
196
|
+
|
|
197
|
+
|
|
198
|
+
# The test_marker function is a decorator that allows marking a test function with a custom marker.
def test_marker(marker: str) -> Callable[[Callable[..., None]], Callable[..., None]]:
    """A decorator to apply a built-in marker to a test function.

    Usage:
        @test_marker("integration")
        @test
        def test_database_connection() -> None:
            pass

    Args:
        marker (str): The marker to be applied to the test function.

    Returns:
        Callable[[Callable[..., None]], Callable[..., None]]:
            A decorator that can be applied to a test function.

    Raises:
        ValueError: If the marker is not a built-in marker.
    """

    def decorator(function: Callable[..., None]) -> Callable[..., None]:
        """Attach *marker* to the already-collected registry entry for *function*.

        Args:
            function (Callable[..., None]): The test function to be marked.

        Returns:
            Callable[..., None]: The test function with the applied marker.

        Raises:
            ValueError: If the test function is not collected before applying the marker.
            RuntimeError: If an error occurs while applying the marker.
        """
        try:
            # Only the markers declared in BUILTIN_MARKERS are accepted.
            if marker not in BUILTIN_MARKERS:
                raise ValueError(f"Marker '{marker}' is not a built-in marker.")
            # Single scan of the registry: None means the function was never
            # collected (e.g. @test was not applied below this decorator).
            entry = next(
                (test_dict for test_dict in TESTS if test_dict["function"] == function),
                None,
            )
            if entry is None:
                raise ValueError(
                    f"Test function '{function.__name__}' must be collected before applying marker."
                )
            entry["marker"] = marker
            return function
        except Exception as e:
            raise RuntimeError(
                f"An error occurred while applying marker '{marker}' to test function "
                f"'{function.__name__}': {e}"
            ) from e

    return decorator
|
@@ -0,0 +1,227 @@
|
|
|
1
|
+
"""PyForge test runner entry point.
|
|
2
|
+
|
|
3
|
+
Executes collected tests and displays results.
|
|
4
|
+
Discovers and loads all test files from the tests directory.
|
|
5
|
+
"""
|
|
6
|
+
|
|
7
|
+
import argparse
|
|
8
|
+
import contextlib
|
|
9
|
+
import importlib.util
|
|
10
|
+
import sys
|
|
11
|
+
import traceback
|
|
12
|
+
from pathlib import Path
|
|
13
|
+
|
|
14
|
+
from .reporter import VERBOSITY_NORMAL, VERBOSITY_QUIET, VERBOSITY_VERBOSE, report
|
|
15
|
+
from .runner import execute
|
|
16
|
+
|
|
17
|
+
# Set up path for tests package import.
# NOTE(review): the four .parent hops assume this file lives four levels below
# the project root (e.g. <root>/src/pyforge_test/core/main.py) — TODO confirm
# against the installed package layout.
_pyforge_root: Path = Path(__file__).parent.parent.parent.parent  # project root
# Module-level side effect: prepend the project root to sys.path so a
# top-level `tests` package becomes importable.
sys.path.insert(0, str(_pyforge_root))
# Best-effort import: silently do nothing when no `tests` package exists here.
with contextlib.suppress(ImportError):
    import tests  # pyright: ignore[reportUnusedImport] # noqa: F401
|
22
|
+
|
|
23
|
+
|
|
24
|
+
def _find_project_root() -> Path:
|
|
25
|
+
"""Find the project root directory.
|
|
26
|
+
|
|
27
|
+
First checks current working directory, then the directory where
|
|
28
|
+
pyforge is installed.
|
|
29
|
+
|
|
30
|
+
Returns:
|
|
31
|
+
Path: The project root directory containing tests/ subdirectory.
|
|
32
|
+
|
|
33
|
+
Raises:
|
|
34
|
+
FileNotFoundError: If no project structure is found.
|
|
35
|
+
"""
|
|
36
|
+
# Check current working directory first (for user projects)
|
|
37
|
+
cwd: Path = Path.cwd()
|
|
38
|
+
if (cwd / "tests").exists() and (cwd / "tests").is_dir():
|
|
39
|
+
return cwd
|
|
40
|
+
|
|
41
|
+
# Check one level up from CWD
|
|
42
|
+
parent: Path = cwd.parent
|
|
43
|
+
if (parent / "tests").exists() and (parent / "tests").is_dir():
|
|
44
|
+
return parent
|
|
45
|
+
|
|
46
|
+
# Fall back to pyforge installation directory
|
|
47
|
+
pyforge_root: Path = Path(__file__).parent.parent.parent.parent
|
|
48
|
+
if (pyforge_root / "tests").exists() and (pyforge_root / "tests").is_dir():
|
|
49
|
+
return pyforge_root
|
|
50
|
+
|
|
51
|
+
raise FileNotFoundError(
|
|
52
|
+
"No project structure found. Expected 'tests/' directory in "
|
|
53
|
+
f"current directory ({cwd}), parent directory ({parent}), "
|
|
54
|
+
f"or pyforge installation ({pyforge_root})"
|
|
55
|
+
)
|
|
56
|
+
|
|
57
|
+
|
|
58
|
+
def _setup_src_path(project_root: Path) -> None:
|
|
59
|
+
"""Add src directory to sys.path if it exists.
|
|
60
|
+
|
|
61
|
+
Args:
|
|
62
|
+
project_root: The root directory of the project.
|
|
63
|
+
"""
|
|
64
|
+
src_dir: Path = project_root / "src"
|
|
65
|
+
if src_dir.exists() and src_dir.is_dir():
|
|
66
|
+
src_path: str = str(src_dir)
|
|
67
|
+
if src_path not in sys.path:
|
|
68
|
+
sys.path.insert(0, src_path)
|
|
69
|
+
|
|
70
|
+
|
|
71
|
+
def discover_and_load_tests(project_root: Path) -> int:
    """Discover and load all test modules from the tests directory.

    Importing each module triggers the collection decorators at module
    level, which populate the shared test registry as a side effect.

    Args:
        project_root: The root directory containing the tests/ folder.

    Returns:
        int: Number of test modules successfully loaded.

    Raises:
        FileNotFoundError: If the tests directory does not exist.
    """
    tests_dir: Path = project_root / "tests"

    if not tests_dir.exists() or not tests_dir.is_dir():
        raise FileNotFoundError(f"Tests directory '{tests_dir}' does not exist.")

    # Find all test files (anything matching test*.py pattern); sorted for a
    # deterministic load order. The glob is non-recursive, so stems are
    # unique within the directory.
    test_files: list[Path] = sorted(tests_dir.glob("test*.py"))

    if not test_files:
        print(f"No test modules found in '{tests_dir}'")
        return 0

    loaded_count: int = 0
    print(f"Discovering test modules in '{tests_dir}'...")

    for test_file in test_files:
        try:
            # Load the module using importlib
            spec = importlib.util.spec_from_file_location(test_file.stem, test_file)
            if spec is None or spec.loader is None:
                raise ImportError(f"Could not create module spec for {test_file.name}")

            module = importlib.util.module_from_spec(spec)
            # Register before execution so intra-module imports can resolve.
            sys.modules[test_file.stem] = module
            try:
                spec.loader.exec_module(module)
            except BaseException:
                # BUG FIX: the original left a half-initialized module
                # registered in sys.modules when execution failed, shadowing
                # any later import of that name. Unregister it on failure.
                sys.modules.pop(test_file.stem, None)
                raise

            # (The original's hasattr(module, "__name__") check was dead code:
            # modules created via module_from_spec always carry __name__.)
            print(f"Loaded: {test_file.name}")
            loaded_count += 1

        except Exception as e:
            # Best-effort loading: report the failure and keep going.
            print(f"✗ Error loading {test_file.name}: {type(e).__name__}: {e}")

    return loaded_count
|
120
|
+
|
|
121
|
+
|
|
122
|
+
def main() -> int:
    """Main entry point for PyForge test runner.

    Orchestrates the full run: locate the project root, wire up sys.path,
    discover/load test modules, execute the collected tests, and print the
    report.

    Supports verbosity levels, fail-fast option, and selective running:
    - Default: Normal output
    - -q/--quiet: Only show summary and failures
    - -v/--verbose: Show detailed info with tracebacks
    - --fail-fast: Stop on first failure
    - -k: Substring filtering on test names
    - FILES: File paths to run tests from (positional arguments)

    Returns:
        int: Exit code (0 for success, 1 for failure).
    """
    # Parse CLI arguments
    parser: argparse.ArgumentParser = argparse.ArgumentParser(
        description="PyForge - Lightweight Python testing framework",
        epilog="Examples:\n"
        "  pyforge                  # Run all tests\n"
        "  pyforge -k test_basic    # Run tests with 'test_basic' in name\n"
        "  pyforge test_name.py     # Run tests from specific file(s)\n"
        "  pyforge -k basic -v      # Verbose output for matching tests",
        # Raw formatter preserves the manual line breaks in the epilog above.
        formatter_class=argparse.RawDescriptionHelpFormatter,
    )
    parser.add_argument(
        "-q",
        "--quiet",
        action="store_true",
        help="Quiet mode: only show summary and failures",
    )
    parser.add_argument(
        "-v",
        "--verbose",
        action="store_true",
        help="Verbose mode: show detailed info with tracebacks",
    )
    parser.add_argument(
        "--fail-fast",
        action="store_true",
        help="Stop execution at first failure",
    )
    parser.add_argument(
        "-k",
        dest="name_pattern",
        default=None,
        help="Substring filter: run tests with this string in their name",
    )
    parser.add_argument(
        "files",
        nargs="*",
        help="File paths to run tests from (supports partial paths and filenames)",
    )
    args: argparse.Namespace = parser.parse_args()

    # Determine verbosity level; --quiet wins when both flags are supplied.
    if args.quiet:
        verbosity = VERBOSITY_QUIET
    elif args.verbose:
        verbosity = VERBOSITY_VERBOSE
    else:
        verbosity = VERBOSITY_NORMAL

    try:
        # Find project root (where tests/ directory is located)
        project_root: Path = _find_project_root()

        # Setup src/ directory in sys.path if it exists
        _setup_src_path(project_root)

        # Add src directory to path for core module imports
        sys.path.insert(0, str(Path(__file__).parent))

        # Discover and load test modules
        modules_loaded: int = discover_and_load_tests(project_root)
        if modules_loaded == 0:
            # Nothing loaded (no files, or every file failed) is treated as
            # success — exit code 0.
            return 0

        # NOTE(review): assumes the VERBOSITY_* constants are ordered so that
        # quiet < normal <= verbose — confirm against reporter.py.
        if verbosity >= VERBOSITY_NORMAL:
            print(f"\nLoaded {modules_loaded} test module(s).\n")

        # Execute collected tests with filters
        results, summary = execute(
            fail_fast=args.fail_fast,
            name_pattern=args.name_pattern,
            file_patterns=args.files if args.files else None,
        )

        # Generate and display report
        output: str = report(results, summary, verbosity=verbosity)
        print(output)

        # Return 1 if any tests failed or errors occurred
        has_failures: bool = int(summary.get("failed", 0)) > 0 or int(summary.get("errors", 0)) > 0
        return 1 if has_failures else 0

    except FileNotFoundError as e:
        # No tests/ directory anywhere — report and fail.
        print(f"Error: {e}")
        return 1
    except Exception as e:
        # Top-level boundary: surface anything unexpected with a traceback.
        print(f"Unexpected error: {type(e).__name__}: {e}")
        traceback.print_exc()
        return 1
|
224
|
+
|
|
225
|
+
|
|
226
|
+
# Script entry point: propagate the runner's result as the process exit code.
if __name__ == "__main__":
    sys.exit(main())
|
|
File without changes
|
|
@@ -0,0 +1,45 @@
|
|
|
1
|
+
from collections.abc import Callable
|
|
2
|
+
from typing import Any, TypedDict
|
|
3
|
+
|
|
4
|
+
|
|
5
|
+
class TestCase(TypedDict):
    """TypedDict for test case structure.

    Attributes:
        function: The test function to execute.
        filename: The source file where the test is defined.
        line_number: The line number where the test is defined.
        skip_info: Dictionary containing skip information and reason.
        marker: The test marker (e.g., 'slow', 'integration', 'wip').
    """

    # Callable invoked by the runner — presumably with no arguments,
    # since the collector records zero-argument functions; verify there.
    function: Callable[..., None]
    # Source file of the definition (taken from function.__code__).
    filename: str
    # First line of the definition (taken from function.__code__).
    line_number: int
    # None when the test is not skipped; otherwise a dict with skip
    # condition and human-readable reason.
    skip_info: dict[str, Any] | None
    # None, or a marker name applied via the marker decorator.
    marker: str | None
|
21
|
+
|
|
22
|
+
|
|
23
|
+
class ResultDict(TypedDict):
    """TypedDict for test result structure.

    Attributes:
        name: The test function name.
        result: The test result status (Passed, Failed, Error, or Skipped).
        filename: The source file where the test is defined.
        line_number: The line number where the test is defined.
        skip_info: Dictionary containing skip information and reason.
        marker: The test marker (e.g., 'slow', 'integration', 'wip').
        traceback: The traceback string for failed/error tests, None otherwise.
    """

    # Test function __name__ as registered.
    name: str
    # Status string — per the docstring one of Passed/Failed/Error/Skipped.
    result: str
    # Source file of the test definition.
    filename: str
    # First line of the test definition.
    line_number: int
    # Copied from the corresponding TestCase entry.
    skip_info: dict[str, Any] | None
    # Copied from the corresponding TestCase entry.
    marker: str | None
    # Formatted traceback for failures/errors, None for passing tests.
    traceback: str | None
|
43
|
+
|
|
44
|
+
|
|
45
|
+
# Global, mutable registry of collected tests. Populated as a side effect of
# the collection decorators at import time and consumed by the runner —
# shared module-level state, so re-importing test modules re-registers tests.
TESTS: list[TestCase] = []
|