rustest-0.1.0-cp311-cp311-macosx_11_0_arm64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
rustest/__init__.py ADDED
@@ -0,0 +1,24 @@
+ """Public Python API for rustest."""
+
+ from __future__ import annotations
+
+ from . import _decorators
+ from ._cli import main
+ from ._reporting import RunReport, TestResult
+ from .core import run
+
+ fixture = _decorators.fixture
+ mark = _decorators.mark
+ parametrize = _decorators.parametrize
+ skip = _decorators.skip
+
+ __all__ = [
+     "RunReport",
+     "TestResult",
+     "fixture",
+     "main",
+     "mark",
+     "parametrize",
+     "run",
+     "skip",
+ ]
rustest/__main__.py ADDED
@@ -0,0 +1,10 @@
+ """Module executed when running ``python -m rustest``."""
+
+ from __future__ import annotations
+
+ import sys
+
+ from ._cli import main
+
+ if __name__ == "__main__":
+     sys.exit(main())
rustest/_cli.py ADDED
@@ -0,0 +1,259 @@
1
+ """Command line interface helpers."""
2
+
3
+ from __future__ import annotations
4
+
5
+ import argparse
6
+ from collections.abc import Sequence
7
+
8
+ from ._reporting import RunReport, TestResult
9
+ from .core import run
10
+
11
+
12
+ # ANSI color codes
13
+ class _ColorsNamespace:
14
+ """Namespace for ANSI color codes."""
15
+
16
+ def __init__(self) -> None: # pyright: ignore[reportMissingSuperCall]
17
+ self.green = "\033[92m"
18
+ self.red = "\033[91m"
19
+ self.yellow = "\033[93m"
20
+ self.cyan = "\033[96m"
21
+ self.bold = "\033[1m"
22
+ self.dim = "\033[2m"
23
+ self.reset = "\033[0m"
24
+
25
+ def disable(self) -> None:
26
+ """Disable all colors."""
27
+ self.green = ""
28
+ self.red = ""
29
+ self.yellow = ""
30
+ self.cyan = ""
31
+ self.bold = ""
32
+ self.dim = ""
33
+ self.reset = ""
34
+
35
+
36
+ Colors = _ColorsNamespace()
37
+
38
+
39
+ def build_parser() -> argparse.ArgumentParser:
40
+ parser = argparse.ArgumentParser(
41
+ prog="rustest",
42
+ description="Run Python tests at blazing speed with a Rust powered core.",
43
+ )
44
+ _ = parser.add_argument(
45
+ "paths",
46
+ nargs="*",
47
+ default=(".",),
48
+ help="Files or directories to collect tests from.",
49
+ )
50
+ _ = parser.add_argument(
51
+ "-k",
52
+ "--pattern",
53
+ help="Substring to filter tests by (case insensitive).",
54
+ )
55
+ _ = parser.add_argument(
56
+ "-n",
57
+ "--workers",
58
+ type=int,
59
+ help="Number of worker slots to use (experimental).",
60
+ )
61
+ _ = parser.add_argument(
62
+ "--no-capture",
63
+ dest="capture_output",
64
+ action="store_false",
65
+ help="Do not capture stdout/stderr during test execution.",
66
+ )
67
+ _ = parser.add_argument(
68
+ "-v",
69
+ "--verbose",
70
+ action="store_true",
71
+ help="Show verbose output with hierarchical test structure.",
72
+ )
73
+ _ = parser.add_argument(
74
+ "--ascii",
75
+ action="store_true",
76
+ help="Use ASCII characters instead of Unicode symbols for output.",
77
+ )
78
+ _ = parser.add_argument(
79
+ "--no-color",
80
+ dest="color",
81
+ action="store_false",
82
+ help="Disable colored output.",
83
+ )
84
+ _ = parser.set_defaults(capture_output=True, color=True)
85
+ return parser
86
+
87
+
88
+ def main(argv: Sequence[str] | None = None) -> int:
89
+ parser = build_parser()
90
+ args = parser.parse_args(argv)
91
+
92
+ # Disable colors if requested
93
+ if not args.color:
94
+ Colors.disable()
95
+
96
+ report = run(
97
+ paths=tuple(args.paths),
98
+ pattern=args.pattern,
99
+ workers=args.workers,
100
+ capture_output=args.capture_output,
101
+ )
102
+ _print_report(report, verbose=args.verbose, ascii_mode=args.ascii)
103
+ return 0 if report.failed == 0 else 1
104
+
105
+
106
+ def _print_report(report: RunReport, verbose: bool = False, ascii_mode: bool = False) -> None:
107
+ """Print test report with configurable output format.
108
+
109
+ Args:
110
+ report: The test run report
111
+ verbose: If True, show hierarchical verbose output (vitest-style)
112
+ ascii_mode: If True, use ASCII characters instead of Unicode symbols
113
+ """
114
+ if verbose:
115
+ _print_verbose_report(report, ascii_mode)
116
+ else:
117
+ _print_default_report(report, ascii_mode)
118
+
119
+ # Print summary line with colors
120
+ passed_str = (
121
+ f"{Colors.green}{report.passed} passed{Colors.reset}"
122
+ if report.passed > 0
123
+ else f"{report.passed} passed"
124
+ )
125
+ failed_str = (
126
+ f"{Colors.red}{report.failed} failed{Colors.reset}"
127
+ if report.failed > 0
128
+ else f"{report.failed} failed"
129
+ )
130
+ skipped_str = (
131
+ f"{Colors.yellow}{report.skipped} skipped{Colors.reset}"
132
+ if report.skipped > 0
133
+ else f"{report.skipped} skipped"
134
+ )
135
+
136
+ summary = (
137
+ f"\n{Colors.bold}{report.total} tests:{Colors.reset} "
138
+ f"{passed_str}, "
139
+ f"{failed_str}, "
140
+ f"{skipped_str} in {Colors.dim}{report.duration:.3f}s{Colors.reset}"
141
+ )
142
+ print(summary)
143
+
144
+
145
+ def _print_default_report(report: RunReport, ascii_mode: bool) -> None:
146
+ """Print pytest-style progress indicators followed by failure details."""
147
+ # Define symbols
148
+ if ascii_mode:
149
+ # pytest-style: . (pass), F (fail), s (skip)
150
+ pass_symbol = "."
151
+ fail_symbol = "F"
152
+ skip_symbol = "s"
153
+ else:
154
+ # Unicode symbols (no spaces, with colors)
155
+ pass_symbol = f"{Colors.green}✓{Colors.reset}"
156
+ fail_symbol = f"{Colors.red}✗{Colors.reset}"
157
+ skip_symbol = f"{Colors.yellow}⊘{Colors.reset}"
158
+
159
+ # Print progress indicators
160
+ for result in report.results:
161
+ if result.status == "passed":
162
+ print(pass_symbol, end="")
163
+ elif result.status == "failed":
164
+ print(fail_symbol, end="")
165
+ elif result.status == "skipped":
166
+ print(skip_symbol, end="")
167
+ print() # Newline after progress indicators
168
+
169
+ # Print failure details
170
+ failures = [r for r in report.results if r.status == "failed"]
171
+ if failures:
172
+ print(f"\n{Colors.red}{'=' * 70}")
173
+ print(f"{Colors.bold}FAILURES{Colors.reset}")
174
+ print(f"{Colors.red}{'=' * 70}{Colors.reset}")
175
+ for result in failures:
176
+ print(
177
+ f"\n{Colors.bold}{result.name}{Colors.reset} ({Colors.cyan}{result.path}{Colors.reset})"
178
+ )
179
+ print(f"{Colors.red}{'-' * 70}{Colors.reset}")
180
+ if result.message:
181
+ print(result.message.rstrip())
182
+
183
+
184
+ def _print_verbose_report(report: RunReport, ascii_mode: bool) -> None:
185
+ """Print vitest-style hierarchical output with nesting and timing."""
186
+ # Define symbols
187
+ if ascii_mode:
188
+ pass_symbol = "PASS"
189
+ fail_symbol = "FAIL"
190
+ skip_symbol = "SKIP"
191
+ else:
192
+ pass_symbol = f"{Colors.green}✓{Colors.reset}"
193
+ fail_symbol = f"{Colors.red}✗{Colors.reset}"
194
+ skip_symbol = f"{Colors.yellow}⊘{Colors.reset}"
195
+
196
+ # Group tests by file path and organize hierarchically
197
+ from collections import defaultdict
198
+
199
+ tests_by_file: dict[str, list[TestResult]] = defaultdict(list)
200
+ for result in report.results:
201
+ tests_by_file[result.path].append(result)
202
+
203
+ # Print hierarchical structure
204
+ for file_path in sorted(tests_by_file.keys()):
205
+ print(f"\n{Colors.bold}{file_path}{Colors.reset}")
206
+
207
+ # Group tests by class within this file
208
+ tests_by_class: dict[str | None, list[tuple[TestResult, str | None]]] = defaultdict(list)
209
+ for result in tests_by_file[file_path]:
210
+ # Parse test name to extract class if present
211
+ # Format can be: "test_name" or "ClassName.test_name" or "module::Class::test"
212
+ class_name: str | None
213
+ if "::" in result.name:
214
+ parts = result.name.split("::")
215
+ class_name = "::".join(parts[:-1]) if len(parts) > 1 else None
216
+ elif "." in result.name:
217
+ parts = result.name.split(".")
218
+ class_name = parts[0] if len(parts) > 1 else None
219
+ else:
220
+ class_name = None
221
+ tests_by_class[class_name].append((result, class_name))
222
+
223
+ # Print tests organized by class
224
+ for class_name in sorted(tests_by_class.keys(), key=lambda x: (x is None, x)):
225
+ # Print class name if present
226
+ if class_name:
227
+ print(f" {Colors.cyan}{class_name}{Colors.reset}")
228
+
229
+ for result, _ in tests_by_class[class_name]:
230
+ # Get symbol based on status
231
+ if result.status == "passed":
232
+ symbol = pass_symbol
233
+ elif result.status == "failed":
234
+ symbol = fail_symbol
235
+ elif result.status == "skipped":
236
+ symbol = skip_symbol
237
+ else:
238
+ symbol = "?"
239
+
240
+ # Extract just the test method name
241
+ if "::" in result.name:
242
+ display_name = result.name.split("::")[-1]
243
+ elif "." in result.name:
244
+ display_name = result.name.split(".")[-1]
245
+ else:
246
+ display_name = result.name
247
+
248
+ # Indent based on whether it's in a class
249
+ indent = " " if class_name else " "
250
+
251
+ # Print with symbol, name, and timing
252
+ duration_str = f"{Colors.dim}{result.duration * 1000:.0f}ms{Colors.reset}"
253
+ print(f"{indent}{symbol} {display_name} {duration_str}")
254
+
255
+ # Show error message for failures
256
+ if result.status == "failed" and result.message:
257
+ error_lines = result.message.rstrip().split("\n")
258
+ for line in error_lines:
259
+ print(f"{indent} {line}")
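The CLI entry point above is also callable in-process: `main()` accepts an explicit argv list and returns the exit status (0 when no test failed, 1 otherwise). A minimal sketch, assuming the compiled extension (or a patched `rustest._rust.run`) is available; the flag names come straight from `build_parser()`:

```python
from rustest._cli import main

# Equivalent to: rustest tests -k user --no-capture --no-color
status = main(["tests", "-k", "user", "--no-capture", "--no-color"])
raise SystemExit(status)  # 0 = all passed/skipped, 1 = at least one failure
```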
rustest/_decorators.py ADDED
@@ -0,0 +1,194 @@
1
+ """User facing decorators mirroring the most common pytest helpers."""
2
+
3
+ from __future__ import annotations
4
+
5
+ from collections.abc import Callable, Mapping, Sequence
6
+ from typing import Any, TypeVar
7
+
8
+ F = TypeVar("F", bound=Callable[..., object])
9
+
10
+ # Valid fixture scopes
11
+ VALID_SCOPES = frozenset(["function", "class", "module", "session"])
12
+
13
+
14
+ def fixture(
15
+ func: F | None = None,
16
+ *,
17
+ scope: str = "function",
18
+ ) -> F | Callable[[F], F]:
19
+ """Mark a function as a fixture with a specific scope.
20
+
21
+ Args:
22
+ func: The function to decorate (when used without parentheses)
23
+ scope: The scope of the fixture. One of:
24
+ - "function": New instance for each test function (default)
25
+ - "class": Shared across all test methods in a class
26
+ - "module": Shared across all tests in a module
27
+ - "session": Shared across all tests in the session
28
+
29
+ Usage:
30
+ @fixture
31
+ def my_fixture():
32
+ return 42
33
+
34
+ @fixture(scope="module")
35
+ def shared_fixture():
36
+ return expensive_setup()
37
+ """
38
+ if scope not in VALID_SCOPES:
39
+ valid = ", ".join(sorted(VALID_SCOPES))
40
+ msg = f"Invalid fixture scope '{scope}'. Must be one of: {valid}"
41
+ raise ValueError(msg)
42
+
43
+ def decorator(f: F) -> F:
44
+ setattr(f, "__rustest_fixture__", True)
45
+ setattr(f, "__rustest_fixture_scope__", scope)
46
+ return f
47
+
48
+ # Support both @fixture and @fixture(scope="...")
49
+ if func is not None:
50
+ return decorator(func)
51
+ return decorator
52
+
53
+
54
+ def skip(reason: str | None = None) -> Callable[[F], F]:
55
+ """Skip a test or fixture."""
56
+
57
+ def decorator(func: F) -> F:
58
+ setattr(func, "__rustest_skip__", reason or "skipped via rustest.skip")
59
+ return func
60
+
61
+ return decorator
62
+
63
+
64
+ def parametrize(
65
+ arg_names: str | Sequence[str],
66
+ values: Sequence[Sequence[object] | Mapping[str, object]],
67
+ *,
68
+ ids: Sequence[str] | None = None,
69
+ ) -> Callable[[F], F]:
70
+ """Parametrise a test function."""
71
+
72
+ normalized_names = _normalize_arg_names(arg_names)
73
+
74
+ def decorator(func: F) -> F:
75
+ cases = _build_cases(normalized_names, values, ids)
76
+ setattr(func, "__rustest_parametrization__", cases)
77
+ return func
78
+
79
+ return decorator
80
+
81
+
82
+ def _normalize_arg_names(arg_names: str | Sequence[str]) -> tuple[str, ...]:
83
+ if isinstance(arg_names, str):
84
+ parts = [part.strip() for part in arg_names.split(",") if part.strip()]
85
+ if not parts:
86
+ msg = "parametrize() expected at least one argument name"
87
+ raise ValueError(msg)
88
+ return tuple(parts)
89
+ return tuple(arg_names)
90
+
91
+
92
+ def _build_cases(
93
+ names: tuple[str, ...],
94
+ values: Sequence[Sequence[object] | Mapping[str, object]],
95
+ ids: Sequence[str] | None,
96
+ ) -> tuple[dict[str, object], ...]:
97
+ case_payloads: list[dict[str, object]] = []
98
+ if ids is not None and len(ids) != len(values):
99
+ msg = "ids must match the number of value sets"
100
+ raise ValueError(msg)
101
+
102
+ for index, case in enumerate(values):
103
+ # Mappings are only treated as parameter mappings when there are multiple parameters
104
+ # For single parameters, dicts/mappings are treated as values
105
+ if isinstance(case, Mapping) and len(names) > 1:
106
+ data = {name: case[name] for name in names}
107
+ elif isinstance(case, tuple) and len(case) == len(names):
108
+ # Tuples are unpacked to match parameter names (pytest convention)
109
+ # This handles both single and multiple parameters
110
+ data = {name: case[pos] for pos, name in enumerate(names)}
111
+ else:
112
+ # Everything else is treated as a single value
113
+ # This includes: primitives, lists (even if len==names), dicts (single param), objects
114
+ if len(names) == 1:
115
+ data = {names[0]: case}
116
+ else:
117
+ raise ValueError("Parametrized value does not match argument names")
118
+ case_id = ids[index] if ids is not None else f"case_{index}"
119
+ case_payloads.append({"id": case_id, "values": data})
120
+ return tuple(case_payloads)
121
+
122
+
123
+ class MarkDecorator:
124
+ """A decorator for applying a mark to a test function."""
125
+
126
+ def __init__(self, name: str, args: tuple[Any, ...], kwargs: dict[str, Any]) -> None:
127
+ super().__init__()
128
+ self.name = name
129
+ self.args = args
130
+ self.kwargs = kwargs
131
+
132
+ def __call__(self, func: F) -> F:
133
+ """Apply this mark to the given function."""
134
+ # Get existing marks or create a new list
135
+ existing_marks: list[dict[str, Any]] = getattr(func, "__rustest_marks__", [])
136
+
137
+ # Add this mark to the list
138
+ mark_data = {
139
+ "name": self.name,
140
+ "args": self.args,
141
+ "kwargs": self.kwargs,
142
+ }
143
+ existing_marks.append(mark_data)
144
+
145
+ # Store the marks list on the function
146
+ setattr(func, "__rustest_marks__", existing_marks)
147
+ return func
148
+
149
+ def __repr__(self) -> str:
150
+ return f"Mark({self.name!r}, {self.args!r}, {self.kwargs!r})"
151
+
152
+
153
+ class MarkGenerator:
154
+ """Namespace for dynamically creating marks like pytest.mark.
155
+
156
+ Usage:
157
+ @mark.slow
158
+ @mark.integration
159
+ @mark.timeout(seconds=30)
160
+ """
161
+
162
+ def __getattr__(self, name: str) -> Any:
163
+ """Create a mark decorator for the given name."""
164
+ # Return a callable that can be used as @mark.name or @mark.name(args)
165
+ return self._create_mark(name)
166
+
167
+ def _create_mark(self, name: str) -> Any:
168
+ """Create a MarkDecorator that can be called with or without arguments."""
169
+
170
+ class _MarkDecoratorFactory:
171
+ """Factory that allows @mark.name or @mark.name(args)."""
172
+
173
+ def __init__(self, mark_name: str) -> None:
174
+ super().__init__()
175
+ self.mark_name = mark_name
176
+
177
+ def __call__(self, *args: Any, **kwargs: Any) -> Any:
178
+ # If called with a single argument that's a function, it's @mark.name
179
+ if (
180
+ len(args) == 1
181
+ and not kwargs
182
+ and callable(args[0])
183
+ and hasattr(args[0], "__name__")
184
+ ):
185
+ decorator = MarkDecorator(self.mark_name, (), {})
186
+ return decorator(args[0])
187
+ # Otherwise it's @mark.name(args) - return a decorator
188
+ return MarkDecorator(self.mark_name, args, kwargs)
189
+
190
+ return _MarkDecoratorFactory(name)
191
+
192
+
193
+ # Create a singleton instance
194
+ mark = MarkGenerator()
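As the comments in `_build_cases` describe, tuples are unpacked positionally across the declared names, mappings are matched by name when there is more than one parameter, and anything else counts as a single value for a single parameter. A short sketch of what the decorator stores on the function (the attribute and payload shape are taken from the code above):

```python
from rustest import parametrize

@parametrize("base,exponent", [(2, 3), {"base": 10, "exponent": 2}], ids=["cube", "square"])
def test_power(base: int, exponent: int) -> None:
    assert base ** exponent in (8, 100)

# parametrize() normalizes every entry into an {"id": ..., "values": ...} payload.
cases = test_power.__rustest_parametrization__
assert cases[0] == {"id": "cube", "values": {"base": 2, "exponent": 3}}
assert cases[1] == {"id": "square", "values": {"base": 10, "exponent": 2}}
```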
rustest/_reporting.py ADDED
@@ -0,0 +1,63 @@
+ """Utilities for converting raw results from the Rust layer."""
+
+ from __future__ import annotations
+
+ from collections.abc import Iterable
+ from dataclasses import dataclass
+
+ from . import _rust
+
+
+ @dataclass(slots=True)
+ class TestResult:
+     """Structured view of a single test outcome."""
+
+     __test__ = False  # Tell pytest this is not a test class
+
+     name: str
+     path: str
+     status: str
+     duration: float
+     message: str | None
+     stdout: str | None
+     stderr: str | None
+
+     @classmethod
+     def from_py(cls, result: _rust.PyTestResult) -> "TestResult":
+         return cls(
+             name=result.name,
+             path=result.path,
+             status=result.status,
+             duration=result.duration,
+             message=result.message,
+             stdout=result.stdout,
+             stderr=result.stderr,
+         )
+
+
+ @dataclass(slots=True)
+ class RunReport:
+     """Aggregate statistics for an entire test session."""
+
+     total: int
+     passed: int
+     failed: int
+     skipped: int
+     duration: float
+     results: tuple[TestResult, ...]
+
+     @classmethod
+     def from_py(cls, report: _rust.PyRunReport) -> "RunReport":
+         return cls(
+             total=report.total,
+             passed=report.passed,
+             failed=report.failed,
+             skipped=report.skipped,
+             duration=report.duration,
+             results=tuple(TestResult.from_py(result) for result in report.results),
+         )
+
+     def iter_status(self, status: str) -> Iterable[TestResult]:
+         """Yield results with the requested status."""
+
+         return (result for result in self.results if result.status == status)
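`RunReport.iter_status` gives a lazy way to pull one outcome class out of a report. A small usage sketch, assuming `report` is a `RunReport` returned by `rustest.run(...)`:

```python
# Print every failure with its captured message.
for result in report.iter_status("failed"):
    print(f"{result.name} ({result.path}): {result.message}")
```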
rustest/_rust.cpython-311-darwin.so ADDED
Binary file
rustest/_rust.py ADDED
@@ -0,0 +1,23 @@
+ """Fallback stub for the compiled rustest extension.
+
+ This module is packaged with the Python distribution so unit tests can import the
+ package without building the Rust extension. Individual tests are expected to
+ monkeypatch the functions they exercise.
+ """
+
+ from __future__ import annotations
+
+ from typing import Any, Sequence
+
+
+ def run(
+     _paths: Sequence[str],
+     _pattern: str | None,
+     _workers: int | None,
+     _capture_output: bool,
+ ) -> Any:
+     """Placeholder implementation that mirrors the extension signature."""
+
+     raise NotImplementedError(
+         "The rustest native extension is unavailable. Tests must patch rustest._rust.run."
+     )
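Because this stub only raises, the docstring's suggestion is to patch `rustest._rust.run` in unit tests. A minimal sketch using pytest's `monkeypatch` fixture and `types.SimpleNamespace` stand-ins for the `PyRunReport`/`PyTestResult` objects (attribute names mirror `_reporting.py`; the test name and values are illustrative):

```python
import types

import rustest
from rustest import _rust


def test_run_with_stubbed_extension(monkeypatch):
    fake_result = types.SimpleNamespace(
        name="test_example",
        path="tests/test_example.py",
        status="passed",
        duration=0.001,
        message=None,
        stdout=None,
        stderr=None,
    )
    fake_report = types.SimpleNamespace(
        total=1, passed=1, failed=0, skipped=0, duration=0.001, results=[fake_result]
    )
    # core.run() calls _rust.run(paths, pattern, workers, capture_output) positionally.
    monkeypatch.setattr(_rust, "run", lambda *args: fake_report)

    report = rustest.run(paths=["tests"], pattern=None, workers=None, capture_output=True)
    assert report.passed == 1 and report.failed == 0
```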
rustest/_rust.pyi ADDED
@@ -0,0 +1,35 @@
+ """Type stubs for the Rust extension module."""
+
+ from __future__ import annotations
+
+ from collections.abc import Sequence
+
+ class PyTestResult:
+     """Test result from Rust layer."""
+
+     name: str
+     path: str
+     status: str
+     duration: float
+     message: str | None
+     stdout: str | None
+     stderr: str | None
+
+ class PyRunReport:
+     """Run report from Rust layer."""
+
+     total: int
+     passed: int
+     failed: int
+     skipped: int
+     duration: float
+     results: Sequence[PyTestResult]
+
+ def run(
+     paths: list[str],
+     pattern: str | None,
+     workers: int | None,
+     capture_output: bool,
+ ) -> PyRunReport:
+     """Run tests and return a report."""
+     ...
rustest/core.py ADDED
@@ -0,0 +1,21 @@
+ """High level Python API wrapping the Rust extension."""
+
+ from __future__ import annotations
+
+ from collections.abc import Sequence
+
+ from . import _rust
+ from ._reporting import RunReport
+
+
+ def run(
+     *,
+     paths: Sequence[str],
+     pattern: str | None,
+     workers: int | None,
+     capture_output: bool,
+ ) -> RunReport:
+     """Execute tests and return a rich report."""
+
+     raw_report = _rust.run(list(paths), pattern, workers, capture_output)
+     return RunReport.from_py(raw_report)
rustest-0.1.0.dist-info/METADATA ADDED
@@ -0,0 +1,487 @@
1
+ Metadata-Version: 2.4
2
+ Name: rustest
3
+ Version: 0.1.0
4
+ Classifier: Development Status :: 3 - Alpha
5
+ Classifier: Intended Audience :: Developers
6
+ Classifier: License :: OSI Approved :: MIT License
7
+ Classifier: Programming Language :: Python :: 3
8
+ Classifier: Programming Language :: Python :: 3.10
9
+ Classifier: Programming Language :: Python :: 3.11
10
+ Classifier: Programming Language :: Python :: 3.12
11
+ Classifier: Programming Language :: Python :: 3.13
12
+ Classifier: Programming Language :: Python :: 3.14
13
+ Classifier: Programming Language :: Rust
14
+ Classifier: Topic :: Software Development :: Testing
15
+ Requires-Dist: typing-extensions>=4.8
16
+ Requires-Dist: basedpyright>=1.19 ; extra == 'dev'
17
+ Requires-Dist: maturin>=1.4,<2 ; extra == 'dev'
18
+ Requires-Dist: poethepoet>=0.22 ; extra == 'dev'
19
+ Requires-Dist: pre-commit>=3.5 ; extra == 'dev'
20
+ Requires-Dist: pytest>=7.0 ; extra == 'dev'
21
+ Requires-Dist: ruff>=0.1.9 ; extra == 'dev'
22
+ Provides-Extra: dev
23
+ License-File: LICENSE
24
+ Summary: Rust powered pytest-compatible runner
25
+ Author: rustest contributors
26
+ Requires-Python: >=3.10
27
+ Description-Content-Type: text/markdown; charset=UTF-8; variant=GFM
28
+ Project-URL: Homepage, https://github.com/Apex-Engineers-Inc/rustest
29
+ Project-URL: Repository, https://github.com/Apex-Engineers-Inc/rustest
30
+
31
+ # rustest
32
+
33
+ Rustest (pronounced like Russ-Test) is a Rust-powered test runner that aims to provide the most common pytest ergonomics with a focus on raw performance. Get **78x faster** test execution with familiar syntax and minimal setup.
34
+
35
+ ## Why rustest?
36
+
37
+ - 🚀 **78x faster** than pytest (measured on real-world integration tests)
38
+ - ✅ Familiar `@fixture`, `@parametrize`, `@skip`, and `@mark` decorators
39
+ - 🔍 Automatic test discovery (`test_*.py` and `*_test.py` files)
40
+ - 🎯 Simple, clean API—if you know pytest, you already know rustest
41
+ - 📦 Easy installation with pip or uv
42
+ - ⚡ Sub-10ms execution for small test suites—tests feel instant
43
+
44
+ ## Performance
45
+
46
+ Rustest is designed for speed. Our benchmarks show **78x faster** execution compared to pytest on the rustest integration test suite (~199 tests):
47
+
48
+ | Test Runner | Time | Tests/Second | Speedup |
49
+ |-------------|------|--------------|---------|
50
+ | pytest | 0.39s | 502 | 1.0x (baseline) |
51
+ | rustest | 0.005s | 39,800 | **78x faster** |
52
+
53
+ **Actual CI measurements:**
54
+ - **pytest**: 196 passed, 5 skipped in 0.39s
55
+ - **rustest**: 194 passed, 5 skipped in 0.005s
56
+
57
+ **Why so fast?**
58
+ - **Near-zero startup time**: Native Rust binary vs Python interpreter startup
59
+ - **Rust-native test discovery**: Minimal imports until test execution
60
+ - **Optimized fixture resolution**: Efficient dependency graph in Rust
61
+ - **Efficient orchestration**: ~50-100μs per-test overhead vs ~1-2ms in pytest
62
+
63
+ **Real-world impact:**
64
+ - **200 tests**: 0.39s → 0.005s (instant feedback)
65
+ - **1,000 tests**: ~2s → ~0.025s (tests complete before you can switch tabs)
66
+ - **10,000 tests**: ~20s → ~0.25s (dramatically faster feedback loops)
67
+
68
+ See [BENCHMARKS.md](BENCHMARKS.md) for detailed performance analysis and methodology.
69
+
70
+ ## Installation
71
+
72
+ Rustest supports Python **3.10 through 3.14**.
73
+
74
+ ### Using pip
75
+ ```bash
76
+ pip install rustest
77
+ ```
78
+
79
+ ### Using uv
80
+ ```bash
81
+ uv add rustest
82
+ ```
83
+
84
+ ### For Development
85
+ If you want to contribute to rustest, see [DEVELOPMENT.md](DEVELOPMENT.md) for setup instructions.
86
+
87
+ ## Quick Start
88
+
89
+ ### 1. Write Your Tests
90
+
91
+ Create a file `test_math.py`:
92
+
93
+ ```python
94
+ from rustest import fixture, parametrize, mark
95
+
96
+ @fixture
97
+ def numbers() -> list[int]:
98
+ return [1, 2, 3, 4, 5]
99
+
100
+ def test_sum(numbers: list[int]) -> None:
101
+ assert sum(numbers) == 15
102
+
103
+ @parametrize("value,expected", [(2, 4), (3, 9), (4, 16)])
104
+ def test_square(value: int, expected: int) -> None:
105
+ assert value ** 2 == expected
106
+
107
+ @mark.slow
108
+ def test_expensive_operation() -> None:
109
+ # This test is marked as slow for filtering
110
+ result = sum(range(1000000))
111
+ assert result > 0
112
+ ```
113
+
114
+ ### 2. Run Your Tests
115
+
116
+ ```bash
117
+ # Run all tests in the current directory
118
+ rustest
119
+
120
+ # Run tests in a specific directory
121
+ rustest tests/
122
+
123
+ # Run tests matching a pattern
124
+ rustest -k "test_sum"
125
+
126
+ # Show output during test execution
127
+ rustest --no-capture
128
+ ```
129
+
130
+ ## Usage Examples
131
+
132
+ ### CLI Usage
133
+
134
+ ```bash
135
+ # Run all tests in current directory
136
+ rustest
137
+
138
+ # Run tests in specific paths
139
+ rustest tests/ integration/
140
+
141
+ # Filter tests by name pattern
142
+ rustest -k "user" # Runs test_user_login, test_user_signup, etc.
143
+ rustest -k "auth" # Runs all tests with "auth" in the name
144
+
145
+ # Control output capture
146
+ rustest --no-capture # See print statements during test execution
147
+ ```
148
+
149
+ ### Python API Usage
150
+
151
+ You can also run rustest programmatically from Python:
152
+
153
+ ```python
154
+ from rustest import run
155
+
156
+ # Basic usage
157
+ report = run(paths=["tests"])
158
+ print(f"Passed: {report.passed}, Failed: {report.failed}")
159
+
160
+ # With pattern filtering
161
+ report = run(paths=["tests"], pattern="user")
162
+
163
+ # Without output capture (see print statements)
164
+ report = run(paths=["tests"], capture_output=False)
165
+
166
+ # Access individual test results
167
+ for result in report.results:
168
+ print(f"{result.name}: {result.status} ({result.duration:.3f}s)")
169
+ if result.status == "failed":
170
+ print(f" Error: {result.message}")
171
+ ```
172
+
173
+ ### Writing Tests
174
+
175
+ #### Basic Test Functions
176
+
177
+ ```python
178
+ def test_simple_assertion() -> None:
179
+ assert 1 + 1 == 2
180
+
181
+ def test_string_operations() -> None:
182
+ text = "hello world"
183
+ assert text.startswith("hello")
184
+ assert "world" in text
185
+ ```
186
+
187
+ #### Using Fixtures
188
+
189
+ Fixtures provide reusable test data and setup:
190
+
191
+ ```python
192
+ from rustest import fixture
193
+
194
+ @fixture
195
+ def database_connection() -> dict:
196
+ # Setup: create a connection
197
+ conn = {"host": "localhost", "port": 5432}
198
+ return conn
199
+ # Teardown happens automatically
200
+
201
+ @fixture
202
+ def sample_user() -> dict:
203
+ return {"id": 1, "name": "Alice", "email": "alice@example.com"}
204
+
205
+ def test_database_query(database_connection: dict) -> None:
206
+ assert database_connection["host"] == "localhost"
207
+
208
+ def test_user_email(sample_user: dict) -> None:
209
+ assert "@" in sample_user["email"]
210
+ ```
211
+
212
+ #### Fixtures with Dependencies
213
+
214
+ Fixtures can depend on other fixtures:
215
+
216
+ ```python
217
+ from rustest import fixture
218
+
219
+ @fixture
220
+ def api_url() -> str:
221
+ return "https://api.example.com"
222
+
223
+ @fixture
224
+ def api_client(api_url: str) -> dict:
225
+ return {"base_url": api_url, "timeout": 30}
226
+
227
+ def test_api_configuration(api_client: dict) -> None:
228
+ assert api_client["base_url"].startswith("https://")
229
+ assert api_client["timeout"] == 30
230
+ ```
231
+
232
+ #### Yield Fixtures with Setup/Teardown
233
+
234
+ Fixtures can use `yield` to perform cleanup after tests:
235
+
236
+ ```python
237
+ from rustest import fixture
238
+
239
+ @fixture
240
+ def database_connection():
241
+ # Setup: create connection
242
+ conn = create_db_connection()
243
+ print("Database connected")
244
+
245
+ yield conn
246
+
247
+ # Teardown: close connection
248
+ conn.close()
249
+ print("Database connection closed")
250
+
251
+ @fixture
252
+ def temp_file():
253
+ # Setup
254
+ file = open("temp.txt", "w")
255
+ file.write("test data")
256
+
257
+ yield file
258
+
259
+ # Teardown
260
+ file.close()
261
+ os.remove("temp.txt")
262
+
263
+ def test_database_query(database_connection):
264
+ result = database_connection.query("SELECT 1")
265
+ assert result is not None
266
+ ```
267
+
268
+ #### Fixture Scopes
269
+
270
+ Fixtures support different scopes to control when they are created and destroyed:
271
+
272
+ ```python
273
+ from rustest import fixture
274
+
275
+ @fixture # Default: function scope - new instance per test
276
+ def function_fixture() -> dict:
277
+ return {"value": "reset each test"}
278
+
279
+ @fixture(scope="class") # Shared across all tests in a class
280
+ def class_database() -> dict:
281
+ return {"connection": "db://test", "shared": True}
282
+
283
+ @fixture(scope="module") # Shared across all tests in a module
284
+ def module_config() -> dict:
285
+ return {"env": "test", "timeout": 30}
286
+
287
+ @fixture(scope="session") # Shared across entire test session
288
+ def session_cache() -> dict:
289
+ return {"global_cache": {}}
290
+
291
+ # Fixtures can depend on fixtures with different scopes
292
+ @fixture(scope="function")
293
+ def request_handler(module_config: dict, session_cache: dict) -> dict:
294
+ return {
295
+ "config": module_config, # module-scoped
296
+ "cache": session_cache, # session-scoped
297
+ "request_id": id(object()) # unique per test
298
+ }
299
+ ```
300
+
301
+ **Scope Behavior:**
302
+ - `function` (default): New instance for each test function
303
+ - `class`: Shared across all test methods in a test class
304
+ - `module`: Shared across all tests in a Python module
305
+ - `session`: Shared across the entire test session
306
+
307
+ Scoped fixtures are especially useful for expensive setup operations like database connections, API clients, or configuration loading.
308
+
309
+ **Using conftest.py for Shared Fixtures:**
310
+
311
+ You can define fixtures in a `conftest.py` file to share them across multiple test files:
312
+
313
+ ```python
314
+ # conftest.py
315
+ from rustest import fixture
316
+
317
+ @fixture(scope="session")
318
+ def database():
319
+ """Shared database connection for all tests."""
320
+ db = setup_database()
321
+ yield db
322
+ db.cleanup()
323
+
324
+ @fixture(scope="module")
325
+ def api_client():
326
+ """API client shared across a module."""
327
+ return create_api_client()
328
+ ```
329
+
330
+ All test files in the same directory (and subdirectories) can use these fixtures automatically.
331
+
332
+ #### Parametrized Tests
333
+
334
+ Run the same test with different inputs:
335
+
336
+ ```python
337
+ from rustest import parametrize
338
+
339
+ @parametrize("input,expected", [
340
+ (1, 2),
341
+ (2, 4),
342
+ (3, 6),
343
+ ])
344
+ def test_double(input: int, expected: int) -> None:
345
+ assert input * 2 == expected
346
+
347
+ # With custom test IDs for better output
348
+ @parametrize("value,expected", [
349
+ (2, 4),
350
+ (3, 9),
351
+ (4, 16),
352
+ ], ids=["two", "three", "four"])
353
+ def test_square(value: int, expected: int) -> None:
354
+ assert value ** 2 == expected
355
+ ```
356
+
357
+ #### Combining Fixtures and Parameters
358
+
359
+ ```python
360
+ from rustest import fixture, parametrize
361
+
362
+ @fixture
363
+ def multiplier() -> int:
364
+ return 10
365
+
366
+ @parametrize("value,expected", [
367
+ (1, 10),
368
+ (2, 20),
369
+ (3, 30),
370
+ ])
371
+ def test_multiply(multiplier: int, value: int, expected: int) -> None:
372
+ assert multiplier * value == expected
373
+ ```
374
+
375
+ #### Skipping Tests
376
+
377
+ ```python
378
+ from rustest import skip, mark
379
+
380
+ @skip("Not implemented yet")
381
+ def test_future_feature() -> None:
382
+ assert False
383
+
384
+ @mark.skip(reason="Waiting for API update")
385
+ def test_deprecated_api() -> None:
386
+ assert False
387
+ ```
388
+
389
+ #### Using Marks to Organize Tests
390
+
391
+ ```python
392
+ from rustest import mark
393
+
394
+ @mark.unit
395
+ def test_calculation() -> None:
396
+ assert 2 + 2 == 4
397
+
398
+ @mark.integration
399
+ def test_database_integration() -> None:
400
+ # Integration test
401
+ pass
402
+
403
+ @mark.slow
404
+ @mark.integration
405
+ def test_full_workflow() -> None:
406
+ # This test has multiple marks
407
+ pass
408
+ ```
409
+
410
+ ### Test Output
411
+
412
+ When you run rustest, you'll see clean, informative output:
413
+
414
+ ```
415
+ PASSED 0.001s test_simple_assertion
416
+ PASSED 0.002s test_string_operations
417
+ PASSED 0.001s test_database_query
418
+ PASSED 0.003s test_square[two]
419
+ PASSED 0.001s test_square[three]
420
+ PASSED 0.002s test_square[four]
421
+ SKIPPED 0.000s test_future_feature
422
+ FAILED 0.005s test_broken_feature
423
+ ----------------------------------------
424
+ AssertionError: Expected 5, got 4
425
+ at test_example.py:42
426
+
427
+ 8 tests: 6 passed, 1 failed, 1 skipped in 0.015s
428
+ ```
429
+
430
+ ## Feature Comparison with pytest
431
+
432
+ Rustest aims to provide the most commonly-used pytest features with dramatically better performance. Here's how the two compare:
433
+
434
+ | Feature | pytest | rustest | Notes |
435
+ |---------|--------|---------|-------|
436
+ | **Core Test Discovery** |
437
+ | `test_*.py` / `*_test.py` files | ✅ | ✅ | Rustest uses Rust for dramatically faster discovery |
438
+ | Test function detection (`test_*`) | ✅ | ✅ | |
439
+ | Test class detection (`Test*`) | ✅ | ✅ | via `unittest.TestCase` support |
440
+ | Pattern-based filtering | ✅ | ✅ | `-k` pattern matching |
441
+ | **Fixtures** |
442
+ | `@fixture` decorator | ✅ | ✅ | Rust-based dependency resolution |
443
+ | Fixture dependency injection | ✅ | ✅ | Much faster in rustest |
444
+ | Fixture scopes (function/class/module/session) | ✅ | ✅ | Full support for all scopes |
445
+ | Yield fixtures (setup/teardown) | ✅ | ✅ | Full support with cleanup |
446
+ | Fixture parametrization | ✅ | 🚧 | Planned |
447
+ | **Parametrization** |
448
+ | `@parametrize` decorator | ✅ | ✅ | Full support with custom IDs |
449
+ | Multiple parameter sets | ✅ | ✅ | |
450
+ | Parametrize with fixtures | ✅ | ✅ | |
451
+ | **Marks** |
452
+ | `@mark.skip` / `@skip` | ✅ | ✅ | Skip tests with reasons |
453
+ | Custom marks (`@mark.slow`, etc.) | ✅ | ✅ | Just added! |
454
+ | Mark with arguments | ✅ | ✅ | `@mark.timeout(30)` |
455
+ | Selecting tests by mark (`-m`) | ✅ | 🚧 | Mark metadata collected, filtering planned |
456
+ | **Test Execution** |
457
+ | Detailed assertion introspection | ✅ | ❌ | Uses standard Python assertions |
458
+ | Parallel execution | ✅ (`pytest-xdist`) | 🚧 | Planned (Rust makes this easier) |
459
+ | Test isolation | ✅ | ✅ | |
460
+ | Stdout/stderr capture | ✅ | ✅ | |
461
+ | **Reporting** |
462
+ | Pass/fail/skip summary | ✅ | ✅ | |
463
+ | Failure tracebacks | ✅ | ✅ | Full Python traceback support |
464
+ | Duration reporting | ✅ | ✅ | Per-test timing |
465
+ | JUnit XML output | ✅ | 🚧 | Planned |
466
+ | HTML reports | ✅ (`pytest-html`) | 🚧 | Planned |
467
+ | **Advanced Features** |
468
+ | Plugins | ✅ | ❌ | Not planned (keeps rustest simple) |
469
+ | Hooks | ✅ | ❌ | Not planned |
470
+ | Custom collectors | ✅ | ❌ | Not planned |
471
+ | `conftest.py` | ✅ | ✅ | Shared fixtures across test files |
472
+ | **Developer Experience** |
473
+ | Fully typed Python API | ⚠️ | ✅ | rustest uses `basedpyright` strict mode |
474
+ | Fast CI/CD runs | ⚠️ | ✅ | 78x faster = dramatically shorter feedback loops |
475
+
476
+ **Legend:**
477
+ - ✅ Fully supported
478
+ - 🚧 Planned or in progress
479
+ - ⚠️ Partial support
480
+ - ❌ Not planned
481
+
482
+ **Philosophy:** Rustest implements the 20% of pytest features that cover 80% of use cases, with a focus on raw speed and simplicity. If you need advanced pytest features like plugins or custom hooks, stick with pytest. If you want fast, straightforward testing with familiar syntax, rustest is for you.
483
+
484
+ ## License
485
+
486
+ rustest is distributed under the terms of the MIT license. See [LICENSE](LICENSE).
487
+
rustest-0.1.0.dist-info/RECORD ADDED
@@ -0,0 +1,14 @@
1
+ rustest-0.1.0.dist-info/METADATA,sha256=MxCLrEAs8gtwnesn3cpHVwnD-lzCdw7A03a2aTmxrvI,13963
2
+ rustest-0.1.0.dist-info/WHEEL,sha256=dewdenAwp3PDth0u4HpQhcjieEs1_hiwRbm3WvCuoaI,104
3
+ rustest-0.1.0.dist-info/entry_points.txt,sha256=7fUa3LO8vudQ4dKG1sTRaDnxcMdBSZsWs9EyuxFQ7Lk,48
4
+ rustest-0.1.0.dist-info/licenses/LICENSE,sha256=s64ibUGtb6jEDBsYuxUFtMr_c4PaqYP-vj3YY6QtTGw,1075
5
+ rustest/__init__.py,sha256=K7MyGPARnn97RSjk-E_x_1KjqD49RK4fII_sZ_0rdcc,439
6
+ rustest/__main__.py,sha256=nqdz6DhrDze715SXxtzAYV2sie3CPoy7IvWCdcyHJEM,179
7
+ rustest/_cli.py,sha256=kq9LAwHaJmZ-gnAlTsz7Ov8r1fiDvNoLf4hEI3sxhng,8700
8
+ rustest/_decorators.py,sha256=EFcAmZfThiyj_J5l6fEx7Ix9LroXIBrOwa8QuxltNLI,6561
9
+ rustest/_reporting.py,sha256=6nVcccX1dgEBW72wCOeOIl5I-OE-ukjJD0VQs56pwjo,1626
10
+ rustest/_rust.cpython-311-darwin.so,sha256=RK7ZC1Z3ZNxKvXKNv6ub_mHbJ0mYb2mAGdlcRCgw34s,1466544
11
+ rustest/_rust.py,sha256=k3nXhGiehOVY_S6w28rIdrc0CEc3gFLgwWVOEMcPOZo,660
12
+ rustest/_rust.pyi,sha256=fDFLX0qj4G_bV1sHmTtRPI26grTDG_LFzPFEqp5vFGk,671
13
+ rustest/core.py,sha256=xmBUpuPs0r0HQthc9J5dCQYkZnXqfxqIfSGkHeoqQS4,488
14
+ rustest-0.1.0.dist-info/RECORD,,
rustest-0.1.0.dist-info/WHEEL ADDED
@@ -0,0 +1,4 @@
+ Wheel-Version: 1.0
+ Generator: maturin (1.9.6)
+ Root-Is-Purelib: false
+ Tag: cp311-cp311-macosx_11_0_arm64
rustest-0.1.0.dist-info/entry_points.txt ADDED
@@ -0,0 +1,2 @@
+ [console_scripts]
+ rustest=rustest.__main__:main
rustest-0.1.0.dist-info/licenses/LICENSE ADDED
@@ -0,0 +1,21 @@
+ MIT License
+
+ Copyright (c) 2025 Apex Engineers Inc
+
+ Permission is hereby granted, free of charge, to any person obtaining a copy
+ of this software and associated documentation files (the "Software"), to deal
+ in the Software without restriction, including without limitation the rights
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ copies of the Software, and to permit persons to whom the Software is
+ furnished to do so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be included in all
+ copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ SOFTWARE.