pyforge-test 0.1.0__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- pyforge_test-0.1.0/Documentation.md +677 -0
- pyforge_test-0.1.0/FUTURE_UPDATES.md +141 -0
- pyforge_test-0.1.0/LICENSE +21 -0
- pyforge_test-0.1.0/MANIFEST.in +10 -0
- pyforge_test-0.1.0/PKG-INFO +188 -0
- pyforge_test-0.1.0/README.md +159 -0
- pyforge_test-0.1.0/pyproject.toml +128 -0
- pyforge_test-0.1.0/setup.cfg +4 -0
- pyforge_test-0.1.0/src/pyforge_test/__init__.py +23 -0
- pyforge_test-0.1.0/src/pyforge_test/__main__.py +8 -0
- pyforge_test-0.1.0/src/pyforge_test/core/__init__.py +8 -0
- pyforge_test-0.1.0/src/pyforge_test/core/collector.py +253 -0
- pyforge_test-0.1.0/src/pyforge_test/core/main.py +227 -0
- pyforge_test-0.1.0/src/pyforge_test/core/py.typed +0 -0
- pyforge_test-0.1.0/src/pyforge_test/core/registry.py +45 -0
- pyforge_test-0.1.0/src/pyforge_test/core/reporter.py +373 -0
- pyforge_test-0.1.0/src/pyforge_test/core/runner.py +222 -0
- pyforge_test-0.1.0/src/pyforge_test.egg-info/PKG-INFO +188 -0
- pyforge_test-0.1.0/src/pyforge_test.egg-info/SOURCES.txt +21 -0
- pyforge_test-0.1.0/src/pyforge_test.egg-info/dependency_links.txt +1 -0
- pyforge_test-0.1.0/src/pyforge_test.egg-info/entry_points.txt +2 -0
- pyforge_test-0.1.0/src/pyforge_test.egg-info/top_level.txt +1 -0
- pyforge_test-0.1.0/tests/test_test.py +74 -0
|
@@ -0,0 +1,677 @@
|
|
|
1
|
+
# PyForge Testing Framework - Complete Documentation
|
|
2
|
+
|
|
3
|
+
A lightweight Python testing framework with decorator-based test collection and execution.
|
|
4
|
+
|
|
5
|
+
## Installation
|
|
6
|
+
|
|
7
|
+
```bash
|
|
8
|
+
pip install git+https://github.com/ertanturk/pyforge-test.git
|
|
9
|
+
```
|
|
10
|
+
|
|
11
|
+
## Quick Start
|
|
12
|
+
|
|
13
|
+
### 1. Setup
|
|
14
|
+
|
|
15
|
+
```bash
|
|
16
|
+
mkdir tests
touch tests/__init__.py  # required for test discovery (can be empty)
|
|
17
|
+
```
|
|
18
|
+
|
|
19
|
+
### 2. Write Tests
|
|
20
|
+
|
|
21
|
+
Create `tests/test_example.py`:
|
|
22
|
+
|
|
23
|
+
```python
|
|
24
|
+
from pyforge_test import test
|
|
25
|
+
|
|
26
|
+
@test
|
|
27
|
+
def test_addition() -> None:
|
|
28
|
+
assert 2 + 2 == 4
|
|
29
|
+
```
|
|
30
|
+
|
|
31
|
+
### 3. Run Tests
|
|
32
|
+
|
|
33
|
+
```bash
|
|
34
|
+
# Run all tests
|
|
35
|
+
pyforge
|
|
36
|
+
|
|
37
|
+
# Run specific tests by name
|
|
38
|
+
pyforge -k basic
|
|
39
|
+
|
|
40
|
+
# Run from specific file
|
|
41
|
+
pyforge test_example.py
|
|
42
|
+
|
|
43
|
+
# Verbose output with fail-fast
|
|
44
|
+
pyforge -v --fail-fast
|
|
45
|
+
```
|
|
46
|
+
|
|
47
|
+
## Core Features
|
|
48
|
+
|
|
49
|
+
### Basic Tests
|
|
50
|
+
|
|
51
|
+
```python
|
|
52
|
+
from pyforge_test import test
|
|
53
|
+
|
|
54
|
+
@test
|
|
55
|
+
def test_string_operations() -> None:
|
|
56
|
+
"""Test basic string manipulation."""
|
|
57
|
+
result = "hello".upper()
|
|
58
|
+
assert result == "HELLO"
|
|
59
|
+
assert "world" in "hello world"
|
|
60
|
+
```
|
|
61
|
+
|
|
62
|
+
**Requirements:**
|
|
63
|
+
|
|
64
|
+
- Function name starts with `test_`
|
|
65
|
+
- No parameters
|
|
66
|
+
- Return type `-> None`
|
|
67
|
+
- Use `@test` decorator
|
|
68
|
+
|
|
69
|
+
### Test Markers
|
|
70
|
+
|
|
71
|
+
Organize tests by execution priority: **Unmarked (0)** → **Integration (1)** → **Slow (2)**
|
|
72
|
+
|
|
73
|
+
#### Unmarked Tests (Priority 0)
|
|
74
|
+
|
|
75
|
+
```python
|
|
76
|
+
@test
|
|
77
|
+
def test_fast_calculation() -> None:
|
|
78
|
+
"""Fast unit test - runs first."""
|
|
79
|
+
assert 5 * 5 == 25
|
|
80
|
+
```
|
|
81
|
+
|
|
82
|
+
#### Integration Tests (Priority 1)
|
|
83
|
+
|
|
84
|
+
```python
|
|
85
|
+
from pyforge_test import test, test_marker
|
|
86
|
+
|
|
87
|
+
@test_marker("integration")
|
|
88
|
+
@test
|
|
89
|
+
def test_database_query() -> None:
|
|
90
|
+
"""Requires external database."""
|
|
91
|
+
result = db.query("SELECT 1")
|
|
92
|
+
assert result is not None
|
|
93
|
+
```
|
|
94
|
+
|
|
95
|
+
#### Slow Tests (Priority 2)
|
|
96
|
+
|
|
97
|
+
```python
|
|
98
|
+
import time
|
|
99
|
+
from pyforge_test import test, test_marker
|
|
100
|
+
|
|
101
|
+
@test_marker("slow")
|
|
102
|
+
@test
|
|
103
|
+
def test_performance() -> None:
|
|
104
|
+
"""Performance-intensive operation."""
|
|
105
|
+
time.sleep(0.5)
|
|
106
|
+
result = process_large_dataset()
|
|
107
|
+
assert len(result) > 1000
|
|
108
|
+
```
|
|
109
|
+
|
|
110
|
+
**Built-in Markers:**
|
|
111
|
+
|
|
112
|
+
- `"integration"` - External resources (DB, API, filesystem)
|
|
113
|
+
- `"slow"` - Time-consuming operations (>100ms)
|
|
114
|
+
|
|
115
|
+
**Important:** Decorator order matters - `@test_marker` must come **before** `@test`
|
|
116
|
+
|
|
117
|
+
### Parameterized Tests
|
|
118
|
+
|
|
119
|
+
Run same test with multiple inputs:
|
|
120
|
+
|
|
121
|
+
```python
|
|
122
|
+
from pyforge_test import test_parameterized
|
|
123
|
+
|
|
124
|
+
@test_parameterized([
|
|
125
|
+
(2, 3, 5),
|
|
126
|
+
(10, 5, 15),
|
|
127
|
+
(100, 200, 300),
|
|
128
|
+
])
|
|
129
|
+
def test_addition(a: int, b: int, expected: int) -> None:
|
|
130
|
+
"""Test addition with multiple cases."""
|
|
131
|
+
assert a + b == expected
|
|
132
|
+
```
|
|
133
|
+
|
|
134
|
+
Generates: `test_addition_0`, `test_addition_1`, `test_addition_2`
|
|
135
|
+
|
|
136
|
+
### Skip Tests
|
|
137
|
+
|
|
138
|
+
#### Conditional Skip
|
|
139
|
+
|
|
140
|
+
```python
|
|
141
|
+
import os
import sys
|
|
142
|
+
from pyforge_test import test, test_skipif
|
|
143
|
+
|
|
144
|
+
@test_skipif(
|
|
145
|
+
sys.platform == "win32",
|
|
146
|
+
reason="Not supported on Windows"
|
|
147
|
+
)
|
|
148
|
+
def test_unix_feature() -> None:
|
|
149
|
+
"""Only runs on Unix-like systems."""
|
|
150
|
+
assert os.fork() >= 0
|
|
151
|
+
```
|
|
152
|
+
|
|
153
|
+
#### Unconditional Skip
|
|
154
|
+
|
|
155
|
+
```python
|
|
156
|
+
from pyforge_test import test, test_skip
|
|
157
|
+
|
|
158
|
+
@test_skip(reason="Feature not implemented yet")
|
|
159
|
+
def test_future_feature() -> None:
|
|
160
|
+
"""This test is always skipped."""
|
|
161
|
+
assert future_api_call() == "success"
|
|
162
|
+
```
|
|
163
|
+
|
|
164
|
+
### Combined Features
|
|
165
|
+
|
|
166
|
+
```python
|
|
167
|
+
import os
|
|
168
|
+
from pyforge_test import test, test_marker, test_skipif
|
|
169
|
+
|
|
170
|
+
@test_marker("integration")
|
|
171
|
+
@test_skipif(
|
|
172
|
+
not os.getenv("DATABASE_URL"),
|
|
173
|
+
reason="Database not configured"
|
|
174
|
+
)
|
|
175
|
+
def test_real_database() -> None:
|
|
176
|
+
"""Integration test with skip condition."""
|
|
177
|
+
conn = connect_database()
|
|
178
|
+
assert conn.is_connected()
|
|
179
|
+
```
|
|
180
|
+
|
|
181
|
+
## Complete Example
|
|
182
|
+
|
|
183
|
+
```python
|
|
184
|
+
"""Complete test suite example."""
|
|
185
|
+
|
|
186
|
+
import time
|
|
187
|
+
import os
|
|
188
|
+
from pyforge_test import (
|
|
189
|
+
test,
|
|
190
|
+
test_marker,
|
|
191
|
+
test_parameterized,
|
|
192
|
+
test_skip,
|
|
193
|
+
test_skipif,
|
|
194
|
+
)
|
|
195
|
+
|
|
196
|
+
|
|
197
|
+
# Unmarked test (Priority 0)
|
|
198
|
+
@test
|
|
199
|
+
def test_basic_arithmetic() -> None:
|
|
200
|
+
"""Fast unit test."""
|
|
201
|
+
assert 10 / 2 == 5
|
|
202
|
+
assert 3 ** 2 == 9
|
|
203
|
+
|
|
204
|
+
|
|
205
|
+
# Parameterized test
|
|
206
|
+
@test_parameterized([
|
|
207
|
+
("hello", "HELLO"),
|
|
208
|
+
("world", "WORLD"),
|
|
209
|
+
("PyForge", "PYFORGE"),
|
|
210
|
+
])
|
|
211
|
+
def test_uppercase(input_str: str, expected: str) -> None:
|
|
212
|
+
"""Test string uppercase conversion."""
|
|
213
|
+
assert input_str.upper() == expected
|
|
214
|
+
|
|
215
|
+
|
|
216
|
+
# Integration test (Priority 1)
|
|
217
|
+
@test_marker("integration")
|
|
218
|
+
@test
|
|
219
|
+
def test_api_endpoint() -> None:
|
|
220
|
+
"""Test external API."""
|
|
221
|
+
response = api_client.get("/health")
|
|
222
|
+
assert response.status_code == 200
|
|
223
|
+
|
|
224
|
+
|
|
225
|
+
# Slow test (Priority 2)
|
|
226
|
+
@test_marker("slow")
|
|
227
|
+
@test
|
|
228
|
+
def test_large_computation() -> None:
|
|
229
|
+
"""Performance-intensive test."""
|
|
230
|
+
time.sleep(0.2)
|
|
231
|
+
result = sum(i ** 2 for i in range(10000))
|
|
232
|
+
assert result > 0
|
|
233
|
+
|
|
234
|
+
|
|
235
|
+
# Conditional skip
|
|
236
|
+
@test_skipif(
|
|
237
|
+
os.getenv("CI") == "true",
|
|
238
|
+
reason="Skip in CI environment"
|
|
239
|
+
)
|
|
240
|
+
def test_local_only() -> None:
|
|
241
|
+
"""Only runs locally."""
|
|
242
|
+
assert local_resource_available()
|
|
243
|
+
|
|
244
|
+
|
|
245
|
+
# Unconditional skip
|
|
246
|
+
@test_skip(reason="Temporarily disabled")
|
|
247
|
+
def test_broken_feature() -> None:
|
|
248
|
+
"""Skipped until bug is fixed."""
|
|
249
|
+
assert broken_function() == "works"
|
|
250
|
+
|
|
251
|
+
|
|
252
|
+
# Combined: marker + skip
|
|
253
|
+
@test_marker("slow")
|
|
254
|
+
@test_skip(reason="Takes 10+ minutes")
|
|
255
|
+
def test_extensive_benchmark() -> None:
|
|
256
|
+
"""Long-running benchmark test."""
|
|
257
|
+
time.sleep(600)
|
|
258
|
+
```
|
|
259
|
+
|
|
260
|
+
## Project Structure
|
|
261
|
+
|
|
262
|
+
```
|
|
263
|
+
my-project/
|
|
264
|
+
├── src/
|
|
265
|
+
│ ├── main.py
|
|
266
|
+
│ └── utils.py
|
|
267
|
+
├── tests/
|
|
268
|
+
│ ├── __init__.py # Required (empty file)
|
|
269
|
+
│ ├── test_main.py # Auto-discovered
|
|
270
|
+
│ ├── test_utils.py # Auto-discovered
|
|
271
|
+
│ └── test_integration.py # Auto-discovered
|
|
272
|
+
└── pyproject.toml
|
|
273
|
+
```
|
|
274
|
+
|
|
275
|
+
## Running Tests
|
|
276
|
+
|
|
277
|
+
### Command Line Interface
|
|
278
|
+
|
|
279
|
+
PyForge supports verbosity levels, selective running, and execution control:
|
|
280
|
+
|
|
281
|
+
```bash
|
|
282
|
+
# Run all tests
|
|
283
|
+
pyforge
|
|
284
|
+
|
|
285
|
+
# Quiet mode: only show summary and failures
|
|
286
|
+
pyforge -q
|
|
287
|
+
pyforge --quiet
|
|
288
|
+
|
|
289
|
+
# Verbose mode: show detailed info with tracebacks
|
|
290
|
+
pyforge -v
|
|
291
|
+
pyforge --verbose
|
|
292
|
+
|
|
293
|
+
# Stop at first failure
|
|
294
|
+
pyforge --fail-fast
|
|
295
|
+
|
|
296
|
+
# Combine options
|
|
297
|
+
pyforge -v --fail-fast
|
|
298
|
+
```
|
|
299
|
+
|
|
300
|
+
### Selective Running
|
|
301
|
+
|
|
302
|
+
#### Substring Filtering (`-k`)
|
|
303
|
+
|
|
304
|
+
Run tests matching a substring in their name:
|
|
305
|
+
|
|
306
|
+
```bash
|
|
307
|
+
# Run tests containing "basic" in the name
|
|
308
|
+
pyforge -k basic
|
|
309
|
+
|
|
310
|
+
# Run tests containing "api" with verbose output
|
|
311
|
+
pyforge -k api -v
|
|
312
|
+
|
|
313
|
+
# Combine with other options
|
|
314
|
+
pyforge -k test_uppercase --fail-fast
|
|
315
|
+
```
|
|
316
|
+
|
|
317
|
+
#### File Path Filtering
|
|
318
|
+
|
|
319
|
+
Run tests from specific files using positional arguments:
|
|
320
|
+
|
|
321
|
+
```bash
|
|
322
|
+
# Run all tests from a specific file
|
|
323
|
+
pyforge test_example.py
|
|
324
|
+
|
|
325
|
+
# Run from multiple files
|
|
326
|
+
pyforge test_example.py test_utils.py
|
|
327
|
+
|
|
328
|
+
# Use partial paths to match multiple files
|
|
329
|
+
pyforge test_
|
|
330
|
+
```
|
|
331
|
+
|
|
332
|
+
#### Combined Filtering
|
|
333
|
+
|
|
334
|
+
Use both `-k` and file paths together:
|
|
335
|
+
|
|
336
|
+
```bash
|
|
337
|
+
# Run tests with "api" in the name from test_integration.py
|
|
338
|
+
pyforge -k api test_integration.py
|
|
339
|
+
|
|
340
|
+
# Run uppercase tests from any test_*.py file with verbose output
|
|
341
|
+
pyforge -k uppercase test_ -v
|
|
342
|
+
|
|
343
|
+
# Run tests with "db" in name from specific files with fail-fast
|
|
344
|
+
pyforge -k db test_database.py test_models.py --fail-fast
|
|
345
|
+
```
|
|
346
|
+
|
|
347
|
+
### Help Output
|
|
348
|
+
|
|
349
|
+
```bash
|
|
350
|
+
pyforge --help
|
|
351
|
+
```
|
|
352
|
+
|
|
353
|
+
### Auto-Discovery
|
|
354
|
+
|
|
355
|
+
PyForge automatically:
|
|
356
|
+
|
|
357
|
+
1. Finds `tests/` directory in current/parent directory
|
|
358
|
+
2. Loads all `test*.py` files
|
|
359
|
+
3. Collects functions decorated with `@test`
|
|
360
|
+
4. Filters by criteria (if provided)
|
|
361
|
+
5. Sorts by marker priority
|
|
362
|
+
6. Executes and reports results
|
|
363
|
+
|
|
364
|
+
### Output Format
|
|
365
|
+
|
|
366
|
+
```
|
|
367
|
+
PyForge Test Results
|
|
368
|
+
------------------------------------------------------------------------
|
|
369
|
+
|
|
370
|
+
test_example.py
|
|
371
|
+
PASSED test_basic_arithmetic (Line 10)
|
|
372
|
+
PASSED test_uppercase_0 (Line 15)
|
|
373
|
+
PASSED test_uppercase_1 (Line 15)
|
|
374
|
+
PASSED test_uppercase_2 (Line 15)
|
|
375
|
+
ERROR test_api_endpoint (Line 25): name 'api_client' is not defined
|
|
376
|
+
PASSED test_large_computation (Line 33)
|
|
377
|
+
SKIPPED test_local_only (Line 42): Skip in CI environment
|
|
378
|
+
SKIPPED test_broken_feature (Line 50): Temporarily disabled
|
|
379
|
+
|
|
380
|
+
------------------------------------------------------------------------
|
|
381
|
+
Summary: PASSED: 5/8 FAILED: 0/8 SKIPPED: 2/8 ERRORS: 1/8
|
|
382
|
+
Took 156 ms to execute all tests
|
|
383
|
+
------------------------------------------------------------------------
|
|
384
|
+
```
|
|
385
|
+
|
|
386
|
+
## Test Results
|
|
387
|
+
|
|
388
|
+
Test results are displayed with color coding:
|
|
389
|
+
|
|
390
|
+
- **PASSED** (Green) - Assertion succeeded
|
|
391
|
+
- **FAILED** (Red) - AssertionError with message
|
|
392
|
+
- **ERROR** (Orange) - Unexpected exception with message
|
|
393
|
+
- **SKIPPED** (Blue) - Test skipped with reason
|
|
394
|
+
|
|
395
|
+
Each result shows:
|
|
396
|
+
|
|
397
|
+
- Status indicator (colored)
|
|
398
|
+
- Test function name
|
|
399
|
+
- Line number where test is defined
|
|
400
|
+
- Error message (if applicable)
|
|
401
|
+
- Traceback (in verbose mode only)
|
|
402
|
+
|
|
403
|
+
## API Reference
|
|
404
|
+
|
|
405
|
+
### CLI Options
|
|
406
|
+
|
|
407
|
+
```
|
|
408
|
+
usage: pyforge [-h] [-q] [-v] [--fail-fast] [-k NAME_PATTERN] [files ...]
|
|
409
|
+
|
|
410
|
+
positional arguments:
|
|
411
|
+
files File paths to run tests from (supports partial paths and filenames)
|
|
412
|
+
|
|
413
|
+
options:
|
|
414
|
+
-h, --help show this help message and exit
|
|
415
|
+
-q, --quiet Quiet mode: only show summary and failures
|
|
416
|
+
-v, --verbose Verbose mode: show detailed info with tracebacks
|
|
417
|
+
--fail-fast Stop execution at first failure
|
|
418
|
+
-k NAME_PATTERN Substring filter: run tests with this string in their name
|
|
419
|
+
```
|
|
420
|
+
|
|
421
|
+
### CLI Option Details
|
|
422
|
+
|
|
423
|
+
| Option | Short | Description | Example |
|
|
424
|
+
| ---------------- | ----- | -------------------------------- | ------------------------- |
|
|
425
|
+
| `--help` | `-h` | Show help message | `pyforge -h` |
|
|
426
|
+
| `--quiet` | `-q` | Only show summary and failures | `pyforge -q` |
|
|
427
|
+
| `--verbose` | `-v` | Show tracebacks and details | `pyforge -v` |
|
|
428
|
+
| `--fail-fast` | | Stop at first failure | `pyforge --fail-fast` |
|
|
429
|
+
| `--name-pattern` | `-k` | Filter by test name substring | `pyforge -k basic` |
|
|
430
|
+
| `files` | | Filter by file path (positional) | `pyforge test_example.py` |
|
|
431
|
+
|
|
432
|
+
### Combining Options
|
|
433
|
+
|
|
434
|
+
Options can be combined freely:
|
|
435
|
+
|
|
436
|
+
```bash
|
|
437
|
+
# Verbose output with fail-fast
|
|
438
|
+
pyforge -v --fail-fast
|
|
439
|
+
|
|
440
|
+
# Quiet mode for CI environments
|
|
441
|
+
pyforge -q
|
|
442
|
+
|
|
443
|
+
# Filter by name and file with verbose output
|
|
444
|
+
pyforge -k api test_integration.py -v
|
|
445
|
+
|
|
446
|
+
# Multiple files with name filter
|
|
447
|
+
pyforge -k test_ test_utils.py test_models.py --fail-fast
|
|
448
|
+
```
|
|
449
|
+
|
|
450
|
+
### Decorators
|
|
451
|
+
|
|
452
|
+
| Decorator | Purpose | Example |
|
|
453
|
+
| ---------------------------- | ------------------------ | ------------------------------ |
|
|
454
|
+
| `@test` | Mark function as test | `@test` |
|
|
455
|
+
| `@test_marker(marker)` | Apply priority marker | `@test_marker("slow")` |
|
|
456
|
+
| `@test_parameterized(cases)` | Run with multiple inputs | `@test_parameterized([(1,2)])` |
|
|
457
|
+
| `@test_skip(reason)` | Always skip | `@test_skip("Not ready")` |
|
|
458
|
+
| `@test_skipif(cond, reason)` | Conditionally skip | `@test_skipif(True, "Skip")` |
|
|
459
|
+
|
|
460
|
+
### Built-in Markers
|
|
461
|
+
|
|
462
|
+
| Marker | Priority | Use Case |
|
|
463
|
+
| --------------- | -------- | --------------------- |
|
|
464
|
+
| None (unmarked) | 0 | Fast unit tests |
|
|
465
|
+
| `"integration"` | 1 | External dependencies |
|
|
466
|
+
| `"slow"` | 2 | Performance-intensive |
|
|
467
|
+
|
|
468
|
+
### Import Paths
|
|
469
|
+
|
|
470
|
+
```python
|
|
471
|
+
# Recommended: Import from main package
|
|
472
|
+
from pyforge_test import (
|
|
473
|
+
test,
|
|
474
|
+
test_marker,
|
|
475
|
+
test_parameterized,
|
|
476
|
+
test_skip,
|
|
477
|
+
test_skipif,
|
|
478
|
+
BUILTIN_MARKERS,
|
|
479
|
+
)
|
|
480
|
+
|
|
481
|
+
# Alternative: Import from core module
|
|
482
|
+
from pyforge_test.core.collector import test
|
|
483
|
+
```
|
|
484
|
+
|
|
485
|
+
## Best Practices
|
|
486
|
+
|
|
487
|
+
### 1. Test Naming
|
|
488
|
+
|
|
489
|
+
```python
|
|
490
|
+
# ✅ Good: Descriptive, starts with test_
|
|
491
|
+
@test
|
|
492
|
+
def test_user_authentication_with_valid_credentials() -> None:
|
|
493
|
+
pass
|
|
494
|
+
|
|
495
|
+
# ❌ Bad: Vague, doesn't start with test_
|
|
496
|
+
def check_auth() -> None:
|
|
497
|
+
pass
|
|
498
|
+
```
|
|
499
|
+
|
|
500
|
+
### 2. Marker Usage
|
|
501
|
+
|
|
502
|
+
```python
|
|
503
|
+
# ✅ Good: Unmarked for fast unit tests
|
|
504
|
+
@test
|
|
505
|
+
def test_string_length() -> None:
|
|
506
|
+
assert len("hello") == 5
|
|
507
|
+
|
|
508
|
+
# ✅ Good: Integration marker for external resources
|
|
509
|
+
@test_marker("integration")
|
|
510
|
+
@test
|
|
511
|
+
def test_database_connection() -> None:
|
|
512
|
+
db.connect()
|
|
513
|
+
|
|
514
|
+
# ✅ Good: Slow marker for performance tests
|
|
515
|
+
@test_marker("slow")
|
|
516
|
+
@test
|
|
517
|
+
def test_million_records() -> None:
|
|
518
|
+
process_records(1_000_000)
|
|
519
|
+
```
|
|
520
|
+
|
|
521
|
+
### 3. Parameterization
|
|
522
|
+
|
|
523
|
+
```python
|
|
524
|
+
# ✅ Good: Multiple related test cases
|
|
525
|
+
@test_parameterized([
|
|
526
|
+
(0, True),
|
|
527
|
+
(1, False),
|
|
528
|
+
(-1, False),
|
|
529
|
+
(10, True),
|
|
530
|
+
])
|
|
531
|
+
def test_is_even(num: int, expected: bool) -> None:
|
|
532
|
+
assert (num % 2 == 0) == expected
|
|
533
|
+
|
|
534
|
+
# ❌ Bad: Separate tests for each case
|
|
535
|
+
@test
|
|
536
|
+
def test_zero_is_even() -> None:
|
|
537
|
+
assert 0 % 2 == 0
|
|
538
|
+
|
|
539
|
+
@test
|
|
540
|
+
def test_one_is_odd() -> None:
|
|
541
|
+
assert 1 % 2 != 0
|
|
542
|
+
```
|
|
543
|
+
|
|
544
|
+
### 4. Skip Conditions
|
|
545
|
+
|
|
546
|
+
```python
|
|
547
|
+
# ✅ Good: Conditional skip with clear reason
|
|
548
|
+
@test_skipif(
|
|
549
|
+
sys.version_info < (3, 12),
|
|
550
|
+
reason="Requires Python 3.12+"
|
|
551
|
+
)
|
|
552
|
+
def test_new_syntax() -> None:
|
|
553
|
+
pass
|
|
554
|
+
|
|
555
|
+
# ✅ Good: Skip unfinished tests
|
|
556
|
+
@test_skip(reason="Implementation pending")
|
|
557
|
+
def test_new_feature() -> None:
|
|
558
|
+
pass
|
|
559
|
+
```
|
|
560
|
+
|
|
561
|
+
## Troubleshooting
|
|
562
|
+
|
|
563
|
+
### Tests Not Found
|
|
564
|
+
|
|
565
|
+
**Problem:** `No tests to execute. Exiting.`
|
|
566
|
+
|
|
567
|
+
**Solution:**
|
|
568
|
+
|
|
569
|
+
- Ensure `tests/` directory exists
|
|
570
|
+
- Create `tests/__init__.py` (can be empty)
|
|
571
|
+
- Test files must match `test*.py` pattern
|
|
572
|
+
- Functions must start with `test_`
|
|
573
|
+
- Functions must use `@test` decorator
|
|
574
|
+
|
|
575
|
+
### Import Errors
|
|
576
|
+
|
|
577
|
+
**Problem:** `ModuleNotFoundError: No module named 'pyforge_test'`
|
|
578
|
+
|
|
579
|
+
**Solution:**
|
|
580
|
+
|
|
581
|
+
```bash
|
|
582
|
+
pip install git+https://github.com/ertanturk/pyforge-test.git
|
|
583
|
+
```
|
|
584
|
+
|
|
585
|
+
### Marker Errors
|
|
586
|
+
|
|
587
|
+
**Problem:** `ValueError: Marker 'custom' is not a built-in marker.`
|
|
588
|
+
|
|
589
|
+
**Solution:**
|
|
590
|
+
|
|
591
|
+
- Only use `"integration"` or `"slow"` markers
|
|
592
|
+
- Check spelling: `@test_marker("integration")` not `"Integration"`
|
|
593
|
+
|
|
594
|
+
### Decorator Order
|
|
595
|
+
|
|
596
|
+
**Problem:** `ValueError: Test function must be collected before applying marker.`
|
|
597
|
+
|
|
598
|
+
**Solution:**
|
|
599
|
+
|
|
600
|
+
```python
|
|
601
|
+
# ✅ Correct order
|
|
602
|
+
@test_marker("slow")
|
|
603
|
+
@test
|
|
604
|
+
def test_something() -> None:
|
|
605
|
+
pass
|
|
606
|
+
|
|
607
|
+
# ❌ Wrong order
|
|
608
|
+
@test
|
|
609
|
+
@test_marker("slow")
|
|
610
|
+
def test_something() -> None:
|
|
611
|
+
pass
|
|
612
|
+
```
|
|
613
|
+
|
|
614
|
+
## Advanced Usage
|
|
615
|
+
|
|
616
|
+
### Custom Assertions
|
|
617
|
+
|
|
618
|
+
```python
|
|
619
|
+
@test
|
|
620
|
+
def test_with_custom_assertion() -> None:
|
|
621
|
+
"""Use custom assertion helpers."""
|
|
622
|
+
result = compute_value()
|
|
623
|
+
|
|
624
|
+
# Multiple assertions in one test
|
|
625
|
+
assert result > 0, "Result should be positive"
|
|
626
|
+
assert result < 100, "Result should be less than 100"
|
|
627
|
+
assert isinstance(result, int), "Result should be integer"
|
|
628
|
+
```
|
|
629
|
+
|
|
630
|
+
### Environment-Based Testing
|
|
631
|
+
|
|
632
|
+
```python
|
|
633
|
+
import os
|
|
634
|
+
|
|
635
|
+
@test_skipif(
|
|
636
|
+
os.getenv("ENVIRONMENT") == "production",
|
|
637
|
+
reason="Don't run in production"
|
|
638
|
+
)
|
|
639
|
+
def test_destructive_operation() -> None:
|
|
640
|
+
"""Only runs in dev/test environments."""
|
|
641
|
+
delete_all_data()
|
|
642
|
+
assert True
|
|
643
|
+
```
|
|
644
|
+
|
|
645
|
+
### Multiple Markers with Skip
|
|
646
|
+
|
|
647
|
+
```python
|
|
648
|
+
@test_marker("integration")
|
|
649
|
+
@test_skipif(
|
|
650
|
+
not os.path.exists("/var/run/docker.sock"),
|
|
651
|
+
reason="Docker not available"
|
|
652
|
+
)
|
|
653
|
+
def test_docker_container() -> None:
|
|
654
|
+
"""Integration test requiring Docker."""
|
|
655
|
+
container = docker_client.run("alpine")
|
|
656
|
+
assert container.status == "running"
|
|
657
|
+
```
|
|
658
|
+
|
|
659
|
+
## Platform Support
|
|
660
|
+
|
|
661
|
+
- ✅ Linux
|
|
662
|
+
- ✅ macOS
|
|
663
|
+
- ✅ Windows (WSL)
|
|
664
|
+
- ✅ Python 3.12+
|
|
665
|
+
|
|
666
|
+
## Summary
|
|
667
|
+
|
|
668
|
+
PyForge provides:
|
|
669
|
+
|
|
670
|
+
1. **Simple test collection** with `@test` decorator
|
|
671
|
+
2. **Test prioritization** with markers (`integration`, `slow`)
|
|
672
|
+
3. **Parameterized tests** for multiple test cases
|
|
673
|
+
4. **Skip conditions** for conditional execution
|
|
674
|
+
5. **Auto-discovery** of test files
|
|
675
|
+
6. **Clear output** with pass/fail/skip status
|
|
676
|
+
|
|
677
|
+
Write tests, run `pyforge`, see results. That's it!
|