@juho0719/cckit 0.1.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/assets/agents/architect.md +211 -0
- package/assets/agents/build-error-resolver.md +114 -0
- package/assets/agents/ccwin-code-reviewer.md +224 -0
- package/assets/agents/database-reviewer.md +91 -0
- package/assets/agents/doc-updater.md +107 -0
- package/assets/agents/e2e-runner.md +107 -0
- package/assets/agents/planner.md +212 -0
- package/assets/agents/python-reviewer.md +98 -0
- package/assets/agents/refactor-cleaner.md +85 -0
- package/assets/agents/security-reviewer.md +108 -0
- package/assets/agents/superpower-code-reviewer.md +48 -0
- package/assets/agents/tdd-guide.md +80 -0
- package/assets/commands/build-fix.md +62 -0
- package/assets/commands/checkpoint.md +74 -0
- package/assets/commands/code-review.md +40 -0
- package/assets/commands/e2e.md +362 -0
- package/assets/commands/eval.md +120 -0
- package/assets/commands/orchestrate.md +172 -0
- package/assets/commands/plan.md +113 -0
- package/assets/commands/python-review.md +297 -0
- package/assets/commands/refactor-clean.md +80 -0
- package/assets/commands/sessions.md +305 -0
- package/assets/commands/tdd.md +326 -0
- package/assets/commands/test-coverage.md +69 -0
- package/assets/commands/update-codemaps.md +72 -0
- package/assets/commands/update-docs.md +84 -0
- package/assets/commands/verify.md +59 -0
- package/assets/hooks/post-edit-format.js +49 -0
- package/assets/hooks/post-edit-typecheck.js +96 -0
- package/assets/mcps/mcp-servers.json +92 -0
- package/assets/rules/common/agents.md +49 -0
- package/assets/rules/common/coding-style.md +48 -0
- package/assets/rules/common/git-workflow.md +45 -0
- package/assets/rules/common/hooks.md +30 -0
- package/assets/rules/common/patterns.md +31 -0
- package/assets/rules/common/performance.md +55 -0
- package/assets/rules/common/security.md +29 -0
- package/assets/rules/common/testing.md +29 -0
- package/assets/rules/python/coding-style.md +42 -0
- package/assets/rules/python/hooks.md +19 -0
- package/assets/rules/python/patterns.md +39 -0
- package/assets/rules/python/security.md +30 -0
- package/assets/rules/python/testing.md +38 -0
- package/assets/rules/typescript/coding-style.md +18 -0
- package/assets/rules/typescript/hooks.md +19 -0
- package/assets/rules/typescript/patterns.md +39 -0
- package/assets/rules/typescript/security.md +30 -0
- package/assets/rules/typescript/testing.md +38 -0
- package/assets/skills/api-design/SKILL.md +522 -0
- package/assets/skills/backend-patterns/SKILL.md +597 -0
- package/assets/skills/brainstorming/SKILL.md +96 -0
- package/assets/skills/coding-standards/SKILL.md +529 -0
- package/assets/skills/database-migrations/SKILL.md +334 -0
- package/assets/skills/deployment-patterns/SKILL.md +426 -0
- package/assets/skills/dispatching-parallel-agents/SKILL.md +180 -0
- package/assets/skills/docker-patterns/SKILL.md +363 -0
- package/assets/skills/e2e-testing/SKILL.md +325 -0
- package/assets/skills/eval-harness/SKILL.md +235 -0
- package/assets/skills/executing-plans/SKILL.md +84 -0
- package/assets/skills/finishing-a-development-branch/SKILL.md +200 -0
- package/assets/skills/frontend-patterns/SKILL.md +641 -0
- package/assets/skills/iterative-retrieval/SKILL.md +210 -0
- package/assets/skills/postgres-patterns/SKILL.md +145 -0
- package/assets/skills/python-patterns/SKILL.md +749 -0
- package/assets/skills/python-testing/SKILL.md +815 -0
- package/assets/skills/receiving-code-review/SKILL.md +213 -0
- package/assets/skills/requesting-code-review/SKILL.md +105 -0
- package/assets/skills/requesting-code-review/code-reviewer-template.md +146 -0
- package/assets/skills/subagent-driven-development/SKILL.md +242 -0
- package/assets/skills/subagent-driven-development/code-quality-reviewer-prompt.md +20 -0
- package/assets/skills/subagent-driven-development/implementer-prompt.md +78 -0
- package/assets/skills/subagent-driven-development/spec-reviewer-prompt.md +61 -0
- package/assets/skills/systematic-debugging/CREATION-LOG.md +114 -0
- package/assets/skills/systematic-debugging/SKILL.md +296 -0
- package/assets/skills/systematic-debugging/condition-based-waiting-example.ts +158 -0
- package/assets/skills/systematic-debugging/condition-based-waiting.md +115 -0
- package/assets/skills/systematic-debugging/defense-in-depth.md +122 -0
- package/assets/skills/systematic-debugging/root-cause-tracing.md +169 -0
- package/assets/skills/systematic-debugging/scripts/find-polluter.sh +63 -0
- package/assets/skills/systematic-debugging/test-academic.md +14 -0
- package/assets/skills/systematic-debugging/test-pressure-1.md +58 -0
- package/assets/skills/systematic-debugging/test-pressure-2.md +68 -0
- package/assets/skills/systematic-debugging/test-pressure-3.md +69 -0
- package/assets/skills/tdd-workflow/SKILL.md +409 -0
- package/assets/skills/test-driven-development/SKILL.md +371 -0
- package/assets/skills/test-driven-development/testing-anti-patterns.md +299 -0
- package/assets/skills/using-git-worktrees/SKILL.md +218 -0
- package/assets/skills/verification-before-completion/SKILL.md +139 -0
- package/assets/skills/verification-loop/SKILL.md +125 -0
- package/assets/skills/writing-plans/SKILL.md +116 -0
- package/dist/agents-AEKT67A6.js +9 -0
- package/dist/chunk-3GUKEMND.js +28 -0
- package/dist/chunk-3UNN3IBE.js +54 -0
- package/dist/chunk-3Y26YU4R.js +27 -0
- package/dist/chunk-5XOKKPAA.js +21 -0
- package/dist/chunk-6B46AIFM.js +136 -0
- package/dist/chunk-EYY2IZ7N.js +27 -0
- package/dist/chunk-K25UZZVG.js +17 -0
- package/dist/chunk-KEENFBLL.js +24 -0
- package/dist/chunk-RMUKD7CW.js +44 -0
- package/dist/chunk-W63UKEIT.js +50 -0
- package/dist/cli-VZRGF733.js +238 -0
- package/dist/commands-P5LILVZ5.js +9 -0
- package/dist/hooks-IIG2XK4I.js +9 -0
- package/dist/index.js +131 -0
- package/dist/mcps-67Q7TBGW.js +6 -0
- package/dist/paths-FT6KBIRD.js +10 -0
- package/dist/registry-EGXWYWWK.js +17 -0
- package/dist/rules-2CPBVNNJ.js +7 -0
- package/dist/skills-ULMW3UCM.js +8 -0
- package/package.json +36 -0
package/assets/skills/python-testing/SKILL.md
@@ -0,0 +1,815 @@
---
name: python-testing
description: Python testing strategies using pytest, TDD methodology, fixtures, mocking, parametrization, and coverage requirements.
---

# Python Testing Patterns

Comprehensive testing strategies for Python applications using pytest, TDD methodology, and best practices.

## When to Activate

- Writing new Python code (follow TDD: red, green, refactor)
- Designing test suites for Python projects
- Reviewing Python test coverage
- Setting up testing infrastructure

## Core Testing Philosophy

### Test-Driven Development (TDD)

Always follow the TDD cycle:

1. **RED**: Write a failing test for the desired behavior
2. **GREEN**: Write minimal code to make the test pass
3. **REFACTOR**: Improve code while keeping tests green

```python
# Step 1: Write failing test (RED)
def test_add_numbers():
    result = add(2, 3)
    assert result == 5

# Step 2: Write minimal implementation (GREEN)
def add(a, b):
    return a + b

# Step 3: Refactor if needed (REFACTOR)
```

### Coverage Requirements

- **Target**: 80%+ code coverage
- **Critical paths**: 100% coverage required
- Use `pytest --cov` to measure coverage

```bash
pytest --cov=mypackage --cov-report=term-missing --cov-report=html
```

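The 80% target can also be enforced automatically so a coverage drop fails the run. A minimal sketch, assuming the pytest-cov plugin is installed (the threshold value is project policy, not fixed by this skill):

```bash
# Fail the test run if total coverage drops below 80%
pytest --cov=mypackage --cov-fail-under=80
```

The same threshold can instead live in coverage configuration (for example `fail_under = 80` under `[tool.coverage.report]` in pyproject.toml) so every invocation picks it up.
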
## pytest Fundamentals

### Basic Test Structure

```python
import pytest

def test_addition():
    """Test basic addition."""
    assert 2 + 2 == 4

def test_string_uppercase():
    """Test string uppercasing."""
    text = "hello"
    assert text.upper() == "HELLO"

def test_list_append():
    """Test list append."""
    items = [1, 2, 3]
    items.append(4)
    assert 4 in items
    assert len(items) == 4
```

### Assertions

```python
# Equality
assert result == expected

# Inequality
assert result != unexpected

# Truthiness
assert result  # Truthy
assert not result  # Falsy
assert result is True  # Exactly True
assert result is False  # Exactly False
assert result is None  # Exactly None

# Membership
assert item in collection
assert item not in collection

# Comparisons
assert result > 0
assert 0 <= result <= 100

# Type checking
assert isinstance(result, str)

# Exception testing (preferred approach)
with pytest.raises(ValueError):
    raise ValueError("error message")

# Check exception message
with pytest.raises(ValueError, match="invalid input"):
    raise ValueError("invalid input provided")

# Check exception attributes
with pytest.raises(ValueError) as exc_info:
    raise ValueError("error message")
assert str(exc_info.value) == "error message"
```

## Fixtures

### Basic Fixture Usage

```python
import pytest

@pytest.fixture
def sample_data():
    """Fixture providing sample data."""
    return {"name": "Alice", "age": 30}

def test_sample_data(sample_data):
    """Test using the fixture."""
    assert sample_data["name"] == "Alice"
    assert sample_data["age"] == 30
```

### Fixture with Setup/Teardown

```python
@pytest.fixture
def database():
    """Fixture with setup and teardown."""
    # Setup
    db = Database(":memory:")
    db.create_tables()
    db.insert_test_data()

    yield db  # Provide to test

    # Teardown
    db.close()

def test_database_query(database):
    """Test database operations."""
    result = database.query("SELECT * FROM users")
    assert len(result) > 0
```

### Fixture Scopes

```python
import os

# Function scope (default) - runs for each test
@pytest.fixture
def temp_file():
    with open("temp.txt", "w") as f:
        yield f
    os.remove("temp.txt")

# Module scope - runs once per module
@pytest.fixture(scope="module")
def module_db():
    db = Database(":memory:")
    db.create_tables()
    yield db
    db.close()

# Session scope - runs once per test session
@pytest.fixture(scope="session")
def shared_resource():
    resource = ExpensiveResource()
    yield resource
    resource.cleanup()
```

### Fixture with Parameters

```python
@pytest.fixture(params=[1, 2, 3])
def number(request):
    """Parameterized fixture."""
    return request.param

def test_numbers(number):
    """Test runs 3 times, once for each parameter."""
    assert number > 0
```

### Using Multiple Fixtures

```python
@pytest.fixture
def user():
    return User(id=1, name="Alice")

@pytest.fixture
def admin():
    return User(id=2, name="Admin", role="admin")

def test_user_admin_interaction(user, admin):
    """Test using multiple fixtures."""
    assert admin.can_manage(user)
```

### Autouse Fixtures

```python
@pytest.fixture(autouse=True)
def reset_config():
    """Automatically runs before every test."""
    Config.reset()
    yield
    Config.cleanup()

def test_without_fixture_call():
    # reset_config runs automatically
    assert Config.get_setting("debug") is False
```

### conftest.py for Shared Fixtures

```python
# tests/conftest.py
import pytest

@pytest.fixture
def client():
    """Shared fixture for all tests."""
    app = create_app(testing=True)
    with app.test_client() as client:
        yield client

@pytest.fixture
def auth_headers(client):
    """Generate auth headers for API testing."""
    response = client.post("/api/login", json={
        "username": "test",
        "password": "test"
    })
    token = response.json["token"]
    return {"Authorization": f"Bearer {token}"}
```

## Parametrization

### Basic Parametrization

```python
@pytest.mark.parametrize("input,expected", [
    ("hello", "HELLO"),
    ("world", "WORLD"),
    ("PyThOn", "PYTHON"),
])
def test_uppercase(input, expected):
    """Test runs 3 times with different inputs."""
    assert input.upper() == expected
```

### Multiple Parameters

```python
@pytest.mark.parametrize("a,b,expected", [
    (2, 3, 5),
    (0, 0, 0),
    (-1, 1, 0),
    (100, 200, 300),
])
def test_add(a, b, expected):
    """Test addition with multiple inputs."""
    assert add(a, b) == expected
```

### Parametrize with IDs

```python
@pytest.mark.parametrize("input,expected", [
    ("valid@email.com", True),
    ("invalid", False),
    ("@no-domain.com", False),
], ids=["valid-email", "missing-at", "missing-domain"])
def test_email_validation(input, expected):
    """Test email validation with readable test IDs."""
    assert is_valid_email(input) is expected
```

### Parametrized Fixtures

```python
@pytest.fixture(params=["sqlite", "postgresql", "mysql"])
def db(request):
    """Test against multiple database backends."""
    if request.param == "sqlite":
        return Database(":memory:")
    elif request.param == "postgresql":
        return Database("postgresql://localhost/test")
    elif request.param == "mysql":
        return Database("mysql://localhost/test")

def test_database_operations(db):
    """Test runs 3 times, once for each database."""
    result = db.query("SELECT 1")
    assert result is not None
```

## Markers and Test Selection

### Custom Markers

```python
import time

import requests

# Mark slow tests
@pytest.mark.slow
def test_slow_operation():
    time.sleep(5)

# Mark integration tests
@pytest.mark.integration
def test_api_integration():
    response = requests.get("https://api.example.com")
    assert response.status_code == 200

# Mark unit tests
@pytest.mark.unit
def test_unit_logic():
    assert calculate(2, 3) == 5
```

### Run Specific Tests

```bash
# Run only fast tests
pytest -m "not slow"

# Run only integration tests
pytest -m integration

# Run integration or slow tests
pytest -m "integration or slow"

# Run tests marked as unit but not slow
pytest -m "unit and not slow"
```

### Configure Markers in pytest.ini

```ini
[pytest]
markers =
    slow: marks tests as slow
    integration: marks tests as integration tests
    unit: marks tests as unit tests
    django: marks tests as requiring Django
```

## Mocking and Patching

### Mocking Functions

```python
from unittest.mock import patch, Mock

@patch("mypackage.external_api_call")
def test_with_mock(api_call_mock):
    """Test with mocked external API."""
    api_call_mock.return_value = {"status": "success"}

    result = my_function()

    api_call_mock.assert_called_once()
    assert result["status"] == "success"
```

### Mocking Return Values

```python
@patch("mypackage.Database.connect")
def test_database_connection(connect_mock):
    """Test with mocked database connection."""
    connect_mock.return_value = MockConnection()

    db = Database()
    db.connect("localhost")

    connect_mock.assert_called_once_with("localhost")
```

### Mocking Exceptions

```python
@patch("mypackage.api_call")
def test_api_error_handling(api_call_mock):
    """Test error handling with mocked exception."""
    api_call_mock.side_effect = ConnectionError("Network error")

    with pytest.raises(ConnectionError):
        api_call()

    api_call_mock.assert_called_once()
```

### Mocking Context Managers

```python
from unittest.mock import mock_open, patch

@patch("builtins.open", new_callable=mock_open)
def test_file_reading(mock_file):
    """Test file reading with mocked open."""
    mock_file.return_value.read.return_value = "file content"

    result = read_file("test.txt")

    mock_file.assert_called_once_with("test.txt", "r")
    assert result == "file content"
```

### Using Autospec

```python
@patch("mypackage.DBConnection", autospec=True)
def test_autospec(db_mock):
    """Test with autospec to catch API misuse."""
    db = db_mock.return_value
    db.query("SELECT * FROM users")

    # With autospec, the call above would fail if DBConnection had no query method
    db.query.assert_called_once_with("SELECT * FROM users")
```

### Mock Class Instances

```python
class TestUserService:
    @patch("mypackage.UserRepository")
    def test_create_user(self, repo_mock):
        """Test user creation with mocked repository."""
        repo_mock.return_value.save.return_value = User(id=1, name="Alice")

        service = UserService(repo_mock.return_value)
        user = service.create_user(name="Alice")

        assert user.name == "Alice"
        repo_mock.return_value.save.assert_called_once()
```

### Mock Property

```python
from unittest.mock import Mock, PropertyMock

@pytest.fixture
def mock_config():
    """Create a mock with a property."""
    config = Mock()
    type(config).debug = PropertyMock(return_value=True)
    type(config).api_key = PropertyMock(return_value="test-key")
    return config

def test_with_mock_config(mock_config):
    """Test with mocked config properties."""
    assert mock_config.debug is True
    assert mock_config.api_key == "test-key"
```

## Testing Async Code

### Async Tests with pytest-asyncio

```python
import pytest

@pytest.mark.asyncio
async def test_async_function():
    """Test async function."""
    result = await async_add(2, 3)
    assert result == 5

@pytest.mark.asyncio
async def test_async_with_fixture(async_client):
    """Test async with async fixture."""
    response = await async_client.get("/api/users")
    assert response.status_code == 200
```

### Async Fixture

```python
@pytest.fixture
async def async_client():
    """Async fixture providing async test client."""
    app = create_app()
    async with app.test_client() as client:
        yield client

@pytest.mark.asyncio
async def test_api_endpoint(async_client):
    """Test using async fixture."""
    response = await async_client.get("/api/data")
    assert response.status_code == 200
```

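These examples rely on the pytest-asyncio plugin, and how the plain `@pytest.fixture` async fixture above is collected depends on the plugin's mode: recent versions default to "strict" mode, where async fixtures are declared with `@pytest_asyncio.fixture`, while "auto" mode picks up async tests and fixtures without extra decorators. A minimal configuration sketch (option names per pytest-asyncio; adjust to the version you pin):

```ini
[pytest]
asyncio_mode = auto
```

With `asyncio_mode = auto`, the explicit `@pytest.mark.asyncio` markers shown here are still accepted but no longer required.
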
### Mocking Async Functions

```python
@pytest.mark.asyncio
@patch("mypackage.async_api_call")
async def test_async_mock(api_call_mock):
    """Test async function with mock."""
    api_call_mock.return_value = {"status": "ok"}

    result = await my_async_function()

    api_call_mock.assert_awaited_once()
    assert result["status"] == "ok"
```

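In the example above, `patch` supplies a mock with `assert_awaited_once()` because it detects an async target and substitutes an `AsyncMock` (Python 3.8+). When the dependency is injected rather than patched, an explicit `AsyncMock` does the same job. A minimal, self-contained sketch (the `load_user_name` helper is a hypothetical stand-in for your code under test):

```python
from unittest.mock import AsyncMock

import pytest

async def load_user_name(fetch_user, user_id):
    """Hypothetical code under test: awaits an injected coroutine function."""
    user = await fetch_user(user_id)
    return user["name"]

@pytest.mark.asyncio
async def test_load_user_name_with_async_mock():
    # Awaiting an AsyncMock returns its configured return_value
    fetch_user = AsyncMock(return_value={"id": 1, "name": "Alice"})

    name = await load_user_name(fetch_user, 1)

    fetch_user.assert_awaited_once_with(1)
    assert name == "Alice"
```
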
## Testing Exceptions

### Testing Expected Exceptions

```python
def test_divide_by_zero():
    """Test that dividing by zero raises ZeroDivisionError."""
    with pytest.raises(ZeroDivisionError):
        divide(10, 0)

def test_custom_exception():
    """Test custom exception with message."""
    with pytest.raises(ValueError, match="invalid input"):
        validate_input("invalid")
```

### Testing Exception Attributes

```python
def test_exception_with_details():
    """Test exception with custom attributes."""
    with pytest.raises(CustomError) as exc_info:
        raise CustomError("error", code=400)

    assert exc_info.value.code == 400
    assert "error" in str(exc_info.value)
```

## Testing Side Effects

### Testing File Operations

```python
import tempfile
import os

def test_file_processing():
    """Test file processing with temp file."""
    with tempfile.NamedTemporaryFile(mode='w', delete=False, suffix='.txt') as f:
        f.write("test content")
        temp_path = f.name

    try:
        result = process_file(temp_path)
        assert result == "processed: test content"
    finally:
        os.unlink(temp_path)
```

### Testing with pytest's tmp_path Fixture

```python
def test_with_tmp_path(tmp_path):
    """Test using pytest's built-in temp path fixture."""
    test_file = tmp_path / "test.txt"
    test_file.write_text("hello world")

    result = process_file(str(test_file))
    assert result == "hello world"
    # tmp_path automatically cleaned up
```

### Testing with tmpdir Fixture

```python
def test_with_tmpdir(tmpdir):
    """Test using pytest's tmpdir fixture."""
    test_file = tmpdir.join("test.txt")
    test_file.write("data")

    result = process_file(str(test_file))
    assert result == "data"
```

## Test Organization

### Directory Structure

```
tests/
├── conftest.py              # Shared fixtures
├── __init__.py
├── unit/                    # Unit tests
│   ├── __init__.py
│   ├── test_models.py
│   ├── test_utils.py
│   └── test_services.py
├── integration/             # Integration tests
│   ├── __init__.py
│   ├── test_api.py
│   └── test_database.py
└── e2e/                     # End-to-end tests
    ├── __init__.py
    └── test_user_flow.py
```

### Test Classes

```python
class TestUserService:
    """Group related tests in a class."""

    @pytest.fixture(autouse=True)
    def setup(self):
        """Setup runs before each test in this class."""
        self.service = UserService()

    def test_create_user(self):
        """Test user creation."""
        user = self.service.create_user("Alice")
        assert user.name == "Alice"

    def test_delete_user(self):
        """Test user deletion."""
        user = User(id=1, name="Bob")
        self.service.delete_user(user)
        assert not self.service.user_exists(1)
```

## Best Practices

### DO

- **Follow TDD**: Write tests before code (red-green-refactor)
- **Test one thing**: Each test should verify a single behavior
- **Use descriptive names**: `test_user_login_with_invalid_credentials_fails`
- **Use fixtures**: Eliminate duplication with fixtures
- **Mock external dependencies**: Don't depend on external services
- **Test edge cases**: Empty inputs, None values, boundary conditions (see the sketch after this list)
- **Aim for 80%+ coverage**: Focus on critical paths
- **Keep tests fast**: Use marks to separate slow tests

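A minimal sketch of several of these points together — one behavior per test, descriptive names, explicit edge cases — using a small hypothetical `normalize_username` helper defined inline for illustration:

```python
import pytest

def normalize_username(value):
    """Hypothetical helper under test: trim whitespace and lowercase."""
    if value is None:
        raise ValueError("username is required")
    return value.strip().lower()

def test_normalize_username_lowercases_and_trims():
    assert normalize_username("  Alice ") == "alice"

def test_normalize_username_with_empty_string_returns_empty():
    assert normalize_username("") == ""

def test_normalize_username_with_none_raises_value_error():
    with pytest.raises(ValueError, match="required"):
        normalize_username(None)
```
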
### DON'T

- **Don't test implementation**: Test behavior, not internals
- **Don't use complex conditionals in tests**: Keep tests simple
- **Don't ignore test failures**: All tests must pass
- **Don't test third-party code**: Trust libraries to work
- **Don't share state between tests**: Tests should be independent
- **Don't catch exceptions in tests**: Use `pytest.raises` (see the sketch after this list)
- **Don't use print statements**: Use assertions and pytest output
- **Don't write tests that are too brittle**: Avoid over-specific mocks

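As an illustration of the `pytest.raises` point, compare a hand-rolled try/except with the idiomatic form (hypothetical `parse_port` helper defined inline):

```python
import pytest

def parse_port(value):
    """Hypothetical helper under test."""
    port = int(value)
    if not 1 <= port <= 65535:
        raise ValueError(f"port out of range: {port}")
    return port

# Avoid: manual exception bookkeeping is noisy and obscures the intent.
def test_parse_port_rejects_zero_manual():
    try:
        parse_port("0")
        assert False, "expected ValueError"
    except ValueError:
        pass

# Prefer: pytest.raises states the expectation directly.
def test_parse_port_rejects_zero():
    with pytest.raises(ValueError, match="out of range"):
        parse_port("0")
```
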
## Common Patterns

### Testing API Endpoints (FastAPI/Flask)

```python
@pytest.fixture
def client():
    app = create_app(testing=True)
    return app.test_client()

def test_get_user(client):
    response = client.get("/api/users/1")
    assert response.status_code == 200
    assert response.json["id"] == 1

def test_create_user(client):
    response = client.post("/api/users", json={
        "name": "Alice",
        "email": "alice@example.com"
    })
    assert response.status_code == 201
    assert response.json["name"] == "Alice"
```

### Testing Database Operations

```python
@pytest.fixture
def db_session():
    """Create a test database session."""
    session = Session(bind=engine)
    session.begin_nested()
    yield session
    session.rollback()
    session.close()

def test_create_user(db_session):
    user = User(name="Alice", email="alice@example.com")
    db_session.add(user)
    db_session.commit()

    retrieved = db_session.query(User).filter_by(name="Alice").first()
    assert retrieved.email == "alice@example.com"
```

### Testing Class Methods

```python
class TestCalculator:
    @pytest.fixture
    def calculator(self):
        return Calculator()

    def test_add(self, calculator):
        assert calculator.add(2, 3) == 5

    def test_divide_by_zero(self, calculator):
        with pytest.raises(ZeroDivisionError):
            calculator.divide(10, 0)
```

## pytest Configuration

### pytest.ini

```ini
[pytest]
testpaths = tests
python_files = test_*.py
python_classes = Test*
python_functions = test_*
addopts =
    --strict-markers
    --disable-warnings
    --cov=mypackage
    --cov-report=term-missing
    --cov-report=html
markers =
    slow: marks tests as slow
    integration: marks tests as integration tests
    unit: marks tests as unit tests
```

### pyproject.toml

```toml
[tool.pytest.ini_options]
testpaths = ["tests"]
python_files = ["test_*.py"]
python_classes = ["Test*"]
python_functions = ["test_*"]
addopts = [
    "--strict-markers",
    "--cov=mypackage",
    "--cov-report=term-missing",
    "--cov-report=html",
]
markers = [
    "slow: marks tests as slow",
    "integration: marks tests as integration tests",
    "unit: marks tests as unit tests",
]
```

## Running Tests

```bash
# Run all tests
pytest

# Run specific file
pytest tests/test_utils.py

# Run specific test
pytest tests/test_utils.py::test_function

# Run with verbose output
pytest -v

# Run with coverage
pytest --cov=mypackage --cov-report=html

# Run only fast tests
pytest -m "not slow"

# Run until first failure
pytest -x

# Run and stop on N failures
pytest --maxfail=3

# Run last failed tests
pytest --lf

# Run tests with pattern
pytest -k "test_user"

# Run with debugger on failure
pytest --pdb
```

## Quick Reference

| Pattern | Usage |
|---------|-------|
| `pytest.raises()` | Test expected exceptions |
| `@pytest.fixture()` | Create reusable test fixtures |
| `@pytest.mark.parametrize()` | Run tests with multiple inputs |
| `@pytest.mark.slow` | Mark slow tests |
| `pytest -m "not slow"` | Skip slow tests |
| `@patch()` | Mock functions and classes |
| `tmp_path` fixture | Automatic temp directory |
| `pytest --cov` | Generate coverage report |
| `assert` | Simple and readable assertions |

**Remember**: Tests are code too. Keep them clean, readable, and maintainable. Good tests catch bugs; great tests prevent them.