@punks/cli 1.0.0 → 1.0.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,622 @@
1
+ ---
2
+ name: python-testing-patterns
3
+ description: Implement comprehensive testing strategies with pytest, fixtures, mocking, and test-driven development. Use when writing Python tests, setting up test suites, or implementing testing best practices.
4
+ ---
5
+
6
+ # Python Testing Patterns
7
+
8
+ Comprehensive guide to implementing robust testing strategies in Python using pytest, fixtures, mocking, parameterization, and test-driven development practices.
9
+
10
+ ## When to Use This Skill
11
+
12
+ - Writing unit tests for Python code
13
+ - Setting up test suites and test infrastructure
14
+ - Implementing test-driven development (TDD)
15
+ - Creating integration tests for APIs and services
16
+ - Mocking external dependencies and services
17
+ - Testing async code and concurrent operations
18
+ - Setting up continuous testing in CI/CD
19
+ - Implementing property-based testing
20
+ - Testing database operations
21
+ - Debugging failing tests
22
+
23
+ ## Core Concepts
24
+
25
+ ### 1. Test Types
26
+
27
+ - **Unit Tests**: Test individual functions/classes in isolation
28
+ - **Integration Tests**: Test interaction between components
29
+ - **Functional Tests**: Test complete features end-to-end
30
+ - **Performance Tests**: Measure speed and resource usage
31
+
32
+ ### 2. Test Structure (AAA Pattern)
33
+
34
+ - **Arrange**: Set up test data and preconditions
35
+ - **Act**: Execute the code under test
36
+ - **Assert**: Verify the results
37
+
38
+ ### 3. Test Coverage
39
+
40
+ - Measure what code is exercised by tests
41
+ - Identify untested code paths
42
+ - Aim for meaningful coverage, not just high percentages
43
+
44
+ ### 4. Test Isolation
45
+
46
+ - Tests should be independent
47
+ - No shared state between tests
48
+ - Each test should clean up after itself
49
+
50
+ ## Quick Start
51
+
52
+ ```python
53
+ # test_example.py
54
+ def add(a, b):
55
+ return a + b
56
+
57
+ def test_add():
58
+ """Basic test example."""
59
+ result = add(2, 3)
60
+ assert result == 5
61
+
62
+ def test_add_negative():
63
+ """Test with negative numbers."""
64
+ assert add(-1, 1) == 0
65
+
66
+ # Run with: pytest test_example.py
67
+ ```
68
+
69
+ ## Fundamental Patterns
70
+
71
+ ### Pattern 1: Basic pytest Tests
72
+
73
+ ```python
74
+ # test_calculator.py
75
+ import pytest
76
+
77
+ class Calculator:
78
+ """Simple calculator for testing."""
79
+
80
+ def add(self, a: float, b: float) -> float:
81
+ return a + b
82
+
83
+ def subtract(self, a: float, b: float) -> float:
84
+ return a - b
85
+
86
+ def multiply(self, a: float, b: float) -> float:
87
+ return a * b
88
+
89
+ def divide(self, a: float, b: float) -> float:
90
+ if b == 0:
91
+ raise ValueError("Cannot divide by zero")
92
+ return a / b
93
+
94
+
95
+ def test_addition():
96
+ """Test addition."""
97
+ calc = Calculator()
98
+ assert calc.add(2, 3) == 5
99
+ assert calc.add(-1, 1) == 0
100
+ assert calc.add(0, 0) == 0
101
+
102
+
103
+ def test_subtraction():
104
+ """Test subtraction."""
105
+ calc = Calculator()
106
+ assert calc.subtract(5, 3) == 2
107
+ assert calc.subtract(0, 5) == -5
108
+
109
+
110
+ def test_multiplication():
111
+ """Test multiplication."""
112
+ calc = Calculator()
113
+ assert calc.multiply(3, 4) == 12
114
+ assert calc.multiply(0, 5) == 0
115
+
116
+
117
+ def test_division():
118
+ """Test division."""
119
+ calc = Calculator()
120
+ assert calc.divide(6, 3) == 2
121
+ assert calc.divide(5, 2) == 2.5
122
+
123
+
124
+ def test_division_by_zero():
125
+ """Test division by zero raises error."""
126
+ calc = Calculator()
127
+ with pytest.raises(ValueError, match="Cannot divide by zero"):
128
+ calc.divide(5, 0)
129
+ ```
130
+
131
+ ### Pattern 2: Fixtures for Setup and Teardown
132
+
133
+ ```python
134
+ # test_database.py
135
+ import pytest
136
+ from typing import Generator
137
+
138
+ class Database:
139
+ """Simple database class."""
140
+
141
+ def __init__(self, connection_string: str):
142
+ self.connection_string = connection_string
143
+ self.connected = False
144
+
145
+ def connect(self):
146
+ """Connect to database."""
147
+ self.connected = True
148
+
149
+ def disconnect(self):
150
+ """Disconnect from database."""
151
+ self.connected = False
152
+
153
+ def query(self, sql: str) -> list:
154
+ """Execute query."""
155
+ if not self.connected:
156
+ raise RuntimeError("Not connected")
157
+ return [{"id": 1, "name": "Test"}]
158
+
159
+
160
+ @pytest.fixture
161
+ def db() -> Generator[Database, None, None]:
162
+ """Fixture that provides connected database."""
163
+ # Setup
164
+ database = Database("sqlite:///:memory:")
165
+ database.connect()
166
+
167
+ # Provide to test
168
+ yield database
169
+
170
+ # Teardown
171
+ database.disconnect()
172
+
173
+
174
+ def test_database_query(db):
175
+ """Test database query with fixture."""
176
+ results = db.query("SELECT * FROM users")
177
+ assert len(results) == 1
178
+ assert results[0]["name"] == "Test"
179
+
180
+
181
+ @pytest.fixture(scope="session")
182
+ def app_config():
183
+ """Session-scoped fixture - created once per test session."""
184
+ return {
185
+ "database_url": "postgresql://localhost/test",
186
+ "api_key": "test-key",
187
+ "debug": True
188
+ }
189
+
190
+
191
+ @pytest.fixture(scope="module")
192
+ def api_client(app_config):
193
+ """Module-scoped fixture - created once per test module."""
194
+ # Setup expensive resource
195
+ client = {"config": app_config, "session": "active"}
196
+ yield client
197
+ # Cleanup
198
+ client["session"] = "closed"
199
+
200
+
201
+ def test_api_client(api_client):
202
+ """Test using api client fixture."""
203
+ assert api_client["session"] == "active"
204
+ assert api_client["config"]["debug"] is True
205
+ ```
206
+
207
+ ### Pattern 3: Parameterized Tests
208
+
209
+ ```python
210
+ # test_validation.py
211
+ import pytest
212
+
213
+ def is_valid_email(email: str) -> bool:
214
+ """Check if email is valid."""
215
+     return "@" in email and bool(email.split("@")[0]) and "." in email.split("@")[1]
216
+
217
+
218
+ @pytest.mark.parametrize("email,expected", [
219
+ ("user@example.com", True),
220
+ ("test.user@domain.co.uk", True),
221
+ ("invalid.email", False),
222
+ ("@example.com", False),
223
+ ("user@domain", False),
224
+ ("", False),
225
+ ])
226
+ def test_email_validation(email, expected):
227
+ """Test email validation with various inputs."""
228
+ assert is_valid_email(email) == expected
229
+
230
+
231
+ @pytest.mark.parametrize("a,b,expected", [
232
+ (2, 3, 5),
233
+ (0, 0, 0),
234
+ (-1, 1, 0),
235
+ (100, 200, 300),
236
+ (-5, -5, -10),
237
+ ])
238
+ def test_addition_parameterized(a, b, expected):
239
+ """Test addition with multiple parameter sets."""
240
+ from test_calculator import Calculator
241
+ calc = Calculator()
242
+ assert calc.add(a, b) == expected
243
+
244
+
245
+ # Using pytest.param for special cases
246
+ @pytest.mark.parametrize("value,expected", [
247
+ pytest.param(1, True, id="positive"),
248
+ pytest.param(0, False, id="zero"),
249
+ pytest.param(-1, False, id="negative"),
250
+ ])
251
+ def test_is_positive(value, expected):
252
+ """Test with custom test IDs."""
253
+ assert (value > 0) == expected
254
+ ```
255
+
256
+ ### Pattern 4: Mocking with unittest.mock
257
+
258
+ ```python
259
+ # test_api_client.py
260
+ import pytest
261
+ from unittest.mock import Mock, patch, MagicMock
262
+ import requests
263
+
264
+ class APIClient:
265
+ """Simple API client."""
266
+
267
+ def __init__(self, base_url: str):
268
+ self.base_url = base_url
269
+
270
+ def get_user(self, user_id: int) -> dict:
271
+ """Fetch user from API."""
272
+ response = requests.get(f"{self.base_url}/users/{user_id}")
273
+ response.raise_for_status()
274
+ return response.json()
275
+
276
+ def create_user(self, data: dict) -> dict:
277
+ """Create new user."""
278
+ response = requests.post(f"{self.base_url}/users", json=data)
279
+ response.raise_for_status()
280
+ return response.json()
281
+
282
+
283
+ def test_get_user_success():
284
+ """Test successful API call with mock."""
285
+ client = APIClient("https://api.example.com")
286
+
287
+ mock_response = Mock()
288
+ mock_response.json.return_value = {"id": 1, "name": "John Doe"}
289
+ mock_response.raise_for_status.return_value = None
290
+
291
+ with patch("requests.get", return_value=mock_response) as mock_get:
292
+ user = client.get_user(1)
293
+
294
+ assert user["id"] == 1
295
+ assert user["name"] == "John Doe"
296
+ mock_get.assert_called_once_with("https://api.example.com/users/1")
297
+
298
+
299
+ def test_get_user_not_found():
300
+ """Test API call with 404 error."""
301
+ client = APIClient("https://api.example.com")
302
+
303
+ mock_response = Mock()
304
+ mock_response.raise_for_status.side_effect = requests.HTTPError("404 Not Found")
305
+
306
+ with patch("requests.get", return_value=mock_response):
307
+ with pytest.raises(requests.HTTPError):
308
+ client.get_user(999)
309
+
310
+
311
+ @patch("requests.post")
312
+ def test_create_user(mock_post):
313
+ """Test user creation with decorator syntax."""
314
+ client = APIClient("https://api.example.com")
315
+
316
+ mock_post.return_value.json.return_value = {"id": 2, "name": "Jane Doe"}
317
+ mock_post.return_value.raise_for_status.return_value = None
318
+
319
+ user_data = {"name": "Jane Doe", "email": "jane@example.com"}
320
+ result = client.create_user(user_data)
321
+
322
+ assert result["id"] == 2
323
+ mock_post.assert_called_once()
324
+ call_args = mock_post.call_args
325
+ assert call_args.kwargs["json"] == user_data
326
+ ```
327
+
328
+ ### Pattern 5: Testing Exceptions
329
+
330
+ ```python
331
+ # test_exceptions.py
332
+ import pytest
333
+
334
+ def divide(a: float, b: float) -> float:
335
+ """Divide a by b."""
336
+ if b == 0:
337
+ raise ZeroDivisionError("Division by zero")
338
+ if not isinstance(a, (int, float)) or not isinstance(b, (int, float)):
339
+ raise TypeError("Arguments must be numbers")
340
+ return a / b
341
+
342
+
343
+ def test_zero_division():
344
+ """Test exception is raised for division by zero."""
345
+ with pytest.raises(ZeroDivisionError):
346
+ divide(10, 0)
347
+
348
+
349
+ def test_zero_division_with_message():
350
+ """Test exception message."""
351
+ with pytest.raises(ZeroDivisionError, match="Division by zero"):
352
+ divide(5, 0)
353
+
354
+
355
+ def test_type_error():
356
+ """Test type error exception."""
357
+ with pytest.raises(TypeError, match="must be numbers"):
358
+ divide("10", 5)
359
+
360
+
361
+ def test_exception_info():
362
+ """Test accessing exception info."""
363
+ with pytest.raises(ValueError) as exc_info:
364
+ int("not a number")
365
+
366
+ assert "invalid literal" in str(exc_info.value)
367
+ ```
368
+
369
+ For advanced patterns including async testing, monkeypatching, temporary files, conftest setup, property-based testing, database testing, CI/CD integration, and configuration files, see [references/advanced-patterns.md](references/advanced-patterns.md)
370
+
371
+ ## Test Design Principles
372
+
373
+ ### One Behavior Per Test
374
+
375
+ Each test should verify exactly one behavior. This makes failures easy to diagnose and tests easy to maintain.
376
+
377
+ ```python
378
+ # BAD - testing multiple behaviors
379
+ def test_user_service():
380
+ user = service.create_user(data)
381
+ assert user.id is not None
382
+ assert user.email == data["email"]
383
+ updated = service.update_user(user.id, {"name": "New"})
384
+ assert updated.name == "New"
385
+
386
+ # GOOD - focused tests
387
+ def test_create_user_assigns_id():
388
+ user = service.create_user(data)
389
+ assert user.id is not None
390
+
391
+ def test_create_user_stores_email():
392
+ user = service.create_user(data)
393
+ assert user.email == data["email"]
394
+
395
+ def test_update_user_changes_name():
396
+ user = service.create_user(data)
397
+ updated = service.update_user(user.id, {"name": "New"})
398
+ assert updated.name == "New"
399
+ ```
400
+
401
+ ### Test Error Paths
402
+
403
+ Always test failure cases, not just happy paths.
404
+
405
+ ```python
406
+ def test_get_user_raises_not_found():
407
+ with pytest.raises(UserNotFoundError) as exc_info:
408
+ service.get_user("nonexistent-id")
409
+
410
+ assert "nonexistent-id" in str(exc_info.value)
411
+
412
+ def test_create_user_rejects_invalid_email():
413
+ with pytest.raises(ValueError, match="Invalid email format"):
414
+ service.create_user({"email": "not-an-email"})
415
+ ```
416
+
417
+ ## Testing Best Practices
418
+
419
+ ### Test Organization
420
+
421
+ ```python
422
+ # tests/
423
+ # __init__.py
424
+ # conftest.py # Shared fixtures
425
+ # test_unit/ # Unit tests
426
+ # test_models.py
427
+ # test_utils.py
428
+ # test_integration/ # Integration tests
429
+ # test_api.py
430
+ # test_database.py
431
+ # test_e2e/ # End-to-end tests
432
+ # test_workflows.py
433
+ ```
434
+
435
+ ### Test Naming Convention
436
+
437
+ A common pattern: `test_<unit>_<scenario>_<expected_outcome>`. Adapt to your team's preferences.
438
+
439
+ ```python
440
+ # Pattern: test_<unit>_<scenario>_<expected>
441
+ def test_create_user_with_valid_data_returns_user():
442
+ ...
443
+
444
+ def test_create_user_with_duplicate_email_raises_conflict():
445
+ ...
446
+
447
+ def test_get_user_with_unknown_id_returns_none():
448
+ ...
449
+
450
+ # Good test names - clear and descriptive
451
+ def test_user_creation_with_valid_data():
452
+ """Clear name describes what is being tested."""
453
+ pass
454
+
455
+ def test_login_fails_with_invalid_password():
456
+ """Name describes expected behavior."""
457
+ pass
458
+
459
+ def test_api_returns_404_for_missing_resource():
460
+ """Specific about inputs and expected outcomes."""
461
+ pass
462
+
463
+ # Bad test names - avoid these
464
+ def test_1(): # Not descriptive
465
+ pass
466
+
467
+ def test_user(): # Too vague
468
+ pass
469
+
470
+ def test_function(): # Doesn't explain what's tested
471
+ pass
472
+ ```
473
+
474
+ ### Testing Retry Behavior
475
+
476
+ Verify that retry logic works correctly using mock side effects.
477
+
478
+ ```python
479
+ from unittest.mock import Mock
480
+
481
+ def test_retries_on_transient_error():
482
+ """Test that service retries on transient failures."""
483
+ client = Mock()
484
+ # Fail twice, then succeed
485
+ client.request.side_effect = [
486
+ ConnectionError("Failed"),
487
+ ConnectionError("Failed"),
488
+ {"status": "ok"},
489
+ ]
490
+
491
+ service = ServiceWithRetry(client, max_retries=3)
492
+ result = service.fetch()
493
+
494
+ assert result == {"status": "ok"}
495
+ assert client.request.call_count == 3
496
+
497
+ def test_gives_up_after_max_retries():
498
+ """Test that service stops retrying after max attempts."""
499
+ client = Mock()
500
+ client.request.side_effect = ConnectionError("Failed")
501
+
502
+ service = ServiceWithRetry(client, max_retries=3)
503
+
504
+ with pytest.raises(ConnectionError):
505
+ service.fetch()
506
+
507
+ assert client.request.call_count == 3
508
+
509
+ def test_does_not_retry_on_permanent_error():
510
+ """Test that permanent errors are not retried."""
511
+ client = Mock()
512
+ client.request.side_effect = ValueError("Invalid input")
513
+
514
+ service = ServiceWithRetry(client, max_retries=3)
515
+
516
+ with pytest.raises(ValueError):
517
+ service.fetch()
518
+
519
+ # Only called once - no retry for ValueError
520
+ assert client.request.call_count == 1
521
+ ```
522
+
523
+ ### Mocking Time with Freezegun
524
+
525
+ Use freezegun to control time in tests for predictable time-dependent behavior.
526
+
527
+ ```python
528
+ from freezegun import freeze_time
529
+ from datetime import datetime, timedelta
530
+
531
+ @freeze_time("2026-01-15 10:00:00")
532
+ def test_token_expiry():
533
+ """Test token expires at correct time."""
534
+ token = create_token(expires_in_seconds=3600)
535
+ assert token.expires_at == datetime(2026, 1, 15, 11, 0, 0)
536
+
537
+ @freeze_time("2026-01-15 10:00:00")
538
+ def test_is_expired_returns_false_before_expiry():
539
+ """Test token is not expired when within validity period."""
540
+ token = create_token(expires_in_seconds=3600)
541
+ assert not token.is_expired()
542
+
543
+ @freeze_time("2026-01-15 12:00:00")
544
+ def test_is_expired_returns_true_after_expiry():
545
+ """Test token is expired after validity period."""
546
+ token = Token(expires_at=datetime(2026, 1, 15, 11, 30, 0))
547
+ assert token.is_expired()
548
+
549
+ def test_with_time_travel():
550
+ """Test behavior across time using freeze_time context."""
551
+ with freeze_time("2026-01-01") as frozen_time:
552
+ item = create_item()
553
+ assert item.created_at == datetime(2026, 1, 1)
554
+
555
+ # Move forward in time
556
+ frozen_time.move_to("2026-01-15")
557
+ assert item.age_days == 14
558
+ ```
559
+
560
+ ### Test Markers
561
+
562
+ ```python
563
+ # test_markers.py
564
+ import os
+ import pytest
565
+
566
+ @pytest.mark.slow
567
+ def test_slow_operation():
568
+ """Mark slow tests."""
569
+ import time
570
+ time.sleep(2)
571
+
572
+
573
+ @pytest.mark.integration
574
+ def test_database_integration():
575
+ """Mark integration tests."""
576
+ pass
577
+
578
+
579
+ @pytest.mark.skip(reason="Feature not implemented yet")
580
+ def test_future_feature():
581
+ """Skip tests temporarily."""
582
+ pass
583
+
584
+
585
+ @pytest.mark.skipif(os.name == "nt", reason="Unix only test")
586
+ def test_unix_specific():
587
+ """Conditional skip."""
588
+ pass
589
+
590
+
591
+ @pytest.mark.xfail(reason="Known bug #123")
592
+ def test_known_bug():
593
+ """Mark expected failures."""
594
+ assert False
595
+
596
+
597
+ # Run with:
598
+ # pytest -m slow # Run only slow tests
599
+ # pytest -m "not slow" # Skip slow tests
600
+ # pytest -m integration # Run integration tests
601
+ ```
602
+
603
+ ### Coverage Reporting
604
+
605
+ ```bash
606
+ # Install coverage
607
+ pip install pytest-cov
608
+
609
+ # Run tests with coverage
610
+ pytest --cov=myapp tests/
611
+
612
+ # Generate HTML report
613
+ pytest --cov=myapp --cov-report=html tests/
614
+
615
+ # Fail if coverage below threshold
616
+ pytest --cov=myapp --cov-fail-under=80 tests/
617
+
618
+ # Show missing lines
619
+ pytest --cov=myapp --cov-report=term-missing tests/
620
+ ```
621
+
622
+ For advanced patterns (async testing, monkeypatching, property-based testing, database testing, CI/CD integration, and configuration), see [references/advanced-patterns.md](references/advanced-patterns.md)