kollabor 0.4.9__py3-none-any.whl → 0.4.15__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- agents/__init__.py +2 -0
- agents/coder/__init__.py +0 -0
- agents/coder/agent.json +4 -0
- agents/coder/api-integration.md +2150 -0
- agents/coder/cli-pretty.md +765 -0
- agents/coder/code-review.md +1092 -0
- agents/coder/database-design.md +1525 -0
- agents/coder/debugging.md +1102 -0
- agents/coder/dependency-management.md +1397 -0
- agents/coder/git-workflow.md +1099 -0
- agents/coder/refactoring.md +1454 -0
- agents/coder/security-hardening.md +1732 -0
- agents/coder/system_prompt.md +1448 -0
- agents/coder/tdd.md +1367 -0
- agents/creative-writer/__init__.py +0 -0
- agents/creative-writer/agent.json +4 -0
- agents/creative-writer/character-development.md +1852 -0
- agents/creative-writer/dialogue-craft.md +1122 -0
- agents/creative-writer/plot-structure.md +1073 -0
- agents/creative-writer/revision-editing.md +1484 -0
- agents/creative-writer/system_prompt.md +690 -0
- agents/creative-writer/worldbuilding.md +2049 -0
- agents/data-analyst/__init__.py +30 -0
- agents/data-analyst/agent.json +4 -0
- agents/data-analyst/data-visualization.md +992 -0
- agents/data-analyst/exploratory-data-analysis.md +1110 -0
- agents/data-analyst/pandas-data-manipulation.md +1081 -0
- agents/data-analyst/sql-query-optimization.md +881 -0
- agents/data-analyst/statistical-analysis.md +1118 -0
- agents/data-analyst/system_prompt.md +928 -0
- agents/default/__init__.py +0 -0
- agents/default/agent.json +4 -0
- agents/default/dead-code.md +794 -0
- agents/default/explore-agent-system.md +585 -0
- agents/default/system_prompt.md +1448 -0
- agents/kollabor/__init__.py +0 -0
- agents/kollabor/analyze-plugin-lifecycle.md +175 -0
- agents/kollabor/analyze-terminal-rendering.md +388 -0
- agents/kollabor/code-review.md +1092 -0
- agents/kollabor/debug-mcp-integration.md +521 -0
- agents/kollabor/debug-plugin-hooks.md +547 -0
- agents/kollabor/debugging.md +1102 -0
- agents/kollabor/dependency-management.md +1397 -0
- agents/kollabor/git-workflow.md +1099 -0
- agents/kollabor/inspect-llm-conversation.md +148 -0
- agents/kollabor/monitor-event-bus.md +558 -0
- agents/kollabor/profile-performance.md +576 -0
- agents/kollabor/refactoring.md +1454 -0
- agents/kollabor/system_prompt copy.md +1448 -0
- agents/kollabor/system_prompt.md +757 -0
- agents/kollabor/trace-command-execution.md +178 -0
- agents/kollabor/validate-config.md +879 -0
- agents/research/__init__.py +0 -0
- agents/research/agent.json +4 -0
- agents/research/architecture-mapping.md +1099 -0
- agents/research/codebase-analysis.md +1077 -0
- agents/research/dependency-audit.md +1027 -0
- agents/research/performance-profiling.md +1047 -0
- agents/research/security-review.md +1359 -0
- agents/research/system_prompt.md +492 -0
- agents/technical-writer/__init__.py +0 -0
- agents/technical-writer/agent.json +4 -0
- agents/technical-writer/api-documentation.md +2328 -0
- agents/technical-writer/changelog-management.md +1181 -0
- agents/technical-writer/readme-writing.md +1360 -0
- agents/technical-writer/style-guide.md +1410 -0
- agents/technical-writer/system_prompt.md +653 -0
- agents/technical-writer/tutorial-creation.md +1448 -0
- core/__init__.py +0 -2
- core/application.py +343 -88
- core/cli.py +229 -10
- core/commands/menu_renderer.py +463 -59
- core/commands/registry.py +14 -9
- core/commands/system_commands.py +2461 -14
- core/config/loader.py +151 -37
- core/config/service.py +18 -6
- core/events/bus.py +29 -9
- core/events/executor.py +205 -75
- core/events/models.py +27 -8
- core/fullscreen/command_integration.py +20 -24
- core/fullscreen/components/__init__.py +10 -1
- core/fullscreen/components/matrix_components.py +1 -2
- core/fullscreen/components/space_shooter_components.py +654 -0
- core/fullscreen/plugin.py +5 -0
- core/fullscreen/renderer.py +52 -13
- core/fullscreen/session.py +52 -15
- core/io/__init__.py +29 -5
- core/io/buffer_manager.py +6 -1
- core/io/config_status_view.py +7 -29
- core/io/core_status_views.py +267 -347
- core/io/input/__init__.py +25 -0
- core/io/input/command_mode_handler.py +711 -0
- core/io/input/display_controller.py +128 -0
- core/io/input/hook_registrar.py +286 -0
- core/io/input/input_loop_manager.py +421 -0
- core/io/input/key_press_handler.py +502 -0
- core/io/input/modal_controller.py +1011 -0
- core/io/input/paste_processor.py +339 -0
- core/io/input/status_modal_renderer.py +184 -0
- core/io/input_errors.py +5 -1
- core/io/input_handler.py +211 -2452
- core/io/key_parser.py +7 -0
- core/io/layout.py +15 -3
- core/io/message_coordinator.py +111 -2
- core/io/message_renderer.py +129 -4
- core/io/status_renderer.py +147 -607
- core/io/terminal_renderer.py +97 -51
- core/io/terminal_state.py +21 -4
- core/io/visual_effects.py +816 -165
- core/llm/agent_manager.py +1063 -0
- core/llm/api_adapters/__init__.py +44 -0
- core/llm/api_adapters/anthropic_adapter.py +432 -0
- core/llm/api_adapters/base.py +241 -0
- core/llm/api_adapters/openai_adapter.py +326 -0
- core/llm/api_communication_service.py +167 -113
- core/llm/conversation_logger.py +322 -16
- core/llm/conversation_manager.py +556 -30
- core/llm/file_operations_executor.py +84 -32
- core/llm/llm_service.py +934 -103
- core/llm/mcp_integration.py +541 -57
- core/llm/message_display_service.py +135 -18
- core/llm/plugin_sdk.py +1 -2
- core/llm/profile_manager.py +1183 -0
- core/llm/response_parser.py +274 -56
- core/llm/response_processor.py +16 -3
- core/llm/tool_executor.py +6 -1
- core/logging/__init__.py +2 -0
- core/logging/setup.py +34 -6
- core/models/resume.py +54 -0
- core/plugins/__init__.py +4 -2
- core/plugins/base.py +127 -0
- core/plugins/collector.py +23 -161
- core/plugins/discovery.py +37 -3
- core/plugins/factory.py +6 -12
- core/plugins/registry.py +5 -17
- core/ui/config_widgets.py +128 -28
- core/ui/live_modal_renderer.py +2 -1
- core/ui/modal_actions.py +5 -0
- core/ui/modal_overlay_renderer.py +0 -60
- core/ui/modal_renderer.py +268 -7
- core/ui/modal_state_manager.py +29 -4
- core/ui/widgets/base_widget.py +7 -0
- core/updates/__init__.py +10 -0
- core/updates/version_check_service.py +348 -0
- core/updates/version_comparator.py +103 -0
- core/utils/config_utils.py +685 -526
- core/utils/plugin_utils.py +1 -1
- core/utils/session_naming.py +111 -0
- fonts/LICENSE +21 -0
- fonts/README.md +46 -0
- fonts/SymbolsNerdFont-Regular.ttf +0 -0
- fonts/SymbolsNerdFontMono-Regular.ttf +0 -0
- fonts/__init__.py +44 -0
- {kollabor-0.4.9.dist-info → kollabor-0.4.15.dist-info}/METADATA +54 -4
- kollabor-0.4.15.dist-info/RECORD +228 -0
- {kollabor-0.4.9.dist-info → kollabor-0.4.15.dist-info}/top_level.txt +2 -0
- plugins/agent_orchestrator/__init__.py +39 -0
- plugins/agent_orchestrator/activity_monitor.py +181 -0
- plugins/agent_orchestrator/file_attacher.py +77 -0
- plugins/agent_orchestrator/message_injector.py +135 -0
- plugins/agent_orchestrator/models.py +48 -0
- plugins/agent_orchestrator/orchestrator.py +403 -0
- plugins/agent_orchestrator/plugin.py +976 -0
- plugins/agent_orchestrator/xml_parser.py +191 -0
- plugins/agent_orchestrator_plugin.py +9 -0
- plugins/enhanced_input/box_styles.py +1 -0
- plugins/enhanced_input/color_engine.py +19 -4
- plugins/enhanced_input/config.py +2 -2
- plugins/enhanced_input_plugin.py +61 -11
- plugins/fullscreen/__init__.py +6 -2
- plugins/fullscreen/example_plugin.py +1035 -222
- plugins/fullscreen/setup_wizard_plugin.py +592 -0
- plugins/fullscreen/space_shooter_plugin.py +131 -0
- plugins/hook_monitoring_plugin.py +436 -78
- plugins/query_enhancer_plugin.py +66 -30
- plugins/resume_conversation_plugin.py +1494 -0
- plugins/save_conversation_plugin.py +98 -32
- plugins/system_commands_plugin.py +70 -56
- plugins/tmux_plugin.py +154 -78
- plugins/workflow_enforcement_plugin.py +94 -92
- system_prompt/default.md +952 -886
- core/io/input_mode_manager.py +0 -402
- core/io/modal_interaction_handler.py +0 -315
- core/io/raw_input_processor.py +0 -946
- core/storage/__init__.py +0 -5
- core/storage/state_manager.py +0 -84
- core/ui/widget_integration.py +0 -222
- core/utils/key_reader.py +0 -171
- kollabor-0.4.9.dist-info/RECORD +0 -128
- {kollabor-0.4.9.dist-info → kollabor-0.4.15.dist-info}/WHEEL +0 -0
- {kollabor-0.4.9.dist-info → kollabor-0.4.15.dist-info}/entry_points.txt +0 -0
- {kollabor-0.4.9.dist-info → kollabor-0.4.15.dist-info}/licenses/LICENSE +0 -0
agents/coder/tdd.md
ADDED
@@ -0,0 +1,1367 @@

<!-- Test-Driven Development skill - write tests first, then implementation -->

tdd mode: TESTS FIRST, CODE SECOND

when this skill is active, you follow strict TDD discipline.
this is a comprehensive guide to professional test-driven development.


PHASE 0: ENVIRONMENT VERIFICATION

before writing ANY code, verify the testing environment is ready.


check testing framework

<terminal>python -m pytest --version</terminal>

if pytest not installed:
<terminal>pip install pytest pytest-cov pytest-mock pytest-asyncio</terminal>

verify installation:
<terminal>python -c "import pytest; print('pytest ready')"</terminal>


check project structure

<terminal>ls -la</terminal>
<terminal>ls -la tests/ 2>/dev/null || echo "no tests directory"</terminal>

if no tests directory:
<terminal>mkdir -p tests</terminal>
<create>
<file>tests/__init__.py</file>
<content>
"""Test suite for the project."""
</content>
</create>

<create>
<file>tests/conftest.py</file>
<content>
"""Pytest configuration and shared fixtures."""
import pytest

# add fixtures here as needed
</content>
</create>


check for existing test configuration

<terminal>cat pytest.ini 2>/dev/null || cat pyproject.toml 2>/dev/null | grep -A20 "\[tool.pytest"</terminal>

if no pytest config exists, create one:
<create>
<file>pytest.ini</file>
<content>
[pytest]
testpaths = tests
python_files = test_*.py
python_classes = Test*
python_functions = test_*
addopts = -v --tb=short
</content>
</create>


check for coverage tools

<terminal>python -m coverage --version 2>/dev/null || echo "coverage not installed"</terminal>

if not installed:
<terminal>pip install pytest-cov coverage</terminal>


check existing test patterns in codebase

<terminal>find . -name "test_*.py" -type f | head -10</terminal>
<terminal>grep -r "def test_" tests/ 2>/dev/null | head -20</terminal>
<terminal>grep -r "import pytest\|from pytest" tests/ 2>/dev/null | head -5</terminal>

understand existing patterns before adding new tests.
match the style already in use.


verify tests can run

<terminal>python -m pytest tests/ --collect-only 2>&1 | head -20</terminal>

if collection errors, fix them before proceeding.


PHASE 1: THE TDD CYCLE

the fundamental rhythm of TDD:

RED -> write a failing test
GREEN -> write minimal code to pass
REFACTOR -> clean up while tests stay green

this cycle repeats for every piece of functionality.
never skip steps. never write code before the test.


the red phase

purpose: define what the code SHOULD do before it exists.

requirements:
[1] test must fail
[2] test must fail for the RIGHT reason
[3] test must be specific and focused

write the test:
<create>
<file>tests/test_feature.py</file>
<content>
"""Tests for feature module."""
import pytest
from src.feature import calculate


def test_calculate_returns_sum_of_two_positive_integers():
    """Calculate should return the sum of two positive integers."""
    result = calculate(5, 3)
    assert result == 8
</content>
</create>

run and verify it fails:
<terminal>python -m pytest tests/test_feature.py -v</terminal>

expected output:
FAILED - ImportError or ModuleNotFoundError
this is correct - the module doesnt exist yet

if test passes on first run:
- the feature already exists (search for it)
- or your test is wrong (testing the wrong thing)
- NEVER proceed with a passing test in the red phase


the green phase

purpose: make the test pass with MINIMAL code.

requirements:
[1] write the simplest code that passes
[2] dont add features not tested
[3] dont optimize yet
[4] its okay to hardcode if test allows it

minimal implementation:
<create>
<file>src/feature.py</file>
<content>
"""Feature module."""


def calculate(a: int, b: int) -> int:
    """Calculate the sum of two integers."""
    return a + b
</content>
</create>

run and verify it passes:
<terminal>python -m pytest tests/test_feature.py -v</terminal>

expected: PASSED

if test still fails:
- read the error carefully
- fix the specific issue
- run again
- repeat until green


the refactor phase

purpose: improve code quality while tests stay green.

requirements:
[1] tests must pass before refactoring
[2] tests must pass after EVERY change
[3] dont add new functionality
[4] focus on readability, performance, design

refactoring checklist:
[ ] remove duplication
[ ] improve naming
[ ] extract methods/functions
[ ] simplify logic
[ ] add type hints
[ ] improve error messages

after each refactor step:
<terminal>python -m pytest tests/test_feature.py -v</terminal>

if tests fail during refactor:
- you broke something
- revert the last change
- try a smaller refactor step


PHASE 2: TEST STRUCTURE AND PATTERNS


the arrange-act-assert pattern

every test follows this structure:

def test_something():
    # ARRANGE - set up the test conditions
    user = User(name="alice", email="alice@example.com")
    service = UserService(db=mock_db)

    # ACT - perform the action being tested
    result = service.create_user(user)

    # ASSERT - verify the outcome
    assert result.id is not None
    assert result.name == "alice"

keep each section clearly separated.
some teams add blank lines between sections.


test naming conventions

tests should read like documentation:

pattern: test_<function>_<scenario>_<expected_result>

[ok] test_calculate_with_two_positive_numbers_returns_sum
[ok] test_calculate_with_negative_number_returns_correct_difference
[ok] test_login_with_invalid_password_raises_auth_error
[ok] test_create_user_with_duplicate_email_returns_conflict_error

[x] test_calculate
[x] test_calculate_1
[x] test_it_works
[x] test_functionality

the test name should tell you what broke without reading the code.


test file organization

tests/
    __init__.py
    conftest.py              # shared fixtures
    unit/                    # fast, isolated tests
        __init__.py
        test_models.py
        test_utils.py
        test_validators.py
    integration/             # tests with real dependencies
        __init__.py
        test_database.py
        test_api.py
    e2e/                     # end-to-end tests
        __init__.py
        test_workflows.py

naming mirrors source structure:
src/auth/login.py -> tests/unit/test_login.py
src/api/routes.py -> tests/integration/test_routes.py


test class organization

group related tests in classes:

class TestUserCreation:
    """Tests for user creation functionality."""

    def test_create_user_with_valid_data_succeeds(self):
        ...

    def test_create_user_with_missing_email_fails(self):
        ...

    def test_create_user_with_duplicate_email_fails(self):
        ...


class TestUserAuthentication:
    """Tests for user authentication functionality."""

    def test_login_with_valid_credentials_returns_token(self):
        ...

    def test_login_with_invalid_password_raises_error(self):
        ...


PHASE 3: FIXTURES AND TEST DATA


pytest fixtures

fixtures provide reusable test data and setup:

# conftest.py
import pytest
from src.models import User
from src.database import Database


@pytest.fixture
def sample_user():
    """Create a sample user for testing."""
    return User(
        id=1,
        name="Test User",
        email="test@example.com"
    )


@pytest.fixture
def db_connection():
    """Create a database connection for testing."""
    db = Database(":memory:")
    db.initialize()
    yield db
    db.close()


@pytest.fixture
def populated_db(db_connection, sample_user):
    """Database with sample data."""
    db_connection.insert(sample_user)
    return db_connection

using fixtures in tests:

def test_get_user_returns_user_data(populated_db, sample_user):
    result = populated_db.get_user(sample_user.id)
    assert result.name == sample_user.name


fixture scopes

@pytest.fixture(scope="function")   # default - new for each test
@pytest.fixture(scope="class")      # shared within test class
@pytest.fixture(scope="module")     # shared within test file
@pytest.fixture(scope="session")    # shared across all tests

use narrowest scope possible.
wider scopes risk test pollution.
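
a minimal sketch of how scopes play out (load_big_dataset is a hypothetical stand-in):

@pytest.fixture(scope="session")
def dataset():
    """Built once for the whole run - fast, but shared state."""
    return load_big_dataset()

@pytest.fixture
def scratch():
    """Function scope (default) - every test gets a fresh list."""
    return []

def test_first(scratch, dataset):
    scratch.append(1)
    assert len(scratch) == 1

def test_second(scratch, dataset):
    # fresh scratch list here; dataset is the SAME object test_first saw
    assert scratch == []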


factory fixtures

for creating multiple variations:

@pytest.fixture
def user_factory():
    """Factory for creating test users."""
    def _create_user(name="Test", email=None, role="user"):
        if email is None:
            email = f"{name.lower()}@example.com"
        return User(name=name, email=email, role=role)
    return _create_user


def test_admin_can_delete_users(user_factory):
    admin = user_factory(name="Admin", role="admin")
    regular = user_factory(name="Regular", role="user")
    # ... test logic


PHASE 4: MOCKING AND ISOLATION


when to mock

mock external dependencies:
[ok] database connections
[ok] API calls to external services
[ok] file system operations
[ok] time/date operations
[ok] random number generation
[ok] environment variables

dont mock:
[x] the code under test
[x] simple data structures
[x] pure functions with no side effects
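
for the environment-variable case above, pytest's built-in monkeypatch fixture is usually enough - a small sketch (load_api_key is hypothetical):

def test_reads_api_key_from_environment(monkeypatch):
    monkeypatch.setenv("API_KEY", "test-key-123")
    assert load_api_key() == "test-key-123"

def test_missing_api_key_raises(monkeypatch):
    monkeypatch.delenv("API_KEY", raising=False)
    with pytest.raises(KeyError):
        load_api_key()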


using pytest-mock

def test_send_email_calls_smtp_server(mocker):
    # arrange
    mock_smtp = mocker.patch("src.email.smtplib.SMTP")
    service = EmailService()

    # act
    service.send_email("test@example.com", "Hello", "World")

    # assert
    mock_smtp.return_value.sendmail.assert_called_once()


def test_get_data_handles_api_timeout(mocker):
    # arrange
    mock_request = mocker.patch("src.api.requests.get")
    mock_request.side_effect = requests.Timeout("Connection timed out")
    client = APIClient()

    # act & assert
    with pytest.raises(APIError) as exc_info:
        client.get_data()
    assert "timed out" in str(exc_info.value)


mocking return values

def test_get_user_returns_cached_data(mocker):
    mock_cache = mocker.patch("src.service.cache")
    mock_cache.get.return_value = {"id": 1, "name": "Cached User"}

    result = get_user(1)

    assert result["name"] == "Cached User"
    mock_cache.get.assert_called_once_with("user:1")


mocking with side effects

def test_retry_on_transient_failure(mocker):
    mock_api = mocker.patch("src.client.api_call")
    # fail twice, then succeed
    mock_api.side_effect = [
        ConnectionError("Failed"),
        ConnectionError("Failed again"),
        {"status": "success"}
    ]

    result = resilient_api_call()

    assert result["status"] == "success"
    assert mock_api.call_count == 3


PHASE 5: TESTING DIFFERENT SCENARIOS


testing exceptions

def test_divide_by_zero_raises_value_error():
    with pytest.raises(ValueError) as exc_info:
        divide(10, 0)
    assert "cannot divide by zero" in str(exc_info.value)


def test_invalid_email_raises_validation_error():
    with pytest.raises(ValidationError) as exc_info:
        validate_email("not-an-email")
    assert exc_info.value.field == "email"
    assert "invalid format" in exc_info.value.message


testing edge cases

comprehensive edge case checklist:

# empty inputs
def test_process_with_empty_list_returns_empty():
    assert process([]) == []

def test_process_with_empty_string_returns_empty():
    assert process("") == ""

# none/null inputs
def test_process_with_none_raises_type_error():
    with pytest.raises(TypeError):
        process(None)

# boundary values
def test_process_with_zero_returns_zero():
    assert process(0) == 0

def test_process_with_negative_one_handles_correctly():
    assert process(-1) == expected_negative_result

def test_process_with_max_int_doesnt_overflow():
    import sys
    result = process(sys.maxsize)
    assert result is not None

# single element
def test_process_with_single_item_list():
    assert process([1]) == [1]

# type variations
def test_process_with_float_converts_correctly():
    assert process(3.14) == expected_float_result

def test_process_with_string_number_converts():
    assert process("42") == 42


parametrized tests

test multiple inputs with one test function:

@pytest.mark.parametrize("input,expected", [
    (0, 0),
    (1, 1),
    (2, 4),
    (3, 9),
    (10, 100),
    (-5, 25),
])
def test_square_returns_correct_value(input, expected):
    assert square(input) == expected


@pytest.mark.parametrize("email,is_valid", [
    ("user@example.com", True),
    ("user.name@example.co.uk", True),
    ("user+tag@example.com", True),
    ("invalid", False),
    ("@example.com", False),
    ("user@", False),
    ("", False),
    (None, False),
])
def test_validate_email(email, is_valid):
    if is_valid:
        assert validate_email(email) is True
    else:
        assert validate_email(email) is False


parametrize with ids for clarity:

@pytest.mark.parametrize("status_code,should_retry", [
    pytest.param(200, False, id="success-no-retry"),
    pytest.param(429, True, id="rate-limited-retry"),
    pytest.param(500, True, id="server-error-retry"),
    pytest.param(400, False, id="client-error-no-retry"),
])
def test_should_retry_request(status_code, should_retry):
    assert should_retry_request(status_code) == should_retry


PHASE 6: ASYNC TESTING


testing async functions

install pytest-asyncio:
<terminal>pip install pytest-asyncio</terminal>

mark async tests:

import pytest


@pytest.mark.asyncio
async def test_fetch_data_returns_expected_result():
    result = await fetch_data("https://api.example.com/data")
    assert result["status"] == "success"


@pytest.mark.asyncio
async def test_concurrent_requests_complete():
    results = await asyncio.gather(
        fetch_data("url1"),
        fetch_data("url2"),
        fetch_data("url3")
    )
    assert len(results) == 3


async fixtures:

@pytest.fixture
async def async_client():
    client = AsyncAPIClient()
    await client.connect()
    yield client
    await client.disconnect()


@pytest.mark.asyncio
async def test_with_async_client(async_client):
    result = await async_client.get("/users")
    assert result.status_code == 200


mocking async functions:

@pytest.mark.asyncio
async def test_async_api_call(mocker):
    mock_fetch = mocker.patch("src.client.aiohttp.ClientSession.get")

    # create async mock response
    mock_response = mocker.AsyncMock()
    mock_response.json.return_value = {"data": "test"}
    mock_fetch.return_value.__aenter__.return_value = mock_response

    result = await fetch_json("https://api.example.com")

    assert result["data"] == "test"


PHASE 7: DATABASE TESTING


test database setup

@pytest.fixture(scope="function")
def test_db():
    """Create a fresh test database for each test."""
    # use in-memory SQLite for speed
    engine = create_engine("sqlite:///:memory:")
    Base.metadata.create_all(engine)
    Session = sessionmaker(bind=engine)
    session = Session()

    yield session

    session.close()


def test_create_user_persists_to_database(test_db):
    user = User(name="Alice", email="alice@example.com")
    test_db.add(user)
    test_db.commit()

    retrieved = test_db.query(User).filter_by(email="alice@example.com").first()
    assert retrieved is not None
    assert retrieved.name == "Alice"


transaction rollback pattern

@pytest.fixture
def db_session(test_db):
    """Wrap each test in a transaction that rolls back."""
    test_db.begin_nested()

    yield test_db

    test_db.rollback()


testing database constraints

def test_duplicate_email_raises_integrity_error(test_db):
    user1 = User(name="Alice", email="same@example.com")
    user2 = User(name="Bob", email="same@example.com")

    test_db.add(user1)
    test_db.commit()

    test_db.add(user2)
    with pytest.raises(IntegrityError):
        test_db.commit()


PHASE 8: API TESTING


testing with test client

import pytest
from fastapi.testclient import TestClient
from src.main import app


@pytest.fixture
def client():
    return TestClient(app)


def test_get_users_returns_list(client):
    response = client.get("/api/users")
    assert response.status_code == 200
    assert isinstance(response.json(), list)


def test_create_user_returns_created(client):
    response = client.post(
        "/api/users",
        json={"name": "Alice", "email": "alice@example.com"}
    )
    assert response.status_code == 201
    assert response.json()["name"] == "Alice"


def test_get_nonexistent_user_returns_404(client):
    response = client.get("/api/users/99999")
    assert response.status_code == 404


testing authentication

@pytest.fixture
def auth_headers(client):
    """Get authentication headers for testing."""
    response = client.post(
        "/api/auth/login",
        json={"username": "testuser", "password": "testpass"}
    )
    token = response.json()["token"]
    return {"Authorization": f"Bearer {token}"}


def test_protected_endpoint_requires_auth(client):
    response = client.get("/api/protected")
    assert response.status_code == 401


def test_protected_endpoint_works_with_auth(client, auth_headers):
    response = client.get("/api/protected", headers=auth_headers)
    assert response.status_code == 200


testing error responses

def test_invalid_json_returns_400(client):
    response = client.post(
        "/api/users",
        data="not json",
        headers={"Content-Type": "application/json"}
    )
    assert response.status_code == 400


def test_missing_required_field_returns_422(client):
    response = client.post(
        "/api/users",
        json={"name": "Alice"}  # missing email
    )
    assert response.status_code == 422
    assert "email" in response.json()["detail"][0]["loc"]


PHASE 9: TEST COVERAGE


running coverage

<terminal>python -m pytest tests/ --cov=src --cov-report=term-missing</terminal>

<terminal>python -m pytest tests/ --cov=src --cov-report=html</terminal>

view html report:
<terminal>open htmlcov/index.html</terminal>


coverage thresholds

add to pytest.ini:
[pytest]
addopts = --cov=src --cov-fail-under=80

fail build if coverage drops below 80%.
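
the equivalent lives under [tool.pytest.ini_options] if the project keeps config in pyproject.toml instead:

[tool.pytest.ini_options]
testpaths = ["tests"]
addopts = "--cov=src --cov-fail-under=80"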


what coverage tells you

[ok] 100% coverage = all lines executed during tests
[warn] 100% coverage != all scenarios tested
[warn] 100% coverage != no bugs

coverage is a floor, not a ceiling.
high coverage with bad tests is worse than low coverage with good tests.
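
a concrete illustration of the gap (hypothetical function):

def apply_discount(price, code):
    if code == "SAVE10":
        return price * 0.9
    return price

def test_apply_discount_runs():
    apply_discount(100, "SAVE10")
    apply_discount(100, "OTHER")

# both branches execute -> 100% line coverage,
# yet not a single assert checks the returned price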


what to focus on

prioritize coverage for:
- business logic
- error handling paths
- edge cases
- security-sensitive code

less critical:
- simple getters/setters
- configuration loading
- logging statements


PHASE 10: TEST QUALITY PATTERNS


one assertion per test (mostly)

# good - one logical assertion
def test_create_user_returns_correct_id():
    user = create_user("Alice")
    assert user.id is not None


# good - multiple assertions about one outcome
def test_create_user_returns_complete_user():
    user = create_user("Alice")
    assert user.id is not None
    assert user.name == "Alice"
    assert user.created_at is not None


# bad - testing multiple behaviors
def test_user_operations():
    user = create_user("Alice")
    assert user.id is not None

    updated = update_user(user.id, name="Bob")
    assert updated.name == "Bob"

    delete_user(user.id)
    assert get_user(user.id) is None


test isolation

each test must be independent:

# bad - tests depend on each other
class TestUserWorkflow:
    user_id = None

    def test_create_user(self):
        user = create_user("Alice")
        TestUserWorkflow.user_id = user.id  # shared state!

    def test_get_user(self):
        user = get_user(TestUserWorkflow.user_id)  # depends on first test!
        assert user.name == "Alice"


# good - each test is independent
class TestUserWorkflow:

    def test_create_user(self):
        user = create_user("Alice")
        assert user.id is not None

    def test_get_user(self, sample_user):  # fixture provides data
        user = get_user(sample_user.id)
        assert user.name == sample_user.name


avoid test pollution

@pytest.fixture(autouse=True)
def clean_environment():
    """Reset environment before each test."""
    os.environ.pop("API_KEY", None)
    yield
    os.environ.pop("API_KEY", None)


@pytest.fixture(autouse=True)
def reset_singletons():
    """Reset singleton instances between tests."""
    Config._instance = None
    Cache._instance = None
    yield


PHASE 11: DEBUGGING FAILING TESTS


reading test output

FAILED tests/test_user.py::test_create_user - AssertionError: assert None == 1

breakdown:
- file: tests/test_user.py
- test: test_create_user
- error: AssertionError
- detail: expected 1, got None


verbose output

<terminal>python -m pytest tests/test_user.py::test_create_user -v</terminal>

<terminal>python -m pytest tests/test_user.py::test_create_user -vv</terminal>

<terminal>python -m pytest tests/test_user.py::test_create_user -vvv</terminal>


print debugging

def test_something():
    result = complex_function()
    print(f"DEBUG: result = {result}")  # shows with -s flag
    assert result == expected

run with:
<terminal>python -m pytest tests/test_user.py -s</terminal>


pdb debugging

def test_something():
    result = complex_function()
    import pdb; pdb.set_trace()  # drops into debugger
    assert result == expected

run with:
<terminal>python -m pytest tests/test_user.py -s --pdb</terminal>


run single test

<terminal>python -m pytest tests/test_user.py::test_create_user -v</terminal>

<terminal>python -m pytest tests/test_user.py::TestUserCreation::test_create_user -v</terminal>


run tests matching pattern

<terminal>python -m pytest -k "create" -v</terminal>

<terminal>python -m pytest -k "create and not delete" -v</terminal>
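
two more built-in flags worth knowing when iterating on failures:

<terminal>python -m pytest --lf -v</terminal>

<terminal>python -m pytest --ff -v</terminal>

--lf reruns only the tests that failed last time; --ff runs those first, then the rest.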


PHASE 12: REFACTORING WITH TESTS


the safety net

tests enable fearless refactoring:
[1] run all tests - confirm green
[2] make one refactoring change
[3] run all tests - confirm still green
[4] repeat

if tests fail after refactor:
- you broke something
- revert immediately
- try smaller change
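
one possible rhythm, assuming git: commit every time you get back to green, so a revert is always one command away (the commit message is illustrative):

<terminal>python -m pytest tests/ -q && git commit -am "refactor: extract validation"</terminal>

if a step goes red, discard it:
<terminal>git checkout -- .</terminal>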


refactoring patterns

extract function:

# before
def process_order(order):
    # validate
    if not order.items:
        raise ValueError("Empty order")
    if order.total < 0:
        raise ValueError("Invalid total")
    # ... more validation ...

    # process
    for item in order.items:
        # ... processing ...

# after
def process_order(order):
    validate_order(order)
    process_items(order.items)

def validate_order(order):
    if not order.items:
        raise ValueError("Empty order")
    if order.total < 0:
        raise ValueError("Invalid total")

def process_items(items):
    for item in items:
        # ... processing ...

tests should still pass after extraction.


rename for clarity:

# before
def proc(d):
    return d["v"] * d["q"]

# after
def calculate_line_total(line_item):
    return line_item["price"] * line_item["quantity"]

update tests to use new name.
tests verify behavior unchanged.


PHASE 13: TDD FOR BUG FIXES


bug fix workflow

[1] reproduce the bug manually
[2] write a test that fails due to the bug
[3] verify test fails for the right reason
[4] fix the bug
[5] verify test passes
[6] verify no other tests broke


example bug fix

bug report: "negative quantities allowed in orders"

step 1: write failing test

def test_order_rejects_negative_quantity():
    with pytest.raises(ValidationError):
        create_order_item(product_id=1, quantity=-5)

<terminal>python -m pytest tests/test_order.py::test_order_rejects_negative_quantity -v</terminal>

expected: FAIL (the bug exists, so negative is currently allowed)

step 2: fix the bug

<read><file>src/orders.py</file></read>

<edit>
<file>src/orders.py</file>
<find>
def create_order_item(product_id: int, quantity: int):
    return OrderItem(product_id=product_id, quantity=quantity)
</find>
<replace>
def create_order_item(product_id: int, quantity: int):
    if quantity <= 0:
        raise ValidationError("Quantity must be positive")
    return OrderItem(product_id=product_id, quantity=quantity)
</replace>
</edit>

step 3: verify fix

<terminal>python -m pytest tests/test_order.py::test_order_rejects_negative_quantity -v</terminal>

expected: PASS

step 4: run full test suite

<terminal>python -m pytest tests/ -v</terminal>

ensure fix didnt break anything else.


PHASE 14: TESTING EXTERNAL SERVICES


mocking external APIs

def test_get_weather_returns_temperature(mocker):
    mock_response = mocker.Mock()
    mock_response.status_code = 200
    mock_response.json.return_value = {
        "temperature": 72,
        "conditions": "sunny"
    }

    mocker.patch("requests.get", return_value=mock_response)

    result = get_weather("New York")

    assert result["temperature"] == 72


testing with VCR (recording HTTP interactions)

pip install pytest-vcr

@pytest.mark.vcr()
def test_real_api_call():
    """First run hits real API and records, subsequent runs use recording."""
    result = fetch_from_external_api()
    assert result is not None


testing timeouts and failures

def test_api_timeout_raises_error(mocker):
    mocker.patch("requests.get", side_effect=requests.Timeout())

    with pytest.raises(ExternalServiceError):
        fetch_data_from_api()


def test_api_500_error_triggers_retry(mocker):
    mock_get = mocker.patch("requests.get")
    mock_get.side_effect = [
        mocker.Mock(status_code=500),
        mocker.Mock(status_code=500),
        mocker.Mock(status_code=200, json=lambda: {"data": "success"})
    ]

    result = fetch_with_retry()

    assert result["data"] == "success"
    assert mock_get.call_count == 3


PHASE 15: PERFORMANCE TESTING


timing tests

import time

def test_search_completes_within_threshold():
    start = time.time()
    result = search("query")
    elapsed = time.time() - start

    assert elapsed < 1.0  # must complete within 1 second
    assert len(result) > 0
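
a note on the clock: time.time() follows the wall clock and can jump; time.perf_counter() is monotonic and meant for measuring elapsed time - same test, safer clock:

def test_search_completes_within_threshold():
    start = time.perf_counter()
    result = search("query")
    elapsed = time.perf_counter() - start

    assert elapsed < 1.0
    assert len(result) > 0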


using pytest-benchmark

pip install pytest-benchmark

def test_sort_performance(benchmark):
    data = list(range(10000, 0, -1))
    result = benchmark(lambda: custom_sort(data.copy()))
    assert result == sorted(data)


memory testing

pip install pytest-memray

@pytest.mark.limit_memory("100 MB")
def test_large_data_processing():
    result = process_large_dataset(generate_large_data())
    assert result is not None


PHASE 16: CONTINUOUS INTEGRATION


github actions example

# .github/workflows/tests.yml
name: Tests

on: [push, pull_request]

jobs:
  test:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v3

      - name: Set up Python
        uses: actions/setup-python@v4
        with:
          python-version: '3.11'

      - name: Install dependencies
        run: |
          pip install -r requirements.txt
          pip install pytest pytest-cov

      - name: Run tests
        run: |
          python -m pytest tests/ --cov=src --cov-report=xml

      - name: Upload coverage
        uses: codecov/codecov-action@v3


pre-commit hooks

# .pre-commit-config.yaml
repos:
  - repo: local
    hooks:
      - id: pytest
        name: pytest
        entry: python -m pytest tests/ -x -q
        language: system
        pass_filenames: false
        always_run: true
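
to activate the hook, pre-commit itself has to be installed once per clone:

<terminal>pip install pre-commit</terminal>
<terminal>pre-commit install</terminal>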


PHASE 17: COMMON PITFALLS


pitfall: testing implementation, not behavior

# bad - tests HOW it works
def test_cache_uses_dict():
    cache = Cache()
    assert isinstance(cache._storage, dict)

# good - tests WHAT it does
def test_cache_stores_and_retrieves_values():
    cache = Cache()
    cache.set("key", "value")
    assert cache.get("key") == "value"


pitfall: over-mocking

# bad - mocking the thing youre testing
def test_calculate(mocker):
    mocker.patch("src.math.add", return_value=5)
    assert calculate(2, 3) == 5  # youre not testing calculate!

# good - test real implementation
def test_calculate():
    assert calculate(2, 3) == 5


pitfall: flaky tests

causes:
- time-dependent code
- random data
- external dependencies
- test order dependencies
- shared state

fixes:
- mock time/random (sketch below)
- isolate tests
- use fixtures for setup
- run tests in random order to find issues:
<terminal>python -m pytest tests/ --random-order</terminal>
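
a sketch of the time/random fix (issue_token and shuffled_deck are hypothetical; note that --random-order comes from the pytest-random-order plugin, not core pytest):

def test_token_expiry_uses_fixed_clock(mocker):
    mocker.patch("src.tokens.time.time", return_value=1_700_000_000)
    token = issue_token(ttl=60)
    assert token.expires_at == 1_700_000_060

def test_shuffle_is_deterministic_with_seed():
    import random
    random.seed(42)
    first = shuffled_deck()
    random.seed(42)
    assert shuffled_deck() == first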


pitfall: slow tests

causes:
- real database connections
- real API calls
- file system operations
- sleep() calls

fixes:
- use in-memory databases
- mock external calls
- use tmpdir fixtures (sketch below)
- mock time.sleep
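
for the tmpdir fix, pytest's built-in tmp_path fixture hands each test its own temp directory as a pathlib.Path (write_report is a hypothetical function under test):

def test_write_report_creates_file(tmp_path):
    out_file = tmp_path / "report.txt"
    write_report(out_file)
    assert out_file.exists()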


pitfall: testing too much at once

# bad - integration test pretending to be unit test
def test_user_signup():
    result = signup("user@example.com", "password")
    assert result.id is not None
    assert result.email == "user@example.com"
    assert result.password_hash is not None
    assert result.created_at is not None
    assert email_sent_to("user@example.com")
    assert user_in_database(result.id)
    assert audit_log_contains("user_created")

# good - focused unit test
def test_create_user_sets_email():
    user = create_user(email="user@example.com", password="password")
    assert user.email == "user@example.com"


PHASE 18: TDD RULES (STRICT MODE)


while this skill is active, these rules are MANDATORY:

[1] NEVER write implementation before test
    if you catch yourself writing code first, stop
    write the test, then the code

[2] run tests after EVERY change
    no exceptions
    <terminal>python -m pytest tests/test_current.py -v</terminal>

[3] write ONE test at a time
    resist the urge to write all tests upfront
    red-green-refactor, one cycle at a time

[4] keep tests simple and focused
    if a test needs extensive setup, something is wrong with the design

[5] refactor only when tests pass
    never refactor while red
    get to green first, then clean up

[6] if test passes on first run, QUESTION IT
    a passing test in the red phase means:
    - the feature exists (find it)
    - the test is wrong (fix it)
    - you wrote code before the test (start over)

[7] tests are production code
    apply same quality standards
    refactor tests too


PHASE 19: TDD SESSION CHECKLIST


before starting:

[ ] pytest installed and working
[ ] tests directory exists
[ ] conftest.py with common fixtures
[ ] pytest.ini or pyproject.toml configured
[ ] coverage tools installed
[ ] existing tests passing

for each feature:

[ ] understand the requirement
[ ] write ONE failing test
[ ] verify it fails for the right reason
[ ] write minimal code to pass
[ ] verify it passes
[ ] refactor if needed
[ ] verify still passes
[ ] commit

after completing feature:

[ ] run full test suite
[ ] check coverage
[ ] review test quality
[ ] commit with message referencing feature


FINAL REMINDERS


tdd is a discipline

it feels slow at first.
it becomes fast with practice.
the tests you write today save hours tomorrow.


tests are documentation

tests show how code should be used.
tests show what behavior is expected.
tests show edge cases and error handling.


when in doubt

write a test.
if you cant write a test, you dont understand the requirement.
the test forces clarity.


the goal

working software with high confidence.
fearless refactoring.
living documentation.
fewer bugs in production.

now go write some failing tests.