mindsystem-cc 4.0.0 → 4.0.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +246 -220
- package/agents/ms-debugger.md +2 -3
- package/agents/ms-verifier.md +4 -4
- package/bin/install.js +9 -1
- package/commands/ms/add-todo.md +1 -1
- package/commands/ms/audit-milestone.md +1 -1
- package/commands/ms/config.md +8 -10
- package/commands/ms/debug.md +1 -1
- package/commands/ms/design-phase.md +1 -1
- package/commands/ms/execute-phase.md +1 -1
- package/commands/ms/help.md +31 -22
- package/commands/ms/plan-phase.md +3 -17
- package/commands/ms/progress.md +9 -5
- package/commands/ms/research-phase.md +1 -1
- package/mindsystem/references/continuation-format.md +1 -1
- package/mindsystem/references/plan-format.md +1 -1
- package/mindsystem/references/plan-risk-assessment.md +1 -2
- package/mindsystem/references/routing/gap-closure-routing.md +44 -18
- package/mindsystem/templates/debug-subagent-prompt.md +0 -5
- package/mindsystem/workflows/adhoc.md +5 -4
- package/mindsystem/workflows/compound.md +2 -2
- package/mindsystem/workflows/discuss-phase.md +1 -1
- package/mindsystem/workflows/execute-phase.md +3 -37
- package/mindsystem/workflows/execute-plan.md +1 -1
- package/mindsystem/workflows/mockup-generation.md +1 -1
- package/mindsystem/workflows/plan-phase.md +3 -139
- package/package.json +5 -2
- package/scripts/ms-tools.py +202 -0
- package/commands/ms/check-phase.md +0 -134
- package/mindsystem/workflows/diagnose-issues.md +0 -238
- package/scripts/__pycache__/ms-tools.cpython-314.pyc +0 -0
- package/scripts/__pycache__/test_ms_tools.cpython-314-pytest-9.0.2.pyc +0 -0
- package/scripts/fixtures/scan-context/.planning/ROADMAP.md +0 -16
- package/scripts/fixtures/scan-context/.planning/adhoc/20260220-fix-token-SUMMARY.md +0 -12
- package/scripts/fixtures/scan-context/.planning/adhoc/20260225-refactor-api/adhoc-01-SUMMARY.md +0 -39
- package/scripts/fixtures/scan-context/.planning/config.json +0 -3
- package/scripts/fixtures/scan-context/.planning/debug/resolved/token-bug.md +0 -11
- package/scripts/fixtures/scan-context/.planning/knowledge/auth.md +0 -11
- package/scripts/fixtures/scan-context/.planning/phases/02-infra/02-1-SUMMARY.md +0 -20
- package/scripts/fixtures/scan-context/.planning/phases/04-setup/04-1-SUMMARY.md +0 -21
- package/scripts/fixtures/scan-context/.planning/phases/05-auth/05-1-SUMMARY.md +0 -28
- package/scripts/fixtures/scan-context/.planning/todos/add-logout.md +0 -10
- package/scripts/fixtures/scan-context/.planning/todos/done/setup-db.md +0 -10
- package/scripts/fixtures/scan-context/expected-output.json +0 -271
- package/scripts/test_ms_tools.py +0 -1701
package/scripts/test_ms_tools.py
DELETED
|
@@ -1,1701 +0,0 @@
|
|
|
1
|
-
"""Tests for ms-tools.py pure logic layer and scan-planning-context integration."""
|
|
2
|
-
|
|
3
|
-
import argparse
|
|
4
|
-
import datetime
|
|
5
|
-
import importlib.util
|
|
6
|
-
import io
|
|
7
|
-
import json
|
|
8
|
-
from pathlib import Path
|
|
9
|
-
from unittest import mock
|
|
10
|
-
|
|
11
|
-
import pytest
|
|
12
|
-
|
|
13
|
-
# ---------------------------------------------------------------------------
|
|
14
|
-
# Import ms-tools.py (hyphenated filename requires importlib)
|
|
15
|
-
# ---------------------------------------------------------------------------
|
|
16
|
-
|
|
17
|
-
_spec = importlib.util.spec_from_file_location(
|
|
18
|
-
"ms_tools", Path(__file__).parent / "ms-tools.py"
|
|
19
|
-
)
|
|
20
|
-
_mod = importlib.util.module_from_spec(_spec)
|
|
21
|
-
_spec.loader.exec_module(_mod)
|
|
22
|
-
|
|
23
|
-
slugify = _mod.slugify
|
|
24
|
-
normalize_phase = _mod.normalize_phase
|
|
25
|
-
in_range = _mod.in_range
|
|
26
|
-
parse_frontmatter = _mod.parse_frontmatter
|
|
27
|
-
build_exclude_pathspecs = _mod.build_exclude_pathspecs
|
|
28
|
-
PATCH_EXCLUSIONS = _mod.PATCH_EXCLUSIONS
|
|
29
|
-
_extract_phase_number = _mod._extract_phase_number
|
|
30
|
-
_is_adjacent_phase = _mod._is_adjacent_phase
|
|
31
|
-
_score_summary = _mod._score_summary
|
|
32
|
-
_resolve_transitive_requires = _mod._resolve_transitive_requires
|
|
33
|
-
_aggregate_from_summaries = _mod._aggregate_from_summaries
|
|
34
|
-
_has_readiness_section = _mod._has_readiness_section
|
|
35
|
-
_scan_summaries = _mod._scan_summaries
|
|
36
|
-
_scan_debug_docs = _mod._scan_debug_docs
|
|
37
|
-
_scan_adhoc_summaries = _mod._scan_adhoc_summaries
|
|
38
|
-
_scan_todos = _mod._scan_todos
|
|
39
|
-
_scan_knowledge_files = _mod._scan_knowledge_files
|
|
40
|
-
_detect_versioned_milestone_dirs = _mod._detect_versioned_milestone_dirs
|
|
41
|
-
_parse_milestone_name_mapping = _mod._parse_milestone_name_mapping
|
|
42
|
-
_SafeEncoder = _mod._SafeEncoder
|
|
43
|
-
cmd_set_last_command = _mod.cmd_set_last_command
|
|
44
|
-
cmd_gather_milestone_stats = _mod.cmd_gather_milestone_stats
|
|
45
|
-
|
|
46
|
-
# ---------------------------------------------------------------------------
|
|
47
|
-
# Fixtures
|
|
48
|
-
# ---------------------------------------------------------------------------
|
|
49
|
-
|
|
50
|
-
FIXTURE_PLANNING = Path(__file__).parent / "fixtures" / "scan-context" / ".planning"
|
|
51
|
-
|
|
52
|
-
# Set to True, run once, review expected-output.json, then set back to False.
|
|
53
|
-
REGENERATE_GOLDEN = False
|
|
54
|
-
|
|
55
|
-
|
|
56
|
-
# ===================================================================
|
|
57
|
-
# Part 1: Pure Function Unit Tests
|
|
58
|
-
# ===================================================================
|
|
59
|
-
|
|
60
|
-
|
|
61
|
-
class TestSlugify:
|
|
62
|
-
def test_basic_name(self):
|
|
63
|
-
assert slugify("Push Notifications") == "push-notifications"
|
|
64
|
-
|
|
65
|
-
def test_ampersand_stripped(self):
|
|
66
|
-
assert slugify("Auth & Payments") == "auth-payments"
|
|
67
|
-
|
|
68
|
-
def test_uppercase(self):
|
|
69
|
-
assert slugify("MVP") == "mvp"
|
|
70
|
-
|
|
71
|
-
def test_underscores(self):
|
|
72
|
-
assert slugify("push_notifications") == "push-notifications"
|
|
73
|
-
|
|
74
|
-
def test_consecutive_hyphens(self):
|
|
75
|
-
assert slugify("auth -- payments") == "auth-payments"
|
|
76
|
-
|
|
77
|
-
def test_leading_trailing_hyphens(self):
|
|
78
|
-
assert slugify("-hello-world-") == "hello-world"
|
|
79
|
-
|
|
80
|
-
def test_special_characters(self):
|
|
81
|
-
assert slugify("v2.0 New Features!") == "v20-new-features"
|
|
82
|
-
|
|
83
|
-
def test_empty_string(self):
|
|
84
|
-
assert slugify("") == ""
|
|
85
|
-
|
|
86
|
-
def test_already_slug(self):
|
|
87
|
-
assert slugify("push-notifications") == "push-notifications"
|
|
88
|
-
|
|
89
|
-
|
|
90
|
-
class TestNormalizePhase:
|
|
91
|
-
def test_single_digit(self):
|
|
92
|
-
assert normalize_phase("5") == "05"
|
|
93
|
-
|
|
94
|
-
def test_decimal(self):
|
|
95
|
-
assert normalize_phase("2.1") == "02.1"
|
|
96
|
-
|
|
97
|
-
def test_already_padded(self):
|
|
98
|
-
assert normalize_phase("02.1") == "02.1"
|
|
99
|
-
|
|
100
|
-
def test_empty_string(self):
|
|
101
|
-
assert normalize_phase("") == ""
|
|
102
|
-
|
|
103
|
-
def test_non_numeric(self):
|
|
104
|
-
assert normalize_phase("abc") == "abc"
|
|
105
|
-
|
|
106
|
-
def test_leading_zero_large(self):
|
|
107
|
-
assert normalize_phase("007") == "07"
|
|
108
|
-
|
|
109
|
-
def test_two_digit(self):
|
|
110
|
-
assert normalize_phase("12") == "12"
|
|
111
|
-
|
|
112
|
-
def test_zero(self):
|
|
113
|
-
assert normalize_phase("0") == "00"
|
|
114
|
-
|
|
115
|
-
def test_already_padded_integer(self):
|
|
116
|
-
assert normalize_phase("05") == "05"
|
|
117
|
-
|
|
118
|
-
|
|
119
|
-
class TestInRange:
|
|
120
|
-
def test_inside(self):
|
|
121
|
-
assert in_range("03", 1, 5) is True
|
|
122
|
-
|
|
123
|
-
def test_at_start_boundary(self):
|
|
124
|
-
assert in_range("01", 1, 5) is True
|
|
125
|
-
|
|
126
|
-
def test_at_end_boundary(self):
|
|
127
|
-
assert in_range("05", 1, 5) is True
|
|
128
|
-
|
|
129
|
-
def test_decimal_inside(self):
|
|
130
|
-
assert in_range("05.1", 3, 5) is True
|
|
131
|
-
|
|
132
|
-
def test_decimal_outside(self):
|
|
133
|
-
assert in_range("06.0", 3, 5) is False
|
|
134
|
-
|
|
135
|
-
def test_below_range(self):
|
|
136
|
-
assert in_range("00", 1, 5) is False
|
|
137
|
-
|
|
138
|
-
def test_above_range(self):
|
|
139
|
-
assert in_range("10", 1, 5) is False
|
|
140
|
-
|
|
141
|
-
def test_non_numeric(self):
|
|
142
|
-
assert in_range("abc", 1, 5) is False
|
|
143
|
-
|
|
144
|
-
def test_decimal_at_upper_edge(self):
|
|
145
|
-
# 5.999 is within range end+0.999
|
|
146
|
-
assert in_range("05.9", 3, 5) is True
|
|
147
|
-
|
|
148
|
-
|
|
149
|
-
class TestExtractPhaseNumber:
|
|
150
|
-
def test_phase_with_name(self):
|
|
151
|
-
assert _extract_phase_number("05-auth") == 5
|
|
152
|
-
|
|
153
|
-
def test_decimal_phase(self):
|
|
154
|
-
assert _extract_phase_number("02.1-setup") == 2
|
|
155
|
-
|
|
156
|
-
def test_empty_string(self):
|
|
157
|
-
assert _extract_phase_number("") is None
|
|
158
|
-
|
|
159
|
-
def test_non_numeric(self):
|
|
160
|
-
assert _extract_phase_number("auth") is None
|
|
161
|
-
|
|
162
|
-
def test_bare_number(self):
|
|
163
|
-
assert _extract_phase_number("07") == 7
|
|
164
|
-
|
|
165
|
-
|
|
166
|
-
class TestIsAdjacentPhase:
|
|
167
|
-
def test_n_minus_1(self):
|
|
168
|
-
assert _is_adjacent_phase(6, 5) is True
|
|
169
|
-
|
|
170
|
-
def test_n_minus_2(self):
|
|
171
|
-
assert _is_adjacent_phase(6, 4) is True
|
|
172
|
-
|
|
173
|
-
def test_equal(self):
|
|
174
|
-
assert _is_adjacent_phase(6, 6) is False
|
|
175
|
-
|
|
176
|
-
def test_n_minus_3(self):
|
|
177
|
-
assert _is_adjacent_phase(6, 3) is False
|
|
178
|
-
|
|
179
|
-
def test_n_plus_1(self):
|
|
180
|
-
assert _is_adjacent_phase(6, 7) is False
|
|
181
|
-
|
|
182
|
-
|
|
183
|
-
class TestScoreSummary:
|
|
184
|
-
"""Test _score_summary relevance scoring."""
|
|
185
|
-
|
|
186
|
-
def test_high_via_affects(self):
|
|
187
|
-
fm = {"affects": ["06-ui"], "subsystem": "", "requires": [], "tags": [], "phase": "05-auth"}
|
|
188
|
-
score, reasons = _score_summary(fm, "06", 6, [], [])
|
|
189
|
-
assert score == "HIGH"
|
|
190
|
-
assert any("affects" in r for r in reasons)
|
|
191
|
-
|
|
192
|
-
def test_high_via_subsystem(self):
|
|
193
|
-
fm = {"affects": [], "subsystem": "auth", "requires": [], "tags": [], "phase": "03"}
|
|
194
|
-
score, reasons = _score_summary(fm, "06", 6, ["auth"], [])
|
|
195
|
-
assert score == "HIGH"
|
|
196
|
-
assert any("subsystem" in r for r in reasons)
|
|
197
|
-
|
|
198
|
-
def test_high_via_requires(self):
|
|
199
|
-
fm = {"affects": [], "subsystem": "", "requires": [{"phase": "06-ui"}], "tags": [], "phase": "03"}
|
|
200
|
-
score, reasons = _score_summary(fm, "06", 6, [], [])
|
|
201
|
-
assert score == "HIGH"
|
|
202
|
-
assert any("requires" in r for r in reasons)
|
|
203
|
-
|
|
204
|
-
def test_medium_via_tags(self):
|
|
205
|
-
fm = {"affects": [], "subsystem": "", "requires": [], "tags": ["jwt", "config"], "phase": "01"}
|
|
206
|
-
score, reasons = _score_summary(fm, "06", 6, [], ["jwt"])
|
|
207
|
-
assert score == "MEDIUM"
|
|
208
|
-
assert any("tags" in r for r in reasons)
|
|
209
|
-
|
|
210
|
-
def test_medium_via_adjacent(self):
|
|
211
|
-
fm = {"affects": [], "subsystem": "", "requires": [], "tags": [], "phase": "05-auth"}
|
|
212
|
-
score, reasons = _score_summary(fm, "06", 6, [], [])
|
|
213
|
-
assert score == "MEDIUM"
|
|
214
|
-
assert any("adjacent" in r for r in reasons)
|
|
215
|
-
|
|
216
|
-
def test_low_default(self):
|
|
217
|
-
fm = {"affects": [], "subsystem": "database", "requires": [], "tags": ["postgres"], "phase": "02-infra"}
|
|
218
|
-
score, reasons = _score_summary(fm, "06", 6, [], [])
|
|
219
|
-
assert score == "LOW"
|
|
220
|
-
|
|
221
|
-
def test_string_affects_coercion(self):
|
|
222
|
-
"""affects as string instead of list."""
|
|
223
|
-
fm = {"affects": "06-ui", "subsystem": "", "requires": [], "tags": [], "phase": "05"}
|
|
224
|
-
score, _ = _score_summary(fm, "06", 6, [], [])
|
|
225
|
-
assert score == "HIGH"
|
|
226
|
-
|
|
227
|
-
def test_none_affects(self):
|
|
228
|
-
fm = {"affects": None, "subsystem": "", "requires": None, "tags": None, "phase": "01"}
|
|
229
|
-
score, _ = _score_summary(fm, "06", 6, [], [])
|
|
230
|
-
assert score == "LOW"
|
|
231
|
-
|
|
232
|
-
def test_high_trumps_medium(self):
|
|
233
|
-
"""When both HIGH and MEDIUM signals present, result is HIGH."""
|
|
234
|
-
fm = {"affects": ["06-ui"], "subsystem": "", "requires": [], "tags": ["jwt"], "phase": "05-auth"}
|
|
235
|
-
score, reasons = _score_summary(fm, "06", 6, [], ["jwt"])
|
|
236
|
-
assert score == "HIGH"
|
|
237
|
-
# Both reasons should be present
|
|
238
|
-
assert any("affects" in r for r in reasons)
|
|
239
|
-
assert any("tags" in r for r in reasons)
|
|
240
|
-
|
|
241
|
-
def test_requires_string_in_list(self):
|
|
242
|
-
"""requires as list of strings instead of list of dicts."""
|
|
243
|
-
fm = {"affects": [], "subsystem": "", "requires": ["06-ui"], "tags": [], "phase": "03"}
|
|
244
|
-
score, reasons = _score_summary(fm, "06", 6, [], [])
|
|
245
|
-
assert score == "HIGH"
|
|
246
|
-
|
|
247
|
-
|
|
248
|
-
class TestResolveTransitiveRequires:
|
|
249
|
-
def test_direct_affects(self):
|
|
250
|
-
summaries = [
|
|
251
|
-
{"frontmatter": {"phase": "05-auth", "affects": ["06-ui"], "requires": []}},
|
|
252
|
-
]
|
|
253
|
-
result = _resolve_transitive_requires(summaries, "06")
|
|
254
|
-
assert "05-auth" in result
|
|
255
|
-
|
|
256
|
-
def test_one_hop_chain(self):
|
|
257
|
-
"""05-auth affects 06, and requires 04-setup -> 04-setup should be in chain."""
|
|
258
|
-
summaries = [
|
|
259
|
-
{"frontmatter": {"phase": "05-auth", "affects": ["06-ui"], "requires": ["04-setup"]}},
|
|
260
|
-
{"frontmatter": {"phase": "04-setup", "affects": [], "requires": []}},
|
|
261
|
-
]
|
|
262
|
-
result = _resolve_transitive_requires(summaries, "06")
|
|
263
|
-
assert "05-auth" in result
|
|
264
|
-
assert "04-setup" in result
|
|
265
|
-
|
|
266
|
-
def test_no_matches(self):
|
|
267
|
-
summaries = [
|
|
268
|
-
{"frontmatter": {"phase": "02-infra", "affects": [], "requires": []}},
|
|
269
|
-
]
|
|
270
|
-
result = _resolve_transitive_requires(summaries, "06")
|
|
271
|
-
assert len(result) == 0
|
|
272
|
-
|
|
273
|
-
def test_string_affects_coercion(self):
|
|
274
|
-
summaries = [
|
|
275
|
-
{"frontmatter": {"phase": "05-auth", "affects": "06-ui", "requires": []}},
|
|
276
|
-
]
|
|
277
|
-
result = _resolve_transitive_requires(summaries, "06")
|
|
278
|
-
assert "05-auth" in result
|
|
279
|
-
|
|
280
|
-
def test_dict_requires(self):
|
|
281
|
-
summaries = [
|
|
282
|
-
{"frontmatter": {"phase": "05-auth", "affects": ["06-ui"], "requires": [{"phase": "04-setup"}]}},
|
|
283
|
-
]
|
|
284
|
-
result = _resolve_transitive_requires(summaries, "06")
|
|
285
|
-
assert "04-setup" in result
|
|
286
|
-
|
|
287
|
-
|
|
288
|
-
class TestAggregateFromSummaries:
|
|
289
|
-
def test_skips_low(self):
|
|
290
|
-
summaries = [
|
|
291
|
-
{"relevance": "LOW", "frontmatter": {
|
|
292
|
-
"tech-stack": {"added": ["redis"], "patterns": []},
|
|
293
|
-
"patterns-established": [], "key-files": {}, "key-decisions": [],
|
|
294
|
-
}},
|
|
295
|
-
]
|
|
296
|
-
result = _aggregate_from_summaries(summaries)
|
|
297
|
-
assert result["tech_stack_added"] == []
|
|
298
|
-
|
|
299
|
-
def test_collects_high_and_medium(self):
|
|
300
|
-
summaries = [
|
|
301
|
-
{"relevance": "HIGH", "frontmatter": {
|
|
302
|
-
"tech-stack": {"added": ["jose"], "patterns": ["jwt-auth"]},
|
|
303
|
-
"patterns-established": ["Token rotation"],
|
|
304
|
-
"key-files": {"created": ["src/auth.ts"], "modified": ["src/config.ts"]},
|
|
305
|
-
"key-decisions": ["Use JWT"],
|
|
306
|
-
}},
|
|
307
|
-
{"relevance": "MEDIUM", "frontmatter": {
|
|
308
|
-
"tech-stack": {"added": ["dotenv"], "patterns": []},
|
|
309
|
-
"patterns-established": [],
|
|
310
|
-
"key-files": {"created": ["src/config.ts"], "modified": []},
|
|
311
|
-
"key-decisions": ["Use dotenv"],
|
|
312
|
-
}},
|
|
313
|
-
]
|
|
314
|
-
result = _aggregate_from_summaries(summaries)
|
|
315
|
-
assert "jose" in result["tech_stack_added"]
|
|
316
|
-
assert "dotenv" in result["tech_stack_added"]
|
|
317
|
-
assert "Token rotation" in result["patterns_established"]
|
|
318
|
-
assert "src/auth.ts" in result["key_files_created"]
|
|
319
|
-
assert "Use JWT" in result["key_decisions"]
|
|
320
|
-
assert "Use dotenv" in result["key_decisions"]
|
|
321
|
-
|
|
322
|
-
def test_deduplication(self):
|
|
323
|
-
summaries = [
|
|
324
|
-
{"relevance": "HIGH", "frontmatter": {
|
|
325
|
-
"tech-stack": {"added": ["jose"], "patterns": []},
|
|
326
|
-
"patterns-established": ["P1"],
|
|
327
|
-
"key-files": {"created": ["f.ts"], "modified": []},
|
|
328
|
-
"key-decisions": ["D1"],
|
|
329
|
-
}},
|
|
330
|
-
{"relevance": "HIGH", "frontmatter": {
|
|
331
|
-
"tech-stack": {"added": ["jose"], "patterns": []},
|
|
332
|
-
"patterns-established": ["P1"],
|
|
333
|
-
"key-files": {"created": ["f.ts"], "modified": []},
|
|
334
|
-
"key-decisions": ["D1"],
|
|
335
|
-
}},
|
|
336
|
-
]
|
|
337
|
-
result = _aggregate_from_summaries(summaries)
|
|
338
|
-
assert result["tech_stack_added"] == ["jose"]
|
|
339
|
-
assert result["patterns_established"] == ["P1"]
|
|
340
|
-
assert result["key_files_created"] == ["f.ts"]
|
|
341
|
-
assert result["key_decisions"] == ["D1"]
|
|
342
|
-
|
|
343
|
-
def test_string_coercion(self):
|
|
344
|
-
"""String values instead of lists should be coerced."""
|
|
345
|
-
summaries = [
|
|
346
|
-
{"relevance": "HIGH", "frontmatter": {
|
|
347
|
-
"tech-stack": {"added": "single-lib", "patterns": "single-pattern"},
|
|
348
|
-
"patterns-established": "single-established",
|
|
349
|
-
"key-files": {"created": "single-file.ts", "modified": "mod-file.ts"},
|
|
350
|
-
"key-decisions": "single-decision",
|
|
351
|
-
}},
|
|
352
|
-
]
|
|
353
|
-
result = _aggregate_from_summaries(summaries)
|
|
354
|
-
assert "single-lib" in result["tech_stack_added"]
|
|
355
|
-
assert "single-pattern" in result["patterns_established"]
|
|
356
|
-
assert "single-established" in result["patterns_established"]
|
|
357
|
-
assert "single-file.ts" in result["key_files_created"]
|
|
358
|
-
assert "mod-file.ts" in result["key_files_modified"]
|
|
359
|
-
assert "single-decision" in result["key_decisions"]
|
|
360
|
-
|
|
361
|
-
def test_none_fields(self):
|
|
362
|
-
"""None values for optional fields should not crash."""
|
|
363
|
-
summaries = [
|
|
364
|
-
{"relevance": "HIGH", "frontmatter": {
|
|
365
|
-
"tech-stack": None,
|
|
366
|
-
"patterns-established": None,
|
|
367
|
-
"key-files": None,
|
|
368
|
-
"key-decisions": None,
|
|
369
|
-
}},
|
|
370
|
-
]
|
|
371
|
-
result = _aggregate_from_summaries(summaries)
|
|
372
|
-
assert result["tech_stack_added"] == []
|
|
373
|
-
assert result["patterns_established"] == []
|
|
374
|
-
assert result["key_files_created"] == []
|
|
375
|
-
assert result["key_decisions"] == []
|
|
376
|
-
|
|
377
|
-
|
|
378
|
-
class TestBuildExcludePathspecs:
|
|
379
|
-
def test_all_start_with_colon_bang(self):
|
|
380
|
-
specs = build_exclude_pathspecs()
|
|
381
|
-
for spec in specs:
|
|
382
|
-
assert spec.startswith(":!"), f"Expected ':!' prefix, got: {spec}"
|
|
383
|
-
|
|
384
|
-
def test_count_matches_exclusions(self):
|
|
385
|
-
specs = build_exclude_pathspecs()
|
|
386
|
-
assert len(specs) == len(PATCH_EXCLUSIONS)
|
|
387
|
-
|
|
388
|
-
|
|
389
|
-
class TestParseFrontmatter:
|
|
390
|
-
def test_valid_yaml(self, tmp_path):
|
|
391
|
-
f = tmp_path / "test.md"
|
|
392
|
-
f.write_text("---\ntitle: Hello\ntags: [a, b]\n---\n\n# Content\n")
|
|
393
|
-
result = parse_frontmatter(f)
|
|
394
|
-
assert result == {"title": "Hello", "tags": ["a", "b"]}
|
|
395
|
-
|
|
396
|
-
def test_no_frontmatter(self, tmp_path):
|
|
397
|
-
f = tmp_path / "test.md"
|
|
398
|
-
f.write_text("# Just a heading\n\nSome content.\n")
|
|
399
|
-
assert parse_frontmatter(f) is None
|
|
400
|
-
|
|
401
|
-
def test_malformed_yaml(self, tmp_path):
|
|
402
|
-
f = tmp_path / "test.md"
|
|
403
|
-
f.write_text("---\n: invalid: yaml: [[\n---\n\nContent\n")
|
|
404
|
-
assert parse_frontmatter(f) is None
|
|
405
|
-
|
|
406
|
-
def test_empty_file(self, tmp_path):
|
|
407
|
-
f = tmp_path / "test.md"
|
|
408
|
-
f.write_text("")
|
|
409
|
-
assert parse_frontmatter(f) is None
|
|
410
|
-
|
|
411
|
-
def test_date_value(self, tmp_path):
|
|
412
|
-
f = tmp_path / "test.md"
|
|
413
|
-
f.write_text("---\ndate: 2026-01-15\n---\n\nContent\n")
|
|
414
|
-
result = parse_frontmatter(f)
|
|
415
|
-
assert result is not None
|
|
416
|
-
# YAML parses bare dates as datetime.date objects
|
|
417
|
-
import datetime
|
|
418
|
-
assert result["date"] == datetime.date(2026, 1, 15)
|
|
419
|
-
|
|
420
|
-
def test_list_value(self, tmp_path):
|
|
421
|
-
f = tmp_path / "test.md"
|
|
422
|
-
f.write_text("---\nitems:\n - one\n - two\n - three\n---\n\nContent\n")
|
|
423
|
-
result = parse_frontmatter(f)
|
|
424
|
-
assert result == {"items": ["one", "two", "three"]}
|
|
425
|
-
|
|
426
|
-
def test_empty_frontmatter(self, tmp_path):
|
|
427
|
-
"""Empty frontmatter (no content between ---) doesn't match the regex."""
|
|
428
|
-
f = tmp_path / "test.md"
|
|
429
|
-
f.write_text("---\n---\n\nContent\n")
|
|
430
|
-
assert parse_frontmatter(f) is None
|
|
431
|
-
|
|
432
|
-
def test_empty_frontmatter_with_newline(self, tmp_path):
|
|
433
|
-
"""Frontmatter with only a newline between --- returns empty dict."""
|
|
434
|
-
f = tmp_path / "test.md"
|
|
435
|
-
f.write_text("---\n\n---\n\nContent\n")
|
|
436
|
-
result = parse_frontmatter(f)
|
|
437
|
-
assert result == {}
|
|
438
|
-
|
|
439
|
-
def test_nonexistent_file(self, tmp_path):
|
|
440
|
-
f = tmp_path / "nonexistent.md"
|
|
441
|
-
assert parse_frontmatter(f) is None
|
|
442
|
-
|
|
443
|
-
|
|
444
|
-
class TestHasReadinessSection:
|
|
445
|
-
def test_present_with_content(self, tmp_path):
|
|
446
|
-
f = tmp_path / "test.md"
|
|
447
|
-
f.write_text("---\nphase: '05'\n---\n\n## Next Phase Readiness\n\n- Need auth provider\n- Token refresh missing\n")
|
|
448
|
-
assert _has_readiness_section(f) is True
|
|
449
|
-
|
|
450
|
-
def test_empty_section(self, tmp_path):
|
|
451
|
-
f = tmp_path / "test.md"
|
|
452
|
-
f.write_text("---\nphase: '05'\n---\n\n## Next Phase Readiness\n\n## Another Section\n")
|
|
453
|
-
assert _has_readiness_section(f) is False
|
|
454
|
-
|
|
455
|
-
def test_absent(self, tmp_path):
|
|
456
|
-
f = tmp_path / "test.md"
|
|
457
|
-
f.write_text("---\nphase: '05'\n---\n\n## Summary\n\nSome content.\n")
|
|
458
|
-
assert _has_readiness_section(f) is False
|
|
459
|
-
|
|
460
|
-
def test_nonexistent_file(self, tmp_path):
|
|
461
|
-
f = tmp_path / "nonexistent.md"
|
|
462
|
-
assert _has_readiness_section(f) is False
|
|
463
|
-
|
|
464
|
-
|
|
465
|
-
# ===================================================================
|
|
466
|
-
# Part 2: Golden-File Integration Test
|
|
467
|
-
# ===================================================================
|
|
468
|
-
|
|
469
|
-
|
|
470
|
-
def _build_scan_output(planning: Path) -> dict:
|
|
471
|
-
"""Build the same output dict that cmd_scan_planning_context produces."""
|
|
472
|
-
target_phase = "06"
|
|
473
|
-
target_num = 6
|
|
474
|
-
subsystems = ["auth"]
|
|
475
|
-
keywords = ["jwt", "ui"]
|
|
476
|
-
parse_errors: list[dict] = []
|
|
477
|
-
|
|
478
|
-
summaries, summaries_src = _scan_summaries(
|
|
479
|
-
planning, target_phase, target_num, subsystems, keywords, parse_errors
|
|
480
|
-
)
|
|
481
|
-
debug_learnings, debug_src = _scan_debug_docs(planning, parse_errors)
|
|
482
|
-
adhoc_learnings, adhoc_src = _scan_adhoc_summaries(planning, parse_errors)
|
|
483
|
-
completed_todos, completed_src = _scan_todos(planning, "done", parse_errors)
|
|
484
|
-
pending_todos, pending_src = _scan_todos(planning, "", parse_errors)
|
|
485
|
-
knowledge_files, knowledge_src = _scan_knowledge_files(planning, subsystems)
|
|
486
|
-
|
|
487
|
-
aggregated = _aggregate_from_summaries(summaries)
|
|
488
|
-
|
|
489
|
-
return {
|
|
490
|
-
"success": True,
|
|
491
|
-
"target": {
|
|
492
|
-
"phase": target_phase,
|
|
493
|
-
"phase_name": "",
|
|
494
|
-
"subsystems": subsystems,
|
|
495
|
-
"keywords": keywords,
|
|
496
|
-
},
|
|
497
|
-
"sources": {
|
|
498
|
-
"summaries": summaries_src,
|
|
499
|
-
"debug_docs": debug_src,
|
|
500
|
-
"adhoc_summaries": adhoc_src,
|
|
501
|
-
"completed_todos": completed_src,
|
|
502
|
-
"pending_todos": pending_src,
|
|
503
|
-
"knowledge_files": knowledge_src,
|
|
504
|
-
"parse_errors": parse_errors,
|
|
505
|
-
},
|
|
506
|
-
"summaries": summaries,
|
|
507
|
-
"debug_learnings": debug_learnings,
|
|
508
|
-
"adhoc_learnings": adhoc_learnings,
|
|
509
|
-
"completed_todos": completed_todos,
|
|
510
|
-
"pending_todos": pending_todos,
|
|
511
|
-
"knowledge_files": knowledge_files,
|
|
512
|
-
"aggregated": aggregated,
|
|
513
|
-
}
|
|
514
|
-
|
|
515
|
-
|
|
516
|
-
def _normalize_paths(obj, base: Path):
|
|
517
|
-
"""Recursively replace absolute paths with relative paths for stable comparison."""
|
|
518
|
-
base_str = str(base)
|
|
519
|
-
if isinstance(obj, str):
|
|
520
|
-
return obj.replace(base_str, "<FIXTURE>")
|
|
521
|
-
if isinstance(obj, list):
|
|
522
|
-
return [_normalize_paths(item, base) for item in obj]
|
|
523
|
-
if isinstance(obj, dict):
|
|
524
|
-
return {k: _normalize_paths(v, base) for k, v in obj.items()}
|
|
525
|
-
return obj
|
|
526
|
-
|
|
527
|
-
|
|
528
|
-
GOLDEN_FILE = Path(__file__).parent / "fixtures" / "scan-context" / "expected-output.json"
|
|
529
|
-
|
|
530
|
-
|
|
531
|
-
class TestGoldenFile:
|
|
532
|
-
"""Full JSON comparison of scan-planning-context output against golden file."""
|
|
533
|
-
|
|
534
|
-
def test_golden_file(self):
|
|
535
|
-
output = _build_scan_output(FIXTURE_PLANNING)
|
|
536
|
-
normalized = _normalize_paths(output, FIXTURE_PLANNING.parent)
|
|
537
|
-
|
|
538
|
-
if REGENERATE_GOLDEN:
|
|
539
|
-
GOLDEN_FILE.write_text(
|
|
540
|
-
json.dumps(normalized, indent=2, cls=_SafeEncoder) + "\n",
|
|
541
|
-
encoding="utf-8",
|
|
542
|
-
)
|
|
543
|
-
pytest.skip("Golden file regenerated — review and set REGENERATE_GOLDEN = False")
|
|
544
|
-
|
|
545
|
-
expected = json.loads(GOLDEN_FILE.read_text(encoding="utf-8"))
|
|
546
|
-
assert normalized == expected, (
|
|
547
|
-
"Output differs from golden file. "
|
|
548
|
-
"Set REGENERATE_GOLDEN = True and re-run to update."
|
|
549
|
-
)
|
|
550
|
-
|
|
551
|
-
|
|
552
|
-
class TestScanIntegrationTargeted:
|
|
553
|
-
"""Targeted assertions against fixture data."""
|
|
554
|
-
|
|
555
|
-
def test_summary_relevance_scores(self):
|
|
556
|
-
parse_errors: list[dict] = []
|
|
557
|
-
summaries, _ = _scan_summaries(
|
|
558
|
-
FIXTURE_PLANNING, "06", 6, ["auth"], ["jwt", "ui"], parse_errors
|
|
559
|
-
)
|
|
560
|
-
by_phase = {s["frontmatter"]["phase"]: s for s in summaries}
|
|
561
|
-
|
|
562
|
-
assert by_phase["02-infra"]["relevance"] == "LOW"
|
|
563
|
-
assert by_phase["05-auth"]["relevance"] == "HIGH"
|
|
564
|
-
# 04-setup: upgraded from MEDIUM to HIGH via transitive requires
|
|
565
|
-
assert by_phase["04-setup"]["relevance"] == "HIGH"
|
|
566
|
-
|
|
567
|
-
def test_transitive_requires_upgrade(self):
|
|
568
|
-
"""04-setup gets upgraded to HIGH because 05-auth affects 06 and requires 04-setup."""
|
|
569
|
-
parse_errors: list[dict] = []
|
|
570
|
-
summaries, _ = _scan_summaries(
|
|
571
|
-
FIXTURE_PLANNING, "06", 6, ["auth"], ["jwt", "ui"], parse_errors
|
|
572
|
-
)
|
|
573
|
-
setup = next(s for s in summaries if s["frontmatter"]["phase"] == "04-setup")
|
|
574
|
-
assert setup["relevance"] == "HIGH"
|
|
575
|
-
assert any("transitive" in r for r in setup["match_reasons"])
|
|
576
|
-
|
|
577
|
-
def test_aggregated_includes_high_and_medium(self):
|
|
578
|
-
output = _build_scan_output(FIXTURE_PLANNING)
|
|
579
|
-
agg = output["aggregated"]
|
|
580
|
-
|
|
581
|
-
# From 05-auth (HIGH) and 04-setup (upgraded to HIGH)
|
|
582
|
-
assert "jose" in agg["tech_stack_added"]
|
|
583
|
-
assert "bcrypt" in agg["tech_stack_added"]
|
|
584
|
-
assert "dotenv" in agg["tech_stack_added"]
|
|
585
|
-
|
|
586
|
-
# 02-infra is LOW, so postgres should NOT be in aggregated
|
|
587
|
-
assert "postgres" not in agg["tech_stack_added"]
|
|
588
|
-
|
|
589
|
-
def test_debug_learnings_collected(self):
|
|
590
|
-
output = _build_scan_output(FIXTURE_PLANNING)
|
|
591
|
-
assert len(output["debug_learnings"]) == 1
|
|
592
|
-
debug = output["debug_learnings"][0]
|
|
593
|
-
assert debug["subsystem"] == "auth"
|
|
594
|
-
assert "clock skew" in debug["root_cause"].lower()
|
|
595
|
-
|
|
596
|
-
def test_adhoc_learnings_collected(self):
|
|
597
|
-
output = _build_scan_output(FIXTURE_PLANNING)
|
|
598
|
-
assert len(output["adhoc_learnings"]) == 2
|
|
599
|
-
# Flat file (old format with learnings field)
|
|
600
|
-
auth_adhoc = next(a for a in output["adhoc_learnings"] if a["subsystem"] == "auth")
|
|
601
|
-
assert len(auth_adhoc["learnings"]) == 2
|
|
602
|
-
# Subdirectory file (phase-style with key-decisions fallback)
|
|
603
|
-
api_adhoc = next(a for a in output["adhoc_learnings"] if a["subsystem"] == "api")
|
|
604
|
-
assert len(api_adhoc["learnings"]) == 2
|
|
605
|
-
assert "duplicate route handlers" in api_adhoc["learnings"][0].lower()
|
|
606
|
-
|
|
607
|
-
def test_pending_todos_collected(self):
|
|
608
|
-
output = _build_scan_output(FIXTURE_PLANNING)
|
|
609
|
-
assert len(output["pending_todos"]) == 1
|
|
610
|
-
assert output["pending_todos"][0]["title"] == "Add logout endpoint"
|
|
611
|
-
|
|
612
|
-
def test_completed_todos_collected(self):
|
|
613
|
-
output = _build_scan_output(FIXTURE_PLANNING)
|
|
614
|
-
assert len(output["completed_todos"]) == 1
|
|
615
|
-
assert output["completed_todos"][0]["title"] == "Set up database migrations"
|
|
616
|
-
|
|
617
|
-
def test_knowledge_files_matched(self):
|
|
618
|
-
output = _build_scan_output(FIXTURE_PLANNING)
|
|
619
|
-
knowledge = output["knowledge_files"]
|
|
620
|
-
assert len(knowledge) == 1
|
|
621
|
-
assert knowledge[0]["subsystem"] == "auth"
|
|
622
|
-
assert knowledge[0]["matched"] is True
|
|
623
|
-
|
|
624
|
-
def test_readiness_warnings_on_05_auth(self):
|
|
625
|
-
parse_errors: list[dict] = []
|
|
626
|
-
summaries, _ = _scan_summaries(
|
|
627
|
-
FIXTURE_PLANNING, "06", 6, ["auth"], ["jwt", "ui"], parse_errors
|
|
628
|
-
)
|
|
629
|
-
auth_summary = next(s for s in summaries if s["frontmatter"]["phase"] == "05-auth")
|
|
630
|
-
assert auth_summary["has_readiness_warnings"] is True
|
|
631
|
-
|
|
632
|
-
def test_no_parse_errors(self):
|
|
633
|
-
output = _build_scan_output(FIXTURE_PLANNING)
|
|
634
|
-
assert output["sources"]["parse_errors"] == []
|
|
635
|
-
|
|
636
|
-
|
|
637
|
-
# ===================================================================
|
|
638
|
-
# Part 3: Milestone Naming Detection Tests
|
|
639
|
-
# ===================================================================
|
|
640
|
-
|
|
641
|
-
|
|
642
|
-
class TestDetectVersionedMilestoneDirs:
|
|
643
|
-
"""Test _detect_versioned_milestone_dirs detection logic."""
|
|
644
|
-
|
|
645
|
-
def test_standard_dirs(self, tmp_path):
|
|
646
|
-
"""v0.1/, v0.2/ with .md files detected as standard."""
|
|
647
|
-
planning = tmp_path / ".planning"
|
|
648
|
-
ms = planning / "milestones"
|
|
649
|
-
(ms / "v0.1").mkdir(parents=True)
|
|
650
|
-
(ms / "v0.1" / "ROADMAP.md").write_text("# Roadmap")
|
|
651
|
-
(ms / "v0.2").mkdir(parents=True)
|
|
652
|
-
(ms / "v0.2" / "ROADMAP.md").write_text("# Roadmap")
|
|
653
|
-
|
|
654
|
-
result = _detect_versioned_milestone_dirs(planning)
|
|
655
|
-
assert len(result) == 2
|
|
656
|
-
assert result[0]["version"] == "v0.1"
|
|
657
|
-
assert result[0]["type"] == "standard"
|
|
658
|
-
assert result[0]["sub"] is None
|
|
659
|
-
assert result[1]["version"] == "v0.2"
|
|
660
|
-
assert result[1]["type"] == "standard"
|
|
661
|
-
|
|
662
|
-
def test_nested_dirs(self, tmp_path):
|
|
663
|
-
"""v2.0.0/ with sub-dirs and no .md files detected as nested."""
|
|
664
|
-
planning = tmp_path / ".planning"
|
|
665
|
-
ms = planning / "milestones"
|
|
666
|
-
v200 = ms / "v2.0.0"
|
|
667
|
-
(v200 / "quests").mkdir(parents=True)
|
|
668
|
-
(v200 / "quests" / "ROADMAP.md").write_text("# Roadmap")
|
|
669
|
-
(v200 / "sanctuary").mkdir(parents=True)
|
|
670
|
-
(v200 / "sanctuary" / "ROADMAP.md").write_text("# Roadmap")
|
|
671
|
-
|
|
672
|
-
result = _detect_versioned_milestone_dirs(planning)
|
|
673
|
-
assert len(result) == 2
|
|
674
|
-
assert result[0]["version"] == "v2.0.0"
|
|
675
|
-
assert result[0]["sub"] == "quests"
|
|
676
|
-
assert result[0]["type"] == "nested"
|
|
677
|
-
assert result[1]["sub"] == "sanctuary"
|
|
678
|
-
assert result[1]["type"] == "nested"
|
|
679
|
-
|
|
680
|
-
def test_mixed_standard_and_nested(self, tmp_path):
|
|
681
|
-
"""v2.0.0/quests/ (nested) + v2.2.0/ (standard) both detected."""
|
|
682
|
-
planning = tmp_path / ".planning"
|
|
683
|
-
ms = planning / "milestones"
|
|
684
|
-
# Nested
|
|
685
|
-
v200 = ms / "v2.0.0"
|
|
686
|
-
(v200 / "quests").mkdir(parents=True)
|
|
687
|
-
(v200 / "quests" / "ROADMAP.md").write_text("# Roadmap")
|
|
688
|
-
# Standard
|
|
689
|
-
(ms / "v2.2.0").mkdir(parents=True)
|
|
690
|
-
(ms / "v2.2.0" / "ROADMAP.md").write_text("# Roadmap")
|
|
691
|
-
|
|
692
|
-
result = _detect_versioned_milestone_dirs(planning)
|
|
693
|
-
assert len(result) == 2
|
|
694
|
-
nested = [r for r in result if r["type"] == "nested"]
|
|
695
|
-
standard = [r for r in result if r["type"] == "standard"]
|
|
696
|
-
assert len(nested) == 1
|
|
697
|
-
assert nested[0]["sub"] == "quests"
|
|
698
|
-
assert len(standard) == 1
|
|
699
|
-
assert standard[0]["version"] == "v2.2.0"
|
|
700
|
-
|
|
701
|
-
def test_slug_dirs_ignored(self, tmp_path):
|
|
702
|
-
"""mvp/, blast-pass/ are not flagged."""
|
|
703
|
-
planning = tmp_path / ".planning"
|
|
704
|
-
ms = planning / "milestones"
|
|
705
|
-
(ms / "mvp").mkdir(parents=True)
|
|
706
|
-
(ms / "mvp" / "ROADMAP.md").write_text("# Roadmap")
|
|
707
|
-
(ms / "blast-pass").mkdir(parents=True)
|
|
708
|
-
(ms / "blast-pass" / "ROADMAP.md").write_text("# Roadmap")
|
|
709
|
-
|
|
710
|
-
result = _detect_versioned_milestone_dirs(planning)
|
|
711
|
-
assert result == []
|
|
712
|
-
|
|
713
|
-
def test_no_milestones_dir(self, tmp_path):
|
|
714
|
-
"""No milestones/ directory returns empty."""
|
|
715
|
-
planning = tmp_path / ".planning"
|
|
716
|
-
planning.mkdir(parents=True)
|
|
717
|
-
|
|
718
|
-
result = _detect_versioned_milestone_dirs(planning)
|
|
719
|
-
assert result == []
|
|
720
|
-
|
|
721
|
-
def test_empty_milestones_dir(self, tmp_path):
|
|
722
|
-
"""Empty milestones/ directory returns empty."""
|
|
723
|
-
planning = tmp_path / ".planning"
|
|
724
|
-
(planning / "milestones").mkdir(parents=True)
|
|
725
|
-
|
|
726
|
-
result = _detect_versioned_milestone_dirs(planning)
|
|
727
|
-
assert result == []
|
|
728
|
-
|
|
729
|
-
def test_phases_subdir_excluded_from_nested(self, tmp_path):
|
|
730
|
-
"""phases/ sub-directory inside v-dir is excluded from nested detection."""
|
|
731
|
-
planning = tmp_path / ".planning"
|
|
732
|
-
ms = planning / "milestones"
|
|
733
|
-
v01 = ms / "v0.1"
|
|
734
|
-
(v01 / "phases" / "01-setup").mkdir(parents=True)
|
|
735
|
-
# Has .md files, so it's standard despite having phases/ sub-dir
|
|
736
|
-
(v01 / "ROADMAP.md").write_text("# Roadmap")
|
|
737
|
-
|
|
738
|
-
result = _detect_versioned_milestone_dirs(planning)
|
|
739
|
-
assert len(result) == 1
|
|
740
|
-
assert result[0]["type"] == "standard"
|
|
741
|
-
|
|
742
|
-
|
|
743
|
-
class TestParseMilestoneNameMapping:
|
|
744
|
-
"""Test _parse_milestone_name_mapping parsing logic."""
|
|
745
|
-
|
|
746
|
-
def test_standard_headers(self, tmp_path):
|
|
747
|
-
"""Parses ## v0.1 MVP (Shipped: ...) correctly."""
|
|
748
|
-
planning = tmp_path / ".planning"
|
|
749
|
-
planning.mkdir(parents=True)
|
|
750
|
-
(planning / "MILESTONES.md").write_text(
|
|
751
|
-
"# Milestones\n\n"
|
|
752
|
-
"## v0.1 MVP (Shipped: 2026-01-15)\n\n"
|
|
753
|
-
"Some content.\n"
|
|
754
|
-
)
|
|
755
|
-
|
|
756
|
-
result = _parse_milestone_name_mapping(planning)
|
|
757
|
-
assert len(result) == 1
|
|
758
|
-
assert result[0]["version"] == "v0.1"
|
|
759
|
-
assert result[0]["name"] == "MVP"
|
|
760
|
-
assert result[0]["slug"] == "mvp"
|
|
761
|
-
|
|
762
|
-
def test_multi_word_names(self, tmp_path):
|
|
763
|
-
"""Parses multi-word names with special chars."""
|
|
764
|
-
planning = tmp_path / ".planning"
|
|
765
|
-
planning.mkdir(parents=True)
|
|
766
|
-
(planning / "MILESTONES.md").write_text(
|
|
767
|
-
"# Milestones\n\n"
|
|
768
|
-
"## v0.1 MVP - POSitive Plus SDK Integration (Shipped: 2026-01-15)\n\n"
|
|
769
|
-
)
|
|
770
|
-
|
|
771
|
-
result = _parse_milestone_name_mapping(planning)
|
|
772
|
-
assert len(result) == 1
|
|
773
|
-
assert result[0]["name"] == "MVP - POSitive Plus SDK Integration"
|
|
774
|
-
assert result[0]["slug"] == "mvp-positive-plus-sdk-integration"
|
|
775
|
-
|
|
776
|
-
def test_duplicate_version(self, tmp_path):
|
|
777
|
-
"""Two v2.0.0 entries (like ForgeBlast) both extracted."""
|
|
778
|
-
planning = tmp_path / ".planning"
|
|
779
|
-
planning.mkdir(parents=True)
|
|
780
|
-
(planning / "MILESTONES.md").write_text(
|
|
781
|
-
"# Milestones\n\n"
|
|
782
|
-
"## v2.0.0 Quests Feature (Shipped: 2026-01-01)\n\n"
|
|
783
|
-
"Content.\n\n"
|
|
784
|
-
"## v2.0.0 Sanctuary (Shipped: 2026-02-01)\n\n"
|
|
785
|
-
"Content.\n"
|
|
786
|
-
)
|
|
787
|
-
|
|
788
|
-
result = _parse_milestone_name_mapping(planning)
|
|
789
|
-
assert len(result) == 2
|
|
790
|
-
versions = [r["version"] for r in result]
|
|
791
|
-
assert versions == ["v2.0.0", "v2.0.0"]
|
|
792
|
-
names = [r["name"] for r in result]
|
|
793
|
-
assert "Quests Feature" in names
|
|
794
|
-
assert "Sanctuary" in names
|
|
795
|
-
|
|
796
|
-
def test_no_versioned_headers(self, tmp_path):
|
|
797
|
-
"""New-format headers without version prefix are not matched."""
|
|
798
|
-
planning = tmp_path / ".planning"
|
|
799
|
-
planning.mkdir(parents=True)
|
|
800
|
-
(planning / "MILESTONES.md").write_text(
|
|
801
|
-
"# Milestones\n\n"
|
|
802
|
-
"## MVP (Shipped: 2026-01-15)\n\n"
|
|
803
|
-
"Content.\n"
|
|
804
|
-
)
|
|
805
|
-
|
|
806
|
-
result = _parse_milestone_name_mapping(planning)
|
|
807
|
-
assert result == []
|
|
808
|
-
|
|
809
|
-
def test_current_milestone_from_project(self, tmp_path):
|
|
810
|
-
"""Parses ## Current Milestone: v0.3 Demo Release from PROJECT.md."""
|
|
811
|
-
planning = tmp_path / ".planning"
|
|
812
|
-
planning.mkdir(parents=True)
|
|
813
|
-
(planning / "PROJECT.md").write_text(
|
|
814
|
-
"# Project\n\n"
|
|
815
|
-
"## Current Milestone: v0.3 Demo Release\n\n"
|
|
816
|
-
"Content.\n"
|
|
817
|
-
)
|
|
818
|
-
|
|
819
|
-
result = _parse_milestone_name_mapping(planning)
|
|
820
|
-
assert len(result) == 1
|
|
821
|
-
assert result[0]["version"] == "v0.3"
|
|
822
|
-
assert result[0]["name"] == "Demo Release"
|
|
823
|
-
assert result[0]["slug"] == "demo-release"
|
|
824
|
-
assert result[0].get("current") is True
|
|
825
|
-
|
|
826
|
-
def test_no_files(self, tmp_path):
|
|
827
|
-
"""No MILESTONES.md or PROJECT.md returns empty."""
|
|
828
|
-
planning = tmp_path / ".planning"
|
|
829
|
-
planning.mkdir(parents=True)
|
|
830
|
-
|
|
831
|
-
result = _parse_milestone_name_mapping(planning)
|
|
832
|
-
assert result == []
|
|
833
|
-
|
|
834
|
-
def test_started_status(self, tmp_path):
|
|
835
|
-
"""Parses Started: status headers too."""
|
|
836
|
-
planning = tmp_path / ".planning"
|
|
837
|
-
planning.mkdir(parents=True)
|
|
838
|
-
(planning / "MILESTONES.md").write_text(
|
|
839
|
-
"# Milestones\n\n"
|
|
840
|
-
"## v0.2 Infrastructure (Started: 2026-02-01)\n\n"
|
|
841
|
-
)
|
|
842
|
-
|
|
843
|
-
result = _parse_milestone_name_mapping(planning)
|
|
844
|
-
assert len(result) == 1
|
|
845
|
-
assert result[0]["version"] == "v0.2"
|
|
846
|
-
assert result[0]["name"] == "Infrastructure"
|
|
847
|
-
|
|
848
|
-
|
|
849
|
-
# ---------------------------------------------------------------------------
|
|
850
|
-
# Tests: cmd_set_last_command
|
|
851
|
-
# ---------------------------------------------------------------------------
|
|
852
|
-
|
|
853
|
-
|
|
854
|
-
def _make_args(command_string: str) -> argparse.Namespace:
|
|
855
|
-
return argparse.Namespace(command_string=command_string)
|
|
856
|
-
|
|
857
|
-
|
|
858
|
-
class TestSetLastCommand:
|
|
859
|
-
"""Tests for the set-last-command subcommand."""
|
|
860
|
-
|
|
861
|
-
def _patch_git_root(self, tmp_path):
|
|
862
|
-
return mock.patch.object(_mod, "find_git_root", return_value=tmp_path)
|
|
863
|
-
|
|
864
|
-
def test_replaces_existing_last_command(self, tmp_path):
|
|
865
|
-
state = tmp_path / ".planning" / "STATE.md"
|
|
866
|
-
state.parent.mkdir(parents=True)
|
|
867
|
-
state.write_text(
|
|
868
|
-
"# State\n"
|
|
869
|
-
"Status: In progress\n"
|
|
870
|
-
"Last Command: ms:old-cmd | 2025-01-01 00:00\n"
|
|
871
|
-
"Phase: 10\n"
|
|
872
|
-
)
|
|
873
|
-
|
|
874
|
-
with self._patch_git_root(tmp_path):
|
|
875
|
-
cmd_set_last_command(_make_args("ms:plan-phase 10"))
|
|
876
|
-
|
|
877
|
-
text = state.read_text()
|
|
878
|
-
assert "ms:plan-phase 10 |" in text
|
|
879
|
-
assert "ms:old-cmd" not in text
|
|
880
|
-
# Verify only one Last Command line
|
|
881
|
-
assert text.count("Last Command:") == 1
|
|
882
|
-
|
|
883
|
-
def test_inserts_after_status_when_missing(self, tmp_path):
|
|
884
|
-
state = tmp_path / ".planning" / "STATE.md"
|
|
885
|
-
state.parent.mkdir(parents=True)
|
|
886
|
-
state.write_text(
|
|
887
|
-
"# State\n"
|
|
888
|
-
"Status: In progress\n"
|
|
889
|
-
"Phase: 10\n"
|
|
890
|
-
)
|
|
891
|
-
|
|
892
|
-
with self._patch_git_root(tmp_path):
|
|
893
|
-
cmd_set_last_command(_make_args("ms:execute-phase 10"))
|
|
894
|
-
|
|
895
|
-
text = state.read_text()
|
|
896
|
-
assert "Last Command: ms:execute-phase 10 |" in text
|
|
897
|
-
# Should appear after Status line
|
|
898
|
-
lines = text.splitlines()
|
|
899
|
-
status_idx = next(i for i, l in enumerate(lines) if l.startswith("Status:"))
|
|
900
|
-
last_cmd_idx = next(i for i, l in enumerate(lines) if l.startswith("Last Command:"))
|
|
901
|
-
assert last_cmd_idx == status_idx + 1
|
|
902
|
-
|
|
903
|
-
def test_missing_state_file_warns(self, tmp_path, capsys):
|
|
904
|
-
with self._patch_git_root(tmp_path):
|
|
905
|
-
cmd_set_last_command(_make_args("ms:plan-phase 10"))
|
|
906
|
-
|
|
907
|
-
captured = capsys.readouterr()
|
|
908
|
-
assert "Warning: STATE.md not found" in captured.err
|
|
909
|
-
assert captured.out == ""
|
|
910
|
-
|
|
911
|
-
def test_missing_both_lines_warns(self, tmp_path, capsys):
|
|
912
|
-
state = tmp_path / ".planning" / "STATE.md"
|
|
913
|
-
state.parent.mkdir(parents=True)
|
|
914
|
-
original = "# State\nPhase: 10\n"
|
|
915
|
-
state.write_text(original)
|
|
916
|
-
|
|
917
|
-
with self._patch_git_root(tmp_path):
|
|
918
|
-
cmd_set_last_command(_make_args("ms:adhoc"))
|
|
919
|
-
|
|
920
|
-
captured = capsys.readouterr()
|
|
921
|
-
assert "Warning:" in captured.err
|
|
922
|
-
# File should be unchanged
|
|
923
|
-
assert state.read_text() == original
|
|
924
|
-
|
|
925
|
-
def test_timestamp_format(self, tmp_path):
|
|
926
|
-
state = tmp_path / ".planning" / "STATE.md"
|
|
927
|
-
state.parent.mkdir(parents=True)
|
|
928
|
-
state.write_text("# State\nStatus: Idle\nLast Command: old\n")
|
|
929
|
-
|
|
930
|
-
fake_dt = mock.MagicMock()
|
|
931
|
-
fake_dt.datetime.now.return_value.strftime.return_value = "2026-02-24 14:30"
|
|
932
|
-
with self._patch_git_root(tmp_path), \
|
|
933
|
-
mock.patch.object(_mod, "datetime", fake_dt):
|
|
934
|
-
cmd_set_last_command(_make_args("ms:verify-work 10"))
|
|
935
|
-
|
|
936
|
-
text = state.read_text()
|
|
937
|
-
assert "Last Command: ms:verify-work 10 | 2026-02-24 14:30" in text
|
|
938
|
-
|
|
939
|
-
|
|
940
|
-
class TestCmdGatherMilestoneStats:
|
|
941
|
-
"""Tests for gather-milestone-stats command."""
|
|
942
|
-
|
|
943
|
-
def _patch_git_root(self, tmp_path):
|
|
944
|
-
return mock.patch.object(_mod, "find_git_root", return_value=tmp_path)
|
|
945
|
-
|
|
946
|
-
def _patch_run_git(self):
|
|
947
|
-
return mock.patch.object(_mod, "run_git", return_value="")
|
|
948
|
-
|
|
949
|
-
def _make_phase(self, tmp_path, name, plans=None, summaries=None):
|
|
950
|
-
"""Create a phase dir with optional PLAN.md and SUMMARY.md files."""
|
|
951
|
-
phase_dir = tmp_path / ".planning" / "phases" / name
|
|
952
|
-
phase_dir.mkdir(parents=True, exist_ok=True)
|
|
953
|
-
for p in (plans or []):
|
|
954
|
-
(phase_dir / p).write_text("# Plan")
|
|
955
|
-
for s in (summaries or []):
|
|
956
|
-
(phase_dir / s).write_text("# Summary")
|
|
957
|
-
return phase_dir
|
|
958
|
-
|
|
959
|
-
def test_both_plan_and_summary(self, tmp_path, capsys):
|
|
960
|
-
self._make_phase(tmp_path, "01-auth",
|
|
961
|
-
plans=["01-01-PLAN.md"], summaries=["01-01-SUMMARY.md"])
|
|
962
|
-
args = argparse.Namespace(start_phase=1, end_phase=1)
|
|
963
|
-
with self._patch_git_root(tmp_path), self._patch_run_git():
|
|
964
|
-
cmd_gather_milestone_stats(args)
|
|
965
|
-
out = capsys.readouterr().out
|
|
966
|
-
assert "Plans: 1 total, 1 complete" in out
|
|
967
|
-
assert "Status: READY" in out
|
|
968
|
-
|
|
969
|
-
def test_summary_only_no_plan(self, tmp_path, capsys):
|
|
970
|
-
"""PLAN.md cleaned up after execution — SUMMARY.md alone counts."""
|
|
971
|
-
self._make_phase(tmp_path, "09-persistence",
|
|
972
|
-
summaries=["09-01-SUMMARY.md", "09-02-SUMMARY.md"])
|
|
973
|
-
args = argparse.Namespace(start_phase=9, end_phase=9)
|
|
974
|
-
with self._patch_git_root(tmp_path), self._patch_run_git():
|
|
975
|
-
cmd_gather_milestone_stats(args)
|
|
976
|
-
out = capsys.readouterr().out
|
|
977
|
-
assert "Plans: 2 total, 2 complete" in out
|
|
978
|
-
assert "Status: READY" in out
|
|
979
|
-
|
|
980
|
-
def test_plan_only_no_summary_is_incomplete(self, tmp_path, capsys):
|
|
981
|
-
self._make_phase(tmp_path, "03-setup",
|
|
982
|
-
plans=["03-01-PLAN.md"])
|
|
983
|
-
args = argparse.Namespace(start_phase=3, end_phase=3)
|
|
984
|
-
with self._patch_git_root(tmp_path), self._patch_run_git():
|
|
985
|
-
cmd_gather_milestone_stats(args)
|
|
986
|
-
out = capsys.readouterr().out
|
|
987
|
-
assert "Plans: 1 total, 0 complete" in out
|
|
988
|
-
assert "Status: NOT READY" in out
|
|
989
|
-
|
|
990
|
-
def test_multi_phase_mixed(self, tmp_path, capsys):
|
|
991
|
-
"""Multiple phases: some with PLANs, some with only SUMMARYs."""
|
|
992
|
-
self._make_phase(tmp_path, "09-persistence",
|
|
993
|
-
summaries=["09-01-SUMMARY.md", "09-02-SUMMARY.md"])
|
|
994
|
-
self._make_phase(tmp_path, "10-transactions",
|
|
995
|
-
plans=["10-01-PLAN.md"],
|
|
996
|
-
summaries=["10-01-SUMMARY.md"])
|
|
997
|
-
args = argparse.Namespace(start_phase=9, end_phase=10)
|
|
998
|
-
with self._patch_git_root(tmp_path), self._patch_run_git():
|
|
999
|
-
cmd_gather_milestone_stats(args)
|
|
1000
|
-
out = capsys.readouterr().out
|
|
1001
|
-
assert "Plans: 3 total, 3 complete" in out
|
|
1002
|
-
assert "Status: READY" in out
|
|
1003
|
-
|
|
1004
|
-
def test_no_plans_or_summaries(self, tmp_path, capsys):
|
|
1005
|
-
self._make_phase(tmp_path, "01-auth")
|
|
1006
|
-
args = argparse.Namespace(start_phase=1, end_phase=1)
|
|
1007
|
-
with self._patch_git_root(tmp_path), self._patch_run_git():
|
|
1008
|
-
cmd_gather_milestone_stats(args)
|
|
1009
|
-
out = capsys.readouterr().out
|
|
1010
|
-
assert "Plans: 0 total, 0 complete" in out
|
|
1011
|
-
assert "Status: NOT READY" in out
|
|
1012
|
-
|
|
1013
|
-
|
|
1014
|
-
# ===================================================================
|
|
1015
|
-
# Part 4: UAT File Management Tests
|
|
1016
|
-
# ===================================================================
|
|
1017
|
-
|
|
1018
|
-
UATFile = _mod.UATFile
|
|
1019
|
-
cmd_uat_init = _mod.cmd_uat_init
|
|
1020
|
-
cmd_uat_update = _mod.cmd_uat_update
|
|
1021
|
-
cmd_uat_status = _mod.cmd_uat_status
|
|
1022
|
-
|
|
1023
|
-
# Shared fixture: a complete UAT.md matching the template format
|
|
1024
|
-
UAT_FIXTURE = """\
|
|
1025
|
-
---
|
|
1026
|
-
status: testing
|
|
1027
|
-
phase: 05-auth
|
|
1028
|
-
source: [05-01-SUMMARY.md]
|
|
1029
|
-
started: '2026-02-24 10:00'
|
|
1030
|
-
updated: '2026-02-24 10:30'
|
|
1031
|
-
current_batch: 2
|
|
1032
|
-
mocked_files: [auth_service.dart]
|
|
1033
|
-
pre_work_stash: null
|
|
1034
|
-
---
|
|
1035
|
-
|
|
1036
|
-
## Progress
|
|
1037
|
-
|
|
1038
|
-
total: 5
|
|
1039
|
-
tested: 3
|
|
1040
|
-
passed: 2
|
|
1041
|
-
issues: 1
|
|
1042
|
-
fixing: 0
|
|
1043
|
-
pending: 2
|
|
1044
|
-
skipped: 0
|
|
1045
|
-
|
|
1046
|
-
## Current Batch
|
|
1047
|
-
|
|
1048
|
-
batch: 2 of 3
|
|
1049
|
-
name: "Error States"
|
|
1050
|
-
mock_type: error_state
|
|
1051
|
-
tests: [3, 4]
|
|
1052
|
-
status: testing
|
|
1053
|
-
|
|
1054
|
-
## Tests
|
|
1055
|
-
|
|
1056
|
-
### 1. Login with valid credentials
|
|
1057
|
-
expected: User sees dashboard after entering valid email/password
|
|
1058
|
-
mock_required: false
|
|
1059
|
-
mock_type: null
|
|
1060
|
-
result: pass
|
|
1061
|
-
|
|
1062
|
-
### 2. View profile page
|
|
1063
|
-
expected: Profile shows user name and email
|
|
1064
|
-
mock_required: false
|
|
1065
|
-
mock_type: null
|
|
1066
|
-
result: pass
|
|
1067
|
-
|
|
1068
|
-
### 3. Login with invalid password
|
|
1069
|
-
expected: Error banner shows "Invalid credentials"
|
|
1070
|
-
mock_required: true
|
|
1071
|
-
mock_type: error_state
|
|
1072
|
-
result: issue
|
|
1073
|
-
reported: "Shows generic error instead of specific message"
|
|
1074
|
-
severity: major
|
|
1075
|
-
fix_status: applied
|
|
1076
|
-
fix_commit: abc1234
|
|
1077
|
-
retry_count: 0
|
|
1078
|
-
|
|
1079
|
-
### 4. Login with expired token
|
|
1080
|
-
expected: Redirect to login page with session expired message
|
|
1081
|
-
mock_required: true
|
|
1082
|
-
mock_type: error_state
|
|
1083
|
-
result: [pending]
|
|
1084
|
-
|
|
1085
|
-
### 5. Premium feature access
|
|
1086
|
-
expected: Shows upgrade prompt for free users
|
|
1087
|
-
mock_required: true
|
|
1088
|
-
mock_type: premium_user
|
|
1089
|
-
result: [pending]
|
|
1090
|
-
|
|
1091
|
-
## Fixes Applied
|
|
1092
|
-
|
|
1093
|
-
- commit: abc1234
|
|
1094
|
-
test: 3
|
|
1095
|
-
description: "Fixed error message to show specific auth error"
|
|
1096
|
-
files: [auth_service.dart, login_page.dart]
|
|
1097
|
-
|
|
1098
|
-
## Batches
|
|
1099
|
-
|
|
1100
|
-
### Batch 1: No Mocks Required
|
|
1101
|
-
tests: [1, 2]
|
|
1102
|
-
status: complete
|
|
1103
|
-
mock_type: null
|
|
1104
|
-
passed: 2
|
|
1105
|
-
issues: 0
|
|
1106
|
-
|
|
1107
|
-
### Batch 2: Error States
|
|
1108
|
-
tests: [3, 4]
|
|
1109
|
-
status: testing
|
|
1110
|
-
mock_type: error_state
|
|
1111
|
-
|
|
1112
|
-
### Batch 3: Premium Features
|
|
1113
|
-
tests: [5]
|
|
1114
|
-
status: pending
|
|
1115
|
-
mock_type: premium_user
|
|
1116
|
-
|
|
1117
|
-
## Assumptions
|
|
1118
|
-
"""
|
|
1119
|
-
|
|
1120
|
-
# Minimal UAT.md with empty fixes/assumptions
|
|
1121
|
-
UAT_MINIMAL = """\
|
|
1122
|
-
---
|
|
1123
|
-
status: testing
|
|
1124
|
-
phase: 03-setup
|
|
1125
|
-
source: [03-01-SUMMARY.md]
|
|
1126
|
-
started: '2026-02-24 10:00'
|
|
1127
|
-
updated: '2026-02-24 10:00'
|
|
1128
|
-
current_batch: 1
|
|
1129
|
-
mocked_files: []
|
|
1130
|
-
pre_work_stash: null
|
|
1131
|
-
---
|
|
1132
|
-
|
|
1133
|
-
## Progress
|
|
1134
|
-
|
|
1135
|
-
total: 2
|
|
1136
|
-
tested: 0
|
|
1137
|
-
passed: 0
|
|
1138
|
-
issues: 0
|
|
1139
|
-
fixing: 0
|
|
1140
|
-
pending: 2
|
|
1141
|
-
skipped: 0
|
|
1142
|
-
|
|
1143
|
-
## Current Batch
|
|
1144
|
-
|
|
1145
|
-
batch: 1 of 1
|
|
1146
|
-
name: "No Mocks"
|
|
1147
|
-
mock_type: null
|
|
1148
|
-
tests: [1, 2]
|
|
1149
|
-
status: pending
|
|
1150
|
-
|
|
1151
|
-
## Tests
|
|
1152
|
-
|
|
1153
|
-
### 1. Basic setup check
|
|
1154
|
-
expected: App starts without errors
|
|
1155
|
-
mock_required: false
|
|
1156
|
-
mock_type: null
|
|
1157
|
-
result: [pending]
|
|
1158
|
-
|
|
1159
|
-
### 2. Config loads
|
|
1160
|
-
expected: Config values appear in settings
|
|
1161
|
-
mock_required: false
|
|
1162
|
-
mock_type: null
|
|
1163
|
-
result: [pending]
|
|
1164
|
-
|
|
1165
|
-
## Fixes Applied
|
|
1166
|
-
|
|
1167
|
-
## Batches
|
|
1168
|
-
|
|
1169
|
-
### Batch 1: No Mocks
|
|
1170
|
-
tests: [1, 2]
|
|
1171
|
-
status: pending
|
|
1172
|
-
mock_type: null
|
|
1173
|
-
|
|
1174
|
-
## Assumptions
|
|
1175
|
-
"""
|
|
1176
|
-
|
|
1177
|
-
|
|
1178
|
-
class TestUATFileParse:
|
|
1179
|
-
"""Test UATFile.parse with complete and minimal fixtures."""
|
|
1180
|
-
|
|
1181
|
-
def test_parse_complete_file(self):
|
|
1182
|
-
uat = UATFile.parse(UAT_FIXTURE)
|
|
1183
|
-
assert uat.frontmatter["status"] == "testing"
|
|
1184
|
-
assert uat.frontmatter["phase"] == "05-auth"
|
|
1185
|
-
assert uat.frontmatter["current_batch"] == 2
|
|
1186
|
-
assert uat.frontmatter["mocked_files"] == ["auth_service.dart"]
|
|
1187
|
-
assert len(uat.tests) == 5
|
|
1188
|
-
assert len(uat.batches) == 3
|
|
1189
|
-
assert len(uat.fixes) == 1
|
|
1190
|
-
assert len(uat.assumptions) == 0
|
|
1191
|
-
|
|
1192
|
-
def test_parse_minimal_file(self):
|
|
1193
|
-
uat = UATFile.parse(UAT_MINIMAL)
|
|
1194
|
-
assert uat.frontmatter["phase"] == "03-setup"
|
|
1195
|
-
assert len(uat.tests) == 2
|
|
1196
|
-
assert len(uat.batches) == 1
|
|
1197
|
-
assert len(uat.fixes) == 0
|
|
1198
|
-
assert len(uat.assumptions) == 0
|
|
1199
|
-
|
|
1200
|
-
def test_parse_test_with_issue_fields(self):
|
|
1201
|
-
uat = UATFile.parse(UAT_FIXTURE)
|
|
1202
|
-
t3 = next(t for t in uat.tests if t["num"] == "3")
|
|
1203
|
-
assert t3["result"] == "issue"
|
|
1204
|
-
assert "Shows generic error" in t3["reported"]
|
|
1205
|
-
assert t3["severity"] == "major"
|
|
1206
|
-
assert t3["fix_status"] == "applied"
|
|
1207
|
-
assert t3["fix_commit"] == "abc1234"
|
|
1208
|
-
assert t3["retry_count"] == "0"
|
|
1209
|
-
|
|
1210
|
-
def test_parse_progress(self):
|
|
1211
|
-
uat = UATFile.parse(UAT_FIXTURE)
|
|
1212
|
-
assert uat.progress["total"] == "5"
|
|
1213
|
-
assert uat.progress["passed"] == "2"
|
|
1214
|
-
assert uat.progress["pending"] == "2"
|
|
1215
|
-
|
|
1216
|
-
def test_parse_current_batch(self):
|
|
1217
|
-
uat = UATFile.parse(UAT_FIXTURE)
|
|
1218
|
-
assert uat.current_batch["batch"] == "2 of 3"
|
|
1219
|
-
assert uat.current_batch["mock_type"] == "error_state"
|
|
1220
|
-
assert uat.current_batch["status"] == "testing"
|
|
1221
|
-
|
|
1222
|
-
def test_parse_fixes(self):
|
|
1223
|
-
uat = UATFile.parse(UAT_FIXTURE)
|
|
1224
|
-
fix = uat.fixes[0]
|
|
1225
|
-
assert fix["commit"] == "abc1234"
|
|
1226
|
-
assert fix["test"] == "3"
|
|
1227
|
-
assert "Fixed error message" in fix["description"]
|
|
1228
|
-
|
|
1229
|
-
def test_parse_batches(self):
|
|
1230
|
-
uat = UATFile.parse(UAT_FIXTURE)
|
|
1231
|
-
b1 = uat.batches[0]
|
|
1232
|
-
assert b1["name"] == "No Mocks Required"
|
|
1233
|
-
assert b1["status"] == "complete"
|
|
1234
|
-
assert b1["passed"] == "2"
|
|
1235
|
-
|
|
1236
|
-
|
|
1237
|
-
class TestUATFileRoundtrip:
|
|
1238
|
-
"""Test parse -> serialize roundtrip."""
|
|
1239
|
-
|
|
1240
|
-
def test_roundtrip_preserves_structure(self):
|
|
1241
|
-
uat = UATFile.parse(UAT_FIXTURE)
|
|
1242
|
-
output = uat.serialize()
|
|
1243
|
-
# Re-parse the output
|
|
1244
|
-
uat2 = UATFile.parse(output)
|
|
1245
|
-
assert len(uat2.tests) == len(uat.tests)
|
|
1246
|
-
assert len(uat2.batches) == len(uat.batches)
|
|
1247
|
-
assert len(uat2.fixes) == len(uat.fixes)
|
|
1248
|
-
assert uat2.frontmatter["phase"] == "05-auth"
|
|
1249
|
-
# Test names preserved
|
|
1250
|
-
for t1, t2 in zip(uat.tests, uat2.tests):
|
|
1251
|
-
assert t1["name"] == t2["name"]
|
|
1252
|
-
assert t1["result"] == t2["result"]
|
|
1253
|
-
|
|
1254
|
-
def test_roundtrip_minimal(self):
|
|
1255
|
-
uat = UATFile.parse(UAT_MINIMAL)
|
|
1256
|
-
output = uat.serialize()
|
|
1257
|
-
uat2 = UATFile.parse(output)
|
|
1258
|
-
assert len(uat2.tests) == 2
|
|
1259
|
-
assert len(uat2.fixes) == 0
|
|
1260
|
-
assert len(uat2.assumptions) == 0
|
|
1261
|
-
|
|
1262
|
-
|
|
1263
|
-
class TestUATFileRecalcProgress:
|
|
1264
|
-
"""Test recalc_progress with various result combinations."""
|
|
1265
|
-
|
|
1266
|
-
def test_all_pending(self):
|
|
1267
|
-
uat = UATFile.parse(UAT_MINIMAL)
|
|
1268
|
-
uat.recalc_progress()
|
|
1269
|
-
assert uat.progress["total"] == "2"
|
|
1270
|
-
assert uat.progress["pending"] == "2"
|
|
1271
|
-
assert uat.progress["tested"] == "0"
|
|
1272
|
-
assert uat.progress["passed"] == "0"
|
|
1273
|
-
|
|
1274
|
-
def test_mixed_results(self):
|
|
1275
|
-
uat = UATFile.parse(UAT_FIXTURE)
|
|
1276
|
-
uat.recalc_progress()
|
|
1277
|
-
assert uat.progress["total"] == "5"
|
|
1278
|
-
assert uat.progress["passed"] == "2"
|
|
1279
|
-
# Test 3 has fix_status=applied → fixing
|
|
1280
|
-
assert uat.progress["fixing"] == "1"
|
|
1281
|
-
assert uat.progress["pending"] == "2"
|
|
1282
|
-
|
|
1283
|
-
def test_verified_fix_counts_as_passed(self):
|
|
1284
|
-
uat = UATFile.parse(UAT_FIXTURE)
|
|
1285
|
-
# Change test 3's fix_status to verified
|
|
1286
|
-
t3 = next(t for t in uat.tests if t["num"] == "3")
|
|
1287
|
-
t3["fix_status"] = "verified"
|
|
1288
|
-
uat.recalc_progress()
|
|
1289
|
-
assert uat.progress["passed"] == "3" # 2 pass + 1 verified
|
|
1290
|
-
assert uat.progress["fixing"] == "0"
|
|
1291
|
-
|
|
1292
|
-
def test_blocked_counts_as_pending(self):
|
|
1293
|
-
uat = UATFile.parse(UAT_MINIMAL)
|
|
1294
|
-
uat.tests[0]["result"] = "blocked"
|
|
1295
|
-
uat.recalc_progress()
|
|
1296
|
-
assert uat.progress["pending"] == "2" # 1 blocked + 1 [pending]
|
|
1297
|
-
assert uat.progress["tested"] == "0"
|
|
1298
|
-
|
|
1299
|
-
def test_skipped(self):
|
|
1300
|
-
uat = UATFile.parse(UAT_MINIMAL)
|
|
1301
|
-
uat.tests[0]["result"] = "skipped"
|
|
1302
|
-
uat.recalc_progress()
|
|
1303
|
-
assert uat.progress["skipped"] == "1"
|
|
1304
|
-
assert uat.progress["pending"] == "1"
|
|
1305
|
-
assert uat.progress["tested"] == "1"
|
|
1306
|
-
|
|
1307
|
-
|
|
1308
|
-
class TestUATFileMutations:
|
|
1309
|
-
"""Test update_test, update_batch, update_session."""
|
|
1310
|
-
|
|
1311
|
-
def test_update_test(self):
|
|
1312
|
-
uat = UATFile.parse(UAT_MINIMAL)
|
|
1313
|
-
uat.update_test(1, {"result": "pass"})
|
|
1314
|
-
t1 = next(t for t in uat.tests if t["num"] == "1")
|
|
1315
|
-
assert t1["result"] == "pass"
|
|
1316
|
-
|
|
1317
|
-
def test_update_test_not_found(self):
|
|
1318
|
-
uat = UATFile.parse(UAT_MINIMAL)
|
|
1319
|
-
with pytest.raises(ValueError, match="Test 99 not found"):
|
|
1320
|
-
uat.update_test(99, {"result": "pass"})
|
|
1321
|
-
|
|
1322
|
-
def test_update_batch(self):
|
|
1323
|
-
uat = UATFile.parse(UAT_FIXTURE)
|
|
1324
|
-
uat.update_batch(1, {"passed": "3", "issues": "0"})
|
|
1325
|
-
b1 = next(b for b in uat.batches if b["num"] == "1")
|
|
1326
|
-
assert b1["passed"] == "3"
|
|
1327
|
-
|
|
1328
|
-
def test_update_session(self):
|
|
1329
|
-
uat = UATFile.parse(UAT_FIXTURE)
|
|
1330
|
-
uat.update_session({"status": "fixing"})
|
|
1331
|
-
assert uat.frontmatter["status"] == "fixing"
|
|
1332
|
-
|
|
1333
|
-
def test_update_session_mocked_files(self):
|
|
1334
|
-
uat = UATFile.parse(UAT_MINIMAL)
|
|
1335
|
-
uat.update_session({"mocked_files": "a.dart,b.dart"})
|
|
1336
|
-
assert uat.frontmatter["mocked_files"] == ["a.dart", "b.dart"]
|
|
1337
|
-
|
|
1338
|
-
def test_update_session_clear_mocked_files(self):
|
|
1339
|
-
uat = UATFile.parse(UAT_FIXTURE)
|
|
1340
|
-
uat.update_session({"mocked_files": ""})
|
|
1341
|
-
assert uat.frontmatter["mocked_files"] == []
|
|
1342
|
-
|
|
1343
|
-
def test_update_session_current_batch_syncs(self):
|
|
1344
|
-
uat = UATFile.parse(UAT_FIXTURE)
|
|
1345
|
-
uat.update_session({"current_batch": "3"})
|
|
1346
|
-
assert uat.frontmatter["current_batch"] == 3
|
|
1347
|
-
assert uat.current_batch["batch"] == "3 of 3"
|
|
1348
|
-
assert "Premium" in uat.current_batch["name"]
|
|
1349
|
-
|
|
1350
|
-
|
|
1351
|
-
class TestUATFileAppendFix:
|
|
1352
|
-
"""Test append_fix new and in-place update."""
|
|
1353
|
-
|
|
1354
|
-
def test_append_new_fix(self):
|
|
1355
|
-
uat = UATFile.parse(UAT_MINIMAL)
|
|
1356
|
-
uat.append_fix({
|
|
1357
|
-
"commit": "def567",
|
|
1358
|
-
"test": 1,
|
|
1359
|
-
"description": "Fixed setup",
|
|
1360
|
-
"files": ["setup.dart"],
|
|
1361
|
-
})
|
|
1362
|
-
assert len(uat.fixes) == 1
|
|
1363
|
-
assert uat.fixes[0]["commit"] == "def567"
|
|
1364
|
-
assert uat.fixes[0]["files"] == "[setup.dart]"
|
|
1365
|
-
|
|
1366
|
-
def test_append_fix_same_test_updates_in_place(self):
|
|
1367
|
-
uat = UATFile.parse(UAT_FIXTURE)
|
|
1368
|
-
assert len(uat.fixes) == 1
|
|
1369
|
-
uat.append_fix({
|
|
1370
|
-
"commit": "new999",
|
|
1371
|
-
"test": 3,
|
|
1372
|
-
"description": "Better fix for auth error",
|
|
1373
|
-
"files": ["auth_service.dart"],
|
|
1374
|
-
})
|
|
1375
|
-
# Still 1 fix, updated in place
|
|
1376
|
-
assert len(uat.fixes) == 1
|
|
1377
|
-
assert uat.fixes[0]["commit"] == "new999"
|
|
1378
|
-
assert "Better fix" in uat.fixes[0]["description"]
|
|
1379
|
-
|
|
1380
|
-
def test_append_assumption(self):
|
|
1381
|
-
uat = UATFile.parse(UAT_MINIMAL)
|
|
1382
|
-
uat.append_assumption({
|
|
1383
|
-
"test": 2,
|
|
1384
|
-
"name": "Config loads",
|
|
1385
|
-
"expected": "Config values appear",
|
|
1386
|
-
"reason": "No config file available",
|
|
1387
|
-
})
|
|
1388
|
-
assert len(uat.assumptions) == 1
|
|
1389
|
-
assert uat.assumptions[0]["test"] == "2"
|
|
1390
|
-
|
|
1391
|
-
|
|
1392
|
-
class TestCmdUatInit:
|
|
1393
|
-
"""Tests for uat-init command."""
|
|
1394
|
-
|
|
1395
|
-
def _patch_git_root(self, tmp_path):
|
|
1396
|
-
return mock.patch.object(_mod, "find_git_root", return_value=tmp_path)
|
|
1397
|
-
|
|
1398
|
-
def test_creates_file_from_valid_json(self, tmp_path, capsys):
|
|
1399
|
-
# Create phase dir
|
|
1400
|
-
phase_dir = tmp_path / ".planning" / "phases" / "05-auth"
|
|
1401
|
-
phase_dir.mkdir(parents=True)
|
|
1402
|
-
|
|
1403
|
-
input_json = json.dumps({
|
|
1404
|
-
"source": ["05-01-SUMMARY.md"],
|
|
1405
|
-
"tests": [
|
|
1406
|
-
{"name": "Login works", "expected": "User sees dashboard", "mock_required": False, "mock_type": None},
|
|
1407
|
-
{"name": "Logout works", "expected": "User sees login page", "mock_required": False, "mock_type": None},
|
|
1408
|
-
],
|
|
1409
|
-
"batches": [
|
|
1410
|
-
{"name": "No Mocks", "mock_type": None, "tests": [1, 2]},
|
|
1411
|
-
],
|
|
1412
|
-
})
|
|
1413
|
-
|
|
1414
|
-
args = argparse.Namespace(phase="5")
|
|
1415
|
-
with self._patch_git_root(tmp_path), \
|
|
1416
|
-
mock.patch.object(_mod.sys, "stdin", io.StringIO(input_json)):
|
|
1417
|
-
cmd_uat_init(args)
|
|
1418
|
-
|
|
1419
|
-
captured = capsys.readouterr()
|
|
1420
|
-
assert "2 tests" in captured.out
|
|
1421
|
-
assert "1 batches" in captured.out
|
|
1422
|
-
|
|
1423
|
-
uat_path = phase_dir / "05-auth-UAT.md"
|
|
1424
|
-
assert uat_path.is_file()
|
|
1425
|
-
content = uat_path.read_text()
|
|
1426
|
-
assert "Login works" in content
|
|
1427
|
-
assert "Logout works" in content
|
|
1428
|
-
|
|
1429
|
-
def test_auto_creates_phase_dir(self, tmp_path, capsys):
|
|
1430
|
-
(tmp_path / ".planning" / "phases").mkdir(parents=True)
|
|
1431
|
-
|
|
1432
|
-
input_json = json.dumps({
|
|
1433
|
-
"source": [],
|
|
1434
|
-
"tests": [{"name": "Test", "expected": "Works"}],
|
|
1435
|
-
"batches": [{"name": "B1", "tests": [1]}],
|
|
1436
|
-
})
|
|
1437
|
-
|
|
1438
|
-
args = argparse.Namespace(phase="99")
|
|
1439
|
-
with self._patch_git_root(tmp_path), \
|
|
1440
|
-
mock.patch.object(_mod.sys, "stdin", io.StringIO(input_json)):
|
|
1441
|
-
cmd_uat_init(args)
|
|
1442
|
-
|
|
1443
|
-
captured = capsys.readouterr()
|
|
1444
|
-
assert "1 tests" in captured.out
|
|
1445
|
-
assert (tmp_path / ".planning" / "phases" / "99").is_dir()
|
|
1446
|
-
|
|
1447
|
-
def test_invalid_json_exits(self, tmp_path):
    """Malformed JSON on stdin makes uat-init exit with status 1."""
    (tmp_path / ".planning" / "phases" / "05-auth").mkdir(parents=True)

    args = argparse.Namespace(phase="5")
    bad_stdin = mock.patch.object(_mod.sys, "stdin", io.StringIO("not json"))
    with self._patch_git_root(tmp_path), bad_stdin, pytest.raises(SystemExit) as exc:
        cmd_uat_init(args)
    assert exc.value.code == 1
def test_stdout_contains_path_and_counts(self, tmp_path, capsys):
    """uat-init reports the UAT file name plus test and batch counts on stdout."""
    (tmp_path / ".planning" / "phases" / "03-setup").mkdir(parents=True)

    payload = json.dumps({
        "source": ["03-01-SUMMARY.md"],
        "tests": [
            {"name": f"T{i}", "expected": f"E{i}"} for i in (1, 2, 3)
        ],
        "batches": [
            {"name": "B1", "tests": [1, 2]},
            {"name": "B2", "tests": [3]},
        ],
    })

    args = argparse.Namespace(phase="3")
    stdin_patch = mock.patch.object(_mod.sys, "stdin", io.StringIO(payload))
    with self._patch_git_root(tmp_path), stdin_patch:
        cmd_uat_init(args)

    out = capsys.readouterr().out
    for fragment in ("3 tests", "2 batches", "03-setup-UAT.md"):
        assert fragment in out
class TestCmdUatUpdate:
    """Tests for uat-update command.

    Each test stages a UAT markdown file under a temporary repo root,
    runs cmd_uat_update with an argparse.Namespace mimicking the CLI,
    and re-parses the file with UATFile to check the mutation.
    """

    def _patch_git_root(self, tmp_path):
        """Patch find_git_root so the command resolves paths under tmp_path."""
        return mock.patch.object(_mod, "find_git_root", return_value=tmp_path)

    def _setup_uat(self, tmp_path, content=UAT_FIXTURE):
        """Write a UAT file for phase 05-auth and return its path."""
        phase_dir = tmp_path / ".planning" / "phases" / "05-auth"
        phase_dir.mkdir(parents=True)
        uat_path = phase_dir / "05-auth-UAT.md"
        uat_path.write_text(content)
        return uat_path

    def test_update_test_result_pass(self, tmp_path, capsys):
        """Setting result=pass on a test persists and is echoed with progress."""
        uat_path = self._setup_uat(tmp_path)

        args = argparse.Namespace(
            phase="5", test=4, batch=None, session=False,
            append_fix=False, append_assumption=False,
            fields=["result=pass"],
        )
        with self._patch_git_root(tmp_path):
            cmd_uat_update(args)

        captured = capsys.readouterr()
        assert "Updated test 4" in captured.out
        assert "Progress:" in captured.out

        uat = UATFile.parse(uat_path.read_text())
        t4 = next(t for t in uat.tests if t["num"] == "4")
        assert t4["result"] == "pass"

    def test_update_test_issue_with_fields(self, tmp_path, capsys):
        """Multiple key=value fields are all written onto the target test."""
        uat_path = self._setup_uat(tmp_path)

        args = argparse.Namespace(
            phase="5", test=4, batch=None, session=False,
            append_fix=False, append_assumption=False,
            fields=["result=issue", "severity=major", "fix_status=investigating", "retry_count=0"],
        )
        with self._patch_git_root(tmp_path):
            cmd_uat_update(args)

        uat = UATFile.parse(uat_path.read_text())
        t4 = next(t for t in uat.tests if t["num"] == "4")
        assert t4["result"] == "issue"
        assert t4["severity"] == "major"
        assert t4["fix_status"] == "investigating"

    def test_update_batch_complete(self, tmp_path, capsys):
        """--batch targets a batch entry rather than a test entry."""
        uat_path = self._setup_uat(tmp_path)

        args = argparse.Namespace(
            phase="5", test=None, batch=2, session=False,
            append_fix=False, append_assumption=False,
            fields=["status=complete", "passed=1", "issues=1"],
        )
        with self._patch_git_root(tmp_path):
            cmd_uat_update(args)

        uat = UATFile.parse(uat_path.read_text())
        b2 = next(b for b in uat.batches if b["num"] == "2")
        assert b2["status"] == "complete"
        assert b2["passed"] == "1"

    def test_update_session_frontmatter(self, tmp_path, capsys):
        """--session writes fields into the document frontmatter."""
        uat_path = self._setup_uat(tmp_path)

        args = argparse.Namespace(
            phase="5", test=None, batch=None, session=True,
            append_fix=False, append_assumption=False,
            fields=["status=fixing"],
        )
        with self._patch_git_root(tmp_path):
            cmd_uat_update(args)

        uat = UATFile.parse(uat_path.read_text())
        assert uat.frontmatter["status"] == "fixing"

    def test_append_fix_from_stdin(self, tmp_path, capsys):
        """--append-fix reads a JSON fix record from stdin and appends it."""
        uat_path = self._setup_uat(tmp_path)

        fix_json = json.dumps({
            "commit": "xyz789",
            "test": 4,
            "description": "Fixed token expiry redirect",
            "files": ["auth_middleware.dart"],
        })

        args = argparse.Namespace(
            phase="5", test=None, batch=None, session=False,
            append_fix=True, append_assumption=False,
            fields=[],
        )
        with self._patch_git_root(tmp_path), \
                mock.patch.object(_mod.sys, "stdin", io.StringIO(fix_json)):
            cmd_uat_update(args)

        uat = UATFile.parse(uat_path.read_text())
        # UAT_FIXTURE already contains one fix; the new one lands after it.
        assert len(uat.fixes) == 2
        assert uat.fixes[1]["commit"] == "xyz789"

    def test_append_assumption_from_stdin(self, tmp_path, capsys):
        """--append-assumption reads a JSON assumption record from stdin."""
        uat_path = self._setup_uat(tmp_path)

        assumption_json = json.dumps({
            "test": 5,
            "name": "Premium feature access",
            "expected": "Shows upgrade prompt",
            "reason": "No premium account available",
        })

        args = argparse.Namespace(
            phase="5", test=None, batch=None, session=False,
            append_fix=False, append_assumption=True,
            fields=[],
        )
        with self._patch_git_root(tmp_path), \
                mock.patch.object(_mod.sys, "stdin", io.StringIO(assumption_json)):
            cmd_uat_update(args)

        uat = UATFile.parse(uat_path.read_text())
        assert len(uat.assumptions) == 1
        assert uat.assumptions[0]["test"] == "5"

    def test_progress_auto_recalculated(self, tmp_path, capsys):
        """Updating a test result recomputes the progress counters.

        Stages the minimal fixture directly for phase 03-setup.  (A prior
        version also wrote an unrelated 05-auth fixture via _setup_uat and
        never used its return value; that dead setup is gone.)
        """
        phase_dir = tmp_path / ".planning" / "phases" / "03-setup"
        phase_dir.mkdir(parents=True, exist_ok=True)
        (phase_dir / "03-setup-UAT.md").write_text(UAT_MINIMAL)

        args = argparse.Namespace(
            phase="3", test=1, batch=None, session=False,
            append_fix=False, append_assumption=False,
            fields=["result=pass"],
        )

        with self._patch_git_root(tmp_path):
            cmd_uat_update(args)

        uat = UATFile.parse((phase_dir / "03-setup-UAT.md").read_text())
        assert uat.progress["passed"] == "1"
        assert uat.progress["tested"] == "1"
        assert uat.progress["pending"] == "1"
class TestCmdUatStatus:
|
|
1634
|
-
"""Tests for uat-status command."""
|
|
1635
|
-
|
|
1636
|
-
def _patch_git_root(self, tmp_path):
    """Make the module under test treat tmp_path as the git repository root."""
    patcher = mock.patch.object(_mod, "find_git_root", return_value=tmp_path)
    return patcher
def _setup_uat(self, tmp_path, content=UAT_FIXTURE):
    """Create .planning/phases/05-auth/05-auth-UAT.md holding *content*."""
    uat_path = tmp_path / ".planning" / "phases" / "05-auth" / "05-auth-UAT.md"
    uat_path.parent.mkdir(parents=True)
    uat_path.write_text(content)
    return uat_path
def test_outputs_valid_json(self, tmp_path, capsys):
    """uat-status emits a machine-readable JSON summary of the session."""
    self._setup_uat(tmp_path)

    with self._patch_git_root(tmp_path):
        cmd_uat_status(argparse.Namespace(phase="5"))

    report = json.loads(capsys.readouterr().out)
    assert report["status"] == "testing"
    assert report["current_batch"] == 2
    assert report["total_batches"] == 3
    assert report["progress"]["total"] == 5
    assert report["progress"]["passed"] == 2
    assert isinstance(report["mocked_files"], list)
def test_fixing_tests_listed(self, tmp_path, capsys):
    """A test with an applied fix shows up in fixing_tests with its commit."""
    self._setup_uat(tmp_path)

    with self._patch_git_root(tmp_path):
        cmd_uat_status(argparse.Namespace(phase="5"))

    report = json.loads(capsys.readouterr().out)
    # In the fixture, test 3 carries fix_status=applied.
    fixing = report["fixing_tests"]
    assert len(fixing) == 1
    entry = fixing[0]
    assert entry["num"] == 3
    assert entry["fix_status"] == "applied"
    assert entry["fix_commit"] == "abc1234"
def test_fixing_tests_without_commit(self, tmp_path, capsys):
    """Tests with fix_status=investigating have empty fix_commit."""
    uat_path = self._setup_uat(tmp_path)
    # Flip test 3 to investigating and drop its fix_commit line entirely.
    text = uat_path.read_text()
    text = text.replace("fix_status: applied", "fix_status: investigating")
    text = text.replace("fix_commit: abc1234\n", "")
    uat_path.write_text(text)

    with self._patch_git_root(tmp_path):
        cmd_uat_status(argparse.Namespace(phase="5"))

    report = json.loads(capsys.readouterr().out)
    assert report["fixing_tests"][0]["fix_commit"] == ""
def test_missing_file_exits(self, tmp_path):
    """uat-status exits with code 1 when the phase has no UAT file."""
    (tmp_path / ".planning" / "phases" / "05-auth").mkdir(parents=True)

    with self._patch_git_root(tmp_path), pytest.raises(SystemExit) as exc:
        cmd_uat_status(argparse.Namespace(phase="5"))
    assert exc.value.code == 1