devforgeai 1.0.4 → 1.0.6
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CLAUDE.md +120 -0
- package/package.json +9 -1
- package/src/CLAUDE.md +699 -0
- package/src/claude/scripts/README.md +396 -0
- package/src/claude/scripts/audit-command-skill-overlap.sh +67 -0
- package/src/claude/scripts/check-hooks-fast.sh +70 -0
- package/src/claude/scripts/devforgeai-validate +6 -0
- package/src/claude/scripts/devforgeai_cli/README.md +531 -0
- package/src/claude/scripts/devforgeai_cli/__init__.py +12 -0
- package/src/claude/scripts/devforgeai_cli/cli.py +716 -0
- package/src/claude/scripts/devforgeai_cli/commands/__init__.py +1 -0
- package/src/claude/scripts/devforgeai_cli/commands/check_hooks.py +384 -0
- package/src/claude/scripts/devforgeai_cli/commands/invoke_hooks.py +149 -0
- package/src/claude/scripts/devforgeai_cli/commands/phase_commands.py +731 -0
- package/src/claude/scripts/devforgeai_cli/commands/validate_installation.py +412 -0
- package/src/claude/scripts/devforgeai_cli/context_extraction.py +426 -0
- package/src/claude/scripts/devforgeai_cli/feedback/AC_TO_TEST_MAPPING.md +636 -0
- package/src/claude/scripts/devforgeai_cli/feedback/DELIVERY_SUMMARY.txt +329 -0
- package/src/claude/scripts/devforgeai_cli/feedback/README_TEST_SPECS.md +486 -0
- package/src/claude/scripts/devforgeai_cli/feedback/TEST_IMPLEMENTATION_GUIDE.md +529 -0
- package/src/claude/scripts/devforgeai_cli/feedback/TEST_SPECIFICATIONS.md +2652 -0
- package/src/claude/scripts/devforgeai_cli/feedback/TEST_SPECS_INDEX.md +398 -0
- package/src/claude/scripts/devforgeai_cli/feedback/__init__.py +34 -0
- package/src/claude/scripts/devforgeai_cli/feedback/adaptive_questioning_engine.py +581 -0
- package/src/claude/scripts/devforgeai_cli/feedback/aggregation.py +179 -0
- package/src/claude/scripts/devforgeai_cli/feedback/commands.py +535 -0
- package/src/claude/scripts/devforgeai_cli/feedback/config_defaults.py +58 -0
- package/src/claude/scripts/devforgeai_cli/feedback/config_manager.py +423 -0
- package/src/claude/scripts/devforgeai_cli/feedback/config_models.py +192 -0
- package/src/claude/scripts/devforgeai_cli/feedback/config_schema.py +140 -0
- package/src/claude/scripts/devforgeai_cli/feedback/coverage.json +1 -0
- package/src/claude/scripts/devforgeai_cli/feedback/feature_flag.py +152 -0
- package/src/claude/scripts/devforgeai_cli/feedback/feedback_indexer.py +394 -0
- package/src/claude/scripts/devforgeai_cli/feedback/hot_reload.py +226 -0
- package/src/claude/scripts/devforgeai_cli/feedback/longitudinal.py +115 -0
- package/src/claude/scripts/devforgeai_cli/feedback/models.py +67 -0
- package/src/claude/scripts/devforgeai_cli/feedback/question_router.py +236 -0
- package/src/claude/scripts/devforgeai_cli/feedback/retrospective.py +233 -0
- package/src/claude/scripts/devforgeai_cli/feedback/skip_tracker.py +177 -0
- package/src/claude/scripts/devforgeai_cli/feedback/skip_tracking.py +221 -0
- package/src/claude/scripts/devforgeai_cli/feedback/template_engine.py +549 -0
- package/src/claude/scripts/devforgeai_cli/feedback/validation.py +163 -0
- package/src/claude/scripts/devforgeai_cli/headless/__init__.py +30 -0
- package/src/claude/scripts/devforgeai_cli/headless/answer_models.py +206 -0
- package/src/claude/scripts/devforgeai_cli/headless/answer_resolver.py +204 -0
- package/src/claude/scripts/devforgeai_cli/headless/exceptions.py +36 -0
- package/src/claude/scripts/devforgeai_cli/headless/pattern_matcher.py +156 -0
- package/src/claude/scripts/devforgeai_cli/hooks.py +313 -0
- package/src/claude/scripts/devforgeai_cli/metrics/__init__.py +46 -0
- package/src/claude/scripts/devforgeai_cli/metrics/command_metrics.py +142 -0
- package/src/claude/scripts/devforgeai_cli/metrics/failure_modes.py +152 -0
- package/src/claude/scripts/devforgeai_cli/metrics/story_segmentation.py +181 -0
- package/src/claude/scripts/devforgeai_cli/orchestrate_hooks.py +780 -0
- package/src/claude/scripts/devforgeai_cli/phase_state.py +1229 -0
- package/src/claude/scripts/devforgeai_cli/session/__init__.py +30 -0
- package/src/claude/scripts/devforgeai_cli/session/checkpoint.py +268 -0
- package/src/claude/scripts/devforgeai_cli/tests/__init__.py +1 -0
- package/src/claude/scripts/devforgeai_cli/tests/conftest.py +29 -0
- package/src/claude/scripts/devforgeai_cli/tests/feedback/TEST_EXECUTION_GUIDE.md +298 -0
- package/src/claude/scripts/devforgeai_cli/tests/feedback/__init__.py +3 -0
- package/src/claude/scripts/devforgeai_cli/tests/feedback/test_adaptive_questioning_engine.py +2171 -0
- package/src/claude/scripts/devforgeai_cli/tests/feedback/test_aggregation.py +476 -0
- package/src/claude/scripts/devforgeai_cli/tests/feedback/test_config_defaults.py +133 -0
- package/src/claude/scripts/devforgeai_cli/tests/feedback/test_config_manager.py +592 -0
- package/src/claude/scripts/devforgeai_cli/tests/feedback/test_config_models.py +373 -0
- package/src/claude/scripts/devforgeai_cli/tests/feedback/test_config_schema.py +130 -0
- package/src/claude/scripts/devforgeai_cli/tests/feedback/test_configuration_management.py +1355 -0
- package/src/claude/scripts/devforgeai_cli/tests/feedback/test_edge_cases.py +308 -0
- package/src/claude/scripts/devforgeai_cli/tests/feedback/test_feature_flag.py +307 -0
- package/src/claude/scripts/devforgeai_cli/tests/feedback/test_feedback_indexer.py +384 -0
- package/src/claude/scripts/devforgeai_cli/tests/feedback/test_hot_reload.py +580 -0
- package/src/claude/scripts/devforgeai_cli/tests/feedback/test_integration.py +402 -0
- package/src/claude/scripts/devforgeai_cli/tests/feedback/test_models.py +105 -0
- package/src/claude/scripts/devforgeai_cli/tests/feedback/test_question_routing.py +262 -0
- package/src/claude/scripts/devforgeai_cli/tests/feedback/test_retrospective.py +333 -0
- package/src/claude/scripts/devforgeai_cli/tests/feedback/test_skip_tracker.py +410 -0
- package/src/claude/scripts/devforgeai_cli/tests/feedback/test_skip_tracking.py +159 -0
- package/src/claude/scripts/devforgeai_cli/tests/feedback/test_skip_tracking_integration.py +1155 -0
- package/src/claude/scripts/devforgeai_cli/tests/feedback/test_template_engine.py +1389 -0
- package/src/claude/scripts/devforgeai_cli/tests/feedback/test_validation_comprehensive.py +210 -0
- package/src/claude/scripts/devforgeai_cli/tests/fixtures/autonomous-deferral-story.md +46 -0
- package/src/claude/scripts/devforgeai_cli/tests/fixtures/missing-impl-notes.md +31 -0
- package/src/claude/scripts/devforgeai_cli/tests/fixtures/valid-deferral-story.md +46 -0
- package/src/claude/scripts/devforgeai_cli/tests/fixtures/valid-story-complete.md +48 -0
- package/src/claude/scripts/devforgeai_cli/tests/manual_test_invoke_hooks.sh +200 -0
- package/src/claude/scripts/devforgeai_cli/tests/session/DELIVERABLES.md +518 -0
- package/src/claude/scripts/devforgeai_cli/tests/session/TEST_SUMMARY.md +468 -0
- package/src/claude/scripts/devforgeai_cli/tests/session/__init__.py +6 -0
- package/src/claude/scripts/devforgeai_cli/tests/session/fixtures/corrupted-checkpoint.json +1 -0
- package/src/claude/scripts/devforgeai_cli/tests/session/fixtures/missing-fields-checkpoint.json +4 -0
- package/src/claude/scripts/devforgeai_cli/tests/session/fixtures/valid-checkpoint.json +15 -0
- package/src/claude/scripts/devforgeai_cli/tests/session/test_checkpoint.py +851 -0
- package/src/claude/scripts/devforgeai_cli/tests/test_check_hooks.py +1886 -0
- package/src/claude/scripts/devforgeai_cli/tests/test_depends_on_normalizer.py +171 -0
- package/src/claude/scripts/devforgeai_cli/tests/test_dod_validator.py +97 -0
- package/src/claude/scripts/devforgeai_cli/tests/test_invoke_hooks.py +1902 -0
- package/src/claude/scripts/devforgeai_cli/tests/test_phase_commands.py +320 -0
- package/src/claude/scripts/devforgeai_cli/tests/test_phase_commands_error_handling.py +1021 -0
- package/src/claude/scripts/devforgeai_cli/tests/test_phase_commands_import.py +697 -0
- package/src/claude/scripts/devforgeai_cli/tests/test_phase_state.py +2187 -0
- package/src/claude/scripts/devforgeai_cli/tests/test_skip_tracking.py +2141 -0
- package/src/claude/scripts/devforgeai_cli/tests/test_skip_tracking_coverage_gap.py +195 -0
- package/src/claude/scripts/devforgeai_cli/tests/test_subagent_enforcement.py +539 -0
- package/src/claude/scripts/devforgeai_cli/tests/test_validate_installation.py +361 -0
- package/src/claude/scripts/devforgeai_cli/utils/__init__.py +11 -0
- package/src/claude/scripts/devforgeai_cli/utils/depends_on_normalizer.py +149 -0
- package/src/claude/scripts/devforgeai_cli/utils/markdown_parser.py +219 -0
- package/src/claude/scripts/devforgeai_cli/utils/story_analyzer.py +249 -0
- package/src/claude/scripts/devforgeai_cli/utils/yaml_parser.py +152 -0
- package/src/claude/scripts/devforgeai_cli/validators/__init__.py +27 -0
- package/src/claude/scripts/devforgeai_cli/validators/ast_grep_validator.py +373 -0
- package/src/claude/scripts/devforgeai_cli/validators/context_validator.py +180 -0
- package/src/claude/scripts/devforgeai_cli/validators/dod_validator.py +309 -0
- package/src/claude/scripts/devforgeai_cli/validators/git_validator.py +107 -0
- package/src/claude/scripts/devforgeai_cli/validators/grep_fallback.py +300 -0
- package/src/claude/scripts/install_hooks.sh +186 -0
- package/src/claude/scripts/invoke_feedback_hooks.sh +59 -0
- package/src/claude/scripts/migrate-ac-headers.sh +122 -0
- package/src/claude/scripts/plan_file_kb.sh +704 -0
- package/src/claude/scripts/requirements.txt +8 -0
- package/src/claude/scripts/session_catalog.sh +543 -0
- package/src/claude/scripts/setup.py +55 -0
- package/src/claude/scripts/start-devforgeai.sh +16 -0
- package/src/claude/scripts/statusline.sh +27 -0
- package/src/claude/scripts/validate_deferrals.py +344 -0
- package/src/claude/skills/devforgeai-qa/SKILL.md +1 -1
- package/src/claude/skills/researching-market/SKILL.md +2 -1
- package/src/cli/lib/copier.js +13 -1
- package/src/claude/skills/designing-systems/scripts/__pycache__/detect_anti_patterns.cpython-312.pyc +0 -0
- package/src/claude/skills/designing-systems/scripts/__pycache__/validate_all_context.cpython-312.pyc +0 -0
- package/src/claude/skills/designing-systems/scripts/__pycache__/validate_architecture.cpython-312.pyc +0 -0
- package/src/claude/skills/designing-systems/scripts/__pycache__/validate_dependencies.cpython-312.pyc +0 -0
- package/src/claude/skills/devforgeai-story-creation/scripts/__pycache__/migrate_story_v1_to_v2.cpython-312.pyc +0 -0
- package/src/claude/skills/devforgeai-story-creation/scripts/tests/__pycache__/measure_accuracy.cpython-312.pyc +0 -0
|
@@ -0,0 +1,1229 @@
|
|
|
1
|
+
"""
|
|
2
|
+
PhaseState Module - Workflow Phase Tracking for DevForgeAI CLI
|
|
3
|
+
|
|
4
|
+
This module provides the PhaseState class for managing workflow phase tracking
|
|
5
|
+
state files during /dev workflow execution. It enforces sequential phase
|
|
6
|
+
execution and provides atomic, concurrent-safe file operations.
|
|
7
|
+
|
|
8
|
+
STORY-253: Create PhaseState Module in Correct Location
|
|
9
|
+
Source RCA: RCA-001-phase-state-module-missing.md
|
|
10
|
+
|
|
11
|
+
Usage:
|
|
12
|
+
from devforgeai_cli.phase_state import PhaseState
|
|
13
|
+
|
|
14
|
+
ps = PhaseState(project_root=Path("/path/to/project"))
|
|
15
|
+
state = ps.create(story_id="STORY-001")
|
|
16
|
+
state = ps.complete_phase(story_id="STORY-001", phase="01", checkpoint_passed=True)
|
|
17
|
+
|
|
18
|
+
Platform Support:
|
|
19
|
+
- Windows 10+: Uses msvcrt for file locking
|
|
20
|
+
- macOS 11+: Uses fcntl for file locking
|
|
21
|
+
- Linux (Ubuntu/Debian/RHEL): Uses fcntl for file locking
|
|
22
|
+
- WSL 1/2: Uses fcntl for file locking
|
|
23
|
+
"""
|
|
24
|
+
|
|
25
|
+
from __future__ import annotations
|
|
26
|
+
|
|
27
|
+
import json
|
|
28
|
+
import logging
|
|
29
|
+
import os
|
|
30
|
+
import re
|
|
31
|
+
import shutil
|
|
32
|
+
import tempfile
|
|
33
|
+
import time
|
|
34
|
+
import uuid
|
|
35
|
+
from datetime import datetime, timezone
|
|
36
|
+
from pathlib import Path
|
|
37
|
+
from typing import Any, Dict, List, Optional, Union
|
|
38
|
+
|
|
39
|
+
# Platform-aware file locking imports
|
|
40
|
+
if os.name == 'posix':
|
|
41
|
+
import fcntl
|
|
42
|
+
_HAS_FCNTL = True
|
|
43
|
+
else:
|
|
44
|
+
_HAS_FCNTL = False
|
|
45
|
+
|
|
46
|
+
if os.name == 'nt':
|
|
47
|
+
try:
|
|
48
|
+
import msvcrt
|
|
49
|
+
_HAS_MSVCRT = True
|
|
50
|
+
except ImportError:
|
|
51
|
+
_HAS_MSVCRT = False
|
|
52
|
+
else:
|
|
53
|
+
_HAS_MSVCRT = False
|
|
54
|
+
|
|
55
|
+
logger = logging.getLogger(__name__)
|
|
56
|
+
|
|
57
|
+
|
|
58
|
+
# =============================================================================
|
|
59
|
+
# Custom Exceptions
|
|
60
|
+
# =============================================================================
|
|
61
|
+
|
|
62
|
+
|
|
63
|
+
class PhaseStateError(Exception):
    """Root of the PhaseState exception hierarchy.

    Every error raised by the phase-state machinery derives from this
    class, so callers can handle all of them with one ``except`` clause.
    """
|
|
66
|
+
|
|
67
|
+
|
|
68
|
+
class PhaseNotFoundError(PhaseStateError):
    """Signals that a phase identifier is outside the known phase set."""

    def __init__(self, phase_id: str, message: Optional[str] = None):
        # Keep the offending id on the instance for programmatic handling.
        self.phase_id = phase_id
        default = (
            f"Invalid phase_id: '{phase_id}'. "
            "Valid phases are '01' through '10'."
        )
        super().__init__(message if message is not None else default)
|
|
79
|
+
|
|
80
|
+
|
|
81
|
+
class StateFileCorruptionError(PhaseStateError):
    """Signals that a story's state file could not be parsed as JSON."""

    def __init__(self, story_id: str, original_error: Optional[Exception] = None):
        # Story whose file is damaged, and the underlying parse error (if any),
        # preserved for callers that want to inspect the root cause.
        self.story_id = story_id
        self.original_error = original_error
        super().__init__(
            f"State file for '{story_id}' is corrupted. "
            "Recovery: Delete the file and re-run the workflow."
        )
|
|
92
|
+
|
|
93
|
+
|
|
94
|
+
class PhaseTransitionError(PhaseStateError):
    """Signals an attempt to complete workflow phases out of order."""

    def __init__(self, story_id: str, current_phase: str, attempted_phase: str):
        # Retain context so callers can report or recover programmatically.
        self.story_id = story_id
        self.current_phase = current_phase
        self.attempted_phase = attempted_phase
        super().__init__(
            f"Cannot complete phase '{attempted_phase}' for '{story_id}'. "
            f"Current phase is '{current_phase}'. "
            "Phases must be completed sequentially."
        )
|
|
112
|
+
|
|
113
|
+
|
|
114
|
+
class LockTimeoutError(PhaseStateError):
    """Signals that an exclusive file lock could not be obtained in time."""

    def __init__(self, file_path: Union[str, Path], timeout: float):
        # Path is normalized to str on the instance for uniform reporting.
        self.file_path = str(file_path)
        self.timeout = timeout
        super().__init__(
            f"Failed to acquire lock on '{file_path}' "
            f"after {timeout} seconds."
        )
|
|
125
|
+
|
|
126
|
+
|
|
127
|
+
class SubagentEnforcementError(PhaseStateError):
    """Signals that mandatory subagents were never invoked for a phase.

    STORY-306: Subagent Enforcement in Phase State Completion
    """

    def __init__(self, story_id: str, phase: str, missing_subagents: List[str]):
        # Keep the full context for callers that need to remediate.
        self.story_id = story_id
        self.phase = phase
        self.missing_subagents = missing_subagents
        super().__init__(
            f"Cannot complete phase '{phase}' for '{story_id}'. "
            f"Missing required subagents: {', '.join(missing_subagents)}"
        )
|
|
142
|
+
|
|
143
|
+
|
|
144
|
+
# =============================================================================
# Workflow Phase Schemas
# =============================================================================
# Each phase entry lists the steps that must run, the subagents that must be
# invoked, and a human-readable checkpoint description.
# A tuple inside "subagents_required" means OR logic: any one subagent in the
# tuple satisfies the requirement. Matches the SKILL.md "Required Subagents
# Per Phase" table (lines 167-181).

# Dev workflow phase schema (STORY-521). Includes decimal phases "4.5" and
# "5.5" for post-refactor / post-integration AC verification.
DEV_PHASES: Dict[str, Dict[str, Any]] = {
    "01": {"steps_required": ["git_validation", "context_validation", "tech_stack_detection"], "subagents_required": ["git-validator", "tech-stack-detector"], "checkpoint_description": "Pre-flight validation complete"},
    "02": {"steps_required": ["test_generation", "test_failure_verification"], "subagents_required": ["test-automator"], "checkpoint_description": "Failing tests written (Red)"},
    "03": {"steps_required": ["implementation", "test_pass_verification"], "subagents_required": [("backend-architect", "frontend-developer"), "context-validator"], "checkpoint_description": "Tests passing (Green)"},
    "04": {"steps_required": ["refactoring", "code_review"], "subagents_required": ["refactoring-specialist", "code-reviewer"], "checkpoint_description": "Code refactored and reviewed"},
    "4.5": {"steps_required": ["ac_verification"], "subagents_required": ["ac-compliance-verifier"], "checkpoint_description": "AC verification post-refactor"},
    "05": {"steps_required": ["integration_testing"], "subagents_required": ["integration-tester"], "checkpoint_description": "Integration tests passing"},
    "5.5": {"steps_required": ["ac_verification"], "subagents_required": ["ac-compliance-verifier"], "checkpoint_description": "AC verification post-integration"},
    "06": {"steps_required": ["deferral_review"], "subagents_required": [], "checkpoint_description": "Deferral challenge complete"},
    "07": {"steps_required": ["dod_update"], "subagents_required": [], "checkpoint_description": "DoD updated"},
    "08": {"steps_required": ["git_commit"], "subagents_required": [], "checkpoint_description": "Changes committed"},
    "09": {"steps_required": ["feedback_capture"], "subagents_required": ["framework-analyst"], "checkpoint_description": "Feedback captured"},
    "10": {"steps_required": ["result_interpretation"], "subagents_required": ["dev-result-interpreter"], "checkpoint_description": "Result interpreted"},
}

# QA workflow phase schema (STORY-517). Six sequential phases, no decimals.
QA_PHASES: Dict[str, Dict[str, Any]] = {
    "01": {"steps_required": ["setup_validation", "story_file_loading"], "subagents_required": [], "checkpoint_description": "QA setup and story loading complete"},
    "02": {"steps_required": ["constraint_validation", "anti_pattern_scan", "security_audit"], "subagents_required": ["anti-pattern-scanner", "security-auditor"], "checkpoint_description": "Constraint and security validation complete"},
    "03": {"steps_required": ["diff_regression_detection", "test_integrity_verification"], "subagents_required": [], "checkpoint_description": "Diff regression and test integrity verified"},
    "04": {"steps_required": ["coverage_analysis", "code_quality_metrics"], "subagents_required": ["coverage-analyzer", "code-quality-auditor"], "checkpoint_description": "Coverage and quality analysis complete"},
    "05": {"steps_required": ["report_generation", "result_determination"], "subagents_required": ["qa-result-interpreter"], "checkpoint_description": "QA report generated"},
    "06": {"steps_required": ["cleanup", "state_preservation"], "subagents_required": [], "checkpoint_description": "QA cleanup complete"},
}

# Flat list of valid QA phase ids, in execution order.
QA_VALID_PHASES: List[str] = ["01", "02", "03", "04", "05", "06"]

# =============================================================================
# Workflow Schemas Registry (STORY-521)
# =============================================================================
# Maps each workflow name to its phase schema and ordered list of valid ids.

WORKFLOW_SCHEMAS: Dict[str, Dict[str, Any]] = {
    "dev": {
        "phases": DEV_PHASES,
        "valid_phases": ["01", "02", "03", "04", "4.5", "05", "5.5", "06", "07", "08", "09", "10"],
    },
    "qa": {
        "phases": QA_PHASES,
        "valid_phases": ["01", "02", "03", "04", "05", "06"],
    },
}

# Names of all registered workflows (derived from the registry keys).
VALID_WORKFLOWS: List[str] = list(WORKFLOW_SCHEMAS.keys())


# =============================================================================
# Required Subagents Per Phase (STORY-306)
# =============================================================================
# Tuple indicates OR logic: any one subagent in the tuple satisfies the
# requirement. Used to populate "subagents_required" in new state files.
PHASE_REQUIRED_SUBAGENTS: Dict[str, List[Union[str, tuple]]] = {
    "01": ["git-validator", "tech-stack-detector"],
    "02": ["test-automator"],
    "03": [("backend-architect", "frontend-developer"), "context-validator"],  # tuple = OR
    "04": ["refactoring-specialist", "code-reviewer"],
    "4.5": ["ac-compliance-verifier"],
    "05": ["integration-tester"],
    "5.5": ["ac-compliance-verifier"],
    "06": [],  # deferral-validator is conditional
    "07": [],  # no required subagents (file operations)
    "08": [],  # no required subagents (git operations)
    "09": ["framework-analyst"],  # RCA-027 fix
    "10": ["dev-result-interpreter"],
}
|
|
216
|
+
|
|
217
|
+
|
|
218
|
+
# =============================================================================
|
|
219
|
+
# PhaseState Class
|
|
220
|
+
# =============================================================================
|
|
221
|
+
|
|
222
|
+
|
|
223
|
+
class PhaseState:
|
|
224
|
+
"""
|
|
225
|
+
Manages workflow phase tracking state files for DevForgeAI.
|
|
226
|
+
|
|
227
|
+
Provides atomic, concurrent-safe operations for creating, reading,
|
|
228
|
+
and updating phase state during /dev workflow execution.
|
|
229
|
+
|
|
230
|
+
Attributes:
|
|
231
|
+
project_root: Path to the project root directory
|
|
232
|
+
workflows_dir: Path to the workflows directory (project_root/devforgeai/workflows)
|
|
233
|
+
|
|
234
|
+
Example:
|
|
235
|
+
>>> ps = PhaseState(project_root=Path("/my/project"))
|
|
236
|
+
>>> state = ps.create(story_id="STORY-001")
|
|
237
|
+
>>> print(state["current_phase"]) # "01"
|
|
238
|
+
"""
|
|
239
|
+
|
|
240
|
+
    # Valid phase IDs for the /dev workflow, in execution order.
    # Includes decimal phases 4.5 and 5.5 for AC verification.
    VALID_PHASES: List[str] = [
        "01", "02", "03", "04", "4.5", "05", "5.5",
        "06", "07", "08", "09", "10"
    ]

    # Valid observation categories (used when recording workflow observations)
    VALID_CATEGORIES: List[str] = ["friction", "gap", "success", "pattern"]

    # Valid observation severities
    VALID_SEVERITIES: List[str] = ["low", "medium", "high"]

    # Story ID pattern: "STORY-" followed by exactly three digits
    STORY_ID_PATTERN: re.Pattern = re.compile(r'^STORY-\d{3}$')

    # Maximum time to wait for an exclusive file lock, in seconds
    LOCK_TIMEOUT: float = 5.0

    # Maximum length accepted for a free-text note, in characters
    MAX_NOTE_LENGTH: int = 1000
|
|
260
|
+
|
|
261
|
+
def __init__(self, project_root: Path) -> None:
|
|
262
|
+
"""
|
|
263
|
+
Initialize PhaseState with project root directory.
|
|
264
|
+
|
|
265
|
+
Args:
|
|
266
|
+
project_root: Path to the project root directory
|
|
267
|
+
|
|
268
|
+
Example:
|
|
269
|
+
>>> ps = PhaseState(project_root=Path("/my/project"))
|
|
270
|
+
>>> ps.workflows_dir
|
|
271
|
+
PosixPath('/my/project/devforgeai/workflows')
|
|
272
|
+
"""
|
|
273
|
+
self.project_root = Path(project_root)
|
|
274
|
+
self.workflows_dir = self.project_root / "devforgeai" / "workflows"
|
|
275
|
+
logger.debug(f"PhaseState initialized: project_root={project_root}")
|
|
276
|
+
|
|
277
|
+
def _validate_story_id(self, story_id: str) -> None:
|
|
278
|
+
"""
|
|
279
|
+
Validate story ID format and check for path traversal.
|
|
280
|
+
|
|
281
|
+
Args:
|
|
282
|
+
story_id: Story identifier to validate
|
|
283
|
+
|
|
284
|
+
Raises:
|
|
285
|
+
ValueError: If story_id is invalid or contains path traversal
|
|
286
|
+
"""
|
|
287
|
+
# Check for path traversal attempts
|
|
288
|
+
if '..' in story_id or '/' in story_id or '\\' in story_id:
|
|
289
|
+
raise ValueError(
|
|
290
|
+
f"Invalid story_id: '{story_id}'. "
|
|
291
|
+
"Must match pattern STORY-XXX (e.g., STORY-001)"
|
|
292
|
+
)
|
|
293
|
+
|
|
294
|
+
# Check for null bytes (security)
|
|
295
|
+
if '\x00' in story_id:
|
|
296
|
+
raise ValueError(
|
|
297
|
+
f"Invalid story_id: '{story_id}'. "
|
|
298
|
+
"Must match pattern STORY-XXX (e.g., STORY-001)"
|
|
299
|
+
)
|
|
300
|
+
|
|
301
|
+
# Check pattern match
|
|
302
|
+
if not self.STORY_ID_PATTERN.match(story_id):
|
|
303
|
+
raise ValueError(
|
|
304
|
+
f"Invalid story_id: '{story_id}'. "
|
|
305
|
+
"Must match pattern STORY-XXX (e.g., STORY-001)"
|
|
306
|
+
)
|
|
307
|
+
|
|
308
|
+
def _validate_phase_id(self, phase_id: str) -> None:
|
|
309
|
+
"""
|
|
310
|
+
Validate phase ID is in valid range.
|
|
311
|
+
|
|
312
|
+
Args:
|
|
313
|
+
phase_id: Phase identifier to validate
|
|
314
|
+
|
|
315
|
+
Raises:
|
|
316
|
+
PhaseNotFoundError: If phase_id is not valid
|
|
317
|
+
"""
|
|
318
|
+
if phase_id not in self.VALID_PHASES:
|
|
319
|
+
raise PhaseNotFoundError(phase_id)
|
|
320
|
+
|
|
321
|
+
def _get_state_path(self, story_id: str) -> Path:
|
|
322
|
+
"""
|
|
323
|
+
Get the path to the state file for a story.
|
|
324
|
+
|
|
325
|
+
Args:
|
|
326
|
+
story_id: Story identifier
|
|
327
|
+
|
|
328
|
+
Returns:
|
|
329
|
+
Path to the state file
|
|
330
|
+
|
|
331
|
+
Example:
|
|
332
|
+
>>> ps = PhaseState(project_root=Path("/project"))
|
|
333
|
+
>>> ps._get_state_path("STORY-001")
|
|
334
|
+
PosixPath('/project/devforgeai/workflows/STORY-001-phase-state.json')
|
|
335
|
+
"""
|
|
336
|
+
return self.workflows_dir / f"{story_id}-phase-state.json"
|
|
337
|
+
|
|
338
|
+
def _serialize_subagents_required(
|
|
339
|
+
self, items: List[Union[str, tuple, list]]
|
|
340
|
+
) -> List[Union[str, List[str]]]:
|
|
341
|
+
"""
|
|
342
|
+
Convert subagent requirement items to JSON-serializable form.
|
|
343
|
+
|
|
344
|
+
Tuples represent OR-groups (any one satisfies the requirement) and
|
|
345
|
+
must be converted to lists for JSON serialization.
|
|
346
|
+
|
|
347
|
+
Args:
|
|
348
|
+
items: List of subagent names (str) or OR-groups (tuple/list)
|
|
349
|
+
|
|
350
|
+
Returns:
|
|
351
|
+
List with tuples converted to lists, other items unchanged
|
|
352
|
+
"""
|
|
353
|
+
result = []
|
|
354
|
+
for item in items:
|
|
355
|
+
if isinstance(item, tuple):
|
|
356
|
+
result.append(list(item))
|
|
357
|
+
else:
|
|
358
|
+
result.append(item)
|
|
359
|
+
return result
|
|
360
|
+
|
|
361
|
+
def _create_initial_state(self, story_id: str) -> Dict[str, Any]:
|
|
362
|
+
"""
|
|
363
|
+
Create the initial state dictionary for a new story.
|
|
364
|
+
|
|
365
|
+
Args:
|
|
366
|
+
story_id: Story identifier
|
|
367
|
+
|
|
368
|
+
Returns:
|
|
369
|
+
Initial state dictionary
|
|
370
|
+
"""
|
|
371
|
+
now = datetime.now(timezone.utc).strftime("%Y-%m-%dT%H:%M:%SZ")
|
|
372
|
+
|
|
373
|
+
phases = {}
|
|
374
|
+
for phase in self.VALID_PHASES:
|
|
375
|
+
# Populate subagents_required from constant (AC2 - STORY-306)
|
|
376
|
+
required = self._serialize_subagents_required(
|
|
377
|
+
PHASE_REQUIRED_SUBAGENTS.get(phase, [])
|
|
378
|
+
)
|
|
379
|
+
|
|
380
|
+
phases[phase] = {
|
|
381
|
+
"status": "pending",
|
|
382
|
+
"subagents_required": required,
|
|
383
|
+
"subagents_invoked": []
|
|
384
|
+
}
|
|
385
|
+
|
|
386
|
+
return {
|
|
387
|
+
"story_id": story_id,
|
|
388
|
+
"current_phase": "01",
|
|
389
|
+
"workflow_started": now,
|
|
390
|
+
"blocking_status": False,
|
|
391
|
+
"phases": phases,
|
|
392
|
+
"validation_errors": [],
|
|
393
|
+
"observations": []
|
|
394
|
+
}
|
|
395
|
+
|
|
396
|
+
def _acquire_lock(self, fd: int, timeout: float) -> bool:
|
|
397
|
+
"""
|
|
398
|
+
Acquire an exclusive lock on a file descriptor.
|
|
399
|
+
|
|
400
|
+
Uses platform-aware locking:
|
|
401
|
+
- Unix (Linux/macOS): fcntl.flock with LOCK_EX | LOCK_NB
|
|
402
|
+
- Windows: msvcrt.locking with LK_NBLCK
|
|
403
|
+
- Fallback: No locking (last-write-wins)
|
|
404
|
+
|
|
405
|
+
Args:
|
|
406
|
+
fd: File descriptor
|
|
407
|
+
timeout: Maximum time to wait for lock
|
|
408
|
+
|
|
409
|
+
Returns:
|
|
410
|
+
True if lock acquired, False if locking not available
|
|
411
|
+
|
|
412
|
+
Raises:
|
|
413
|
+
LockTimeoutError: If lock cannot be acquired within timeout
|
|
414
|
+
"""
|
|
415
|
+
start_time = time.time()
|
|
416
|
+
|
|
417
|
+
while True:
|
|
418
|
+
elapsed = time.time() - start_time
|
|
419
|
+
if elapsed >= timeout:
|
|
420
|
+
raise LockTimeoutError("<file>", timeout)
|
|
421
|
+
|
|
422
|
+
try:
|
|
423
|
+
if _HAS_FCNTL:
|
|
424
|
+
fcntl.flock(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
|
|
425
|
+
return True
|
|
426
|
+
elif _HAS_MSVCRT:
|
|
427
|
+
msvcrt.locking(fd, msvcrt.LK_NBLCK, 1)
|
|
428
|
+
return True
|
|
429
|
+
else:
|
|
430
|
+
# No locking available - proceed without lock
|
|
431
|
+
logger.warning("File locking not available on this platform")
|
|
432
|
+
return False
|
|
433
|
+
except (IOError, OSError):
|
|
434
|
+
# Lock is held by another process, wait and retry
|
|
435
|
+
time.sleep(0.1)
|
|
436
|
+
|
|
437
|
+
    def _release_lock(self, fd: int) -> None:
        """
        Release a lock on a file descriptor.

        Uses the same platform-aware primitive that acquired the lock:
        fcntl.flock on POSIX, msvcrt.locking on Windows. Unlock failures
        are deliberately swallowed - the descriptor is typically closed
        immediately afterwards, which drops the lock anyway.

        Args:
            fd: File descriptor
        """
        try:
            if _HAS_FCNTL:
                fcntl.flock(fd, fcntl.LOCK_UN)
            elif _HAS_MSVCRT:
                # Unlock the same 1-byte region that _acquire_lock locked
                msvcrt.locking(fd, msvcrt.LK_UNLCK, 1)
        except (IOError, OSError):
            pass  # Ignore errors during unlock
|
|
451
|
+
|
|
452
|
+
def _atomic_write(self, path: Path, data: Dict[str, Any]) -> None:
|
|
453
|
+
"""
|
|
454
|
+
Write data to file atomically using temp file + rename.
|
|
455
|
+
|
|
456
|
+
Args:
|
|
457
|
+
path: Target file path
|
|
458
|
+
data: Dictionary to write as JSON
|
|
459
|
+
"""
|
|
460
|
+
# Ensure parent directory exists
|
|
461
|
+
path.parent.mkdir(parents=True, exist_ok=True)
|
|
462
|
+
|
|
463
|
+
# Write to temp file in same directory
|
|
464
|
+
fd, temp_path = tempfile.mkstemp(
|
|
465
|
+
dir=str(path.parent),
|
|
466
|
+
suffix='.tmp',
|
|
467
|
+
prefix=path.stem
|
|
468
|
+
)
|
|
469
|
+
|
|
470
|
+
try:
|
|
471
|
+
with os.fdopen(fd, 'w') as f:
|
|
472
|
+
json.dump(data, f, indent=2)
|
|
473
|
+
|
|
474
|
+
# Atomic rename
|
|
475
|
+
shutil.move(temp_path, str(path))
|
|
476
|
+
|
|
477
|
+
# Set permissions (0644)
|
|
478
|
+
try:
|
|
479
|
+
os.chmod(str(path), 0o644)
|
|
480
|
+
except OSError:
|
|
481
|
+
pass # Ignore permission errors on some platforms
|
|
482
|
+
|
|
483
|
+
except Exception:
|
|
484
|
+
# Clean up temp file on error
|
|
485
|
+
if os.path.exists(temp_path):
|
|
486
|
+
os.unlink(temp_path)
|
|
487
|
+
raise
|
|
488
|
+
|
|
489
|
+
def _read_state(self, path: Path) -> Dict[str, Any]:
    """
    Load and validate a phase-state JSON file.

    The story id is recovered from the filename for error reporting.
    Empty files and malformed JSON are both reported as corruption.
    Older state files are back-filled with any phases missing from
    their ``phases`` dictionary (decimal phases 4.5/5.5 were added
    after some files were created).

    Args:
        path: Path to state file

    Returns:
        Parsed state dictionary

    Raises:
        StateFileCorruptionError: If JSON is malformed or empty
    """
    story_id = path.stem.replace("-phase-state", "")

    content = path.read_text()

    # A blank (or whitespace-only) file is treated as corruption.
    if not content.strip():
        raise StateFileCorruptionError(story_id)

    try:
        state = json.loads(content)
    except json.JSONDecodeError as e:
        raise StateFileCorruptionError(story_id, e)

    # Backward compatibility: guarantee every VALID_PHASES entry exists.
    self._ensure_phases_exist(state)
    return state
|
|
521
|
+
|
|
522
|
+
def _ensure_phases_exist(self, state: Dict[str, Any]) -> None:
    """
    Back-fill missing or incomplete phase entries in *state* (in-place).

    Guarantees every phase in VALID_PHASES has an entry, for backward
    compatibility with state files created before decimal phases
    (4.5, 5.5) were added. Legacy entries with an empty
    subagents_required list are re-populated from
    PHASE_REQUIRED_SUBAGENTS (AC8 - STORY-306).

    Args:
        state: State dictionary to update in-place
    """
    phases = state.get("phases", {})

    for phase in self.VALID_PHASES:
        entry = phases.get(phase)
        if entry is None:
            # Missing entirely: add a pending entry with the registry's
            # required subagents.
            phases[phase] = {
                "status": "pending",
                "subagents_required": self._serialize_subagents_required(
                    PHASE_REQUIRED_SUBAGENTS.get(phase, [])
                ),
                "subagents_invoked": []
            }
        elif not entry.get("subagents_required"):
            # Legacy migration: populate empty subagents_required (AC8).
            entry["subagents_required"] = self._serialize_subagents_required(
                PHASE_REQUIRED_SUBAGENTS.get(phase, [])
            )

    state["phases"] = phases
|
|
555
|
+
|
|
556
|
+
def create(self, story_id: str) -> Dict[str, Any]:
    """
    Create a new phase state file for a story (idempotent).

    An already-existing state file is returned unchanged rather than
    overwritten.

    Args:
        story_id: Story identifier (must match STORY-XXX pattern)

    Returns:
        The state dictionary (new or existing)

    Raises:
        ValueError: If story_id is invalid

    Example:
        >>> ps = PhaseState(project_root=Path("/project"))
        >>> state = ps.create("STORY-001")
        >>> state["current_phase"]
        '01'
    """
    self._validate_story_id(story_id)
    path = self._get_state_path(story_id)

    # Idempotent: hand back the existing state untouched.
    if path.exists():
        logger.debug(f"State file already exists for {story_id}")
        return self._read_state(path)

    new_state = self._create_initial_state(story_id)
    self._atomic_write(path, new_state)

    logger.info(f"Created phase state for {story_id}")
    return new_state
|
|
593
|
+
|
|
594
|
+
def read(self, story_id: str) -> Optional[Dict[str, Any]]:
    """
    Read the phase state for a story, or None when no file exists.

    Args:
        story_id: Story identifier

    Returns:
        State dictionary if file exists, None otherwise

    Raises:
        ValueError: If story_id is invalid
        StateFileCorruptionError: If state file is malformed

    Example:
        >>> ps = PhaseState(project_root=Path("/project"))
        >>> state = ps.read("STORY-001")
        >>> state is None  # If file doesn't exist
        True
    """
    self._validate_story_id(story_id)
    state_path = self._get_state_path(story_id)
    return self._read_state(state_path) if state_path.exists() else None
|
|
622
|
+
|
|
623
|
+
def complete_phase(
    self,
    story_id: str,
    phase: str,
    checkpoint_passed: bool
) -> Dict[str, Any]:
    """
    Complete a phase with sequential enforcement.

    Only the current phase may be completed; afterwards current_phase
    advances to the next phase (except at the final phase). When
    checkpoint_passed is True, required subagents are enforced
    (AC3/AC4/AC6 - STORY-306); passing False acts as an escape hatch
    (AC5) that skips the check and is logged for audit.

    Args:
        story_id: Story identifier
        phase: Phase ID to complete (must be current phase)
        checkpoint_passed: Whether the phase checkpoint passed

    Returns:
        Updated state dictionary

    Raises:
        ValueError: If story_id is invalid
        PhaseNotFoundError: If phase is invalid
        PhaseTransitionError: If phase is not current phase
        SubagentEnforcementError: If required subagents were not invoked
        FileNotFoundError: If state file doesn't exist

    Example:
        >>> ps = PhaseState(project_root=Path("/project"))
        >>> state = ps.complete_phase("STORY-001", "01", checkpoint_passed=True)
        >>> state["current_phase"]
        '02'
    """
    self._validate_story_id(story_id)
    self._validate_phase_id(phase)

    path = self._get_state_path(story_id)
    if not path.exists():
        raise FileNotFoundError(f"State file not found for {story_id}")

    state = self._read_state(path)
    current = state["current_phase"]

    # Sequential enforcement: only the current phase can be completed.
    if phase != current:
        raise PhaseTransitionError(story_id, current, phase)

    if checkpoint_passed:
        # Subagent enforcement (AC3, AC4, AC5, AC6 - STORY-306)
        phase_entry = state["phases"][phase]
        invoked = set(phase_entry.get("subagents_invoked", []))
        missing = []

        for requirement in phase_entry.get("subagents_required", []):
            if isinstance(requirement, list):
                # OR group (AC6): any one member satisfies the requirement.
                if not any(name in invoked for name in requirement):
                    missing.append(f"({' OR '.join(requirement)})")
            elif requirement not in invoked:
                # Plain requirement: the subagent itself must have run.
                missing.append(requirement)

        if missing:
            logger.warning(
                f"Phase {phase} completion blocked for {story_id}: "
                f"Missing subagents: {', '.join(missing)}"
            )
            raise SubagentEnforcementError(story_id, phase, missing)
    else:
        # Escape hatch used - log for audit trail
        logger.info(
            f"Phase {phase} completed via escape hatch for {story_id} "
            "(checkpoint_passed=False)"
        )

    # Mark the phase done with a UTC completion timestamp.
    entry = state["phases"][phase]
    entry["status"] = "completed"
    entry["completed_at"] = datetime.now(timezone.utc).strftime("%Y-%m-%dT%H:%M:%SZ")
    entry["checkpoint_passed"] = checkpoint_passed

    # Advance current_phase unless we just finished the final phase.
    idx = self.VALID_PHASES.index(phase)
    if idx + 1 < len(self.VALID_PHASES):
        state["current_phase"] = self.VALID_PHASES[idx + 1]

    self._atomic_write(path, state)

    logger.info(f"Completed phase {phase} for {story_id}")
    return state
|
|
716
|
+
|
|
717
|
+
def record_subagent(
    self,
    story_id: str,
    phase: str,
    subagent: str
) -> Dict[str, Any]:
    """
    Record a subagent invocation for a phase (idempotent).

    An already-recorded subagent is not duplicated. The phase's
    started_at timestamp is set on first invocation if absent.

    Args:
        story_id: Story identifier
        phase: Phase ID
        subagent: Subagent name

    Returns:
        Updated state dictionary

    Raises:
        ValueError: If story_id is invalid
        PhaseNotFoundError: If phase is invalid
        FileNotFoundError: If state file doesn't exist

    Example:
        >>> ps = PhaseState(project_root=Path("/project"))
        >>> state = ps.record_subagent("STORY-001", "02", "test-automator")
        >>> "test-automator" in state["phases"]["02"]["subagents_invoked"]
        True
    """
    self._validate_story_id(story_id)
    self._validate_phase_id(phase)

    path = self._get_state_path(story_id)
    if not path.exists():
        raise FileNotFoundError(f"State file not found for {story_id}")

    state = self._read_state(path)
    phase_entry = state["phases"][phase]

    # Idempotent: only append a subagent the first time it is seen.
    if subagent not in phase_entry["subagents_invoked"]:
        phase_entry["subagents_invoked"].append(subagent)

    # First invocation marks the phase as started.
    phase_entry.setdefault(
        "started_at",
        datetime.now(timezone.utc).strftime("%Y-%m-%dT%H:%M:%SZ")
    )

    self._atomic_write(path, state)

    logger.debug(f"Recorded subagent {subagent} for {story_id} phase {phase}")
    return state
|
|
771
|
+
|
|
772
|
+
def add_observation(
    self,
    story_id: str,
    phase_id: str,
    category: str,
    note: str,
    severity: str
) -> str:
    """
    Add a workflow observation.

    Args:
        story_id: Story identifier
        phase_id: Phase ID (01-10)
        category: Observation category (friction, gap, success, pattern)
        note: Observation note (1-1000 characters, non-empty)
        severity: Observation severity (low, medium, high)

    Returns:
        Generated observation ID (format: obs-{phase_id}-{8-char-hex})

    Raises:
        ValueError: If any parameter is invalid
        PhaseNotFoundError: If phase_id is invalid
        FileNotFoundError: If state file doesn't exist

    Example:
        >>> ps = PhaseState(project_root=Path("/project"))
        >>> obs_id = ps.add_observation(
        ...     story_id="STORY-001",
        ...     phase_id="04",
        ...     category="friction",
        ...     note="Test took longer than expected",
        ...     severity="medium"
        ... )
        >>> obs_id.startswith("obs-04-")
        True
    """
    self._validate_story_id(story_id)
    self._validate_phase_id(phase_id)

    # Validate category
    if category not in self.VALID_CATEGORIES:
        raise ValueError(
            f"Invalid category: '{category}'. "
            f"Valid categories: {', '.join(self.VALID_CATEGORIES)}"
        )

    # Validate severity
    if severity not in self.VALID_SEVERITIES:
        raise ValueError(
            f"Invalid severity: '{severity}'. "
            f"Valid severities: {', '.join(self.VALID_SEVERITIES)}"
        )

    # Validate note (None is treated as empty)
    note = note.strip() if note else ""
    if not note:
        raise ValueError("Observation note cannot be empty")

    if len(note) > self.MAX_NOTE_LENGTH:
        raise ValueError(
            f"Observation note too long ({len(note)} chars). "
            f"Maximum length is {self.MAX_NOTE_LENGTH} characters."
        )

    path = self._get_state_path(story_id)
    if not path.exists():
        raise FileNotFoundError(f"State file not found for {story_id}")

    state = self._read_state(path)

    # Generate unique observation ID
    obs_id = f"obs-{phase_id}-{uuid.uuid4().hex[:8]}"

    # Create observation object with UTC timestamp
    observation = {
        "id": obs_id,
        "phase": phase_id,
        "category": category,
        "note": note,
        "severity": severity,
        "timestamp": datetime.now(timezone.utc).strftime("%Y-%m-%dT%H:%M:%SZ")
    }

    # Fix: legacy state files may predate the "observations" key
    # (_ensure_phases_exist back-fills phases but not observations), so
    # use setdefault instead of assuming the list exists.
    state.setdefault("observations", []).append(observation)
    self._atomic_write(path, state)

    logger.info(f"Added observation {obs_id} for {story_id}")
    return obs_id
|
|
865
|
+
|
|
866
|
+
# =================================================================
|
|
867
|
+
# Step-Level Tracking Methods (STORY-525)
|
|
868
|
+
# =================================================================
|
|
869
|
+
|
|
870
|
+
def _get_registry_path(self) -> Path:
    """
    Get the path to the phase-steps-registry.json file.

    The registry enumerates the steps belonging to each phase and is
    read by validate_phase_steps() (STORY-525).

    Returns:
        Path to the registry JSON file
    """
    # Registry lives under the project's .claude/hooks directory.
    return self.project_root / ".claude" / "hooks" / "phase-steps-registry.json"
|
|
878
|
+
|
|
879
|
+
def record_step(
    self,
    story_id: str,
    phase: str,
    step_id: str
) -> Dict[str, Any]:
    """
    Record a step completion for a phase (idempotent).

    Appends step_id to the phase's steps_completed list; recording the
    same step twice is a no-op (no duplicates).

    Args:
        story_id: Story identifier
        phase: Phase ID
        step_id: Step identifier (e.g., "02.1")

    Returns:
        Updated state dictionary

    Raises:
        ValueError: If story_id is invalid
        PhaseNotFoundError: If phase is invalid
        FileNotFoundError: If state file doesn't exist
    """
    self._validate_story_id(story_id)
    self._validate_phase_id(phase)

    path = self._get_state_path(story_id)
    if not path.exists():
        raise FileNotFoundError(f"State file not found for {story_id}")

    state = self._read_state(path)

    # Create the list on demand, then append idempotently.
    completed = state["phases"][phase].setdefault("steps_completed", [])
    if step_id not in completed:
        completed.append(step_id)

    self._atomic_write(path, state)

    logger.debug(f"Recorded step {step_id} for {story_id} phase {phase}")
    return state
|
|
926
|
+
|
|
927
|
+
def validate_phase_steps(
    self,
    story_id: str,
    phase: str
) -> Dict[str, Any]:
    """
    Validate that all required steps for a phase are completed.

    Required steps come from phase-steps-registry.json; entries flagged
    ``conditional`` are optional and excluded from the check.

    Args:
        story_id: Story identifier
        phase: Phase ID

    Returns:
        Dict with 'status' ('PASS'|'FAIL') and 'missing_steps' (list)

    Raises:
        ValueError: If story_id is invalid
        PhaseNotFoundError: If phase is invalid
        FileNotFoundError: If state file or registry missing
        json.JSONDecodeError: If registry is malformed
    """
    self._validate_story_id(story_id)
    self._validate_phase_id(phase)

    path = self._get_state_path(story_id)
    if not path.exists():
        raise FileNotFoundError(f"State file not found for {story_id}")

    state = self._read_state(path)

    # Load the step registry
    registry_path = self._get_registry_path()
    if not registry_path.exists():
        raise FileNotFoundError(
            f"Registry file not found at {registry_path}"
        )
    registry = json.loads(registry_path.read_text())

    # Only unconditional steps are mandatory.
    steps = registry.get(phase, {}).get("steps", [])
    required_ids = [s["id"] for s in steps if not s.get("conditional", False)]

    done = state["phases"].get(phase, {}).get("steps_completed", [])
    missing = [sid for sid in required_ids if sid not in done]

    return {"status": "FAIL" if missing else "PASS", "missing_steps": missing}
|
|
996
|
+
|
|
997
|
+
# =================================================================
|
|
998
|
+
# QA Workflow Methods (STORY-517)
|
|
999
|
+
# =================================================================
|
|
1000
|
+
|
|
1001
|
+
def _get_qa_state_path(self, story_id: str) -> Path:
    """Get the path to the QA state file for a story."""
    # QA state lives alongside dev state in workflows_dir; the
    # "-qa-phase-state.json" suffix distinguishes the workflow type.
    return self.workflows_dir / f"{story_id}-qa-phase-state.json"
|
|
1004
|
+
|
|
1005
|
+
def _get_workflow_path(self, story_id: str, workflow: str) -> Path:
    """
    Get the state file path for any workflow type.

    Centralises the dev/qa/generic path selection shared by
    create_workflow() and complete_workflow_phase().

    Args:
        story_id: Story identifier
        workflow: Workflow type key (e.g. "dev", "qa")

    Returns:
        Path to the workflow state file

    Raises:
        ValueError: If an unknown workflow name fails the safety regex
    """
    # Known workflow types resolve through their dedicated helpers.
    known = {"dev": self._get_state_path, "qa": self._get_qa_state_path}
    resolver = known.get(workflow)
    if resolver is not None:
        return resolver(story_id)

    # Defense-in-depth: currently unreachable via public API because
    # create_workflow() and complete_workflow_phase() validate against
    # WORKFLOW_SCHEMAS before calling this method. Retained as path
    # traversal protection for future callers that may bypass the registry.
    if not re.match(r'^[a-z][a-z0-9_-]*$', workflow):
        raise ValueError(
            f"Invalid workflow name: '{workflow}'. "
            "Must be lowercase alphanumeric with hyphens/underscores."
        )
    return self.workflows_dir / f"{story_id}-{workflow}-phase-state.json"
|
|
1033
|
+
|
|
1034
|
+
def _create_qa_initial_state(self, story_id: str) -> Dict[str, Any]:
    """Build the initial in-memory QA state dictionary for *story_id*."""
    started = datetime.now(timezone.utc).strftime("%Y-%m-%dT%H:%M:%SZ")

    # One pending entry per QA phase, seeded from the QA_PHASES registry.
    phases = {
        key: {
            "status": "pending",
            "steps_required": list(QA_PHASES[key]["steps_required"]),
            "steps_completed": [],
            "checkpoint_passed": False,
            "subagents_required": list(QA_PHASES[key]["subagents_required"]),
            "subagents_invoked": [],
        }
        for key in QA_VALID_PHASES
    }

    return {
        "story_id": story_id,
        "workflow": "qa",
        "current_phase": "00",
        "workflow_started": started,
        "blocking_status": False,
        "phases": phases,
        "validation_errors": [],
        "observations": [],
    }
|
|
1060
|
+
|
|
1061
|
+
def create_qa(self, story_id: str) -> Dict[str, Any]:
    """Create a new QA phase state file for a story (idempotent)."""
    self._validate_story_id(story_id)

    qa_path = self._get_qa_state_path(story_id)

    # Idempotent: return the existing QA state unchanged.
    # NOTE(review): _read_state() back-fills dev VALID_PHASES into the
    # phases dict, which may not be intended for QA state — confirm.
    if qa_path.exists():
        logger.debug(f"QA state file already exists for {story_id}")
        return self._read_state(qa_path)

    new_state = self._create_qa_initial_state(story_id)
    self._atomic_write(qa_path, new_state)

    logger.info(f"Created QA phase state for {story_id}")
    return new_state
|
|
1076
|
+
|
|
1077
|
+
def complete_workflow_phase(
    self,
    story_id: str,
    workflow: str,
    phase: str,
    checkpoint_passed: bool,
) -> Dict[str, Any]:
    """
    Complete a phase for any workflow type using WORKFLOW_SCHEMAS.

    Generic completion method (STORY-521) that works for dev, qa, and
    any future workflow types. Enforces sequential phase order; for QA
    workflows it additionally rejects completion while required steps
    remain outstanding (STORY-517 AC2).

    Args:
        story_id: Story identifier
        workflow: Workflow type key from WORKFLOW_SCHEMAS
        phase: Phase ID to complete
        checkpoint_passed: Whether the phase checkpoint passed

    Returns:
        Updated state dictionary

    Raises:
        ValueError: If story_id, workflow, or phase is invalid
        FileNotFoundError: If state file doesn't exist
        PhaseTransitionError: If phase is not current phase
    """
    self._validate_story_id(story_id)

    schema = WORKFLOW_SCHEMAS.get(workflow)
    if schema is None:
        raise ValueError(f"Unknown workflow: '{workflow}'")

    valid_phases = schema["valid_phases"]
    if phase not in valid_phases:
        raise PhaseNotFoundError(phase, f"Invalid {workflow} phase: '{phase}'. Valid phases: {valid_phases}")

    path = self._get_workflow_path(story_id, workflow)
    if not path.exists():
        raise FileNotFoundError(f"State file not found for {story_id} ({workflow} workflow)")

    state = self._read_state(path)
    current = state["current_phase"]

    # Sequential enforcement: only the current phase can be completed.
    if phase != current:
        raise PhaseTransitionError(story_id, current, phase)

    if workflow == "qa":
        # Step validation for QA workflows (STORY-517 AC2): QA phases
        # track steps_required/steps_completed; reject if incomplete.
        qa_entry = state["phases"][phase]
        outstanding = (
            set(qa_entry.get("steps_required", []))
            - set(qa_entry.get("steps_completed", []))
        )
        if outstanding:
            raise ValueError(
                f"Cannot complete {workflow} phase '{phase}' for '{story_id}'. "
                f"Missing required steps: {', '.join(sorted(outstanding))}"
            )

    # Mark the phase done with a UTC completion timestamp.
    entry = state["phases"][phase]
    entry["status"] = "completed"
    entry["completed_at"] = datetime.now(timezone.utc).strftime("%Y-%m-%dT%H:%M:%SZ")
    entry["checkpoint_passed"] = checkpoint_passed

    # Advance current_phase unless this was the final phase.
    idx = valid_phases.index(phase)
    if idx + 1 < len(valid_phases):
        state["current_phase"] = valid_phases[idx + 1]

    self._atomic_write(path, state)
    logger.info(f"Completed {workflow} phase {phase} for {story_id}")
    return state
|
|
1156
|
+
|
|
1157
|
+
|
|
1158
|
+
def create_workflow(self, story_id: str, workflow: str) -> Dict[str, Any]:
    """
    Create state file for any workflow type using WORKFLOW_SCHEMAS.

    STORY-521: Unified workflow creation via registry. Idempotent:
    an existing state file is returned unchanged.

    Args:
        story_id: Story identifier (must match STORY-XXX pattern)
        workflow: Workflow type key from WORKFLOW_SCHEMAS

    Returns:
        The state dictionary (new or existing)

    Raises:
        ValueError: If story_id is invalid or workflow is unknown
    """
    self._validate_story_id(story_id)

    schema = WORKFLOW_SCHEMAS.get(workflow)
    if schema is None:
        raise ValueError(f"Unknown workflow: '{workflow}'")

    path = self._get_workflow_path(story_id, workflow)
    if path.exists():
        return self._read_state(path)

    # Seed one pending entry per phase from the schema definitions.
    phases = {}
    for key in schema["valid_phases"]:
        definition = schema["phases"][key]
        phases[key] = {
            "status": "pending",
            "steps_required": list(definition.get("steps_required", [])),
            "steps_completed": [],
            "checkpoint_passed": False,
            "subagents_required": self._serialize_subagents_required(
                definition.get("subagents_required", [])
            ),
            "subagents_invoked": [],
        }

    state = {
        "story_id": story_id,
        "workflow": workflow,
        "current_phase": schema["valid_phases"][0],
        "workflow_started": datetime.now(timezone.utc).strftime("%Y-%m-%dT%H:%M:%SZ"),
        "blocking_status": False,
        "phases": phases,
        "validation_errors": [],
        "observations": [],
    }

    self._atomic_write(path, state)
    logger.info(f"Created {workflow} phase state for {story_id}")
    return state
|
|
1216
|
+
|
|
1217
|
+
def read_qa(self, story_id: str) -> Optional[Dict[str, Any]]:
    """Read the QA phase state for a story, or None when absent."""
    self._validate_story_id(story_id)

    qa_path = self._get_qa_state_path(story_id)
    if not qa_path.exists():
        return None

    return self._read_state(qa_path)
|
|
1227
|
+
|
|
1228
|
+
# complete_qa_phase() removed — step validation now in complete_workflow_phase()
|
|
1229
|
+
# See STORY-517 AC2 fix. The method had zero callers after STORY-521 unification.
|