galangal-orchestrate 0.13.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- galangal/__init__.py +36 -0
- galangal/__main__.py +6 -0
- galangal/ai/__init__.py +167 -0
- galangal/ai/base.py +159 -0
- galangal/ai/claude.py +352 -0
- galangal/ai/codex.py +370 -0
- galangal/ai/gemini.py +43 -0
- galangal/ai/subprocess.py +254 -0
- galangal/cli.py +371 -0
- galangal/commands/__init__.py +27 -0
- galangal/commands/complete.py +367 -0
- galangal/commands/github.py +355 -0
- galangal/commands/init.py +177 -0
- galangal/commands/init_wizard.py +762 -0
- galangal/commands/list.py +20 -0
- galangal/commands/pause.py +34 -0
- galangal/commands/prompts.py +89 -0
- galangal/commands/reset.py +41 -0
- galangal/commands/resume.py +30 -0
- galangal/commands/skip.py +62 -0
- galangal/commands/start.py +530 -0
- galangal/commands/status.py +44 -0
- galangal/commands/switch.py +28 -0
- galangal/config/__init__.py +15 -0
- galangal/config/defaults.py +183 -0
- galangal/config/loader.py +163 -0
- galangal/config/schema.py +330 -0
- galangal/core/__init__.py +33 -0
- galangal/core/artifacts.py +136 -0
- galangal/core/state.py +1097 -0
- galangal/core/tasks.py +454 -0
- galangal/core/utils.py +116 -0
- galangal/core/workflow/__init__.py +68 -0
- galangal/core/workflow/core.py +789 -0
- galangal/core/workflow/engine.py +781 -0
- galangal/core/workflow/pause.py +35 -0
- galangal/core/workflow/tui_runner.py +1322 -0
- galangal/exceptions.py +36 -0
- galangal/github/__init__.py +31 -0
- galangal/github/client.py +427 -0
- galangal/github/images.py +324 -0
- galangal/github/issues.py +298 -0
- galangal/logging.py +364 -0
- galangal/prompts/__init__.py +5 -0
- galangal/prompts/builder.py +527 -0
- galangal/prompts/defaults/benchmark.md +34 -0
- galangal/prompts/defaults/contract.md +35 -0
- galangal/prompts/defaults/design.md +54 -0
- galangal/prompts/defaults/dev.md +89 -0
- galangal/prompts/defaults/docs.md +104 -0
- galangal/prompts/defaults/migration.md +59 -0
- galangal/prompts/defaults/pm.md +110 -0
- galangal/prompts/defaults/pm_questions.md +53 -0
- galangal/prompts/defaults/preflight.md +32 -0
- galangal/prompts/defaults/qa.md +65 -0
- galangal/prompts/defaults/review.md +90 -0
- galangal/prompts/defaults/review_codex.md +99 -0
- galangal/prompts/defaults/security.md +84 -0
- galangal/prompts/defaults/test.md +91 -0
- galangal/results.py +176 -0
- galangal/ui/__init__.py +5 -0
- galangal/ui/console.py +126 -0
- galangal/ui/tui/__init__.py +56 -0
- galangal/ui/tui/adapters.py +168 -0
- galangal/ui/tui/app.py +902 -0
- galangal/ui/tui/entry.py +24 -0
- galangal/ui/tui/mixins.py +196 -0
- galangal/ui/tui/modals.py +339 -0
- galangal/ui/tui/styles/app.tcss +86 -0
- galangal/ui/tui/styles/modals.tcss +197 -0
- galangal/ui/tui/types.py +107 -0
- galangal/ui/tui/widgets.py +263 -0
- galangal/validation/__init__.py +5 -0
- galangal/validation/runner.py +1072 -0
- galangal_orchestrate-0.13.0.dist-info/METADATA +985 -0
- galangal_orchestrate-0.13.0.dist-info/RECORD +79 -0
- galangal_orchestrate-0.13.0.dist-info/WHEEL +4 -0
- galangal_orchestrate-0.13.0.dist-info/entry_points.txt +2 -0
- galangal_orchestrate-0.13.0.dist-info/licenses/LICENSE +674 -0
galangal/core/state.py
ADDED
|
@@ -0,0 +1,1097 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Workflow state management - Stage, TaskType, and WorkflowState.
|
|
3
|
+
"""
|
|
4
|
+
|
|
5
|
+
import json
|
|
6
|
+
from dataclasses import asdict, dataclass, field
|
|
7
|
+
from datetime import datetime, timedelta, timezone
|
|
8
|
+
from enum import Enum
|
|
9
|
+
from pathlib import Path
|
|
10
|
+
from typing import Any
|
|
11
|
+
|
|
12
|
+
|
|
13
|
+
class TaskType(str, Enum):
    """Type of task - determines which stages are required."""

    FEATURE = "feature"
    BUG_FIX = "bug_fix"
    REFACTOR = "refactor"
    CHORE = "chore"
    DOCS = "docs"
    HOTFIX = "hotfix"

    @classmethod
    def from_str(cls, value: str) -> "TaskType":
        """Convert string to TaskType, defaulting to FEATURE.

        Handles multiple string formats:
        - Enum values: "feature", "bug_fix", "refactor", etc.
        - UI keys: "bugfix" (maps to BUG_FIX)
        - GitHub hints: "bug_fix" from label inference
        """
        key = value.strip().lower()

        # Aliases whose spelling differs from the enum values.
        alias_map = {
            "bugfix": cls.BUG_FIX,
            "bug": cls.BUG_FIX,
            "fix": cls.BUG_FIX,
            "enhancement": cls.FEATURE,
            "feat": cls.FEATURE,
        }
        alias = alias_map.get(key)
        if alias is not None:
            return alias

        # Fall back to direct enum-value lookup; unknown strings become FEATURE.
        try:
            return cls(key)
        except ValueError:
            return cls.FEATURE

    def display_name(self) -> str:
        """Human-readable name for display."""
        labels = {
            TaskType.FEATURE: "Feature",
            TaskType.BUG_FIX: "Bug Fix",
            TaskType.REFACTOR: "Refactor",
            TaskType.CHORE: "Chore",
            TaskType.DOCS: "Docs",
            TaskType.HOTFIX: "Hotfix",
        }
        return labels[self]

    def short_description(self) -> str:
        """Brief description of what this task type is for."""
        blurbs = {
            TaskType.FEATURE: "New functionality",
            TaskType.BUG_FIX: "Fix broken behavior",
            TaskType.REFACTOR: "Restructure code",
            TaskType.CHORE: "Dependencies, config, tooling",
            TaskType.DOCS: "Documentation only",
            TaskType.HOTFIX: "Critical fix",
        }
        return blurbs[self]

    def description(self) -> str:
        """Full description with pipeline (derived from TASK_TYPE_SKIP_STAGES)."""
        pipeline = get_task_type_pipeline(self)
        # FEATURE runs everything, so show the generic label instead of the long chain.
        suffix = "full workflow" if self is TaskType.FEATURE else pipeline
        return f"{self.short_description()} ({suffix})"
|
|
79
|
+
|
|
80
|
+
|
|
81
|
+
@dataclass(frozen=True)
class StageMetadata:
    """
    Rich metadata for a workflow stage.

    Provides centralized information about each stage including:
    - Display properties (name, description)
    - Behavioral flags (conditional, requires approval, skippable)
    - Artifact dependencies (produces, requires)
    - Decision file configuration for validation
    - Context artifacts for prompt building

    This metadata is used by the TUI, validation, prompt builder, and workflow logic.
    Instances are frozen so they can be shared safely as module-level constants.
    """

    # Human-readable stage name shown in the UI (e.g., "Development").
    display_name: str
    # One-line summary of what the stage does.
    description: str
    # True when the stage only runs if its preconditions are met.
    is_conditional: bool = False
    # True when the stage blocks until a human approves (see approval_artifact).
    requires_approval: bool = False
    # True when a user may manually skip the stage (see skip_artifact).
    is_skippable: bool = False
    # Artifact filenames this stage is expected to write.
    produces_artifacts: tuple[str, ...] = ()
    skip_artifact: str | None = None  # e.g., "MIGRATION_SKIP.md"
    approval_artifact: str | None = (
        None  # e.g., "APPROVAL.md" - checked when requires_approval=True
    )
    # Decision file for validation (e.g., "SECURITY_DECISION")
    decision_file: str | None = None
    # Valid decision values and their outcomes: (value, success, message, rollback_to, is_fast_track)
    decision_outcomes: tuple[tuple[str, bool, str, str | None, bool], ...] = ()
    # Schema for read-only backend structured output parsing
    # {
    #     "notes_file": "ARTIFACT.md",
    #     "notes_field": "json_field_name",
    #     "decision_file": "DECISION_FILE",
    #     "decision_field": "decision",
    #     "issues_field": "issues"
    # }
    artifact_schema: dict[str, str | None] | None = None
|
|
119
|
+
|
|
120
|
+
|
|
121
|
+
class Stage(str, Enum):
    """Workflow stages, in no particular order (see STAGE_ORDER for sequencing)."""

    PM = "PM"
    DESIGN = "DESIGN"
    PREFLIGHT = "PREFLIGHT"
    DEV = "DEV"
    MIGRATION = "MIGRATION"
    TEST = "TEST"
    TEST_GATE = "TEST_GATE"
    CONTRACT = "CONTRACT"
    QA = "QA"
    BENCHMARK = "BENCHMARK"
    SECURITY = "SECURITY"
    REVIEW = "REVIEW"
    DOCS = "DOCS"
    COMPLETE = "COMPLETE"

    @classmethod
    def from_str(cls, value: str) -> "Stage":
        """Parse a stage name case-insensitively; raises ValueError if unknown."""
        return cls(value.upper())

    @property
    def metadata(self) -> StageMetadata:
        """Get rich metadata for this stage (looked up in STAGE_METADATA)."""
        return STAGE_METADATA[self]

    def is_conditional(self) -> bool:
        """Return True if this stage only runs when conditions are met."""
        return self.metadata.is_conditional

    def is_skippable(self) -> bool:
        """Return True if this stage can be manually skipped."""
        return self.metadata.is_skippable
|
|
155
|
+
|
|
156
|
+
|
|
157
|
+
# Stage order - the canonical sequence. Pipelines for task types are derived by
# filtering this list against TASK_TYPE_SKIP_STAGES; COMPLETE is always last.
STAGE_ORDER = [
    Stage.PM,
    Stage.DESIGN,
    Stage.PREFLIGHT,
    Stage.DEV,
    Stage.MIGRATION,
    Stage.TEST,
    Stage.TEST_GATE,
    Stage.CONTRACT,
    Stage.QA,
    Stage.BENCHMARK,
    Stage.SECURITY,
    Stage.REVIEW,
    Stage.DOCS,
    Stage.COMPLETE,
]
|
|
174
|
+
|
|
175
|
+
|
|
176
|
+
# Rich metadata for each stage.
# decision_outcomes entries are (value, success, message, rollback_to, is_fast_track):
# the decision word written to decision_file, whether it counts as success, the
# message to surface, the stage to roll back to on failure (or None), and whether
# the rollback is eligible for fast-track (re-running only failed stages).
STAGE_METADATA: dict[Stage, StageMetadata] = {
    Stage.PM: StageMetadata(
        display_name="PM",
        description="Define requirements and generate spec",
        requires_approval=True,
        approval_artifact="APPROVAL.md",
        produces_artifacts=("SPEC.md", "PLAN.md", "DISCOVERY_LOG.md"),
    ),
    Stage.DESIGN: StageMetadata(
        display_name="Design",
        description="Create implementation plan and architecture",
        is_skippable=True,
        produces_artifacts=("DESIGN.md",),
        skip_artifact="DESIGN_SKIP.md",
    ),
    Stage.PREFLIGHT: StageMetadata(
        display_name="Preflight",
        description="Verify environment and dependencies",
        produces_artifacts=("PREFLIGHT_REPORT.md",),
    ),
    Stage.DEV: StageMetadata(
        display_name="Development",
        description="Implement the feature or fix",
        produces_artifacts=("DEVELOPMENT.md",),
    ),
    Stage.MIGRATION: StageMetadata(
        display_name="Migration",
        description="Database and data migrations",
        is_conditional=True,
        is_skippable=True,
        produces_artifacts=("MIGRATION_REPORT.md",),
        skip_artifact="MIGRATION_SKIP.md",
    ),
    Stage.TEST: StageMetadata(
        display_name="Test",
        description="Write and run tests",
        produces_artifacts=("TEST_PLAN.md",),
        decision_file="TEST_DECISION",
        decision_outcomes=(
            ("PASS", True, "Tests passed", None, False),
            (
                "FAIL",
                False,
                "Tests failed due to implementation issues - needs DEV fix",
                "DEV",
                False,
            ),
            (
                "BLOCKED",
                False,
                "Tests blocked by implementation issues - needs DEV fix",
                "DEV",
                False,
            ),
        ),
    ),
    Stage.TEST_GATE: StageMetadata(
        display_name="Test Gate",
        description="Verify configured test suites pass",
        is_conditional=True,
        is_skippable=True,
        produces_artifacts=("TEST_GATE_RESULTS.md",),
        skip_artifact="TEST_GATE_SKIP.md",
        decision_file="TEST_GATE_DECISION",
        decision_outcomes=(
            ("PASS", True, "All configured tests passed", None, False),
            (
                "FAIL",
                False,
                "Test gate failed - tests did not pass",
                "DEV",
                False,
            ),
        ),
    ),
    Stage.CONTRACT: StageMetadata(
        display_name="Contract",
        description="API contract testing",
        is_conditional=True,
        is_skippable=True,
        produces_artifacts=("CONTRACT_REPORT.md",),
        skip_artifact="CONTRACT_SKIP.md",
    ),
    Stage.QA: StageMetadata(
        display_name="QA",
        description="Quality assurance review",
        produces_artifacts=("QA_REPORT.md",),
        decision_file="QA_DECISION",
        decision_outcomes=(
            ("PASS", True, "QA passed", None, False),
            ("FAIL", False, "QA failed", "DEV", False),
        ),
        # Structured-output schema for read-only backends (see StageMetadata).
        artifact_schema={
            "notes_file": "QA_REPORT.md",
            "notes_field": "qa_report",
            "decision_file": "QA_DECISION",
            "decision_field": "decision",
            "issues_field": "issues",
        },
    ),
    Stage.BENCHMARK: StageMetadata(
        display_name="Benchmark",
        description="Performance benchmarking",
        is_conditional=True,
        is_skippable=True,
        produces_artifacts=("BENCHMARK_REPORT.md",),
        skip_artifact="BENCHMARK_SKIP.md",
    ),
    Stage.SECURITY: StageMetadata(
        display_name="Security",
        description="Security review and audit",
        is_skippable=True,
        produces_artifacts=("SECURITY_CHECKLIST.md",),
        skip_artifact="SECURITY_SKIP.md",
        decision_file="SECURITY_DECISION",
        # REJECTED and BLOCKED are treated identically: both roll back to DEV.
        decision_outcomes=(
            ("APPROVED", True, "Security review approved", None, False),
            ("REJECTED", False, "Security review found blocking issues", "DEV", False),
            ("BLOCKED", False, "Security review found blocking issues", "DEV", False),
        ),
        artifact_schema={
            "notes_file": "SECURITY_CHECKLIST.md",
            "notes_field": "security_checklist",
            "decision_file": "SECURITY_DECISION",
            "decision_field": "decision",
            "issues_field": "issues",
        },
    ),
    Stage.REVIEW: StageMetadata(
        display_name="Review",
        description="Code review and final checks",
        produces_artifacts=("REVIEW_NOTES.md",),
        decision_file="REVIEW_DECISION",
        decision_outcomes=(
            ("APPROVE", True, "Review approved", None, False),
            ("REQUEST_CHANGES", False, "Review requested changes", "DEV", False),
            # is_fast_track=True: minor changes re-run only the stages that failed.
            (
                "REQUEST_MINOR_CHANGES",
                False,
                "Review requested minor changes (fast-track)",
                "DEV",
                True,
            ),
        ),
        artifact_schema={
            "notes_file": "REVIEW_NOTES.md",
            "notes_field": "review_notes",
            "decision_file": "REVIEW_DECISION",
            "decision_field": "decision",
            "issues_field": "issues",
        },
    ),
    Stage.DOCS: StageMetadata(
        display_name="Docs",
        description="Update documentation",
        produces_artifacts=("DOCS_REPORT.md",),
    ),
    Stage.COMPLETE: StageMetadata(
        display_name="Complete",
        description="Workflow completed",
    ),
}
|
|
339
|
+
|
|
340
|
+
|
|
341
|
+
# Stages that are always skipped for each task type.
# NOTE(review): the per-entry pipeline comments are the headline path only —
# PREFLIGHT (and conditional stages like TEST_GATE) still run unless they
# appear in the skip set; confirm against get_task_type_pipeline() output.
TASK_TYPE_SKIP_STAGES: dict[TaskType, set[Stage]] = {
    # FEATURE: Full workflow - PM → DESIGN → PREFLIGHT → DEV → all validation stages
    TaskType.FEATURE: set(),
    # BUG_FIX: PM → DEV → TEST → QA (skip design, run QA for regression check)
    TaskType.BUG_FIX: {
        Stage.DESIGN,
        Stage.MIGRATION,
        Stage.CONTRACT,
        Stage.BENCHMARK,
        Stage.SECURITY,
        Stage.REVIEW,
        Stage.DOCS,
    },
    # REFACTOR: PM → DESIGN → DEV → TEST (code restructuring, needs design but not full validation)
    TaskType.REFACTOR: {
        Stage.MIGRATION,
        Stage.CONTRACT,
        Stage.QA,
        Stage.BENCHMARK,
        Stage.SECURITY,
        Stage.REVIEW,
        Stage.DOCS,
    },
    # CHORE: PM → DEV → TEST (dependencies, config, tooling - minimal workflow)
    TaskType.CHORE: {
        Stage.DESIGN,
        Stage.MIGRATION,
        Stage.CONTRACT,
        Stage.QA,
        Stage.BENCHMARK,
        Stage.SECURITY,
        Stage.REVIEW,
        Stage.DOCS,
    },
    # DOCS: PM → DOCS (documentation only - skip everything else)
    TaskType.DOCS: {
        Stage.DESIGN,
        Stage.PREFLIGHT,
        Stage.DEV,
        Stage.MIGRATION,
        Stage.TEST,
        Stage.TEST_GATE,
        Stage.CONTRACT,
        Stage.QA,
        Stage.BENCHMARK,
        Stage.SECURITY,
        Stage.REVIEW,
    },
    # HOTFIX: PM → DEV → TEST (critical fix - expedited, minimal stages)
    TaskType.HOTFIX: {
        Stage.DESIGN,
        Stage.PREFLIGHT,
        Stage.MIGRATION,
        Stage.CONTRACT,
        Stage.QA,
        Stage.BENCHMARK,
        Stage.SECURITY,
        Stage.REVIEW,
        Stage.DOCS,
    },
}
|
|
403
|
+
|
|
404
|
+
|
|
405
|
+
def should_skip_for_task_type(stage: Stage, task_type: TaskType) -> bool:
    """Check if a stage should be skipped based on task type."""
    skip_set = TASK_TYPE_SKIP_STAGES.get(task_type, set())
    return stage in skip_set
|
|
408
|
+
|
|
409
|
+
|
|
410
|
+
def get_task_type_pipeline(task_type: TaskType) -> str:
    """
    Get the stage pipeline string for a task type.

    Derives the pipeline from TASK_TYPE_SKIP_STAGES, ensuring it stays
    in sync with the actual skip configuration.

    Args:
        task_type: The task type to get the pipeline for.

    Returns:
        Pipeline string like "PM → DEV → TEST → QA"
    """
    skipped = TASK_TYPE_SKIP_STAGES.get(task_type, set())
    names: list[str] = []
    for stage in STAGE_ORDER:
        # COMPLETE is a terminal marker, never shown in the pipeline string.
        if stage is Stage.COMPLETE or stage in skipped:
            continue
        names.append(stage.value)
    return " → ".join(names)
|
|
426
|
+
|
|
427
|
+
|
|
428
|
+
def get_workflow_diagram() -> str:
    """
    Get the full workflow pipeline diagram.

    Returns:
        Multi-line string showing the stage pipeline with conditional markers.
    """
    # Split into two lines for readability
    first_half = STAGE_ORDER[:6]  # First six stages: PM through TEST
    second_half = STAGE_ORDER[6:-1]  # TEST_GATE through DOCS (exclude COMPLETE)

    # Mark conditional stages with *
    def format_stage(s: Stage) -> str:
        meta = STAGE_METADATA.get(s)
        marker = "*" if meta and meta.is_conditional else ""
        return f"{s.value}{marker}"

    line1 = " → ".join(format_stage(s) for s in first_half)
    line2 = " → ".join(format_stage(s) for s in second_half) + " → COMPLETE"

    return f"{line1} →\n {line2}"
|
|
449
|
+
|
|
450
|
+
|
|
451
|
+
def get_conditional_stages() -> dict[Stage, str]:
    """
    Get mapping of conditional stages to their skip artifact names.

    Returns:
        Dict mapping Stage -> skip artifact filename (e.g., "MIGRATION_SKIP.md")
    """
    result: dict[Stage, str] = {}
    for stage, meta in STAGE_METADATA.items():
        # Only conditional stages that actually declare a skip artifact qualify.
        if meta.is_conditional and meta.skip_artifact:
            result[stage] = meta.skip_artifact
    return result
|
|
463
|
+
|
|
464
|
+
|
|
465
|
+
def get_all_artifact_names() -> list[str]:
    """
    Get all artifact names for status display.

    Derives the complete list from STAGE_METADATA to ensure it stays in sync.
    Includes: produces, skip, approval, and decision artifacts.

    Returns:
        Sorted list of all artifact names.
    """
    # Seed with system-generated artifacts not tied to any specific stage.
    names: set[str] = {
        "ROLLBACK.md",
        "TEST_SUMMARY.md",
        "VALIDATION_REPORT.md",
        "STAGE_PLAN.md",
    }

    for meta in STAGE_METADATA.values():
        names.update(meta.produces_artifacts)
        # Skip/approval artifacts are .md files; decision files have no extension.
        for optional in (meta.skip_artifact, meta.approval_artifact, meta.decision_file):
            if optional:
                names.add(optional)

    return sorted(names)
|
|
500
|
+
|
|
501
|
+
|
|
502
|
+
def get_decision_config(stage: Stage) -> dict[str, tuple[bool, str, str | None, bool]] | None:
    """
    Get decision file configuration for a stage.

    Returns a dict mapping decision values to their outcomes:
    {value: (success, message, rollback_to, is_fast_track)}

    Args:
        stage: The stage to get decision config for.

    Returns:
        Decision config dict or None if stage has no decision file.
    """
    meta = stage.metadata
    if not (meta.decision_file and meta.decision_outcomes):
        return None

    config: dict[str, tuple[bool, str, str | None, bool]] = {}
    for value, success, message, rollback_to, fast_track in meta.decision_outcomes:
        config[value] = (success, message, rollback_to, fast_track)
    return config
|
|
523
|
+
|
|
524
|
+
|
|
525
|
+
def get_decision_file_name(stage: Stage) -> str | None:
    """
    Get the decision file name for a stage.

    Args:
        stage: The stage to get the decision file name for.

    Returns:
        Decision file name (e.g., "QA_DECISION") or None if stage has no decision file.
    """
    meta = stage.metadata
    if not meta:
        return None
    return meta.decision_file
|
|
537
|
+
|
|
538
|
+
|
|
539
|
+
def get_decision_values(stage: Stage) -> list[str]:
    """
    Get the valid decision values for a stage.

    Args:
        stage: The stage to get decision values for.

    Returns:
        List of valid decision values (e.g., ["PASS", "FAIL"]), empty if no decision file.
    """
    meta = stage.metadata
    if not meta or not meta.decision_outcomes:
        return []
    # The decision word is the first element of each outcome tuple.
    return [outcome[0] for outcome in meta.decision_outcomes]
|
|
553
|
+
|
|
554
|
+
|
|
555
|
+
def get_decision_words(stage: Stage) -> tuple[str | None, str | None]:
    """
    Get the approve and reject decision words for a stage.

    Derives the words from STAGE_METADATA.decision_outcomes:
    - approve_word: First outcome where success=True
    - reject_word: First outcome where success=False

    Args:
        stage: The stage to get decision words for.

    Returns:
        (approve_word, reject_word) tuple, or (None, None) if stage has no decision file.
    """
    meta = stage.metadata
    if not meta or not meta.decision_outcomes:
        return (None, None)

    approve_word: str | None = None
    reject_word: str | None = None
    # Single pass: remember the first success and first failure words seen.
    for value, success, *_ in meta.decision_outcomes:
        if success:
            if approve_word is None:
                approve_word = value
        elif reject_word is None:
            reject_word = value
    return (approve_word, reject_word)
|
|
580
|
+
|
|
581
|
+
|
|
582
|
+
def get_decision_info_for_prompt(stage: Stage) -> str | None:
    """
    Get formatted decision file info for prompt injection.

    Returns a markdown snippet describing the decision file and valid values,
    suitable for injection into stage prompts.

    Args:
        stage: The stage to get decision info for.

    Returns:
        Markdown-formatted decision file instructions, or None if no decision file.
    """
    meta = stage.metadata
    if not meta or not meta.decision_file or not meta.decision_outcomes:
        return None

    valid_values = [value for value, *_ in meta.decision_outcomes]
    value_list = "`, `".join(valid_values)

    # Assemble the markdown snippet line by line.
    snippet = [
        "## CRITICAL: Decision File",
        "",
        "After completing this stage, you MUST create a decision file:",
        "",
        f"**File:** `{meta.decision_file}` (no extension)",
        f"**Contents:** Exactly one of: `{value_list}`",
        "",
        "Example:",
        "```",
        valid_values[0],  # Show first value as example
        "```",
        "",
        "This file must contain ONLY the decision word, nothing else.",
        "The validation system reads this file to determine the stage result.",
    ]
    return "\n".join(snippet)
|
|
621
|
+
|
|
622
|
+
|
|
623
|
+
def parse_stage_arg(
    stage_arg: str,
    exclude_complete: bool = False,
) -> Stage | None:
    """Parse a stage argument string and return the Stage enum.

    Handles invalid stage errors with consistent messaging.

    Args:
        stage_arg: The stage argument from CLI (e.g., "pm", "DEV")
        exclude_complete: If True, COMPLETE is not allowed and excluded from valid list

    Returns:
        Stage enum if valid, None if invalid (error already printed)
    """
    # Imported lazily to avoid a console dependency at module import time.
    from galangal.ui.console import console, print_error

    try:
        stage = Stage.from_str(stage_arg.upper())
    except ValueError:
        print_error(f"Invalid stage: '{stage_arg}'")
        valid = [
            s.value.lower()
            for s in Stage
            if not (exclude_complete and s == Stage.COMPLETE)
        ]
        console.print(f"[dim]Valid stages: {', '.join(valid)}[/dim]")
        return None

    if exclude_complete and stage == Stage.COMPLETE:
        print_error("COMPLETE stage is not allowed here.")
        return None

    return stage
|
|
654
|
+
|
|
655
|
+
|
|
656
|
+
def get_hidden_stages_for_task_type(
    task_type: TaskType, config_skip: list[str] | None = None
) -> set[str]:
    """Get stages to hide from progress bar based on task type and config.

    Args:
        task_type: The type of task being executed
        config_skip: List of stage names from config.stages.skip

    Returns:
        Set of stage name strings that should be hidden from the progress bar
    """
    # Start from the task type's built-in skip set.
    hidden = {stage.value for stage in TASK_TYPE_SKIP_STAGES.get(task_type, set())}

    # Layer on any user-configured skips, normalized to upper case.
    hidden.update(name.upper() for name in (config_skip or []))

    return hidden
|
|
680
|
+
|
|
681
|
+
|
|
682
|
+
# Maximum rollbacks to the same stage within the time window; used to detect
# rollback loops (see RollbackEvent). Window length is in hours.
MAX_ROLLBACKS_PER_STAGE = 3
ROLLBACK_TIME_WINDOW_HOURS = 1
|
|
685
|
+
|
|
686
|
+
|
|
687
|
+
@dataclass
class RollbackEvent:
    """
    Record of a rollback event in the workflow.

    Tracks when a stage failed validation and triggered a rollback
    to an earlier stage. Used to detect rollback loops and prevent
    infinite retry cycles.

    Attributes:
        timestamp: When the rollback occurred (ISO format string).
        from_stage: Stage that failed and triggered the rollback.
        to_stage: Target stage to roll back to.
        reason: Description of why the rollback was needed.
    """

    timestamp: str
    from_stage: str
    to_stage: str
    reason: str

    @classmethod
    def create(cls, from_stage: "Stage", to_stage: "Stage", reason: str) -> "RollbackEvent":
        """Create a new rollback event with current timestamp."""
        now = datetime.now(timezone.utc).isoformat()
        return cls(
            timestamp=now,
            from_stage=from_stage.value,
            to_stage=to_stage.value,
            reason=reason,
        )

    def to_dict(self) -> dict[str, str]:
        """Convert to dictionary for JSON serialization."""
        return asdict(self)

    @classmethod
    def from_dict(cls, d: dict[str, str]) -> "RollbackEvent":
        """Create from dictionary."""
        timestamp = d["timestamp"]
        from_stage = d["from_stage"]
        to_stage = d["to_stage"]
        reason = d["reason"]
        return cls(timestamp=timestamp, from_stage=from_stage, to_stage=to_stage, reason=reason)
|
|
731
|
+
|
|
732
|
+
|
|
733
|
+
@dataclass
class WorkflowState:
    """Persistent workflow state for a task.

    Serialized to JSON between runs; fields below cover core progress,
    PM discovery, stage planning, timing, GitHub linkage, and fast-track
    rollback bookkeeping.
    """

    # Current stage of the workflow.
    stage: Stage
    # 1-based attempt counter for the current stage.
    attempt: int
    # True while blocked waiting on human approval (e.g., PM's APPROVAL.md).
    awaiting_approval: bool
    # True while blocked waiting on answers to clarification questions.
    clarification_required: bool
    # Truncated error text from the last failed attempt, if any.
    last_failure: str | None
    # ISO timestamp when the workflow started.
    started_at: str
    # Free-form description of the task.
    task_description: str
    # Identifier/slug of the task.
    task_name: str
    # Task type drives which stages run (see TASK_TYPE_SKIP_STAGES).
    task_type: TaskType = TaskType.FEATURE
    # Chronological record of rollbacks, used for loop detection.
    rollback_history: list[RollbackEvent] = field(default_factory=list)

    # PM Discovery Q&A tracking
    qa_rounds: list[dict[str, Any]] | None = None  # [{"questions": [...], "answers": [...]}]
    qa_complete: bool = False

    # PM-driven stage planning
    # Maps stage name to {"action": "skip"|"run", "reason": "..."}
    stage_plan: dict[str, dict[str, Any]] | None = None

    # Stage timing tracking
    stage_start_time: str | None = None  # ISO timestamp when current stage started
    stage_durations: dict[str, int] | None = None  # Completed stage durations in seconds

    # GitHub integration
    github_issue: int | None = None  # Issue number if created from GitHub
    github_repo: str | None = None  # owner/repo for PR creation
    screenshots: list[str] | None = None  # Local paths to screenshots from issue

    # Fast-track rollback support
    # Stages that passed since last DEV run (cleared when entering DEV)
    passed_stages: set[str] = field(default_factory=set)
    # Stages to skip on this iteration (set from passed_stages on minor rollback)
    fast_track_skip: set[str] = field(default_factory=set)

    # -------------------------------------------------------------------------
    # Retry management methods
    # -------------------------------------------------------------------------
|
|
774
|
+
|
|
775
|
+
def record_failure(self, error: str, max_length: int = 4000) -> None:
|
|
776
|
+
"""
|
|
777
|
+
Record a failed attempt.
|
|
778
|
+
|
|
779
|
+
Increments the attempt counter and stores a truncated error message
|
|
780
|
+
for context in the next retry. Full output is preserved in logs/.
|
|
781
|
+
|
|
782
|
+
Args:
|
|
783
|
+
error: Error message from the failed attempt.
|
|
784
|
+
max_length: Maximum characters to store (default 4000). Prevents
|
|
785
|
+
prompt size from exceeding shell argument limits (~128KB).
|
|
786
|
+
"""
|
|
787
|
+
self.attempt += 1
|
|
788
|
+
if len(error) > max_length:
|
|
789
|
+
self.last_failure = (
|
|
790
|
+
error[:max_length] + "\n\n[... truncated, see logs/ for full output]"
|
|
791
|
+
)
|
|
792
|
+
else:
|
|
793
|
+
self.last_failure = error
|
|
794
|
+
|
|
795
|
+
def can_retry(self, max_retries: int) -> bool:
|
|
796
|
+
"""
|
|
797
|
+
Check if another retry attempt is allowed.
|
|
798
|
+
|
|
799
|
+
Args:
|
|
800
|
+
max_retries: Maximum number of attempts allowed.
|
|
801
|
+
|
|
802
|
+
Returns:
|
|
803
|
+
True if attempt <= max_retries, False if exhausted.
|
|
804
|
+
"""
|
|
805
|
+
return self.attempt <= max_retries
|
|
806
|
+
|
|
807
|
+
def reset_attempts(self, clear_failure: bool = True) -> None:
|
|
808
|
+
"""
|
|
809
|
+
Reset attempt counter for a new stage or after user intervention.
|
|
810
|
+
|
|
811
|
+
Called when:
|
|
812
|
+
- Advancing to a new stage (clear_failure=True)
|
|
813
|
+
- User chooses to retry after max attempts (clear_failure=True)
|
|
814
|
+
- Rolling back to an earlier stage (clear_failure=False to preserve context)
|
|
815
|
+
|
|
816
|
+
Args:
|
|
817
|
+
clear_failure: If True, also clears last_failure. Set to False
|
|
818
|
+
when rolling back to preserve feedback context for the next attempt.
|
|
819
|
+
"""
|
|
820
|
+
self.attempt = 1
|
|
821
|
+
if clear_failure:
|
|
822
|
+
self.last_failure = None
|
|
823
|
+
|
|
824
|
+
# -------------------------------------------------------------------------
|
|
825
|
+
# Stage timing methods
|
|
826
|
+
# -------------------------------------------------------------------------
|
|
827
|
+
|
|
828
|
+
def start_stage_timer(self) -> None:
|
|
829
|
+
"""
|
|
830
|
+
Start timing for the current stage.
|
|
831
|
+
|
|
832
|
+
Records the current timestamp in ISO format. Called when a stage
|
|
833
|
+
begins execution.
|
|
834
|
+
"""
|
|
835
|
+
self.stage_start_time = datetime.now(timezone.utc).isoformat()
|
|
836
|
+
|
|
837
|
+
def record_stage_duration(self) -> int | None:
|
|
838
|
+
"""
|
|
839
|
+
Record the duration of the current stage.
|
|
840
|
+
|
|
841
|
+
Calculates elapsed time from stage_start_time and stores it in
|
|
842
|
+
stage_durations dict. Returns the duration in seconds.
|
|
843
|
+
|
|
844
|
+
Returns:
|
|
845
|
+
Duration in seconds, or None if no start time was recorded.
|
|
846
|
+
"""
|
|
847
|
+
if not self.stage_start_time:
|
|
848
|
+
return None
|
|
849
|
+
|
|
850
|
+
try:
|
|
851
|
+
start = datetime.fromisoformat(self.stage_start_time)
|
|
852
|
+
elapsed = int((datetime.now(timezone.utc) - start).total_seconds())
|
|
853
|
+
|
|
854
|
+
if self.stage_durations is None:
|
|
855
|
+
self.stage_durations = {}
|
|
856
|
+
|
|
857
|
+
self.stage_durations[self.stage.value] = elapsed
|
|
858
|
+
self.stage_start_time = None # Clear for next stage
|
|
859
|
+
return elapsed
|
|
860
|
+
except (ValueError, TypeError):
|
|
861
|
+
return None
|
|
862
|
+
|
|
863
|
+
def get_stage_duration(self, stage: "Stage") -> int | None:
|
|
864
|
+
"""
|
|
865
|
+
Get the recorded duration for a stage.
|
|
866
|
+
|
|
867
|
+
Args:
|
|
868
|
+
stage: The stage to get duration for.
|
|
869
|
+
|
|
870
|
+
Returns:
|
|
871
|
+
Duration in seconds, or None if not recorded.
|
|
872
|
+
"""
|
|
873
|
+
if self.stage_durations is None:
|
|
874
|
+
return None
|
|
875
|
+
return self.stage_durations.get(stage.value)
|
|
876
|
+
|
|
877
|
+
# -------------------------------------------------------------------------
|
|
878
|
+
# Rollback management methods
|
|
879
|
+
# -------------------------------------------------------------------------
|
|
880
|
+
|
|
881
|
+
def record_rollback(self, from_stage: Stage, to_stage: Stage, reason: str) -> None:
|
|
882
|
+
"""
|
|
883
|
+
Record a rollback event in the history.
|
|
884
|
+
|
|
885
|
+
Called when validation fails and triggers a rollback to an earlier stage.
|
|
886
|
+
The history is used to detect rollback loops and prevent infinite retries.
|
|
887
|
+
|
|
888
|
+
Args:
|
|
889
|
+
from_stage: Stage that failed and triggered the rollback.
|
|
890
|
+
to_stage: Target stage to roll back to.
|
|
891
|
+
reason: Description of why the rollback was needed.
|
|
892
|
+
"""
|
|
893
|
+
event = RollbackEvent.create(from_stage, to_stage, reason)
|
|
894
|
+
self.rollback_history.append(event)
|
|
895
|
+
|
|
896
|
+
def should_allow_rollback(self, target_stage: Stage) -> bool:
|
|
897
|
+
"""
|
|
898
|
+
Check if a rollback to the target stage is allowed.
|
|
899
|
+
|
|
900
|
+
Prevents infinite rollback loops by limiting the number of rollbacks
|
|
901
|
+
to the same stage within a time window.
|
|
902
|
+
|
|
903
|
+
Args:
|
|
904
|
+
target_stage: Stage to potentially roll back to.
|
|
905
|
+
|
|
906
|
+
Returns:
|
|
907
|
+
True if rollback is allowed, False if too many recent rollbacks.
|
|
908
|
+
"""
|
|
909
|
+
cutoff = datetime.now(timezone.utc) - timedelta(hours=ROLLBACK_TIME_WINDOW_HOURS)
|
|
910
|
+
cutoff_str = cutoff.isoformat()
|
|
911
|
+
|
|
912
|
+
recent_rollbacks = [
|
|
913
|
+
r
|
|
914
|
+
for r in self.rollback_history
|
|
915
|
+
if r.to_stage == target_stage.value and r.timestamp > cutoff_str
|
|
916
|
+
]
|
|
917
|
+
|
|
918
|
+
return len(recent_rollbacks) < MAX_ROLLBACKS_PER_STAGE
|
|
919
|
+
|
|
920
|
+
def get_rollback_count(self, target_stage: Stage) -> int:
|
|
921
|
+
"""
|
|
922
|
+
Get the number of recent rollbacks to a stage.
|
|
923
|
+
|
|
924
|
+
Args:
|
|
925
|
+
target_stage: Stage to count rollbacks for.
|
|
926
|
+
|
|
927
|
+
Returns:
|
|
928
|
+
Number of rollbacks to this stage in the time window.
|
|
929
|
+
"""
|
|
930
|
+
cutoff = datetime.now(timezone.utc) - timedelta(hours=ROLLBACK_TIME_WINDOW_HOURS)
|
|
931
|
+
cutoff_str = cutoff.isoformat()
|
|
932
|
+
|
|
933
|
+
return len(
|
|
934
|
+
[
|
|
935
|
+
r
|
|
936
|
+
for r in self.rollback_history
|
|
937
|
+
if r.to_stage == target_stage.value and r.timestamp > cutoff_str
|
|
938
|
+
]
|
|
939
|
+
)
|
|
940
|
+
|
|
941
|
+
# -------------------------------------------------------------------------
|
|
942
|
+
# Fast-track rollback methods
|
|
943
|
+
# -------------------------------------------------------------------------
|
|
944
|
+
|
|
945
|
+
def record_passed_stage(self, stage: Stage) -> None:
|
|
946
|
+
"""
|
|
947
|
+
Record that a stage has passed in the current iteration.
|
|
948
|
+
|
|
949
|
+
Called when a stage completes successfully. Used to track which
|
|
950
|
+
stages can be skipped on a minor rollback.
|
|
951
|
+
|
|
952
|
+
Args:
|
|
953
|
+
stage: The stage that passed.
|
|
954
|
+
"""
|
|
955
|
+
self.passed_stages.add(stage.value)
|
|
956
|
+
|
|
957
|
+
def clear_passed_stages(self) -> None:
|
|
958
|
+
"""
|
|
959
|
+
Clear the passed stages tracking.
|
|
960
|
+
|
|
961
|
+
Called when entering DEV stage to start fresh tracking,
|
|
962
|
+
or on a full rollback (REQUEST_CHANGES).
|
|
963
|
+
"""
|
|
964
|
+
self.passed_stages = set()
|
|
965
|
+
|
|
966
|
+
def setup_fast_track(self) -> None:
|
|
967
|
+
"""
|
|
968
|
+
Setup fast-track skipping from passed stages.
|
|
969
|
+
|
|
970
|
+
Called on a minor rollback (REQUEST_MINOR_CHANGES). Copies
|
|
971
|
+
passed_stages to fast_track_skip so those stages will be
|
|
972
|
+
skipped on the re-run.
|
|
973
|
+
"""
|
|
974
|
+
self.fast_track_skip = self.passed_stages.copy()
|
|
975
|
+
# Don't clear passed_stages - we'll clear it when entering DEV
|
|
976
|
+
|
|
977
|
+
def clear_fast_track(self) -> None:
|
|
978
|
+
"""
|
|
979
|
+
Clear fast-track skipping.
|
|
980
|
+
|
|
981
|
+
Called when workflow completes or on a full rollback.
|
|
982
|
+
"""
|
|
983
|
+
self.fast_track_skip = set()
|
|
984
|
+
|
|
985
|
+
def should_fast_track_skip(self, stage: Stage) -> bool:
|
|
986
|
+
"""
|
|
987
|
+
Check if a stage should be skipped due to fast-track.
|
|
988
|
+
|
|
989
|
+
Args:
|
|
990
|
+
stage: The stage to check.
|
|
991
|
+
|
|
992
|
+
Returns:
|
|
993
|
+
True if the stage is in fast_track_skip set.
|
|
994
|
+
"""
|
|
995
|
+
return stage.value in self.fast_track_skip
|
|
996
|
+
|
|
997
|
+
def to_dict(self) -> dict[str, Any]:
|
|
998
|
+
d = asdict(self)
|
|
999
|
+
d["stage"] = self.stage.value
|
|
1000
|
+
d["task_type"] = self.task_type.value
|
|
1001
|
+
# rollback_history is already converted to list of dicts by asdict
|
|
1002
|
+
# Convert sets to lists for JSON serialization
|
|
1003
|
+
d["passed_stages"] = list(self.passed_stages)
|
|
1004
|
+
d["fast_track_skip"] = list(self.fast_track_skip)
|
|
1005
|
+
return d
|
|
1006
|
+
|
|
1007
|
+
@classmethod
|
|
1008
|
+
def from_dict(cls, d: dict[str, Any]) -> "WorkflowState":
|
|
1009
|
+
# Parse rollback history if present
|
|
1010
|
+
rollback_history = [RollbackEvent.from_dict(r) for r in d.get("rollback_history", [])]
|
|
1011
|
+
|
|
1012
|
+
return cls(
|
|
1013
|
+
stage=Stage.from_str(d["stage"]),
|
|
1014
|
+
attempt=d.get("attempt", 1),
|
|
1015
|
+
awaiting_approval=d.get("awaiting_approval", False),
|
|
1016
|
+
clarification_required=d.get("clarification_required", False),
|
|
1017
|
+
last_failure=d.get("last_failure"),
|
|
1018
|
+
started_at=d.get("started_at", datetime.now(timezone.utc).isoformat()),
|
|
1019
|
+
task_description=d.get("task_description", ""),
|
|
1020
|
+
task_name=d.get("task_name", ""),
|
|
1021
|
+
task_type=TaskType.from_str(d.get("task_type", "feature")),
|
|
1022
|
+
rollback_history=rollback_history,
|
|
1023
|
+
qa_rounds=d.get("qa_rounds"),
|
|
1024
|
+
qa_complete=d.get("qa_complete", False),
|
|
1025
|
+
stage_plan=d.get("stage_plan"),
|
|
1026
|
+
stage_start_time=d.get("stage_start_time"),
|
|
1027
|
+
stage_durations=d.get("stage_durations"),
|
|
1028
|
+
github_issue=d.get("github_issue"),
|
|
1029
|
+
github_repo=d.get("github_repo"),
|
|
1030
|
+
screenshots=d.get("screenshots"),
|
|
1031
|
+
passed_stages=set(d.get("passed_stages", [])),
|
|
1032
|
+
fast_track_skip=set(d.get("fast_track_skip", [])),
|
|
1033
|
+
)
|
|
1034
|
+
|
|
1035
|
+
@classmethod
|
|
1036
|
+
def new(
|
|
1037
|
+
cls,
|
|
1038
|
+
description: str,
|
|
1039
|
+
task_name: str,
|
|
1040
|
+
task_type: TaskType = TaskType.FEATURE,
|
|
1041
|
+
github_issue: int | None = None,
|
|
1042
|
+
github_repo: str | None = None,
|
|
1043
|
+
screenshots: list[str] | None = None,
|
|
1044
|
+
) -> "WorkflowState":
|
|
1045
|
+
return cls(
|
|
1046
|
+
stage=Stage.PM,
|
|
1047
|
+
attempt=1,
|
|
1048
|
+
awaiting_approval=False,
|
|
1049
|
+
clarification_required=False,
|
|
1050
|
+
last_failure=None,
|
|
1051
|
+
started_at=datetime.now(timezone.utc).isoformat(),
|
|
1052
|
+
task_description=description,
|
|
1053
|
+
task_name=task_name,
|
|
1054
|
+
task_type=task_type,
|
|
1055
|
+
github_issue=github_issue,
|
|
1056
|
+
github_repo=github_repo,
|
|
1057
|
+
screenshots=screenshots,
|
|
1058
|
+
)
|
|
1059
|
+
|
|
1060
|
+
|
|
1061
|
+
def get_task_dir(task_name: str) -> Path:
    """Return the directory that holds artifacts for *task_name*."""
    from galangal.config.loader import get_tasks_dir

    tasks_root = get_tasks_dir()
    return tasks_root / task_name
|
|
1066
|
+
|
|
1067
|
+
|
|
1068
|
+
def load_state(task_name: str | None = None) -> WorkflowState | None:
    """
    Load workflow state for a task.

    Args:
        task_name: Task whose state to load. Defaults to the active task.

    Returns:
        The parsed WorkflowState, or None if there is no active task,
        no state file, or the file cannot be read/parsed.
    """
    from galangal.core.tasks import get_active_task

    if task_name is None:
        task_name = get_active_task()
        if task_name is None:
            return None

    state_file = get_task_dir(task_name) / "state.json"
    if not state_file.exists():
        return None

    try:
        with open(state_file, encoding="utf-8") as f:
            return WorkflowState.from_dict(json.load(f))
    except (OSError, json.JSONDecodeError, KeyError, ValueError) as e:
        # OSError: the file disappeared or became unreadable between the
        # exists() check and open() — treat the same as a missing state.
        # ValueError can occur from Stage.from_str or TaskType.from_str
        # with invalid/unknown values in a corrupted or manually edited state.json
        print(f"Error loading state: {e}")
        return None
|
|
1089
|
+
|
|
1090
|
+
|
|
1091
|
+
def save_state(state: WorkflowState) -> None:
    """
    Save workflow state for a task.

    Writes atomically: the JSON is written to a temp file in the same
    directory and then renamed over state.json with os.replace, so a
    crash mid-write cannot leave a truncated file that load_state would
    reject (losing the workflow state).
    """
    import os

    task_dir = get_task_dir(state.task_name)
    task_dir.mkdir(parents=True, exist_ok=True)
    state_file = task_dir / "state.json"
    tmp_file = task_dir / "state.json.tmp"
    with open(tmp_file, "w", encoding="utf-8") as f:
        json.dump(state.to_dict(), f, indent=2)
    # Atomic on POSIX and Windows; same filesystem since same directory.
    os.replace(tmp_file, state_file)
|