gsd-lite 0.6.0 → 0.6.2

This diff shows the changes between publicly released versions of this package as published to a supported registry. It is provided for informational purposes only and reflects the package contents exactly as they appear in their respective public registries.
@@ -13,7 +13,7 @@
13
13
  "name": "gsd",
14
14
  "source": "./",
15
15
  "description": "AI orchestration tool — GSD management shell + Superpowers quality core. 5 commands, 4 agents, 5 workflows, MCP server, context monitoring.",
16
- "version": "0.6.0",
16
+ "version": "0.6.2",
17
17
  "keywords": [
18
18
  "orchestration",
19
19
  "mcp",
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "gsd",
3
- "version": "0.6.0",
3
+ "version": "0.6.2",
4
4
  "description": "AI orchestration tool for Claude Code — GSD management shell + Superpowers quality core",
5
5
  "author": {
6
6
  "name": "sdsrss",
package/README.md CHANGED
@@ -213,8 +213,8 @@ All state lives in `.gsd/state.json` — a single source of truth with:
213
213
  |-----------|-----|----------|
214
214
  | Commands | 32 | **6** |
215
215
  | Agents | 12 | **4** |
216
- | Source files | 100+ | **~35** |
217
- | Installer | 2465 lines | **~80 lines** |
216
+ | Source files | 100+ | **~48** |
217
+ | Installer | 2465 lines | **~290 lines** |
218
218
  | User interactions | 6+ confirmations | **Typically 2** |
219
219
  | TDD / Anti-rationalization | No | **Yes** |
220
220
  | State machine recovery | Partial | **Full (12 modes)** |
@@ -249,7 +249,7 @@ gsd-lite/
249
249
  ├── references/ # 8 reference docs
250
250
  ├── hooks/ # Session lifecycle (StatusLine + PostToolUse + SessionStart + Stop + AutoUpdate)
251
251
  │ └── lib/ # Shared hook utilities (gsd-finder)
252
- ├── tests/ # 826 tests (unit + simulation + E2E)
252
+ ├── tests/ # 866 tests (unit + simulation + E2E)
253
253
  ├── cli.js # Install/uninstall CLI entry
254
254
  ├── install.js # Installation script
255
255
  └── uninstall.js # Uninstall script
@@ -258,7 +258,7 @@ gsd-lite/
258
258
  ## Testing
259
259
 
260
260
  ```bash
261
- npm test # Run all 826 tests
261
+ npm test # Run all 866 tests
262
262
  npm run test:coverage # Tests + coverage report (94%+ lines, 83%+ branches)
263
263
  npm run lint # Biome lint
264
264
  node --test tests/file.js # Run a single test file
@@ -286,6 +286,7 @@ async function fetchLatestRelease(token) {
286
286
  if (!res.ok) return null;
287
287
 
288
288
  const data = await res.json();
289
+ if (!data.tag_name || !data.tarball_url) return null;
289
290
  return {
290
291
  version: data.tag_name.replace(/^v/, ''),
291
292
  tarballUrl: data.tarball_url,
@@ -112,7 +112,7 @@ process.stdin.on('end', () => {
112
112
  if (isCritical) {
113
113
  message = `CONTEXT CRITICAL: Usage at ${usedPct}%. Remaining: ${remaining}%. `
114
114
  + 'Context is nearly exhausted. Complete current task checkpoint immediately, '
115
- + 'set workflow_mode = awaiting_clear via gsd-state-update, and tell user to /clear then /gsd:resume.';
115
+ + 'set workflow_mode = awaiting_clear via state-update, and tell user to /clear then /gsd:resume.';
116
116
  } else {
117
117
  message = `CONTEXT WARNING: Usage at ${usedPct}%. Remaining: ${remaining}%. `
118
118
  + 'Context is getting limited. Avoid starting new complex work. Complete current task then save state.';
package/install.js CHANGED
@@ -170,8 +170,13 @@ export function main() {
170
170
  copyDir(localNM, join(RUNTIME_DIR, 'node_modules'), 'runtime/node_modules (copied)');
171
171
  } else if (!DRY_RUN) {
172
172
  log(' ⧗ Installing runtime dependencies...');
173
- execSync('npm ci --omit=dev', { cwd: RUNTIME_DIR, stdio: 'pipe' });
174
- log(' runtime dependencies installed');
173
+ try {
174
+ execSync('npm ci --omit=dev', { cwd: RUNTIME_DIR, stdio: 'pipe' });
175
+ log(' ✓ runtime dependencies installed');
176
+ } catch (err) {
177
+ log(` ✗ Failed to install runtime dependencies: ${err.message}`);
178
+ process.exit(1);
179
+ }
175
180
  } else {
176
181
  log(' [dry-run] Would install runtime dependencies');
177
182
  }
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "gsd-lite",
3
- "version": "0.6.0",
3
+ "version": "0.6.2",
4
4
  "description": "AI orchestration tool for Claude Code — GSD management shell + Superpowers quality core",
5
5
  "type": "module",
6
6
  "bin": {
@@ -208,20 +208,11 @@ async function evaluatePreflight(state, basePath) {
208
208
  }
209
209
  }
210
210
 
211
- const expired_research = collectExpiredResearch(state);
212
- if (expired_research.length > 0) {
213
- hints.push({
214
- workflow_mode: 'research_refresh_needed',
215
- action: 'dispatch_researcher',
216
- updates: { workflow_mode: 'research_refresh_needed' },
217
- expired_research,
218
- message: 'Research cache expired and must be refreshed before execution resumes',
219
- });
220
- }
221
-
222
- // P0-2: Dirty-phase detection — rollback current_phase to earliest phase
211
+ // Dirty-phase detection — rollback current_phase to earliest phase
223
212
  // that has needs_revalidation tasks, ensuring earlier invalidated work
224
213
  // is re-executed before proceeding with later phases.
214
+ // Priority: placed before research expiry because dirty-phase rollback is a
215
+ // safety-critical action (prevents executing later phases on stale foundations).
225
216
  // Use filter+reduce (not .find) to guarantee lowest-ID match regardless of array order.
226
217
  const dirtyPhases = (state.phases || []).filter(p =>
227
218
  p.id < state.current_phase
@@ -245,6 +236,17 @@ async function evaluatePreflight(state, basePath) {
245
236
  });
246
237
  }
247
238
 
239
+ const expired_research = collectExpiredResearch(state);
240
+ if (expired_research.length > 0) {
241
+ hints.push({
242
+ workflow_mode: 'research_refresh_needed',
243
+ action: 'dispatch_researcher',
244
+ updates: { workflow_mode: 'research_refresh_needed' },
245
+ expired_research,
246
+ message: 'Research cache expired and must be refreshed before execution resumes',
247
+ });
248
+ }
249
+
248
250
  if (hints.length === 0) return { override: null };
249
251
 
250
252
  return {
@@ -53,7 +53,7 @@ function _buildResumeSummary(state, response) {
53
53
  };
54
54
  }
55
55
 
56
- async function resumeAwaitingClear(state, basePath) {
56
+ async function resumeAwaitingClear(state, basePath, _depth = 0) {
57
57
  const health = await readContextHealth(basePath);
58
58
  if (health !== null && health < CONTEXT_RESUME_THRESHOLD) {
59
59
  const persistError = await persist(basePath, {
@@ -83,7 +83,7 @@ async function resumeAwaitingClear(state, basePath) {
83
83
  }
84
84
  const persistError = await persist(basePath, updates);
85
85
  if (persistError) return persistError;
86
- return resumeWorkflow({ basePath });
86
+ return resumeWorkflow({ basePath, _depth: _depth + 1 });
87
87
  }
88
88
 
89
89
  async function resumeExecutingTask(state, basePath) {
@@ -109,26 +109,28 @@ async function resumeExecutingTask(state, basePath) {
109
109
  };
110
110
  }
111
111
 
112
- if (state.current_task) {
113
- const currentTask = getTaskById(phase, state.current_task);
114
- if (currentTask?.lifecycle === 'running') {
115
- const isRetrying = (currentTask.retry_count || 0) > 0;
116
- const persistError = await persist(basePath, {
117
- workflow_mode: 'executing_task',
118
- current_task: currentTask.id,
119
- current_review: null,
120
- });
121
- if (persistError) return persistError;
122
- return buildExecutorDispatch(state, phase, currentTask, {
123
- resumed: true,
124
- interruption_recovered: !isRetrying,
125
- ...(isRetrying ? {
126
- retry_after_failure: true,
127
- retry_count: currentTask.retry_count,
128
- last_failure_summary: currentTask.last_failure_summary,
129
- } : {}),
130
- });
131
- }
112
+ // Find the running task — either from current_task or by scanning (orphan recovery)
113
+ const runningTask = state.current_task
114
+ ? getTaskById(phase, state.current_task)
115
+ : (phase.todo || []).find(t => t.lifecycle === 'running');
116
+
117
+ if (runningTask?.lifecycle === 'running') {
118
+ const isRetrying = (runningTask.retry_count || 0) > 0;
119
+ const persistError = await persist(basePath, {
120
+ workflow_mode: 'executing_task',
121
+ current_task: runningTask.id,
122
+ current_review: null,
123
+ });
124
+ if (persistError) return persistError;
125
+ return buildExecutorDispatch(state, phase, runningTask, {
126
+ resumed: true,
127
+ interruption_recovered: !isRetrying,
128
+ ...(isRetrying ? {
129
+ retry_after_failure: true,
130
+ retry_count: runningTask.retry_count,
131
+ last_failure_summary: runningTask.last_failure_summary,
132
+ } : {}),
133
+ });
132
134
  }
133
135
 
134
136
  const selection = selectRunnableTask(phase, state);
@@ -317,7 +319,7 @@ export async function resumeWorkflow({ basePath = process.cwd(), _depth = 0, unb
317
319
  result = await resumeExecutingTask(state, basePath);
318
320
  break;
319
321
  case 'awaiting_clear':
320
- result = await resumeAwaitingClear(state, basePath);
322
+ result = await resumeAwaitingClear(state, basePath, _depth);
321
323
  break;
322
324
  case 'awaiting_user': {
323
325
  if (state.current_review?.stage === 'direction_drift') {
@@ -74,6 +74,23 @@ export async function handleReviewerResult({ result, basePath = process.cwd() }
74
74
  const qualityFailed = result.quality_passed === false;
75
75
  const needsRework = hasCritical || specFailed || qualityFailed;
76
76
 
77
+ // Safety: if rework is needed but no tasks were targeted for rework,
78
+ // fall back to marking all non-accepted checkpointed/accepted tasks as needs_revalidation
79
+ // to prevent infinite review loops (no runnable tasks → trigger_review → same result).
80
+ if (needsRework && taskPatches.filter(p => p.lifecycle === 'needs_revalidation').length === 0) {
81
+ for (const task of (phase.todo || [])) {
82
+ if (task.lifecycle === 'checkpointed' || task.lifecycle === 'accepted') {
83
+ taskPatches.push({
84
+ id: task.id,
85
+ lifecycle: 'needs_revalidation',
86
+ retry_count: 0,
87
+ evidence_refs: [],
88
+ last_review_feedback: ['Reviewer indicated rework needed but did not specify tasks; all completed tasks require revalidation'],
89
+ });
90
+ }
91
+ }
92
+ }
93
+
77
94
  // Compute retry count once for both exhaustion check and state update
78
95
  const currentRetryCount = phase.phase_review?.retry_count || 0;
79
96
  const nextRetryCount = needsRework ? currentRetryCount + 1 : 0;
@@ -153,8 +170,8 @@ export async function handleReviewerResult({ result, basePath = process.cwd() }
153
170
  workflow_mode: workflowMode,
154
171
  phase_id: phase.id,
155
172
  review_status: reviewStatus,
156
- accepted_count: result.accepted_tasks?.length || 0,
157
- rework_count: result.rework_tasks?.length || 0,
173
+ accepted_count: taskPatches.filter(p => p.lifecycle === 'accepted').length,
174
+ rework_count: taskPatches.filter(p => p.lifecycle === 'needs_revalidation').length,
158
175
  critical_count: result.critical_issues?.length || 0,
159
176
  };
160
177
  }
@@ -251,6 +251,11 @@ export async function update({ updates, basePath = process.cwd(), expectedVersio
251
251
  // Deep merge phases by ID instead of shallow replace [I-1]
252
252
  const merged = { ...state, ...updates };
253
253
 
254
+ // Deep merge context by key (preserves plan_hashes, last_session, etc.)
255
+ if (updates.context && isPlainObject(updates.context)) {
256
+ merged.context = { ...(state.context || {}), ...updates.context };
257
+ }
258
+
254
259
  // Deep merge evidence by key (preserves existing entries)
255
260
  if (updates.evidence && isPlainObject(updates.evidence)) {
256
261
  merged.evidence = { ...(state.evidence || {}), ...updates.evidence };
@@ -326,10 +331,13 @@ export async function update({ updates, basePath = process.cwd(), expectedVersio
326
331
  await _pruneEvidenceFromState(merged, merged.current_phase, gsdDir);
327
332
  }
328
333
 
329
- // Use incremental validation for simple updates (no phases changes)
330
- const validation = !updates.phases
331
- ? validateStateUpdate(state, updates)
332
- : validateState(merged);
334
+ // Use incremental validation for simple updates (no phases/propagation/decisions changes)
335
+ const needsFullValidation = updates.phases
336
+ || (_append_decisions?.length > 0)
337
+ || (_propagation_tasks?.length > 0);
338
+ const validation = needsFullValidation
339
+ ? validateState(merged)
340
+ : validateStateUpdate(state, updates);
333
341
  if (!validation.valid) {
334
342
  return {
335
343
  error: true,
@@ -495,6 +503,7 @@ export async function phaseComplete({
495
503
  if (!driftValidation.valid) {
496
504
  return { error: true, code: ERROR_CODES.VALIDATION_FAILED, message: `Validation failed: ${driftValidation.errors.join('; ')}` };
497
505
  }
506
+ state._version = (state._version ?? 0) + 1;
498
507
  await writeJson(statePath, state);
499
508
  return {
500
509
  success: true,
@@ -525,6 +534,8 @@ export async function phaseComplete({
525
534
  } else if (state.current_phase === phase_id && phase_id >= state.total_phases) {
526
535
  // Final phase completed — mark workflow as completed
527
536
  state.workflow_mode = 'completed';
537
+ state.current_task = null;
538
+ state.current_review = null;
528
539
  }
529
540
 
530
541
  // Update git_head to current commit
@@ -539,6 +550,7 @@ export async function phaseComplete({
539
550
  if (!finalValidation.valid) {
540
551
  return { error: true, code: ERROR_CODES.VALIDATION_FAILED, message: `Validation failed: ${finalValidation.errors.join('; ')}` };
541
552
  }
553
+ state._version = (state._version ?? 0) + 1;
542
554
  await writeJson(statePath, state);
543
555
  return { success: true };
544
556
  });
@@ -570,7 +582,7 @@ export async function addEvidence({ id, data, basePath = process.cwd() }) {
570
582
  if (!result.ok) {
571
583
  return { error: true, code: ERROR_CODES.NO_PROJECT_DIR, message: result.error };
572
584
  }
573
- const state = result.data;
585
+ const state = migrateState(result.data);
574
586
 
575
587
  if (!state.evidence) {
576
588
  state.evidence = {};
@@ -585,6 +597,7 @@ export async function addEvidence({ id, data, basePath = process.cwd() }) {
585
597
  await _pruneEvidenceFromState(state, state.current_phase, gsdDir);
586
598
  }
587
599
 
600
+ state._version = (state._version ?? 0) + 1;
588
601
  await writeJson(statePath, state);
589
602
  return { success: true };
590
603
  });
@@ -650,11 +663,14 @@ export async function pruneEvidence({ currentPhase, basePath = process.cwd() })
650
663
  if (!result.ok) {
651
664
  return { error: true, code: ERROR_CODES.NO_PROJECT_DIR, message: result.error };
652
665
  }
653
- const state = result.data;
666
+ const state = migrateState(result.data);
654
667
 
655
668
  const gsdDir = dirname(statePath);
656
669
  const archived = await _pruneEvidenceFromState(state, currentPhase, gsdDir);
657
- if (archived > 0) await writeJson(statePath, state);
670
+ if (archived > 0) {
671
+ state._version = (state._version ?? 0) + 1;
672
+ await writeJson(statePath, state);
673
+ }
658
674
 
659
675
  return { success: true, archived };
660
676
  });
@@ -734,6 +750,7 @@ export async function patchPlan({ operations, basePath = process.cwd() } = {}) {
734
750
  return { error: true, code: ERROR_CODES.VALIDATION_FAILED, message: `Validation failed: ${validation.errors.join('; ')}` };
735
751
  }
736
752
 
753
+ state._version = (state._version ?? 0) + 1;
737
754
  await writeJson(statePath, state);
738
755
  return { success: true, applied, plan_version: state.plan_version };
739
756
  });
@@ -5,6 +5,7 @@ import { writeFile, rename, unlink } from 'node:fs/promises';
5
5
  import { ensureDir, readJson, writeJson, getStatePath } from '../../utils.js';
6
6
  import {
7
7
  TASK_LIFECYCLE,
8
+ migrateState,
8
9
  validateResearchArtifacts,
9
10
  validateResearchDecisionIndex,
10
11
  validateResearcherResult,
@@ -439,7 +440,7 @@ export async function storeResearch({ result, artifacts, decision_index, basePat
439
440
  return { error: true, code: ERROR_CODES.NO_PROJECT_DIR, message: current.error };
440
441
  }
441
442
 
442
- const state = current.data;
443
+ const state = migrateState(current.data);
443
444
  const gsdDir = dirname(statePath);
444
445
  const researchDir = join(gsdDir, 'research');
445
446
  await ensureDir(researchDir);
@@ -503,6 +504,7 @@ export async function storeResearch({ result, artifacts, decision_index, basePat
503
504
  return { error: true, code: ERROR_CODES.VALIDATION_FAILED, message: `State validation failed: ${validation.errors.join('; ')}` };
504
505
  }
505
506
 
507
+ state._version = (state._version ?? 0) + 1;
506
508
  await writeJson(statePath, state);
507
509
  return {
508
510
  success: true,