@nathapp/nax 0.49.6 → 0.50.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
    "name": "@nathapp/nax",
-   "version": "0.49.6",
+   "version": "0.50.0",
    "description": "AI Coding Agent Orchestrator — loops until done",
    "type": "module",
    "bin": {
@@ -114,6 +114,18 @@ IMPORTANT: Output raw TypeScript code only. Do NOT use markdown code fences (\`\
  });
  const testCode = extractTestCode(rawOutput);

+ if (!testCode) {
+   logger.warn("acceptance", "LLM returned non-code output for acceptance tests — falling back to skeleton", {
+     outputPreview: rawOutput.slice(0, 200),
+   });
+   const skeletonCriteria: AcceptanceCriterion[] = refinedCriteria.map((c, i) => ({
+     id: `AC-${i + 1}`,
+     text: c.refined,
+     lineNumber: i + 1,
+   }));
+   return { testCode: generateSkeletonTests(options.featureName, skeletonCriteria), criteria: skeletonCriteria };
+ }
+
  const refinedJsonContent = JSON.stringify(
    refinedCriteria.map((c, i) => ({
      acId: `AC-${i + 1}`,
@@ -306,6 +318,16 @@ export async function generateAcceptanceTests(
  // Extract test code from output
  const testCode = extractTestCode(output);

+ if (!testCode) {
+   logger.warn("acceptance", "LLM returned non-code output for acceptance tests — falling back to skeleton", {
+     outputPreview: output.slice(0, 200),
+   });
+   return {
+     testCode: generateSkeletonTests(options.featureName, criteria),
+     criteria,
+   };
+ }
+
  return {
    testCode,
    criteria,
@@ -328,21 +350,40 @@ export async function generateAcceptanceTests(
  * @param output - Agent stdout
  * @returns Extracted test code
  */
- function extractTestCode(output: string): string {
+ function extractTestCode(output: string): string | null {
+   let code: string | undefined;
+
    // Try to extract from markdown code fence
    const fenceMatch = output.match(/```(?:typescript|ts)?\s*([\s\S]*?)\s*```/);
    if (fenceMatch) {
-     return fenceMatch[1].trim();
+     code = fenceMatch[1].trim();
    }

    // If no fence, try to find import statement and take everything from there
-   const importMatch = output.match(/import\s+{[\s\S]+/);
-   if (importMatch) {
-     return importMatch[0].trim();
+   if (!code) {
+     const importMatch = output.match(/import\s+{[\s\S]+/);
+     if (importMatch) {
+       code = importMatch[0].trim();
+     }
+   }
+
+   // If no fence and no import, try to find describe() block
+   if (!code) {
+     const describeMatch = output.match(/describe\s*\([\s\S]+/);
+     if (describeMatch) {
+       code = describeMatch[0].trim();
+     }
+   }
+
+   if (!code) return null;
+
+   // Validate: extracted code must contain at least one test-like keyword
+   const hasTestKeyword = /\b(?:describe|test|it|expect)\s*\(/.test(code);
+   if (!hasTestKeyword) {
+     return null;
    }

-   // Fall back to full output
-   return output.trim();
+   return code;
  }

  /**
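For reference, the reworked extraction now tries a fenced block, then an import statement, then a bare describe() block, and rejects anything without a test-like keyword. A rough sketch of the branches (inputs invented; the function is module-private, so expected results are shown as comments rather than assertions):

```typescript
// Illustrative inputs (not from the package) and the branch each one exercises:
const bareImport = 'import { describe, it } from "bun:test";\ndescribe("auth", () => {});';
const bareDescribe = 'describe("auth", () => { it("works", () => {}); });';
const prose = "Sorry, I could not produce tests for this feature.";

// extractTestCode(bareImport)   -> everything from the import statement onward
// extractTestCode(bareDescribe) -> the describe() block (new third fallback)
// extractTestCode(prose)        -> null, so callers fall back to generateSkeletonTests()
```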
package/src/cli/plan.ts CHANGED
@@ -395,15 +395,18 @@ function buildCodebaseContext(scan: CodebaseScan): string {
  /**
  * Build the full planning prompt sent to the LLM.
  *
+ * Structured as 3 explicit steps (ENH-006):
+ * Step 1: Understand the spec
+ * Step 2: Analyze codebase (existing) or architecture decisions (greenfield)
+ * Step 3: Generate implementation stories from analysis
+ *
  * Includes:
- * - Spec content
- * - Codebase context
- * - Output schema (exact prd.json JSON structure)
- * - Complexity classification guide
- * - Test strategy guide
+ * - Spec content + codebase context
+ * - Output schema with analysis + contextFiles fields
+ * - Complexity + test strategy guides
  * - MW-007: Monorepo hint and package list when packages are detected
  */
- function buildPlanningPrompt(
+ export function buildPlanningPrompt(
    specContent: string,
    codebaseContext: string,
    outputFilePath?: string,
@@ -423,14 +426,48 @@ function buildPlanningPrompt(

    return `You are a senior software architect generating a product requirements document (PRD) as JSON.

+ ## Step 1: Understand the Spec
+
+ Read the spec carefully. Identify the goal, scope, constraints, and what "done" looks like.
+
  ## Spec

  ${specContent}

+ ## Step 2: Analyze
+
+ Examine the codebase context below.
+
+ If the codebase has existing code (refactoring, enhancement, bug fix):
+ - Which existing files need modification?
+ - Which files import from or depend on them?
+ - What tests cover the affected code?
+ - What are the risks (breaking changes, backward compatibility)?
+ - What is the migration path?
+
+ If this is a greenfield project (empty or minimal codebase):
+ - What is the target architecture?
+ - What are the key technical decisions (framework, patterns, conventions)?
+ - What should be built first (dependency order)?
+
+ Record ALL findings in the "analysis" field of the output JSON. This analysis is provided to every implementation agent as context — be thorough.
+
  ## Codebase Context

  ${codebaseContext}${monorepoHint}

+ ## Step 3: Generate Implementation Stories
+
+ Based on your Step 2 analysis, create stories that produce CODE CHANGES.
+
+ ${GROUPING_RULES}
+
+ For each story, set "contextFiles" to the key source files the agent should read before implementing (max 5 per story). Use your Step 2 analysis to identify the most relevant files. Leave empty for greenfield stories with no existing files to reference.
+
+ ${COMPLEXITY_GUIDE}
+
+ ${TEST_STRATEGY_GUIDE}
+
  ## Output Schema

  Generate a JSON object with this exact structure (no markdown, no explanation — JSON only):
@@ -438,6 +475,7 @@ Generate a JSON object with this exact structure (no markdown, no explanation
  {
    "project": "string — project name",
    "feature": "string — feature name",
+   "analysis": "string — your Step 2 analysis: key files, impact areas, risks, architecture decisions, migration notes. All implementation agents will receive this.",
    "branchName": "string — git branch (e.g. feat/my-feature)",
    "createdAt": "ISO 8601 timestamp",
    "updatedAt": "ISO 8601 timestamp",
@@ -447,13 +485,14 @@ Generate a JSON object with this exact structure (no markdown, no explanation
    "title": "string — concise story title",
    "description": "string — detailed description of the story",
    "acceptanceCriteria": ["string — each AC line"],
+   "contextFiles": ["string — key source files the agent should read (max 5, relative paths)"],
    "tags": ["string — routing tags, e.g. feature, security, api"],
    "dependencies": ["string — story IDs this story depends on"],${workdirField}
    "status": "pending",
    "passes": false,
    "routing": {
      "complexity": "simple | medium | complex | expert",
-     "testStrategy": "test-after | tdd-simple | three-session-tdd | three-session-tdd-lite",
+     "testStrategy": "tdd-simple | three-session-tdd-lite | three-session-tdd | test-after",
      "reasoning": "string — brief classification rationale"
    },
    "escalations": [],
@@ -462,12 +501,6 @@ Generate a JSON object with this exact structure (no markdown, no explanation
  ]
  }

- ${COMPLEXITY_GUIDE}
-
- ${TEST_STRATEGY_GUIDE}
-
- ${GROUPING_RULES}
-
  ${
    outputFilePath
      ? `Write the PRD JSON directly to this file path: ${outputFilePath}\nDo NOT output the JSON to the conversation. Write the file, then reply with a brief confirmation.`
@@ -40,31 +40,32 @@ export function resolveTestStrategy(raw: string | undefined): TestStrategy {

  export const COMPLEXITY_GUIDE = `## Complexity Classification Guide

- - simple: ≤50 LOC, single-file change, purely additive, no new dependencies → test-after
- - medium: 50–200 LOC, 2–5 files, standard patterns, clear requirements → tdd-simple
+ - simple: ≤50 LOC, single-file change, purely additive, no new dependencies → tdd-simple
+ - medium: 50–200 LOC, 2–5 files, standard patterns, clear requirements → three-session-tdd-lite
  - complex: 200–500 LOC, multiple modules, new abstractions or integrations → three-session-tdd
- - expert: 500+ LOC, architectural changes, cross-cutting concerns, high risk → three-session-tdd-lite
+ - expert: 500+ LOC, architectural changes, cross-cutting concerns, high risk → three-session-tdd

  ### Security Override

  Security-critical functions (authentication, cryptography, tokens, sessions, credentials,
- password hashing, access control) must be classified at MINIMUM "medium" complexity
- regardless of LOC count. These require at minimum "tdd-simple" test strategy.`;
+ password hashing, access control) must use three-session-tdd regardless of complexity.`;

  export const TEST_STRATEGY_GUIDE = `## Test Strategy Guide

- - test-after: Simple changes with well-understood behavior. Write tests after implementation in a single session.
- - tdd-simple: Medium complexity. Write failing tests first, then implement to pass them all in one session.
- - three-session-tdd: Complex stories. 3 sessions: (1) test-writer writes failing tests — no src/ changes allowed, (2) implementer makes them pass without modifying test files, (3) verifier confirms correctness.
- - three-session-tdd-lite: Expert/high-risk stories. 3 sessions: (1) test-writer writes failing tests and may create minimal src/ stubs for imports, (2) implementer makes tests pass and may add missing coverage or replace stubs, (3) verifier confirms correctness.
+ - tdd-simple: Simple stories (≤50 LOC). Write failing tests first, then implement to pass them — all in one session.
+ - three-session-tdd-lite: Medium stories, or complex stories involving UI/CLI/integration. 3 sessions: (1) test-writer writes failing tests and may create minimal src/ stubs for imports, (2) implementer makes tests pass and may replace stubs, (3) verifier confirms correctness.
+ - three-session-tdd: Complex/expert stories or security-critical code. 3 sessions with strict isolation: (1) test-writer writes failing tests — no src/ changes allowed, (2) implementer makes them pass without modifying test files, (3) verifier confirms correctness.
+ - test-after: Only when explicitly configured (tddStrategy: "off"). Write tests after implementation. Not auto-assigned.`;

- export const GROUPING_RULES = `## Grouping Rules
+ export const GROUPING_RULES = `## Story Rules

+ - Every story must produce code changes verifiable by tests or review.
+ - NEVER create stories for analysis, planning, documentation, or migration plans.
+ Your analysis belongs in the "analysis" field, not in a story.
+ - NEVER create stories whose primary purpose is writing tests, achieving coverage
+ targets, or running validation/regression suites. Each story's testStrategy
+ handles test creation as part of implementation. Testing is a built-in pipeline
+ stage, not a user story. No exceptions.
  - Combine small, related tasks into a single "simple" or "medium" story.
- - Do NOT create separate stories for every single file or function unless complex.
- - Do NOT create standalone stories purely for test coverage or testing.
- Each story's testStrategy already handles testing (tdd-simple writes tests first,
- three-session-tdd uses separate test-writer session, test-after writes tests after).
- Only create a dedicated test story for unique integration/E2E test logic that spans
- multiple stories and cannot be covered by individual story test strategies.
+ Do NOT create separate stories for every single file or function unless complex.
  - Aim for coherent units of value. Maximum recommended stories: 10-15 per feature.`;
@@ -21,6 +21,7 @@ import {
    createStoryContext,
    createTestCoverageContext,
  } from "./elements";
+ import { getParentOutputFiles } from "./parent-context";
  import { generateTestCoverageSummary } from "./test-scanner";
  import type { BuiltContext, ContextBudget, ContextElement, StoryContext } from "./types";

@@ -115,6 +116,18 @@ export async function buildContext(storyContext: StoryContext, budget: ContextBu
  // Add current story (high priority)
  elements.push(createStoryContext(currentStory, 80));

+ // ENH-006: Inject planning analysis from prd.analysis (priority 88 — above story, below errors)
+ if (prd.analysis) {
+   const analysisContent = `The following analysis was performed during the planning phase. Use it to understand the codebase context before implementing:\n\n${prd.analysis}`;
+   elements.push({
+     type: "planning-analysis",
+     label: "Planning Analysis",
+     content: analysisContent,
+     priority: 88,
+     tokens: estimateTokens(analysisContent),
+   });
+ }
+
  // Add dependency stories (medium priority)
  addDependencyElements(elements, currentStory, prd);

@@ -199,6 +212,18 @@ async function addFileElements(

  let contextFiles = getContextFiles(story);

+ // ENH-005: Inject parent output files for context chaining
+ const parentFiles = getParentOutputFiles(story, storyContext.prd?.userStories ?? []);
+ if (parentFiles.length > 0) {
+   const logger = getLogger();
+   logger.info("context", "Injecting parent output files for context chaining", {
+     storyId: story.id,
+     parentFiles,
+   });
+   // Merge with existing contextFiles (don't replace — parent files are supplementary)
+   contextFiles = [...new Set([...contextFiles, ...parentFiles])];
+ }
+
  // Auto-detect contextFiles if empty and enabled (BUG-006)
  if (
    contextFiles.length === 0 &&
@@ -0,0 +1,39 @@
+ /**
+ * Parent output file resolution for context chaining (ENH-005).
+ *
+ * When a story has dependencies, its parent stories' outputFiles are injected
+ * as additional contextFiles so agents have targeted context from prior work.
+ */
+
+ import type { UserStory } from "../prd/types";
+
+ const MAX_PARENT_FILES = 10;
+
+ const NOISE_PATTERNS = [
+   /\.test\.(ts|js|tsx|jsx)$/,
+   /\.spec\.(ts|js|tsx|jsx)$/,
+   /package-lock\.json$/,
+   /bun\.lockb?$/,
+   /\.gitignore$/,
+   /^nax\//,
+ ];
+
+ /**
+ * Get output files from direct parent stories (dependencies[]).
+ * Only direct parents — no transitive resolution (keep simple, extend later).
+ * Returns deduped list, filtered of noise, capped at MAX_PARENT_FILES.
+ */
+ export function getParentOutputFiles(story: UserStory, allStories: UserStory[]): string[] {
+   if (!story.dependencies || story.dependencies.length === 0) return [];
+
+   const parentFiles: string[] = [];
+   for (const depId of story.dependencies) {
+     const parent = allStories.find((s) => s.id === depId);
+     if (parent?.outputFiles) {
+       parentFiles.push(...parent.outputFiles);
+     }
+   }
+
+   const unique = [...new Set(parentFiles)];
+   return unique.filter((f) => !NOISE_PATTERNS.some((p) => p.test(f))).slice(0, MAX_PARENT_FILES);
+ }
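A minimal usage sketch of the new helper (story objects trimmed to the fields it reads; IDs and paths are invented, not from the package):

```typescript
// Hypothetical stories: US-2 depends on US-1, whose outputFiles were captured after it passed.
const us1 = { id: "US-1", outputFiles: ["src/auth/token.ts", "src/auth/token.test.ts", "bun.lockb"] };
const us2 = { id: "US-2", dependencies: ["US-1"] };

// The test file and lock file match NOISE_PATTERNS, so only the source file survives:
// getParentOutputFiles(us2 as any, [us1, us2] as any) -> ["src/auth/token.ts"]
```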
@@ -21,10 +21,13 @@ export function applyDecomposition(prd: PRD, result: DecomposeResult): void {
  const originalIndex = prd.userStories.findIndex((s) => s.id === parentStoryId);
  if (originalIndex === -1) return;

+ const parentStory = prd.userStories[originalIndex];
+
  // Mark original story as decomposed
- prd.userStories[originalIndex].status = "decomposed";
+ parentStory.status = "decomposed";

  // Convert substories to UserStory format with parentStoryId attached
+ // ENH-008: Inherit workdir from parent so sub-stories run in the same package scope
  const newStories = subStories.map((sub): UserStory & { parentStoryId: string } => ({
    id: sub.id,
    title: sub.title,
@@ -37,6 +40,7 @@ export function applyDecomposition(prd: PRD, result: DecomposeResult): void {
    escalations: [],
    attempts: 0,
    parentStoryId: sub.parentStoryId,
+   ...(parentStory.workdir !== undefined && { workdir: parentStory.workdir }),
  }));

  // Insert substories immediately after the original story
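The workdir inheritance above relies on the conditional object-spread idiom, which adds the key only when the parent actually has one; a standalone sketch with invented values:

```typescript
const parentWorkdir: string | undefined = "packages/api";

const sub = {
  id: "US-3.1",
  // Spreading `false` contributes nothing, so `workdir` appears only when defined.
  ...(parentWorkdir !== undefined && { workdir: parentWorkdir }),
};
// -> { id: "US-3.1", workdir: "packages/api" }; with parentWorkdir undefined -> { id: "US-3.1" }
```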
@@ -150,7 +150,7 @@ export async function preIterationTierCheck(
  });

  const failedPrd = { ...prd };
- markStoryFailed(failedPrd, story.id);
+ markStoryFailed(failedPrd, story.id, undefined, undefined);
  await savePRD(failedPrd, prdPath);

  if (featureDir) {
@@ -56,7 +56,7 @@ export async function handleNoTierAvailable(

  // Outcome is "fail"
  const failedPrd = { ...ctx.prd };
- markStoryFailed(failedPrd, ctx.story.id, failureCategory);
+ markStoryFailed(failedPrd, ctx.story.id, failureCategory, undefined);
  await savePRD(failedPrd, ctx.prdPath);

  logger?.error("execution", "Story failed - execution failed", {
@@ -119,7 +119,7 @@ export async function handleMaxAttemptsReached(

  // Outcome is "fail"
  const failedPrd = { ...ctx.prd };
- markStoryFailed(failedPrd, ctx.story.id, failureCategory);
+ markStoryFailed(failedPrd, ctx.story.id, failureCategory, undefined);
  await savePRD(failedPrd, ctx.prdPath);

  logger?.error("execution", "Story failed - max attempts reached", {
@@ -8,6 +8,7 @@
  * 4. Initial PRD analysis
  */

+ import { join } from "node:path";
  import chalk from "chalk";
  import type { NaxConfig } from "../../config";
  import { AgentNotFoundError, AgentNotInstalledError, StoryLimitExceededError } from "../../errors";
@@ -15,8 +16,20 @@ import { getSafeLogger } from "../../logger";
  import type { AgentGetFn } from "../../pipeline/types";
  import { countStories, loadPRD, markStoryPassed, savePRD } from "../../prd";
  import type { PRD } from "../../prd/types";
+ import { runReview } from "../../review/runner";
+ import type { ReviewConfig } from "../../review/types";
  import { hasCommitsForStory } from "../../utils/git";

+ /**
+ * Injectable dependencies for reconcileState — allows tests to mock
+ * hasCommitsForStory and runReview without mock.module().
+ */
+ export const _reconcileDeps = {
+   hasCommitsForStory: (workdir: string, storyId: string) => hasCommitsForStory(workdir, storyId),
+   runReview: (reviewConfig: ReviewConfig, workdir: string, executionConfig: NaxConfig["execution"]) =>
+     runReview(reviewConfig, workdir, executionConfig),
+ };
+
  export interface InitializationContext {
    config: NaxConfig;
    prdPath: string;
@@ -43,26 +56,47 @@ export interface InitializationResult {
  * Reconcile PRD state with git history
  *
  * Checks if failed stories have commits in git history and marks them as passed.
- * This handles the case where TDD failed but agent already committed code.
+ * For stories that failed at review/autofix stage, re-runs the review before
+ * reconciling to ensure the code quality issues were actually fixed.
  */
- async function reconcileState(prd: PRD, prdPath: string, workdir: string): Promise<PRD> {
+ async function reconcileState(prd: PRD, prdPath: string, workdir: string, config: NaxConfig): Promise<PRD> {
    const logger = getSafeLogger();
    let reconciledCount = 0;
    let modified = false;

    for (const story of prd.userStories) {
-     if (story.status === "failed") {
-       const hasCommits = await hasCommitsForStory(workdir, story.id);
-       if (hasCommits) {
-         logger?.warn("reconciliation", "Failed story has commits in git history, marking as passed", {
-           storyId: story.id,
-           title: story.title,
-         });
-         markStoryPassed(prd, story.id);
-         reconciledCount++;
-         modified = true;
+     if (story.status !== "failed") continue;
+
+     const hasCommits = await _reconcileDeps.hasCommitsForStory(workdir, story.id);
+     if (!hasCommits) continue;
+
+     // Gate: re-run review for stories that failed at review/autofix stage
+     if (story.failureStage === "review" || story.failureStage === "autofix") {
+       const effectiveWorkdir = story.workdir ? join(workdir, story.workdir) : workdir;
+       try {
+         const reviewResult = await _reconcileDeps.runReview(config.review, effectiveWorkdir, config.execution);
+         if (!reviewResult.success) {
+           logger?.warn("reconciliation", "Review still fails — not reconciling story", {
+             storyId: story.id,
+             failureReason: reviewResult.failureReason,
+           });
+           continue;
+         }
+         logger?.info("reconciliation", "Review now passes — reconciling story", { storyId: story.id });
+       } catch {
+         // Non-fatal: if review check errors, skip reconciliation for this story
+         logger?.warn("reconciliation", "Review check errored — not reconciling story", { storyId: story.id });
+         continue;
        }
      }
+
+     logger?.warn("reconciliation", "Failed story has commits in git history, marking as passed", {
+       storyId: story.id,
+       title: story.title,
+     });
+     markStoryPassed(prd, story.id);
+     reconciledCount++;
+     modified = true;
    }

    if (reconciledCount > 0) {
@@ -137,7 +171,7 @@ export async function initializeRun(ctx: InitializationContext): Promise<Initial

  // Load and reconcile PRD
  let prd = await loadPRD(ctx.prdPath);
- prd = await reconcileState(prd, ctx.prdPath, ctx.workdir);
+ prd = await reconcileState(prd, ctx.prdPath, ctx.workdir, ctx.config);

  // Validate story counts
  const counts = countStories(prd);
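Because `_reconcileDeps` is exported, a test can swap the git and review calls without module mocking; a rough sketch assuming bun:test, with the import path and the review-result shape guessed rather than taken from the package:

```typescript
import { test } from "bun:test";
import { _reconcileDeps } from "./initialization"; // hypothetical path

test("stories that still fail review are not reconciled", async () => {
  _reconcileDeps.hasCommitsForStory = async () => true;
  // Result shape assumed: the reconciliation code only reads `success` and `failureReason`.
  _reconcileDeps.runReview = async () => ({ success: false, failureReason: "lint errors" }) as any;
  // ...invoke run initialization and assert the review-stage story stays "failed".
});
```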
@@ -171,7 +171,7 @@ export async function executeParallel(
      worktreePath,
    });
  } catch (error) {
-   markStoryFailed(currentPrd, story.id);
+   markStoryFailed(currentPrd, story.id, undefined, undefined);
    logger?.error("parallel", "Failed to create worktree", {
      storyId: story.id,
      error: errorMessage(error),
@@ -218,7 +218,7 @@ export async function executeParallel(
    });
  } else {
    // Merge conflict — mark story as failed
-   markStoryFailed(currentPrd, mergeResult.storyId);
+   markStoryFailed(currentPrd, mergeResult.storyId, undefined, undefined);
    batchResult.mergeConflicts.push({
      storyId: mergeResult.storyId,
      conflictFiles: mergeResult.conflictFiles || [],
@@ -241,7 +241,7 @@ export async function executeParallel(

  // Mark failed stories in PRD and clean up their worktrees
  for (const { story, error } of batchResult.failed) {
-   markStoryFailed(currentPrd, story.id);
+   markStoryFailed(currentPrd, story.id, undefined, undefined);

    logger?.error("parallel", "Cleaning up failed story worktree", {
      storyId: story.id,
@@ -17,9 +17,23 @@ import type { PluginRegistry } from "../plugins";
  import { countStories, markStoryFailed, markStoryPaused, savePRD } from "../prd";
  import type { PRD, UserStory } from "../prd/types";
  import type { routeTask } from "../routing";
+ import { captureOutputFiles } from "../utils/git";
  import { handleTierEscalation } from "./escalation";
  import { appendProgress } from "./progress";

+ /** Filter noise from output files (test files, lock files, nax runtime files) */
+ function filterOutputFiles(files: string[]): string[] {
+   const NOISE = [
+     /\.test\.(ts|js|tsx|jsx)$/,
+     /\.spec\.(ts|js|tsx|jsx)$/,
+     /package-lock\.json$/,
+     /bun\.lock(b?)$/,
+     /\.gitignore$/,
+     /^nax\//,
+   ];
+   return files.filter((f) => !NOISE.some((p) => p.test(f))).slice(0, 15);
+ }
+
  export interface PipelineHandlerContext {
    config: NaxConfig;
    prd: PRD;
@@ -84,6 +98,21 @@ export async function handlePipelineSuccess(
    });
  }

+ // ENH-005: Capture output files for context chaining
+ if (ctx.storyGitRef) {
+   for (const completedStory of ctx.storiesToExecute) {
+     try {
+       const rawFiles = await captureOutputFiles(ctx.workdir, ctx.storyGitRef, completedStory.workdir);
+       const filtered = filterOutputFiles(rawFiles);
+       if (filtered.length > 0) {
+         completedStory.outputFiles = filtered;
+       }
+     } catch {
+       // Non-fatal — context chaining is best-effort
+     }
+   }
+ }
+
  const updatedCounts = countStories(prd);
  logger?.info("progress", "Progress update", {
    totalStories: updatedCounts.total,
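A rough illustration of the noise filter defined earlier in this file (paths invented):

```typescript
const captured = [
  "src/auth/token.ts",
  "src/auth/token.test.ts", // dropped: test file
  "package-lock.json",      // dropped: lock file
  "nax/progress.md",        // dropped: nax runtime file
];
// filterOutputFiles(captured) -> ["src/auth/token.ts"], and the result is capped at 15 entries
```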
@@ -135,7 +164,7 @@ export async function handlePipelineFailure(
    break;

  case "fail":
-   markStoryFailed(prd, ctx.story.id, pipelineResult.context.tddFailureCategory);
+   markStoryFailed(prd, ctx.story.id, pipelineResult.context.tddFailureCategory, pipelineResult.stoppedAtStage);
    await savePRD(prd, ctx.prdPath);
    prdDirty = true;
    logger?.error("pipeline", "Story failed", { storyId: ctx.story.id, reason: pipelineResult.reason });
@@ -166,6 +166,11 @@ export function buildReviewRectificationPrompt(failedChecks: ReviewCheckResult[]
    .map((c) => `## ${c.check} errors (exit code ${c.exitCode})\n\`\`\`\n${c.output}\n\`\`\``)
    .join("\n\n");

+ // ENH-008: Scope constraint for monorepo stories — prevent out-of-package changes
+ const scopeConstraint = story.workdir
+   ? `\n\nIMPORTANT: Only modify files within \`${story.workdir}/\`. Do NOT touch files outside this directory.`
+   : "";
+
  return `You are fixing lint/typecheck errors from a code review.

  Story: ${story.title} (${story.id})
@@ -176,7 +181,7 @@ ${errors}

  Fix ALL errors listed above. Do NOT change test files or test behavior.
  Do NOT add new features — only fix the quality check errors.
- Commit your fixes when done.`;
+ Commit your fixes when done.${scopeConstraint}`;
  }

  async function runAgentRectification(ctx: PipelineContext): Promise<boolean> {
@@ -211,9 +216,12 @@ async function runAgentRectification(ctx: PipelineContext): Promise<boolean> {
  const modelTier = ctx.story.routing?.modelTier ?? ctx.config.autoMode.escalation.tierOrder[0]?.tier ?? "balanced";
  const modelDef = resolveModel(ctx.config.models[modelTier]);

+ // ENH-008: Scope agent to story.workdir for monorepo — prevents out-of-package changes
+ const rectificationWorkdir = ctx.story.workdir ? join(ctx.workdir, ctx.story.workdir) : ctx.workdir;
+
  await agent.run({
    prompt,
-   workdir: ctx.workdir,
+   workdir: rectificationWorkdir,
    modelTier,
    modelDef,
    timeoutSeconds: ctx.config.execution.sessionTimeoutSeconds,
package/src/prd/index.ts CHANGED
@@ -168,7 +168,12 @@ export function markStoryPassed(prd: PRD, storyId: string): void {
  }

  /** Mark a story as failed */
- export function markStoryFailed(prd: PRD, storyId: string, failureCategory?: FailureCategory): void {
+ export function markStoryFailed(
+   prd: PRD,
+   storyId: string,
+   failureCategory?: FailureCategory,
+   failureStage?: string,
+ ): void {
    const story = prd.userStories.find((s) => s.id === storyId);
    if (story) {
      story.status = "failed";
@@ -176,6 +181,9 @@ export function markStoryFailed(prd: PRD, storyId: string, failureCategory?: Fai
      if (failureCategory !== undefined) {
        story.failureCategory = failureCategory;
      }
+     if (failureStage !== undefined) {
+       story.failureStage = failureStage;
+     }
    }
  }

package/src/prd/types.ts CHANGED
@@ -125,6 +125,8 @@ export interface UserStory {
  customContext?: string[];
  /** Category of the last failure (set when story is marked failed) */
  failureCategory?: FailureCategory;
+ /** Pipeline stage where this story last failed (set by markStoryFailed) */
+ failureStage?: string;
  /** Worktree path for parallel execution (set when --parallel is used) */
  worktreePath?: string;
  /**
@@ -133,6 +135,8 @@ export interface UserStory {
  * @example "packages/api"
  */
  workdir?: string;
+ /** Files created/modified by this story (auto-captured after completion, used by dependent stories) */
+ outputFiles?: string[];
  }

  // ============================================================================
@@ -236,6 +240,8 @@ export interface PRD {
  project: string;
  /** Feature name */
  feature: string;
+ /** Codebase analysis from planning phase — injected into all story contexts (ENH-006) */
+ analysis?: string;
  /** Git branch name */
  branchName: string;
  /** Creation timestamp */
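Taken together, a prd.json fragment using the new fields might look roughly like this (values invented; only the fields touched by this release are shown):

```typescript
const prdFragment = {
  project: "acme",
  feature: "token-refresh",
  analysis: "Planner's Step 2 findings: key files, impact areas, risks, migration notes...",
  userStories: [
    {
      id: "US-2",
      dependencies: ["US-1"],
      contextFiles: ["src/auth/token.ts"],  // planner-selected reading list (max 5)
      outputFiles: ["src/auth/refresh.ts"], // auto-captured after the story passes
      failureStage: "review",               // only present if the story was marked failed at that stage
    },
  ],
};
```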