ai-sdlc 0.2.0-alpha.6 → 0.2.0-alpha.61
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +65 -1057
- package/dist/agents/implementation.d.ts +36 -1
- package/dist/agents/implementation.d.ts.map +1 -1
- package/dist/agents/implementation.js +259 -30
- package/dist/agents/implementation.js.map +1 -1
- package/dist/agents/index.d.ts +2 -0
- package/dist/agents/index.d.ts.map +1 -1
- package/dist/agents/index.js +2 -0
- package/dist/agents/index.js.map +1 -1
- package/dist/agents/orchestrator.d.ts +61 -0
- package/dist/agents/orchestrator.d.ts.map +1 -0
- package/dist/agents/orchestrator.js +443 -0
- package/dist/agents/orchestrator.js.map +1 -0
- package/dist/agents/planning.d.ts +1 -1
- package/dist/agents/planning.d.ts.map +1 -1
- package/dist/agents/planning.js +55 -4
- package/dist/agents/planning.js.map +1 -1
- package/dist/agents/refinement.d.ts.map +1 -1
- package/dist/agents/refinement.js +22 -3
- package/dist/agents/refinement.js.map +1 -1
- package/dist/agents/research.d.ts +85 -1
- package/dist/agents/research.d.ts.map +1 -1
- package/dist/agents/research.js +506 -16
- package/dist/agents/research.js.map +1 -1
- package/dist/agents/review.d.ts +116 -2
- package/dist/agents/review.d.ts.map +1 -1
- package/dist/agents/review.js +847 -93
- package/dist/agents/review.js.map +1 -1
- package/dist/agents/rework.d.ts.map +1 -1
- package/dist/agents/rework.js +25 -4
- package/dist/agents/rework.js.map +1 -1
- package/dist/agents/single-task.d.ts +41 -0
- package/dist/agents/single-task.d.ts.map +1 -0
- package/dist/agents/single-task.js +357 -0
- package/dist/agents/single-task.js.map +1 -0
- package/dist/agents/state-assessor.d.ts +3 -3
- package/dist/agents/state-assessor.d.ts.map +1 -1
- package/dist/agents/state-assessor.js +6 -6
- package/dist/agents/state-assessor.js.map +1 -1
- package/dist/agents/test-pattern-detector.d.ts +49 -0
- package/dist/agents/test-pattern-detector.d.ts.map +1 -0
- package/dist/agents/test-pattern-detector.js +273 -0
- package/dist/agents/test-pattern-detector.js.map +1 -0
- package/dist/agents/verification.d.ts +11 -0
- package/dist/agents/verification.d.ts.map +1 -1
- package/dist/agents/verification.js +99 -12
- package/dist/agents/verification.js.map +1 -1
- package/dist/cli/batch-processor.d.ts +64 -0
- package/dist/cli/batch-processor.d.ts.map +1 -0
- package/dist/cli/batch-processor.js +85 -0
- package/dist/cli/batch-processor.js.map +1 -0
- package/dist/cli/batch-validator.d.ts +80 -0
- package/dist/cli/batch-validator.d.ts.map +1 -0
- package/dist/cli/batch-validator.js +121 -0
- package/dist/cli/batch-validator.js.map +1 -0
- package/dist/cli/commands/migrate.js +1 -1
- package/dist/cli/commands/migrate.js.map +1 -1
- package/dist/cli/commands.d.ts +67 -3
- package/dist/cli/commands.d.ts.map +1 -1
- package/dist/cli/commands.js +1765 -198
- package/dist/cli/commands.js.map +1 -1
- package/dist/cli/daemon.d.ts.map +1 -1
- package/dist/cli/daemon.js +25 -3
- package/dist/cli/daemon.js.map +1 -1
- package/dist/cli/runner.d.ts.map +1 -1
- package/dist/cli/runner.js +35 -12
- package/dist/cli/runner.js.map +1 -1
- package/dist/core/auth.d.ts +43 -0
- package/dist/core/auth.d.ts.map +1 -1
- package/dist/core/auth.js +105 -1
- package/dist/core/auth.js.map +1 -1
- package/dist/core/client.d.ts +25 -1
- package/dist/core/client.d.ts.map +1 -1
- package/dist/core/client.js +247 -7
- package/dist/core/client.js.map +1 -1
- package/dist/core/config.d.ts +32 -1
- package/dist/core/config.d.ts.map +1 -1
- package/dist/core/config.js +146 -3
- package/dist/core/config.js.map +1 -1
- package/dist/core/conflict-detector.d.ts +108 -0
- package/dist/core/conflict-detector.d.ts.map +1 -0
- package/dist/core/conflict-detector.js +413 -0
- package/dist/core/conflict-detector.js.map +1 -0
- package/dist/core/git-utils.d.ts +28 -0
- package/dist/core/git-utils.d.ts.map +1 -0
- package/dist/core/git-utils.js +146 -0
- package/dist/core/git-utils.js.map +1 -0
- package/dist/core/index.d.ts +19 -0
- package/dist/core/index.d.ts.map +1 -0
- package/dist/core/index.js +19 -0
- package/dist/core/index.js.map +1 -0
- package/dist/core/kanban.d.ts +1 -1
- package/dist/core/kanban.d.ts.map +1 -1
- package/dist/core/kanban.js +3 -3
- package/dist/core/kanban.js.map +1 -1
- package/dist/core/llm-utils.d.ts +103 -0
- package/dist/core/llm-utils.d.ts.map +1 -0
- package/dist/core/llm-utils.js +368 -0
- package/dist/core/llm-utils.js.map +1 -0
- package/dist/core/logger.d.ts +92 -0
- package/dist/core/logger.d.ts.map +1 -0
- package/dist/core/logger.js +221 -0
- package/dist/core/logger.js.map +1 -0
- package/dist/core/process-manager.d.ts +15 -0
- package/dist/core/process-manager.d.ts.map +1 -0
- package/dist/core/process-manager.js +132 -0
- package/dist/core/process-manager.js.map +1 -0
- package/dist/core/story-logger.d.ts +102 -0
- package/dist/core/story-logger.d.ts.map +1 -0
- package/dist/core/story-logger.js +265 -0
- package/dist/core/story-logger.js.map +1 -0
- package/dist/core/story.d.ts +113 -20
- package/dist/core/story.d.ts.map +1 -1
- package/dist/core/story.js +328 -40
- package/dist/core/story.js.map +1 -1
- package/dist/core/task-parser.d.ts +59 -0
- package/dist/core/task-parser.d.ts.map +1 -0
- package/dist/core/task-parser.js +235 -0
- package/dist/core/task-parser.js.map +1 -0
- package/dist/core/task-progress.d.ts +92 -0
- package/dist/core/task-progress.d.ts.map +1 -0
- package/dist/core/task-progress.js +280 -0
- package/dist/core/task-progress.js.map +1 -0
- package/dist/core/workflow-state.d.ts +45 -6
- package/dist/core/workflow-state.d.ts.map +1 -1
- package/dist/core/workflow-state.js +201 -12
- package/dist/core/workflow-state.js.map +1 -1
- package/dist/core/worktree.d.ts +186 -0
- package/dist/core/worktree.d.ts.map +1 -0
- package/dist/core/worktree.js +554 -0
- package/dist/core/worktree.js.map +1 -0
- package/dist/index.js +146 -5
- package/dist/index.js.map +1 -1
- package/dist/services/error-classifier.d.ts +119 -0
- package/dist/services/error-classifier.d.ts.map +1 -0
- package/dist/services/error-classifier.js +182 -0
- package/dist/services/error-classifier.js.map +1 -0
- package/dist/types/index.d.ts +381 -1
- package/dist/types/index.d.ts.map +1 -1
- package/dist/types/index.js +1 -0
- package/dist/types/index.js.map +1 -1
- package/package.json +5 -2
- package/templates/story.md +5 -0
package/dist/agents/review.js
CHANGED
|
@@ -1,12 +1,16 @@
|
|
|
1
|
-
import { execSync, spawn } from 'child_process';
|
|
1
|
+
import { execSync, spawn, spawnSync } from 'child_process';
|
|
2
2
|
import path from 'path';
|
|
3
3
|
import fs from 'fs';
|
|
4
4
|
import { z } from 'zod';
|
|
5
|
-
import {
|
|
5
|
+
import { ProcessManager } from '../core/process-manager.js';
|
|
6
|
+
import { parseStory, updateStoryStatus, appendToSection, updateStoryField, isAtMaxRetries, appendReviewHistory, snapshotMaxRetries, getEffectiveMaxRetries, getEffectiveMaxImplementationRetries } from '../core/story.js';
|
|
6
7
|
import { runAgentQuery } from '../core/client.js';
|
|
8
|
+
import { getLogger } from '../core/logger.js';
|
|
7
9
|
import { loadConfig, DEFAULT_TIMEOUTS } from '../core/config.js';
|
|
10
|
+
import { extractStructuredResponseSync } from '../core/llm-utils.js';
|
|
8
11
|
import { ReviewDecision, ReviewSeverity } from '../types/index.js';
|
|
9
12
|
import { sanitizeInput, truncateText } from '../cli/formatting.js';
|
|
13
|
+
import { detectTestDuplicationPatterns } from './test-pattern-detector.js';
|
|
10
14
|
/**
|
|
11
15
|
* Security: Validate Git branch name to prevent command injection
|
|
12
16
|
* Only allows alphanumeric characters, hyphens, underscores, and forward slashes
|
|
@@ -93,7 +97,9 @@ const ReviewIssueSchema = z.object({
|
|
|
93
97
|
// This handles LLM responses that return {"line": null} instead of omitting the field
|
|
94
98
|
file: z.string().nullish().transform(v => v ?? undefined),
|
|
95
99
|
line: z.number().int().positive().nullish().transform(v => v ?? undefined),
|
|
96
|
-
suggestedFix: z.string().max(
|
|
100
|
+
suggestedFix: z.string().max(5000).nullish().transform(v => v ?? undefined),
|
|
101
|
+
// Perspectives field for unified review (optional for backward compatibility)
|
|
102
|
+
perspectives: z.array(z.enum(['code', 'security', 'po'])).optional(),
|
|
97
103
|
});
|
|
98
104
|
const ReviewResponseSchema = z.object({
|
|
99
105
|
passed: z.boolean(),
|
|
@@ -168,6 +174,7 @@ async function runCommandAsync(command, workingDir, timeout, onProgress) {
|
|
|
168
174
|
cwd: workingDir,
|
|
169
175
|
stdio: ['pipe', 'pipe', 'pipe'],
|
|
170
176
|
});
|
|
177
|
+
ProcessManager.getInstance().registerChild(child);
|
|
171
178
|
const timeoutId = setTimeout(() => {
|
|
172
179
|
killed = true;
|
|
173
180
|
child.kill('SIGTERM');
|
|
@@ -248,23 +255,138 @@ Output your review as a JSON object with this structure:
|
|
|
248
255
|
"issues": [
|
|
249
256
|
{
|
|
250
257
|
"severity": "blocker" | "critical" | "major" | "minor",
|
|
251
|
-
"category": "code_quality" | "security" | "requirements" | "testing" | etc,
|
|
258
|
+
"category": "code_quality" | "security" | "requirements" | "testing" | "test_alignment" | etc,
|
|
252
259
|
"description": "Detailed description of the issue",
|
|
253
260
|
"file": "path/to/file.ts" (if applicable),
|
|
254
261
|
"line": 42 (if applicable),
|
|
255
|
-
"suggestedFix": "How to fix this issue"
|
|
262
|
+
"suggestedFix": "How to fix this issue",
|
|
263
|
+
"perspectives": ["code", "security", "po"] (which perspectives this issue relates to)
|
|
256
264
|
}
|
|
257
265
|
]
|
|
258
266
|
}
|
|
259
267
|
|
|
260
268
|
Severity guidelines:
|
|
261
|
-
- blocker: Must be fixed before merging (security holes, broken functionality)
|
|
269
|
+
- blocker: Must be fixed before merging (security holes, broken functionality, test misalignment)
|
|
262
270
|
- critical: Should be fixed before merging (major bugs, poor practices)
|
|
263
271
|
- major: Should be addressed soon (code quality, maintainability)
|
|
264
272
|
- minor: Nice to have improvements (style, optimizations)
|
|
265
273
|
|
|
266
274
|
If no issues found, return: {"passed": true, "issues": []}
|
|
267
275
|
`;
|
|
276
|
+
/**
|
|
277
|
+
* Unified Review Prompt - combines code, security, and product owner perspectives
|
|
278
|
+
* into a single collaborative review to eliminate duplicate issues.
|
|
279
|
+
*/
|
|
280
|
+
const UNIFIED_REVIEW_PROMPT = `You are a senior engineering team conducting a comprehensive collaborative review.
|
|
281
|
+
|
|
282
|
+
You must evaluate the implementation from THREE perspectives simultaneously, but produce ONE unified set of issues:
|
|
283
|
+
|
|
284
|
+
## Perspective 1: Code Quality (Senior Developer)
|
|
285
|
+
Evaluate:
|
|
286
|
+
- Code quality and maintainability
|
|
287
|
+
- Following best practices and design patterns
|
|
288
|
+
- Potential bugs or logic errors
|
|
289
|
+
- Test coverage adequacy and test quality
|
|
290
|
+
- Error handling completeness
|
|
291
|
+
- Performance considerations
|
|
292
|
+
|
|
293
|
+
## Perspective 2: Security (Security Engineer)
|
|
294
|
+
Evaluate:
|
|
295
|
+
- OWASP Top 10 vulnerabilities
|
|
296
|
+
- Input validation and sanitization
|
|
297
|
+
- Authentication and authorization issues
|
|
298
|
+
- Data exposure risks
|
|
299
|
+
- Command injection vulnerabilities
|
|
300
|
+
- Secure coding practices
|
|
301
|
+
|
|
302
|
+
## Perspective 3: Requirements (Product Owner)
|
|
303
|
+
Evaluate:
|
|
304
|
+
- Does it meet the acceptance criteria stated in the story?
|
|
305
|
+
- Is the user experience appropriate and intuitive?
|
|
306
|
+
- Are edge cases and error scenarios handled?
|
|
307
|
+
- Is documentation adequate for users and maintainers?
|
|
308
|
+
- Does the implementation align with the story goals?
|
|
309
|
+
|
|
310
|
+
## Test-Implementation Alignment (BLOCKER category)
|
|
311
|
+
|
|
312
|
+
**CRITICAL PRE-REVIEW REQUIREMENT**: Tests have already been executed and passed. However, passing tests don't guarantee correctness if they verify outdated behavior.
|
|
313
|
+
|
|
314
|
+
During code review, you MUST verify test alignment:
|
|
315
|
+
|
|
316
|
+
1. **For each changed production file, identify its test file**
|
|
317
|
+
- Check if tests exist for modified functions/modules
|
|
318
|
+
- Read the test assertions carefully
|
|
319
|
+
|
|
320
|
+
2. **Verify tests match NEW behavior, not OLD**
|
|
321
|
+
- Do test assertions expect the current implementation behavior?
|
|
322
|
+
- If production code changed from sync to async, do tests use await?
|
|
323
|
+
- If function signature changed, do tests call it correctly?
|
|
324
|
+
- If return values changed, do tests expect the new values?
|
|
325
|
+
|
|
326
|
+
3. **Flag misalignment as BLOCKER**
|
|
327
|
+
- If tests reference changed code but still expect old behavior:
|
|
328
|
+
- This is a **BLOCKER** severity issue
|
|
329
|
+
- Category MUST be: \`"test_alignment"\`
|
|
330
|
+
- Specify which test files need updating and why
|
|
331
|
+
- Provide example of correct assertion for new behavior
|
|
332
|
+
|
|
333
|
+
**Example of misaligned test (BLOCKER):**
|
|
334
|
+
\`\`\`typescript
|
|
335
|
+
// Production code changed from sync to async
|
|
336
|
+
async function loadConfig(): Promise<Config> {
|
|
337
|
+
return await fetchConfig();
|
|
338
|
+
}
|
|
339
|
+
|
|
340
|
+
// Test still expects sync behavior - MISSING await (BLOCKER)
|
|
341
|
+
test('loads config', () => {
|
|
342
|
+
const config = loadConfig(); // ❌ Missing await! Returns Promise<Config>, not Config
|
|
343
|
+
expect(config.port).toBe(3000); // ❌ Checking Promise.port, not config.port
|
|
344
|
+
});
|
|
345
|
+
|
|
346
|
+
// Correct aligned test:
|
|
347
|
+
test('loads config', async () => {
|
|
348
|
+
const config = await loadConfig(); // ✅ Awaits async function
|
|
349
|
+
expect(config.port).toBe(3000); // ✅ Checks actual config
|
|
350
|
+
});
|
|
351
|
+
\`\`\`
|
|
352
|
+
|
|
353
|
+
**When to flag test_alignment issues:**
|
|
354
|
+
- Tests verify old function signatures that no longer exist
|
|
355
|
+
- Tests expect old return value formats that changed
|
|
356
|
+
- Tests miss new error conditions introduced
|
|
357
|
+
- Tests pass but don't exercise the new code paths
|
|
358
|
+
- Mock expectations don't match the new implementation calls
|
|
359
|
+
|
|
360
|
+
## CRITICAL DEDUPLICATION INSTRUCTIONS:
|
|
361
|
+
|
|
362
|
+
1. **DO NOT repeat the same underlying issue from different perspectives**
|
|
363
|
+
- If multiple perspectives notice the same problem, list it ONCE
|
|
364
|
+
- Use the \`perspectives\` array to indicate which perspectives it affects
|
|
365
|
+
|
|
366
|
+
2. **Prioritize by actual impact, not by how many perspectives notice it**
|
|
367
|
+
- A issue seen by all 3 perspectives is still just ONE issue
|
|
368
|
+
- Focus on the distinct, actionable problems that need fixing
|
|
369
|
+
|
|
370
|
+
3. **If the fundamental problem is "no implementation exists" or "functionality completely missing":**
|
|
371
|
+
- Report this as ONE blocker issue, not three separate issues
|
|
372
|
+
- Use perspectives: ["code", "security", "po"] to show all perspectives agree
|
|
373
|
+
|
|
374
|
+
4. **Combine related issues into single, comprehensive descriptions:**
|
|
375
|
+
- Instead of: "No tests" (code) + "Untested security" (security) + "No validation tests" (po)
|
|
376
|
+
- Write: "No tests exist for the implementation" with perspectives: ["code", "security", "po"]
|
|
377
|
+
|
|
378
|
+
5. **Each issue should have a clear, single suggested fix**
|
|
379
|
+
- Avoid vague suggestions like "improve everything"
|
|
380
|
+
- Be specific and actionable
|
|
381
|
+
|
|
382
|
+
${REVIEW_OUTPUT_FORMAT}
|
|
383
|
+
|
|
384
|
+
Remember: Your goal is to produce a clean, deduplicated list of actual distinct problems, not to maximize issue count.`;
|
|
385
|
+
/**
|
|
386
|
+
* Legacy prompts - kept for reference only
|
|
387
|
+
* @deprecated These are replaced by UNIFIED_REVIEW_PROMPT which combines all three perspectives.
|
|
388
|
+
* The unified prompt reduces LLM calls from 3 to 1 and eliminates duplicate issues.
|
|
389
|
+
*/
|
|
268
390
|
const CODE_REVIEW_PROMPT = `You are a senior code reviewer. Review the implementation for:
|
|
269
391
|
1. Code quality and maintainability
|
|
270
392
|
2. Following best practices
|
|
@@ -272,6 +394,9 @@ const CODE_REVIEW_PROMPT = `You are a senior code reviewer. Review the implement
|
|
|
272
394
|
4. Test coverage adequacy
|
|
273
395
|
|
|
274
396
|
${REVIEW_OUTPUT_FORMAT}`;
|
|
397
|
+
/**
|
|
398
|
+
* @deprecated Use UNIFIED_REVIEW_PROMPT instead
|
|
399
|
+
*/
|
|
275
400
|
const SECURITY_REVIEW_PROMPT = `You are a security specialist. Review the implementation for:
|
|
276
401
|
1. OWASP Top 10 vulnerabilities
|
|
277
402
|
2. Input validation issues
|
|
@@ -279,6 +404,9 @@ const SECURITY_REVIEW_PROMPT = `You are a security specialist. Review the implem
|
|
|
279
404
|
4. Data exposure risks
|
|
280
405
|
|
|
281
406
|
${REVIEW_OUTPUT_FORMAT}`;
|
|
407
|
+
/**
|
|
408
|
+
* @deprecated Use UNIFIED_REVIEW_PROMPT instead
|
|
409
|
+
*/
|
|
282
410
|
const PO_REVIEW_PROMPT = `You are a product owner validating the implementation. Check:
|
|
283
411
|
1. Does it meet the acceptance criteria?
|
|
284
412
|
2. Is the user experience appropriate?
|
|
@@ -288,26 +416,25 @@ const PO_REVIEW_PROMPT = `You are a product owner validating the implementation.
|
|
|
288
416
|
${REVIEW_OUTPUT_FORMAT}`;
|
|
289
417
|
/**
|
|
290
418
|
* Parse review response and extract structured issues
|
|
419
|
+
* Uses extractStructuredResponseSync for robust parsing with multiple strategies:
|
|
420
|
+
* 1. Direct JSON parse
|
|
421
|
+
* 2. JSON within markdown code blocks
|
|
422
|
+
* 3. JSON with leading/trailing text stripped
|
|
423
|
+
* 4. YAML format fallback
|
|
424
|
+
*
|
|
291
425
|
* Security: Uses zod schema validation to prevent malicious JSON
|
|
292
426
|
*/
|
|
293
427
|
function parseReviewResponse(response, reviewType) {
|
|
294
|
-
|
|
295
|
-
|
|
296
|
-
|
|
297
|
-
|
|
298
|
-
|
|
299
|
-
|
|
300
|
-
|
|
301
|
-
|
|
302
|
-
|
|
303
|
-
|
|
304
|
-
if (!validationResult.success) {
|
|
305
|
-
// Log validation errors for debugging
|
|
306
|
-
console.warn('Review response failed schema validation:', validationResult.error);
|
|
307
|
-
// Fallback to text analysis
|
|
308
|
-
return parseTextReview(response, reviewType);
|
|
309
|
-
}
|
|
310
|
-
const validated = validationResult.data;
|
|
428
|
+
const logger = getLogger();
|
|
429
|
+
// Use the robust extraction utility with all strategies
|
|
430
|
+
const extractionResult = extractStructuredResponseSync(response, ReviewResponseSchema, false);
|
|
431
|
+
if (extractionResult.success && extractionResult.data) {
|
|
432
|
+
const validated = extractionResult.data;
|
|
433
|
+
logger.debug('review', `Successfully parsed review response using strategy: ${extractionResult.strategy}`, {
|
|
434
|
+
reviewType,
|
|
435
|
+
strategy: extractionResult.strategy,
|
|
436
|
+
issueCount: validated.issues.length,
|
|
437
|
+
});
|
|
311
438
|
// Map validated data to ReviewIssue format (additional sanitization)
|
|
312
439
|
const issues = validated.issues.map((issue) => ({
|
|
313
440
|
severity: issue.severity,
|
|
@@ -316,17 +443,20 @@ function parseReviewResponse(response, reviewType) {
|
|
|
316
443
|
file: issue.file,
|
|
317
444
|
line: issue.line,
|
|
318
445
|
suggestedFix: issue.suggestedFix,
|
|
446
|
+
perspectives: issue.perspectives,
|
|
319
447
|
}));
|
|
320
448
|
return {
|
|
321
449
|
passed: validated.passed !== false && issues.filter(i => i.severity === 'blocker' || i.severity === 'critical').length === 0,
|
|
322
450
|
issues,
|
|
323
451
|
};
|
|
324
452
|
}
|
|
325
|
-
|
|
326
|
-
|
|
327
|
-
|
|
328
|
-
|
|
329
|
-
|
|
453
|
+
// All extraction strategies failed - log raw response for debugging and use text fallback
|
|
454
|
+
logger.warn('review', 'All extraction strategies failed for review response', {
|
|
455
|
+
reviewType,
|
|
456
|
+
error: extractionResult.error,
|
|
457
|
+
responsePreview: response.substring(0, 200),
|
|
458
|
+
});
|
|
459
|
+
return parseTextReview(response, reviewType);
|
|
330
460
|
}
|
|
331
461
|
/**
|
|
332
462
|
* Fallback: Parse text-based review response (for when LLM doesn't return JSON)
|
|
@@ -383,8 +513,35 @@ function determineReviewSeverity(issues) {
|
|
|
383
513
|
return ReviewSeverity.LOW;
|
|
384
514
|
}
|
|
385
515
|
}
|
|
516
|
+
/**
|
|
517
|
+
* Derive individual perspective pass/fail status from issues
|
|
518
|
+
*
|
|
519
|
+
* For backward compatibility with ReviewAttempt structure, determines whether
|
|
520
|
+
* each perspective (code, security, po) would pass based on issues flagged
|
|
521
|
+
* for that perspective.
|
|
522
|
+
*
|
|
523
|
+
* A perspective fails if it has any blocker or critical issues.
|
|
524
|
+
*
|
|
525
|
+
* @param issues - Array of review issues with perspectives field
|
|
526
|
+
* @returns Object with pass/fail status for each perspective
|
|
527
|
+
*/
|
|
528
|
+
export function deriveIndividualPassFailFromPerspectives(issues) {
|
|
529
|
+
// Check if any blocker/critical issues exist for each perspective
|
|
530
|
+
const codeIssues = issues.filter(i => i.perspectives?.includes('code') &&
|
|
531
|
+
(i.severity === 'blocker' || i.severity === 'critical'));
|
|
532
|
+
const securityIssues = issues.filter(i => i.perspectives?.includes('security') &&
|
|
533
|
+
(i.severity === 'blocker' || i.severity === 'critical'));
|
|
534
|
+
const poIssues = issues.filter(i => i.perspectives?.includes('po') &&
|
|
535
|
+
(i.severity === 'blocker' || i.severity === 'critical'));
|
|
536
|
+
return {
|
|
537
|
+
codeReviewPassed: codeIssues.length === 0,
|
|
538
|
+
securityReviewPassed: securityIssues.length === 0,
|
|
539
|
+
poReviewPassed: poIssues.length === 0,
|
|
540
|
+
};
|
|
541
|
+
}
|
|
386
542
|
/**
|
|
387
543
|
* Aggregate issues from multiple reviews and determine overall pass/fail
|
|
544
|
+
* @deprecated No longer used with unified review. Kept for reference only.
|
|
388
545
|
*/
|
|
389
546
|
function aggregateReviews(codeResult, securityResult, poResult) {
|
|
390
547
|
const allIssues = [...codeResult.issues, ...securityResult.issues, ...poResult.issues];
|
|
@@ -399,6 +556,7 @@ function aggregateReviews(codeResult, securityResult, poResult) {
|
|
|
399
556
|
}
|
|
400
557
|
/**
|
|
401
558
|
* Format issues for display in review notes
|
|
559
|
+
* Shows perspectives (code, security, po) when available
|
|
402
560
|
*/
|
|
403
561
|
function formatIssuesForDisplay(issues) {
|
|
404
562
|
if (issues.length === 0) {
|
|
@@ -417,7 +575,11 @@ function formatIssuesForDisplay(issues) {
|
|
|
417
575
|
const icon = severity === 'blocker' ? '🛑' : severity === 'critical' ? '⚠️' : severity === 'major' ? '📋' : 'ℹ️';
|
|
418
576
|
output += `\n#### ${icon} ${severity.toUpperCase()} (${issueList.length})\n\n`;
|
|
419
577
|
for (const issue of issueList) {
|
|
420
|
-
|
|
578
|
+
// Format perspectives indicator if present
|
|
579
|
+
const perspectivesTag = issue.perspectives && issue.perspectives.length > 0
|
|
580
|
+
? ` [${issue.perspectives.join(', ')}]`
|
|
581
|
+
: '';
|
|
582
|
+
output += `**${issue.category}**${perspectivesTag}: ${issue.description}\n`;
|
|
421
583
|
if (issue.file) {
|
|
422
584
|
output += ` - File: \`${issue.file}\`${issue.line ? `:${issue.line}` : ''}\n`;
|
|
423
585
|
}
|
|
@@ -429,6 +591,205 @@ function formatIssuesForDisplay(issues) {
|
|
|
429
591
|
}
|
|
430
592
|
return output;
|
|
431
593
|
}
|
|
594
|
+
/**
|
|
595
|
+
* Get source code changes from git diff
|
|
596
|
+
*
|
|
597
|
+
* Returns list of source files that have been modified (excludes tests and story files).
|
|
598
|
+
* Uses spawnSync for security (prevents command injection).
|
|
599
|
+
*
|
|
600
|
+
* @param workingDir - Working directory to run git diff in
|
|
601
|
+
* @returns Array of source file paths that have changed, or ['unknown'] if git fails
|
|
602
|
+
*/
|
|
603
|
+
export function getSourceCodeChanges(workingDir) {
|
|
604
|
+
try {
|
|
605
|
+
// Security: Use spawnSync with explicit args (not shell) to prevent injection
|
|
606
|
+
const result = spawnSync('git', ['diff', '--name-only', 'HEAD~1'], {
|
|
607
|
+
cwd: workingDir,
|
|
608
|
+
encoding: 'utf-8',
|
|
609
|
+
stdio: ['ignore', 'pipe', 'pipe'],
|
|
610
|
+
});
|
|
611
|
+
if (result.status !== 0) {
|
|
612
|
+
// Git command failed - fail open (assume changes exist)
|
|
613
|
+
return ['unknown'];
|
|
614
|
+
}
|
|
615
|
+
const output = result.stdout.toString();
|
|
616
|
+
return output
|
|
617
|
+
.split('\n')
|
|
618
|
+
.filter(f => f.trim())
|
|
619
|
+
.filter(f => /\.(ts|tsx|js|jsx)$/.test(f)) // Source files only
|
|
620
|
+
.filter(f => !f.includes('.test.')) // Exclude test files
|
|
621
|
+
.filter(f => !f.includes('.spec.')) // Exclude spec files
|
|
622
|
+
.filter(f => !f.startsWith('.ai-sdlc/')); // Exclude story files
|
|
623
|
+
}
|
|
624
|
+
catch {
|
|
625
|
+
// If git diff fails, assume there are changes (fail open, not closed)
|
|
626
|
+
return ['unknown'];
|
|
627
|
+
}
|
|
628
|
+
}
|
|
629
|
+
/**
|
|
630
|
+
* Get configuration file changes from git diff
|
|
631
|
+
*
|
|
632
|
+
* Detects changes to configuration files including:
|
|
633
|
+
* - .claude/ directory (Agent SDK skills, CLAUDE.md)
|
|
634
|
+
* - .github/ directory (workflows, actions, issue templates)
|
|
635
|
+
* - Root config files (tsconfig.json, package.json, .gitignore, vitest.config.ts, etc.)
|
|
636
|
+
*
|
|
637
|
+
* Uses spawnSync for security (prevents command injection).
|
|
638
|
+
*
|
|
639
|
+
* @param workingDir - Working directory to run git diff in
|
|
640
|
+
* @returns Array of configuration file paths that have changed, or ['unknown'] if git fails
|
|
641
|
+
*/
|
|
642
|
+
export function getConfigurationChanges(workingDir) {
|
|
643
|
+
try {
|
|
644
|
+
// Security: Use spawnSync with explicit args (not shell) to prevent injection
|
|
645
|
+
const result = spawnSync('git', ['diff', '--name-only', 'HEAD~1'], {
|
|
646
|
+
cwd: workingDir,
|
|
647
|
+
encoding: 'utf-8',
|
|
648
|
+
stdio: ['ignore', 'pipe', 'pipe'],
|
|
649
|
+
});
|
|
650
|
+
if (result.status !== 0) {
|
|
651
|
+
// Git command failed - fail open (assume changes exist)
|
|
652
|
+
return ['unknown'];
|
|
653
|
+
}
|
|
654
|
+
const output = result.stdout.toString();
|
|
655
|
+
return output
|
|
656
|
+
.split('\n')
|
|
657
|
+
.filter(f => f.trim())
|
|
658
|
+
.filter(f => {
|
|
659
|
+
// Configuration directories
|
|
660
|
+
if (f.startsWith('.claude/'))
|
|
661
|
+
return true;
|
|
662
|
+
if (f.startsWith('.github/'))
|
|
663
|
+
return true;
|
|
664
|
+
// Root configuration files (common patterns)
|
|
665
|
+
const rootConfigs = [
|
|
666
|
+
'tsconfig.json',
|
|
667
|
+
'package.json',
|
|
668
|
+
'package-lock.json',
|
|
669
|
+
'.gitignore',
|
|
670
|
+
'.gitattributes',
|
|
671
|
+
'vitest.config.ts',
|
|
672
|
+
'vitest.config.js',
|
|
673
|
+
'jest.config.js',
|
|
674
|
+
'jest.config.ts',
|
|
675
|
+
'.eslintrc',
|
|
676
|
+
'.eslintrc.js',
|
|
677
|
+
'.eslintrc.json',
|
|
678
|
+
'.prettierrc',
|
|
679
|
+
'.prettierrc.js',
|
|
680
|
+
'.prettierrc.json',
|
|
681
|
+
'Makefile',
|
|
682
|
+
'Dockerfile',
|
|
683
|
+
'docker-compose.yml',
|
|
684
|
+
'.env.example',
|
|
685
|
+
];
|
|
686
|
+
return rootConfigs.includes(f);
|
|
687
|
+
});
|
|
688
|
+
}
|
|
689
|
+
catch {
|
|
690
|
+
// If git diff fails, assume there are changes (fail open, not closed)
|
|
691
|
+
return ['unknown'];
|
|
692
|
+
}
|
|
693
|
+
}
|
|
694
|
+
/**
|
|
695
|
+
* Get documentation file changes from git diff
|
|
696
|
+
*
|
|
697
|
+
* Detects changes to documentation files including:
|
|
698
|
+
* - Markdown files (.md) anywhere in the project (excluding story files)
|
|
699
|
+
* - docs/ directory (any file type)
|
|
700
|
+
*
|
|
701
|
+
* Uses spawnSync for security (prevents command injection).
|
|
702
|
+
*
|
|
703
|
+
* @param workingDir - Working directory to run git diff in
|
|
704
|
+
* @returns Array of documentation file paths that have changed, or ['unknown'] if git fails
|
|
705
|
+
*/
|
|
706
|
+
export function getDocumentationChanges(workingDir) {
|
|
707
|
+
try {
|
|
708
|
+
// Security: Use spawnSync with explicit args (not shell) to prevent injection
|
|
709
|
+
const result = spawnSync('git', ['diff', '--name-only', 'HEAD~1'], {
|
|
710
|
+
cwd: workingDir,
|
|
711
|
+
encoding: 'utf-8',
|
|
712
|
+
stdio: ['ignore', 'pipe', 'pipe'],
|
|
713
|
+
});
|
|
714
|
+
if (result.status !== 0) {
|
|
715
|
+
// Git command failed - fail open (assume changes exist)
|
|
716
|
+
return ['unknown'];
|
|
717
|
+
}
|
|
718
|
+
const output = result.stdout.toString();
|
|
719
|
+
return output
|
|
720
|
+
.split('\n')
|
|
721
|
+
.filter(f => f.trim())
|
|
722
|
+
.filter(f => {
|
|
723
|
+
// Markdown files (excluding story files in .ai-sdlc/stories/)
|
|
724
|
+
if (f.endsWith('.md') && !f.startsWith('.ai-sdlc/stories/'))
|
|
725
|
+
return true;
|
|
726
|
+
// Files in docs/ directory (any file type - images, diagrams, etc.)
|
|
727
|
+
if (f.startsWith('docs/'))
|
|
728
|
+
return true;
|
|
729
|
+
return false;
|
|
730
|
+
});
|
|
731
|
+
}
|
|
732
|
+
catch {
|
|
733
|
+
// If git diff fails, assume there are changes (fail open, not closed)
|
|
734
|
+
return ['unknown'];
|
|
735
|
+
}
|
|
736
|
+
}
|
|
737
|
+
/**
|
|
738
|
+
* Determine the effective content type for validation
|
|
739
|
+
*
|
|
740
|
+
* Resolves the final content type based on story frontmatter fields:
|
|
741
|
+
* 1. If requires_source_changes === false, treat as 'configuration'
|
|
742
|
+
* 2. If requires_source_changes === true, treat as 'code'
|
|
743
|
+
* 3. Otherwise, use content_type field (default: 'code' for backward compatibility)
|
|
744
|
+
*
|
|
745
|
+
* @param story - Story with frontmatter to analyze
|
|
746
|
+
* @returns The effective content type to use for validation
|
|
747
|
+
*/
|
|
748
|
+
export function determineEffectiveContentType(story) {
|
|
749
|
+
const frontmatter = story.frontmatter;
|
|
750
|
+
// Manual override takes precedence
|
|
751
|
+
if (frontmatter.requires_source_changes === false) {
|
|
752
|
+
return 'configuration';
|
|
753
|
+
}
|
|
754
|
+
if (frontmatter.requires_source_changes === true) {
|
|
755
|
+
return 'code';
|
|
756
|
+
}
|
|
757
|
+
// Use explicit content_type or default to 'code'
|
|
758
|
+
return frontmatter.content_type || 'code';
|
|
759
|
+
}
|
|
760
|
+
/**
|
|
761
|
+
* Check if test files exist in git diff
|
|
762
|
+
*
|
|
763
|
+
* Returns true if any test files have been modified/added, false otherwise.
|
|
764
|
+
* Uses spawnSync for security (prevents command injection).
|
|
765
|
+
*
|
|
766
|
+
* @param workingDir - Working directory to run git diff in
|
|
767
|
+
* @returns True if test files exist in changes, false otherwise
|
|
768
|
+
*/
|
|
769
|
+
export function hasTestFiles(workingDir) {
|
|
770
|
+
try {
|
|
771
|
+
// Security: Use spawnSync with explicit args (not shell) to prevent injection
|
|
772
|
+
const result = spawnSync('git', ['diff', '--name-only', 'HEAD~1'], {
|
|
773
|
+
cwd: workingDir,
|
|
774
|
+
encoding: 'utf-8',
|
|
775
|
+
stdio: ['ignore', 'pipe', 'pipe'],
|
|
776
|
+
});
|
|
777
|
+
if (result.status !== 0) {
|
|
778
|
+
// Git command failed - fail open (assume tests exist to avoid false blocks)
|
|
779
|
+
return true;
|
|
780
|
+
}
|
|
781
|
+
const output = result.stdout.toString();
|
|
782
|
+
const files = output.split('\n').filter(f => f.trim());
|
|
783
|
+
// Check if any files match test patterns
|
|
784
|
+
return files.some(f => f.includes('.test.') ||
|
|
785
|
+
f.includes('.spec.') ||
|
|
786
|
+
f.includes('__tests__/'));
|
|
787
|
+
}
|
|
788
|
+
catch {
|
|
789
|
+
// If git diff fails, assume tests exist (fail open, not closed)
|
|
790
|
+
return true;
|
|
791
|
+
}
|
|
792
|
+
}
|
|
432
793
|
/**
|
|
433
794
|
* Generate executive summary from review issues (1-3 sentences)
|
|
434
795
|
*
|
|
@@ -526,9 +887,15 @@ export function generateReviewSummary(issues, terminalWidth) {
|
|
|
526
887
|
* Now returns structured ReviewResult with pass/fail and issues.
|
|
527
888
|
*/
|
|
528
889
|
export async function runReviewAgent(storyPath, sdlcRoot, options) {
|
|
890
|
+
const logger = getLogger();
|
|
891
|
+
const startTime = Date.now();
|
|
529
892
|
const story = parseStory(storyPath);
|
|
530
893
|
const changesMade = [];
|
|
531
894
|
const workingDir = path.dirname(sdlcRoot);
|
|
895
|
+
logger.info('review', 'Starting review phase', {
|
|
896
|
+
storyId: story.frontmatter.id,
|
|
897
|
+
retryCount: story.frontmatter.retry_count || 0,
|
|
898
|
+
});
|
|
532
899
|
// Security: Validate working directory before any operations
|
|
533
900
|
try {
|
|
534
901
|
validateWorkingDirectory(workingDir);
|
|
@@ -554,14 +921,14 @@ export async function runReviewAgent(storyPath, sdlcRoot, options) {
|
|
|
554
921
|
const config = loadConfig(workingDir);
|
|
555
922
|
try {
|
|
556
923
|
// Snapshot max_retries from config (protects against mid-cycle config changes)
|
|
557
|
-
snapshotMaxRetries(story, config);
|
|
924
|
+
await snapshotMaxRetries(story, config);
|
|
558
925
|
// Check if story has reached max retries
|
|
559
926
|
if (isAtMaxRetries(story, config)) {
|
|
560
927
|
const retryCount = story.frontmatter.retry_count || 0;
|
|
561
928
|
const maxRetries = getEffectiveMaxRetries(story, config);
|
|
562
929
|
const maxRetriesDisplay = Number.isFinite(maxRetries) ? maxRetries : '∞';
|
|
563
930
|
const errorMsg = `Story has reached maximum retry limit (${retryCount}/${maxRetriesDisplay}). Manual intervention required.`;
|
|
564
|
-
updateStoryField(story, 'last_error', errorMsg);
|
|
931
|
+
await updateStoryField(story, 'last_error', errorMsg);
|
|
565
932
|
changesMade.push(errorMsg);
|
|
566
933
|
return {
|
|
567
934
|
success: false,
|
|
@@ -579,6 +946,194 @@ export async function runReviewAgent(storyPath, sdlcRoot, options) {
|
|
|
579
946
|
feedback: errorMsg,
|
|
580
947
|
};
|
|
581
948
|
}
|
|
949
|
+
// PRE-CHECK GATE: Content type-aware validation before running expensive LLM reviews
|
|
950
|
+
const contentType = determineEffectiveContentType(story);
|
|
951
|
+
logger.info('review', 'Running content-type-specific validation', {
|
|
952
|
+
storyId: story.frontmatter.id,
|
|
953
|
+
contentType,
|
|
954
|
+
explicitContentType: story.frontmatter.content_type,
|
|
955
|
+
requiresSourceChanges: story.frontmatter.requires_source_changes,
|
|
956
|
+
});
|
|
957
|
+
// Validation flags
|
|
958
|
+
let validationFailed = false;
|
|
959
|
+
let validationReason = '';
|
|
960
|
+
let validationCategory = 'implementation';
|
|
961
|
+
// Check source code changes for 'code' and 'mixed' types
|
|
962
|
+
if (contentType === 'code' || contentType === 'mixed') {
|
|
963
|
+
const sourceChanges = getSourceCodeChanges(workingDir);
|
|
964
|
+
if (sourceChanges.length === 0) {
|
|
965
|
+
validationFailed = true;
|
|
966
|
+
validationReason = contentType === 'mixed'
|
|
967
|
+
? 'Mixed story requires both source AND configuration changes - no source code was modified.'
|
|
968
|
+
: 'Implementation wrote documentation/planning only - no source code was modified.';
|
|
969
|
+
logger.warn('review', 'Source code validation failed', {
|
|
970
|
+
storyId: story.frontmatter.id,
|
|
971
|
+
contentType,
|
|
972
|
+
sourceChangesFound: sourceChanges.length,
|
|
973
|
+
});
|
|
974
|
+
}
|
|
975
|
+
else {
|
|
976
|
+
logger.info('review', 'Source code changes detected', {
|
|
977
|
+
storyId: story.frontmatter.id,
|
|
978
|
+
fileCount: sourceChanges.length,
|
|
979
|
+
});
|
|
980
|
+
}
|
|
981
|
+
}
|
|
982
|
+
// Check configuration changes for 'configuration' and 'mixed' types
|
|
983
|
+
if (!validationFailed && (contentType === 'configuration' || contentType === 'mixed')) {
|
|
984
|
+
const configChanges = getConfigurationChanges(workingDir);
|
|
985
|
+
if (configChanges.length === 0) {
|
|
986
|
+
validationFailed = true;
|
|
987
|
+
validationReason = contentType === 'mixed'
|
|
988
|
+
? 'Mixed story requires both source AND configuration changes. No configuration file changes detected.'
|
|
989
|
+
: 'Configuration story requires changes to config files (.claude/, .github/, or root config files). No configuration changes detected.';
|
|
990
|
+
logger.warn('review', 'Configuration validation failed', {
|
|
991
|
+
storyId: story.frontmatter.id,
|
|
992
|
+
contentType,
|
|
993
|
+
configChangesFound: configChanges.length,
|
|
994
|
+
});
|
|
995
|
+
}
|
|
996
|
+
else {
|
|
997
|
+
logger.info('review', 'Configuration changes detected', {
|
|
998
|
+
storyId: story.frontmatter.id,
|
|
999
|
+
fileCount: configChanges.length,
|
|
1000
|
+
});
|
|
1001
|
+
}
|
|
1002
|
+
}
|
|
1003
|
+
// Check documentation changes for 'documentation' type
|
|
1004
|
+
if (!validationFailed && contentType === 'documentation') {
|
|
1005
|
+
const docChanges = getDocumentationChanges(workingDir);
|
|
1006
|
+
if (docChanges.length === 0) {
|
|
1007
|
+
validationFailed = true;
|
|
1008
|
+
validationReason = 'Documentation story requires changes to markdown files (.md) or docs/ directory. No documentation changes detected.';
|
|
1009
|
+
logger.warn('review', 'Documentation validation failed', {
|
|
1010
|
+
storyId: story.frontmatter.id,
|
|
1011
|
+
contentType,
|
|
1012
|
+
docChangesFound: docChanges.length,
|
|
1013
|
+
});
|
|
1014
|
+
}
|
|
1015
|
+
else {
|
|
1016
|
+
logger.info('review', 'Documentation changes detected', {
|
|
1017
|
+
storyId: story.frontmatter.id,
|
|
1018
|
+
fileCount: docChanges.length,
|
|
1019
|
+
});
|
|
1020
|
+
}
|
|
1021
|
+
}
|
|
1022
|
+
// Handle validation failure (if any)
|
|
1023
|
+
if (validationFailed) {
|
|
1024
|
+
const retryCount = story.frontmatter.implementation_retry_count || 0;
|
|
1025
|
+
const maxRetries = getEffectiveMaxImplementationRetries(story, config);
|
|
1026
|
+
if (retryCount < maxRetries) {
|
|
1027
|
+
// RECOVERABLE: Trigger implementation recovery
|
|
1028
|
+
logger.warn('review', 'Validation failed - triggering implementation recovery', {
|
|
1029
|
+
storyId: story.frontmatter.id,
|
|
1030
|
+
retryCount,
|
|
1031
|
+
maxRetries,
|
|
1032
|
+
contentType,
|
|
1033
|
+
});
|
|
1034
|
+
await updateStoryField(story, 'implementation_complete', false);
|
|
1035
|
+
// Set restart reason based on content type
|
|
1036
|
+
const restartReason = contentType === 'configuration'
|
|
1037
|
+
? 'Configuration story requires changes to config files (.claude/, .github/, or root config files). No configuration changes detected.'
|
|
1038
|
+
: contentType === 'mixed'
|
|
1039
|
+
? 'Mixed story requires both source AND configuration changes - no source code was modified.'
|
|
1040
|
+
: contentType === 'documentation'
|
|
1041
|
+
? 'Documentation story requires changes to markdown files (.md) or docs/ directory. No documentation changes detected.'
|
|
1042
|
+
: 'No source code changes detected. Implementation wrote documentation only.';
|
|
1043
|
+
await updateStoryField(story, 'last_restart_reason', restartReason);
|
|
1044
|
+
// Create user-friendly recovery description
|
|
1045
|
+
const recoveryDescription = contentType === 'configuration'
|
|
1046
|
+
? 'No configuration file modifications detected. Re-running implementation phase.'
|
|
1047
|
+
: contentType === 'mixed'
|
|
1048
|
+
? 'No source code modifications detected. Re-running implementation phase.'
|
|
1049
|
+
: contentType === 'documentation'
|
|
1050
|
+
? 'No documentation file modifications detected. Re-running implementation phase.'
|
|
1051
|
+
: 'No source code modifications detected. Re-running implementation phase.';
|
|
1052
|
+
return {
|
|
1053
|
+
success: true,
|
|
1054
|
+
story: parseStory(storyPath),
|
|
1055
|
+
changesMade: ['Detected incomplete implementation', 'Triggered implementation recovery'],
|
|
1056
|
+
passed: false,
|
|
1057
|
+
decision: ReviewDecision.RECOVERY,
|
|
1058
|
+
reviewType: 'pre-check',
|
|
1059
|
+
issues: [{
|
|
1060
|
+
severity: 'critical',
|
|
1061
|
+
category: validationCategory,
|
|
1062
|
+
description: recoveryDescription,
|
|
1063
|
+
}],
|
|
1064
|
+
feedback: `Implementation recovery triggered - ${validationReason}`,
|
|
1065
|
+
};
|
|
1066
|
+
}
|
|
1067
|
+
else {
|
|
1068
|
+
// NON-RECOVERABLE: Max retries reached
|
|
1069
|
+
const maxRetriesDisplay = Number.isFinite(maxRetries) ? maxRetries : '∞';
|
|
1070
|
+
logger.error('review', 'Validation failed and max implementation retries reached', {
|
|
1071
|
+
storyId: story.frontmatter.id,
|
|
1072
|
+
retryCount,
|
|
1073
|
+
maxRetries,
|
|
1074
|
+
contentType,
|
|
1075
|
+
});
|
|
1076
|
+
return {
|
|
1077
|
+
success: true,
|
|
1078
|
+
story: parseStory(storyPath),
|
|
1079
|
+
changesMade: ['Detected incomplete implementation', 'Max retries reached'],
|
|
1080
|
+
passed: false,
|
|
1081
|
+
decision: ReviewDecision.FAILED,
|
|
1082
|
+
severity: ReviewSeverity.CRITICAL,
|
|
1083
|
+
reviewType: 'pre-check',
|
|
1084
|
+
issues: [{
|
|
1085
|
+
severity: 'blocker',
|
|
1086
|
+
category: validationCategory,
|
|
1087
|
+
description: `${validationReason} This has occurred ${retryCount} time(s) (max: ${maxRetriesDisplay}). Manual intervention required.`,
|
|
1088
|
+
suggestedFix: 'Review the story requirements and implementation plan. Verify the content_type field matches the expected implementation. Consider simplifying the story or providing more explicit guidance.',
|
|
1089
|
+
}],
|
|
1090
|
+
feedback: 'Implementation failed validation after multiple attempts.',
|
|
1091
|
+
};
|
|
1092
|
+
}
|
|
1093
|
+
}
|
|
1094
|
+
// Validation passed - proceed with normal review flow
|
|
1095
|
+
logger.info('review', 'Content validation passed - proceeding with verification', {
|
|
1096
|
+
storyId: story.frontmatter.id,
|
|
1097
|
+
contentType,
|
|
1098
|
+
});
|
|
1099
|
+
// PRE-CHECK GATE: Check if test files exist (only for code/mixed types)
|
|
1100
|
+
// Documentation and configuration stories don't require test files
|
|
1101
|
+
const requiresTests = contentType === 'code' || contentType === 'mixed';
|
|
1102
|
+
if (requiresTests) {
|
|
1103
|
+
const testsExist = hasTestFiles(workingDir);
|
|
1104
|
+
if (!testsExist) {
|
|
1105
|
+
logger.warn('review', 'No test files detected in implementation changes', {
|
|
1106
|
+
storyId: story.frontmatter.id,
|
|
1107
|
+
});
|
|
1108
|
+
return {
|
|
1109
|
+
success: true,
|
|
1110
|
+
story: parseStory(storyPath),
|
|
1111
|
+
changesMade: ['No test files found for implementation'],
|
|
1112
|
+
passed: false,
|
|
1113
|
+
decision: ReviewDecision.REJECTED,
|
|
1114
|
+
severity: ReviewSeverity.CRITICAL,
|
|
1115
|
+
reviewType: 'pre-check',
|
|
1116
|
+
issues: [{
|
|
1117
|
+
severity: 'blocker',
|
|
1118
|
+
category: 'testing',
|
|
1119
|
+
description: 'No tests found for this implementation. All implementations must include tests.',
|
|
1120
|
+
suggestedFix: 'Add test files (*.test.ts, *.spec.ts, or files in __tests__/ directory) that verify the implementation.',
|
|
1121
|
+
}],
|
|
1122
|
+
feedback: formatIssuesForDisplay([{
|
|
1123
|
+
severity: 'blocker',
|
|
1124
|
+
category: 'testing',
|
|
1125
|
+
description: 'No tests found for this implementation. All implementations must include tests.',
|
|
1126
|
+
suggestedFix: 'Add test files (*.test.ts, *.spec.ts, or files in __tests__/ directory) that verify the implementation.',
|
|
1127
|
+
}]),
|
|
1128
|
+
};
|
|
1129
|
+
}
|
|
1130
|
+
}
|
|
1131
|
+
else {
|
|
1132
|
+
logger.info('review', 'Test file check skipped for non-code content type', {
|
|
1133
|
+
storyId: story.frontmatter.id,
|
|
1134
|
+
contentType,
|
|
1135
|
+
});
|
|
1136
|
+
}
|
|
582
1137
|
// Run build and tests BEFORE reviews (async with progress)
|
|
583
1138
|
changesMade.push('Running build and test verification...');
|
|
584
1139
|
const verification = await runVerificationAsync(workingDir, config, options?.onVerificationProgress);
|
|
@@ -625,7 +1180,7 @@ export async function runReviewAgent(storyPath, sdlcRoot, options) {
|
|
|
625
1180
|
severity: 'blocker',
|
|
626
1181
|
category: 'testing',
|
|
627
1182
|
description: `Tests must pass before code review can proceed.\n\nCommand: ${config.testCommand}\n\nTest output:\n\`\`\`\n${testOutput}${truncationNote}\n\`\`\``,
|
|
628
|
-
suggestedFix: 'Fix failing tests before review can proceed.',
|
|
1183
|
+
suggestedFix: 'Fix failing tests before review can proceed. If tests are failing after implementation changes, verify that tests were updated to match the new behavior (not just the old behavior).',
|
|
629
1184
|
});
|
|
630
1185
|
verificationContext += `\n## Test Results ❌\nTest command \`${config.testCommand}\` FAILED:\n\`\`\`\n${testOutput}${truncationNote}\n\`\`\`\n`;
|
|
631
1186
|
}
|
|
@@ -646,60 +1201,82 @@ export async function runReviewAgent(storyPath, sdlcRoot, options) {
|
|
|
646
1201
|
feedback: formatIssuesForDisplay(verificationIssues),
|
|
647
1202
|
};
|
|
648
1203
|
}
|
|
649
|
-
// Verification passed - proceed with
|
|
650
|
-
changesMade.push('Verification passed - proceeding with
|
|
651
|
-
|
|
652
|
-
|
|
653
|
-
|
|
654
|
-
|
|
655
|
-
|
|
656
|
-
|
|
657
|
-
|
|
658
|
-
|
|
659
|
-
|
|
1204
|
+
// Verification passed - proceed with unified collaborative review
|
|
1205
|
+
changesMade.push('Verification passed - proceeding with unified collaborative review');
|
|
1206
|
+
// Run test pattern detection if enabled
|
|
1207
|
+
let testPatternIssues = [];
|
|
1208
|
+
if (config.reviewConfig.detectTestAntipatterns !== false) {
|
|
1209
|
+
try {
|
|
1210
|
+
changesMade.push('Running test anti-pattern detection...');
|
|
1211
|
+
testPatternIssues = await detectTestDuplicationPatterns(workingDir);
|
|
1212
|
+
if (testPatternIssues.length > 0) {
|
|
1213
|
+
changesMade.push(`Detected ${testPatternIssues.length} test anti-pattern(s)`);
|
|
1214
|
+
}
|
|
1215
|
+
else {
|
|
1216
|
+
changesMade.push('No test anti-patterns detected');
|
|
1217
|
+
}
|
|
1218
|
+
}
|
|
1219
|
+
catch (error) {
|
|
1220
|
+
// Don't fail review if detection errors - just log and continue
|
|
1221
|
+
const errorMsg = error instanceof Error ? error.message : String(error);
|
|
1222
|
+
changesMade.push(`Test pattern detection error: ${errorMsg}`);
|
|
1223
|
+
}
|
|
1224
|
+
}
|
|
1225
|
+
const unifiedReviewResponse = await runSubReview(story, UNIFIED_REVIEW_PROMPT, 'Unified Collaborative Review', workingDir, verificationContext);
|
|
1226
|
+
// Parse unified review response into structured issues
|
|
1227
|
+
const unifiedResult = parseReviewResponse(unifiedReviewResponse, 'Unified Review');
|
|
660
1228
|
// TDD Validation: Check TDD cycle completeness if TDD was enabled for this story
|
|
661
1229
|
const tddEnabled = story.frontmatter.tdd_enabled ?? config.tdd?.enabled ?? false;
|
|
662
1230
|
if (tddEnabled && story.frontmatter.tdd_test_history?.length) {
|
|
663
1231
|
const tddViolations = validateTDDCycles(story.frontmatter.tdd_test_history);
|
|
664
1232
|
if (tddViolations.length > 0) {
|
|
665
1233
|
const tddIssues = generateTDDIssues(tddViolations);
|
|
666
|
-
|
|
667
|
-
|
|
1234
|
+
unifiedResult.issues.push(...tddIssues);
|
|
1235
|
+
unifiedResult.passed = false;
|
|
668
1236
|
changesMade.push(`TDD validation: ${tddViolations.length} violation(s) detected`);
|
|
669
1237
|
}
|
|
670
1238
|
else {
|
|
671
1239
|
changesMade.push('TDD validation: All cycles completed correctly');
|
|
672
1240
|
}
|
|
673
1241
|
}
|
|
674
|
-
// Add
|
|
675
|
-
|
|
1242
|
+
// Add test pattern issues to unified result (they're code-quality related)
|
|
1243
|
+
if (testPatternIssues.length > 0) {
|
|
1244
|
+
unifiedResult.issues.push(...testPatternIssues);
|
|
1245
|
+
unifiedResult.passed = false;
|
|
1246
|
+
}
|
|
1247
|
+
// Add verification issues to unified result (they're code-quality related)
|
|
1248
|
+
unifiedResult.issues.unshift(...verificationIssues);
|
|
676
1249
|
if (verificationIssues.length > 0) {
|
|
677
|
-
|
|
1250
|
+
unifiedResult.passed = false;
|
|
678
1251
|
}
|
|
679
|
-
//
|
|
680
|
-
const
|
|
681
|
-
|
|
1252
|
+
// Determine overall pass/fail from unified review
|
|
1253
|
+
const allIssues = unifiedResult.issues;
|
|
1254
|
+
const blockerCount = allIssues.filter(i => i.severity === 'blocker').length;
|
|
1255
|
+
const criticalCount = allIssues.filter(i => i.severity === 'critical').length;
|
|
1256
|
+
const passed = blockerCount === 0 && criticalCount < 2;
|
|
1257
|
+
const severity = determineReviewSeverity(allIssues);
|
|
1258
|
+
// Derive individual perspective pass/fail for backward compatibility
|
|
1259
|
+
const { codeReviewPassed, securityReviewPassed, poReviewPassed } = deriveIndividualPassFailFromPerspectives(allIssues);
|
|
1260
|
+
// Compile review notes with structured format for unified review
|
|
682
1261
|
const reviewNotes = `
|
|
683
|
-
###
|
|
684
|
-
${formatIssuesForDisplay(codeResult.issues)}
|
|
1262
|
+
### Unified Collaborative Review
|
|
685
1263
|
|
|
686
|
-
|
|
687
|
-
${formatIssuesForDisplay(securityResult.issues)}
|
|
1264
|
+
${formatIssuesForDisplay(allIssues)}
|
|
688
1265
|
|
|
689
|
-
###
|
|
690
|
-
${
|
|
1266
|
+
### Perspective Summary
|
|
1267
|
+
- Code Quality: ${codeReviewPassed ? '✅ Passed' : '❌ Failed'}
|
|
1268
|
+
- Security: ${securityReviewPassed ? '✅ Passed' : '❌ Failed'}
|
|
1269
|
+
- Requirements (PO): ${poReviewPassed ? '✅ Passed' : '❌ Failed'}
|
|
691
1270
|
|
|
692
1271
|
### Overall Result
|
|
693
1272
|
${passed ? '✅ **PASSED** - All reviews approved' : '❌ **FAILED** - Issues must be addressed'}
|
|
694
1273
|
|
|
695
1274
|
---
|
|
696
|
-
*
|
|
1275
|
+
*Review completed: ${new Date().toISOString().split('T')[0]}*
|
|
697
1276
|
`;
|
|
698
1277
|
// Append reviews to story
|
|
699
|
-
appendToSection(story, 'Review Notes', reviewNotes);
|
|
700
|
-
changesMade.push('Added
|
|
701
|
-
changesMade.push('Added security review notes');
|
|
702
|
-
changesMade.push('Added product owner review notes');
|
|
1278
|
+
await appendToSection(story, 'Review Notes', reviewNotes);
|
|
1279
|
+
changesMade.push('Added unified collaborative review notes');
|
|
703
1280
|
// Determine decision
|
|
704
1281
|
const decision = passed ? ReviewDecision.APPROVED : ReviewDecision.REJECTED;
|
|
705
1282
|
// Create review attempt record (omit undefined fields to avoid YAML serialization errors)
|
|
@@ -709,21 +1286,28 @@ ${passed ? '✅ **PASSED** - All reviews approved' : '❌ **FAILED** - Issues mu
|
|
|
709
1286
|
...(passed ? {} : { severity }),
|
|
710
1287
|
feedback: passed ? 'All reviews passed' : formatIssuesForDisplay(allIssues),
|
|
711
1288
|
blockers: allIssues.filter(i => i.severity === 'blocker').map(i => i.description),
|
|
712
|
-
codeReviewPassed
|
|
713
|
-
securityReviewPassed
|
|
714
|
-
poReviewPassed
|
|
1289
|
+
codeReviewPassed,
|
|
1290
|
+
securityReviewPassed,
|
|
1291
|
+
poReviewPassed,
|
|
715
1292
|
};
|
|
716
1293
|
// Append to review history
|
|
717
|
-
appendReviewHistory(story, reviewAttempt);
|
|
1294
|
+
await appendReviewHistory(story, reviewAttempt);
|
|
718
1295
|
changesMade.push('Recorded review attempt in history');
|
|
719
1296
|
if (passed) {
|
|
720
|
-
updateStoryField(story, 'reviews_complete', true);
|
|
1297
|
+
await updateStoryField(story, 'reviews_complete', true);
|
|
721
1298
|
changesMade.push('Marked reviews_complete: true');
|
|
722
1299
|
}
|
|
723
1300
|
else {
|
|
724
1301
|
changesMade.push(`Reviews failed with ${allIssues.length} issue(s) - rework required`);
|
|
725
1302
|
// Don't mark reviews_complete, this will trigger rework
|
|
726
1303
|
}
|
|
1304
|
+
logger.info('review', 'Review phase complete', {
|
|
1305
|
+
storyId: story.frontmatter.id,
|
|
1306
|
+
durationMs: Date.now() - startTime,
|
|
1307
|
+
passed,
|
|
1308
|
+
decision,
|
|
1309
|
+
issueCount: allIssues.length,
|
|
1310
|
+
});
|
|
727
1311
|
return {
|
|
728
1312
|
success: true,
|
|
729
1313
|
story: parseStory(storyPath),
|
|
@@ -739,6 +1323,11 @@ ${passed ? '✅ **PASSED** - All reviews approved' : '❌ **FAILED** - Issues mu
|
|
|
739
1323
|
catch (error) {
|
|
740
1324
|
// Review agent failure - return FAILED decision (doesn't count as retry)
|
|
741
1325
|
const errorMsg = error instanceof Error ? error.message : String(error);
|
|
1326
|
+
logger.error('review', 'Review phase failed', {
|
|
1327
|
+
storyId: story.frontmatter.id,
|
|
1328
|
+
durationMs: Date.now() - startTime,
|
|
1329
|
+
error: errorMsg,
|
|
1330
|
+
});
|
|
742
1331
|
return {
|
|
743
1332
|
success: false,
|
|
744
1333
|
story,
|
|
@@ -756,6 +1345,139 @@ ${passed ? '✅ **PASSED** - All reviews approved' : '❌ **FAILED** - Issues mu
|
|
|
756
1345
|
};
|
|
757
1346
|
}
|
|
758
1347
|
}
|
|
1348
|
+
/**
|
|
1349
|
+
* Parse story content into sections by level-2 headers (##)
|
|
1350
|
+
* Returns array of {title, content} objects
|
|
1351
|
+
*/
|
|
1352
|
+
export function parseContentSections(content) {
|
|
1353
|
+
const sections = [];
|
|
1354
|
+
const lines = content.split('\n');
|
|
1355
|
+
let currentSection = null;
|
|
1356
|
+
for (const line of lines) {
|
|
1357
|
+
const headerMatch = line.match(/^##\s+(.+)$/);
|
|
1358
|
+
if (headerMatch) {
|
|
1359
|
+
if (currentSection)
|
|
1360
|
+
sections.push(currentSection);
|
|
1361
|
+
currentSection = { title: headerMatch[1], content: '' };
|
|
1362
|
+
}
|
|
1363
|
+
else if (currentSection) {
|
|
1364
|
+
currentSection.content += line + '\n';
|
|
1365
|
+
}
|
|
1366
|
+
}
|
|
1367
|
+
if (currentSection)
|
|
1368
|
+
sections.push(currentSection);
|
|
1369
|
+
return sections;
|
|
1370
|
+
}
|
|
1371
|
+
/**
|
|
1372
|
+
* Remove unfinished checkboxes from content (per CLAUDE.md requirement)
|
|
1373
|
+
* Removes lines with `- [ ]` or `* [ ]` patterns
|
|
1374
|
+
* Preserves completed checkboxes `- [x]` and `- [X]`
|
|
1375
|
+
*/
|
|
1376
|
+
export function removeUnfinishedCheckboxes(content) {
|
|
1377
|
+
const lines = content.split('\n');
|
|
1378
|
+
const filteredLines = [];
|
|
1379
|
+
for (let i = 0; i < lines.length; i++) {
|
|
1380
|
+
const line = lines[i];
|
|
1381
|
+
// Match unchecked boxes: - [ ] or * [ ] with optional leading whitespace
|
|
1382
|
+
const isUnchecked = /^\s*[-*] \[ \]/.test(line);
|
|
1383
|
+
if (!isUnchecked) {
|
|
1384
|
+
filteredLines.push(line);
|
|
1385
|
+
}
|
|
1386
|
+
}
|
|
1387
|
+
return filteredLines.join('\n');
|
|
1388
|
+
}
|
|
1389
|
+
/**
|
|
1390
|
+
* Generate GitHub blob URL for story file
|
|
1391
|
+
* Parses remote URL and constructs link to story in repository
|
|
1392
|
+
*/
|
|
1393
|
+
export function getStoryFileURL(storyPath, branch, workingDir) {
|
|
1394
|
+
try {
|
|
1395
|
+
const remoteUrl = execSync('git remote get-url origin', { cwd: workingDir, encoding: 'utf-8' }).trim();
|
|
1396
|
+
// Parse owner/repo from URL
|
|
1397
|
+
// HTTPS: https://github.com/owner/repo.git
|
|
1398
|
+
// SSH: git@github.com:owner/repo.git
|
|
1399
|
+
const match = remoteUrl.match(/github\.com[:/]([^/]+)\/(.+?)(\.git)?$/);
|
|
1400
|
+
if (!match)
|
|
1401
|
+
return '';
|
|
1402
|
+
const [, owner, repo] = match;
|
|
1403
|
+
const relativePath = path.relative(workingDir, storyPath);
|
|
1404
|
+
return `https://github.com/${owner}/${repo}/blob/${branch}/${relativePath}`;
|
|
1405
|
+
}
|
|
1406
|
+
catch {
|
|
1407
|
+
return '';
|
|
1408
|
+
}
|
|
1409
|
+
}
+/**
+ * Format PR description from story sections
+ * Includes: Story ID, User Story, Summary, Acceptance Criteria, Implementation Summary
+ * Removes unfinished checkboxes from all sections
+ */
+export function formatPRDescription(story, storyFileUrl) {
+    const sections = parseContentSections(story.content);
+    // Extract key sections
+    const userStory = sections.find(s => s.title === 'User Story')?.content || '';
+    const summary = sections.find(s => s.title === 'Summary')?.content || '';
+    const acceptanceCriteria = sections.find(s => s.title === 'Acceptance Criteria')?.content || '';
+    const implementationSummary = sections.find(s => s.title === 'Implementation Summary')?.content || '';
+    // Remove unfinished checkboxes from all sections
+    const cleanAcceptanceCriteria = removeUnfinishedCheckboxes(acceptanceCriteria);
+    const cleanImplementationSummary = removeUnfinishedCheckboxes(implementationSummary);
+    // Build PR body
+    let prBody = `## Story ID\n\n${story.frontmatter.id}\n\n`;
+    if (userStory.trim()) {
+        prBody += `## User Story\n\n${userStory.trim()}\n\n`;
+    }
+    if (summary.trim()) {
+        prBody += `## Summary\n\n${summary.trim()}\n\n`;
+    }
+    if (cleanAcceptanceCriteria.trim()) {
+        prBody += `## Acceptance Criteria\n\n${cleanAcceptanceCriteria.trim()}\n\n`;
+    }
+    if (cleanImplementationSummary.trim()) {
+        prBody += `## Implementation Summary\n\n${cleanImplementationSummary.trim()}\n\n`;
+    }
+    // Add story file link
+    if (storyFileUrl) {
+        prBody += `---\n\n📋 [View Full Story](${storyFileUrl})\n`;
+    }
+    return prBody;
+}
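
Putting the helpers together, a minimal sketch of the resulting PR body (the story fields and URL are invented):

```js
// Hypothetical story object; the frontmatter fields shown are the ones this function reads.
const story = {
    frontmatter: { id: 'ST-042', title: 'Add rich PR descriptions' },
    content: '## Summary\nBuild the PR body from story sections.\n\n## Acceptance Criteria\n- [x] Sections included\n- [ ] (unchecked, will be dropped)\n',
};
const prBody = formatPRDescription(story, 'https://github.com/acme/stories/blob/main/ST-042.md');
// prBody contains "## Story ID", "## Summary", and "## Acceptance Criteria" (with the
// unchecked "- [ ]" line removed), then the "📋 [View Full Story](...)" footer.
// Sections that are missing or empty in the story are skipped entirely.
```
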
+/**
+ * Truncate PR body to respect GitHub's 65K character limit
+ * Truncates Implementation Summary first (most verbose section)
+ * Adds clear truncation indicator with story link
+ */
+export function truncatePRBody(body, maxLength = 64000) {
+    // Check if truncation needed
+    if (body.length <= maxLength) {
+        return body;
+    }
+    // Find Implementation Summary section
+    const implSummaryMatch = body.match(/(## Implementation Summary\n\n)([\s\S]*?)(\n\n##|\n\n---|\n\n📋|$)/);
+    if (implSummaryMatch) {
+        const [fullMatch, header, content, trailer] = implSummaryMatch;
+        const beforeImpl = body.substring(0, body.indexOf(fullMatch));
+        const afterImpl = body.substring(body.indexOf(fullMatch) + fullMatch.length);
+        // Calculate how much we need to remove
+        const overhead = beforeImpl.length + header.length + trailer.length + afterImpl.length;
+        const truncationIndicator = '\n\n⚠️ Implementation Summary truncated due to length. See full story for complete details.\n';
+        const availableForContent = maxLength - overhead - truncationIndicator.length;
+        if (availableForContent > 100) {
+            // Truncate Implementation Summary at paragraph boundary
+            let truncatedContent = content.substring(0, availableForContent);
+            const lastParagraph = truncatedContent.lastIndexOf('\n\n');
+            if (lastParagraph > 0) {
+                truncatedContent = truncatedContent.substring(0, lastParagraph);
+            }
+            return beforeImpl + header + truncatedContent + truncationIndicator + trailer + afterImpl;
+        }
+    }
+    // Fallback: simple truncation if no Implementation Summary found
+    const truncatedBody = body.substring(0, maxLength - 200);
+    const lastParagraph = truncatedBody.lastIndexOf('\n\n');
+    const finalBody = lastParagraph > 0 ? truncatedBody.substring(0, lastParagraph) : truncatedBody;
+    return finalBody + '\n\n⚠️ Description truncated due to length. See full story for complete details.\n';
+}
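
Two illustrative cases: a short body passes through untouched, while an oversized Implementation Summary is cut back to the 64,000-character budget with the notice inserted (the bodies here are synthetic):

```js
// Synthetic bodies for illustration only.
const short = truncatePRBody('## Story ID\n\nST-042\n');
console.log(short === '## Story ID\n\nST-042\n'); // true — under the limit, returned as-is

const long = '## Summary\n\nIntro.\n\n## Implementation Summary\n\n' + 'Details paragraph.\n\n'.repeat(5000);
const trimmed = truncatePRBody(long);
console.log(trimmed.length <= 64000);                              // true
console.log(trimmed.includes('Implementation Summary truncated')); // true
```
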
 /**
  * Run a sub-review with a specific prompt
  */
@@ -782,7 +1504,7 @@ Provide your ${reviewType} feedback. Be specific and actionable.`;
 /**
  * Create a pull request for the completed story
  */
-export async function createPullRequest(storyPath, sdlcRoot) {
+export async function createPullRequest(storyPath, sdlcRoot, options) {
     let story = parseStory(storyPath);
     const changesMade = [];
     const workingDir = path.dirname(sdlcRoot);
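
The third parameter is new in this version; a hypothetical call site that forces a draft PR could look like the following (the story path and sdlc root are placeholders, and the result shape is the `success`/`story`/`changesMade` object built in this function):

```js
// Hypothetical call; the paths are placeholders.
const result = await createPullRequest('.sdlc/stories/ST-042.md', '.sdlc', { draft: true });
if (result.success) {
    console.log(result.changesMade.join('\n')); // e.g. "Created draft PR: <url>"
}
```
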
@@ -819,7 +1541,7 @@ export async function createPullRequest(storyPath, sdlcRoot) {
     catch {
         changesMade.push('GitHub CLI not available - PR creation skipped');
         // Still update to done for MVP
-        story = updateStoryStatus(story, 'done');
+        story = await updateStoryStatus(story, 'done');
         changesMade.push('Updated status to done');
         return {
             success: true,
@@ -844,37 +1566,69 @@ export async function createPullRequest(storyPath, sdlcRoot) {
         // Push branch (already validated)
         execSync(`git push -u origin ${branchName}`, { cwd: workingDir, stdio: 'pipe' });
         changesMade.push(`Pushed branch: ${branchName}`);
-        //
-
+        // Check if PR already exists for this branch
+        try {
+            const existingPROutput = execSync('gh pr view --json url', { cwd: workingDir, encoding: 'utf-8', stdio: 'pipe' });
+            const prData = JSON.parse(existingPROutput);
+            if (prData.url) {
+                changesMade.push(`PR already exists: ${prData.url}`);
+                // Update story with PR URL if missing
+                if (!story.frontmatter.pr_url) {
+                    await updateStoryField(story, 'pr_url', prData.url);
+                    changesMade.push('Updated story with existing PR URL');
+                }
+                // Don't create duplicate - skip to status update
+                story = await updateStoryStatus(story, 'done');
+                changesMade.push('Updated status to done');
+                return {
+                    success: true,
+                    story,
+                    changesMade,
+                };
+            }
+        }
+        catch {
+            // No existing PR - proceed with creation
+        }
+        // Create PR using gh CLI with rich formatted body
+        // Security: Use escaped arguments and heredoc to prevent shell injection
         const prTitle = story.frontmatter.title;
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-        const prOutput = execSync(`gh pr create --title ${escapeShellArg(prTitle)} --body ${escapeShellArg(prBody)}`, { cwd: workingDir, encoding: 'utf-8' });
+        // Generate story file URL
+        const storyFileUrl = getStoryFileURL(storyPath, branchName, workingDir);
+        // Format rich PR description
+        let prBody = formatPRDescription(story, storyFileUrl);
+        // Truncate if needed to respect GitHub's 65K limit
+        prBody = truncatePRBody(prBody);
+        // Determine if draft PR should be created
+        // Options parameter takes precedence, then config, default is false
+        const config = loadConfig(workingDir);
+        const createAsDraft = options?.draft ?? config.github?.createDraftPRs ?? false;
+        const draftFlag = createAsDraft ? ' --draft' : '';
+        // Use heredoc pattern for multi-line body to preserve formatting
+        const ghCommand = `gh pr create --title ${escapeShellArg(prTitle)}${draftFlag} --body "$(cat <<'EOF'
+${prBody}
+EOF
+)"`;
+        const prOutput = execSync(ghCommand, { cwd: workingDir, encoding: 'utf-8' });
         const prUrl = prOutput.trim();
-        updateStoryField(story, 'pr_url', prUrl);
-
+        await updateStoryField(story, 'pr_url', prUrl);
+        const prTypeLabel = createAsDraft ? 'draft PR' : 'PR';
+        changesMade.push(`Created ${prTypeLabel}: ${prUrl}`);
     }
     catch (error) {
         const sanitizedError = sanitizeErrorMessage(error instanceof Error ? error.message : String(error), workingDir);
-
+        // Provide actionable error messages for common issues
+        let errorMessage = `PR creation failed: ${sanitizedError}`;
+        if (sanitizedError.includes('authentication') || sanitizedError.includes('auth') || sanitizedError.includes('credentials')) {
+            errorMessage = `GitHub authentication failed. Please authenticate using one of:
+1. Set GITHUB_TOKEN env var: export GITHUB_TOKEN=ghp_xxx
+2. Run: gh auth login
+3. Check: gh auth status`;
+        }
+        changesMade.push(errorMessage);
     }
     // Update status to done
-    story = updateStoryStatus(story, 'done');
+    story = await updateStoryStatus(story, 'done');
     changesMade.push('Updated status to done');
     return {
         success: true,