@pcoliveira90/pdd 0.4.0 → 0.5.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/bin/pdd-ai.js CHANGED
@@ -11,6 +11,10 @@ async function main() {
11
11
  console.log(`Provider: ${result.provider}`);
12
12
  console.log(`Task: ${result.task}`);
13
13
  console.log(`Model: ${result.model}`);
14
+ console.log(`Model selection: ${result.model_selection?.selected_automatically ? 'automatic' : 'user/fallback'}`);
15
+ if (result.model_selection?.note) {
16
+ console.log(`Selection note: ${result.model_selection.note}`);
17
+ }
14
18
  console.log(`Issue: ${result.issue}`);
15
19
  console.log('\nResult:\n');
16
20
 
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@pcoliveira90/pdd",
3
- "version": "0.4.0",
3
+ "version": "0.5.0",
4
4
  "description": "Patch-Driven Development CLI — safe, resilient and guided code changes",
5
5
  "type": "module",
6
6
  "bin": {
@@ -0,0 +1,82 @@
1
+ const TASK_ALIASES = {
2
+ analyze: 'analysis',
3
+ analysis: 'analysis',
4
+ recon: 'analysis',
5
+ build: 'build',
6
+ implement: 'build',
7
+ implementation: 'build',
8
+ test: 'test',
9
+ testing: 'test',
10
+ review: 'review',
11
+ security: 'review'
12
+ };
13
+
14
+ const PROVIDER_TASK_MODELS = {
15
+ openai: {
16
+ analysis: 'gpt-5',
17
+ build: 'gpt-5',
18
+ test: 'gpt-5-mini',
19
+ review: 'gpt-5'
20
+ },
21
+ claude: {
22
+ analysis: 'claude-sonnet-4-20250514',
23
+ build: 'claude-sonnet-4-20250514',
24
+ test: 'claude-sonnet-4-20250514',
25
+ review: 'claude-sonnet-4-20250514'
26
+ },
27
+ openrouter: {
28
+ analysis: 'anthropic/claude-3.5-sonnet',
29
+ build: 'openai/gpt-4.1',
30
+ test: 'openai/gpt-4.1-mini',
31
+ review: 'anthropic/claude-3.5-sonnet'
32
+ }
33
+ };
34
+
35
+ function normalizeTask(task) {
36
+ const key = String(task || 'analysis').trim().toLowerCase();
37
+ return TASK_ALIASES[key] || null;
38
+ }
39
+
40
+ export function resolveTaskModel({
41
+ provider,
42
+ task = 'analysis',
43
+ explicitModel = null,
44
+ fallbackModel
45
+ }) {
46
+ if (explicitModel) {
47
+ return {
48
+ task: normalizeTask(task) || 'analysis',
49
+ model: explicitModel,
50
+ selectedAutomatically: false,
51
+ note: 'Using model explicitly provided by user.'
52
+ };
53
+ }
54
+
55
+ const normalizedTask = normalizeTask(task);
56
+ if (!normalizedTask) {
57
+ return {
58
+ task: 'analysis',
59
+ model: fallbackModel,
60
+ selectedAutomatically: false,
61
+ note: `Unknown task "${task}". Suggested fallback model selected.`
62
+ };
63
+ }
64
+
65
+ const providerMap = PROVIDER_TASK_MODELS[provider];
66
+ const model = providerMap?.[normalizedTask];
67
+ if (!model) {
68
+ return {
69
+ task: normalizedTask,
70
+ model: fallbackModel,
71
+ selectedAutomatically: false,
72
+ note: 'Automatic task model mapping unavailable for this provider. Suggested fallback model selected.'
73
+ };
74
+ }
75
+
76
+ return {
77
+ task: normalizedTask,
78
+ model,
79
+ selectedAutomatically: true,
80
+ note: 'Model selected automatically by task profile.'
81
+ };
82
+ }
@@ -1,5 +1,6 @@
1
1
  import { buildBugfixPrompt } from './analyze-change.js';
2
2
  import { getAiProviderConfig } from './engine.js';
3
+ import { resolveTaskModel } from './model-router.js';
3
4
 
4
5
  function extractArgValue(args, name, fallback = null) {
5
6
  const prefix = `${name}=`;
@@ -144,8 +145,15 @@ function parseJsonSafely(text) {
144
145
  export async function runAiFixAnalysis(argv = process.argv.slice(2)) {
145
146
  const provider = extractArgValue(argv, '--provider', 'openai');
146
147
  const providerConfig = getAiProviderConfig(provider);
147
- const model = extractArgValue(argv, '--model', providerConfig.defaultModel);
148
+ const explicitModel = extractArgValue(argv, '--model', null);
148
149
  const task = extractArgValue(argv, '--task', 'analysis');
150
+ const modelSelection = resolveTaskModel({
151
+ provider,
152
+ task,
153
+ explicitModel,
154
+ fallbackModel: providerConfig.defaultModel
155
+ });
156
+ const model = modelSelection.model;
149
157
  const issue = getIssueFromArgs(argv);
150
158
 
151
159
  if (!issue) {
@@ -179,8 +187,12 @@ export async function runAiFixAnalysis(argv = process.argv.slice(2)) {
179
187
 
180
188
  return {
181
189
  provider,
182
- task,
190
+ task: modelSelection.task,
183
191
  model,
192
+ model_selection: {
193
+ selected_automatically: modelSelection.selectedAutomatically,
194
+ note: modelSelection.note
195
+ },
184
196
  issue,
185
197
  result: parsed
186
198
  };
package/src/cli/index.js CHANGED
@@ -1,7 +1,6 @@
1
1
  import { readFileSync } from 'fs';
2
2
  import { dirname, join } from 'path';
3
3
  import { fileURLToPath } from 'url';
4
- import { spawnSync } from 'child_process';
5
4
  import { runValidation } from '../core/validator.js';
6
5
  import { openPullRequest } from '../core/pr-manager.js';
7
6
  import { generatePatchArtifacts } from '../core/patch-generator.js';
@@ -9,7 +8,13 @@ import { runInit } from './init-command.js';
9
8
  import { runDoctor } from './doctor-command.js';
10
9
  import { runStatus } from './status-command.js';
11
10
  import { runResilientFixWorkflow } from '../core/fix-runner.js';
12
- import { createLinkedWorktree, detectWorktreeContext } from '../core/worktree-guard.js';
11
+ import {
12
+ analyzeStructuralImpact,
13
+ formatRiskSummary,
14
+ enforceStructuralRiskAck
15
+ } from '../core/structural-risk-guard.js';
16
+ import { runAutomaticGapCheck, formatGapCheckSummary } from '../core/gap-checker.js';
17
+ import { maybeAutoRelocateToWorktree } from '../core/worktree-guard.js';
13
18
 
14
19
  const __dirname = dirname(fileURLToPath(import.meta.url));
15
20
 
@@ -20,6 +25,12 @@ function readCliVersion() {
20
25
  }
21
26
 
22
27
  function parseFixArgs(argv) {
28
+ const minCoverageArg = argv.find(arg => arg.startsWith('--min-coverage='));
29
+ const parsedMinCoverage = minCoverageArg ? Number(minCoverageArg.split('=')[1]) : null;
30
+ const minCoverage = Number.isFinite(parsedMinCoverage)
31
+ ? parsedMinCoverage
32
+ : Number(process.env.PDD_MIN_COVERAGE || 80);
33
+
23
34
  const issue = argv
24
35
  .filter(arg => !arg.startsWith('--') && arg !== 'fix')
25
36
  .join(' ')
@@ -30,48 +41,13 @@ function parseFixArgs(argv) {
30
41
  openPr: argv.includes('--open-pr'),
31
42
  dryRun: argv.includes('--dry-run'),
32
43
  noValidate: argv.includes('--no-validate'),
33
- allowMainWorktree: argv.includes('--allow-main-worktree')
44
+ ackStructuralRisk: argv.includes('--ack-structural-risk'),
45
+ minCoverage,
46
+ requireCoverage: argv.includes('--require-coverage'),
47
+ noCoverageGate: argv.includes('--no-coverage-gate')
34
48
  };
35
49
  }
36
50
 
37
- function maybeAutoRelocateToWorktree({
38
- cwd,
39
- argv,
40
- commandName,
41
- enabled
42
- }) {
43
- if (!enabled || argv.includes('--allow-main-worktree')) {
44
- return false;
45
- }
46
-
47
- const context = detectWorktreeContext(cwd);
48
- if (!context.isGitRepo || !context.isPrimaryWorktree) {
49
- return false;
50
- }
51
-
52
- const { worktreePath, branchName } = createLinkedWorktree({
53
- baseDir: cwd,
54
- commandName
55
- });
56
-
57
- console.log(`🔀 Primary worktree detected. Auto-created linked worktree: ${worktreePath}`);
58
- console.log(`🪴 Branch: ${branchName}`);
59
- console.log('▶️ Continuing command in the new worktree...');
60
-
61
- const result = spawnSync(
62
- process.execPath,
63
- [process.argv[1], ...argv],
64
- { cwd: worktreePath, stdio: 'inherit' }
65
- );
66
-
67
- if (result.error) {
68
- throw result.error;
69
- }
70
-
71
- process.exitCode = typeof result.status === 'number' ? result.status : 1;
72
- return true;
73
- }
74
-
75
51
  export async function runCli(argv = process.argv.slice(2)) {
76
52
  const command = argv[0];
77
53
  const cwd = process.cwd();
@@ -82,28 +58,11 @@ export async function runCli(argv = process.argv.slice(2)) {
82
58
  }
83
59
 
84
60
  if (command === 'init') {
85
- const mutatesCurrentRepo = argv.includes('--here') || argv.includes('--upgrade');
86
- if (maybeAutoRelocateToWorktree({
87
- cwd,
88
- argv,
89
- commandName: 'init',
90
- enabled: mutatesCurrentRepo
91
- })) {
92
- return;
93
- }
94
61
  await runInit(argv);
95
62
  return;
96
63
  }
97
64
 
98
65
  if (command === 'doctor') {
99
- if (maybeAutoRelocateToWorktree({
100
- cwd,
101
- argv,
102
- commandName: 'doctor-fix',
103
- enabled: argv.includes('--fix')
104
- })) {
105
- return;
106
- }
107
66
  runDoctor(cwd, argv);
108
67
  return;
109
68
  }
@@ -114,38 +73,71 @@ export async function runCli(argv = process.argv.slice(2)) {
114
73
  }
115
74
 
116
75
  if (command === 'fix') {
117
- const { issue, openPr, dryRun, noValidate } = parseFixArgs(argv);
76
+ const relocated = maybeAutoRelocateToWorktree({
77
+ cwd,
78
+ argv,
79
+ commandName: 'fix'
80
+ });
81
+ if (relocated) return;
82
+
83
+ const {
84
+ issue,
85
+ openPr,
86
+ dryRun,
87
+ noValidate,
88
+ ackStructuralRisk,
89
+ minCoverage,
90
+ requireCoverage,
91
+ noCoverageGate
92
+ } = parseFixArgs(argv);
118
93
 
119
94
  if (!issue) {
120
95
  console.error('❌ Missing issue description.');
121
- console.log('Use: pdd fix "description" [--open-pr] [--dry-run] [--no-validate] [--allow-main-worktree]');
96
+ console.log('Use: pdd fix "description" [--open-pr] [--dry-run] [--no-validate] [--ack-structural-risk] [--min-coverage=80] [--require-coverage] [--no-coverage-gate] [--allow-main-worktree]');
122
97
  process.exit(1);
123
98
  }
124
99
 
125
- if (maybeAutoRelocateToWorktree({
126
- cwd,
127
- argv,
128
- commandName: 'fix',
129
- enabled: true
130
- })) {
131
- return;
132
- }
133
-
134
100
  console.log('🔧 PDD Fix Workflow');
135
101
  console.log(`Issue: ${issue}`);
136
102
  console.log(`Open PR prep: ${openPr ? 'yes' : 'no'}`);
137
103
  console.log(`Dry run: ${dryRun ? 'yes' : 'no'}`);
138
104
  console.log(`Validation: ${noValidate ? 'skipped' : 'enabled'}`);
105
+ console.log(`Coverage gate: ${noCoverageGate ? 'disabled' : `enabled (min ${minCoverage}%)`}`);
106
+
107
+ const riskAssessment = analyzeStructuralImpact(issue);
108
+ console.log(formatRiskSummary(riskAssessment));
109
+ const gapCheck = runAutomaticGapCheck({
110
+ issue,
111
+ riskAssessment,
112
+ minCoverage
113
+ });
114
+ console.log(formatGapCheckSummary(gapCheck));
139
115
 
140
116
  try {
117
+ await enforceStructuralRiskAck({
118
+ assessment: riskAssessment,
119
+ ackFlag: ackStructuralRisk,
120
+ dryRun
121
+ });
122
+
141
123
  const result = await runResilientFixWorkflow({
142
124
  baseDir: cwd,
143
125
  issue,
144
126
  dryRun,
145
127
  noValidate,
146
128
  openPr,
147
- generatePatchArtifacts,
148
- runValidation,
129
+ generatePatchArtifacts: args =>
130
+ generatePatchArtifacts({
131
+ ...args,
132
+ riskAssessment,
133
+ gapCheck
134
+ }),
135
+ runValidation: targetBaseDir =>
136
+ runValidation(targetBaseDir, {
137
+ coverageGate: !noCoverageGate,
138
+ minCoverage,
139
+ requireCoverage
140
+ }),
149
141
  openPullRequest
150
142
  });
151
143
 
@@ -182,11 +174,12 @@ export async function runCli(argv = process.argv.slice(2)) {
182
174
  console.log(' pdd doctor [--fix] Check installation health and optionally auto-repair');
183
175
  console.log(' pdd status Show current change workflow state');
184
176
  console.log(' pdd fix "description" [--open-pr] [--dry-run] [--no-validate] Run fix workflow and generate artifacts');
177
+ console.log(' [--ack-structural-risk] [--min-coverage=80] [--require-coverage] [--no-coverage-gate] [--allow-main-worktree]');
185
178
  console.log(' pdd version (or: pdd --version, pdd -v) Show CLI version');
186
179
  console.log('');
187
180
  console.log('Worktree policy:');
188
- console.log(' Mutating commands auto-create and use a linked git worktree when needed.');
189
- console.log(' Use --allow-main-worktree only if you intentionally want to run in primary.');
181
+ console.log(' Task execution auto-creates and uses a linked git worktree when needed.');
182
+ console.log(' Current scope: pdd fix. Use --allow-main-worktree to run in primary intentionally.');
190
183
  console.log('');
191
184
  console.log('AI command (official binary):');
192
185
  console.log(' pdd-ai [--provider=openai|claude|openrouter] [--task=analysis|build|test|review] [--model=<id>] "issue"');
@@ -0,0 +1,128 @@
1
+ function hasAny(text, patterns) {
2
+ return patterns.some(pattern => pattern.test(text));
3
+ }
4
+
5
+ function mapTasks(issue, riskAssessment) {
6
+ const tasks = [
7
+ 'Map current vs expected behavior',
8
+ 'Confirm root cause',
9
+ 'Define minimal safe delta',
10
+ 'Define validation plan'
11
+ ];
12
+
13
+ if (riskAssessment?.hasHighRisk) {
14
+ tasks.push('Design structural mitigation and rollback plan');
15
+ }
16
+
17
+ return tasks;
18
+ }
19
+
20
+ export function runAutomaticGapCheck({ issue = '', riskAssessment = null, minCoverage = 80 }) {
21
+ const normalized = String(issue || '').toLowerCase();
22
+ const mappedTasks = mapTasks(normalized, riskAssessment);
23
+ const gaps = [];
24
+
25
+ const hasBusinessContext = hasAny(normalized, [
26
+ /\bregra\b/i,
27
+ /\bneg[oó]cio\b/i,
28
+ /\bpolicy\b/i,
29
+ /\bcrit[eé]rio\b/i,
30
+ /\baceita[cç][aã]o\b/i
31
+ ]);
32
+ if (!hasBusinessContext) {
33
+ gaps.push({
34
+ id: 'business-rules-context',
35
+ severity: 'high',
36
+ title: 'Business rules context is not explicit',
37
+ recommendation: 'Document business rules and acceptance criteria before implementation.'
38
+ });
39
+ }
40
+
41
+ const hasUsabilityContext = hasAny(normalized, [
42
+ /\busu[aá]rio\b/i,
43
+ /\bux\b/i,
44
+ /\busabilidade\b/i,
45
+ /\bfluxo\b/i,
46
+ /\bjornada\b/i,
47
+ /\binterface\b/i
48
+ ]);
49
+ if (!hasUsabilityContext) {
50
+ gaps.push({
51
+ id: 'usability-context',
52
+ severity: 'medium',
53
+ title: 'Usability impact is not explicit',
54
+ recommendation: 'Map affected journey and UI/interaction impact before implementation.'
55
+ });
56
+ }
57
+
58
+ const hasSecurityContext = hasAny(normalized, [
59
+ /\bsecurity\b/i,
60
+ /\bseguran[cç]a\b/i,
61
+ /\bauth\b/i,
62
+ /\bpermiss[aã]o\b/i,
63
+ /\bexposi[cç][aã]o\b/i,
64
+ /\bprivacidade\b/i
65
+ ]);
66
+ if (!hasSecurityContext) {
67
+ gaps.push({
68
+ id: 'security-context',
69
+ severity: 'medium',
70
+ title: 'Security impact is not explicit',
71
+ recommendation: 'Review auth, authorization, and data exposure risks for this change.'
72
+ });
73
+ }
74
+
75
+ const hasValidationContext = hasAny(normalized, [
76
+ /\btest\b/i,
77
+ /\bteste\b/i,
78
+ /\bvalida[cç][aã]o\b/i,
79
+ /\bqa\b/i
80
+ ]);
81
+ if (!hasValidationContext) {
82
+ gaps.push({
83
+ id: 'validation-plan',
84
+ severity: 'high',
85
+ title: 'Validation strategy is not explicit',
86
+ recommendation: `Define tests and minimum coverage target (currently ${minCoverage}%).`
87
+ });
88
+ }
89
+
90
+ if (riskAssessment?.hasHighRisk) {
91
+ const hasMitigationPlan = hasAny(normalized, [
92
+ /\brollback\b/i,
93
+ /\bmigration\b/i,
94
+ /\bcompatibil/i,
95
+ /\bbackward\b/i
96
+ ]);
97
+ if (!hasMitigationPlan) {
98
+ gaps.push({
99
+ id: 'structural-mitigation',
100
+ severity: 'critical',
101
+ title: 'Structural risk detected without mitigation details',
102
+ recommendation: 'Define migration strategy, compatibility approach, and rollback plan.'
103
+ });
104
+ }
105
+ }
106
+
107
+ const criticalCount = gaps.filter(gap => gap.severity === 'critical').length;
108
+ const highCount = gaps.filter(gap => gap.severity === 'high').length;
109
+
110
+ return {
111
+ mappedTasks,
112
+ gaps,
113
+ summary: {
114
+ total: gaps.length,
115
+ critical: criticalCount,
116
+ high: highCount,
117
+ status: gaps.length === 0 ? 'ok' : 'needs-review'
118
+ }
119
+ };
120
+ }
121
+
122
+ export function formatGapCheckSummary(gapCheck) {
123
+ if (!gapCheck || gapCheck.summary.total === 0) {
124
+ return 'Automatic gap check: no critical gaps detected.';
125
+ }
126
+
127
+ return `Automatic gap check: ${gapCheck.summary.total} gap(s) detected (${gapCheck.summary.critical} critical, ${gapCheck.summary.high} high).`;
128
+ }
@@ -17,15 +17,138 @@ function slugify(value) {
17
17
  .replace(/^-+|-+$/g, '')
18
18
  .slice(0, 48);
19
19
  }
20
+ function renderStructuralRiskSection(riskAssessment) {
21
+ if (!riskAssessment?.hasHighRisk) {
22
+ return 'No high structural impact signals detected from issue description.';
23
+ }
24
+
25
+ const lines = [];
26
+ for (const hit of riskAssessment.hits) {
27
+ lines.push(`- ${hit.label} (${hit.id})`);
28
+ }
29
+ return lines.join('\n');
30
+ }
31
+
32
+ function renderGapCheckSection(gapCheck) {
33
+ if (!gapCheck || gapCheck.summary.total === 0) {
34
+ return '- status: ok\n- notes: no automatic gaps detected';
35
+ }
36
+
37
+ const lines = [
38
+ `- status: ${gapCheck.summary.status}`,
39
+ `- total: ${gapCheck.summary.total}`,
40
+ `- critical: ${gapCheck.summary.critical}`,
41
+ `- high: ${gapCheck.summary.high}`
42
+ ];
43
+
44
+ for (const gap of gapCheck.gaps) {
45
+ lines.push(`- [${gap.severity}] ${gap.title}`);
46
+ lines.push(` recommendation: ${gap.recommendation}`);
47
+ }
48
+
49
+ return lines.join('\n');
50
+ }
51
+
52
+ function renderMappedTasksSection(gapCheck) {
53
+ if (!gapCheck || !Array.isArray(gapCheck.mappedTasks) || gapCheck.mappedTasks.length === 0) {
54
+ return '- not available';
55
+ }
56
+
57
+ return gapCheck.mappedTasks.map(task => `- ${task}`).join('\n');
58
+ }
59
+
60
+ function writeWorkItemRecords({ baseDir, changeId, issue, gapCheck }) {
61
+ const recordsBase = path.join(baseDir, '.pdd', 'work-items');
62
+ const changeDir = path.join(recordsBase, 'changes', changeId);
63
+ const planDir = path.join(recordsBase, 'plans', changeId);
64
+ const featureDir = path.join(recordsBase, 'features');
65
+
66
+ writeFile(
67
+ path.join(changeDir, 'proposal.md'),
68
+ `# Change Proposal
69
+
70
+ ## Change ID
71
+ ${changeId}
72
+
73
+ ## Issue
74
+ ${issue}
20
75
 
21
- export function generatePatchArtifacts({ issue, baseDir = process.cwd(), changeId = null }) {
76
+ ## Proposed Solution (concise)
77
+
78
+ ## Why this is the minimal safe option
79
+
80
+ ## Validation with user
81
+ - status: pending
82
+ - feedback:
83
+
84
+ ## User edits to proposal
85
+ `
86
+ );
87
+
88
+ writeFile(
89
+ path.join(changeDir, 'decision.md'),
90
+ `# Change Decision
91
+
92
+ ## Change ID
93
+ ${changeId}
94
+
95
+ ## Decision
96
+ - approved: yes | no
97
+ - approved_by:
98
+ - approved_at:
99
+
100
+ ## Notes
101
+ `
102
+ );
103
+
104
+ writeFile(
105
+ path.join(planDir, 'plan.md'),
106
+ `# Execution Plan
107
+
108
+ ## Change ID
109
+ ${changeId}
110
+
111
+ ## Mapped Tasks
112
+ ${renderMappedTasksSection(gapCheck)}
113
+
114
+ ## Planned Steps (concise)
115
+ 1.
116
+ 2.
117
+ 3.
118
+
119
+ ## Validation and Coverage
120
+ - tests:
121
+ - coverage target:
122
+ `
123
+ );
124
+
125
+ writeFile(
126
+ path.join(featureDir, '.gitkeep'),
127
+ ''
128
+ );
129
+
130
+ return [
131
+ path.join('.pdd', 'work-items', 'changes', changeId, 'proposal.md'),
132
+ path.join('.pdd', 'work-items', 'changes', changeId, 'decision.md'),
133
+ path.join('.pdd', 'work-items', 'plans', changeId, 'plan.md')
134
+ ];
135
+ }
136
+
137
+ export function generatePatchArtifacts({
138
+ issue,
139
+ baseDir = process.cwd(),
140
+ changeId = null,
141
+ riskAssessment = null,
142
+ gapCheck = null
143
+ }) {
22
144
  const resolvedChangeId = changeId || `change-${Date.now()}-${slugify(issue || 'update')}`;
23
145
  const changeDir = path.join(baseDir, 'changes', resolvedChangeId);
24
146
 
25
147
  const files = [
26
148
  path.join('changes', resolvedChangeId, 'delta-spec.md'),
27
149
  path.join('changes', resolvedChangeId, 'patch-plan.md'),
28
- path.join('changes', resolvedChangeId, 'verification-report.md')
150
+ path.join('changes', resolvedChangeId, 'verification-report.md'),
151
+ path.join('changes', resolvedChangeId, 'gaps-report.md')
29
152
  ];
30
153
 
31
154
  writeFile(
@@ -55,6 +178,12 @@ bugfix | feature | refactor-safe | hotfix
55
178
 
56
179
  ## Constraints
57
180
 
181
+ ## Structural Impact Risks
182
+ ${renderStructuralRiskSection(riskAssessment)}
183
+
184
+ ## Automatic Gap Check
185
+ ${renderGapCheckSection(gapCheck)}
186
+
58
187
  ## Minimal Safe Delta
59
188
 
60
189
  ## Alternatives Considered
@@ -79,6 +208,9 @@ ${issue}
79
208
 
80
209
  ## Files to Change
81
210
 
211
+ ## Task Mapping
212
+ ${renderMappedTasksSection(gapCheck)}
213
+
82
214
  ## Execution Steps
83
215
  1. Reproduce issue
84
216
  2. Confirm root cause
@@ -88,6 +220,12 @@ ${issue}
88
220
 
89
221
  ## Regression Risks
90
222
 
223
+ ## Structural Impact Risks
224
+ ${renderStructuralRiskSection(riskAssessment)}
225
+
226
+ ## Automatic Gap Check
227
+ ${renderGapCheckSection(gapCheck)}
228
+
91
229
  ## Rollback Strategy
92
230
  `
93
231
  );
@@ -108,6 +246,11 @@ ${issue}
108
246
 
109
247
  ## Tests Run
110
248
 
249
+ ## Test Coverage
250
+ - minimum threshold:
251
+ - measured result:
252
+ - status: pass | fail | not-available
253
+
111
254
  ## Manual Validation
112
255
 
113
256
  ## Residual Risks
@@ -117,6 +260,37 @@ pending
117
260
  `
118
261
  );
119
262
 
263
+ writeFile(
264
+ path.join(changeDir, 'gaps-report.md'),
265
+ `# Gaps Report
266
+
267
+ ## Change ID
268
+ ${resolvedChangeId}
269
+
270
+ ## Issue
271
+ ${issue}
272
+
273
+ ## Task Mapping
274
+ ${renderMappedTasksSection(gapCheck)}
275
+
276
+ ## Automatic Gap Check Summary
277
+ ${renderGapCheckSection(gapCheck)}
278
+
279
+ ## Reviewer Decision
280
+ - approved: yes | no
281
+ - notes:
282
+ `
283
+ );
284
+
285
+ const workItemFiles = writeWorkItemRecords({
286
+ baseDir,
287
+ changeId: resolvedChangeId,
288
+ issue,
289
+ gapCheck
290
+ });
291
+
292
+ files.push(...workItemFiles);
293
+
120
294
  return {
121
295
  changeId: resolvedChangeId,
122
296
  changeDir,
@@ -0,0 +1,120 @@
1
+ import readline from 'node:readline/promises';
2
+ import { stdin as input, stdout as output } from 'node:process';
3
+
4
+ const STRUCTURAL_RISK_RULES = [
5
+ {
6
+ id: 'database-schema',
7
+ label: 'Database schema/data model changes',
8
+ patterns: [
9
+ /\bdatabase\b/i,
10
+ /\bdb\b/i,
11
+ /\bschema\b/i,
12
+ /\bmigration\b/i,
13
+ /\balter table\b/i,
14
+ /\bdrop table\b/i,
15
+ /\badd column\b/i,
16
+ /\bforeign key\b/i,
17
+ /\bprimary key\b/i,
18
+ /\bindex\b/i
19
+ ]
20
+ },
21
+ {
22
+ id: 'api-contract',
23
+ label: 'API/consumer contract changes',
24
+ patterns: [
25
+ /\bcontract\b/i,
26
+ /\bbreaking\b/i,
27
+ /\bapi\b/i,
28
+ /\bendpoint\b/i,
29
+ /\brequest\b/i,
30
+ /\bresponse\b/i,
31
+ /\bpayload\b/i,
32
+ /\bgraphql\b/i,
33
+ /\bopenapi\b/i
34
+ ]
35
+ },
36
+ {
37
+ id: 'integration-contract',
38
+ label: 'Event/message integration contract changes',
39
+ patterns: [
40
+ /\bevent\b/i,
41
+ /\bmessage\b/i,
42
+ /\btopic\b/i,
43
+ /\bqueue\b/i,
44
+ /\bkafka\b/i,
45
+ /\brabbitmq\b/i,
46
+ /\bsqs\b/i,
47
+ /\bsns\b/i
48
+ ]
49
+ }
50
+ ];
51
+
52
+ export function analyzeStructuralImpact(issue = '') {
53
+ const text = String(issue || '');
54
+ const hits = [];
55
+
56
+ for (const rule of STRUCTURAL_RISK_RULES) {
57
+ const matchedPatterns = rule.patterns.filter(pattern => pattern.test(text)).map(pattern => pattern.source);
58
+ if (matchedPatterns.length > 0) {
59
+ hits.push({
60
+ id: rule.id,
61
+ label: rule.label,
62
+ evidence: matchedPatterns
63
+ });
64
+ }
65
+ }
66
+
67
+ return {
68
+ hasHighRisk: hits.length > 0,
69
+ hits
70
+ };
71
+ }
72
+
73
+ export function formatRiskSummary(assessment) {
74
+ if (!assessment?.hasHighRisk) {
75
+ return 'No high structural impact signals detected in issue description.';
76
+ }
77
+
78
+ const lines = ['High structural impact signals detected:'];
79
+ for (const hit of assessment.hits) {
80
+ lines.push(`- ${hit.label} (${hit.id})`);
81
+ }
82
+ return lines.join('\n');
83
+ }
84
+
85
+ async function askForAck() {
86
+ const rl = readline.createInterface({ input, output });
87
+ try {
88
+ console.log('');
89
+ console.log('⚠️ Structural-impact risk guard');
90
+ console.log('Type "STRUCTURAL_OK" to continue this fix workflow.');
91
+ const answer = await rl.question('> ');
92
+ return answer.trim() === 'STRUCTURAL_OK';
93
+ } finally {
94
+ rl.close();
95
+ }
96
+ }
97
+
98
+ export async function enforceStructuralRiskAck({
99
+ assessment,
100
+ ackFlag = false,
101
+ dryRun = false,
102
+ isInteractive = process.stdin.isTTY
103
+ }) {
104
+ if (!assessment?.hasHighRisk || dryRun) {
105
+ return;
106
+ }
107
+
108
+ if (ackFlag) {
109
+ return;
110
+ }
111
+
112
+ if (isInteractive) {
113
+ const accepted = await askForAck();
114
+ if (accepted) return;
115
+ }
116
+
117
+ throw new Error(
118
+ 'High structural-impact risk detected. Re-run with --ack-structural-risk after reviewing risks.'
119
+ );
120
+ }
@@ -1,4 +1,4 @@
1
- export const PDD_TEMPLATE_VERSION = '0.2.3';
1
+ export const PDD_TEMPLATE_VERSION = '0.3.2';
2
2
 
3
3
  export const CORE_TEMPLATES = {
4
4
  '.pdd/constitution.md': `# PDD Constitution
@@ -24,8 +24,17 @@ Prefer existing patterns over new ones.
24
24
  ## 7. Verifiable Outcome
25
25
  Every change must be validated.
26
26
 
27
- ## 8. Worktree First
28
- Always execute code changes from a linked git worktree, not from the primary worktree.
27
+ ## 8. Business Rule Integrity
28
+ Never break core business rules while fixing or extending behavior.
29
+
30
+ ## 9. Usability First
31
+ Every change must preserve or improve user experience and task flow.
32
+
33
+ ## 10. Security by Default
34
+ Every change must evaluate security impact before implementation.
35
+
36
+ ## 11. Worktree First for Tasks
37
+ When starting implementation tasks (for example, bug fixes), prefer a linked worktree over the primary worktree.
29
38
  `,
30
39
  '.pdd/templates/delta-spec.md': `# Delta Spec
31
40
 
@@ -48,6 +57,17 @@ bugfix | feature | refactor-safe | hotfix
48
57
 
49
58
  ## Constraints
50
59
 
60
+ ## Business Rules Impact
61
+
62
+ ## Usability Impact
63
+
64
+ ## Security Impact
65
+
66
+ ## Structural Impact Risks
67
+ - database/schema/data migration impact
68
+ - API/event contract compatibility impact
69
+ - rollout/rollback complexity impact
70
+
51
71
  ## Minimal Safe Delta
52
72
 
53
73
  ## Alternatives Considered
@@ -71,6 +91,17 @@ bugfix | feature | refactor-safe | hotfix
71
91
 
72
92
  ## Regression Risks
73
93
 
94
+ ## Business Rules Risks
95
+
96
+ ## Usability Risks
97
+
98
+ ## Security Risks
99
+
100
+ ## Structural Impact Risks
101
+ - database/schema/data migration impact
102
+ - API/event contract compatibility impact
103
+ - rollout/rollback complexity impact
104
+
74
105
  ## Rollback Strategy
75
106
  `,
76
107
  '.pdd/templates/verification-report.md': `# Verification Report
@@ -81,12 +112,40 @@ bugfix | feature | refactor-safe | hotfix
81
112
 
82
113
  ## Tests Run
83
114
 
115
+ ## Test Coverage
116
+ - minimum threshold:
117
+ - measured result:
118
+ - status: pass | fail | not-available
119
+
120
+ ## Business Rule Validation
121
+
122
+ ## Usability Validation
123
+
124
+ ## Security Validation
125
+
84
126
  ## Manual Validation
85
127
 
86
128
  ## Residual Risks
87
129
 
88
130
  ## Final Status
89
131
  approved | needs-review | partial
132
+ `,
133
+ '.pdd/templates/gaps-report.md': `# Gaps Report
134
+
135
+ ## Task Mapping
136
+
137
+ ## Automatic Gap Check Summary
138
+
139
+ ## Gaps by Severity
140
+ - critical:
141
+ - high:
142
+ - medium:
143
+
144
+ ## Mitigation Plan
145
+
146
+ ## Reviewer Decision
147
+ - approved: yes | no
148
+ - notes:
90
149
  `,
91
150
  '.pdd/commands/pdd-recon.md': `# pdd.recon
92
151
 
@@ -171,6 +230,42 @@ Map the structure of the system.
171
230
  ## Hotspots
172
231
  -
173
232
  `,
233
+ '.pdd/memory/model-routing.md': `# Model Routing
234
+
235
+ ## Goal
236
+ Pick the most suitable AI model for each task type.
237
+
238
+ ## Task-to-Model Guidance
239
+ - analysis/recon: prefer a more capable model for deep reasoning
240
+ - implementation/build: prefer balanced model (quality + speed)
241
+ - tests/coverage: prefer fast model for iterative feedback loops
242
+ - review/risk/security: prefer a more capable model for edge cases
243
+
244
+ ## Decision Rule
245
+ 1. If model can be set automatically in this environment, set it by task type.
246
+ 2. If model cannot be set automatically, suggest the best model to the user.
247
+ 3. Ask for confirmation before continuing when model choice impacts quality/speed.
248
+
249
+ ## Output Requirement
250
+ - chosen model profile
251
+ - reason for choice
252
+ - fallback suggested to user (if auto-selection is unavailable)
253
+ `,
254
+ '.pdd/work-items/README.md': `# Work Items Registry
255
+
256
+ PDD stores concise and editable records here:
257
+ - plans/
258
+ - changes/
259
+ - features/
260
+
261
+ Each change should include:
262
+ - proposal (user can edit)
263
+ - decision (approval and notes)
264
+ - plan (execution and validation)
265
+ `,
266
+ '.pdd/work-items/plans/.gitkeep': ``,
267
+ '.pdd/work-items/changes/.gitkeep': ``,
268
+ '.pdd/work-items/features/.gitkeep': ``,
174
269
  '.pdd/version.json': JSON.stringify({ templateVersion: PDD_TEMPLATE_VERSION }, null, 2) + '\n'
175
270
  };
176
271
 
@@ -254,7 +349,9 @@ This repo uses **PDD**: safe changes in existing systems. The agent should:
254
349
  - Prefer **minimal safe deltas**; avoid drive-by refactors.
255
350
  - Use templates under \`.pdd/templates/\` when producing specs or reports (\`delta-spec\`, \`patch-plan\`, \`verification-report\`).
256
351
  - Follow playbooks under \`.pdd/commands/\` when the user invokes a PDD slash command.
257
- - Use linked git worktrees for all mutating work. Avoid editing from the primary worktree.
352
+ - For \`bugfix\` and \`feature\`, do not edit files before presenting context, business rules, risks, and plan, then waiting for explicit user approval.
353
+ - Choose model by task type whenever possible (analysis/build/tests/review). If auto model switch is unavailable, suggest model to user and ask confirmation.
354
+ - For implementation tasks, use linked git worktrees when needed (for example, \`pdd fix\`). Avoid changing code from primary worktree when auto-relocation is available.
258
355
 
259
356
  Slash commands live in \`.cursor/commands/\` (type \`/\` in Chat/Agent). They are the primary operational guidance for Cursor.
260
357
  `,
@@ -272,6 +369,7 @@ You are running **Patch-Driven Development** in this repository.
272
369
  - Obey \`.pdd/constitution.md\`.
273
370
  - Evidence before edits: locate behavior in code/tests before changing.
274
371
  - Smallest change that solves the problem; match local patterns.
372
+ - For \`bugfix\` and \`feature\`, always stop for explicit user approval before any file edits.
275
373
 
276
374
  ## User request
277
375
 
@@ -281,15 +379,26 @@ $ARGUMENTS
281
379
 
282
380
  ## What to do
283
381
 
284
- 1. Classify: bugfix vs feature vs exploration-only (**recon**).
285
- 2. Name impacted files and risks.
286
- 3. Propose a minimal plan, then implement or outline next steps.
287
- 4. Say how to verify (tests, manual steps).
382
+ 1. Keep response concise and practical.
383
+ 2. Classify: bugfix vs feature vs recon.
384
+ 3. Map context + business rules (only essential points).
385
+ 4. Map key risks (regression, structural, usability, security).
386
+ 5. Run automatic gap check after task mapping.
387
+ 6. Present a concise proposal and ask the user to edit if needed.
388
+ 7. Ask explicit approval before any file edits.
389
+ 8. After approval, implement and validate.
288
390
 
289
391
  ## Output
290
392
 
291
- - Files touched or to touch
292
- - Risks and what you did not change on purpose
393
+ Use this exact structure:
394
+ 1) Classification
395
+ 2) Context map
396
+ 3) Business rules
397
+ 4) Risks and structural impact
398
+ 5) Concise proposal (editable by user)
399
+ 6) Verification plan
400
+ 7) Automatic gap check
401
+ 8) Pending approval (explicit question)
293
402
  `,
294
403
  '.cursor/commands/pdd-recon.md': `---
295
404
  description: "PDD — recon (explore before editing)"
@@ -313,6 +422,7 @@ $ARGUMENTS
313
422
  - Short map: entry points, key modules, data flow if useful
314
423
  - List of files worth reading next
315
424
  - Risks and unknowns
425
+ - Suggested model profile for next phase (analysis/build/tests/review)
316
426
  - No production edits unless the user explicitly asked to fix something
317
427
  `,
318
428
  '.cursor/commands/pdd-fix.md': `---
@@ -330,16 +440,44 @@ $ARGUMENTS
330
440
 
331
441
  ## Steps
332
442
 
443
+ ### Phase 1 — Investigation (no edits)
333
444
  1. Reproduce or infer current vs expected behavior (code/tests).
334
445
  2. Confirm root cause (not only symptoms).
335
- 3. Apply the smallest fix; avoid scope creep.
336
- 4. State how to verify (tests or manual).
446
+ 3. Map context (data flow, integrations, impacted modules/files).
447
+ 4. List business rules and constraints.
448
+ 5. Analyze usability impact (journeys, friction, discoverability).
449
+ 6. Analyze security impact (auth, authz, data exposure, abuse vectors).
450
+ 7. Build risk map (regression, data/contract, performance/ops, usability, security).
451
+ - Flag structural-impact actions explicitly (database/schema/migrations/contracts).
452
+ 8. Run automatic gap check immediately after task mapping and risk mapping.
453
+ 9. Present concise proposal and allow user edits.
454
+ 10. Ask explicit approval before editing files.
455
+
456
+ ### Phase 2 — Plan (no edits)
457
+ 11. Propose minimal safe delta and alternatives considered.
458
+ 12. Define verification plan (tests + manual checks + rollback).
459
+
460
+ ### Phase 3 — Execution (after approval)
461
+ 13. Implement approved minimal change.
462
+ 14. Validate and report residual risks.
337
463
 
338
464
  ## Output
339
465
 
340
- - Root cause (brief)
341
- - Files changed
342
- - Verification steps
466
+ Use this exact structure:
467
+ 1) Current vs expected behavior
468
+ 2) Root cause
469
+ 3) Context map
470
+ 4) Business rules
471
+ 5) Risks and structural impact
472
+ 6) Concise proposal (editable by user)
473
+ 7) Verification + coverage plan
474
+ 8) Automatic gap check
475
+ 9) Pending approval (explicit question)
476
+
477
+ After approval:
478
+ 10) Files changed
479
+ 11) Validation results
480
+ 12) Residual risks
343
481
  `,
344
482
  '.cursor/commands/pdd-feature.md': `---
345
483
  description: "PDD — feature (safe extension)"
@@ -356,16 +494,42 @@ $ARGUMENTS
356
494
 
357
495
  ## Steps
358
496
 
497
+ ### Phase 1 — Discovery (no edits)
359
498
  1. Understand current behavior and extension points.
360
- 2. Define the smallest extension (APIs, files).
361
- 3. Implement without breaking existing callers.
362
- 4. Verification and rollback idea.
499
+ 2. Map context (user journey, modules/files, contracts, dependencies).
500
+ 3. List business rules and acceptance constraints.
501
+ 4. Analyze usability impact (journeys, accessibility, adoption friction).
502
+ 5. Analyze security impact (permissions, data exposure, misuse scenarios).
503
+ 6. Build risk map (compatibility, regression, data, performance, operational, usability, security).
504
+ - Flag structural-impact actions explicitly (database/schema/migrations/contracts).
505
+ 7. Run automatic gap check immediately after task mapping and risk mapping.
506
+ 8. Present concise proposal and allow user edits.
507
+ 9. Ask explicit approval before editing files.
508
+
509
+ ### Phase 2 — Plan (no edits)
510
+ 10. Define smallest safe extension and non-goals.
511
+ 11. Propose verification and rollback strategy.
512
+
513
+ ### Phase 3 — Execution (after approval)
514
+ 12. Implement approved scope.
515
+ 13. Validate compatibility and report residual risks.
363
516
 
364
517
  ## Output
365
518
 
366
- - Design note (what you extended and why)
367
- - Files changed
368
- - Tests or checks to run
519
+ Use this exact structure:
520
+ 1) Feature scope
521
+ 2) Context map
522
+ 3) Business rules
523
+ 4) Risks and structural impact
524
+ 5) Concise proposal (editable by user)
525
+ 6) Verification + coverage + rollback plan
526
+ 7) Automatic gap check
527
+ 8) Pending approval (explicit question)
528
+
529
+ After approval:
530
+ 9) Files changed
531
+ 10) Validation results
532
+ 11) Residual risks
369
533
  `,
370
534
  '.cursor/commands/pdd-verify.md': `---
371
535
  description: "PDD — verify (validation checklist)"
@@ -386,6 +550,7 @@ $ARGUMENTS
386
550
  - Regressions considered
387
551
  - Manual checks if needed
388
552
  - Residual risks
553
+ - Model used for review and why
389
554
 
390
555
  ## Output
391
556
 
@@ -6,7 +6,82 @@ function runCommand(command, baseDir) {
6
6
  execSync(command, { stdio: 'inherit', cwd: baseDir });
7
7
  }
8
8
 
9
- export function runValidation(baseDir = process.cwd()) {
9
// Locate a coverage summary report under `<baseDir>/coverage`.
// Checks the known file names in priority order and returns the first
// existing path, or null when no report has been generated.
function resolveCoverageSummaryPath(baseDir) {
  for (const fileName of ['coverage-summary.json', 'summary.json']) {
    const candidate = `${baseDir}/coverage/${fileName}`;
    if (fs.existsSync(candidate)) {
      return candidate;
    }
  }
  return null;
}
17
+
18
// Extract the `total` percentage metrics from an Istanbul/c8 JSON coverage
// summary. Only metrics whose `pct` is a finite number are kept, so
// "Unknown" placeholders and missing sections are dropped.
// An unreadable or malformed summary yields {} so the caller can treat it
// via its existing "invalid or empty" path instead of crashing the whole
// validation run with an unhandled exception.
function readCoverageMetrics(coverageSummaryPath) {
  let summary;
  try {
    summary = JSON.parse(fs.readFileSync(coverageSummaryPath, 'utf-8'));
  } catch {
    // Missing file or invalid JSON: report "no metrics" rather than throwing.
    return {};
  }

  const total = summary?.total || {};
  const metrics = {};
  // Insertion order matters to downstream Object.entries consumers.
  for (const name of ['lines', 'statements', 'functions', 'branches']) {
    const pct = Number(total?.[name]?.pct);
    if (Number.isFinite(pct)) {
      metrics[name] = pct;
    }
  }
  return metrics;
}
32
+
33
// Coverage quality gate: fails when the worst total-coverage metric in the
// project's coverage summary is below `minCoverage` percent.
// - No summary file: throw when `requireCoverage`, otherwise log and skip.
// - Summary with no numeric metrics: same treatment.
// Throws an Error on gate failure; logs and returns on success or skip.
function validateCoverage({
  baseDir,
  minCoverage = 80,
  requireCoverage = false
}) {
  // `minCoverage` can come from PDD_MIN_COVERAGE; a non-numeric value would
  // make `metricValue < minCoverage` always false below and silently disable
  // the gate (and `.toFixed(2)` would print "NaN"). Fail loudly instead.
  if (!Number.isFinite(minCoverage)) {
    throw new Error('Coverage gate misconfigured: minimum coverage must be a finite number.');
  }

  const summaryPath = resolveCoverageSummaryPath(baseDir);
  if (!summaryPath) {
    if (requireCoverage) {
      throw new Error(
        'Coverage report not found. Generate coverage (for example with npm run test:coverage) or disable this gate.'
      );
    }

    console.log('⚠️ Coverage report not found. Skipping coverage gate.');
    return;
  }

  const metricEntries = Object.entries(readCoverageMetrics(summaryPath));
  if (metricEntries.length === 0) {
    if (requireCoverage) {
      throw new Error('Coverage summary is invalid or empty.');
    }
    console.log('⚠️ Coverage summary has no numeric metrics. Skipping coverage gate.');
    return;
  }

  // Gate on the weakest metric so no dimension can hide behind the others.
  const [metricName, metricValue] = metricEntries.reduce(
    (worst, entry) => (entry[1] < worst[1] ? entry : worst)
  );

  console.log(`Coverage (worst metric: ${metricName}) = ${metricValue.toFixed(2)}%`);
  if (metricValue < minCoverage) {
    throw new Error(
      `Coverage gate failed: ${metricValue.toFixed(2)}% is below minimum ${minCoverage.toFixed(2)}%.`
    );
  }

  console.log(`✅ Coverage gate passed (minimum ${minCoverage.toFixed(2)}%).`);
}
77
+
78
+ export function runValidation(baseDir = process.cwd(), options = {}) {
79
+ const {
80
+ coverageGate = true,
81
+ minCoverage = Number(process.env.PDD_MIN_COVERAGE || 80),
82
+ requireCoverage = false
83
+ } = options;
84
+
10
85
  console.log('Running validation...');
11
86
 
12
87
  const packageJsonPath = `${baseDir}/package.json`;
@@ -19,7 +94,11 @@ export function runValidation(baseDir = process.cwd()) {
19
94
  const scripts = pkg.scripts || {};
20
95
  const commands = [];
21
96
 
22
- if (scripts.test) commands.push('npm test');
97
+ if (scripts['test:coverage']) {
98
+ commands.push('npm run test:coverage');
99
+ } else if (scripts.test) {
100
+ commands.push('npm test');
101
+ }
23
102
  if (scripts.lint) commands.push('npm run lint');
24
103
  if (scripts.build) commands.push('npm run build');
25
104
 
@@ -30,6 +109,13 @@ export function runValidation(baseDir = process.cwd()) {
30
109
 
31
110
  try {
32
111
  commands.forEach(command => runCommand(command, baseDir));
112
+ if (coverageGate) {
113
+ validateCoverage({
114
+ baseDir,
115
+ minCoverage,
116
+ requireCoverage
117
+ });
118
+ }
33
119
  } catch {
34
120
  throw new Error('Validation failed');
35
121
  }
@@ -1,27 +1,37 @@
1
- import fs from 'fs';
2
- import { execSync, execFileSync } from 'child_process';
3
- import path from 'path';
1
+ import { execFileSync } from 'node:child_process';
2
+ import fs from 'node:fs';
3
+ import path from 'node:path';
4
4
 
5
- function runGit(command, baseDir) {
6
- return execSync(command, { cwd: baseDir, stdio: 'pipe', encoding: 'utf-8' }).trim();
5
// Execute `git <args>` in `cwd` and return the trimmed stdout.
function runGit(args, cwd) {
  const spawnOptions = {
    cwd,
    encoding: 'utf-8',
    // Ignore stdin so git can never block on interactive prompts;
    // capture stdout and stderr instead of inheriting them.
    stdio: ['ignore', 'pipe', 'pipe']
  };
  return execFileSync('git', args, spawnOptions).trim();
}
8
12
 
9
- function normalize(p) {
10
- return path.resolve(String(p || '')).toLowerCase();
13
// Canonicalize a path for comparison: resolve to absolute, use forward
// slashes, and lowercase (so Windows-style paths compare consistently).
function normalizePath(input) {
  const absolute = path.resolve(String(input || ''));
  return absolute.replace(/\\/g, '/').toLowerCase();
}
16
+
17
// Turn arbitrary text into a filesystem/branch-safe slug: lowercase,
// non-alphanumeric runs collapsed to single dashes, edge dashes stripped,
// truncated to `max` characters. Falls back to 'task' when nothing remains.
function slug(value, max = 40) {
  const cleaned = String(value || '')
    .toLowerCase()
    .replace(/[^a-z0-9]+/g, '-')
    .replace(/^-+|-+$/g, '')
    .slice(0, max);
  return cleaned || 'task';
}
12
24
 
13
25
  export function detectWorktreeContext(baseDir = process.cwd()) {
14
26
  try {
15
- const topLevel = runGit('git rev-parse --show-toplevel', baseDir);
16
- const gitDir = runGit('git rev-parse --git-dir', baseDir);
17
- const commonDir = runGit('git rev-parse --git-common-dir', baseDir);
18
-
19
- const isPrimaryWorktree = normalize(gitDir) === normalize(commonDir);
27
+ const topLevel = runGit(['rev-parse', '--show-toplevel'], baseDir);
28
+ const gitDir = runGit(['rev-parse', '--git-dir'], baseDir);
29
+ const commonDir = runGit(['rev-parse', '--git-common-dir'], baseDir);
20
30
 
21
31
  return {
22
32
  isGitRepo: true,
23
33
  topLevel,
24
- isPrimaryWorktree
34
+ isPrimaryWorktree: normalizePath(gitDir) === normalizePath(commonDir)
25
35
  };
26
36
  } catch {
27
37
  return {
@@ -32,14 +42,6 @@ export function detectWorktreeContext(baseDir = process.cwd()) {
32
42
  }
33
43
  }
34
44
 
35
- function slug(value) {
36
- return String(value || '')
37
- .toLowerCase()
38
- .replace(/[^a-z0-9]+/g, '-')
39
- .replace(/^-+|-+$/g, '')
40
- .slice(0, 40);
41
- }
42
-
43
45
  export function createLinkedWorktree({
44
46
  baseDir = process.cwd(),
45
47
  commandName = 'change'
@@ -50,8 +52,8 @@ export function createLinkedWorktree({
50
52
  }
51
53
 
52
54
  const topLevel = context.topLevel;
53
- const repoName = slug(path.basename(topLevel)) || 'repo';
54
- const commandSlug = slug(commandName) || 'change';
55
+ const repoName = slug(path.basename(topLevel), 24);
56
+ const commandSlug = slug(commandName, 24);
55
57
  const stamp = Date.now();
56
58
  const branchName = `feature/pdd-auto-${commandSlug}-${stamp}`;
57
59
 
@@ -92,3 +94,31 @@ export function enforceLinkedWorktree({
92
94
 
93
95
  return context;
94
96
  }
97
+
98
// When invoked from the primary git worktree, create a linked worktree and
// re-run the same CLI invocation inside it. Returns true when the work was
// delegated to a child process (the caller should stop), false when
// execution should continue in place (opt-out flag, not a git repo, or
// already in a linked worktree).
export function maybeAutoRelocateToWorktree({ cwd, argv, commandName }) {
  // Explicit escape hatch: the user asked to stay in the main worktree.
  if (argv.includes('--allow-main-worktree')) {
    return false;
  }

  const context = detectWorktreeContext(cwd);
  if (!context.isGitRepo || !context.isPrimaryWorktree) {
    return false;
  }

  const { worktreePath, branchName } = createLinkedWorktree({
    baseDir: cwd,
    commandName
  });

  console.log('🔀 Primary worktree detected. Auto-created linked worktree for task execution.');
  console.log(`- branch: ${branchName}`);
  console.log(`- path: ${worktreePath}`);
  console.log('');

  try {
    // Re-invoke the same script with the same arguments inside the worktree.
    execFileSync(process.execPath, [process.argv[1], ...argv], {
      cwd: worktreePath,
      stdio: 'inherit'
    });
  } catch (error) {
    // A failing child would otherwise surface as an unhandled execFileSync
    // exception (stack trace) and its real exit code would be lost.
    // Propagate the child's status so the parent exits the same way.
    process.exitCode = typeof error?.status === 'number' ? error.status : 1;
  }

  return true;
}