claude-autopm 1.18.0 → 1.20.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +159 -0
- package/autopm/.claude/agents/core/mcp-manager.md +1 -1
- package/autopm/.claude/commands/pm/context.md +11 -0
- package/autopm/.claude/commands/pm/epic-decompose.md +25 -2
- package/autopm/.claude/commands/pm/epic-oneshot.md +13 -0
- package/autopm/.claude/commands/pm/epic-start.md +19 -0
- package/autopm/.claude/commands/pm/epic-sync-modular.md +10 -10
- package/autopm/.claude/commands/pm/epic-sync.md +14 -14
- package/autopm/.claude/commands/pm/issue-start.md +50 -5
- package/autopm/.claude/commands/pm/issue-sync.md +15 -15
- package/autopm/.claude/commands/pm/what-next.md +11 -0
- package/autopm/.claude/mcp/MCP-REGISTRY.md +1 -1
- package/autopm/.claude/scripts/azure/active-work.js +2 -2
- package/autopm/.claude/scripts/azure/blocked.js +13 -13
- package/autopm/.claude/scripts/azure/daily.js +1 -1
- package/autopm/.claude/scripts/azure/dashboard.js +1 -1
- package/autopm/.claude/scripts/azure/feature-list.js +2 -2
- package/autopm/.claude/scripts/azure/feature-status.js +1 -1
- package/autopm/.claude/scripts/azure/next-task.js +1 -1
- package/autopm/.claude/scripts/azure/search.js +1 -1
- package/autopm/.claude/scripts/azure/setup.js +15 -15
- package/autopm/.claude/scripts/azure/sprint-report.js +2 -2
- package/autopm/.claude/scripts/azure/sync.js +1 -1
- package/autopm/.claude/scripts/azure/us-list.js +1 -1
- package/autopm/.claude/scripts/azure/us-status.js +1 -1
- package/autopm/.claude/scripts/azure/validate.js +13 -13
- package/autopm/.claude/scripts/lib/frontmatter-utils.sh +42 -7
- package/autopm/.claude/scripts/lib/logging-utils.sh +20 -16
- package/autopm/.claude/scripts/lib/validation-utils.sh +1 -1
- package/autopm/.claude/scripts/pm/context.js +338 -0
- package/autopm/.claude/scripts/pm/issue-sync/format-comment.sh +3 -3
- package/autopm/.claude/scripts/pm/lib/README.md +85 -0
- package/autopm/.claude/scripts/pm/lib/logger.js +78 -0
- package/autopm/.claude/scripts/pm/next.js +25 -1
- package/autopm/.claude/scripts/pm/what-next.js +660 -0
- package/bin/autopm.js +25 -0
- package/package.json +1 -1
- package/lib/agentExecutor.js.deprecated +0 -101
- package/lib/azure/cache.js +0 -80
- package/lib/azure/client.js +0 -77
- package/lib/azure/formatter.js +0 -177
- package/lib/commandHelpers.js +0 -177
- package/lib/context/manager.js +0 -290
- package/lib/documentation/manager.js +0 -528
- package/lib/github/workflow-manager.js +0 -546
- package/lib/helpers/azure-batch-api.js +0 -133
- package/lib/helpers/azure-cache-manager.js +0 -287
- package/lib/helpers/azure-parallel-processor.js +0 -158
- package/lib/helpers/azure-work-item-create.js +0 -278
- package/lib/helpers/gh-issue-create.js +0 -250
- package/lib/helpers/interactive-prompt.js +0 -336
- package/lib/helpers/output-manager.js +0 -335
- package/lib/helpers/progress-indicator.js +0 -258
- package/lib/performance/benchmarker.js +0 -429
- package/lib/pm/epic-decomposer.js +0 -273
- package/lib/pm/epic-syncer.js +0 -221
- package/lib/prdMetadata.js +0 -270
- package/lib/providers/azure/index.js +0 -234
- package/lib/providers/factory.js +0 -87
- package/lib/providers/github/index.js +0 -204
- package/lib/providers/interface.js +0 -73
- package/lib/python/scaffold-manager.js +0 -576
- package/lib/react/scaffold-manager.js +0 -745
- package/lib/regression/analyzer.js +0 -578
- package/lib/release/manager.js +0 -324
- package/lib/tailwind/manager.js +0 -486
- package/lib/traefik/manager.js +0 -484
- package/lib/utils/colors.js +0 -126
- package/lib/utils/config.js +0 -317
- package/lib/utils/filesystem.js +0 -316
- package/lib/utils/logger.js +0 -135
- package/lib/utils/prompts.js +0 -294
- package/lib/utils/shell.js +0 -237
- package/lib/validators/email-validator.js +0 -337
- package/lib/workflow/manager.js +0 -449
|
@@ -1,429 +0,0 @@
|
|
|
1
|
-
/**
|
|
2
|
-
* Performance Benchmarker
|
|
3
|
-
* Centralized performance benchmarking functionality
|
|
4
|
-
*/
|
|
5
|
-
|
|
6
|
-
const fs = require('fs').promises;
|
|
7
|
-
const path = require('path');
|
|
8
|
-
const { performance } = require('perf_hooks');
|
|
9
|
-
const v8 = require('v8');
|
|
10
|
-
|
|
11
|
-
/**
|
|
12
|
-
* Configuration
|
|
13
|
-
*/
|
|
14
|
-
/**
 * Static configuration for the benchmarker: artifact output location,
 * default metric set, and artifact filename templates.
 */
const CONFIG = {
  directories: {
    // All benchmark artifacts live under this project-relative directory.
    benchmarks: '.claude/benchmarks'
  },
  defaults: {
    // Metrics collected when the caller does not request a specific subset.
    metrics: ['time', 'memory', 'cpu'],
    threshold: 20 // 20% regression threshold
  },
  filePatterns: {
    // "{timestamp}" is replaced with a filename-safe ISO timestamp at write time.
    benchmark: 'benchmark-{timestamp}.json',
    cpuProfile: 'cpu-profile-{timestamp}.cpuprofile',
    heapSnapshot: 'heap-snapshot-{timestamp}.heapsnapshot'
  }
};
|
|
28
|
-
|
|
29
|
-
/**
 * Collects execution-time, memory and CPU metrics for a target path,
 * persists benchmark runs as JSON under `.claude/benchmarks`, and compares
 * runs against previous baselines to detect regressions.
 */
class PerformanceBenchmarker {
  /**
   * @param {string} [projectRoot=process.cwd()] - Project root directory;
   *   all benchmark artifacts are written below it.
   */
  constructor(projectRoot = process.cwd()) {
    this.projectRoot = projectRoot;
    this.benchmarkDir = path.join(projectRoot, CONFIG.directories.benchmarks);
  }

  /**
   * Rounds a number to two decimal places.
   * @param {number} value
   * @returns {number}
   */
  _round2(value) {
    return Math.round(value * 100) / 100;
  }

  /**
   * Percentage change from `previous` to `current` (two-decimal precision).
   * Callers must ensure `previous` is non-zero.
   * @param {number} current
   * @param {number} previous
   * @returns {number}
   */
  _percentChange(current, previous) {
    return this._round2(((current - previous) / previous) * 100);
  }

  /**
   * Filename-safe timestamp slug (ISO timestamp with `:` and `.` dashed).
   * @returns {string}
   */
  _timestampSlug() {
    return new Date().toISOString().replace(/[:.]/g, '-');
  }

  /**
   * Compares one metric across two metric maps.
   * Returns null when either side is missing or the baseline is zero
   * (would divide by zero). Uses `!= null` rather than truthiness so a
   * legitimate 0 measurement on the current side is still compared
   * (the previous truthy check silently skipped zeros).
   * @param {object} currentMetrics
   * @param {object} previousMetrics
   * @param {string} metric
   * @returns {?{current:number, previous:number, change:number, regression:boolean}}
   */
  _compareMetric(currentMetrics, previousMetrics, metric) {
    const current = currentMetrics[metric];
    const previous = previousMetrics[metric];
    if (current == null || previous == null || previous === 0) {
      return null;
    }
    const change = this._percentChange(current, previous);
    return { current, previous, change, regression: change > 0 };
  }

  /**
   * Measures wall-clock execution time of a (possibly async) function.
   * @param {Function} fn - Invoked once and awaited.
   * @returns {Promise<number>} Elapsed milliseconds, two-decimal precision.
   */
  async measureExecutionTime(fn) {
    const start = performance.now();
    await fn();
    const end = performance.now();
    return this._round2(end - start);
  }

  /**
   * Snapshot of current process memory usage, converted to megabytes.
   * @returns {{heapUsed:number, heapTotal:number, external:number, rss:number}}
   */
  getMemoryUsage() {
    const usage = process.memoryUsage();
    const toMB = (bytes) => this._round2(bytes / 1024 / 1024);
    return {
      heapUsed: toMB(usage.heapUsed),
      heapTotal: toMB(usage.heapTotal),
      external: toMB(usage.external),
      rss: toMB(usage.rss)
    };
  }

  /**
   * Approximate CPU usage sampled over a short synthetic workload.
   * The busy loop only exists to give `process.cpuUsage()` something to
   * measure; `percent` is a rough approximation, not true utilization.
   * @returns {{user:number, system:number, percent:number}} Times in ms.
   */
  getCPUUsage() {
    const startUsage = process.cpuUsage();

    // Simulate some work to measure
    let sum = 0;
    for (let i = 0; i < 1000000; i++) {
      sum += Math.sqrt(i);
    }

    const usage = process.cpuUsage(startUsage);
    return {
      user: Math.round(usage.user / 1000), // ms
      system: Math.round(usage.system / 1000), // ms
      percent: Math.round((usage.user + usage.system) / 10000) // approximate %
    };
  }

  /**
   * Analyzes a target path, dispatching to directory or file analysis.
   * Deliberately best-effort: an inaccessible/nonexistent path yields a
   * default "unknown" result instead of throwing.
   * @param {string} targetPath
   * @returns {Promise<object>} Analysis record with at least `type`.
   */
  async analyzeTarget(targetPath) {
    try {
      const stats = await fs.stat(targetPath);
      return stats.isDirectory()
        ? await this.analyzeDirectory(targetPath)
        : await this.analyzeFile(targetPath);
    } catch (error) {
      // Return default analysis if path doesn't exist
      return { type: 'unknown', files: 0, size: 0 };
    }
  }

  /**
   * Counts files and total size in a directory (non-recursive),
   * skipping dot-entries and anything that cannot be stat'ed.
   * @param {string} dirPath
   * @returns {Promise<{type:string, files:number, size:number, sizeKB:number}>}
   */
  async analyzeDirectory(dirPath) {
    const entries = await fs.readdir(dirPath);
    let fileCount = 0;
    let totalSize = 0;

    for (const entry of entries) {
      if (entry.startsWith('.')) continue;

      const fullPath = path.join(dirPath, entry);
      try {
        const stats = await fs.stat(fullPath);
        if (stats.isFile()) {
          fileCount++;
          totalSize += stats.size;
        }
      } catch (error) {
        // Skip inaccessible files
      }
    }

    return {
      type: 'directory',
      files: fileCount,
      size: totalSize,
      sizeKB: Math.round(totalSize / 1024)
    };
  }

  /**
   * Analyzes a single file: size and (when readable as UTF-8) line count.
   * @param {string} filePath
   * @returns {Promise<{type:string, name:string, size:number, sizeKB:number, lines:number}>}
   */
  async analyzeFile(filePath) {
    const stats = await fs.stat(filePath);
    let lines = 0;

    try {
      const content = await fs.readFile(filePath, 'utf8');
      lines = content.split('\n').length;
    } catch (error) {
      // Can't read file content; leave lines = 0
    }

    return {
      type: 'file',
      name: path.basename(filePath),
      size: stats.size,
      sizeKB: Math.round(stats.size / 1024),
      lines
    };
  }

  /**
   * Runs a performance benchmark against a target path.
   * @param {string} [targetPath='.'] - File or directory to analyze.
   * @param {object} [options]
   * @param {string|string[]} [options.metrics] - Metric names ('time',
   *   'memory', 'cpu'); a comma-separated string is also accepted.
   *   Defaults to CONFIG.defaults.metrics.
   * @param {boolean} [options.save] - Persist the run via saveBenchmark().
   * @returns {Promise<object>} Benchmark record (timestamp, target,
   *   analysis, metrics, environment).
   */
  async runBenchmark(targetPath = '.', options = {}) {
    const metrics = {};
    const requestedMetrics = options.metrics ?
      (typeof options.metrics === 'string' ?
        options.metrics.split(',').map(m => m.trim()) :
        options.metrics) :
      CONFIG.defaults.metrics;

    // Analyze target
    const analysis = await this.analyzeTarget(targetPath);

    // Measure execution time (re-analyzes the target plus a fixed delay
    // to simulate a representative workload)
    if (requestedMetrics.includes('time')) {
      metrics.executionTime = await this.measureExecutionTime(async () => {
        await this.analyzeTarget(targetPath);
        await new Promise(resolve => setTimeout(resolve, 50));
      });
    }

    // Measure memory usage
    if (requestedMetrics.includes('memory')) {
      const memory = this.getMemoryUsage();
      metrics.memoryUsage = memory.heapUsed;
      metrics.memoryDetails = memory;
    }

    // Measure CPU usage
    if (requestedMetrics.includes('cpu')) {
      const cpu = this.getCPUUsage();
      metrics.cpuUsage = cpu.percent;
      metrics.cpuDetails = cpu;
    }

    const benchmarkData = {
      timestamp: new Date().toISOString(),
      target: targetPath,
      analysis,
      metrics,
      environment: {
        node: process.version,
        platform: process.platform,
        arch: process.arch
      }
    };

    if (options.save) {
      await this.saveBenchmark(benchmarkData);
    }

    return benchmarkData;
  }

  /**
   * Persists a benchmark record as pretty-printed JSON.
   * @param {object} benchmarkData
   * @returns {Promise<string>} Path of the written file.
   */
  async saveBenchmark(benchmarkData) {
    await fs.mkdir(this.benchmarkDir, { recursive: true });

    const filename = CONFIG.filePatterns.benchmark
      .replace('{timestamp}', this._timestampSlug());
    const benchmarkPath = path.join(this.benchmarkDir, filename);

    await fs.writeFile(benchmarkPath, JSON.stringify(benchmarkData, null, 2));

    return benchmarkPath;
  }

  /**
   * Loads all previously saved benchmark JSON files.
   * Invalid JSON files are skipped; a missing benchmark directory yields [].
   * @returns {Promise<object[]>} Records, each annotated with its `file` name.
   */
  async loadBenchmarks() {
    try {
      const files = await fs.readdir(this.benchmarkDir);
      const benchmarkFiles = files.filter(f => f.endsWith('.json'));

      const benchmarks = [];
      for (const file of benchmarkFiles.sort()) {
        try {
          const data = JSON.parse(
            await fs.readFile(path.join(this.benchmarkDir, file), 'utf8')
          );
          benchmarks.push({ file, ...data });
        } catch (error) {
          // Skip invalid files
        }
      }

      return benchmarks;
    } catch (error) {
      if (error.code === 'ENOENT') {
        return [];
      }
      throw error;
    }
  }

  /**
   * Compares current metrics with a previous benchmark's metrics.
   * Only metrics present (non-null) on both sides with a non-zero previous
   * value are compared.
   * @param {object} currentMetrics
   * @param {object} previousMetrics
   * @returns {object} Per-metric {current, previous, change, regression}.
   */
  compareBenchmarks(currentMetrics, previousMetrics) {
    const comparison = {};
    for (const metric of ['executionTime', 'memoryUsage', 'cpuUsage']) {
      const result = this._compareMetric(currentMetrics, previousMetrics, metric);
      if (result) {
        comparison[metric] = result;
      }
    }
    return comparison;
  }

  /**
   * Checks for performance regressions above a percentage threshold.
   * @param {object} currentMetrics
   * @param {object} baselineMetrics
   * @param {number} threshold - Percent increase that counts as a regression.
   * @returns {Array<{metric:string, change:number, threshold:number}>}
   */
  checkThreshold(currentMetrics, baselineMetrics, threshold) {
    const regressions = [];
    for (const metric of ['executionTime', 'memoryUsage', 'cpuUsage']) {
      const result = this._compareMetric(currentMetrics, baselineMetrics, metric);
      if (result && result.change > threshold) {
        regressions.push({ metric, change: result.change, threshold });
      }
    }
    return regressions;
  }

  /**
   * Generates a placeholder CPU profile file.
   * Simulated profiling only — a real implementation would use a profiler
   * such as v8-profiler-next or the inspector protocol.
   * @returns {Promise<string>} Path of the written profile.
   */
  async generateCPUProfile() {
    await fs.mkdir(this.benchmarkDir, { recursive: true });

    const filename = CONFIG.filePatterns.cpuProfile
      .replace('{timestamp}', this._timestampSlug());
    const profilePath = path.join(this.benchmarkDir, filename);

    const profile = {
      startTime: Date.now(),
      endTime: Date.now() + 1000,
      samples: [],
      timeDeltas: [],
      nodes: [
        {
          id: 1,
          callFrame: {
            functionName: '(root)',
            lineNumber: 0,
            columnNumber: 0
          }
        }
      ]
    };

    await fs.writeFile(profilePath, JSON.stringify(profile, null, 2));

    return profilePath;
  }

  /**
   * Generates a V8 heap snapshot.
   * BUG FIX: v8.writeHeapSnapshot() returns the generated *filename*
   * (a string), not a stream, so the previous for-await/Buffer.concat
   * over its result could never succeed and always fell through to the
   * mock. We now let v8 write straight to the target path; the mock
   * fallback remains for environments where snapshotting fails.
   * @returns {Promise<string>} Path of the written snapshot.
   */
  async generateHeapSnapshot() {
    await fs.mkdir(this.benchmarkDir, { recursive: true });

    const filename = CONFIG.filePatterns.heapSnapshot
      .replace('{timestamp}', this._timestampSlug());
    const snapshotPath = path.join(this.benchmarkDir, filename);

    try {
      // Write real heap snapshot directly to the target path
      v8.writeHeapSnapshot(snapshotPath);
    } catch (error) {
      // Fallback for testing environments
      const mockSnapshot = {
        snapshot: {
          meta: {
            node_version: process.version,
            title: 'Heap Snapshot'
          },
          node_count: 1000,
          edge_count: 2000
        }
      };
      await fs.writeFile(snapshotPath, JSON.stringify(mockSnapshot));
    }

    return snapshotPath;
  }

  /**
   * Returns the most recent benchmark records, newest first.
   * @param {number} [limit=10] - Maximum number of records to return.
   * @returns {Promise<object[]>}
   */
  async getHistory(limit = 10) {
    const benchmarks = await this.loadBenchmarks();

    benchmarks.sort((a, b) =>
      new Date(b.timestamp).getTime() - new Date(a.timestamp).getTime()
    );

    return benchmarks.slice(0, limit);
  }
}
|
|
428
|
-
|
|
429
|
-
// Public entry point: consumers do `new PerformanceBenchmarker(projectRoot)`.
module.exports = PerformanceBenchmarker;
|
|
@@ -1,273 +0,0 @@
|
|
|
1
|
-
#!/usr/bin/env node
|
|
2
|
-
|
|
3
|
-
/**
|
|
4
|
-
* Epic Decomposer
|
|
5
|
-
* Decomposes PRD into Epic structure based on provider
|
|
6
|
-
*/
|
|
7
|
-
|
|
8
|
-
const fs = require('fs-extra');
|
|
9
|
-
const path = require('path');
|
|
10
|
-
|
|
11
|
-
/**
 * Decomposes a PRD (Product Requirements Document) into an Epic structure.
 * Azure DevOps gets a hierarchical Epic -> User Stories -> Tasks layout;
 * GitHub gets a flat Epic -> Issues list.
 */
class EpicDecomposer {
  /**
   * @param {{provider?: string}} [options] - `provider` is 'azure' or
   *   'github'; when omitted it is detected from `.claude/config.json`.
   */
  constructor(options = {}) {
    this.provider = options.provider || this.detectProvider();
  }

  /**
   * Detects the work-item provider from `.claude/config.json` in the
   * current working directory, defaulting to 'github' when the file is
   * missing, unreadable, or has no `provider` key.
   * @returns {string} 'azure' or 'github'.
   */
  detectProvider() {
    try {
      const configPath = path.join(process.cwd(), '.claude', 'config.json');
      if (fs.existsSync(configPath)) {
        const config = fs.readJsonSync(configPath);
        return config.provider || 'github';
      }
    } catch (error) {
      console.error(`Error detecting provider: ${error.message}`);
    }
    return 'github';
  }

  /**
   * Decomposes a PRD markdown file into an Epic and writes both a markdown
   * rendering (for humans) and a JSON copy (for tooling) under
   * `.claude/epics/`.
   * @param {string} prdName - PRD basename without the `.md` extension.
   * @returns {Promise<object>} The epic structure.
   * @throws {Error} When `.claude/prds/<prdName>.md` does not exist.
   */
  async decompose(prdName) {
    const prdPath = path.join(process.cwd(), '.claude', 'prds', `${prdName}.md`);

    if (!await fs.pathExists(prdPath)) {
      throw new Error(`PRD not found: ${prdName}`);
    }

    const prdContent = await fs.readFile(prdPath, 'utf8');
    const prd = this.parsePRD(prdContent);

    const epic = this.provider === 'azure'
      ? await this.decomposeForAzure(prd)
      : await this.decomposeForGitHub(prd);

    // Save epic to file
    const epicPath = path.join(process.cwd(), '.claude', 'epics', `${prdName}.md`);
    await fs.ensureDir(path.dirname(epicPath));
    await fs.writeFile(epicPath, this.formatEpicMarkdown(epic));

    // Also save as JSON for easier processing
    const epicJsonPath = path.join(process.cwd(), '.claude', 'epics', `${prdName}.json`);
    await fs.writeJson(epicJsonPath, epic, { spaces: 2 });

    return epic;
  }

  /**
   * Parses PRD markdown into title, user stories, acceptance criteria and
   * tasks. Recognizes `## User Stories`, `## Acceptance Criteria` and
   * `## Tasks` headings (case-insensitive) and collects `- ` bullet items
   * under the most recent heading.
   * @param {string} content - Raw markdown.
   * @returns {{title:string, userStories:string[], acceptanceCriteria:string[], tasks:string[]}}
   */
  parsePRD(content) {
    const prd = {
      title: '',
      userStories: [],
      acceptanceCriteria: [],
      tasks: []
    };

    // Maps a heading line to the PRD bucket it opens, or null.
    const sectionFor = (line) => {
      const lower = line.toLowerCase();
      if (lower.includes('## user stories')) return 'userStories';
      if (lower.includes('## acceptance criteria')) return 'acceptanceCriteria';
      if (lower.includes('## tasks')) return 'tasks';
      return null;
    };

    let currentSection = '';

    for (const line of content.split('\n')) {
      if (line.startsWith('# ')) {
        // Top-level heading is the PRD title.
        prd.title = line.substring(2).trim();
        continue;
      }
      const section = sectionFor(line);
      if (section) {
        currentSection = section;
      } else if (line.startsWith('- ') && currentSection) {
        prd[currentSection].push(line.substring(2).trim());
      }
    }

    return prd;
  }

  /**
   * Builds an Azure DevOps hierarchy: Epic -> User Stories -> Tasks.
   * Every story currently inherits the PRD-level acceptance criteria.
   * @param {object} prd - Parsed PRD from parsePRD().
   * @returns {Promise<object>} Epic structure with `userStories`.
   */
  async decomposeForAzure(prd) {
    const epic = {
      type: 'Epic',
      title: prd.title,
      description: `Epic for ${prd.title}`,
      provider: 'azure',
      userStories: []
    };

    for (const storyText of prd.userStories) {
      epic.userStories.push({
        title: storyText,
        acceptanceCriteria: prd.acceptanceCriteria,
        tasks: this.generateTasksForStory(storyText)
      });
    }

    return epic;
  }

  /**
   * Builds a GitHub structure: Epic -> flat Issues list.
   * Explicit PRD tasks become issues directly; otherwise issues are
   * generated from each user story's default task breakdown.
   * @param {object} prd - Parsed PRD from parsePRD().
   * @returns {Promise<object>} Epic structure with `issues`.
   */
  async decomposeForGitHub(prd) {
    const epic = {
      type: 'Epic',
      title: prd.title,
      description: `Epic for ${prd.title}`,
      provider: 'github',
      issues: []
    };

    if (prd.tasks && prd.tasks.length > 0) {
      epic.issues = prd.tasks.map(task => ({
        type: 'Issue',
        title: task
      }));
    } else {
      for (const story of prd.userStories) {
        for (const task of this.generateTasksForStory(story)) {
          epic.issues.push({
            type: 'Issue',
            title: task.title
          });
        }
      }
    }

    return epic;
  }

  /**
   * Generates a default development-task breakdown for a user story.
   * Stories mentioning register/login/manage/admin get tailored task
   * lists; anything else gets a generic design/API/UI/tests breakdown.
   * @param {string} story - User story text.
   * @returns {Array<{title:string, remainingWork:number}>} remainingWork in hours.
   */
  generateTasksForStory(story) {
    const storyLower = story.toLowerCase();

    if (storyLower.includes('register') || storyLower.includes('registration')) {
      return [
        { title: 'Design database schema for user registration', remainingWork: 2 },
        { title: 'Implement registration API endpoint', remainingWork: 4 },
        { title: 'Build registration UI form', remainingWork: 3 },
        { title: 'Add email validation', remainingWork: 2 },
        { title: 'Write unit tests for registration', remainingWork: 3 }
      ];
    }

    if (storyLower.includes('login')) {
      return [
        { title: 'Implement JWT token generation', remainingWork: 3 },
        { title: 'Create login API endpoint', remainingWork: 3 },
        { title: 'Build login UI component', remainingWork: 3 },
        { title: 'Add session management', remainingWork: 2 },
        { title: 'Write integration tests for login', remainingWork: 3 }
      ];
    }

    if (storyLower.includes('manage') || storyLower.includes('admin')) {
      return [
        { title: 'Design admin dashboard UI', remainingWork: 4 },
        { title: 'Implement user management API', remainingWork: 4 },
        { title: 'Add role-based access control', remainingWork: 3 },
        { title: 'Create audit logging', remainingWork: 2 },
        { title: 'Write admin feature tests', remainingWork: 3 }
      ];
    }

    // Generic tasks for any user story; extract the feature name once
    // instead of re-running the regex for every task title.
    const feature = this.extractFeature(story);
    return [
      { title: `Design database schema for ${feature}`, remainingWork: 2 },
      { title: `Implement API for ${feature}`, remainingWork: 4 },
      { title: `Build UI for ${feature}`, remainingWork: 3 },
      { title: `Write tests for ${feature}`, remainingWork: 3 }
    ];
  }

  /**
   * Extracts the feature/action phrase from a user story of the shape
   * "As a <role>, I want to <action> so that <benefit>".
   * The trailing "so that ..." rationale is stripped so generated task
   * titles stay concise (previously the whole rationale leaked into
   * titles). Falls back to the literal string 'feature'.
   * @param {string} story
   * @returns {string}
   */
  extractFeature(story) {
    const match = story.match(/I want to (.+?)(?:\s+so that\b.*)?$/i);
    if (match) {
      return match[1].trim();
    }
    return 'feature';
  }

  /**
   * Renders an epic as markdown with YAML frontmatter. Azure epics list
   * user stories with criteria and task checklists; GitHub epics list
   * issues as a flat checklist.
   * @param {object} epic
   * @returns {string} Markdown document.
   */
  formatEpicMarkdown(epic) {
    let markdown = `---
type: ${epic.type}
title: ${epic.title}
provider: ${epic.provider}
created: ${new Date().toISOString()}
---

# ${epic.title}

${epic.description || ''}

`;

    if (epic.userStories && epic.userStories.length > 0) {
      markdown += '## User Stories\n\n';
      for (const story of epic.userStories) {
        markdown += `### ${story.title}\n\n`;

        if (story.acceptanceCriteria && story.acceptanceCriteria.length > 0) {
          markdown += '**Acceptance Criteria:**\n';
          story.acceptanceCriteria.forEach(criteria => {
            markdown += `- ${criteria}\n`;
          });
          markdown += '\n';
        }

        if (story.tasks && story.tasks.length > 0) {
          markdown += '**Tasks:**\n';
          story.tasks.forEach(task => {
            markdown += `- [ ] ${task.title}`;
            if (task.remainingWork) {
              markdown += ` (${task.remainingWork}h)`;
            }
            markdown += '\n';
          });
          markdown += '\n';
        }
      }
    } else if (epic.issues && epic.issues.length > 0) {
      markdown += '## Issues\n\n';
      epic.issues.forEach(issue => {
        markdown += `- [ ] ${issue.title}\n`;
      });
    }

    return markdown;
  }
}
|
|
272
|
-
|
|
273
|
-
// Public entry point: consumers do `new EpicDecomposer({ provider })`.
module.exports = EpicDecomposer;
|