@vasperacapital/vaspera-mcp-server 0.1.0 → 0.2.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +182 -0
- package/dist/cli.d.ts +1 -0
- package/dist/cli.js +3102 -0
- package/dist/cli.js.map +1 -0
- package/dist/server.d.ts +3 -0
- package/dist/{index.js → server.js} +17 -13
- package/dist/server.js.map +1 -0
- package/package.json +6 -5
- package/src/cli.ts +315 -0
- package/src/middleware/auth.ts +1 -1
- package/src/middleware/rate-limit.ts +1 -1
- package/src/{index.ts → server.ts} +14 -9
- package/tsup.config.ts +5 -4
- package/dist/index.d.ts +0 -2
- package/dist/index.js.map +0 -1
package/dist/cli.js
ADDED
@@ -0,0 +1,3102 @@
+#!/usr/bin/env node
+var __defProp = Object.defineProperty;
+var __getOwnPropNames = Object.getOwnPropertyNames;
+var __esm = (fn, res) => function __init() {
+  return fn && (res = (0, fn[__getOwnPropNames(fn)[0]])(fn = 0)), res;
+};
+var __export = (target, all) => {
+  for (var name in all)
+    __defProp(target, name, { get: all[name], enumerable: true });
+};
+
+// src/middleware/auth.ts
+import { QUOTA_LIMITS } from "@vasperacapital/vaspera-shared";
+async function validateApiKey(apiKey) {
+  if (!apiKey) {
+    return {
+      valid: false,
+      error: {
+        code: "VPM-API-KEY-001",
+        message: "API key is required. Set VASPERA_API_KEY environment variable."
+      }
+    };
+  }
+  if (!apiKey.startsWith("vpm_live_") && !apiKey.startsWith("vpm_test_")) {
+    return {
+      valid: false,
+      error: {
+        code: "VPM-API-KEY-002",
+        message: "Invalid API key format. Keys must start with vpm_live_ or vpm_test_"
+      }
+    };
+  }
+  try {
+    const response = await fetch(`${VALIDATE_KEY_URL}/api/validate-key`, {
+      method: "POST",
+      headers: {
+        "Content-Type": "application/json"
+      },
+      body: JSON.stringify({ apiKey })
+    });
+    const data = await response.json();
+    if (!response.ok || !data.valid) {
+      return {
+        valid: false,
+        error: data.error || {
+          code: "VPM-API-KEY-002",
+          message: "Invalid API key"
+        }
+      };
+    }
+    return {
+      valid: true,
+      userId: data.user?.id,
+      apiKeyId: data.key?.id,
+      tier: data.user?.subscriptionTier,
+      scopes: data.key?.scopes || [],
+      quota: data.quota
+    };
+  } catch (error2) {
+    console.error("API key validation error:", error2);
+    if (process.env.NODE_ENV === "development" && apiKey.startsWith("vpm_test_")) {
+      console.error("Using offline test mode");
+      return {
+        valid: true,
+        userId: "test-user",
+        apiKeyId: "test-key",
+        tier: "free",
+        scopes: ["*"],
+        quota: {
+          limit: QUOTA_LIMITS.free,
+          used: 0,
+          remaining: QUOTA_LIMITS.free
+        }
+      };
+    }
+    return {
+      valid: false,
+      error: {
+        code: "VPM-INTERNAL-001",
+        message: "Failed to validate API key. Please try again."
+      }
+    };
+  }
+}
+var VALIDATE_KEY_URL;
+var init_auth = __esm({
+  "src/middleware/auth.ts"() {
+    "use strict";
+    VALIDATE_KEY_URL = process.env.VASPERA_API_URL || "https://vaspera.dev";
+  }
+});
+
+// src/middleware/usage.ts
+async function trackUsage(event) {
+  if (!INTERNAL_SERVICE_KEY) {
+    console.error("INTERNAL_SERVICE_KEY not configured, skipping usage tracking");
+    return;
+  }
+  try {
+    const response = await fetch(`${USAGE_API_URL}/api/usage`, {
+      method: "POST",
+      headers: {
+        "Content-Type": "application/json",
+        "Authorization": `Bearer ${INTERNAL_SERVICE_KEY}`
+      },
+      body: JSON.stringify({
+        userId: event.userId,
+        apiKeyId: event.apiKeyId,
+        toolName: event.toolName,
+        tokensUsed: event.tokensUsed,
+        latencyMs: event.latencyMs,
+        success: event.success,
+        errorCode: event.errorCode,
+        metadata: event.metadata,
+        requestId: generateRequestId()
+      })
+    });
+    if (!response.ok) {
+      const error2 = await response.json().catch(() => ({}));
+      console.error("Failed to track usage:", error2);
+    }
+  } catch (error2) {
+    console.error("Usage tracking error:", error2);
+  }
+}
+function generateRequestId() {
+  const timestamp = Date.now().toString(36);
+  const random = Math.random().toString(36).substring(2, 8);
+  return `req_${timestamp}_${random}`;
+}
+var USAGE_API_URL, INTERNAL_SERVICE_KEY;
+var init_usage = __esm({
+  "src/middleware/usage.ts"() {
+    "use strict";
+    USAGE_API_URL = process.env.VASPERA_API_URL || "https://vaspera.dev";
+    INTERNAL_SERVICE_KEY = process.env.INTERNAL_SERVICE_KEY;
+  }
+});
+
+// src/middleware/rate-limit.ts
+async function checkRateLimit(userId, tier) {
+  const limit = RATE_LIMITS[tier] || RATE_LIMITS.free;
+  const windowMs = 6e4;
+  const now = Date.now();
+  const key = `rate:${userId}`;
+  let entry = rateLimitStore.get(key);
+  if (!entry || entry.resetAt < now) {
+    entry = {
+      count: 0,
+      resetAt: now + windowMs
+    };
+    rateLimitStore.set(key, entry);
+  }
+  if (entry.count >= limit) {
+    return {
+      allowed: false,
+      remaining: 0,
+      limit,
+      resetsAt: new Date(entry.resetAt)
+    };
+  }
+  entry.count++;
+  rateLimitStore.set(key, entry);
+  return {
+    allowed: true,
+    remaining: limit - entry.count,
+    limit,
+    resetsAt: new Date(entry.resetAt)
+  };
+}
+var RATE_LIMITS, rateLimitStore;
+var init_rate_limit = __esm({
+  "src/middleware/rate-limit.ts"() {
+    "use strict";
+    RATE_LIMITS = {
+      free: 5,
+      // 5 requests per minute
+      starter: 20,
+      // 20 requests per minute
+      pro: 60,
+      // 60 requests per minute (1 per second)
+      enterprise: 300
+      // 300 requests per minute (5 per second)
+    };
+    rateLimitStore = /* @__PURE__ */ new Map();
+    setInterval(() => {
+      const now = Date.now();
+      for (const [key, value] of rateLimitStore.entries()) {
+        if (value.resetAt < now) {
+          rateLimitStore.delete(key);
+        }
+      }
+    }, 6e4);
+  }
+});
+
+// src/tools/types.ts
+function jsonResult(data, tokensUsed) {
+  return {
+    content: [{ type: "text", text: JSON.stringify(data, null, 2) }],
+    tokensUsed
+  };
+}
+function errorResult(message) {
+  return {
+    content: [{ type: "text", text: `Error: ${message}` }]
+  };
+}
+function markdownResult(markdown, tokensUsed) {
+  return {
+    content: [{ type: "text", text: markdown }],
+    tokensUsed
+  };
+}
+var init_types = __esm({
+  "src/tools/types.ts"() {
+    "use strict";
+  }
+});
+
+// src/ai/client.ts
+import Anthropic from "@anthropic-ai/sdk";
+function getAnthropicClient() {
+  if (!anthropicClient) {
+    const apiKey = process.env.ANTHROPIC_API_KEY;
+    if (!apiKey) {
+      throw new Error("ANTHROPIC_API_KEY environment variable is required");
+    }
+    anthropicClient = new Anthropic({ apiKey });
+  }
+  return anthropicClient;
+}
+async function createCompletion(options) {
+  const client = getAnthropicClient();
+  const model = MODELS[options.model || "balanced"];
+  const response = await client.messages.create({
+    model,
+    max_tokens: options.maxTokens || 4096,
+    temperature: options.temperature ?? 0.7,
+    system: options.systemPrompt,
+    messages: [
+      {
+        role: "user",
+        content: options.userMessage
+      }
+    ]
+  });
+  const textContent = response.content.find((c) => c.type === "text");
+  const text = textContent?.type === "text" ? textContent.text : "";
+  return {
+    text,
+    inputTokens: response.usage.input_tokens,
+    outputTokens: response.usage.output_tokens
+  };
+}
+async function createJsonCompletion(options) {
+  const result = await createCompletion({
+    ...options,
+    systemPrompt: `${options.systemPrompt}
+
+IMPORTANT: Respond ONLY with valid JSON. No markdown, no explanation, just the JSON object.`,
+    temperature: 0.3
+    // Lower temperature for structured output
+  });
+  let jsonText = result.text.trim();
+  if (jsonText.startsWith("```json")) {
+    jsonText = jsonText.slice(7);
+  } else if (jsonText.startsWith("```")) {
+    jsonText = jsonText.slice(3);
+  }
+  if (jsonText.endsWith("```")) {
+    jsonText = jsonText.slice(0, -3);
+  }
+  jsonText = jsonText.trim();
+  try {
+    const data = JSON.parse(jsonText);
+    return {
+      data,
+      inputTokens: result.inputTokens,
+      outputTokens: result.outputTokens
+    };
+  } catch (error2) {
+    throw new Error(`Failed to parse AI response as JSON: ${result.text.substring(0, 200)}`);
+  }
+}
+var anthropicClient, MODELS;
+var init_client = __esm({
+  "src/ai/client.ts"() {
+    "use strict";
+    anthropicClient = null;
+    MODELS = {
+      fast: "claude-3-5-haiku-20241022",
+      // Quick responses, lower cost
+      balanced: "claude-sonnet-4-20250514",
+      // Good balance of speed and quality
+      powerful: "claude-sonnet-4-20250514"
+      // Best quality for complex tasks
+    };
+  }
+});
+
+// src/tools/explode-backlog.ts
+var explodeBacklogTool;
+var init_explode_backlog = __esm({
+  "src/tools/explode-backlog.ts"() {
+    "use strict";
+    init_types();
+    init_client();
+    explodeBacklogTool = {
+      tool: {
+        name: "explode_backlog",
+        description: `Break down high-level features or epics into detailed, actionable user stories.
+
+Takes a feature description and generates:
+- User stories with acceptance criteria
+- Story point estimates
+- Priority recommendations
+- Dependencies between stories
+
+Best for: Converting feature ideas into sprint-ready backlog items.`,
+        inputSchema: {
+          type: "object",
+          properties: {
+            feature: {
+              type: "string",
+              description: "The feature or epic to break down into user stories"
+            },
+            context: {
+              type: "string",
+              description: "Optional context about the product, tech stack, or constraints"
+            },
+            format: {
+              type: "string",
+              enum: ["markdown", "json", "jira", "linear"],
+              default: "markdown",
+              description: "Output format for the stories"
+            },
+            maxStories: {
+              type: "number",
+              default: 10,
+              description: "Maximum number of stories to generate"
+            }
+          },
+          required: ["feature"]
+        }
+      },
+      handler: async (args, validation) => {
+        const feature = args.feature;
+        const context = args.context;
+        const format = args.format || "markdown";
+        const maxStories = args.maxStories || 10;
+        if (!feature || feature.trim().length === 0) {
+          return errorResult("Feature description is required");
+        }
+        const systemPrompt = `You are an expert product manager and agile coach. Your task is to break down features into well-structured user stories.
+
+For each user story, provide:
+1. A clear title following the format: "As a [user], I want [goal] so that [benefit]"
+2. Detailed acceptance criteria (at least 3 per story)
+3. Story point estimate (1, 2, 3, 5, 8, or 13)
+4. Priority (High, Medium, Low)
+5. Any dependencies on other stories
+
+Guidelines:
+- Stories should be independent when possible
+- Each story should be completable in a single sprint
+- Include edge cases and error handling in acceptance criteria
+- Consider both happy path and error scenarios
+- Stories should be testable
+
+${context ? `Product Context: ${context}` : ""}`;
+        const userMessage = `Break down the following feature into ${maxStories} or fewer detailed user stories:
+
+Feature: ${feature}
+
+${format === "json" ? "Output as a JSON array of story objects." : format === "jira" ? "Format for Jira import (CSV-compatible)." : format === "linear" ? "Format for Linear import." : "Output in clean markdown format."}`;
+        try {
+          const result = await createCompletion({
+            systemPrompt,
+            userMessage,
+            model: "balanced",
+            maxTokens: 4096
+          });
+          const totalTokens = result.inputTokens + result.outputTokens;
+          return markdownResult(result.text, totalTokens);
+        } catch (error2) {
+          const message = error2 instanceof Error ? error2.message : "Unknown error";
+          return errorResult(`Failed to generate user stories: ${message}`);
+        }
+      },
+      requiredScope: "tools:explode_backlog"
+    };
+  }
+});
+
+// src/tools/infer-prd.ts
+var inferPrdTool;
+var init_infer_prd = __esm({
+  "src/tools/infer-prd.ts"() {
+    "use strict";
+    init_types();
+    init_client();
+    inferPrdTool = {
+      tool: {
+        name: "infer_prd_from_code",
+        description: `Analyze code and generate a Product Requirements Document (PRD).
+
+Takes code snippets, file structures, or repository descriptions and infers:
+- Product overview and purpose
+- Feature list with descriptions
+- User personas and use cases
+- Technical requirements
+- Non-functional requirements
+
+Best for: Documenting existing products or understanding inherited codebases.`,
+        inputSchema: {
+          type: "object",
+          properties: {
+            code: {
+              type: "string",
+              description: "Code snippets, file structure, or repository content to analyze"
+            },
+            repoDescription: {
+              type: "string",
+              description: "Optional description of the repository or project"
+            },
+            techStack: {
+              type: "string",
+              description: 'Technology stack used (e.g., "Next.js, TypeScript, PostgreSQL")'
+            },
+            focusAreas: {
+              type: "array",
+              items: { type: "string" },
+              description: 'Specific areas to focus on (e.g., ["authentication", "payments"])'
+            }
+          },
+          required: ["code"]
+        }
+      },
+      handler: async (args, validation) => {
+        const code = args.code;
+        const repoDescription = args.repoDescription;
+        const techStack = args.techStack;
+        const focusAreas = args.focusAreas;
+        if (!code || code.trim().length === 0) {
+          return errorResult("Code content is required");
+        }
+        const systemPrompt = `You are an expert product manager who specializes in reverse-engineering products from code. Your task is to analyze code and generate a comprehensive PRD.
+
+The PRD should include:
+1. **Product Overview** - What the product does and its purpose
+2. **Target Users** - Who uses this product and why
+3. **Core Features** - List of main features with descriptions
+4. **User Stories** - Key user journeys
+5. **Technical Requirements** - Architecture and technical constraints
+6. **Non-Functional Requirements** - Performance, security, scalability
+7. **Future Considerations** - Potential improvements or extensions
+
+Guidelines:
+- Infer purpose from code patterns and naming conventions
+- Identify API endpoints and their purposes
+- Note database schema and data models
+- Consider error handling and edge cases
+- Look for authentication/authorization patterns
+
+${techStack ? `Tech Stack: ${techStack}` : ""}
+${repoDescription ? `Repository Description: ${repoDescription}` : ""}
+${focusAreas?.length ? `Focus Areas: ${focusAreas.join(", ")}` : ""}`;
+        const userMessage = `Analyze the following code and generate a comprehensive PRD:
+
+\`\`\`
+${code.length > 15e3 ? code.substring(0, 15e3) + "\n... [truncated]" : code}
+\`\`\`
+
+Generate a detailed PRD in markdown format.`;
+        try {
+          const result = await createCompletion({
+            systemPrompt,
+            userMessage,
+            model: "balanced",
+            maxTokens: 4096
+          });
+          const totalTokens = result.inputTokens + result.outputTokens;
+          return markdownResult(result.text, totalTokens);
+        } catch (error2) {
+          const message = error2 instanceof Error ? error2.message : "Unknown error";
+          return errorResult(`Failed to generate PRD: ${message}`);
+        }
+      },
+      requiredScope: "tools:infer_prd"
+    };
+  }
+});
+
+// src/tools/synthesize-prd.ts
+var synthesizePrdTool;
+var init_synthesize_prd = __esm({
+  "src/tools/synthesize-prd.ts"() {
+    "use strict";
+    init_types();
+    init_client();
+    synthesizePrdTool = {
+      tool: {
+        name: "synthesize_master_prd",
+        description: `Synthesize multiple input documents into a unified Master PRD.
+
+Takes multiple sources (meeting notes, emails, specs, designs) and creates:
+- Unified product vision
+- Consolidated requirements
+- Prioritized feature list
+- Clear success metrics
+- Risk assessment
+
+Best for: Creating a single source of truth from scattered documentation.`,
+        inputSchema: {
+          type: "object",
+          properties: {
+            documents: {
+              type: "array",
+              items: {
+                type: "object",
+                properties: {
+                  title: { type: "string" },
+                  content: { type: "string" },
+                  type: {
+                    type: "string",
+                    enum: ["meeting_notes", "email", "spec", "design", "feedback", "other"]
+                  }
+                },
+                required: ["content"]
+              },
+              description: "Array of documents to synthesize"
+            },
+            productName: {
+              type: "string",
+              description: "Name of the product"
+            },
+            targetAudience: {
+              type: "string",
+              description: "Description of the target users"
+            },
+            constraints: {
+              type: "string",
+              description: "Any constraints (timeline, budget, tech limitations)"
+            }
+          },
+          required: ["documents"]
+        }
+      },
+      handler: async (args, validation) => {
+        const documents = args.documents;
+        const productName = args.productName;
+        const targetAudience = args.targetAudience;
+        const constraints = args.constraints;
+        if (!documents || documents.length === 0) {
+          return errorResult("At least one document is required");
+        }
+        const systemPrompt = `You are an expert product manager who excels at synthesizing scattered information into clear, actionable PRDs.
+
+Create a Master PRD with the following sections:
+1. **Executive Summary** - High-level overview
+2. **Product Vision** - What we're building and why
+3. **Target Users** - Who benefits and their needs
+4. **Goals & Success Metrics** - How we measure success
+5. **Features & Requirements** - Detailed feature list with priorities
+6. **User Journeys** - Key workflows
+7. **Technical Considerations** - Architecture implications
+8. **Risks & Mitigations** - What could go wrong
+9. **Timeline & Milestones** - High-level roadmap
+10. **Open Questions** - Items needing clarification
+
+Guidelines:
+- Resolve contradictions between documents by noting them
+- Extract implicit requirements from feedback and discussions
+- Identify gaps that need stakeholder input
+- Prioritize based on user impact and business value
+
+${productName ? `Product Name: ${productName}` : ""}
+${targetAudience ? `Target Audience: ${targetAudience}` : ""}
+${constraints ? `Constraints: ${constraints}` : ""}`;
+        const formattedDocs = documents.map((doc, i) => {
+          const title = doc.title || `Document ${i + 1}`;
+          const type = doc.type || "other";
+          return `### ${title} (${type})
+${doc.content}`;
+        }).join("\n\n---\n\n");
+        const userMessage = `Synthesize the following documents into a comprehensive Master PRD:
+
+${formattedDocs.length > 2e4 ? formattedDocs.substring(0, 2e4) + "\n... [truncated]" : formattedDocs}
+
+Create a unified PRD that captures all requirements and resolves any conflicts.`;
+        try {
+          const result = await createCompletion({
+            systemPrompt,
+            userMessage,
+            model: "balanced",
+            maxTokens: 6e3
+          });
+          const totalTokens = result.inputTokens + result.outputTokens;
+          return markdownResult(result.text, totalTokens);
+        } catch (error2) {
+          const message = error2 instanceof Error ? error2.message : "Unknown error";
+          return errorResult(`Failed to synthesize PRD: ${message}`);
+        }
+      },
+      requiredScope: "tools:synthesize_prd"
+    };
+  }
+});
+
+// src/tools/generate-architecture.ts
+var generateArchitectureTool;
+var init_generate_architecture = __esm({
+  "src/tools/generate-architecture.ts"() {
+    "use strict";
+    init_types();
+    init_client();
+    generateArchitectureTool = {
+      tool: {
+        name: "generate_architecture",
+        description: `Generate a technical architecture document from product requirements.
+
+Takes a PRD or feature description and creates:
+- System architecture diagram (in Mermaid)
+- Component breakdown
+- Data models and schemas
+- API design recommendations
+- Technology stack suggestions
+- Scalability considerations
+
+Best for: Translating product requirements into technical specifications.`,
+        inputSchema: {
+          type: "object",
+          properties: {
+            requirements: {
+              type: "string",
+              description: "Product requirements or PRD content"
+            },
+            existingStack: {
+              type: "string",
+              description: "Existing technology stack to build upon"
+            },
+            constraints: {
+              type: "object",
+              properties: {
+                budget: { type: "string" },
+                timeline: { type: "string" },
+                team: { type: "string" },
+                compliance: { type: "array", items: { type: "string" } }
+              },
+              description: "Technical and business constraints"
+            },
+            focus: {
+              type: "string",
+              enum: ["backend", "frontend", "fullstack", "infrastructure", "data"],
+              default: "fullstack",
+              description: "Architecture focus area"
+            }
+          },
+          required: ["requirements"]
+        }
+      },
+      handler: async (args, validation) => {
+        const requirements = args.requirements;
+        const existingStack = args.existingStack;
+        const constraints = args.constraints;
+        const focus = args.focus || "fullstack";
+        if (!requirements || requirements.trim().length === 0) {
+          return errorResult("Requirements are required");
+        }
+        const systemPrompt = `You are a senior solutions architect specializing in modern software systems. Your task is to create a comprehensive technical architecture document.
+
+Include the following sections:
+1. **Architecture Overview** - High-level system design
+2. **System Diagram** - Mermaid diagram showing components
+3. **Components** - Detailed breakdown of each component
+4. **Data Models** - Entity relationships and schemas
+5. **API Design** - Endpoint structure and contracts
+6. **Technology Recommendations** - Stack choices with rationale
+7. **Security Architecture** - Auth, encryption, access control
+8. **Scalability Plan** - How the system grows
+9. **Infrastructure** - Deployment and hosting
+10. **Risks & Trade-offs** - Technical debt and decisions
+
+Guidelines:
+- Use Mermaid syntax for diagrams
+- Consider SOLID principles and clean architecture
+- Recommend proven, maintainable technologies
+- Include performance considerations
+- Address data integrity and consistency
+
+Focus Area: ${focus}
+${existingStack ? `Existing Stack: ${existingStack}` : ""}
+${constraints ? `Constraints: ${JSON.stringify(constraints)}` : ""}`;
+        const userMessage = `Create a technical architecture for the following requirements:
+
+${requirements.length > 12e3 ? requirements.substring(0, 12e3) + "\n... [truncated]" : requirements}
+
+Generate a comprehensive architecture document with Mermaid diagrams.`;
+        try {
+          const result = await createCompletion({
+            systemPrompt,
+            userMessage,
+            model: "balanced",
+            maxTokens: 6e3
+          });
+          const totalTokens = result.inputTokens + result.outputTokens;
+          return markdownResult(result.text, totalTokens);
+        } catch (error2) {
+          const message = error2 instanceof Error ? error2.message : "Unknown error";
+          return errorResult(`Failed to generate architecture: ${message}`);
+        }
+      },
+      requiredScope: "tools:generate_architecture"
+    };
+  }
+});
+
+// src/tools/handoff-package.ts
+var handoffPackageTool;
+var init_handoff_package = __esm({
+  "src/tools/handoff-package.ts"() {
+    "use strict";
+    init_types();
+    init_client();
+    handoffPackageTool = {
+      tool: {
+        name: "handoff_package",
+        description: `Create a complete developer handoff package from PM artifacts.
+
+Takes PRD, designs, and specs to generate:
+- Implementation guide
+- Acceptance criteria checklist
+- Edge cases and error handling
+- Testing requirements
+- Definition of Done
+
+Best for: Preparing work for engineering sprint planning.`,
+        inputSchema: {
+          type: "object",
+          properties: {
+            prd: {
+              type: "string",
+              description: "Product Requirements Document content"
+            },
+            designs: {
+              type: "string",
+              description: "Design descriptions or Figma links"
+            },
+            techSpecs: {
+              type: "string",
+              description: "Technical specifications or architecture docs"
+            },
+            targetTeam: {
+              type: "string",
+              enum: ["frontend", "backend", "fullstack", "mobile", "devops"],
+              default: "fullstack",
+              description: "Target development team"
+            },
+            sprintDuration: {
+              type: "number",
+              default: 2,
+              description: "Sprint duration in weeks"
+            }
+          },
+          required: ["prd"]
+        }
+      },
+      handler: async (args, validation) => {
+        const prd = args.prd;
+        const designs = args.designs;
+        const techSpecs = args.techSpecs;
+        const targetTeam = args.targetTeam || "fullstack";
+        const sprintDuration = args.sprintDuration || 2;
+        if (!prd || prd.trim().length === 0) {
+          return errorResult("PRD content is required");
+        }
+        const systemPrompt = `You are an expert technical program manager who creates flawless developer handoff packages.
+
+Create a handoff package with:
+1. **Summary** - What we're building and why
+2. **Implementation Guide**
+- Step-by-step implementation order
+- Dependencies between components
+- Critical path items
+3. **Acceptance Criteria** - Testable requirements for each feature
+4. **Edge Cases** - All the "what if" scenarios
+5. **Error Handling** - How errors should be handled
+6. **Testing Requirements**
+- Unit test requirements
+- Integration test scenarios
+- E2E test flows
+7. **Definition of Done** - Checklist for completion
+8. **Questions for PM** - Clarifications needed
+9. **Risks** - Implementation risks to discuss
+
+Guidelines:
+- Be extremely specific - no ambiguity
+- Include code examples where helpful
+- Consider accessibility requirements
+- Note performance expectations
+- List any feature flags needed
+
+Target Team: ${targetTeam}
+Sprint Duration: ${sprintDuration} weeks`;
+        let userMessage = `Create a developer handoff package from:
+
+## PRD
+${prd.length > 1e4 ? prd.substring(0, 1e4) + "\n... [truncated]" : prd}`;
+        if (designs) {
+          userMessage += `
+
+## Design Specs
+${designs}`;
+        }
+        if (techSpecs) {
+          userMessage += `
+
+## Technical Specs
+${techSpecs}`;
+        }
+        userMessage += "\n\nCreate a comprehensive handoff package.";
+        try {
+          const result = await createCompletion({
+            systemPrompt,
+            userMessage,
+            model: "balanced",
+            maxTokens: 6e3
+          });
+          const totalTokens = result.inputTokens + result.outputTokens;
+          return markdownResult(result.text, totalTokens);
+        } catch (error2) {
+          const message = error2 instanceof Error ? error2.message : "Unknown error";
+          return errorResult(`Failed to create handoff package: ${message}`);
+        }
+      },
+      requiredScope: "tools:handoff_package"
+    };
+  }
+});
+
+// src/tools/synthesize-requirements.ts
+var synthesizeRequirementsTool;
+var init_synthesize_requirements = __esm({
+  "src/tools/synthesize-requirements.ts"() {
+    "use strict";
+    init_types();
+    init_client();
+    synthesizeRequirementsTool = {
+      tool: {
+        name: "synthesize_requirements",
+        description: `Extract and synthesize requirements from various input sources.
+
+Takes meeting notes, emails, slack threads, user feedback, or any unstructured input and produces:
+- Functional requirements (what the system must do)
+- Non-functional requirements (performance, security, scalability)
+- User stories in standard format
+- Acceptance criteria for each requirement
+- Requirement dependencies and priorities
+
+Best for: Converting messy stakeholder input into structured requirements.`,
+        inputSchema: {
+          type: "object",
+          properties: {
+            sources: {
+              type: "array",
+              items: {
+                type: "object",
+                properties: {
+                  content: { type: "string" },
+                  type: {
+                    type: "string",
+                    enum: ["meeting_notes", "email", "slack", "feedback", "interview", "survey", "other"]
+                  },
+                  stakeholder: { type: "string" },
+                  date: { type: "string" }
+                },
+                required: ["content"]
+              },
+              description: "Array of source documents to analyze"
+            },
+            context: {
+              type: "string",
+              description: "Product or feature context to help interpretation"
+            },
+            outputFormat: {
+              type: "string",
+              enum: ["user_stories", "requirements_doc", "both"],
+              default: "both",
+              description: "Output format preference"
+            },
+            priorityFramework: {
+              type: "string",
+              enum: ["moscow", "rice", "kano", "value_effort"],
+              default: "moscow",
+              description: "Prioritization framework to use"
+            }
+          },
+          required: ["sources"]
+        }
+      },
+      handler: async (args, validation) => {
+        const sources = args.sources;
+        const context = args.context;
+        const outputFormat = args.outputFormat || "both";
+        const priorityFramework = args.priorityFramework || "moscow";
+        if (!sources || sources.length === 0) {
+          return errorResult("At least one source document is required");
+        }
+        const priorityDescriptions = {
+          moscow: "MoSCoW (Must have, Should have, Could have, Won't have)",
+          rice: "RICE (Reach, Impact, Confidence, Effort)",
+          kano: "Kano Model (Basic, Performance, Excitement)",
+          value_effort: "Value/Effort Matrix (Quick wins, Big bets, Fill-ins, Time sinks)"
+        };
+        const systemPrompt = `You are a senior business analyst expert at extracting clear requirements from ambiguous stakeholder input.
+
+Your task is to analyze the provided sources and produce structured requirements.
+
+Output Sections:
+1. **Executive Summary** - Key themes and scope overview
+2. **Functional Requirements** - What the system must do
+- ID, Description, Priority, Source reference
+3. **Non-Functional Requirements** - Quality attributes
+- Performance, Security, Scalability, Accessibility, etc.
+4. **User Stories** - In "As a [role], I want [feature], so that [benefit]" format
+- Include acceptance criteria for each
+5. **Requirement Dependencies** - Which requirements depend on others
+6. **Priority Matrix** - Using ${priorityDescriptions[priorityFramework]}
+7. **Gaps & Ambiguities** - What's unclear or missing
+8. **Recommended Clarifications** - Questions for stakeholders
+
+Guidelines:
+- Extract implicit requirements (things stakeholders assume but didn't state)
+- Note conflicting requirements and flag for resolution
+- Use unique IDs for traceability (REQ-001, US-001, etc.)
+- Map each requirement back to its source
+- Consider edge cases mentioned in discussions
+
+${context ? `Product Context: ${context}` : ""}`;
+        const formattedSources = sources.map((source, i) => {
+          const meta = [
+            source.type ? `Type: ${source.type}` : null,
+            source.stakeholder ? `Stakeholder: ${source.stakeholder}` : null,
+            source.date ? `Date: ${source.date}` : null
+          ].filter(Boolean).join(" | ");
+          return `### Source ${i + 1}${meta ? ` (${meta})` : ""}
+${source.content}`;
+        }).join("\n\n---\n\n");
+        const userMessage = `Analyze these sources and extract structured requirements:
+
+${formattedSources.length > 15e3 ? formattedSources.substring(0, 15e3) + "\n... [truncated]" : formattedSources}
+
+Output format preference: ${outputFormat}
+Prioritization framework: ${priorityFramework}
+
+Extract all requirements with proper structure and traceability.`;
+        try {
+          const result = await createCompletion({
+            systemPrompt,
+            userMessage,
+            model: "balanced",
+            maxTokens: 6e3
+          });
+          const totalTokens = result.inputTokens + result.outputTokens;
+          return markdownResult(result.text, totalTokens);
+        } catch (error2) {
+          const message = error2 instanceof Error ? error2.message : "Unknown error";
+          return errorResult(`Failed to synthesize requirements: ${message}`);
+        }
+      },
+      requiredScope: "tools:synthesize_requirements"
+    };
+  }
+});
+
+// src/tools/review-prd.ts
+var reviewPrdTool;
+var init_review_prd = __esm({
+  "src/tools/review-prd.ts"() {
+    "use strict";
+    init_types();
+    init_client();
+    reviewPrdTool = {
+      tool: {
+        name: "review_prd",
+        description: `Review and critique a Product Requirements Document.
+
+Analyzes a PRD against best practices and provides:
+- Completeness assessment
+- Clarity and ambiguity analysis
+- Gap identification
+- Risk assessment
+- Improvement suggestions
+- Readiness score
+
+Best for: Quality assurance before engineering handoff.`,
+        inputSchema: {
+          type: "object",
+          properties: {
+            prd: {
+              type: "string",
+              description: "The PRD content to review"
+            },
+            reviewFocus: {
+              type: "array",
+              items: {
+                type: "string",
+                enum: [
+                  "completeness",
+                  "clarity",
+                  "technical_feasibility",
+                  "user_value",
+                  "metrics",
+                  "edge_cases",
+                  "security",
+                  "accessibility"
+                ]
+              },
+              default: ["completeness", "clarity", "technical_feasibility"],
+              description: "Areas to focus the review on"
+            },
+            targetAudience: {
+              type: "string",
+              enum: ["engineering", "stakeholders", "both"],
+              default: "engineering",
+              description: "Primary audience for the PRD"
+            },
+            strictness: {
+              type: "string",
+              enum: ["lenient", "standard", "strict"],
+              default: "standard",
+              description: "How strict the review should be"
+            }
+          },
+          required: ["prd"]
+        }
+      },
+      handler: async (args, validation) => {
+        const prd = args.prd;
+        const reviewFocus = args.reviewFocus || [
+          "completeness",
+          "clarity",
+          "technical_feasibility"
+        ];
+        const targetAudience = args.targetAudience || "engineering";
+        const strictness = args.strictness || "standard";
+        if (!prd || prd.trim().length === 0) {
+          return errorResult("PRD content is required for review");
+        }
+        const strictnessGuide = {
+          lenient: "Be constructive and focus on major issues only. Assume good intent and fill in reasonable gaps.",
+          standard: "Balance thoroughness with practicality. Flag important issues but don't nitpick.",
+          strict: "Apply rigorous standards. Flag all issues, ambiguities, and potential problems. This PRD should be bulletproof."
+        };
+        const systemPrompt = `You are a senior product management coach who has reviewed thousands of PRDs. Your job is to provide actionable, constructive feedback.
+
+Review the PRD with these priorities:
+${reviewFocus.map((f) => `- ${f}`).join("\n")}
+
+Provide a structured review with:
+
+1. **Overall Assessment**
+- Readiness Score (1-10)
+- Summary judgment (Ready / Needs Work / Major Revision Needed)
+
+2. **Strengths**
+- What's done well
+
+3. **Completeness Analysis**
+- Required sections present/missing
+- Depth of coverage
+
+4. **Clarity Issues**
+- Ambiguous requirements (quote specific text)
+- Missing definitions
+- Unclear scope boundaries
+
+5. **Technical Feasibility Concerns**
+- Potentially challenging requirements
+- Missing technical considerations
+- Architecture implications not addressed
+
+6. **Gap Analysis**
+- Missing use cases
+- Unaddressed edge cases
+- Missing acceptance criteria
+
+7. **Risk Assessment**
+- Implementation risks
+- Timeline risks
+- Dependency risks
+
+8. **Specific Recommendations**
+- Prioritized list of improvements
+- Suggested rewrites for unclear sections
+
+9. **Questions for PM**
+- Clarifications needed before engineering can start
+
+Review Strictness: ${strictnessGuide[strictness]}
+Target Audience: ${targetAudience}`;
+        const userMessage = `Review this PRD and provide comprehensive feedback:
+
+${prd.length > 15e3 ? prd.substring(0, 15e3) + "\n... [truncated]" : prd}
+
+Provide a thorough review with actionable feedback.`;
+        try {
+          const result = await createCompletion({
+            systemPrompt,
+            userMessage,
+            model: "balanced",
+            maxTokens: 5e3
+          });
+          const totalTokens = result.inputTokens + result.outputTokens;
+          return markdownResult(result.text, totalTokens);
+        } catch (error2) {
+          const message = error2 instanceof Error ? error2.message : "Unknown error";
+          return errorResult(`Failed to review PRD: ${message}`);
+        }
+      },
+      requiredScope: "tools:review_prd"
+    };
+  }
+});
+
+// src/tools/sync-to-tracker.ts
+var syncToTrackerTool;
+var init_sync_to_tracker = __esm({
+  "src/tools/sync-to-tracker.ts"() {
+    "use strict";
+    init_types();
+    init_client();
+    syncToTrackerTool = {
+      tool: {
+        name: "sync_to_tracker",
+        description: `Format requirements and user stories for import into project management tools.
+
+Takes requirements or a PRD and generates properly formatted items for:
+- Jira (Epics, Stories, Tasks, Subtasks)
+- Linear (Projects, Issues, Sub-issues)
+- GitHub Issues (Issues with labels and milestones)
+- Asana (Projects, Tasks, Subtasks)
+- Notion (Database entries)
+
+Output includes import instructions and properly structured data.
+
+Best for: Automated backlog population from requirements documents.`,
+        inputSchema: {
+          type: "object",
+          properties: {
+            requirements: {
+              type: "string",
+              description: "Requirements document, PRD, or user stories to convert"
+            },
+            platform: {
+              type: "string",
+              enum: ["jira", "linear", "github", "asana", "notion"],
+              default: "jira",
+              description: "Target project management platform"
+            },
+            projectKey: {
+              type: "string",
+              description: "Project key/identifier (e.g., PROJ for Jira)"
+            },
+            epicName: {
+              type: "string",
+              description: "Parent epic name for all generated items"
+            },
+            defaultLabels: {
+              type: "array",
+              items: { type: "string" },
+              description: "Default labels to apply to all items"
+            },
+            includeEstimates: {
+              type: "boolean",
+              default: true,
+              description: "Whether to include effort estimates"
+            },
+            estimateUnit: {
+              type: "string",
+              enum: ["story_points", "hours", "t_shirt"],
+              default: "story_points",
+              description: "Estimation unit to use"
+            }
+          },
+          required: ["requirements"]
+        }
+      },
+      handler: async (args, validation) => {
+        const requirements = args.requirements;
+        const platform = args.platform || "jira";
+        const projectKey = args.projectKey;
+        const epicName = args.epicName;
+        const defaultLabels = args.defaultLabels || [];
+        const includeEstimates = args.includeEstimates !== false;
+        const estimateUnit = args.estimateUnit || "story_points";
+        if (!requirements || requirements.trim().length === 0) {
+          return errorResult("Requirements content is required");
+        }
+        const platformConfig = {
+          jira: {
+            hierarchy: "Epic > Story > Task > Subtask",
+            estimateFormat: "Story points (fibonacci: 1, 2, 3, 5, 8, 13) or time (1h, 2h, 4h, 1d, 2d)"
+          },
+          linear: {
+            hierarchy: "Project > Issue > Sub-issue",
+            estimateFormat: "Linear points (0, 1, 2, 3, 5, 8)"
+          },
+          github: {
+            hierarchy: "Milestone > Issue",
+            estimateFormat: "T-shirt sizes in labels (size:S, size:M, size:L, size:XL)"
+          },
+          asana: {
+            hierarchy: "Project > Section > Task > Subtask",
+            estimateFormat: "Hours or days"
+          },
+          notion: {
+            hierarchy: "Database > Entry (with relations)",
+            estimateFormat: "Number property or select (S/M/L/XL)"
+          }
+        };
+        const config = platformConfig[platform];
+        if (!config) {
+          return errorResult(`Unsupported platform: ${platform}`);
+        }
+        const systemPrompt = `You are an expert at structuring work items for project management tools.
+
+Convert the requirements into structured items for ${platform.toUpperCase()}.
+
+Platform hierarchy: ${config.hierarchy}
+Estimate format: ${config.estimateFormat}
+${epicName ? `Parent Epic: ${epicName}` : ""}
+${defaultLabels.length > 0 ? `Default Labels: ${defaultLabels.join(", ")}` : ""}
+
+Output a JSON object with this structure:
+{
+"platform": "${platform}",
+"projectKey": "${projectKey || "PROJECT"}",
+"items": [
+{
+"type": "epic|story|task|bug|subtask",
+"title": "Clear, actionable title",
+"description": "Detailed description with context",
+"acceptanceCriteria": ["Criterion 1", "Criterion 2"],
+"priority": "highest|high|medium|low|lowest",
+"labels": ["label1", "label2"],
+"estimate": "${includeEstimates ? "Appropriate estimate" : "null"}",
+"parentRef": "Reference to parent item if subtask/child",
+"customFields": {}
+}
+],
+"importInstructions": "Step-by-step instructions for importing into ${platform}"
+}
+
+Guidelines:
+- Create a logical hierarchy (epics contain stories, stories contain tasks)
+- Write clear, actionable titles (start with verb)
+- Include enough context in descriptions for developers
+- Add acceptance criteria for stories
+- Use appropriate priority based on business value and dependencies
+- Keep items appropriately sized (stories should be completable in 1 sprint)
+- Reference parent items for hierarchy
+${includeEstimates ? `- Include ${estimateUnit} estimates based on complexity` : "- Do not include estimates"}`;
+        const userMessage = `Convert these requirements into ${platform} items:
+
+${requirements.length > 12e3 ? requirements.substring(0, 12e3) + "\n... [truncated]" : requirements}
+
+Generate structured items ready for import.`;
+        try {
+          const result = await createJsonCompletion({
+            systemPrompt,
+            userMessage,
+            model: "balanced",
+            maxTokens: 6e3
+          });
+          const totalTokens = result.inputTokens + result.outputTokens;
+          const output = result.data;
+          output.platform = platform;
+          if (projectKey) output.projectKey = projectKey;
+          if (defaultLabels.length > 0 && output.items) {
+            output.items = output.items.map((item) => ({
+              ...item,
+              labels: [.../* @__PURE__ */ new Set([...item.labels || [], ...defaultLabels])]
+            }));
+          }
+          return jsonResult(output, totalTokens);
+        } catch (error2) {
+          const message = error2 instanceof Error ? error2.message : "Unknown error";
+          return errorResult(`Failed to format for tracker: ${message}`);
+        }
+      },
+      requiredScope: "tools:sync_to_tracker"
+    };
+  }
+});
+
+// src/tools/reverse-engineer-flows.ts
+var reverseEngineerFlowsTool;
+var init_reverse_engineer_flows = __esm({
+  "src/tools/reverse-engineer-flows.ts"() {
+    "use strict";
+    init_types();
+    init_client();
+    reverseEngineerFlowsTool = {
+      tool: {
+        name: "reverse_engineer_user_flows",
+        description: `Analyze code to extract and document user flows and journeys.
+
+Takes source code (routes, components, handlers) and produces:
+- User journey maps
+- Flow diagrams (Mermaid)
+- Screen/state transitions
+- API call sequences
+- Error handling paths
+- Edge case flows
+
+Best for: Documenting existing features or understanding inherited codebases.`,
+        inputSchema: {
+          type: "object",
+          properties: {
+            code: {
+              type: "string",
+              description: "Source code to analyze (routes, components, controllers)"
+            },
+            codeType: {
+              type: "string",
+              enum: ["frontend", "backend", "fullstack", "api"],
+              default: "fullstack",
+              description: "Type of code being analyzed"
+            },
+            featureName: {
+              type: "string",
+              description: "Name of the feature being analyzed"
+            },
+            outputFormat: {
+              type: "string",
+              enum: ["flows", "diagrams", "documentation", "all"],
+              default: "all",
+              description: "What to include in output"
+            },
+            includeErrorFlows: {
+              type: "boolean",
+              default: true,
+              description: "Whether to document error handling flows"
+            }
+          },
+          required: ["code"]
+        }
+      },
+      handler: async (args, validation) => {
+        const code = args.code;
+        const codeType = args.codeType || "fullstack";
+        const featureName = args.featureName;
+        const outputFormat = args.outputFormat || "all";
+        const includeErrorFlows = args.includeErrorFlows !== false;
+        if (!code || code.trim().length === 0) {
+          return errorResult("Source code is required for analysis");
+        }
+        const systemPrompt = `You are a senior engineer expert at reverse engineering user flows from code.
+
+Analyze the provided code and extract user flows.
+
+Output Sections:
+1. **Overview**
+- Feature summary
+- Entry points identified
+- Key actors/roles
+
+2. **User Flows**
+- Primary happy path
+- Alternative paths
+${includeErrorFlows ? "- Error flows and recovery paths" : ""}
+- Edge cases
+
+3. **Flow Diagram** (Mermaid)
+\`\`\`mermaid
+flowchart TD
+Start --> Action --> Decision{Condition}
+Decision -->|Yes| Success
+Decision -->|No| Error
+\`\`\`
+
+4. **State Transitions**
+- UI states (loading, error, success, empty)
+- Data states
+- User state changes
+
+5. **API Sequence** (if applicable)
+\`\`\`mermaid
+sequenceDiagram
+participant User
+participant Frontend
+participant API
+participant Database
+\`\`\`
+
+6. **Components/Screens**
+- List of screens/components involved
+- Their responsibilities
+- Data dependencies
+
+7. **User Stories Derived**
+- Inferred user stories from the code
+
+8. **Gaps & Recommendations**
+- Missing error handling
+- Undocumented edge cases
+- Suggested improvements
+
+Code Type: ${codeType}
+${featureName ? `Feature: ${featureName}` : ""}`;
+        const userMessage = `Analyze this code and extract user flows:
+
+\`\`\`
+${code.length > 15e3 ? code.substring(0, 15e3) + "\n// ... [truncated]" : code}
+\`\`\`
+
+Document all user flows with diagrams.`;
+        try {
+          const result = await createCompletion({
+            systemPrompt,
+            userMessage,
+            model: "balanced",
+            maxTokens: 5e3
+          });
+          const totalTokens = result.inputTokens + result.outputTokens;
+          return markdownResult(result.text, totalTokens);
+        } catch (error2) {
+          const message = error2 instanceof Error ? error2.message : "Unknown error";
+          return errorResult(`Failed to reverse engineer flows: ${message}`);
+        }
+      },
+      requiredScope: "tools:reverse_engineer_flows"
+    };
+  }
+});
+
+// src/tools/generate-test-specs.ts
+var generateTestSpecsTool;
+var init_generate_test_specs = __esm({
+  "src/tools/generate-test-specs.ts"() {
+    "use strict";
+    init_types();
+    init_client();
+    generateTestSpecsTool = {
+      tool: {
+        name: "generate_test_specs",
+        description: `Generate comprehensive test specifications from requirements or code.
+
+Takes PRD, user stories, or source code and produces:
+- Test plan overview
+- Unit test specifications
+- Integration test scenarios
+- E2E test cases
+- Edge case coverage
+- Test data requirements
+- Accessibility testing checklist
+
+Best for: Ensuring comprehensive test coverage before or during development.`,
+        inputSchema: {
+          type: "object",
+          properties: {
+            input: {
+              type: "string",
+              description: "PRD, user stories, or source code to generate tests from"
+            },
+            inputType: {
+              type: "string",
+              enum: ["prd", "user_stories", "code", "api_spec"],
+              default: "prd",
+              description: "Type of input provided"
+            },
+            testTypes: {
+              type: "array",
+              items: {
+                type: "string",
+                enum: ["unit", "integration", "e2e", "api", "performance", "security", "accessibility"]
+              },
+              default: ["unit", "integration", "e2e"],
+              description: "Types of tests to generate"
+            },
+            framework: {
+              type: "string",
+              enum: ["jest", "vitest", "pytest", "playwright", "cypress", "generic"],
|
|
1486
|
+
default: "generic",
|
|
1487
|
+
description: "Testing framework for syntax hints"
|
|
1488
|
+
},
|
|
1489
|
+
coverage: {
|
|
1490
|
+
type: "string",
|
|
1491
|
+
enum: ["minimal", "standard", "comprehensive"],
|
|
1492
|
+
default: "standard",
|
|
1493
|
+
description: "How thorough the test coverage should be"
|
|
1494
|
+
}
|
|
1495
|
+
},
|
|
1496
|
+
required: ["input"]
|
|
1497
|
+
}
|
|
1498
|
+
},
|
|
1499
|
+
handler: async (args, validation) => {
|
|
1500
|
+
const input = args.input;
|
|
1501
|
+
const inputType = args.inputType || "prd";
|
|
1502
|
+
const testTypes = args.testTypes || ["unit", "integration", "e2e"];
|
|
1503
|
+
const framework = args.framework || "generic";
|
|
1504
|
+
const coverage = args.coverage || "standard";
|
|
1505
|
+
if (!input || input.trim().length === 0) {
|
|
1506
|
+
return errorResult("Input (PRD, user stories, or code) is required");
|
|
1507
|
+
}
|
|
1508
|
+
const coverageGuide = {
|
|
1509
|
+
minimal: "Focus on happy paths and critical functionality only",
|
|
1510
|
+
standard: "Cover happy paths, common edge cases, and error scenarios",
|
|
1511
|
+
comprehensive: "Cover all paths, edge cases, error scenarios, boundary conditions, and negative testing"
|
|
1512
|
+
};
|
|
1513
|
+
const systemPrompt = `You are a QA architect expert at creating comprehensive test specifications.
|
|
1514
|
+
|
|
1515
|
+
Generate test specs from the provided ${inputType}.
|
|
1516
|
+
|
|
1517
|
+
Test Types to Include: ${testTypes.join(", ")}
|
|
1518
|
+
Framework: ${framework}
|
|
1519
|
+
Coverage Level: ${coverageGuide[coverage]}
|
|
1520
|
+
|
|
1521
|
+
Output Sections:
|
|
1522
|
+
|
|
1523
|
+
1. **Test Plan Overview**
|
|
1524
|
+
- Scope and objectives
|
|
1525
|
+
- Test strategy
|
|
1526
|
+
- Risk-based prioritization
|
|
1527
|
+
|
|
1528
|
+
2. **Unit Tests** (if requested)
|
|
1529
|
+
- Function/method level tests
|
|
1530
|
+
- Mock requirements
|
|
1531
|
+
- Edge cases per function
|
|
1532
|
+
${framework !== "generic" ? `- ${framework} code snippets` : ""}
|
|
1533
|
+
|
|
1534
|
+
3. **Integration Tests** (if requested)
|
|
1535
|
+
- Component interaction tests
|
|
1536
|
+
- API contract tests
|
|
1537
|
+
- Database integration tests
|
|
1538
|
+
|
|
1539
|
+
4. **E2E Tests** (if requested)
|
|
1540
|
+
- User journey scenarios
|
|
1541
|
+
- Critical path coverage
|
|
1542
|
+
- Cross-browser/device requirements
|
|
1543
|
+
|
|
1544
|
+
5. **API Tests** (if requested)
|
|
1545
|
+
- Endpoint coverage
|
|
1546
|
+
- Request/response validation
|
|
1547
|
+
- Error response testing
|
|
1548
|
+
|
|
1549
|
+
6. **Performance Tests** (if requested)
|
|
1550
|
+
- Load testing scenarios
|
|
1551
|
+
- Response time expectations
|
|
1552
|
+
- Concurrent user tests
|
|
1553
|
+
|
|
1554
|
+
7. **Security Tests** (if requested)
|
|
1555
|
+
- Authentication/authorization tests
|
|
1556
|
+
- Input validation tests
|
|
1557
|
+
- OWASP considerations
|
|
1558
|
+
|
|
1559
|
+
8. **Accessibility Tests** (if requested)
|
|
1560
|
+
- WCAG compliance checks
|
|
1561
|
+
- Screen reader compatibility
|
|
1562
|
+
- Keyboard navigation
|
|
1563
|
+
|
|
1564
|
+
9. **Test Data Requirements**
|
|
1565
|
+
- Fixtures needed
|
|
1566
|
+
- Test user personas
|
|
1567
|
+
- Mock data specifications
|
|
1568
|
+
|
|
1569
|
+
10. **Acceptance Criteria Traceability**
|
|
1570
|
+
- Map each test to a requirement
|
|
1571
|
+
- Coverage matrix
|
|
1572
|
+
|
|
1573
|
+
Format each test case as:
|
|
1574
|
+
\`\`\`
|
|
1575
|
+
Test ID: TC-XXX
|
|
1576
|
+
Title: [Descriptive title]
|
|
1577
|
+
Preconditions: [Setup required]
|
|
1578
|
+
Steps:
|
|
1579
|
+
1. [Action]
|
|
1580
|
+
2. [Action]
|
|
1581
|
+
Expected Result: [What should happen]
|
|
1582
|
+
Priority: High/Medium/Low
|
|
1583
|
+
\`\`\``;
|
|
1584
|
+
const userMessage = `Generate test specifications from this ${inputType}:
|
|
1585
|
+
|
|
1586
|
+
${input.length > 12e3 ? input.substring(0, 12e3) + "\n... [truncated]" : input}
|
|
1587
|
+
|
|
1588
|
+
Create comprehensive test specs covering: ${testTypes.join(", ")}`;
|
|
1589
|
+
try {
|
|
1590
|
+
const result = await createCompletion({
|
|
1591
|
+
systemPrompt,
|
|
1592
|
+
userMessage,
|
|
1593
|
+
model: "balanced",
|
|
1594
|
+
maxTokens: 6e3
|
|
1595
|
+
});
|
|
1596
|
+
const totalTokens = result.inputTokens + result.outputTokens;
|
|
1597
|
+
return markdownResult(result.text, totalTokens);
|
|
1598
|
+
} catch (error2) {
|
|
1599
|
+
const message = error2 instanceof Error ? error2.message : "Unknown error";
|
|
1600
|
+
return errorResult(`Failed to generate test specs: ${message}`);
|
|
1601
|
+
}
|
|
1602
|
+
},
|
|
1603
|
+
requiredScope: "tools:generate_test_specs"
|
|
1604
|
+
};
|
|
1605
|
+
}
|
|
1606
|
+
});
|
|
1607
|
+
|
|
1608
|
+
// src/tools/explain-codebase.ts
|
|
1609
|
+
var explainCodebaseTool;
|
|
1610
|
+
var init_explain_codebase = __esm({
|
|
1611
|
+
"src/tools/explain-codebase.ts"() {
|
|
1612
|
+
"use strict";
|
|
1613
|
+
init_types();
|
|
1614
|
+
init_client();
|
|
1615
|
+
explainCodebaseTool = {
|
|
1616
|
+
tool: {
|
|
1617
|
+
name: "explain_codebase",
|
|
1618
|
+
description: `Analyze code and generate comprehensive documentation for product managers.
|
|
1619
|
+
|
|
1620
|
+
Takes source code and produces PM-friendly documentation:
|
|
1621
|
+
- Architecture overview
|
|
1622
|
+
- Feature inventory
|
|
1623
|
+
- Data flow explanations
|
|
1624
|
+
- User-facing functionality mapping
|
|
1625
|
+
- Technical debt assessment
|
|
1626
|
+
- Business logic documentation
|
|
1627
|
+
|
|
1628
|
+
Best for: Onboarding PMs to existing codebases or creating technical context.`,
|
|
1629
|
+
inputSchema: {
|
|
1630
|
+
type: "object",
|
|
1631
|
+
properties: {
|
|
1632
|
+
code: {
|
|
1633
|
+
type: "string",
|
|
1634
|
+
description: "Source code to analyze and explain"
|
|
1635
|
+
},
|
|
1636
|
+
projectName: {
|
|
1637
|
+
type: "string",
|
|
1638
|
+
description: "Name of the project"
|
|
1639
|
+
},
|
|
1640
|
+
codeContext: {
|
|
1641
|
+
type: "string",
|
|
1642
|
+
description: "Additional context about the codebase"
|
|
1643
|
+
},
|
|
1644
|
+
audience: {
|
|
1645
|
+
type: "string",
|
|
1646
|
+
enum: ["pm", "stakeholder", "new_engineer", "executive"],
|
|
1647
|
+
default: "pm",
|
|
1648
|
+
description: "Target audience for the documentation"
|
|
1649
|
+
},
|
|
1650
|
+
focus: {
|
|
1651
|
+
type: "array",
|
|
1652
|
+
items: {
|
|
1653
|
+
type: "string",
|
|
1654
|
+
enum: [
|
|
1655
|
+
"architecture",
|
|
1656
|
+
"features",
|
|
1657
|
+
"data_flow",
|
|
1658
|
+
"business_logic",
|
|
1659
|
+
"tech_debt",
|
|
1660
|
+
"dependencies",
|
|
1661
|
+
"security"
|
|
1662
|
+
]
|
|
1663
|
+
},
|
|
1664
|
+
default: ["architecture", "features", "business_logic"],
|
|
1665
|
+
description: "Areas to focus documentation on"
|
|
1666
|
+
},
|
|
1667
|
+
detailLevel: {
|
|
1668
|
+
type: "string",
|
|
1669
|
+
enum: ["overview", "detailed", "deep_dive"],
|
|
1670
|
+
default: "detailed",
|
|
1671
|
+
description: "Level of detail in explanations"
|
|
1672
|
+
}
|
|
1673
|
+
},
|
|
1674
|
+
required: ["code"]
|
|
1675
|
+
}
|
|
1676
|
+
},
|
|
1677
|
+
handler: async (args, validation) => {
|
|
1678
|
+
const code = args.code;
|
|
1679
|
+
const projectName = args.projectName;
|
|
1680
|
+
const codeContext = args.codeContext;
|
|
1681
|
+
const audience = args.audience || "pm";
|
|
1682
|
+
const focus = args.focus || ["architecture", "features", "business_logic"];
|
|
1683
|
+
const detailLevel = args.detailLevel || "detailed";
|
|
1684
|
+
if (!code || code.trim().length === 0) {
|
|
1685
|
+
return errorResult("Source code is required for analysis");
|
|
1686
|
+
}
|
|
1687
|
+
const audienceGuide = {
|
|
1688
|
+
pm: "Focus on product features, user value, and business logic. Avoid deep technical jargon.",
|
|
1689
|
+
stakeholder: "High-level overview focusing on capabilities, risks, and business implications.",
|
|
1690
|
+
new_engineer: "Technical detail with onboarding focus. Explain patterns, conventions, and gotchas.",
|
|
1691
|
+
executive: "Strategic summary focusing on capabilities, technical health, and risks."
|
|
1692
|
+
};
|
|
1693
|
+
const systemPrompt = `You are a staff engineer who excels at explaining complex codebases to non-technical stakeholders.
|
|
1694
|
+
|
|
1695
|
+
Analyze the code and create documentation for: ${audience}
|
|
1696
|
+
${audienceGuide[audience]}
|
|
1697
|
+
|
|
1698
|
+
Documentation Focus: ${focus.join(", ")}
|
|
1699
|
+
Detail Level: ${detailLevel}
|
|
1700
|
+
${projectName ? `Project: ${projectName}` : ""}
|
|
1701
|
+
${codeContext ? `Context: ${codeContext}` : ""}
|
|
1702
|
+
|
|
1703
|
+
Output Sections (include based on focus):
|
|
1704
|
+
|
|
1705
|
+
1. **Executive Summary**
|
|
1706
|
+
- What this code does in plain English
|
|
1707
|
+
- Key capabilities
|
|
1708
|
+
- Technology stack
|
|
1709
|
+
|
|
1710
|
+
2. **Architecture Overview** (if in focus)
|
|
1711
|
+
- System diagram (Mermaid)
|
|
1712
|
+
- Key components and their roles
|
|
1713
|
+
- How pieces fit together
|
|
1714
|
+
|
|
1715
|
+
3. **Feature Inventory** (if in focus)
|
|
1716
|
+
- List of user-facing features found
|
|
1717
|
+
- Feature completeness assessment
|
|
1718
|
+
- Hidden/admin features
|
|
1719
|
+
|
|
1720
|
+
4. **Data Flow** (if in focus)
|
|
1721
|
+
- How data moves through the system
|
|
1722
|
+
- Key entities and relationships
|
|
1723
|
+
- Data transformations
|
|
1724
|
+
|
|
1725
|
+
5. **Business Logic Documentation** (if in focus)
|
|
1726
|
+
- Core algorithms explained
|
|
1727
|
+
- Business rules implemented
|
|
1728
|
+
- Validation and constraints
|
|
1729
|
+
|
|
1730
|
+
6. **Technical Debt Assessment** (if in focus)
|
|
1731
|
+
- Code quality observations
|
|
1732
|
+
- Maintenance concerns
|
|
1733
|
+
- Refactoring opportunities
|
|
1734
|
+
|
|
1735
|
+
7. **Dependencies** (if in focus)
|
|
1736
|
+
- External services used
|
|
1737
|
+
- Third-party libraries
|
|
1738
|
+
- Integration points
|
|
1739
|
+
|
|
1740
|
+
8. **Security Considerations** (if in focus)
|
|
1741
|
+
- Authentication/authorization patterns
|
|
1742
|
+
- Data protection measures
|
|
1743
|
+
- Potential concerns
|
|
1744
|
+
|
|
1745
|
+
9. **Glossary**
|
|
1746
|
+
- Technical terms explained
|
|
1747
|
+
- Domain-specific vocabulary
|
|
1748
|
+
|
|
1749
|
+
Guidelines:
|
|
1750
|
+
- Use analogies for complex concepts
|
|
1751
|
+
- Include "What this means for product" sections
|
|
1752
|
+
- Highlight risks and opportunities
|
|
1753
|
+
- Be honest about limitations and unknowns`;
|
|
1754
|
+
const userMessage = `Analyze and document this codebase:
|
|
1755
|
+
|
|
1756
|
+
\`\`\`
|
|
1757
|
+
${code.length > 15e3 ? code.substring(0, 15e3) + "\n// ... [truncated]" : code}
|
|
1758
|
+
\`\`\`
|
|
1759
|
+
|
|
1760
|
+
Create comprehensive documentation for a ${audience}.`;
|
|
1761
|
+
try {
|
|
1762
|
+
const result = await createCompletion({
|
|
1763
|
+
systemPrompt,
|
|
1764
|
+
userMessage,
|
|
1765
|
+
model: "balanced",
|
|
1766
|
+
maxTokens: 5e3
|
|
1767
|
+
});
|
|
1768
|
+
const totalTokens = result.inputTokens + result.outputTokens;
|
|
1769
|
+
return markdownResult(result.text, totalTokens);
|
|
1770
|
+
} catch (error2) {
|
|
1771
|
+
const message = error2 instanceof Error ? error2.message : "Unknown error";
|
|
1772
|
+
return errorResult(`Failed to explain codebase: ${message}`);
|
|
1773
|
+
}
|
|
1774
|
+
},
|
|
1775
|
+
requiredScope: "tools:explain_codebase"
|
|
1776
|
+
};
|
|
1777
|
+
}
|
|
1778
|
+
});
|
|
1779
|
+
|
|
1780
|
+
// src/tools/validate-implementation.ts
|
|
1781
|
+
var validateImplementationTool;
|
|
1782
|
+
var init_validate_implementation = __esm({
|
|
1783
|
+
"src/tools/validate-implementation.ts"() {
|
|
1784
|
+
"use strict";
|
|
1785
|
+
init_types();
|
|
1786
|
+
init_client();
|
|
1787
|
+
validateImplementationTool = {
|
|
1788
|
+
tool: {
|
|
1789
|
+
name: "validate_implementation",
|
|
1790
|
+
description: `Validate that code implementation matches requirements.
|
|
1791
|
+
|
|
1792
|
+
Compares source code against PRD/requirements and produces:
|
|
1793
|
+
- Requirements coverage report
|
|
1794
|
+
- Gap analysis
|
|
1795
|
+
- Deviation documentation
|
|
1796
|
+
- Compliance checklist
|
|
1797
|
+
- Acceptance criteria verification
|
|
1798
|
+
|
|
1799
|
+
Best for: Pre-release validation and QA support.`,
|
|
1800
|
+
inputSchema: {
|
|
1801
|
+
type: "object",
|
|
1802
|
+
properties: {
|
|
1803
|
+
requirements: {
|
|
1804
|
+
type: "string",
|
|
1805
|
+
description: "PRD, user stories, or requirements to validate against"
|
|
1806
|
+
},
|
|
1807
|
+
code: {
|
|
1808
|
+
type: "string",
|
|
1809
|
+
description: "Implementation code to validate"
|
|
1810
|
+
},
|
|
1811
|
+
strictness: {
|
|
1812
|
+
type: "string",
|
|
1813
|
+
enum: ["lenient", "standard", "strict"],
|
|
1814
|
+
default: "standard",
|
|
1815
|
+
description: "How strictly to validate"
|
|
1816
|
+
},
|
|
1817
|
+
checkTypes: {
|
|
1818
|
+
type: "array",
|
|
1819
|
+
items: {
|
|
1820
|
+
type: "string",
|
|
1821
|
+
enum: [
|
|
1822
|
+
"functional",
|
|
1823
|
+
"edge_cases",
|
|
1824
|
+
"error_handling",
|
|
1825
|
+
"performance",
|
|
1826
|
+
"security",
|
|
1827
|
+
"accessibility",
|
|
1828
|
+
"ux"
|
|
1829
|
+
]
|
|
1830
|
+
},
|
|
1831
|
+
default: ["functional", "edge_cases", "error_handling"],
|
|
1832
|
+
description: "Types of validation to perform"
|
|
1833
|
+
}
|
|
1834
|
+
},
|
|
1835
|
+
required: ["requirements", "code"]
|
|
1836
|
+
}
|
|
1837
|
+
},
|
|
1838
|
+
handler: async (args, validation) => {
|
|
1839
|
+
const requirements = args.requirements;
|
|
1840
|
+
const code = args.code;
|
|
1841
|
+
const strictness = args.strictness || "standard";
|
|
1842
|
+
const checkTypes = args.checkTypes || [
|
|
1843
|
+
"functional",
|
|
1844
|
+
"edge_cases",
|
|
1845
|
+
"error_handling"
|
|
1846
|
+
];
|
|
1847
|
+
if (!requirements || requirements.trim().length === 0) {
|
|
1848
|
+
return errorResult("Requirements are required for validation");
|
|
1849
|
+
}
|
|
1850
|
+
if (!code || code.trim().length === 0) {
|
|
1851
|
+
return errorResult("Code is required for validation");
|
|
1852
|
+
}
|
|
1853
|
+
const strictnessGuide = {
|
|
1854
|
+
lenient: "Accept reasonable interpretations. Flag only clear deviations from requirements.",
|
|
1855
|
+
standard: "Check for requirement coverage and reasonable edge case handling. Flag gaps and concerns.",
|
|
1856
|
+
strict: "Exact requirement matching. Every stated requirement must be verifiably implemented. Flag any ambiguity."
|
|
1857
|
+
};
|
|
1858
|
+
const systemPrompt = `You are a QA lead expert at validating implementations against requirements.
|
|
1859
|
+
|
|
1860
|
+
Compare the code against the requirements and produce a validation report.
|
|
1861
|
+
|
|
1862
|
+
Strictness: ${strictnessGuide[strictness]}
|
|
1863
|
+
Validation Types: ${checkTypes.join(", ")}
|
|
1864
|
+
|
|
1865
|
+
Output Sections:
|
|
1866
|
+
|
|
1867
|
+
1. **Validation Summary**
|
|
1868
|
+
- Overall Score (0-100%)
|
|
1869
|
+
- Verdict: Pass / Pass with Concerns / Fail
|
|
1870
|
+
- Critical issues count
|
|
1871
|
+
|
|
1872
|
+
2. **Requirements Coverage Matrix**
|
|
1873
|
+
| Requirement | Status | Evidence | Notes |
|
|
1874
|
+
|-------------|--------|----------|-------|
|
|
1875
|
+
| REQ-1 | \u2705 Met | Line 42 | - |
|
|
1876
|
+
| REQ-2 | \u26A0\uFE0F Partial | - | Missing edge case |
|
|
1877
|
+
| REQ-3 | \u274C Not Met | - | Not implemented |
|
|
1878
|
+
|
|
1879
|
+
3. **Functional Validation** (if checked)
|
|
1880
|
+
- Each requirement with implementation evidence
|
|
1881
|
+
- Quote code that implements each requirement
|
|
1882
|
+
|
|
1883
|
+
4. **Edge Case Coverage** (if checked)
|
|
1884
|
+
- Expected edge cases from requirements
|
|
1885
|
+
- Which are handled in code
|
|
1886
|
+
- Which are missing
|
|
1887
|
+
|
|
1888
|
+
5. **Error Handling Review** (if checked)
|
|
1889
|
+
- Error scenarios from requirements
|
|
1890
|
+
- Implementation of each
|
|
1891
|
+
- Missing error handling
|
|
1892
|
+
|
|
1893
|
+
6. **Performance Validation** (if checked)
|
|
1894
|
+
- Performance requirements stated
|
|
1895
|
+
- Code patterns that address them
|
|
1896
|
+
- Potential performance issues
|
|
1897
|
+
|
|
1898
|
+
7. **Security Validation** (if checked)
|
|
1899
|
+
- Security requirements stated
|
|
1900
|
+
- Implementation verification
|
|
1901
|
+
- Security concerns found
|
|
1902
|
+
|
|
1903
|
+
8. **Accessibility Validation** (if checked)
|
|
1904
|
+
- A11y requirements stated
|
|
1905
|
+
- Implementation verification
|
|
1906
|
+
- Missing accessibility features
|
|
1907
|
+
|
|
1908
|
+
9. **UX Validation** (if checked)
|
|
1909
|
+
- UX requirements stated
|
|
1910
|
+
- Implementation verification
|
|
1911
|
+
- UX concerns
|
|
1912
|
+
|
|
1913
|
+
10. **Gaps & Deviations**
|
|
1914
|
+
- Requirements not implemented
|
|
1915
|
+
- Implementations that deviate from requirements
|
|
1916
|
+
- Over-engineering concerns
|
|
1917
|
+
|
|
1918
|
+
11. **Recommendations**
|
|
1919
|
+
- Prioritized list of issues to fix
|
|
1920
|
+
- Suggestions for improvement
|
|
1921
|
+
|
|
1922
|
+
12. **Acceptance Criteria Checklist**
|
|
1923
|
+
- [ ] Criterion 1: Status
|
|
1924
|
+
- [ ] Criterion 2: Status`;
|
|
1925
|
+
const reqTrunc = requirements.length > 8e3 ? requirements.substring(0, 8e3) + "\n... [truncated]" : requirements;
|
|
1926
|
+
const codeTrunc = code.length > 1e4 ? code.substring(0, 1e4) + "\n// ... [truncated]" : code;
|
|
1927
|
+
const userMessage = `Validate this implementation against the requirements:
|
|
1928
|
+
|
|
1929
|
+
## Requirements
|
|
1930
|
+
${reqTrunc}
|
|
1931
|
+
|
|
1932
|
+
## Implementation Code
|
|
1933
|
+
\`\`\`
|
|
1934
|
+
${codeTrunc}
|
|
1935
|
+
\`\`\`
|
|
1936
|
+
|
|
1937
|
+
Produce a comprehensive validation report.`;
|
|
1938
|
+
try {
|
|
1939
|
+
const result = await createCompletion({
|
|
1940
|
+
systemPrompt,
|
|
1941
|
+
userMessage,
|
|
1942
|
+
model: "balanced",
|
|
1943
|
+
maxTokens: 5e3
|
|
1944
|
+
});
|
|
1945
|
+
const totalTokens = result.inputTokens + result.outputTokens;
|
|
1946
|
+
return markdownResult(result.text, totalTokens);
|
|
1947
|
+
} catch (error2) {
|
|
1948
|
+
const message = error2 instanceof Error ? error2.message : "Unknown error";
|
|
1949
|
+
return errorResult(`Failed to validate implementation: ${message}`);
|
|
1950
|
+
}
|
|
1951
|
+
},
|
|
1952
|
+
requiredScope: "tools:validate_implementation"
|
|
1953
|
+
};
|
|
1954
|
+
}
|
|
1955
|
+
});
|
|
1956
|
+
|
|
1957
|
+
// src/tools/suggest-refactors.ts
|
|
1958
|
+
var suggestRefactorsTool;
|
|
1959
|
+
var init_suggest_refactors = __esm({
|
|
1960
|
+
"src/tools/suggest-refactors.ts"() {
|
|
1961
|
+
"use strict";
|
|
1962
|
+
init_types();
|
|
1963
|
+
init_client();
|
|
1964
|
+
suggestRefactorsTool = {
|
|
1965
|
+
tool: {
|
|
1966
|
+
name: "suggest_refactors",
|
|
1967
|
+
description: `Analyze code and suggest refactoring opportunities for product planning.
|
|
1968
|
+
|
|
1969
|
+
Takes source code and identifies:
|
|
1970
|
+
- Technical debt items
|
|
1971
|
+
- Refactoring opportunities
|
|
1972
|
+
- Architecture improvements
|
|
1973
|
+
- Performance optimizations
|
|
1974
|
+
- Maintainability issues
|
|
1975
|
+
|
|
1976
|
+
Produces PM-friendly documentation for sprint planning.
|
|
1977
|
+
|
|
1978
|
+
Best for: Technical debt prioritization and architecture evolution planning.`,
|
|
1979
|
+
inputSchema: {
|
|
1980
|
+
type: "object",
|
|
1981
|
+
properties: {
|
|
1982
|
+
code: {
|
|
1983
|
+
type: "string",
|
|
1984
|
+
description: "Source code to analyze for refactoring opportunities"
|
|
1985
|
+
},
|
|
1986
|
+
codeContext: {
|
|
1987
|
+
type: "string",
|
|
1988
|
+
description: "Context about the codebase or feature"
|
|
1989
|
+
},
|
|
1990
|
+
priorities: {
|
|
1991
|
+
type: "array",
|
|
1992
|
+
items: {
|
|
1993
|
+
type: "string",
|
|
1994
|
+
enum: [
|
|
1995
|
+
"performance",
|
|
1996
|
+
"maintainability",
|
|
1997
|
+
"scalability",
|
|
1998
|
+
"security",
|
|
1999
|
+
"testing",
|
|
2000
|
+
"readability"
|
|
2001
|
+
]
|
|
2002
|
+
},
|
|
2003
|
+
default: ["maintainability", "performance"],
|
|
2004
|
+
description: "What to prioritize in suggestions"
|
|
2005
|
+
},
|
|
2006
|
+
riskTolerance: {
|
|
2007
|
+
type: "string",
|
|
2008
|
+
enum: ["low", "medium", "high"],
|
|
2009
|
+
default: "medium",
|
|
2010
|
+
description: "Risk tolerance for suggested changes"
|
|
2011
|
+
},
|
|
2012
|
+
teamSize: {
|
|
2013
|
+
type: "string",
|
|
2014
|
+
enum: ["small", "medium", "large"],
|
|
2015
|
+
default: "medium",
|
|
2016
|
+
description: "Team size context for effort estimates"
|
|
2017
|
+
}
|
|
2018
|
+
},
|
|
2019
|
+
required: ["code"]
|
|
2020
|
+
}
|
|
2021
|
+
},
|
|
2022
|
+
handler: async (args, validation) => {
|
|
2023
|
+
const code = args.code;
|
|
2024
|
+
const codeContext = args.codeContext;
|
|
2025
|
+
const priorities = args.priorities || ["maintainability", "performance"];
|
|
2026
|
+
const riskTolerance = args.riskTolerance || "medium";
|
|
2027
|
+
const teamSize = args.teamSize || "medium";
|
|
2028
|
+
if (!code || code.trim().length === 0) {
|
|
2029
|
+
return errorResult("Source code is required for analysis");
|
|
2030
|
+
}
|
|
2031
|
+
const riskGuide = {
|
|
2032
|
+
low: "Only suggest low-risk, incremental improvements. No major restructuring.",
|
|
2033
|
+
medium: "Balance impact with risk. Include moderate restructuring where beneficial.",
|
|
2034
|
+
high: "Include high-impact architectural changes. Transformative improvements acceptable."
|
|
2035
|
+
};
|
|
2036
|
+
const systemPrompt = `You are a staff engineer specializing in code quality and technical debt management.
|
|
2037
|
+
|
|
2038
|
+
Analyze the code and suggest refactoring opportunities.
|
|
2039
|
+
|
|
2040
|
+
Priorities: ${priorities.join(", ")}
|
|
2041
|
+
Risk Tolerance: ${riskGuide[riskTolerance]}
|
|
2042
|
+
Team Context: ${teamSize} team
|
|
2043
|
+
${codeContext ? `Context: ${codeContext}` : ""}
|
|
2044
|
+
|
|
2045
|
+
Output Sections:
|
|
2046
|
+
|
|
2047
|
+
1. **Executive Summary**
|
|
2048
|
+
- Overall code health score (1-10)
|
|
2049
|
+
- Top 3 priorities
|
|
2050
|
+
- Estimated total refactoring effort
|
|
2051
|
+
|
|
2052
|
+
2. **Quick Wins** (Low effort, immediate impact)
|
|
2053
|
+
For each:
|
|
2054
|
+
- Issue description
|
|
2055
|
+
- Impact: [Performance/Maintainability/etc.]
|
|
2056
|
+
- Effort: Small
|
|
2057
|
+
- Risk: Low
|
|
2058
|
+
- Code location hint
|
|
2059
|
+
- Suggested approach
|
|
2060
|
+
|
|
2061
|
+
3. **Medium-Term Improvements** (Moderate effort)
|
|
2062
|
+
For each:
|
|
2063
|
+
- Issue description
|
|
2064
|
+
- Business impact (why PM should care)
|
|
2065
|
+
- Effort: Medium (sprint-sized)
|
|
2066
|
+
- Risk assessment
|
|
2067
|
+
- Dependencies
|
|
2068
|
+
- Suggested approach
|
|
2069
|
+
|
|
2070
|
+
4. **Strategic Refactors** (Significant effort)
|
|
2071
|
+
For each:
|
|
2072
|
+
- Current state and problem
|
|
2073
|
+
- Target state and benefits
|
|
2074
|
+
- Business justification
|
|
2075
|
+
- Effort: Large (multi-sprint)
|
|
2076
|
+
- Risk and mitigation
|
|
2077
|
+
- Incremental approach
|
|
2078
|
+
|
|
2079
|
+
5. **Technical Debt Inventory**
|
|
2080
|
+
| Item | Type | Severity | Effort | Risk | Priority |
|
|
2081
|
+
|------|------|----------|--------|------|----------|
|
|
2082
|
+
|
|
2083
|
+
6. **Anti-Patterns Identified**
|
|
2084
|
+
- Pattern name
|
|
2085
|
+
- Where found
|
|
2086
|
+
- Why it's problematic
|
|
2087
|
+
- Recommended pattern
|
|
2088
|
+
|
|
2089
|
+
7. **Testing Debt**
|
|
2090
|
+
- Missing test coverage areas
|
|
2091
|
+
- Suggested testing improvements
|
|
2092
|
+
|
|
2093
|
+
8. **Dependency Concerns**
|
|
2094
|
+
- Outdated dependencies
|
|
2095
|
+
- Security vulnerabilities risk
|
|
2096
|
+
- Upgrade recommendations
|
|
2097
|
+
|
|
2098
|
+
9. **Sprint Planning Recommendations**
|
|
2099
|
+
- Suggested sprint allocation for tech debt
|
|
2100
|
+
- Recommended sequence of refactors
|
|
2101
|
+
- What to pair with feature work
|
|
2102
|
+
|
|
2103
|
+
10. **Business Case Summary**
|
|
2104
|
+
- PM-friendly explanation of why these matter
|
|
2105
|
+
- Risk of not addressing
|
|
2106
|
+
- Velocity impact
|
|
2107
|
+
|
|
2108
|
+
Guidelines:
|
|
2109
|
+
- Frame everything in terms of business impact
|
|
2110
|
+
- Provide concrete before/after examples where helpful
|
|
2111
|
+
- Consider incremental approaches
|
|
2112
|
+
- Note any blockers or dependencies`;
|
|
2113
|
+
const userMessage = `Analyze this code and suggest refactoring opportunities:
|
|
2114
|
+
|
|
2115
|
+
\`\`\`
|
|
2116
|
+
${code.length > 15e3 ? code.substring(0, 15e3) + "\n// ... [truncated]" : code}
|
|
2117
|
+
\`\`\`
|
|
2118
|
+
|
|
2119
|
+
Provide comprehensive refactoring suggestions for sprint planning.`;
|
|
2120
|
+
try {
|
|
2121
|
+
const result = await createCompletion({
|
|
2122
|
+
systemPrompt,
|
|
2123
|
+
userMessage,
|
|
2124
|
+
model: "balanced",
|
|
2125
|
+
maxTokens: 5e3
|
|
2126
|
+
});
|
|
2127
|
+
const totalTokens = result.inputTokens + result.outputTokens;
|
|
2128
|
+
return markdownResult(result.text, totalTokens);
|
|
2129
|
+
} catch (error2) {
|
|
2130
|
+
const message = error2 instanceof Error ? error2.message : "Unknown error";
|
|
2131
|
+
return errorResult(`Failed to suggest refactors: ${message}`);
|
|
2132
|
+
}
|
|
2133
|
+
},
|
|
2134
|
+
requiredScope: "tools:suggest_refactors"
|
|
2135
|
+
};
|
|
2136
|
+
}
|
|
2137
|
+
});
|
|
2138
|
+
|
|
2139
|
+
// src/tools/generate-api-docs.ts
|
|
2140
|
+
var generateApiDocsTool;
|
|
2141
|
+
var init_generate_api_docs = __esm({
|
|
2142
|
+
"src/tools/generate-api-docs.ts"() {
|
|
2143
|
+
"use strict";
|
|
2144
|
+
init_types();
|
|
2145
|
+
init_client();
|
|
2146
|
+
generateApiDocsTool = {
|
|
2147
|
+
tool: {
|
|
2148
|
+
name: "generate_api_docs",
|
|
2149
|
+
description: `Generate API documentation from source code.
|
|
2150
|
+
|
|
2151
|
+
Analyzes API routes and generates:
|
|
2152
|
+
- OpenAPI 3.1 specification
|
|
2153
|
+
- Endpoint summaries
|
|
2154
|
+
- Request/response schemas
|
|
2155
|
+
- Authentication requirements
|
|
2156
|
+
- Example payloads
|
|
2157
|
+
|
|
2158
|
+
Supports REST APIs in TypeScript/JavaScript, Python, Go, and more.
|
|
2159
|
+
|
|
2160
|
+
Best for: Creating or updating API documentation from code.`,
|
|
2161
|
+
inputSchema: {
|
|
2162
|
+
type: "object",
|
|
2163
|
+
properties: {
|
|
2164
|
+
code: {
|
|
2165
|
+
type: "string",
|
|
2166
|
+
description: "API route code to analyze (routes, controllers, handlers)"
|
|
2167
|
+
},
|
|
2168
|
+
framework: {
|
|
2169
|
+
type: "string",
|
|
2170
|
+
enum: ["nextjs", "express", "fastify", "hono", "flask", "fastapi", "gin", "generic"],
|
|
2171
|
+
default: "generic",
|
|
2172
|
+
description: "API framework being used"
|
|
2173
|
+
},
|
|
2174
|
+
format: {
|
|
2175
|
+
type: "string",
|
|
2176
|
+
enum: ["openapi", "markdown", "both"],
|
|
2177
|
+
default: "openapi",
|
|
2178
|
+
description: "Output documentation format"
|
|
2179
|
+
},
|
|
2180
|
+
baseUrl: {
|
|
2181
|
+
type: "string",
|
|
2182
|
+
description: "Base URL for the API"
|
|
2183
|
+
},
|
|
2184
|
+
title: {
|
|
2185
|
+
type: "string",
|
|
2186
|
+
description: "API title for documentation"
|
|
2187
|
+
},
|
|
2188
|
+
version: {
|
|
2189
|
+
type: "string",
|
|
2190
|
+
default: "1.0.0",
|
|
2191
|
+
description: "API version"
|
|
2192
|
+
},
|
|
2193
|
+
includeExamples: {
|
|
2194
|
+
type: "boolean",
|
|
2195
|
+
default: true,
|
|
2196
|
+
description: "Whether to generate example payloads"
|
|
2197
|
+
}
|
|
2198
|
+
},
|
|
2199
|
+
required: ["code"]
|
|
2200
|
+
}
|
|
2201
|
+
},
|
|
2202
|
+
handler: async (args, validation) => {
|
|
2203
|
+
const code = args.code;
|
|
2204
|
+
const framework = args.framework || "generic";
|
|
2205
|
+
const format = args.format || "openapi";
|
|
2206
|
+
const baseUrl = args.baseUrl || "https://api.example.com";
|
|
2207
|
+
const title = args.title || "API Documentation";
|
|
2208
|
+
const version = args.version || "1.0.0";
|
|
2209
|
+
const includeExamples = args.includeExamples !== false;
|
|
2210
|
+
if (!code || code.trim().length === 0) {
|
|
2211
|
+
return errorResult("API code is required for documentation generation");
|
|
2212
|
+
}
|
|
2213
|
+
const frameworkHints = {
|
|
2214
|
+
nextjs: "Next.js App Router with route.ts files. Look for GET, POST, PUT, DELETE, PATCH exports.",
|
|
2215
|
+
express: "Express.js with app.get(), app.post(), router patterns.",
|
|
2216
|
+
fastify: "Fastify with fastify.get(), schema definitions.",
|
|
2217
|
+
hono: "Hono framework with app.get(), c.json() patterns.",
|
|
2218
|
+
flask: "Flask with @app.route decorators.",
|
|
2219
|
+
fastapi: "FastAPI with @app.get() decorators and Pydantic models.",
|
|
2220
|
+
gin: "Gin framework with r.GET(), r.POST() patterns.",
|
|
2221
|
+
generic: "Generic API code - infer patterns from the code."
|
|
2222
|
+
};
|
|
2223
|
+
const systemPrompt = `You are an API documentation expert who generates OpenAPI 3.1 specifications from code.
|
|
2224
|
+
|
|
2225
|
+
Framework: ${framework}
|
|
2226
|
+
${frameworkHints[framework]}
|
|
2227
|
+
|
|
2228
|
+
Generate a complete OpenAPI 3.1 specification with:
|
|
2229
|
+
1. All endpoints with methods (GET, POST, PUT, DELETE, PATCH)
|
|
2230
|
+
2. Path parameters extracted from route patterns
|
|
2231
|
+
3. Query parameters from code analysis
|
|
2232
|
+
4. Request body schemas from TypeScript types or validation
|
|
2233
|
+
5. Response schemas for success and error cases
|
|
2234
|
+
6. Authentication requirements if detected
|
|
2235
|
+
7. Logical grouping with tags
|
|
2236
|
+
${includeExamples ? "8. Realistic example values for all schemas" : ""}
|
|
2237
|
+
|
|
2238
|
+
Output a JSON object with:
|
|
2239
|
+
{
|
|
2240
|
+
"openapi": "3.1.0",
|
|
2241
|
+
"info": { "title": "${title}", "version": "${version}", "description": "..." },
|
|
2242
|
+
"servers": [{ "url": "${baseUrl}", "description": "..." }],
|
|
2243
|
+
"paths": { "/path": { "get": {...}, "post": {...} } },
|
|
2244
|
+
"components": { "schemas": {...}, "securitySchemes": {...} },
|
|
2245
|
+
"tags": [{ "name": "...", "description": "..." }]
|
|
2246
|
+
}
|
|
2247
|
+
|
|
2248
|
+
Be thorough - extract every endpoint, parameter, and type from the code.`;
|
|
2249
|
+
const userMessage = `Generate OpenAPI documentation from this ${framework} API code:
|
|
2250
|
+
|
|
2251
|
+
\`\`\`
|
|
2252
|
+
${code.length > 15e3 ? code.substring(0, 15e3) + "\n// ... [truncated]" : code}
|
|
2253
|
+
\`\`\`
|
|
2254
|
+
|
|
2255
|
+
Generate complete OpenAPI 3.1 specification.`;
|
|
2256
|
+
try {
|
|
2257
|
+
if (format === "markdown") {
|
|
2258
|
+
const mdPrompt = `You are an API documentation expert. Generate clear, comprehensive markdown API documentation.
|
|
2259
|
+
|
|
2260
|
+
Include:
|
|
2261
|
+
1. Overview and authentication
|
|
2262
|
+
2. Each endpoint with method, path, description
|
|
2263
|
+
3. Parameters table (name, type, required, description)
|
|
2264
|
+
4. Request body examples
|
|
2265
|
+
5. Response examples
|
|
2266
|
+
6. Error codes
|
|
2267
|
+
|
|
2268
|
+
Framework: ${framework}`;
|
|
2269
|
+
const result2 = await createCompletion({
|
|
2270
|
+
systemPrompt: mdPrompt,
|
|
2271
|
+
userMessage: `Document this API:
|
|
2272
|
+
|
|
2273
|
+
\`\`\`
|
|
2274
|
+
${code.length > 12e3 ? code.substring(0, 12e3) + "\n// ..." : code}
|
|
2275
|
+
\`\`\``,
|
|
2276
|
+
model: "balanced",
|
|
2277
|
+
maxTokens: 5e3
|
|
2278
|
+
});
|
|
2279
|
+
return markdownResult(result2.text, result2.inputTokens + result2.outputTokens);
|
|
2280
|
+
}
|
|
2281
|
+
const result = await createJsonCompletion({
|
|
2282
|
+
systemPrompt,
|
|
2283
|
+
userMessage,
|
|
2284
|
+
model: "balanced",
|
|
2285
|
+
maxTokens: 6e3
|
|
2286
|
+
});
|
|
2287
|
+
const totalTokens = result.inputTokens + result.outputTokens;
|
|
2288
|
+
if (format === "both") {
|
|
2289
|
+
const mdResult = await createCompletion({
|
|
2290
|
+
systemPrompt: "Summarize this OpenAPI spec as readable markdown documentation.",
|
|
2291
|
+
userMessage: JSON.stringify(result.data, null, 2),
|
|
2292
|
+
model: "fast",
|
|
2293
|
+
maxTokens: 2e3
|
|
2294
|
+
});
|
|
2295
|
+
return {
|
|
2296
|
+
content: [
|
|
2297
|
+
{
|
|
2298
|
+
type: "text",
|
|
2299
|
+
text: `# ${title}
|
|
2300
|
+
|
|
2301
|
+
${mdResult.text}
|
|
2302
|
+
|
|
2303
|
+
---
|
|
2304
|
+
|
|
2305
|
+
## OpenAPI Specification
|
|
2306
|
+
|
|
2307
|
+
\`\`\`json
|
|
2308
|
+
${JSON.stringify(result.data, null, 2)}
|
|
2309
|
+
\`\`\``
|
|
2310
|
+
}
|
|
2311
|
+
],
|
|
2312
|
+
tokensUsed: totalTokens + mdResult.inputTokens + mdResult.outputTokens
|
|
2313
|
+
};
|
|
2314
|
+
}
|
|
2315
|
+
return jsonResult(result.data, totalTokens);
|
|
2316
|
+
} catch (error2) {
|
|
2317
|
+
const message = error2 instanceof Error ? error2.message : "Unknown error";
|
|
2318
|
+
return errorResult(`Failed to generate API docs: ${message}`);
|
|
2319
|
+
}
|
|
2320
|
+
},
|
|
2321
|
+
requiredScope: "tools:generate_api_docs"
|
|
2322
|
+
};
|
|
2323
|
+
}
|
|
2324
|
+
});
|
|
2325
|
+
|
|
2326
|
+
// src/tools/dependency-audit.ts
|
|
2327
|
+
var dependencyAuditTool;
|
|
2328
|
+
var init_dependency_audit = __esm({
|
|
2329
|
+
"src/tools/dependency-audit.ts"() {
|
|
2330
|
+
"use strict";
|
|
2331
|
+
init_types();
|
|
2332
|
+
init_client();
|
|
2333
|
+
dependencyAuditTool = {
|
|
2334
|
+
tool: {
|
|
2335
|
+
name: "dependency_audit",
|
|
2336
|
+
description: `Audit project dependencies for security, updates, and health.
|
|
2337
|
+
|
|
2338
|
+
Analyzes package.json, requirements.txt, go.mod, or similar and provides:
|
|
2339
|
+
- Security vulnerability assessment
|
|
2340
|
+
- Outdated package identification
|
|
2341
|
+
- License compliance check
|
|
2342
|
+
- Bundle size impact analysis
|
|
2343
|
+
- Maintenance health scores
|
|
2344
|
+
- Upgrade recommendations with breaking change warnings
|
|
2345
|
+
|
|
2346
|
+
Best for: Pre-release dependency review and security audits.`,
|
|
2347
|
+
inputSchema: {
|
|
2348
|
+
type: "object",
|
|
2349
|
+
properties: {
|
|
2350
|
+
manifest: {
|
|
2351
|
+
type: "string",
|
|
2352
|
+
description: "Dependency manifest content (package.json, requirements.txt, etc.)"
|
|
2353
|
+
},
|
|
2354
|
+
lockfile: {
|
|
2355
|
+
type: "string",
|
|
2356
|
+
description: "Optional lockfile content for version pinning analysis"
|
|
2357
|
+
},
|
|
2358
|
+
manifestType: {
|
|
2359
|
+
type: "string",
|
|
2360
|
+
enum: ["npm", "yarn", "pnpm", "pip", "poetry", "go", "cargo", "maven", "gradle"],
|
|
2361
|
+
default: "npm",
|
|
2362
|
+
description: "Package manager type"
|
|
2363
|
+
},
|
|
2364
|
+
checkTypes: {
|
|
2365
|
+
type: "array",
|
|
2366
|
+
items: {
|
|
2367
|
+
type: "string",
|
|
2368
|
+
enum: ["security", "updates", "licenses", "maintenance", "size", "duplicates"]
|
|
2369
|
+
},
|
|
2370
|
+
default: ["security", "updates", "maintenance"],
|
|
2371
|
+
description: "Types of checks to perform"
|
|
2372
|
+
},
|
|
2373
|
+
severity: {
|
|
2374
|
+
type: "string",
|
|
2375
|
+
enum: ["all", "moderate", "high", "critical"],
|
|
2376
|
+
default: "moderate",
|
|
2377
|
+
description: "Minimum severity level to report"
|
|
2378
|
+
}
|
|
2379
|
+
},
|
|
2380
|
+
required: ["manifest"]
|
|
2381
|
+
}
|
|
2382
|
+
},
|
|
2383
|
+
handler: async (args, validation) => {
|
|
2384
|
+
const manifest = args.manifest;
|
|
2385
|
+
const lockfile = args.lockfile;
|
|
2386
|
+
const manifestType = args.manifestType || "npm";
|
|
2387
|
+
const checkTypes = args.checkTypes || ["security", "updates", "maintenance"];
|
|
2388
|
+
const severity = args.severity || "moderate";
|
|
2389
|
+
if (!manifest || manifest.trim().length === 0) {
|
|
2390
|
+
return errorResult("Dependency manifest is required");
|
|
2391
|
+
}
|
|
2392
|
+
const severityLevels = {
|
|
2393
|
+
all: "Report all issues including informational",
|
|
2394
|
+
moderate: "Report moderate severity and above",
|
|
2395
|
+
high: "Report only high and critical issues",
|
|
2396
|
+
critical: "Report only critical issues"
|
|
2397
|
+
};
|
|
2398
|
+
const systemPrompt = `You are a security-focused DevOps engineer performing a comprehensive dependency audit.
|
|
2399
|
+
|
|
2400
|
+
Package Manager: ${manifestType}
|
|
2401
|
+
Check Types: ${checkTypes.join(", ")}
|
|
2402
|
+
Severity Threshold: ${severityLevels[severity]}
|
|
2403
|
+
|
|
2404
|
+
Provide a thorough audit report with:
|
|
2405
|
+
|
|
2406
|
+
1. **Executive Summary**
|
|
2407
|
+
- Overall health score (A-F)
|
|
2408
|
+
- Critical issues count
|
|
2409
|
+
- Recommended actions
|
|
2410
|
+
|
|
2411
|
+
2. **Security Vulnerabilities** (if checked)
|
|
2412
|
+
| Package | Version | Vulnerability | Severity | CVE | Fix Version |
|
|
2413
|
+
|---------|---------|---------------|----------|-----|-------------|
|
|
2414
|
+
- Include known CVEs where applicable
|
|
2415
|
+
- Note if no known vulnerabilities
|
|
2416
|
+
|
|
2417
|
+
3. **Outdated Packages** (if checked)
|
|
2418
|
+
| Package | Current | Latest | Type | Breaking Changes |
|
|
2419
|
+
|---------|---------|--------|------|------------------|
|
|
2420
|
+
- Distinguish patch, minor, major updates
|
|
2421
|
+
- Flag breaking changes in major updates
|
|
2422
|
+
|
|
2423
|
+
4. **License Compliance** (if checked)
|
|
2424
|
+
| Package | License | Risk Level | Notes |
|
|
2425
|
+
|---------|---------|------------|-------|
|
|
2426
|
+
- Flag copyleft licenses (GPL, AGPL)
|
|
2427
|
+
- Note license compatibility issues
|
|
2428
|
+
|
|
2429
|
+
5. **Maintenance Health** (if checked)
|
|
2430
|
+
| Package | Last Update | Weekly Downloads | Issues | Health |
|
|
2431
|
+
|---------|-------------|------------------|--------|--------|
|
|
2432
|
+
- Flag abandoned packages (>2 years no update)
|
|
2433
|
+
- Note packages with many open issues
|
|
2434
|
+
|
|
2435
|
+
6. **Size Impact** (if checked)
|
|
2436
|
+
| Package | Size | Gzipped | Alternatives |
|
|
2437
|
+
|---------|------|---------|--------------|
|
|
2438
|
+
- Identify heavy dependencies
|
|
2439
|
+
- Suggest lighter alternatives
|
|
2440
|
+
|
|
2441
|
+
7. **Duplicate Dependencies** (if checked)
|
|
2442
|
+
- Identify packages included multiple times
|
|
2443
|
+
- Note version conflicts
|
|
2444
|
+
|
|
2445
|
+
8. **Recommendations**
|
|
2446
|
+
- Prioritized list of actions
|
|
2447
|
+
- Safe upgrade paths
|
|
2448
|
+
- Packages to consider replacing
|
|
2449
|
+
|
|
2450
|
+
Base your analysis on common knowledge about popular packages. Be conservative with security assessments - note when you're uncertain.`;
|
|
2451
|
+
let userMessage = `Audit these dependencies:
|
|
2452
|
+
|
|
2453
|
+
## Manifest (${manifestType})
|
|
2454
|
+
\`\`\`
|
|
2455
|
+
${manifest.length > 1e4 ? manifest.substring(0, 1e4) + "\n... [truncated]" : manifest}
|
|
2456
|
+
\`\`\``;
|
|
2457
|
+
if (lockfile) {
|
|
2458
|
+
userMessage += `
|
|
2459
|
+
|
|
2460
|
+
## Lockfile
|
|
2461
|
+
\`\`\`
|
|
2462
|
+
${lockfile.length > 5e3 ? lockfile.substring(0, 5e3) + "\n... [truncated]" : lockfile}
|
|
2463
|
+
\`\`\``;
|
|
2464
|
+
}
|
|
2465
|
+
userMessage += "\n\nProvide a comprehensive dependency audit.";
|
|
2466
|
+
try {
|
|
2467
|
+
const result = await createCompletion({
|
|
2468
|
+
systemPrompt,
|
|
2469
|
+
userMessage,
|
|
2470
|
+
model: "balanced",
|
|
2471
|
+
maxTokens: 5e3
|
|
2472
|
+
});
|
|
2473
|
+
const totalTokens = result.inputTokens + result.outputTokens;
|
|
2474
|
+
return markdownResult(result.text, totalTokens);
|
|
2475
|
+
} catch (error2) {
|
|
2476
|
+
const message = error2 instanceof Error ? error2.message : "Unknown error";
|
|
2477
|
+
return errorResult(`Failed to audit dependencies: ${message}`);
|
|
2478
|
+
}
|
|
2479
|
+
},
|
|
2480
|
+
requiredScope: "tools:dependency_audit"
|
|
2481
|
+
};
|
|
2482
|
+
}
|
|
2483
|
+
});
|
|
2484
|
+
|
|
2485
|
+
// src/tools/estimate-migration.ts
|
|
2486
|
+
var estimateMigrationTool;
|
|
2487
|
+
var init_estimate_migration = __esm({
|
|
2488
|
+
"src/tools/estimate-migration.ts"() {
|
|
2489
|
+
"use strict";
|
|
2490
|
+
init_types();
|
|
2491
|
+
init_client();
|
|
2492
|
+
estimateMigrationTool = {
|
|
2493
|
+
tool: {
|
|
2494
|
+
name: "estimate_migration",
|
|
2495
|
+
description: `Estimate the effort required for a code migration or major upgrade.
|
|
2496
|
+
|
|
2497
|
+
Analyzes current code and target state to provide:
|
|
2498
|
+
- Effort estimation (story points, days)
|
|
2499
|
+
- Risk assessment
|
|
2500
|
+
- Breaking changes analysis
|
|
2501
|
+
- Migration path recommendations
|
|
2502
|
+
- Rollback strategy
|
|
2503
|
+
|
|
2504
|
+
Supports: Framework upgrades, language migrations, architecture changes, dependency updates.
|
|
2505
|
+
|
|
2506
|
+
Best for: Sprint planning for major technical initiatives.`,
|
|
2507
|
+
inputSchema: {
|
|
2508
|
+
type: "object",
|
|
2509
|
+
properties: {
|
|
2510
|
+
currentCode: {
|
|
2511
|
+
type: "string",
|
|
2512
|
+
description: "Current codebase sample to analyze"
|
|
2513
|
+
},
|
|
2514
|
+
migrationType: {
|
|
2515
|
+
type: "string",
|
|
2516
|
+
enum: [
|
|
2517
|
+
"framework_upgrade",
|
|
2518
|
+
"language_migration",
|
|
2519
|
+
"architecture_change",
|
|
2520
|
+
"dependency_upgrade",
|
|
2521
|
+
"database_migration",
|
|
2522
|
+
"cloud_migration",
|
|
2523
|
+
"monolith_to_microservices",
|
|
2524
|
+
"other"
|
|
2525
|
+
],
|
|
2526
|
+
description: "Type of migration being planned"
|
|
2527
|
+
},
|
|
2528
|
+
from: {
|
|
2529
|
+
type: "string",
|
|
2530
|
+
description: 'Current technology/version (e.g., "React 17", "Python 2.7")'
|
|
2531
|
+
},
|
|
2532
|
+
to: {
|
|
2533
|
+
type: "string",
|
|
2534
|
+
description: 'Target technology/version (e.g., "React 18", "Python 3.11")'
|
|
2535
|
+
},
|
|
2536
|
+
codebaseSize: {
|
|
2537
|
+
type: "string",
|
|
2538
|
+
enum: ["small", "medium", "large", "enterprise"],
|
|
2539
|
+
default: "medium",
|
|
2540
|
+
description: "Approximate codebase size"
|
|
2541
|
+
},
|
|
2542
|
+
teamSize: {
|
|
2543
|
+
type: "number",
|
|
2544
|
+
default: 3,
|
|
2545
|
+
description: "Number of developers available for migration"
|
|
2546
|
+
},
|
|
2547
|
+
constraints: {
|
|
2548
|
+
type: "string",
|
|
2549
|
+
description: "Any constraints (timeline, budget, parallel development)"
|
|
2550
|
+
}
|
|
2551
|
+
},
|
|
2552
|
+
required: ["migrationType", "from", "to"]
|
|
2553
|
+
}
|
|
2554
|
+
},
|
|
2555
|
+
handler: async (args, validation) => {
|
|
2556
|
+
const currentCode = args.currentCode;
|
|
2557
|
+
const migrationType = args.migrationType;
|
|
2558
|
+
const from = args.from;
|
|
2559
|
+
const to = args.to;
|
|
2560
|
+
const codebaseSize = args.codebaseSize || "medium";
|
|
2561
|
+
const teamSize = args.teamSize || 3;
|
|
2562
|
+
const constraints = args.constraints;
|
|
2563
|
+
if (!migrationType || !from || !to) {
|
|
2564
|
+
return errorResult("Migration type, from, and to are required");
|
|
2565
|
+
}
|
|
2566
|
+
const sizeMultipliers = {
|
|
2567
|
+
small: "<10k LOC, few dependencies",
|
|
2568
|
+
medium: "10k-50k LOC, moderate complexity",
|
|
2569
|
+
large: "50k-200k LOC, many integrations",
|
|
2570
|
+
enterprise: ">200k LOC, complex architecture"
|
|
2571
|
+
};
|
|
2572
|
+
const systemPrompt = `You are a senior technical architect who specializes in migration planning.
|
|
2573
|
+
|
|
2574
|
+
Estimate the effort for migrating from "${from}" to "${to}".
|
|
2575
|
+
|
|
2576
|
+
Migration Type: ${migrationType}
|
|
2577
|
+
Codebase Size: ${codebaseSize} (${sizeMultipliers[codebaseSize]})
|
|
2578
|
+
Team Size: ${teamSize} developers
|
|
2579
|
+
${constraints ? `Constraints: ${constraints}` : ""}
|
|
2580
|
+
|
|
2581
|
+
Provide a comprehensive migration estimate with:
|
|
2582
|
+
|
|
2583
|
+
1. **Executive Summary**
|
|
2584
|
+
- Total estimated effort (story points and calendar time)
|
|
2585
|
+
- Risk level (Low/Medium/High/Critical)
|
|
2586
|
+
- Recommended approach (Big bang / Incremental / Strangler fig)
|
|
2587
|
+
|
|
2588
|
+
2. **Effort Breakdown**
|
|
2589
|
+
| Phase | Tasks | Story Points | Calendar Days | Parallel? |
|
|
2590
|
+
|-------|-------|--------------|---------------|-----------|
|
|
2591
|
+
- Phase 1: Assessment & Planning
|
|
2592
|
+
- Phase 2: Infrastructure Setup
|
|
2593
|
+
- Phase 3: Code Migration
|
|
2594
|
+
- Phase 4: Testing & Validation
|
|
2595
|
+
- Phase 5: Deployment & Cutover
|
|
2596
|
+
|
|
2597
|
+
3. **Breaking Changes Analysis**
|
|
2598
|
+
| Area | Impact | Effort | Notes |
|
|
2599
|
+
|------|--------|--------|-------|
|
|
2600
|
+
- API changes
|
|
2601
|
+
- Dependency updates
|
|
2602
|
+
- Configuration changes
|
|
2603
|
+
- Data format changes
|
|
2604
|
+
|
|
2605
|
+
4. **Risk Assessment**
|
|
2606
|
+
| Risk | Probability | Impact | Mitigation |
|
|
2607
|
+
|------|-------------|--------|------------|
|
|
2608
|
+
|
|
2609
|
+
5. **Migration Path**
|
|
2610
|
+
- Step-by-step migration strategy
|
|
2611
|
+
- Dependencies between steps
|
|
2612
|
+
- Parallel work opportunities
|
|
2613
|
+
|
|
2614
|
+
6. **Testing Strategy**
|
|
2615
|
+
- Unit test updates needed
|
|
2616
|
+
- Integration test requirements
|
|
2617
|
+
- Performance benchmarking
|
|
2618
|
+
- Regression testing approach
|
|
2619
|
+
|
|
2620
|
+
7. **Rollback Strategy**
|
|
2621
|
+
- Feature flags approach
|
|
2622
|
+
- Database rollback plan
|
|
2623
|
+
- Traffic shifting strategy
|
|
2624
|
+
|
|
2625
|
+
8. **Resource Requirements**
|
|
2626
|
+
- Skills needed
|
|
2627
|
+
- Infrastructure for parallel environments
|
|
2628
|
+
- Tools and automation
|
|
2629
|
+
|
|
2630
|
+
9. **Assumptions & Caveats**
|
|
2631
|
+
- What could change the estimate
|
|
2632
|
+
- Unknown risks
|
|
2633
|
+
|
|
2634
|
+
10. **Sprint Breakdown** (for ${teamSize} developers)
|
|
2635
|
+
- Sprint 1: ...
|
|
2636
|
+
- Sprint 2: ...
|
|
2637
|
+
- etc.
|
|
2638
|
+
|
|
2639
|
+
Be realistic and include buffer for unknowns. Better to overestimate than underestimate.`;
|
|
2640
|
+
let userMessage = `Estimate migration effort for:
|
|
2641
|
+
|
|
2642
|
+
**From:** ${from}
|
|
2643
|
+
**To:** ${to}
|
|
2644
|
+
**Type:** ${migrationType}`;
|
|
2645
|
+
if (currentCode) {
|
|
2646
|
+
userMessage += `
|
|
2647
|
+
|
|
2648
|
+
**Sample of Current Code:**
|
|
2649
|
+
\`\`\`
|
|
2650
|
+
${currentCode.length > 8e3 ? currentCode.substring(0, 8e3) + "\n// ..." : currentCode}
|
|
2651
|
+
\`\`\``;
|
|
2652
|
+
}
|
|
2653
|
+
userMessage += "\n\nProvide a comprehensive migration estimate.";
|
|
2654
|
+
try {
|
|
2655
|
+
const result = await createCompletion({
|
|
2656
|
+
systemPrompt,
|
|
2657
|
+
userMessage,
|
|
2658
|
+
model: "balanced",
|
|
2659
|
+
maxTokens: 5e3
|
|
2660
|
+
});
|
|
2661
|
+
const totalTokens = result.inputTokens + result.outputTokens;
|
|
2662
|
+
return markdownResult(result.text, totalTokens);
|
|
2663
|
+
} catch (error2) {
|
|
2664
|
+
const message = error2 instanceof Error ? error2.message : "Unknown error";
|
|
2665
|
+
return errorResult(`Failed to estimate migration: ${message}`);
|
|
2666
|
+
}
|
|
2667
|
+
},
|
|
2668
|
+
requiredScope: "tools:estimate_migration"
|
|
2669
|
+
};
|
|
2670
|
+
}
|
|
2671
|
+
});
|
|
2672
|
+
|
|
2673
|
+
// src/tools/index.ts
|
|
2674
|
+
var toolDefinitions, tools, toolHandlers;
|
|
2675
|
+
var init_tools = __esm({
|
|
2676
|
+
"src/tools/index.ts"() {
|
|
2677
|
+
"use strict";
|
|
2678
|
+
init_explode_backlog();
|
|
2679
|
+
init_infer_prd();
|
|
2680
|
+
init_synthesize_prd();
|
|
2681
|
+
init_generate_architecture();
|
|
2682
|
+
init_handoff_package();
|
|
2683
|
+
init_synthesize_requirements();
|
|
2684
|
+
init_review_prd();
|
|
2685
|
+
init_sync_to_tracker();
|
|
2686
|
+
init_reverse_engineer_flows();
|
|
2687
|
+
init_generate_test_specs();
|
|
2688
|
+
init_explain_codebase();
|
|
2689
|
+
init_validate_implementation();
|
|
2690
|
+
init_suggest_refactors();
|
|
2691
|
+
init_generate_api_docs();
|
|
2692
|
+
init_dependency_audit();
|
|
2693
|
+
init_estimate_migration();
|
|
2694
|
+
init_types();
|
|
2695
|
+
toolDefinitions = [
|
|
2696
|
+
// Phase 3: Initial tools
|
|
2697
|
+
explodeBacklogTool,
|
|
2698
|
+
inferPrdTool,
|
|
2699
|
+
synthesizePrdTool,
|
|
2700
|
+
generateArchitectureTool,
|
|
2701
|
+
handoffPackageTool,
|
|
2702
|
+
// Phase 4: Additional PM tools
|
|
2703
|
+
synthesizeRequirementsTool,
|
|
2704
|
+
reviewPrdTool,
|
|
2705
|
+
syncToTrackerTool,
|
|
2706
|
+
// Phase 5: Code-first tools
|
|
2707
|
+
reverseEngineerFlowsTool,
|
|
2708
|
+
generateTestSpecsTool,
|
|
2709
|
+
explainCodebaseTool,
|
|
2710
|
+
validateImplementationTool,
|
|
2711
|
+
suggestRefactorsTool,
|
|
2712
|
+
// Phase 6: Advanced tools
|
|
2713
|
+
generateApiDocsTool,
|
|
2714
|
+
dependencyAuditTool,
|
|
2715
|
+
estimateMigrationTool
|
|
2716
|
+
];
|
|
2717
|
+
tools = toolDefinitions.map((t) => t.tool);
|
|
2718
|
+
toolHandlers = Object.fromEntries(
|
|
2719
|
+
toolDefinitions.map((t) => [t.tool.name, t.handler])
|
|
2720
|
+
);
|
|
2721
|
+
}
|
|
2722
|
+
});
|
|
2723
|
+
|
|
2724
|
+
// src/server.ts
|
|
2725
|
+
var server_exports = {};
|
|
2726
|
+
__export(server_exports, {
|
|
2727
|
+
startServer: () => startServer
|
|
2728
|
+
});
|
|
2729
|
+
import { Server } from "@modelcontextprotocol/sdk/server/index.js";
|
|
2730
|
+
import { StdioServerTransport } from "@modelcontextprotocol/sdk/server/stdio.js";
|
|
2731
|
+
import {
|
|
2732
|
+
CallToolRequestSchema,
|
|
2733
|
+
ListToolsRequestSchema,
|
|
2734
|
+
ErrorCode,
|
|
2735
|
+
McpError
|
|
2736
|
+
} from "@modelcontextprotocol/sdk/types.js";
|
|
2737
|
+
function sanitizeArgs(args) {
|
|
2738
|
+
if (!args) return {};
|
|
2739
|
+
const sanitized = {};
|
|
2740
|
+
for (const [key, value] of Object.entries(args)) {
|
|
2741
|
+
if (key.toLowerCase().includes("key") || key.toLowerCase().includes("secret") || key.toLowerCase().includes("token")) {
|
|
2742
|
+
sanitized[key] = "[REDACTED]";
|
|
2743
|
+
} else if (typeof value === "string" && value.length > 500) {
|
|
2744
|
+
sanitized[key] = value.substring(0, 500) + "...[truncated]";
|
|
2745
|
+
} else {
|
|
2746
|
+
sanitized[key] = value;
|
|
2747
|
+
}
|
|
2748
|
+
}
|
|
2749
|
+
return sanitized;
|
|
2750
|
+
}
|
|
2751
|
+
async function startServer() {
|
|
2752
|
+
const transport = new StdioServerTransport();
|
|
2753
|
+
await server.connect(transport);
|
|
2754
|
+
console.error(`VasperaPM MCP Server v${VERSION} running`);
|
|
2755
|
+
}
|
|
2756
|
+
var VERSION, server;
|
|
2757
|
+
var init_server = __esm({
|
|
2758
|
+
"src/server.ts"() {
|
|
2759
|
+
"use strict";
|
|
2760
|
+
init_auth();
|
|
2761
|
+
init_usage();
|
|
2762
|
+
init_rate_limit();
|
|
2763
|
+
init_tools();
|
|
2764
|
+
VERSION = "0.1.1";
|
|
2765
|
+
server = new Server(
|
|
2766
|
+
{
|
|
2767
|
+
name: "vaspera-pm",
|
|
2768
|
+
version: VERSION
|
|
2769
|
+
},
|
|
2770
|
+
{
|
|
2771
|
+
capabilities: {
|
|
2772
|
+
tools: {}
|
|
2773
|
+
}
|
|
2774
|
+
}
|
|
2775
|
+
);
|
|
2776
|
+
server.setRequestHandler(ListToolsRequestSchema, async () => {
|
|
2777
|
+
return { tools };
|
|
2778
|
+
});
|
|
2779
|
+
server.setRequestHandler(CallToolRequestSchema, async (request) => {
|
|
2780
|
+
const { name, arguments: args } = request.params;
|
|
2781
|
+
const apiKey = process.env.VASPERA_API_KEY || args?._apiKey;
|
|
2782
|
+
const validation = await validateApiKey(apiKey);
|
|
2783
|
+
if (!validation.valid) {
|
|
2784
|
+
return {
|
|
2785
|
+
content: [{ type: "text", text: `Authentication failed: ${validation.error?.message || "Invalid API key"}` }],
|
|
2786
|
+
isError: true
|
|
2787
|
+
};
|
|
2788
|
+
}
|
|
2789
|
+
const rateCheck = await checkRateLimit(validation.userId, validation.tier);
|
|
2790
|
+
if (!rateCheck.allowed) {
|
|
2791
|
+
return {
|
|
2792
|
+
content: [{ type: "text", text: `Rate limit exceeded. ${rateCheck.remaining} calls remaining. Resets at ${rateCheck.resetsAt?.toISOString()}` }],
|
|
2793
|
+
isError: true
|
|
2794
|
+
};
|
|
2795
|
+
}
|
|
2796
|
+
if (validation.quota && validation.quota.remaining <= 0) {
|
|
2797
|
+
return {
|
|
2798
|
+
content: [{ type: "text", text: `Monthly quota exceeded. Upgrade your plan for more API calls.` }],
|
|
2799
|
+
isError: true
|
|
2800
|
+
};
|
|
2801
|
+
}
|
|
2802
|
+
const startTime = Date.now();
|
|
2803
|
+
let result;
|
|
2804
|
+
let success2 = true;
|
|
2805
|
+
let errorCode;
|
|
2806
|
+
let tokensUsed = 0;
|
|
2807
|
+
try {
|
|
2808
|
+
const handler = toolHandlers[name];
|
|
2809
|
+
if (!handler) {
|
|
2810
|
+
throw new McpError(ErrorCode.MethodNotFound, `Unknown tool: ${name}`);
|
|
2811
|
+
}
|
|
2812
|
+
const cleanArgs = { ...args };
|
|
2813
|
+
delete cleanArgs._apiKey;
|
|
2814
|
+
const toolResult = await handler(cleanArgs, validation);
|
|
2815
|
+
result = toolResult.content;
|
|
2816
|
+
tokensUsed = toolResult.tokensUsed || 0;
|
|
2817
|
+
} catch (error2) {
|
|
2818
|
+
success2 = false;
|
|
2819
|
+
errorCode = error2 instanceof McpError ? error2.code.toString() : "TOOL_ERROR";
|
|
2820
|
+
const errorMessage = error2 instanceof Error ? error2.message : "Unknown error";
|
|
2821
|
+
result = [{ type: "text", text: `Error: ${errorMessage}` }];
|
|
2822
|
+
}
|
|
2823
|
+
const latencyMs = Date.now() - startTime;
|
|
2824
|
+
trackUsage({
|
|
2825
|
+
userId: validation.userId,
|
|
2826
|
+
apiKeyId: validation.apiKeyId,
|
|
2827
|
+
toolName: name,
|
|
2828
|
+
tokensUsed,
|
|
2829
|
+
latencyMs,
|
|
2830
|
+
success: success2,
|
|
2831
|
+
errorCode,
|
|
2832
|
+
metadata: { args: sanitizeArgs(args) }
|
|
2833
|
+
}).catch((err) => {
|
|
2834
|
+
console.error("Failed to track usage:", err);
|
|
2835
|
+
});
|
|
2836
|
+
return {
|
|
2837
|
+
content: result,
|
|
2838
|
+
isError: !success2
|
|
2839
|
+
};
|
|
2840
|
+
});
|
|
2841
|
+
if (import.meta.url === `file://${process.argv[1]}`) {
|
|
2842
|
+
startServer().catch((error2) => {
|
|
2843
|
+
console.error("Failed to start MCP server:", error2);
|
|
2844
|
+
process.exit(1);
|
|
2845
|
+
});
|
|
2846
|
+
}
|
|
2847
|
+
}
|
|
2848
|
+
});
|
|
2849
|
+
|
|
2850
|
+
// src/cli.ts
|
|
2851
|
+
import { existsSync, readFileSync, writeFileSync, mkdirSync } from "fs";
|
|
2852
|
+
import { homedir } from "os";
|
|
2853
|
+
import { join } from "path";
|
|
2854
|
+
import { createInterface } from "readline";
|
|
2855
|
+
var BANNER = `
|
|
2856
|
+
\x1B[36m\u2554\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2557
|
|
2857
|
+
\u2551 \u2551
|
|
2858
|
+
\u2551 \u2588\u2588\u2557 \u2588\u2588\u2557 \u2588\u2588\u2588\u2588\u2588\u2557 \u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2557\u2588\u2588\u2588\u2588\u2588\u2588\u2557 \u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2557\u2588\u2588\u2588\u2588\u2588\u2588\u2557 \u2588\u2588\u2588\u2588\u2588\u2557 \u2551
|
|
2859
|
+
\u2551 \u2588\u2588\u2551 \u2588\u2588\u2551\u2588\u2588\u2554\u2550\u2550\u2588\u2588\u2557\u2588\u2588\u2554\u2550\u2550\u2550\u2550\u255D\u2588\u2588\u2554\u2550\u2550\u2588\u2588\u2557\u2588\u2588\u2554\u2550\u2550\u2550\u2550\u255D\u2588\u2588\u2554\u2550\u2550\u2588\u2588\u2557\u2588\u2588\u2554\u2550\u2550\u2588\u2588\u2557 \u2551
|
|
2860
|
+
\u2551 \u2588\u2588\u2551 \u2588\u2588\u2551\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2551\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2557\u2588\u2588\u2588\u2588\u2588\u2588\u2554\u255D\u2588\u2588\u2588\u2588\u2588\u2557 \u2588\u2588\u2588\u2588\u2588\u2588\u2554\u255D\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2551 \u2551
|
|
2861
|
+
\u2551 \u255A\u2588\u2588\u2557 \u2588\u2588\u2554\u255D\u2588\u2588\u2554\u2550\u2550\u2588\u2588\u2551\u255A\u2550\u2550\u2550\u2550\u2588\u2588\u2551\u2588\u2588\u2554\u2550\u2550\u2550\u255D \u2588\u2588\u2554\u2550\u2550\u255D \u2588\u2588\u2554\u2550\u2550\u2588\u2588\u2557\u2588\u2588\u2554\u2550\u2550\u2588\u2588\u2551 \u2551
|
|
2862
|
+
\u2551 \u255A\u2588\u2588\u2588\u2588\u2554\u255D \u2588\u2588\u2551 \u2588\u2588\u2551\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2551\u2588\u2588\u2551 \u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2557\u2588\u2588\u2551 \u2588\u2588\u2551\u2588\u2588\u2551 \u2588\u2588\u2551 \u2551
|
|
2863
|
+
\u2551 \u255A\u2550\u2550\u2550\u255D \u255A\u2550\u255D \u255A\u2550\u255D\u255A\u2550\u2550\u2550\u2550\u2550\u2550\u255D\u255A\u2550\u255D \u255A\u2550\u2550\u2550\u2550\u2550\u2550\u255D\u255A\u2550\u255D \u255A\u2550\u255D\u255A\u2550\u255D \u255A\u2550\u255D \u2551
|
|
2864
|
+
\u2551 \u2551
|
|
2865
|
+
\u2551 \x1B[33mAI-Powered Product Management\x1B[36m \u2551
|
|
2866
|
+
\u2551 \u2551
|
|
2867
|
+
\u255A\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u255D\x1B[0m
|
|
2868
|
+
`;
|
|
2869
|
+
var VERSION2 = "0.1.1";
|
|
2870
|
+
var HELP = `
|
|
2871
|
+
\x1B[1mUsage:\x1B[0m vasperapm <command> [options]
|
|
2872
|
+
|
|
2873
|
+
\x1B[1mCommands:\x1B[0m
|
|
2874
|
+
\x1B[32minstall\x1B[0m Configure VasperaPM with Claude Code
|
|
2875
|
+
\x1B[32mconnect\x1B[0m Set up your API key and integrations
|
|
2876
|
+
\x1B[32mserve\x1B[0m Start the MCP server (used by Claude Code)
|
|
2877
|
+
\x1B[32mstatus\x1B[0m Check your current configuration
|
|
2878
|
+
\x1B[32mhelp\x1B[0m Show this help message
|
|
2879
|
+
|
|
2880
|
+
\x1B[1mExamples:\x1B[0m
|
|
2881
|
+
$ vasperapm install # Set up VasperaPM with Claude Code
|
|
2882
|
+
$ vasperapm connect # Configure API key interactively
|
|
2883
|
+
$ vasperapm status # Check configuration status
|
|
2884
|
+
|
|
2885
|
+
\x1B[1mDocumentation:\x1B[0m
|
|
2886
|
+
https://github.com/rcolkitt/VasperaPM
|
|
2887
|
+
|
|
2888
|
+
\x1B[1mGet an API Key:\x1B[0m
|
|
2889
|
+
https://vasperapm.com/dashboard
|
|
2890
|
+
`;
|
|
2891
|
+
function getClaudeConfigPath() {
|
|
2892
|
+
return join(homedir(), ".claude", "claude_desktop_config.json");
|
|
2893
|
+
}
|
|
2894
|
+
function getVasperaConfigPath() {
|
|
2895
|
+
return join(homedir(), ".vasperapm", "config.json");
|
|
2896
|
+
}
|
|
2897
|
+
function readJsonFile(path) {
|
|
2898
|
+
try {
|
|
2899
|
+
if (!existsSync(path)) return null;
|
|
2900
|
+
return JSON.parse(readFileSync(path, "utf-8"));
|
|
2901
|
+
} catch {
|
|
2902
|
+
return null;
|
|
2903
|
+
}
|
|
2904
|
+
}
|
|
2905
|
+
function writeJsonFile(path, data) {
|
|
2906
|
+
const dir = path.substring(0, path.lastIndexOf("/"));
|
|
2907
|
+
if (!existsSync(dir)) {
|
|
2908
|
+
mkdirSync(dir, { recursive: true });
|
|
2909
|
+
}
|
|
2910
|
+
writeFileSync(path, JSON.stringify(data, null, 2));
|
|
2911
|
+
}
|
|
2912
|
+
function prompt(question) {
|
|
2913
|
+
const rl = createInterface({
|
|
2914
|
+
input: process.stdin,
|
|
2915
|
+
output: process.stdout
|
|
2916
|
+
});
|
|
2917
|
+
return new Promise((resolve) => {
|
|
2918
|
+
rl.question(question, (answer) => {
|
|
2919
|
+
rl.close();
|
|
2920
|
+
resolve(answer.trim());
|
|
2921
|
+
});
|
|
2922
|
+
});
|
|
2923
|
+
}
|
|
2924
|
+
function success(message) {
|
|
2925
|
+
console.log(`\x1B[32m\u2713\x1B[0m ${message}`);
|
|
2926
|
+
}
|
|
2927
|
+
function error(message) {
|
|
2928
|
+
console.log(`\x1B[31m\u2717\x1B[0m ${message}`);
|
|
2929
|
+
}
|
|
2930
|
+
function info(message) {
|
|
2931
|
+
console.log(`\x1B[36m\u2139\x1B[0m ${message}`);
|
|
2932
|
+
}
|
|
2933
|
+
function warn(message) {
|
|
2934
|
+
console.log(`\x1B[33m\u26A0\x1B[0m ${message}`);
|
|
2935
|
+
}
|
|
2936
|
+
async function install() {
|
|
2937
|
+
console.log(BANNER);
|
|
2938
|
+
console.log("\x1B[1mInstalling VasperaPM for Claude Code...\x1B[0m\n");
|
|
2939
|
+
const configPath = getClaudeConfigPath();
|
|
2940
|
+
let config = readJsonFile(configPath);
|
|
2941
|
+
if (!config) {
|
|
2942
|
+
config = { mcpServers: {} };
|
|
2943
|
+
info("Creating Claude Code configuration...");
|
|
2944
|
+
}
|
|
2945
|
+
if (!config.mcpServers) {
|
|
2946
|
+
config.mcpServers = {};
|
|
2947
|
+
}
|
|
2948
|
+
if (config.mcpServers["vaspera-pm"]) {
|
|
2949
|
+
warn("VasperaPM is already installed in Claude Code.");
|
|
2950
|
+
const answer = await prompt("\nDo you want to reinstall? (y/N): ");
|
|
2951
|
+
if (answer.toLowerCase() !== "y") {
|
|
2952
|
+
info("Installation cancelled.");
|
|
2953
|
+
return;
|
|
2954
|
+
}
|
|
2955
|
+
}
|
|
2956
|
+
const vasperaConfig = readJsonFile(getVasperaConfigPath());
|
|
2957
|
+
let apiKey = vasperaConfig?.apiKey || process.env.VASPERA_API_KEY || "";
|
|
2958
|
+
if (!apiKey) {
|
|
2959
|
+
console.log("\n\x1B[1mAPI Key Setup\x1B[0m");
|
|
2960
|
+
console.log("Get your API key at: \x1B[36mhttps://vasperapm.com/dashboard\x1B[0m\n");
|
|
2961
|
+
apiKey = await prompt("Enter your VasperaPM API key (or press Enter to use test mode): ");
|
|
2962
|
+
if (!apiKey) {
|
|
2963
|
+
apiKey = "vpm_test_local_development_key_for_testing";
|
|
2964
|
+
info("Using test mode API key (limited features)");
|
|
2965
|
+
}
|
|
2966
|
+
}
|
|
2967
|
+
config.mcpServers["vaspera-pm"] = {
|
|
2968
|
+
command: "vasperapm",
|
|
2969
|
+
args: ["serve"],
|
|
2970
|
+
env: {
|
|
2971
|
+
VASPERA_API_KEY: apiKey,
|
|
2972
|
+
NODE_ENV: apiKey.includes("test") ? "development" : "production"
|
|
2973
|
+
}
|
|
2974
|
+
};
|
|
2975
|
+
writeJsonFile(configPath, config);
|
|
2976
|
+
const vasperaDir = join(homedir(), ".vasperapm");
|
|
2977
|
+
if (!existsSync(vasperaDir)) {
|
|
2978
|
+
mkdirSync(vasperaDir, { recursive: true });
|
|
2979
|
+
}
|
|
2980
|
+
writeJsonFile(getVasperaConfigPath(), { apiKey });
|
|
2981
|
+
console.log("\n");
|
|
2982
|
+
success("VasperaPM installed successfully!");
|
|
2983
|
+
console.log("\n\x1B[1mNext steps:\x1B[0m");
|
|
2984
|
+
console.log(" 1. Restart Claude Code (or VSCode)");
|
|
2985
|
+
console.log(" 2. Look for VasperaPM tools in the MCP panel");
|
|
2986
|
+
console.log(' 3. Try: "Generate a PRD for a todo app"\n');
|
|
2987
|
+
console.log("\x1B[1mAvailable Tools:\x1B[0m");
|
|
2988
|
+
console.log(" \x1B[33m\u2022\x1B[0m generate_prd - Create PRDs from context");
|
|
2989
|
+
console.log(" \x1B[33m\u2022\x1B[0m infer_prd_from_code - Analyze code to generate PRD");
|
|
2990
|
+
console.log(" \x1B[33m\u2022\x1B[0m generate_code - Generate code from PRD");
|
|
2991
|
+
console.log(" \x1B[33m\u2022\x1B[0m sync_jira/linear/github - Sync with PM tools");
|
|
2992
|
+
console.log(" \x1B[33m\u2022\x1B[0m generate_api_spec - Create OpenAPI specs");
|
|
2993
|
+
console.log(" \x1B[33m\u2022\x1B[0m generate_test_cases - Create test plans\n");
|
|
2994
|
+
}
|
|
2995
|
+
async function connect() {
|
|
2996
|
+
console.log(BANNER);
|
|
2997
|
+
console.log("\x1B[1mConnect VasperaPM to your services...\x1B[0m\n");
|
|
2998
|
+
console.log("\x1B[1m1. API Key\x1B[0m");
|
|
2999
|
+
const vasperaConfig = readJsonFile(getVasperaConfigPath());
|
|
3000
|
+
if (vasperaConfig?.apiKey) {
|
|
3001
|
+
const masked = vasperaConfig.apiKey.substring(0, 12) + "..." + vasperaConfig.apiKey.substring(vasperaConfig.apiKey.length - 4);
|
|
3002
|
+
info(`Current API key: ${masked}`);
|
|
3003
|
+
const answer = await prompt("Update API key? (y/N): ");
|
|
3004
|
+
if (answer.toLowerCase() !== "y") {
|
|
3005
|
+
success("Keeping existing API key");
|
|
3006
|
+
} else {
|
|
3007
|
+
const newKey = await prompt("Enter new API key: ");
|
|
3008
|
+
if (newKey) {
|
|
3009
|
+
writeJsonFile(getVasperaConfigPath(), { ...vasperaConfig, apiKey: newKey });
|
|
3010
|
+
success("API key updated");
|
|
3011
|
+
}
|
|
3012
|
+
}
|
|
3013
|
+
} else {
|
|
3014
|
+
console.log("Get your API key at: \x1B[36mhttps://vasperapm.com/dashboard\x1B[0m\n");
|
|
3015
|
+
const apiKey = await prompt("Enter your VasperaPM API key: ");
|
|
3016
|
+
if (apiKey) {
|
|
3017
|
+
writeJsonFile(getVasperaConfigPath(), { apiKey });
|
|
3018
|
+
success("API key saved");
|
|
3019
|
+
} else {
|
|
3020
|
+
warn("No API key provided. Some features will be limited.");
|
|
3021
|
+
}
|
|
3022
|
+
}
|
|
3023
|
+
console.log("\n\x1B[1m2. Integrations\x1B[0m");
|
|
3024
|
+
console.log("Configure integrations at: \x1B[36mhttps://vasperapm.com/dashboard/integrations\x1B[0m\n");
|
|
3025
|
+
console.log(" \x1B[33m\u2022\x1B[0m Jira - Sync PRDs to Jira epics/stories");
|
|
3026
|
+
console.log(" \x1B[33m\u2022\x1B[0m Linear - Export tasks to Linear projects");
|
|
3027
|
+
console.log(" \x1B[33m\u2022\x1B[0m GitHub - Create issues and track progress\n");
|
|
3028
|
+
success("Connection setup complete!");
|
|
3029
|
+
console.log("\nRun \x1B[36mvasperapm status\x1B[0m to check your configuration.\n");
|
|
3030
|
+
}
|
|
3031
|
+
async function status() {
|
|
3032
|
+
console.log(BANNER);
|
|
3033
|
+
console.log("\x1B[1mVasperaPM Status\x1B[0m\n");
|
|
3034
|
+
const vasperaConfig = readJsonFile(getVasperaConfigPath());
|
|
3035
|
+
if (vasperaConfig?.apiKey) {
|
|
3036
|
+
const masked = vasperaConfig.apiKey.substring(0, 12) + "..." + vasperaConfig.apiKey.substring(vasperaConfig.apiKey.length - 4);
|
|
3037
|
+
success(`API Key: ${masked}`);
|
|
3038
|
+
if (vasperaConfig.apiKey.includes("_live_")) {
|
|
3039
|
+
info("Mode: Production");
|
|
3040
|
+
} else if (vasperaConfig.apiKey.includes("_test_")) {
|
|
3041
|
+
info("Mode: Test/Development");
|
|
3042
|
+
}
|
|
3043
|
+
} else {
|
|
3044
|
+
warn("API Key: Not configured");
|
|
3045
|
+
console.log(" Run \x1B[36mvasperapm connect\x1B[0m to set up your API key\n");
|
|
3046
|
+
}
|
|
3047
|
+
const claudeConfig = readJsonFile(getClaudeConfigPath());
|
|
3048
|
+
if (claudeConfig?.mcpServers?.["vaspera-pm"]) {
|
|
3049
|
+
success("Claude Code: Installed");
|
|
3050
|
+
} else {
|
|
3051
|
+
warn("Claude Code: Not installed");
|
|
3052
|
+
console.log(" Run \x1B[36mvasperapm install\x1B[0m to set up Claude Code integration\n");
|
|
3053
|
+
}
|
|
3054
|
+
console.log(`
|
|
3055
|
+
\x1B[1mVersion:\x1B[0m ${VERSION2}`);
|
|
3056
|
+
console.log("\x1B[1mDocs:\x1B[0m https://github.com/rcolkitt/VasperaPM\n");
|
|
3057
|
+
}
|
|
3058
|
+
async function serve() {
|
|
3059
|
+
const { startServer: startServer2 } = await Promise.resolve().then(() => (init_server(), server_exports));
|
|
3060
|
+
await startServer2();
|
|
3061
|
+
}
|
|
3062
|
+
async function main() {
|
|
3063
|
+
const args = process.argv.slice(2);
|
|
3064
|
+
const command = args[0]?.toLowerCase();
|
|
3065
|
+
switch (command) {
|
|
3066
|
+
case "install":
|
|
3067
|
+
await install();
|
|
3068
|
+
break;
|
|
3069
|
+
case "connect":
|
|
3070
|
+
await connect();
|
|
3071
|
+
break;
|
|
3072
|
+
case "serve":
|
|
3073
|
+
await serve();
|
|
3074
|
+
break;
|
|
3075
|
+
case "status":
|
|
3076
|
+
await status();
|
|
3077
|
+
break;
|
|
3078
|
+
case "help":
|
|
3079
|
+
case "--help":
|
|
3080
|
+
case "-h":
|
|
3081
|
+
console.log(BANNER);
|
|
3082
|
+
console.log(HELP);
|
|
3083
|
+
break;
|
|
3084
|
+
case "--version":
|
|
3085
|
+
case "-v":
|
|
3086
|
+
console.log(`vasperapm v${VERSION2}`);
|
|
3087
|
+
break;
|
|
3088
|
+
case void 0:
|
|
3089
|
+
console.log(BANNER);
|
|
3090
|
+
console.log(HELP);
|
|
3091
|
+
break;
|
|
3092
|
+
default:
|
|
3093
|
+
error(`Unknown command: ${command}`);
|
|
3094
|
+
console.log(HELP);
|
|
3095
|
+
process.exit(1);
|
|
3096
|
+
}
|
|
3097
|
+
}
|
|
3098
|
+
main().catch((err) => {
|
|
3099
|
+
error(err.message);
|
|
3100
|
+
process.exit(1);
|
|
3101
|
+
});
|
|
3102
|
+
//# sourceMappingURL=cli.js.map
|
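For reference, the `install` command bundled above writes an MCP server entry into ~/.claude/claude_desktop_config.json (via getClaudeConfigPath) and mirrors the API key into ~/.vasperapm/config.json. Reconstructed from that code, the resulting Claude Code entry has roughly this shape; the key value shown is a placeholder, not a real credential, and NODE_ENV is set to "development" instead when the key contains "test":

    {
      "mcpServers": {
        "vaspera-pm": {
          "command": "vasperapm",
          "args": ["serve"],
          "env": {
            "VASPERA_API_KEY": "vpm_live_<your-key>",
            "NODE_ENV": "production"
          }
        }
      }
    }

With that entry in place, Claude Code starts the server by running `vasperapm serve`, which (per the serve() function above) lazily loads the bundled src/server.ts module and connects it over stdio.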