@crowley/rag-mcp 1.0.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/api-client.d.ts +4 -0
- package/dist/api-client.js +19 -0
- package/dist/context-enrichment.d.ts +44 -0
- package/dist/context-enrichment.js +190 -0
- package/dist/formatters.d.ts +33 -0
- package/dist/formatters.js +70 -0
- package/dist/index.d.ts +13 -0
- package/dist/index.js +109 -0
- package/dist/tool-registry.d.ts +20 -0
- package/dist/tool-registry.js +123 -0
- package/dist/tools/advanced.d.ts +9 -0
- package/dist/tools/advanced.js +315 -0
- package/dist/tools/agents.d.ts +8 -0
- package/dist/tools/agents.js +97 -0
- package/dist/tools/analytics.d.ts +9 -0
- package/dist/tools/analytics.js +261 -0
- package/dist/tools/architecture.d.ts +5 -0
- package/dist/tools/architecture.js +720 -0
- package/dist/tools/ask.d.ts +9 -0
- package/dist/tools/ask.js +256 -0
- package/dist/tools/cache.d.ts +5 -0
- package/dist/tools/cache.js +98 -0
- package/dist/tools/clustering.d.ts +9 -0
- package/dist/tools/clustering.js +251 -0
- package/dist/tools/confluence.d.ts +9 -0
- package/dist/tools/confluence.js +147 -0
- package/dist/tools/database.d.ts +5 -0
- package/dist/tools/database.js +429 -0
- package/dist/tools/feedback.d.ts +9 -0
- package/dist/tools/feedback.js +220 -0
- package/dist/tools/guidelines.d.ts +5 -0
- package/dist/tools/guidelines.js +146 -0
- package/dist/tools/indexing.d.ts +9 -0
- package/dist/tools/indexing.js +129 -0
- package/dist/tools/memory.d.ts +9 -0
- package/dist/tools/memory.js +565 -0
- package/dist/tools/pm.d.ts +9 -0
- package/dist/tools/pm.js +680 -0
- package/dist/tools/review.d.ts +8 -0
- package/dist/tools/review.js +213 -0
- package/dist/tools/search.d.ts +9 -0
- package/dist/tools/search.js +377 -0
- package/dist/tools/session.d.ts +10 -0
- package/dist/tools/session.js +386 -0
- package/dist/tools/suggestions.d.ts +9 -0
- package/dist/tools/suggestions.js +301 -0
- package/dist/types.d.ts +32 -0
- package/dist/types.js +4 -0
- package/package.json +40 -0
package/dist/tools/pm.js
ADDED
@@ -0,0 +1,680 @@
/**
 * PM tools module - Product Management, requirements analysis, feature estimation,
 * spec generation, and project status tools.
 */
import { truncate, pct } from "../formatters.js";
/**
 * Create the PM tools module with project-specific descriptions.
 */
export function createPmTools(projectName) {
    const tools = [
        {
            name: "search_requirements",
            description: `Search technical requirements and product documentation for ${projectName}. Finds relevant requirements, user stories, and specifications from Confluence.`,
            inputSchema: {
                type: "object",
                properties: {
                    query: {
                        type: "string",
                        description: "Search query for requirements (e.g., 'video inspection flow', 'payment integration')",
                    },
                    limit: {
                        type: "number",
                        description: "Max results (default: 5)",
                        default: 5,
                    },
                },
                required: ["query"],
            },
        },
        {
            name: "analyze_requirements",
            description: `Analyze technical requirements and compare with existing implementation in ${projectName}. Identifies gaps, missing features, and implementation status.`,
            inputSchema: {
                type: "object",
                properties: {
                    feature: {
                        type: "string",
                        description: "Feature or requirement to analyze (e.g., 'video inspection', 'notifications')",
                    },
                    detailed: {
                        type: "boolean",
                        description: "Include detailed code references (default: false)",
                        default: false,
                    },
                },
                required: ["feature"],
            },
        },
        {
            name: "estimate_feature",
            description: `Estimate development effort for a feature based on requirements and codebase analysis. Returns complexity assessment, affected files, and risk factors.`,
            inputSchema: {
                type: "object",
                properties: {
                    feature: {
                        type: "string",
                        description: "Feature description to estimate",
                    },
                    includeSubtasks: {
                        type: "boolean",
                        description: "Break down into subtasks (default: true)",
                        default: true,
                    },
                },
                required: ["feature"],
            },
        },
        {
            name: "get_feature_status",
            description: `Get implementation status of a feature by comparing requirements with codebase. Shows what's implemented, in progress, and missing.`,
            inputSchema: {
                type: "object",
                properties: {
                    feature: {
                        type: "string",
                        description: "Feature name to check status",
                    },
                },
                required: ["feature"],
            },
        },
        {
            name: "list_requirements",
            description: `List all documented requirements/features for ${projectName} from Confluence. Groups by category or status.`,
            inputSchema: {
                type: "object",
                properties: {
                    category: {
                        type: "string",
                        description: "Filter by category (optional)",
                    },
                    limit: {
                        type: "number",
                        description: "Max results (default: 20)",
                        default: 20,
                    },
                },
            },
        },
        {
            name: "ask_pm",
            description: `Ask product management questions about ${projectName}. Answers questions about requirements, features, priorities, and project status using both documentation and codebase.`,
            inputSchema: {
                type: "object",
                properties: {
                    question: {
                        type: "string",
                        description: "PM question (e.g., 'What features are planned for video inspection?', 'What's the status of notifications?')",
                    },
                },
                required: ["question"],
            },
        },
        {
            name: "generate_spec",
            description: `Generate technical specification from requirements. Creates a structured spec document based on Confluence requirements and existing codebase patterns.`,
            inputSchema: {
                type: "object",
                properties: {
                    feature: {
                        type: "string",
                        description: "Feature to generate spec for",
                    },
                    format: {
                        type: "string",
                        enum: ["markdown", "jira", "brief"],
                        description: "Output format (default: markdown)",
                        default: "markdown",
                    },
                },
                required: ["feature"],
            },
        },
    ];
    const handlers = {
        search_requirements: async (args, ctx) => {
            const { query, limit = 5 } = args;
            const response = await ctx.api.post("/api/search", {
                collection: `${ctx.collectionPrefix}confluence`,
                query,
                limit,
            });
            const results = response.data.results;
            if (!results || results.length === 0) {
                return "No requirements found. Make sure Confluence documentation is indexed.";
            }
            return (`**Requirements Search: "${query}"**\n\n` +
                results
                    .map((r, i) => `### ${i + 1}. ${r.title || "Requirement"}\n` +
                    `**Relevance:** ${pct(r.score)}\n` +
                    `**Source:** ${r.url || "Confluence"}\n\n` +
                    truncate(r.content, 800))
                    .join("\n\n---\n\n"));
        },
        analyze_requirements: async (args, ctx) => {
            const { feature, detailed = false } = args;
            // Search requirements in Confluence
            const reqResponse = await ctx.api.post("/api/search", {
                collection: `${ctx.collectionPrefix}confluence`,
                query: feature,
                limit: 5,
            });
            // Search implementation in codebase
            const codeResponse = await ctx.api.post("/api/search", {
                collection: `${ctx.collectionPrefix}codebase`,
                query: feature,
                limit: detailed ? 10 : 5,
            });
            const requirements = reqResponse.data.results || [];
            const implementations = codeResponse.data.results || [];
            let result = `# Requirements Analysis: ${feature}\n\n`;
            result += `## Documented Requirements (${requirements.length} found)\n\n`;
            if (requirements.length === 0) {
                result += "_No documented requirements found in Confluence._\n\n";
            }
            else {
                requirements.forEach((r, i) => {
                    result += `### ${i + 1}. ${r.title || "Requirement"}\n`;
                    result += truncate(r.content, 400) + "\n\n";
                });
            }
            result += `## Implementation Status (${implementations.length} files found)\n\n`;
            if (implementations.length === 0) {
                result += "_No implementation found in codebase._\n\n";
            }
            else {
                implementations.forEach((r) => {
                    result += `- **${r.file}** (${pct(r.score)} match)\n`;
                    if (detailed) {
                        result +=
                            "```" +
                                (r.language || "") +
                                "\n" +
                                truncate(r.content, 300) +
                                "\n```\n";
                    }
                });
            }
            result += `\n## Summary\n`;
            result += `- Requirements documented: ${requirements.length > 0 ? "Yes" : "No"}\n`;
            result += `- Implementation found: ${implementations.length > 0 ? "Yes" : "No"}\n`;
            if (requirements.length > 0 && implementations.length === 0) {
                result += `\n**Gap detected:** Requirements exist but no implementation found.`;
            }
            else if (requirements.length === 0 && implementations.length > 0) {
                result += `\n**Warning:** Implementation exists but no documented requirements.`;
            }
            return result;
        },
        estimate_feature: async (args, ctx) => {
            const { feature, includeSubtasks = true } = args;
            // Search for related requirements
            const reqResponse = await ctx.api.post("/api/search", {
                collection: `${ctx.collectionPrefix}confluence`,
                query: feature,
                limit: 5,
            });
            // Search for related code
            const codeResponse = await ctx.api.post("/api/search", {
                collection: `${ctx.collectionPrefix}codebase`,
                query: feature,
                limit: 15,
            });
            // Search for related tests
            const testResponse = await ctx.api
                .post("/api/search", {
                collection: `${ctx.collectionPrefix}codebase`,
                query: `${feature} test spec`,
                limit: 10,
                filter: { must: [{ key: "file", match: { text: "test" } }] },
            })
                .catch(() => ({ data: { results: [] } }));
            const requirements = reqResponse.data.results || [];
            const relatedCode = codeResponse.data.results || [];
            const relatedTests = testResponse.data.results || [];
            // Analyze complexity based on findings
            const hasRequirements = requirements.length > 0;
            const hasExistingCode = relatedCode.length > 0;
            const hasTests = relatedTests.length > 0;
            const affectedFiles = Array.from(new Set(relatedCode.map((r) => r.payload?.file || r.file)));
            const testFiles = Array.from(new Set(relatedTests.map((r) => r.payload?.file || r.file)));
            // Advanced code complexity analysis
            let totalComplexityScore = 0;
            let totalIntegrationPoints = 0;
            const integrations = new Set();
            const complexFunctions = [];
            for (const result of relatedCode) {
                const content = result.payload?.content || result.content || "";
                // Count complexity indicators
                const ifCount = (content.match(/\bif\s*\(/g) || []).length;
                const elseCount = (content.match(/\belse\b/g) || []).length;
                const switchCount = (content.match(/\bswitch\s*\(/g) || []).length;
                const forCount = (content.match(/\bfor\s*\(/g) || []).length;
                const whileCount = (content.match(/\bwhile\s*\(/g) || []).length;
                const tryCount = (content.match(/\btry\s*\{/g) || []).length;
                const asyncCount = (content.match(/\basync\b/g) || []).length;
                const awaitCount = (content.match(/\bawait\b/g) || []).length;
                // Cyclomatic complexity approximation
                const complexity = 1 + ifCount + elseCount + switchCount + forCount + whileCount + tryCount;
                totalComplexityScore += complexity;
                // Track complex functions (rough estimate)
                if (complexity > 10) {
                    const funcMatch = content.match(/(?:function|const|async)\s+(\w+)/);
                    if (funcMatch) {
                        complexFunctions.push(`${result.payload?.file || result.file}: ${funcMatch[1]}() - complexity ~${complexity}`);
                    }
                }
                // Analyze integration points
                const imports = content.match(/import\s+.*from\s+['"]([^'"]+)['"]/g) || [];
                const requires = content.match(/require\s*\(['"]([^'"]+)['"]\)/g) || [];
                const apiCalls = content.match(/(?:axios|fetch|http|api)\.[a-z]+\(/gi) || [];
                const dbOps = content.match(/(?:prisma|mongoose|sequelize|knex|db)\.[a-z]+/gi) || [];
                const externalServices = content.match(/(?:redis|kafka|rabbitmq|queue|cache)\.[a-z]+/gi) || [];
                [...imports, ...requires].forEach((imp) => {
                    const match = imp.match(/['"]([^'"]+)['"]/);
                    if (match && !match[1].startsWith(".")) {
                        integrations.add(`Package: ${match[1]}`);
                    }
                });
                if (apiCalls.length > 0)
                    integrations.add("HTTP/API calls");
                if (dbOps.length > 0)
                    integrations.add("Database operations");
                if (externalServices.length > 0)
                    integrations.add("External services (cache/queue)");
                if (asyncCount > 3 || awaitCount > 3)
                    integrations.add("Heavy async operations");
                totalIntegrationPoints +=
                    imports.length + requires.length + apiCalls.length + dbOps.length;
            }
            // Determine complexity level
            const avgComplexity = affectedFiles.length > 0
                ? totalComplexityScore / relatedCode.length
                : 0;
            let complexity = "Low";
            let complexityScore = 0;
            // Factor 1: File count (0-30 points)
            if (affectedFiles.length > 15)
                complexityScore += 30;
            else if (affectedFiles.length > 8)
                complexityScore += 20;
            else if (affectedFiles.length > 3)
                complexityScore += 10;
            else
                complexityScore += 5;
            // Factor 2: Code complexity (0-30 points)
            if (avgComplexity > 15)
                complexityScore += 30;
            else if (avgComplexity > 8)
                complexityScore += 20;
            else if (avgComplexity > 4)
                complexityScore += 10;
            else
                complexityScore += 5;
            // Factor 3: Integration points (0-20 points)
            if (integrations.size > 6)
                complexityScore += 20;
            else if (integrations.size > 3)
                complexityScore += 15;
            else if (integrations.size > 1)
                complexityScore += 10;
            else
                complexityScore += 5;
            // Factor 4: Test coverage (0-20 points) - less tests = more risk
            const testRatio = affectedFiles.length > 0
                ? testFiles.length / affectedFiles.length
                : 0;
            if (testRatio < 0.2)
                complexityScore += 20;
            else if (testRatio < 0.5)
                complexityScore += 15;
            else if (testRatio < 0.8)
                complexityScore += 10;
            else
                complexityScore += 5;
            if (complexityScore >= 70)
                complexity = "Very High";
            else if (complexityScore >= 50)
                complexity = "High";
            else if (complexityScore >= 30)
                complexity = "Medium";
            else
                complexity = "Low";
            // Risk assessment
            const riskFactors = [];
            let riskScore = 0;
            if (!hasRequirements) {
                riskFactors.push("No documented requirements - scope unclear");
                riskScore += 25;
            }
            if (affectedFiles.length > 10) {
                riskFactors.push(`Wide impact: ${affectedFiles.length} files affected`);
                riskScore += 20;
            }
            if (!hasTests) {
                riskFactors.push("No existing tests found - regression risk");
                riskScore += 20;
            }
            if (integrations.has("Database operations")) {
                riskFactors.push("Database changes - migration complexity");
                riskScore += 15;
            }
            if (integrations.has("External services (cache/queue)")) {
                riskFactors.push("External service dependencies");
                riskScore += 15;
            }
            if (complexFunctions.length > 3) {
                riskFactors.push(`${complexFunctions.length} complex functions to modify`);
                riskScore += 15;
            }
            if (!hasExistingCode) {
                riskFactors.push("New development - no patterns to follow");
                riskScore += 10;
            }
            let riskLevel = "Low";
            if (riskScore >= 60)
                riskLevel = "Critical";
            else if (riskScore >= 40)
                riskLevel = "High";
            else if (riskScore >= 20)
                riskLevel = "Medium";
            // Build result
            let result = `# Feature Estimation: ${feature}\n\n`;
            result += `## Overview\n`;
            result += `| Metric | Value |\n`;
            result += `|--------|-------|\n`;
            result += `| Complexity | **${complexity}** (score: ${complexityScore}/100) |\n`;
            result += `| Risk Level | **${riskLevel}** (score: ${riskScore}/100) |\n`;
            result += `| Affected Files | ${affectedFiles.length} |\n`;
            result += `| Test Files | ${testFiles.length} (ratio: ${(testRatio * 100).toFixed(0)}%) |\n`;
            result += `| Integration Points | ${integrations.size} |\n`;
            result += `| Avg Cyclomatic Complexity | ${avgComplexity.toFixed(1)} |\n`;
            result += `| Requirements Documented | ${hasRequirements ? "Yes" : "No"} |\n\n`;
            if (integrations.size > 0) {
                result += `## Integration Points\n`;
                Array.from(integrations)
                    .slice(0, 10)
                    .forEach((i) => {
                    result += `- ${i}\n`;
                });
                result += "\n";
            }
            if (affectedFiles.length > 0) {
                result += `## Affected Files\n`;
                affectedFiles.slice(0, 15).forEach((f) => {
                    const hasTest = testFiles.some((t) => t.includes(f.replace(/\.(ts|js|py|go)$/, "")));
                    result += `- ${f} ${hasTest ? "(tested)" : "(no tests)"}\n`;
                });
                if (affectedFiles.length > 15) {
                    result += `- ... and ${affectedFiles.length - 15} more\n`;
                }
                result += "\n";
            }
            if (complexFunctions.length > 0) {
                result += `## Complex Functions (may need refactoring)\n`;
                complexFunctions.slice(0, 5).forEach((f) => {
                    result += `- ${f}\n`;
                });
                result += "\n";
            }
            result += `## Risk Factors\n`;
            if (riskFactors.length > 0) {
                riskFactors.forEach((r) => {
                    result += `- ${r}\n`;
                });
            }
            else {
                result += `- No significant risks identified\n`;
            }
            result += "\n";
            if (includeSubtasks) {
                result += `## Suggested Subtasks\n`;
                let taskNum = 1;
                result += `${taskNum++}. Review and clarify requirements\n`;
                if (!hasRequirements) {
                    result += `${taskNum++}. Document requirements\n`;
                }
                if (hasExistingCode) {
                    result += `${taskNum++}. Analyze existing implementation and complexity\n`;
                    if (complexFunctions.length > 0) {
                        result += `${taskNum++}. Refactor complex functions if needed\n`;
                    }
                    result += `${taskNum++}. Plan modifications\n`;
                }
                else {
                    result += `${taskNum++}. Design solution architecture\n`;
                    result += `${taskNum++}. Implement core functionality\n`;
                }
                if (integrations.has("Database operations")) {
                    result += `${taskNum++}. Create database migrations\n`;
                }
                result += `${taskNum++}. Write/update tests (target: >${affectedFiles.length} test cases)\n`;
                if (integrations.has("External services (cache/queue)")) {
                    result += `${taskNum++}. Integration testing with external services\n`;
                }
                result += `${taskNum++}. Code review & QA\n`;
                result += `${taskNum++}. Documentation update\n`;
            }
            return result;
        },
        get_feature_status: async (args, ctx) => {
            const { feature } = args;
            const reqResponse = await ctx.api.post("/api/search", {
                collection: `${ctx.collectionPrefix}confluence`,
                query: feature,
                limit: 3,
            });
            const codeResponse = await ctx.api.post("/api/search", {
                collection: `${ctx.collectionPrefix}codebase`,
                query: feature,
                limit: 5,
            });
            const requirements = reqResponse.data.results || [];
            const implementations = codeResponse.data.results || [];
            let status = "Unknown";
            let statusEmoji = "?";
            if (requirements.length > 0 && implementations.length > 0) {
                status = "Implemented";
                statusEmoji = "[DONE]";
            }
            else if (requirements.length > 0 && implementations.length === 0) {
                status = "Planned (Not Implemented)";
                statusEmoji = "[PLANNED]";
            }
            else if (requirements.length === 0 && implementations.length > 0) {
                status = "Implemented (Undocumented)";
                statusEmoji = "[WARN]";
            }
            else {
                status = "Not Found";
                statusEmoji = "[MISSING]";
            }
            let result = `# Feature Status: ${feature}\n\n`;
            result += `## ${statusEmoji} Status: ${status}\n\n`;
            if (requirements.length > 0) {
                result += `### Requirements\n`;
                requirements.forEach((r) => {
                    result += `- ${r.title || "Requirement"}: ${truncate(r.content, 150)}\n`;
                });
                result += "\n";
            }
            if (implementations.length > 0) {
                result += `### Implementation\n`;
                implementations.forEach((r) => {
                    result += `- ${r.file}\n`;
                });
            }
            return result;
        },
        list_requirements: async (args, ctx) => {
            const { category, limit = 20 } = args;
            const query = category || "requirements features specifications";
            const response = await ctx.api.post("/api/search", {
                collection: `${ctx.collectionPrefix}confluence`,
                query,
                limit,
            });
            const results = response.data.results || [];
            if (results.length === 0) {
                return "No requirements found in Confluence. Make sure documentation is indexed.";
            }
            let result = `# ${ctx.projectName} Requirements\n\n`;
            if (category) {
                result += `**Category filter:** ${category}\n\n`;
            }
            result += `**Found:** ${results.length} items\n\n`;
            results.forEach((r, i) => {
                result += `${i + 1}. **${r.title || "Untitled"}**\n`;
                result += `   ${truncate(r.content.replace(/\n/g, " "), 150)}\n`;
                if (r.url) {
                    result += `   [View in Confluence](${r.url})\n`;
                }
                result += "\n";
            });
            return result;
        },
        ask_pm: async (args, ctx) => {
            const { question } = args;
            // Search both requirements and codebase for context
            const [reqResponse, codeResponse] = await Promise.all([
                ctx.api.post("/api/search", {
                    collection: `${ctx.collectionPrefix}confluence`,
                    query: question,
                    limit: 5,
                }),
                ctx.api.post("/api/search", {
                    collection: `${ctx.collectionPrefix}codebase`,
                    query: question,
                    limit: 3,
                }),
            ]);
            const requirements = reqResponse.data.results || [];
            const code = codeResponse.data.results || [];
            // Use LLM to answer the question with context
            try {
                const response = await ctx.api.post("/api/ask", {
                    collection: `${ctx.collectionPrefix}confluence`,
                    question: `As a Product Manager, answer this question about the project:\n\n${question}\n\nUse the provided context from requirements documentation.`,
                });
                let result = `# PM Question: ${question}\n\n`;
                result += `## Answer\n${response.data.answer}\n\n`;
                if (requirements.length > 0) {
                    result += `## Related Documentation\n`;
                    requirements.slice(0, 3).forEach((r) => {
                        result += `- ${r.title || "Doc"}: ${truncate(r.content, 100)}\n`;
                    });
                }
                if (code.length > 0) {
                    result += `\n## Related Code\n`;
                    code.slice(0, 3).forEach((r) => {
                        result += `- ${r.file}\n`;
                    });
                }
                return result;
            }
            catch {
                // Fallback without LLM
                let result = `# PM Question: ${question}\n\n`;
                result += `## Related Information\n\n`;
                if (requirements.length > 0) {
                    result += `### From Requirements:\n`;
                    requirements.forEach((r) => {
                        result += `**${r.title || "Doc"}**\n${truncate(r.content, 300)}\n\n`;
                    });
                }
                return result;
            }
        },
        generate_spec: async (args, ctx) => {
            const { feature, format = "markdown" } = args;
            // Get requirements
            const reqResponse = await ctx.api.post("/api/search", {
                collection: `${ctx.collectionPrefix}confluence`,
                query: feature,
                limit: 5,
            });
            // Get existing code for patterns
            const codeResponse = await ctx.api.post("/api/search", {
                collection: `${ctx.collectionPrefix}codebase`,
                query: feature,
                limit: 5,
            });
            const requirements = reqResponse.data.results || [];
            const code = codeResponse.data.results || [];
            // Build context for LLM
            const requirementsContext = requirements.length > 0
                ? requirements.map((r) => r.content).join("\n---\n")
                : "No documented requirements found.";
            const codeContext = code.length > 0
                ? code
                    .map((c) => `File: ${c.file}\n${truncate(c.content, 300)}`)
                    .join("\n---\n")
                : "No existing implementation found.";
            // Use LLM to generate real specification
            const specPrompt = `Generate a detailed technical specification for: "${feature}"

Requirements from documentation:
${requirementsContext}

Existing code context:
${codeContext}

Generate a complete specification including:
1. Overview and objectives
2. Detailed functional requirements with acceptance criteria
3. Technical approach with specific implementation details
4. API contracts (if applicable)
5. Database changes (if applicable)
6. Testing strategy
7. Rollout considerations`;
            try {
                const llmResponse = await ctx.api.post("/api/ask", {
                    collection: `${ctx.collectionPrefix}codebase`,
                    question: specPrompt,
                });
                let result = `# Technical Specification: ${feature}\n\n`;
                if (format === "jira") {
                    // Convert to Jira format
                    result = `h1. ${feature}\n\n`;
                    result += llmResponse.data.answer
                        .replace(/^## /gm, "h2. ")
                        .replace(/^### /gm, "h3. ")
                        .replace(/^- \[ \]/gm, "* [ ]")
                        .replace(/^- /gm, "* ");
                }
                else if (format === "brief") {
                    // Brief summary
                    const answer = llmResponse.data.answer;
                    const firstParagraph = answer.split("\n\n")[0] || answer.slice(0, 300);
                    result = `**${feature}**\n\n${firstParagraph}\n\n`;
                    result += `**Files affected:** ${code.map((c) => c.file).join(", ") || "New implementation"}`;
                }
                else {
                    // Full markdown
                    result += llmResponse.data.answer;
                    // Add appendix with source files
                    if (code.length > 0) {
                        result += `\n\n---\n## Appendix: Related Files\n`;
                        code.forEach((c) => {
                            result += `- \`${c.file}\`\n`;
                        });
                    }
                }
                return result;
            }
            catch {
                // Fallback to template if LLM fails
                let result = `# Technical Specification: ${feature}\n\n`;
                result += `## 1. Overview\n${truncate(requirements[0]?.content, 500) || "_Add feature overview_"}\n\n`;
                result += `## 2. Requirements\n_LLM generation failed. Add requirements manually._\n\n`;
                result += `## 3. Affected Files\n`;
                code.forEach((c) => {
                    result += `- \`${c.file}\`\n`;
                });
                return result;
            }
        },
    };
    return { tools, handlers };
}
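A minimal wiring sketch, not part of the package: createPmTools returns a plain { tools, handlers } pair, and each handler is an async (args, ctx) function that only relies on ctx.api.post(), ctx.collectionPrefix, and ctx.projectName. The import path, base URL, and prefix below are illustrative assumptions; the real wiring lives in dist/index.js and dist/tool-registry.js, which are not expanded here.

// Hypothetical usage sketch (TypeScript/ESM). Assumes an axios-style client.
import axios from "axios";
import { createPmTools } from "@crowley/rag-mcp/dist/tools/pm.js"; // illustrative path

async function main() {
    const { tools, handlers } = createPmTools("demo-project");
    console.log(tools.map((t) => t.name)); // search_requirements, analyze_requirements, ...

    const ctx = {
        api: axios.create({ baseURL: "http://localhost:8000" }), // assumed RAG backend with /api/search and /api/ask
        collectionPrefix: "demo_project_", // handlers append "confluence" / "codebase" to this
        projectName: "demo-project",
    };

    // Handlers resolve to a markdown string built from the search results.
    const report = await handlers.estimate_feature({ feature: "video inspection", includeSubtasks: true }, ctx);
    console.log(report);
}

main().catch(console.error);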
package/dist/tools/review.d.ts
ADDED
@@ -0,0 +1,8 @@
/**
 * Review & testing tools module - code review, test generation, and test analysis.
 */
import type { ToolModule } from "../types.js";
/**
 * Create the review & testing tools module with project-specific descriptions.
 */
export declare function createReviewTools(projectName: string): ToolModule;
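The ToolModule type is declared in dist/types.d.ts, which this diff lists but does not expand. The sketch below is an inference only, reconstructed from the { tools, handlers } object that createPmTools returns and the ctx fields its handlers read; the actual declarations in types.d.ts may differ.

// Inferred shape, not copied from the package.
interface ToolDefinition {
    name: string;
    description: string;
    inputSchema: Record<string, unknown>; // JSON Schema for the tool's arguments
}

interface ToolContext {
    api: { post(path: string, body: unknown): Promise<{ data: any }> }; // RAG backend client
    collectionPrefix: string; // prepended to "confluence" / "codebase" collection names
    projectName: string;
}

interface ToolModule {
    tools: ToolDefinition[];
    handlers: Record<string, (args: any, ctx: ToolContext) => Promise<string>>;
}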