@aiready/pattern-detect 0.16.18 → 0.16.20

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (49)
  1. package/dist/analyzer-entry-BVz-HnZd.d.mts +119 -0
  2. package/dist/analyzer-entry-BwuoiCNm.d.ts +119 -0
  3. package/dist/analyzer-entry.d.mts +3 -0
  4. package/dist/analyzer-entry.d.ts +3 -0
  5. package/dist/analyzer-entry.js +693 -0
  6. package/dist/analyzer-entry.mjs +12 -0
  7. package/dist/chunk-65UQ5J2J.mjs +64 -0
  8. package/dist/chunk-6JTVOBJX.mjs +64 -0
  9. package/dist/chunk-BKRPSTT2.mjs +64 -0
  10. package/dist/chunk-CMWW24HW.mjs +259 -0
  11. package/dist/chunk-DNZS4ESD.mjs +391 -0
  12. package/dist/chunk-GLKAGFKX.mjs +391 -0
  13. package/dist/chunk-GREN7X5H.mjs +143 -0
  14. package/dist/chunk-I6ETJC7L.mjs +179 -0
  15. package/dist/chunk-JBUZ6YHE.mjs +391 -0
  16. package/dist/chunk-KWMNN3TG.mjs +391 -0
  17. package/dist/chunk-LYKRYBSM.mjs +64 -0
  18. package/dist/chunk-MHU3CL4R.mjs +64 -0
  19. package/dist/chunk-RS73WLNI.mjs +251 -0
  20. package/dist/chunk-SVCSIZ2A.mjs +259 -0
  21. package/dist/chunk-THF4RW63.mjs +254 -0
  22. package/dist/chunk-UB3CGOQ7.mjs +64 -0
  23. package/dist/chunk-VGMM3L3O.mjs +143 -0
  24. package/dist/chunk-WBBO35SC.mjs +112 -0
  25. package/dist/chunk-WMOGJFME.mjs +391 -0
  26. package/dist/chunk-XNPID6FU.mjs +391 -0
  27. package/dist/cli.js +62 -219
  28. package/dist/cli.mjs +72 -97
  29. package/dist/context-rules-entry-y2uJSngh.d.mts +60 -0
  30. package/dist/context-rules-entry-y2uJSngh.d.ts +60 -0
  31. package/dist/context-rules-entry.d.mts +2 -0
  32. package/dist/context-rules-entry.d.ts +2 -0
  33. package/dist/context-rules-entry.js +207 -0
  34. package/dist/context-rules-entry.mjs +12 -0
  35. package/dist/detector-entry.d.mts +14 -0
  36. package/dist/detector-entry.d.ts +14 -0
  37. package/dist/detector-entry.js +301 -0
  38. package/dist/detector-entry.mjs +7 -0
  39. package/dist/index.d.mts +7 -235
  40. package/dist/index.d.ts +7 -235
  41. package/dist/index.js +9 -126
  42. package/dist/index.mjs +17 -9
  43. package/dist/scoring-entry.d.mts +23 -0
  44. package/dist/scoring-entry.d.ts +23 -0
  45. package/dist/scoring-entry.js +133 -0
  46. package/dist/scoring-entry.mjs +6 -0
  47. package/dist/types-DU2mmhwb.d.mts +36 -0
  48. package/dist/types-DU2mmhwb.d.ts +36 -0
  49. package/package.json +24 -4
@@ -0,0 +1,254 @@
1
+ import {
2
+ calculateSeverity
3
+ } from "./chunk-I6ETJC7L.mjs";
4
+
5
+ // src/detector.ts
6
+ import { estimateTokens } from "@aiready/core";
7
// Normalizes source code for similarity comparison: strips comments
// (language-aware), unifies quote characters, collapses whitespace, and
// lowercases. Returns "" for null/undefined/empty input instead of throwing
// (matches the guarded normalizer shipped in the companion chunk).
//
// code     - raw source text of a block (may be null/undefined)
// isPython - when true, strip `#` line comments; otherwise strip `//` and
//            `/* ... */` comments
function normalizeCode(code, isPython = false) {
  if (!code) return "";
  let normalized = code;
  if (isPython) {
    normalized = normalized.replace(/#.*/g, "");
  } else {
    normalized = normalized.replace(/\/\/.*/g, "").replace(/\/\*[\s\S]*?\*\//g, "");
  }
  // Quote unification means 'x', "x" and `x` compare equal after this step.
  return normalized.replace(/['"`]/g, '"').replace(/\s+/g, " ").trim().toLowerCase();
}
16
// Extracts named top-level blocks (functions, classes, const schema objects,
// Express-style route registrations) from a source file. Python files are
// delegated to extractBlocksPython, which uses indentation instead of braces.
// Returns { file, startLine, endLine, code, tokens, patternType } records
// with 1-based inclusive line numbers.
function extractBlocks(file, content) {
  const isPython = file.toLowerCase().endsWith(".py");
  if (isPython) {
    return extractBlocksPython(file, content);
  }
  const blocks = [];
  const lines = content.split("\n");
  // Three alternatives:
  //   1. keyword-or-type + name followed by "(" or "{" (functions, classes,
  //      interfaces, C#-style typed declarations, ...)
  //   2. `const name = X.object(` — schema-builder objects
  //   3. `app.<verb>(` — Express route handlers
  // FIX: the type alternative previously read `[a-zA-Z0-9_<>[]]+`, where the
  // unescaped `]` terminated the character class early, so that branch only
  // matched a single identifier char followed by literal "]" characters and
  // plain type names (e.g. `string name(`) never matched. The inner `]` is
  // now escaped.
  const blockRegex = /^\s*(?:export\s+)?(?:async\s+)?(?:public\s+|private\s+|protected\s+|internal\s+|static\s+|readonly\s+|virtual\s+|abstract\s+|override\s+)*(function|class|interface|type|enum|record|struct|void|func|[a-zA-Z0-9_<>[\]]+)\s+([a-zA-Z0-9_]+)(?:\s*\(|(?:\s+extends|\s+implements|\s+where)?\s*\{)|^\s*(?:export\s+)?const\s+([a-zA-Z0-9_]+)\s*=\s*[a-zA-Z0-9_.]+\.object\(|^\s*(app\.(?:get|post|put|delete|patch|use))\(/gm;
  let match;
  while ((match = blockRegex.exec(content)) !== null) {
    // 1-based line number where the match starts.
    const startLine = content.substring(0, match.index).split("\n").length;
    let type;
    let name;
    if (match[1]) {
      type = match[1];
      name = match[2];
    } else if (match[3]) {
      type = "const";
      name = match[3];
    } else {
      type = "handler";
      name = match[4];
    }
    // Walk forward from the match counting braces to find where the block
    // ends (first point where the brace depth returns to zero).
    let endLine = -1;
    let openBraces = 0;
    let foundStart = false;
    for (let i = match.index; i < content.length; i++) {
      if (content[i] === "{") {
        openBraces++;
        foundStart = true;
      } else if (content[i] === "}") {
        openBraces--;
      }
      if (foundStart && openBraces === 0) {
        endLine = content.substring(0, i + 1).split("\n").length;
        break;
      }
    }
    // No brace pair found (e.g. a one-line type alias): fall back to the
    // match's own line, or to end-of-file when the match is on the last line.
    if (endLine === -1) {
      const remaining = content.slice(match.index);
      const nextLineMatch = remaining.indexOf("\n");
      if (nextLineMatch !== -1) {
        endLine = startLine;
      } else {
        endLine = lines.length;
      }
    }
    endLine = Math.max(startLine, endLine);
    const blockCode = lines.slice(startLine - 1, endLine).join("\n");
    const tokens = estimateTokens(blockCode);
    blocks.push({
      file,
      startLine,
      endLine,
      code: blockCode,
      tokens,
      patternType: inferPatternType(type, name)
    });
  }
  return blocks;
}
77
// Extracts Python `def`/`class` blocks from a file using indentation to find
// each block's extent. Returns the same record shape as extractBlocks, with
// 1-based inclusive line numbers and trailing blank lines trimmed.
function extractBlocksPython(file, content) {
  const lines = content.split("\n");
  const found = [];
  const defPattern = /^\s*(?:async\s+)?(def|class)\s+([a-zA-Z0-9_]+)/gm;
  for (const m of content.matchAll(defPattern)) {
    // 1-based line number of the `def`/`class` header.
    const headerLineNo = content.substring(0, m.index).split("\n").length;
    const headerIdx = headerLineNo - 1;
    const baseIndent = lines[headerIdx].search(/\S/);
    // Scan downward: the block continues through blank lines and any line
    // indented deeper than the header; it ends at the first non-blank line
    // at the header's indent level or shallower.
    let lastIdx = headerIdx;
    let i = headerIdx + 1;
    while (i < lines.length) {
      const text = lines[i];
      if (text.trim().length === 0) {
        lastIdx = i;
        i++;
        continue;
      }
      if (text.search(/\S/) <= baseIndent) break;
      lastIdx = i;
      i++;
    }
    // Trim trailing blank lines back off the block.
    while (lastIdx > headerIdx && lines[lastIdx].trim().length === 0) lastIdx--;
    const code = lines.slice(headerIdx, lastIdx + 1).join("\n");
    found.push({
      file,
      startLine: headerLineNo,
      endLine: lastIdx + 1,
      code,
      tokens: estimateTokens(code),
      patternType: inferPatternType(m[1], m[2])
    });
  }
  return found;
}
115
// Categorizes a code block from its declaring keyword and its name.
// Checks run in priority order; the first match wins.
function inferPatternType(keyword, name) {
  const n = name.toLowerCase();
  if (keyword === "handler" || n.includes("handler") || n.includes("controller") || n.startsWith("app.")) {
    return "api-handler";
  }
  if (n.includes("validate") || n.includes("schema")) return "validator";
  if (n.includes("util") || n.includes("helper")) return "utility";
  if (keyword === "class") return "class-method";
  // FIX: test the ORIGINAL name — `n` is lowercased above, so matching
  // /^[A-Z]/ against it could never succeed and PascalCase names
  // (components) were previously unreachable.
  if (/^[A-Z]/.test(name)) return "component";
  if (keyword === "function") return "function";
  return "unknown";
}
127
// Jaccard similarity between two normalized code strings, computed over
// their unique alphanumeric word sets. Identical strings short-circuit to 1;
// a string with no alphanumeric words yields 0.
function calculateSimilarity(a, b) {
  if (a === b) return 1;
  const splitWords = (s) => s.split(/[^a-zA-Z0-9]+/).filter((t) => t.length > 0);
  const wordsA = splitWords(a);
  const wordsB = splitWords(b);
  if (wordsA.length === 0 || wordsB.length === 0) return 0;
  const uniqueA = new Set(wordsA);
  const uniqueB = new Set(wordsB);
  // |A ∩ B| via membership testing; |A ∪ B| via a merged set.
  let shared = 0;
  for (const word of uniqueA) {
    if (uniqueB.has(word)) shared++;
  }
  const unionSize = new Set([...uniqueA, ...uniqueB]).size;
  return shared / unionSize;
}
138
// Heuristic confidence for a duplicate candidate: start from the raw
// similarity, reward big blocks (long or token-heavy matches are less likely
// to be coincidental), penalize tiny ones, and clamp to [0, 1].
function calculateConfidence(similarity, tokens, lines) {
  const longBlockBonus = lines > 20 ? 0.05 : 0;
  const tokenBonus = tokens > 200 ? 0.05 : 0;
  const shortBlockPenalty = lines < 5 ? 0.1 : 0;
  const adjusted = similarity + longBlockBonus + tokenBonus - shortBlockPenalty;
  return Math.min(1, Math.max(0, adjusted));
}
145
// Pairwise duplicate detection across all blocks extracted from the given
// files. Returns duplicate-pair records sorted by similarity (highest
// first). Work is O(n^2) in the number of surviving blocks.
//
// fileContents: array of { file, content } entries to scan.
// options:
//   minSimilarity       - similarity floor below which pairs are not reported
//   minLines            - blocks spanning fewer lines than this are skipped
//   streamResults       - when truthy, each match is logged to the console
//   onProgress          - optional (done, total, message) callback
//   excludePatterns     - regex sources (compiled case-insensitive); blocks
//                         whose code matches any are dropped up front
//   confidenceThreshold - pairs with lower heuristic confidence are dropped
//   ignoreWhitelist     - path fragments (suppressed when BOTH files contain
//                         the fragment) or explicit "fileA::fileB" pairs
async function detectDuplicatePatterns(fileContents, options) {
  const {
    minSimilarity,
    minLines,
    streamResults,
    onProgress,
    excludePatterns = [],
    confidenceThreshold = 0,
    ignoreWhitelist = []
  } = options;
  // Collect candidate blocks, filtering by minimum size and exclusion regexes.
  const allBlocks = [];
  const excludeRegexes = excludePatterns.map((p) => new RegExp(p, "i"));
  for (const { file, content } of fileContents) {
    const blocks = extractBlocks(file, content);
    for (const b of blocks) {
      if (b.endLine - b.startLine + 1 < minLines) continue;
      const isExcluded = excludeRegexes.some((regex) => regex.test(b.code));
      if (isExcluded) continue;
      allBlocks.push(b);
    }
  }
  const duplicates = [];
  const totalBlocks = allBlocks.length;
  let comparisons = 0;
  // Unordered pairs: n * (n - 1) / 2.
  const totalComparisons = totalBlocks * (totalBlocks - 1) / 2;
  if (onProgress) {
    onProgress(
      0,
      totalComparisons,
      `Starting duplicate detection on ${totalBlocks} blocks...`
    );
  }
  for (let i = 0; i < allBlocks.length; i++) {
    // Every 50 outer iterations, yield to the event loop (setImmediate is
    // Node-specific) so a long scan does not starve other work.
    if (i % 50 === 0 && i > 0) {
      await new Promise((resolve) => setImmediate(resolve));
      if (onProgress) {
        onProgress(
          comparisons,
          totalComparisons,
          `Analyzing blocks (${i}/${totalBlocks})...`
        );
      }
    }
    const b1 = allBlocks[i];
    // Normalization is language-aware (Python vs JS/TS comment syntax).
    const isPython1 = b1.file.toLowerCase().endsWith(".py");
    const norm1 = normalizeCode(b1.code, isPython1);
    for (let j = i + 1; j < allBlocks.length; j++) {
      comparisons++;
      const b2 = allBlocks[j];
      // Same-file pairs are never reported.
      if (b1.file === b2.file) continue;
      const isWhitelisted = ignoreWhitelist.some((pattern) => {
        return b1.file.includes(pattern) && b2.file.includes(pattern) || pattern === `${b1.file}::${b2.file}` || pattern === `${b2.file}::${b1.file}`;
      });
      if (isWhitelisted) continue;
      const isPython2 = b2.file.toLowerCase().endsWith(".py");
      // NOTE(review): b2's normalization is recomputed for every pair; it
      // could be cached per block if this loop becomes a hotspot.
      const norm2 = normalizeCode(b2.code, isPython2);
      const sim = calculateSimilarity(norm1, norm2);
      if (sim >= minSimilarity) {
        // Confidence is derived from the FIRST block's size only.
        const confidence = calculateConfidence(
          sim,
          b1.tokens,
          b1.endLine - b1.startLine + 1
        );
        if (confidence < confidenceThreshold) continue;
        const { severity, reason, suggestion, matchedRule } = calculateSeverity(
          b1.file,
          b2.file,
          b1.code,
          sim,
          b1.endLine - b1.startLine + 1
        );
        const dup = {
          file1: b1.file,
          line1: b1.startLine,
          endLine1: b1.endLine,
          file2: b2.file,
          line2: b2.startLine,
          endLine2: b2.endLine,
          code1: b1.code,
          code2: b2.code,
          similarity: sim,
          confidence,
          patternType: b1.patternType,
          tokenCost: b1.tokens + b2.tokens,
          severity,
          reason,
          suggestion,
          matchedRule
        };
        duplicates.push(dup);
        if (streamResults)
          console.log(
            `[DUPLICATE] ${dup.file1}:${dup.line1} <-> ${dup.file2}:${dup.line2} (${Math.round(sim * 100)}%, conf: ${Math.round(confidence * 100)}%)`
          );
      }
    }
  }
  if (onProgress) {
    onProgress(
      totalComparisons,
      totalComparisons,
      `Duplicate detection complete. Found ${duplicates.length} patterns.`
    );
  }
  // Most-similar pairs first.
  return duplicates.sort((a, b) => b.similarity - a.similarity);
}
251
+
252
+ export {
253
+ detectDuplicatePatterns
254
+ };
@@ -0,0 +1,64 @@
1
+ import {
2
+ analyzePatterns
3
+ } from "./chunk-WMOGJFME.mjs";
4
+ import {
5
+ calculatePatternScore
6
+ } from "./chunk-WBBO35SC.mjs";
7
+
8
+ // src/index.ts
9
+ import { ToolRegistry, Severity } from "@aiready/core";
10
+
11
+ // src/provider.ts
12
+ import {
13
+ ToolName,
14
+ SpokeOutputSchema,
15
+ GLOBAL_SCAN_OPTIONS
16
+ } from "@aiready/core";
17
// Spoke provider for the pattern-detect tool, registered with the shared
// ToolRegistry below. `analyze` runs the pattern scan and validates the
// result against SpokeOutputSchema; `score` turns a prior analyze() output
// into a score via calculatePatternScore.
var PatternDetectProvider = {
  id: ToolName.PatternDetect,
  // Alternate names accepted for this tool.
  alias: ["patterns", "duplicates", "duplication"],
  async analyze(options) {
    const results = await analyzePatterns(options);
    return SpokeOutputSchema.parse({
      results: results.results,
      summary: {
        totalFiles: results.files.length,
        // Sum of per-result issue counts.
        totalIssues: results.results.reduce(
          (sum, r) => sum + r.issues.length,
          0
        ),
        duplicates: results.duplicates,
        // Keep the raw duplicates for score calculation
        clusters: results.clusters,
        // Keep only tool-specific config keys, except rootDir which is
        // retained even though it is a global scan option.
        config: Object.fromEntries(
          Object.entries(results.config).filter(
            ([key]) => !GLOBAL_SCAN_OPTIONS.includes(key) || key === "rootDir"
          )
        )
      },
      metadata: {
        toolName: ToolName.PatternDetect,
        // NOTE(review): hardcoded version lags the package (0.16.x per this
        // diff's header) — confirm whether this is meant to track releases.
        version: "0.12.5",
        timestamp: (/* @__PURE__ */ new Date()).toISOString()
      }
    });
  },
  score(output, options) {
    const duplicates = output.summary.duplicates || [];
    // Falls back to the result count when totalFiles is absent or zero.
    const totalFiles = output.summary.totalFiles || output.results.length;
    return calculatePatternScore(
      duplicates,
      totalFiles,
      options.costConfig
    );
  },
  defaultWeight: 22
};

// src/index.ts
// Module side effect: registering makes the provider discoverable by id/alias.
ToolRegistry.register(PatternDetectProvider);
60
+
61
+ export {
62
+ PatternDetectProvider,
63
+ Severity
64
+ };
@@ -0,0 +1,143 @@
1
+ import {
2
+ calculateSeverity
3
+ } from "./chunk-I6ETJC7L.mjs";
4
+
5
+ // src/detector.ts
6
+ import {
7
+ calculateStringSimilarity,
8
+ calculateHeuristicConfidence,
9
+ extractCodeBlocks
10
+ } from "@aiready/core";
11
+
12
+ // src/core/normalizer.ts
13
// Normalizes source code for similarity comparison: strips comments
// (language-aware), replaces string literals and integer literals with
// placeholder tokens, collapses whitespace, and lowercases. Null/undefined/
// empty input yields "".
function normalizeCode(code, isPython = false) {
  if (!code) return "";
  const stripped = isPython
    ? code.replace(/#.*/g, "")
    : code.replace(/\/\/.*$/gm, "").replace(/\/\*[\s\S]*?\*\//g, "");
  // Literal contents don't matter for structural similarity, so every
  // quoted string becomes STR and every standalone number becomes NUM.
  return stripped
    .replace(/"[^"]*"/g, '"STR"')
    .replace(/'[^']*'/g, "'STR'")
    .replace(/`[^`]*`/g, "`STR`")
    .replace(/\b\d+\b/g, "NUM")
    .replace(/\s+/g, " ")
    .trim()
    .toLowerCase();
}
23
+
24
// src/detector.ts
// Thin adapters: this module keeps its historical local names while
// delegating the implementations to the shared @aiready/core helpers.

// Splits a source file into named code blocks (delegates to core).
function extractBlocks(file, content) {
  return extractCodeBlocks(file, content);
}
// Similarity between two normalized code strings (delegates to core).
function calculateSimilarity(a, b) {
  return calculateStringSimilarity(a, b);
}
// Heuristic confidence for a candidate duplicate pair (delegates to core).
function calculateConfidence(similarity, tokens, lines) {
  return calculateHeuristicConfidence(similarity, tokens, lines);
}
34
// Pairwise duplicate detection across all blocks extracted from the given
// files. Returns duplicate-pair records sorted by similarity (highest
// first). Work is O(n^2) in the number of surviving blocks.
//
// fileContents: array of { file, content } entries to scan.
// options:
//   minSimilarity       - similarity floor below which pairs are not reported
//   minLines            - blocks spanning fewer lines than this are skipped
//   streamResults       - when truthy, each match is logged to the console
//   onProgress          - optional (done, total, message) callback
//   excludePatterns     - regex sources (compiled case-insensitive); blocks
//                         whose code matches any are dropped up front
//   confidenceThreshold - pairs with lower heuristic confidence are dropped
//   ignoreWhitelist     - path fragments (suppressed when BOTH files contain
//                         the fragment) or explicit "fileA::fileB" pairs
async function detectDuplicatePatterns(fileContents, options) {
  const {
    minSimilarity,
    minLines,
    streamResults,
    onProgress,
    excludePatterns = [],
    confidenceThreshold = 0,
    ignoreWhitelist = []
  } = options;
  // Collect candidate blocks, filtering by minimum size and exclusion regexes.
  const allBlocks = [];
  const excludeRegexes = excludePatterns.map((p) => new RegExp(p, "i"));
  for (const { file, content } of fileContents) {
    const blocks = extractBlocks(file, content);
    for (const b of blocks) {
      if (b.endLine - b.startLine + 1 < minLines) continue;
      const isExcluded = excludeRegexes.some((regex) => regex.test(b.code));
      if (isExcluded) continue;
      allBlocks.push(b);
    }
  }
  const duplicates = [];
  const totalBlocks = allBlocks.length;
  let comparisons = 0;
  // Unordered pairs: n * (n - 1) / 2.
  const totalComparisons = totalBlocks * (totalBlocks - 1) / 2;
  if (onProgress) {
    onProgress(
      0,
      totalComparisons,
      `Starting duplicate detection on ${totalBlocks} blocks...`
    );
  }
  for (let i = 0; i < allBlocks.length; i++) {
    // Every 50 outer iterations, yield to the event loop (setImmediate is
    // Node-specific) so a long scan does not starve other work.
    if (i % 50 === 0 && i > 0) {
      await new Promise((resolve) => setImmediate(resolve));
      if (onProgress) {
        onProgress(
          comparisons,
          totalComparisons,
          `Analyzing blocks (${i}/${totalBlocks})...`
        );
      }
    }
    const b1 = allBlocks[i];
    // Normalization is language-aware (Python vs JS/TS comment syntax).
    const isPython1 = b1.file.toLowerCase().endsWith(".py");
    const norm1 = normalizeCode(b1.code, isPython1);
    for (let j = i + 1; j < allBlocks.length; j++) {
      comparisons++;
      const b2 = allBlocks[j];
      // Same-file pairs are never reported.
      if (b1.file === b2.file) continue;
      const isWhitelisted = ignoreWhitelist.some((pattern) => {
        return b1.file.includes(pattern) && b2.file.includes(pattern) || pattern === `${b1.file}::${b2.file}` || pattern === `${b2.file}::${b1.file}`;
      });
      if (isWhitelisted) continue;
      const isPython2 = b2.file.toLowerCase().endsWith(".py");
      // NOTE(review): b2's normalization is recomputed for every pair; it
      // could be cached per block if this loop becomes a hotspot.
      const norm2 = normalizeCode(b2.code, isPython2);
      const sim = calculateSimilarity(norm1, norm2);
      if (sim >= minSimilarity) {
        // Confidence is derived from the FIRST block's size only.
        const confidence = calculateConfidence(
          sim,
          b1.tokens,
          b1.endLine - b1.startLine + 1
        );
        if (confidence < confidenceThreshold) continue;
        const { severity, reason, suggestion, matchedRule } = calculateSeverity(
          b1.file,
          b2.file,
          b1.code,
          sim,
          b1.endLine - b1.startLine + 1
        );
        const dup = {
          file1: b1.file,
          line1: b1.startLine,
          endLine1: b1.endLine,
          file2: b2.file,
          line2: b2.startLine,
          endLine2: b2.endLine,
          code1: b1.code,
          code2: b2.code,
          similarity: sim,
          confidence,
          patternType: b1.patternType,
          tokenCost: b1.tokens + b2.tokens,
          severity,
          reason,
          suggestion,
          matchedRule
        };
        duplicates.push(dup);
        if (streamResults)
          console.log(
            `[DUPLICATE] ${dup.file1}:${dup.line1} <-> ${dup.file2}:${dup.line2} (${Math.round(sim * 100)}%, conf: ${Math.round(confidence * 100)}%)`
          );
      }
    }
  }
  if (onProgress) {
    onProgress(
      totalComparisons,
      totalComparisons,
      `Duplicate detection complete. Found ${duplicates.length} patterns.`
    );
  }
  // Most-similar pairs first.
  return duplicates.sort((a, b) => b.similarity - a.similarity);
}
140
+
141
+ export {
142
+ detectDuplicatePatterns
143
+ };
@@ -0,0 +1,112 @@
1
+ // src/scoring.ts
2
+ import {
3
+ calculateMonthlyCost,
4
+ calculateProductivityImpact,
5
+ DEFAULT_COST_CONFIG,
6
+ ToolName
7
+ } from "@aiready/core";
8
// Converts raw duplicate findings into a 0-100 tool score plus the factor
// breakdown and recommendations surfaced to the user.
//
// duplicates         - duplicate-pair records ({ tokenCost, similarity, severity, ... })
// totalFilesAnalyzed - number of files scanned; 0 short-circuits to a perfect score
// costConfig         - optional overrides merged over DEFAULT_COST_CONFIG
function calculatePatternScore(duplicates, totalFilesAnalyzed, costConfig) {
  const totalDuplicates = duplicates.length;
  const totalTokenCost = duplicates.reduce((sum, d) => sum + d.tokenCost, 0);
  // "High impact" = expensive (>1000 tokens) or highly similar (>70%).
  const highImpactDuplicates = duplicates.filter(
    (d) => d.tokenCost > 1e3 || d.similarity > 0.7
  ).length;
  if (totalFilesAnalyzed === 0) {
    // Nothing was scanned: report a clean score with zeroed metrics.
    return {
      toolName: ToolName.PatternDetect,
      score: 100,
      rawMetrics: {
        totalDuplicates: 0,
        totalTokenCost: 0,
        highImpactDuplicates: 0,
        totalFilesAnalyzed: 0
      },
      factors: [],
      recommendations: []
    };
  }
  // Density metrics normalized by repository size.
  const duplicatesPerFile = totalDuplicates / totalFilesAnalyzed * 100;
  const tokenWastePerFile = totalTokenCost / totalFilesAnalyzed;
  // Penalties are capped so no single dimension can zero the score alone.
  const duplicatesPenalty = Math.min(60, duplicatesPerFile * 0.6);
  const tokenPenalty = Math.min(40, tokenWastePerFile / 125);
  // NOTE(review): for 1-2 high-impact duplicates this expression is negative
  // (e.g. 1*2-5 = -3), which *raises* the score; preserved as published —
  // confirm whether `highImpactDuplicates * 2 + 5` was intended.
  const highImpactPenalty = highImpactDuplicates > 0 ? Math.min(15, highImpactDuplicates * 2 - 5) : -5;
  const score = 100 - duplicatesPenalty - tokenPenalty - highImpactPenalty;
  const finalScore = Math.max(0, Math.min(100, Math.round(score)));
  const factors = [
    {
      name: "Duplication Density",
      impact: -Math.round(duplicatesPenalty),
      description: `${duplicatesPerFile.toFixed(1)} duplicates per 100 files`
    },
    {
      name: "Token Waste",
      impact: -Math.round(tokenPenalty),
      description: `${Math.round(tokenWastePerFile)} tokens wasted per file`
    }
  ];
  if (highImpactDuplicates > 0) {
    factors.push({
      name: "High-Impact Patterns",
      impact: -Math.round(highImpactPenalty),
      description: `${highImpactDuplicates} high-impact duplicates (>1000 tokens or >70% similar)`
    });
  } else {
    factors.push({
      name: "No High-Impact Patterns",
      impact: 5,
      description: "No severe duplicates detected"
    });
  }
  // Recommendations, highest-leverage first.
  const recommendations = [];
  if (highImpactDuplicates > 0) {
    const estimatedImpact = Math.min(15, highImpactDuplicates * 3);
    recommendations.push({
      action: `Deduplicate ${highImpactDuplicates} high-impact pattern${highImpactDuplicates > 1 ? "s" : ""}`,
      estimatedImpact,
      priority: "high"
    });
  }
  if (totalDuplicates > 10 && duplicatesPerFile > 20) {
    const estimatedImpact = Math.min(10, Math.round(duplicatesPenalty * 0.3));
    recommendations.push({
      action: "Extract common patterns into shared utilities",
      estimatedImpact,
      priority: "medium"
    });
  }
  if (tokenWastePerFile > 2e3) {
    const estimatedImpact = Math.min(8, Math.round(tokenPenalty * 0.4));
    recommendations.push({
      action: "Consolidate duplicated logic to reduce AI context waste",
      estimatedImpact,
      priority: totalTokenCost > 1e4 ? "high" : "medium"
    });
  }
  // Business-value estimates derived from token cost and issue severities.
  const cfg = { ...DEFAULT_COST_CONFIG, ...costConfig };
  const estimatedMonthlyCost = calculateMonthlyCost(totalTokenCost, cfg);
  const issues = duplicates.map((d) => ({
    severity: d.severity === "critical" ? "critical" : d.severity === "major" ? "major" : "minor"
  }));
  const productivityImpact = calculateProductivityImpact(issues);
  return {
    // Use the shared enum (was the string literal "pattern-detect") for
    // consistency with the empty-repo branch above.
    toolName: ToolName.PatternDetect,
    score: finalScore,
    rawMetrics: {
      totalDuplicates,
      totalTokenCost,
      highImpactDuplicates,
      totalFilesAnalyzed,
      duplicatesPerFile: Math.round(duplicatesPerFile * 10) / 10,
      tokenWastePerFile: Math.round(tokenWastePerFile),
      // Business value metrics
      estimatedMonthlyCost,
      estimatedDeveloperHours: productivityImpact.totalHours
    },
    factors,
    recommendations
  };
}
109
+
110
+ export {
111
+ calculatePatternScore
112
+ };