@aiready/pattern-detect 0.1.1 → 0.1.3

package/CONTRIBUTING.md ADDED
@@ -0,0 +1,134 @@
1
+ # Contributing to @aiready/pattern-detect
2
+
3
+ Thank you for your interest in contributing to AIReady Pattern Detection! We welcome bug reports, feature requests, and code contributions.
4
+
5
+ ## ๐Ÿ› Reporting Issues
6
+
7
+ Found a bug or have a feature request? [Open an issue](https://github.com/caopengau/aiready-pattern-detect/issues) with:
8
+ - Clear description of the problem or feature
9
+ - Sample code that demonstrates the issue
10
+ - Expected vs actual behavior
11
+ - Your environment (Node version, OS)
12
+
13
+ ## 🔧 Development Setup
14
+
15
+ ```bash
16
+ # Clone your fork
17
+ git clone https://github.com/YOUR_USERNAME/aiready-pattern-detect
18
+ cd aiready-pattern-detect
19
+
20
+ # Install dependencies
21
+ pnpm install
22
+
23
+ # Build
24
+ pnpm build
25
+
26
+ # Run tests
27
+ pnpm test
28
+
29
+ # Test CLI locally
30
+ ./dist/cli.js ../test-project
31
+ ```
32
+
33
+ ## ๐Ÿ“ Making Changes
34
+
35
+ 1. **Fork the repository** and create a new branch:
36
+ ```bash
37
+ git checkout -b fix/similarity-calculation
38
+ # or
39
+ git checkout -b feat/new-pattern-type
40
+ ```
41
+
42
+ 2. **Make your changes** following our code style:
43
+ - Use TypeScript strict mode
44
+ - Add tests for new pattern types
45
+ - Update README with new features
46
+ - Keep detection logic modular
47
+
48
+ 3. **Test your changes**:
49
+ ```bash
50
+ pnpm build
51
+ pnpm test
52
+
53
+ # Test on real codebases
54
+ ./dist/cli.js /path/to/test-repo
55
+ ```
56
+
57
+ 4. **Commit using conventional commits**:
58
+ ```bash
59
+ git commit -m "fix: improve similarity threshold accuracy"
60
+ git commit -m "feat: add React component pattern detection"
61
+ ```
62
+
63
+ 5. **Push and open a PR**:
64
+ ```bash
65
+ git push origin feat/new-pattern-type
66
+ ```
67
+
68
+ ## 📋 Commit Convention
69
+
70
+ We use [Conventional Commits](https://www.conventionalcommits.org/):
71
+
72
+ - `feat:` - New feature (new pattern type, output format)
73
+ - `fix:` - Bug fix (detection accuracy, false positives)
74
+ - `docs:` - Documentation updates
75
+ - `perf:` - Performance improvements
76
+ - `refactor:` - Code restructuring
77
+ - `test:` - Test additions/updates
78
+
79
+ ## 🧪 Testing Guidelines
80
+
81
+ - Add test cases in `src/__tests__/detector.test.ts`
82
+ - Include real-world pattern examples
83
+ - Test edge cases (empty files, single-line functions)
84
+ - Verify output formats (console, JSON, HTML)
85
+
86
+ Example test:
87
+ ```typescript
88
+ test('detects API handler patterns', async () => {
89
+ const results = await detectDuplicatePatterns([...], { minSimilarity: 0.4, minLines: 5 });
90
+ expect(results).toHaveLength(2);
91
+ expect(results[0].patternType).toBe('api-handler');
92
+ });
93
+ ```
94
+
95
+ ## 🎯 Areas for Contribution
96
+
97
+ Great places to start:
98
+ - **New pattern types**: Add detection for new code patterns (see the sketch after this list)
99
+ - **Better categorization**: Improve pattern type classification
100
+ - **Detection accuracy**: Reduce false positives/negatives
101
+ - **Performance**: Optimize for large codebases
102
+ - **Output formats**: Add new export options
103
+ - **Documentation**: Usage examples, best practices
104
+
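+ For example, a new pattern type usually starts as an extra branch in the categorization logic (the bundled output in this diff suggests it lives in `src/detector.ts`). A rough sketch with a hypothetical "database-query" type; the keywords are illustrative only:
+
+ ```typescript
+ // Sketch only: one extra branch for a hypothetical "database-query" pattern type.
+ function categorizePattern(code: string): string {
+   const lower = code.toLowerCase();
+   if (lower.includes("select ") || lower.includes(".findone(") || lower.includes(".query(")) {
+     return "database-query";
+   }
+   // ...existing checks (api-handler, validator, component, ...) stay unchanged
+   return "unknown";
+ }
+ ```
+
+ A new type should also get an entry in the refactoring suggestions and a test case under `src/__tests__/`.
+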
105
+ ## 🔍 Code Review
106
+
107
+ - All checks must pass (build, tests, lint)
108
+ - Maintainers review within 2 business days
109
+ - Address feedback and update PR
110
+ - Once approved, we'll merge and publish
111
+
112
+ ## 📚 Documentation
113
+
114
+ - Update README.md for new features
115
+ - Add examples for new pattern types
116
+ - Document CLI options
117
+ - Include real-world use cases
118
+
119
+ ## 💡 Feature Ideas
120
+
121
+ Looking for inspiration? Consider:
122
+ - Language-specific pattern types (Go, Rust, etc.)
123
+ - Integration with popular linters
124
+ - VS Code extension
125
+ - CI/CD report generation
126
+ - Pattern suggestion improvements
127
+
128
+ ## โ“ Questions?
129
+
130
+ Open an issue or reach out to the maintainers. We're here to help!
131
+
132
+ ---
133
+
134
+ **Thank you for helping make AI-generated code better!** 💙
package/README.md CHANGED
@@ -47,6 +47,9 @@ aiready-patterns ./src --similarity 0.9
47
47
  # Only look at larger patterns
48
48
  aiready-patterns ./src --min-lines 10
49
49
 
50
+ # Memory optimization for large codebases
51
+ aiready-patterns ./src --max-blocks 1000 --batch-size 200
52
+
50
53
  # Export to JSON
51
54
  aiready-patterns ./src --output json --output-file report.json
52
55
 
@@ -165,12 +168,37 @@ router.get('/posts/:id', createResourceHandler('Post', database.posts.findOne));
165
168
 
166
169
 ## ⚙️ Configuration
167
170
 
171
+ ### Common Options
172
+
168
173
  | Option | Description | Default |
169
174
  |--------|-------------|---------|
170
- | `minSimilarity` | Similarity threshold (0-1) | `0.85` |
175
+ | `minSimilarity` | Similarity threshold (0-1). Use 0.40 for Jaccard (default), 0.85+ for Levenshtein | `0.40` |
171
176
  | `minLines` | Minimum lines to consider a pattern | `5` |
172
- | `include` | File patterns to include | `['**/*.ts', '**/*.js']` |
173
- | `exclude` | File patterns to exclude | `['**/node_modules/**', '**/*.test.*']` |
177
+ | `maxBlocks` | Maximum code blocks to analyze (prevents OOM) | `500` |
178
+ | `include` | File patterns to include | `['**/*.{ts,tsx,js,jsx,py,java}']` |
179
+ | `exclude` | File patterns to exclude | See below |
180
+
181
+ ### Exclude Patterns (Default)
182
+
183
+ By default, these patterns are excluded:
184
+ ```bash
185
+ **/node_modules/**
186
+ **/dist/**
187
+ **/build/**
188
+ **/.git/**
189
+ **/coverage/**
190
+ **/*.min.js
191
+ **/*.bundle.js
192
+ ```
193
+
194
+ Override with `--exclude` flag:
195
+ ```bash
196
+ # Exclude test files and generated code
197
+ aiready-patterns ./src --exclude "**/test/**,**/generated/**,**/__snapshots__/**"
198
+
199
+ # Add to defaults (comma-separated)
200
+ aiready-patterns ./src --exclude "**/node_modules/**,**/dist/**,**/build/**,**/*.spec.ts"
201
+ ```
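+
+ ### Programmatic Usage
+
+ The same options are available from the programmatic API, which exports `analyzePatterns` and `generateSummary`. A minimal sketch (option names mirror the table above; check the package typings for the exact shapes):
+
+ ```typescript
+ import { analyzePatterns, generateSummary } from '@aiready/pattern-detect';
+
+ // Scan, then roll the per-file results up into a summary.
+ const results = await analyzePatterns({
+   include: ['src/**/*.{ts,tsx}'],
+   exclude: ['**/node_modules/**', '**/dist/**', '**/*.test.*'],
+   minSimilarity: 0.40, // Jaccard default; use 0.85+ only with exact (Levenshtein) mode
+   minLines: 5,
+   maxBlocks: 500,
+ });
+
+ const summary = generateSummary(results);
+ console.log(`${summary.totalPatterns} duplicate patterns, ~${summary.totalTokenCost} tokens wasted`);
+ ```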
174
202
 
175
203
 ## 📈 Understanding the Output
176
204
 
@@ -205,6 +233,78 @@ Estimated tokens wasted when AI tools process duplicate code:
205
233
 4. **Use pattern types**: Prioritize refactoring by category (API handlers → validators → utilities)
206
234
  5. **Export reports**: Generate HTML reports for team reviews
207
235
 
236
+ ## ⚠️ Performance & Memory
237
+
238
+ ### Algorithm Complexity
239
+
240
+ **Fast Mode (default)**: **O(B × C × T)** where:
241
+ - B = number of blocks
242
+ - C = average candidates per block (~100)
243
+ - T = average tokens per block (~50)
244
+ - **Jaccard similarity** is O(T) per comparison instead of O(N²) for Levenshtein (see the sketch below)
245
+ - **Default threshold: 0.40** (comprehensive detection including tests and helpers)
246
+
247
+ **Exact Mode** (`--no-approx --no-fast-mode`): **O(B² × N²)** where:
248
+ - B = number of blocks
249
+ - N = average characters per block
250
+ - **Levenshtein similarity** - more accurate, much slower
251
+ - **Recommended threshold: 0.85+**
252
+ - **Not recommended for >100 files**
253
+
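+ For reference, fast mode's score is essentially a Jaccard index over the normalized token sets of two blocks. A simplified sketch of the per-pair comparison (mirroring the bundled implementation):
+
+ ```typescript
+ // Jaccard similarity: |intersection| / |union| of the two token sets.
+ function jaccardSimilarity(tokens1: string[], tokens2: string[]): number {
+   const set1 = new Set(tokens1);
+   const set2 = new Set(tokens2);
+   let intersection = 0;
+   for (const token of set1) {
+     if (set2.has(token)) intersection++;
+   }
+   const union = set1.size + set2.size - intersection;
+   return union === 0 ? 0 : intersection / union;
+ }
+
+ // Near-identical handlers share most tokens, so they score well above the 0.40 default.
+ console.log(jaccardSimilarity(['router', 'get', 'res', 'json'], ['router', 'post', 'res', 'json'])); // 0.6
+ ```
+
+ Exact mode swaps this for a Levenshtein-based score over the normalized code, which is what makes it so much slower.
+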
254
+ ### Performance Benchmarks
255
+
256
+ | Repo Size | Blocks | Fast Mode | Exact Mode |
257
+ |-----------|--------|-----------|------------|
258
+ | Small (<100 files) | ~50 | <1s | ~10s |
259
+ | Medium (100-500 files) | ~500 | ~2s | ~8 min |
260
+ | Large (500+ files) | ~500 (capped) | ~2s | ~76 min |
261
+
262
+ **Example:** 828 code blocks → limited to 500 → **2.4s** (fast) vs **76 min** (exact)
263
+
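+ When the cap applies, the detector keeps the largest blocks rather than an arbitrary subset, so the heaviest duplication still gets analyzed. Sketch of the selection step:
+
+ ```typescript
+ type Block = { linesOfCode: number };
+
+ // --max-blocks cap: keep the biggest blocks (by lines of code), drop the rest.
+ function capBlocks<T extends Block>(blocks: T[], maxBlocks: number): T[] {
+   if (blocks.length <= maxBlocks) return blocks;
+   return [...blocks].sort((a, b) => b.linesOfCode - a.linesOfCode).slice(0, maxBlocks);
+ }
+ ```
+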
264
+ ### Tuning Options
265
+
266
+ ```bash
267
+ # Default (fast Jaccard mode, 40% threshold - comprehensive detection)
268
+ aiready-patterns ./src
269
+
270
+ # Higher threshold for only obvious duplicates
271
+ aiready-patterns ./src --similarity 0.65
272
+
273
+ # Somewhat lower threshold for more potential duplicates (still stricter than the 0.40 default)
274
+ aiready-patterns ./src --similarity 0.55
275
+
276
+ # Increase accuracy at the cost of speed
277
+ aiready-patterns ./src --no-fast-mode --max-comparisons 100000
278
+
279
+ # Exact mode with progress tracking (slowest, shows % and ETA)
280
+ aiready-patterns ./src --no-approx --no-fast-mode --stream-results --max-blocks 100
281
+
282
+ # Maximum speed (aggressive filtering)
283
+ aiready-patterns ./src --max-blocks 200 --min-shared-tokens 12
284
+
285
+ # Exact mode (slowest, most accurate)
286
+ aiready-patterns ./src --no-approx --no-fast-mode --max-comparisons 500000
287
+ ```
288
+
289
+ **CLI Options:**
290
+ - `--stream-results` - Output duplicates as found (useful for long analysis)
291
+ - `--no-fast-mode` - Use Levenshtein instead of Jaccard (more accurate, much slower)
292
+ - `--no-approx` - Disable candidate filtering (enables progress % and ETA)
293
+ - `--max-comparisons N` - Cap total comparisons (default 50K)
294
+ - `--max-blocks N` - Limit blocks analyzed (default 500)
295
+
296
+ **Progress Indicators:**
297
+ - **Approx mode**: Shows blocks processed + duplicates found
298
+ - **Exact mode**: Shows % complete, ETA, and comparisons processed
299
+ - **Stream mode**: Prints each duplicate immediately when found
300
+
301
+ **Recommendations:**
302
+ - **< 100 files**: Use defaults, or try `--no-fast-mode` for higher accuracy
303
+ - **100-500 files**: Use defaults with fast mode (2-5s typical)
304
+ - **500-1000 files**: Use `--max-blocks 500 --min-lines 10` (~3-10s)
305
+ - **1000+ files**: Use `--max-blocks 300 --min-lines 15` or analyze by module
306
+ - **Slow analysis**: Add `--stream-results` to see progress in real-time
307
+
208
308
 ## 🔧 CI/CD Integration
209
309
 
210
310
  ### GitHub Actions
@@ -0,0 +1,409 @@
1
+ // src/index.ts
2
+ import { scanFiles, readFileContent } from "@aiready/core";
3
+
4
+ // src/detector.ts
5
+ import { similarityScore, estimateTokens } from "@aiready/core";
6
+ function categorizePattern(code) {
7
+ const lower = code.toLowerCase();
8
+ if (lower.includes("request") && lower.includes("response") || lower.includes("router.") || lower.includes("app.get") || lower.includes("app.post") || lower.includes("express") || lower.includes("ctx.body")) {
9
+ return "api-handler";
10
+ }
11
+ if (lower.includes("validate") || lower.includes("schema") || lower.includes("zod") || lower.includes("yup") || lower.includes("if") && lower.includes("throw")) {
12
+ return "validator";
13
+ }
14
+ if (lower.includes("return (") || lower.includes("jsx") || lower.includes("component") || lower.includes("props")) {
15
+ return "component";
16
+ }
17
+ if (lower.includes("class ") || lower.includes("this.")) {
18
+ return "class-method";
19
+ }
20
+ if (lower.includes("return ") && !lower.includes("this") && !lower.includes("new ")) {
21
+ return "utility";
22
+ }
23
+ if (lower.includes("function") || lower.includes("=>")) {
24
+ return "function";
25
+ }
26
+ return "unknown";
27
+ }
28
+ function extractCodeBlocks(content, minLines) {
29
+ const lines = content.split("\n");
30
+ const blocks = [];
31
+ let currentBlock = [];
32
+ let blockStart = 0;
33
+ let braceDepth = 0;
34
+ let inFunction = false;
35
+ for (let i = 0; i < lines.length; i++) {
36
+ const line = lines[i];
37
+ const trimmed = line.trim();
38
+ if (!inFunction && (trimmed.includes("function ") || trimmed.includes("=>") || trimmed.includes("async ") || /^(export\s+)?(async\s+)?function\s+/.test(trimmed) || /^(export\s+)?const\s+\w+\s*=\s*(async\s*)?\(/.test(trimmed))) {
39
+ inFunction = true;
40
+ blockStart = i;
41
+ }
42
+ for (const char of line) {
43
+ if (char === "{") braceDepth++;
44
+ if (char === "}") braceDepth--;
45
+ }
46
+ if (inFunction) {
47
+ currentBlock.push(line);
48
+ }
49
+ if (inFunction && braceDepth === 0 && currentBlock.length >= minLines) {
50
+ const blockContent = currentBlock.join("\n");
51
+ const linesOfCode = currentBlock.filter(
52
+ (l) => l.trim() && !l.trim().startsWith("//")
53
+ ).length;
54
+ blocks.push({
55
+ content: blockContent,
56
+ startLine: blockStart + 1,
57
+ endLine: i + 1,
58
+ patternType: categorizePattern(blockContent),
59
+ linesOfCode
60
+ });
61
+ currentBlock = [];
62
+ inFunction = false;
63
+ } else if (inFunction && braceDepth === 0) {
64
+ currentBlock = [];
65
+ inFunction = false;
66
+ }
67
+ }
68
+ return blocks;
69
+ }
70
+ function normalizeCode(code) {
71
+ return code.replace(/\/\/.*$/gm, "").replace(/\/\*[\s\S]*?\*\//g, "").replace(/"[^"]*"/g, '"STR"').replace(/'[^']*'/g, "'STR'").replace(/`[^`]*`/g, "`STR`").replace(/\b\d+\b/g, "NUM").replace(/\s+/g, " ").trim();
72
+ }
73
+ function jaccardSimilarity(tokens1, tokens2) {
74
+ const set1 = new Set(tokens1);
75
+ const set2 = new Set(tokens2);
76
+ let intersection = 0;
77
+ for (const token of set1) {
78
+ if (set2.has(token)) intersection++;
79
+ }
80
+ const union = set1.size + set2.size - intersection;
81
+ return union === 0 ? 0 : intersection / union;
82
+ }
83
+ function calculateSimilarity(block1, block2) {
84
+ const norm1 = normalizeCode(block1);
85
+ const norm2 = normalizeCode(block2);
86
+ const baseSimilarity = similarityScore(norm1, norm2);
87
+ const tokens1 = norm1.split(/[\s(){}[\];,]+/).filter(Boolean);
88
+ const tokens2 = norm2.split(/[\s(){}[\];,]+/).filter(Boolean);
89
+ const tokenSimilarity = similarityScore(tokens1.join(" "), tokens2.join(" "));
90
+ return baseSimilarity * 0.4 + tokenSimilarity * 0.6;
91
+ }
92
+ async function detectDuplicatePatterns(files, options) {
93
+ const {
94
+ minSimilarity,
95
+ minLines,
96
+ maxBlocks = 500,
97
+ batchSize = 100,
98
+ approx = true,
99
+ minSharedTokens = 8,
100
+ maxCandidatesPerBlock = 100,
101
+ fastMode = true,
102
+ maxComparisons = 5e4,
103
+ // Cap at 50K comparisons by default
104
+ streamResults = false
105
+ } = options;
106
+ const duplicates = [];
107
+ let allBlocks = files.flatMap(
108
+ (file) => extractCodeBlocks(file.content, minLines).map((block) => ({
109
+ content: block.content,
110
+ startLine: block.startLine,
111
+ endLine: block.endLine,
112
+ file: file.file,
113
+ normalized: normalizeCode(block.content),
114
+ patternType: block.patternType,
115
+ tokenCost: estimateTokens(block.content),
116
+ linesOfCode: block.linesOfCode
117
+ }))
118
+ );
119
+ console.log(`Extracted ${allBlocks.length} code blocks for analysis`);
120
+ if (allBlocks.length > maxBlocks) {
121
+ console.log(`\u26A0\uFE0F Limiting to ${maxBlocks} blocks (sorted by size) to prevent memory issues`);
122
+ console.log(` Use --max-blocks to increase limit or --min-lines to filter smaller blocks`);
123
+ allBlocks = allBlocks.sort((a, b) => b.linesOfCode - a.linesOfCode).slice(0, maxBlocks);
124
+ }
125
+ const stopwords = /* @__PURE__ */ new Set([
126
+ "return",
127
+ "const",
128
+ "let",
129
+ "var",
130
+ "function",
131
+ "class",
132
+ "new",
133
+ "if",
134
+ "else",
135
+ "for",
136
+ "while",
137
+ "async",
138
+ "await",
139
+ "try",
140
+ "catch",
141
+ "switch",
142
+ "case",
143
+ "default",
144
+ "import",
145
+ "export",
146
+ "from",
147
+ "true",
148
+ "false",
149
+ "null",
150
+ "undefined",
151
+ "this"
152
+ ]);
153
+ const tokenize = (norm) => norm.split(/[\s(){}\[\];,\.]+/).filter((t) => t && t.length >= 3 && !stopwords.has(t.toLowerCase()));
154
+ const blockTokens = allBlocks.map((b) => tokenize(b.normalized));
155
+ const invertedIndex = /* @__PURE__ */ new Map();
156
+ if (approx) {
157
+ for (let i = 0; i < blockTokens.length; i++) {
158
+ for (const tok of blockTokens[i]) {
159
+ let arr = invertedIndex.get(tok);
160
+ if (!arr) {
161
+ arr = [];
162
+ invertedIndex.set(tok, arr);
163
+ }
164
+ arr.push(i);
165
+ }
166
+ }
167
+ }
168
+ const totalComparisons = approx ? void 0 : allBlocks.length * (allBlocks.length - 1) / 2;
169
+ if (totalComparisons !== void 0) {
170
+ console.log(`Processing ${totalComparisons.toLocaleString()} comparisons in batches...`);
171
+ } else {
172
+ console.log(`Using approximate candidate selection to reduce comparisons...`);
173
+ }
174
+ let comparisonsProcessed = 0;
175
+ let comparisonsBudgetExhausted = false;
176
+ const startTime = Date.now();
177
+ for (let i = 0; i < allBlocks.length; i++) {
178
+ if (maxComparisons && comparisonsProcessed >= maxComparisons) {
179
+ comparisonsBudgetExhausted = true;
180
+ break;
181
+ }
182
+ if (i % batchSize === 0 && i > 0) {
183
+ const elapsed = ((Date.now() - startTime) / 1e3).toFixed(1);
184
+ const duplicatesFound = duplicates.length;
185
+ if (totalComparisons !== void 0) {
186
+ const progress = (comparisonsProcessed / totalComparisons * 100).toFixed(1);
187
+ const remaining = totalComparisons - comparisonsProcessed;
188
+ const rate = comparisonsProcessed / parseFloat(elapsed);
189
+ const eta = remaining > 0 ? (remaining / rate).toFixed(0) : 0;
190
+ console.log(` ${progress}% (${comparisonsProcessed.toLocaleString()}/${totalComparisons.toLocaleString()} comparisons, ${elapsed}s elapsed, ~${eta}s remaining, ${duplicatesFound} duplicates)`);
191
+ } else {
192
+ console.log(` Processed ${i.toLocaleString()}/${allBlocks.length} blocks (${elapsed}s elapsed, ${duplicatesFound} duplicates)`);
193
+ }
194
+ await new Promise((resolve) => setImmediate(resolve));
195
+ }
196
+ const block1 = allBlocks[i];
197
+ let candidates = null;
198
+ if (approx) {
199
+ const counts = /* @__PURE__ */ new Map();
200
+ for (const tok of blockTokens[i]) {
201
+ const ids = invertedIndex.get(tok);
202
+ if (!ids) continue;
203
+ for (const j of ids) {
204
+ if (j <= i) continue;
205
+ if (allBlocks[j].file === block1.file) continue;
206
+ counts.set(j, (counts.get(j) || 0) + 1);
207
+ }
208
+ }
209
+ candidates = Array.from(counts.entries()).filter(([, shared]) => shared >= minSharedTokens).sort((a, b) => b[1] - a[1]).slice(0, maxCandidatesPerBlock).map(([j, shared]) => ({ j, shared }));
210
+ }
211
+ if (approx && candidates) {
212
+ for (const { j } of candidates) {
213
+ if (maxComparisons && comparisonsProcessed >= maxComparisons) break;
214
+ comparisonsProcessed++;
215
+ const block2 = allBlocks[j];
216
+ const similarity = fastMode ? jaccardSimilarity(blockTokens[i], blockTokens[j]) : calculateSimilarity(block1.content, block2.content);
217
+ if (similarity >= minSimilarity) {
218
+ const duplicate = {
219
+ file1: block1.file,
220
+ file2: block2.file,
221
+ line1: block1.startLine,
222
+ line2: block2.startLine,
223
+ endLine1: block1.endLine,
224
+ endLine2: block2.endLine,
225
+ similarity,
226
+ snippet: block1.content.split("\n").slice(0, 5).join("\n") + "\n...",
227
+ patternType: block1.patternType,
228
+ tokenCost: block1.tokenCost + block2.tokenCost,
229
+ linesOfCode: block1.linesOfCode
230
+ };
231
+ duplicates.push(duplicate);
232
+ if (streamResults) {
233
+ console.log(`
234
+ \u2705 Found: ${duplicate.patternType} ${Math.round(similarity * 100)}% similar`);
235
+ console.log(` ${duplicate.file1}:${duplicate.line1}-${duplicate.endLine1} \u21D4 ${duplicate.file2}:${duplicate.line2}-${duplicate.endLine2}`);
236
+ console.log(` Token cost: ${duplicate.tokenCost.toLocaleString()}`);
237
+ }
238
+ }
239
+ }
240
+ } else {
241
+ for (let j = i + 1; j < allBlocks.length; j++) {
242
+ if (maxComparisons && comparisonsProcessed >= maxComparisons) break;
243
+ comparisonsProcessed++;
244
+ const block2 = allBlocks[j];
245
+ if (block1.file === block2.file) continue;
246
+ const similarity = fastMode ? jaccardSimilarity(blockTokens[i], blockTokens[j]) : calculateSimilarity(block1.content, block2.content);
247
+ if (similarity >= minSimilarity) {
248
+ const duplicate = {
249
+ file1: block1.file,
250
+ file2: block2.file,
251
+ line1: block1.startLine,
252
+ line2: block2.startLine,
253
+ endLine1: block1.endLine,
254
+ endLine2: block2.endLine,
255
+ similarity,
256
+ snippet: block1.content.split("\n").slice(0, 5).join("\n") + "\n...",
257
+ patternType: block1.patternType,
258
+ tokenCost: block1.tokenCost + block2.tokenCost,
259
+ linesOfCode: block1.linesOfCode
260
+ };
261
+ duplicates.push(duplicate);
262
+ if (streamResults) {
263
+ console.log(`
264
+ \u2705 Found: ${duplicate.patternType} ${Math.round(similarity * 100)}% similar`);
265
+ console.log(` ${duplicate.file1}:${duplicate.line1}-${duplicate.endLine1} \u21D4 ${duplicate.file2}:${duplicate.line2}-${duplicate.endLine2}`);
266
+ console.log(` Token cost: ${duplicate.tokenCost.toLocaleString()}`);
267
+ }
268
+ }
269
+ }
270
+ }
271
+ }
272
+ if (comparisonsBudgetExhausted) {
273
+ console.log(`\u26A0\uFE0F Comparison budget exhausted (${maxComparisons.toLocaleString()} comparisons). Use --max-comparisons to increase.`);
274
+ }
275
+ return duplicates.sort(
276
+ (a, b) => b.similarity - a.similarity || b.tokenCost - a.tokenCost
277
+ );
278
+ }
279
+
280
+ // src/index.ts
281
+ function getRefactoringSuggestion(patternType, similarity) {
282
+ const baseMessages = {
283
+ "api-handler": "Extract common middleware or create a base handler class",
284
+ validator: "Consolidate validation logic into shared schema validators (Zod/Yup)",
285
+ utility: "Move to a shared utilities file and reuse across modules",
286
+ "class-method": "Consider inheritance or composition to share behavior",
287
+ component: "Extract shared logic into a custom hook or HOC",
288
+ function: "Extract into a shared helper function",
289
+ unknown: "Extract common logic into a reusable module"
290
+ };
291
+ const urgency = similarity > 0.95 ? " (CRITICAL: Nearly identical code)" : similarity > 0.9 ? " (HIGH: Very similar, refactor soon)" : "";
292
+ return baseMessages[patternType] + urgency;
293
+ }
294
+ async function analyzePatterns(options) {
295
+ const {
296
+ minSimilarity = 0.65,
297
+ // Lower default for fast Jaccard mode (Levenshtein would be 0.85+)
298
+ minLines = 5,
299
+ maxBlocks = 500,
300
+ batchSize = 100,
301
+ approx = true,
302
+ minSharedTokens = 8,
303
+ maxCandidatesPerBlock = 100,
304
+ fastMode = true,
305
+ maxComparisons = 5e4,
306
+ streamResults = false,
307
+ ...scanOptions
308
+ } = options;
309
+ const files = await scanFiles(scanOptions);
310
+ const results = [];
311
+ const fileContents = await Promise.all(
312
+ files.map(async (file) => ({
313
+ file,
314
+ content: await readFileContent(file)
315
+ }))
316
+ );
317
+ const duplicates = await detectDuplicatePatterns(fileContents, {
318
+ minSimilarity,
319
+ minLines,
320
+ maxBlocks,
321
+ batchSize,
322
+ approx,
323
+ minSharedTokens,
324
+ maxCandidatesPerBlock,
325
+ fastMode,
326
+ maxComparisons,
327
+ streamResults
328
+ });
329
+ for (const file of files) {
330
+ const fileDuplicates = duplicates.filter(
331
+ (dup) => dup.file1 === file || dup.file2 === file
332
+ );
333
+ const issues = fileDuplicates.map((dup) => {
334
+ const otherFile = dup.file1 === file ? dup.file2 : dup.file1;
335
+ const severity = dup.similarity > 0.95 ? "critical" : dup.similarity > 0.9 ? "major" : "minor";
336
+ return {
337
+ type: "duplicate-pattern",
338
+ severity,
339
+ message: `${dup.patternType} pattern ${Math.round(dup.similarity * 100)}% similar to ${otherFile} (${dup.tokenCost} tokens wasted)`,
340
+ location: {
341
+ file,
342
+ line: dup.file1 === file ? dup.line1 : dup.line2
343
+ },
344
+ suggestion: getRefactoringSuggestion(dup.patternType, dup.similarity)
345
+ };
346
+ });
347
+ const totalTokenCost = fileDuplicates.reduce(
348
+ (sum, dup) => sum + dup.tokenCost,
349
+ 0
350
+ );
351
+ results.push({
352
+ fileName: file,
353
+ issues,
354
+ metrics: {
355
+ tokenCost: totalTokenCost,
356
+ consistencyScore: Math.max(0, 1 - fileDuplicates.length * 0.1)
357
+ }
358
+ });
359
+ }
360
+ return results;
361
+ }
362
+ function generateSummary(results) {
363
+ const allIssues = results.flatMap((r) => r.issues);
364
+ const totalTokenCost = results.reduce(
365
+ (sum, r) => sum + (r.metrics.tokenCost || 0),
366
+ 0
367
+ );
368
+ const patternsByType = {
369
+ "api-handler": 0,
370
+ validator: 0,
371
+ utility: 0,
372
+ "class-method": 0,
373
+ component: 0,
374
+ function: 0,
375
+ unknown: 0
376
+ };
377
+ allIssues.forEach((issue) => {
378
+ const match = issue.message.match(/^(\S+(?:-\S+)*) pattern/);
379
+ if (match) {
380
+ const type = match[1];
381
+ patternsByType[type] = (patternsByType[type] || 0) + 1;
382
+ }
383
+ });
384
+ const topDuplicates = allIssues.slice(0, 10).map((issue) => {
385
+ const similarityMatch = issue.message.match(/(\d+)% similar/);
386
+ const tokenMatch = issue.message.match(/\((\d+) tokens/);
387
+ const typeMatch = issue.message.match(/^(\S+(?:-\S+)*) pattern/);
388
+ const fileMatch = issue.message.match(/similar to (.+?) \(/);
389
+ return {
390
+ file1: issue.location.file,
391
+ file2: fileMatch?.[1] || "unknown",
392
+ similarity: similarityMatch ? parseInt(similarityMatch[1]) / 100 : 0,
393
+ patternType: typeMatch?.[1] || "unknown",
394
+ tokenCost: tokenMatch ? parseInt(tokenMatch[1]) : 0
395
+ };
396
+ });
397
+ return {
398
+ totalPatterns: allIssues.length,
399
+ totalTokenCost,
400
+ patternsByType,
401
+ topDuplicates
402
+ };
403
+ }
404
+
405
+ export {
406
+ detectDuplicatePatterns,
407
+ analyzePatterns,
408
+ generateSummary
409
+ };