deepdebug-local-agent 0.3.7 → 0.3.9
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/analyzers/config-analyzer.js +446 -0
- package/analyzers/controller-analyzer.js +429 -0
- package/analyzers/dto-analyzer.js +455 -0
- package/detectors/build-tool-detector.js +0 -0
- package/detectors/framework-detector.js +91 -0
- package/detectors/language-detector.js +89 -0
- package/detectors/multi-project-detector.js +191 -0
- package/detectors/service-detector.js +244 -0
- package/detectors.js +30 -0
- package/exec-utils.js +215 -0
- package/fs-utils.js +34 -0
- package/mcp-http-server.js +313 -0
- package/package.json +1 -1
- package/patch.js +607 -0
- package/ports.js +69 -0
- package/server.js +1 -138
- package/workspace/detect-port.js +176 -0
- package/workspace/file-reader.js +54 -0
- package/workspace/git-client.js +0 -0
- package/workspace/process-manager.js +619 -0
- package/workspace/scanner.js +72 -0
- package/workspace-manager.js +172 -0
package/patch.js
ADDED
|
@@ -0,0 +1,607 @@
|
|
|
1
|
+
import path from "path";
|
|
2
|
+
import { writeFile, readFile } from "./fs-utils.js";
|
|
3
|
+
import pkg from "unidiff";
|
|
4
|
+
|
|
5
|
+
const { parsePatch, applyPatch } = pkg;
|
|
6
|
+
|
|
7
|
+
/**
 * Apply a unified diff to a file under `root`, trying progressively
 * fuzzier matching strategies before giving up.
 *
 * 🆕 FIX v2:
 * - Completely removed any fallback that might append patch as comment
 * - Added validation to ensure file wasn't corrupted
 * - Added Strategy 4: Intelligent line-by-line matching
 *
 * Strategies (in order):
 * 1. Exact match (standard applyPatch)
 * 2. Fuzzy match (try different line offsets)
 * 3. Content match (find best matching context)
 * 4. Intelligent match (find lines by content, ignore line numbers)
 *
 * @param {string} root - Workspace root the diff's file paths are resolved against.
 * @param {string} diffText - Unified diff text. NOTE(review): only the FIRST
 *   file patch in a multi-file diff is applied.
 * @returns {Promise<object>} { ok, target, bytes, strategy, filesModified, patchedFiles, [offset] }
 * @throws {Error} When the diff has no patches, no target path can be inferred,
 *   the target file is missing, or all strategies fail (then error.code === "PATCH_FAILED").
 */
export async function applyUnifiedDiff(root, diffText) {
  console.log("🔧 [SMART PATCH v2] Starting patch application...");

  let patches = parsePatch(diffText);
  if (!patches.length) throw new Error("No patches found in diff");
  let p = patches[0];

  // Discover target path from patch (strip git's "a/" / "b/" prefixes).
  const targetRel =
    p?.newFileName?.replace(/^b\//, "") ||
    p?.oldFileName?.replace(/^a\//, "");
  if (!targetRel) throw new Error("Could not infer target file from diff");

  console.log("🎯 Target:", targetRel);
  const full = path.join(root, targetRel);

  let original;
  try {
    original = await readFile(full, "utf8");
  } catch (err) {
    // readFile failure is surfaced as "not found"; other I/O errors are folded in too.
    throw new Error(`Target file not found: ${targetRel}`);
  }

  // Normalize invisible characters in both file content and diff.
  // This fixes encoding mismatches between AI-generated diffs and actual files.
  const rawOriginal = original;
  original = stripInvisibleChars(original);
  diffText = stripInvisibleChars(diffText);

  if (rawOriginal !== original) {
    console.log("⚡ Original file had invisible characters - normalized");
  }

  // Re-parse the normalized diff so hunk content matches the cleaned file.
  const normalizedPatches = parsePatch(diffText);
  if (normalizedPatches.length > 0) {
    p = normalizedPatches[0];
  }

  const originalLines = original.split('\n');
  const originalLength = original.length;
  console.log("📖 Original file:", originalLines.length, "lines,", originalLength, "chars");

  // Log first-hunk statistics for debugging.
  if (p.hunks && p.hunks.length > 0) {
    const hunk = p.hunks[0];
    console.log("📋 Patch hunk:");
    console.log(" Old start:", hunk.oldStart);
    console.log(" Old lines:", hunk.oldLines);
    console.log(" New start:", hunk.newStart);
    console.log(" New lines:", hunk.newLines);
    console.log(" Context lines:", hunk.lines.filter(l => l.startsWith(' ')).length);
    console.log(" Additions:", hunk.lines.filter(l => l.startsWith('+')).length);
    console.log(" Deletions:", hunk.lines.filter(l => l.startsWith('-')).length);
  }

  // STRATEGY 1: Try exact match first (unidiff's standard applyPatch).
  console.log("\n🎯 STRATEGY 1: Exact match");
  let result = applyPatch(original, p);

  // isValidResult guards against a corrupted result (diff appended as text).
  if (result !== false && isValidResult(result, diffText)) {
    console.log("✅ Exact match succeeded!");
    await writeFile(full, result, "utf8");
    return {
      ok: true,
      target: targetRel,
      bytes: Buffer.byteLength(result, "utf8"),
      strategy: "exact",
      filesModified: 1,
      patchedFiles: [targetRel]
    };
  }
  console.log("❌ Exact match failed");

  // STRATEGY 2: Try fuzzy match with line offsets.
  // Small shifts first, widening up to ±100 lines.
  console.log("\n🔍 STRATEGY 2: Fuzzy match with offsets");
  const offsets = [1, -1, 2, -2, 3, -3, 4, -4, 5, -5, 6, -6, 7, -7, 8, -8, 9, -9, 10, -10,
    15, -15, 20, -20, 25, -25, 30, -30, 40, -40, 50, -50, 75, -75, 100, -100];

  for (const offset of offsets) {
    const adjustedPatch = adjustPatchLineNumbers(p, offset);
    result = applyPatch(original, adjustedPatch);

    if (result !== false && isValidResult(result, diffText)) {
      console.log(`✅ Fuzzy match succeeded with offset: ${offset}`);
      await writeFile(full, result, "utf8");
      return {
        ok: true,
        target: targetRel,
        bytes: Buffer.byteLength(result, "utf8"),
        strategy: "fuzzy",
        offset: offset,
        filesModified: 1,
        patchedFiles: [targetRel]
      };
    }
  }
  console.log("❌ Fuzzy match failed (tried all offsets)");

  // STRATEGY 3: Try content-based match (best-scoring context window).
  console.log("\n🧠 STRATEGY 3: Content-based matching");
  result = await tryContentMatch(original, p);

  if (result !== null && isValidResult(result, diffText)) {
    console.log("✅ Content match succeeded!");
    await writeFile(full, result, "utf8");
    return {
      ok: true,
      target: targetRel,
      bytes: Buffer.byteLength(result, "utf8"),
      strategy: "content",
      filesModified: 1,
      patchedFiles: [targetRel]
    };
  }
  console.log("❌ Content match failed");

  // STRATEGY 4: Try intelligent line-by-line match (ignores line numbers entirely).
  console.log("\n🔬 STRATEGY 4: Intelligent line-by-line matching");
  result = await tryIntelligentMatch(original, p);

  if (result !== null && isValidResult(result, diffText)) {
    console.log("✅ Intelligent match succeeded!");
    await writeFile(full, result, "utf8");
    return {
      ok: true,
      target: targetRel,
      bytes: Buffer.byteLength(result, "utf8"),
      strategy: "intelligent",
      filesModified: 1,
      patchedFiles: [targetRel]
    };
  }
  console.log("❌ Intelligent match failed");

  // ❌ ALL STRATEGIES FAILED - THROW ERROR (never write a partial/corrupt file).
  console.error("\n❌ ALL STRATEGIES EXHAUSTED - PATCH CANNOT BE APPLIED");

  // Provide detailed error info: expected vs. actual content at the hunk start.
  const errorDetails = buildErrorDetails(originalLines, p);

  const error = new Error(`Patch failed: ${errorDetails}`);
  error.code = "PATCH_FAILED";
  error.file = targetRel;
  error.suggestion = "The file may have been modified. Try re-analyzing the error.";

  throw error;
}
|
|
169
|
+
|
|
170
|
+
/**
 * 🆕 VALIDATION: Decide whether a patched result is a sane string and was
 * not corrupted by a fallback that pasted the raw diff into the file.
 *
 * @param {string} result - Candidate patched file content.
 * @param {string} diffText - The unified diff that produced it.
 * @returns {boolean} true when the result looks like a genuinely patched file.
 */
function isValidResult(result, diffText) {
  if (typeof result !== 'string' || !result) return false;

  // Known failure markers, or a diff body sitting verbatim at the end of
  // the file — both mean the patch was appended rather than applied.
  const hasFailureMarker =
    result.includes("PATCH COULD NOT BE APPLIED") ||
    result.includes("fuzz 0-3 failed");
  const endsWithRawDiff =
    result.includes("--- a/") &&
    result.includes("+++ b/") &&
    result.endsWith(diffText.trim());

  if (hasFailureMarker || endsWithRawDiff) {
    console.warn("⚠️ Detected corrupted result (patch appended as comment)");
    return false;
  }

  // The first five lines of the diff appearing near the tail of the file
  // also indicates the diff text itself was pasted in.
  const diffHead = diffText.split('\n').slice(0, 5).join('\n');
  const headPos = result.indexOf(diffHead);
  if (headPos !== -1 && headPos > result.length - diffText.length - 100) {
    console.warn("⚠️ Detected diff appended to end of file");
    return false;
  }

  return true;
}
|
|
196
|
+
|
|
197
|
+
/**
 * 🆕 STRATEGY 4: Intelligent line-by-line matching.
 * Finds the exact lines to modify by content, completely ignoring the
 * hunk's line numbers. Mutates a working copy of the file's lines hunk
 * by hunk and returns the joined result.
 *
 * @param {string} original - Full original file content.
 * @param {object} patch - Parsed patch (as returned by unidiff's parsePatch).
 * @returns {Promise<string|null>} Patched content, or null when any hunk
 *   cannot be located by content.
 */
async function tryIntelligentMatch(original, patch) {
  const originalLines = original.split('\n');

  if (!patch.hunks || !patch.hunks.length) return null;

  let resultLines = [...originalLines];
  // Net line-count drift accumulated across hunks.
  // NOTE(review): only forwarded to findInsertionPoint, which does not use it.
  let totalOffset = 0;

  for (const hunk of patch.hunks) {
    // Partition the hunk body: deletions, additions, and the context lines
    // before/after the first change.
    const linesToRemove = [];
    const linesToAdd = [];
    const contextBefore = [];
    const contextAfter = [];
    let foundFirstChange = false;

    for (const line of hunk.lines) {
      if (line.startsWith('-')) {
        linesToRemove.push(line.substring(1));
        foundFirstChange = true;
      } else if (line.startsWith('+')) {
        linesToAdd.push(line.substring(1));
        foundFirstChange = true;
      } else if (line.startsWith(' ')) {
        // Context lines are "before" until the first +/- is seen, then "after".
        if (!foundFirstChange) {
          contextBefore.push(line.substring(1));
        } else {
          contextAfter.push(line.substring(1));
        }
      }
    }

    console.log(` Lines to remove: ${linesToRemove.length}, to add: ${linesToAdd.length}`);
    console.log(` Context before: ${contextBefore.length}, after: ${contextAfter.length}`);

    if (linesToRemove.length === 0 && linesToAdd.length > 0) {
      // Pure addition - find insertion point using context
      const insertIndex = findInsertionPoint(resultLines, contextBefore, contextAfter, totalOffset);
      if (insertIndex !== -1) {
        console.log(` ✅ Found insertion point at line ${insertIndex + 1}`);
        resultLines.splice(insertIndex, 0, ...linesToAdd);
        totalOffset += linesToAdd.length;
        continue;
      }
      console.log(" ❌ Could not find insertion point");
      return null;
    }

    // Find where the lines to remove actually are (content search, not line numbers).
    const foundIndex = findLinesInFile(resultLines, linesToRemove, contextBefore);

    if (foundIndex === -1) {
      console.log(" ❌ Could not find lines to remove in file");
      return null;
    }

    console.log(` ✅ Found matching lines at index ${foundIndex + 1}`);

    // Replace the found lines with the new lines in one splice.
    resultLines.splice(foundIndex, linesToRemove.length, ...linesToAdd);
    totalOffset += linesToAdd.length - linesToRemove.length;
  }

  return resultLines.join('\n');
}
|
|
266
|
+
|
|
267
|
+
/**
 * Find a sequence of lines in the file using content matching.
 *
 * Pass 1 requires every search line to match (after normalize()); when
 * contextBefore is supplied, candidate positions must also be preceded by
 * the full context block. Pass 2 relaxes to a fuzzy score with an 80%
 * threshold (exact line = 1 point, substring overlap = 0.5).
 *
 * @param {string[]} fileLines - Current working copy of the file's lines.
 * @param {string[]} searchLines - Lines to locate (the hunk's deletions).
 * @param {string[]} [contextBefore] - Context lines expected just before them.
 * @returns {number} Start index of the match, or -1.
 */
function findLinesInFile(fileLines, searchLines, contextBefore = []) {
  if (searchLines.length === 0) return -1;

  const searchStart = contextBefore.length > 0 ? contextBefore[contextBefore.length - 1] : null;

  for (let i = 0; i <= fileLines.length - searchLines.length; i++) {
    // If we have context, check if it matches first
    // NOTE(review): when the last context line is an empty string, searchStart
    // is falsy and this whole pre-check is skipped — presumably intentional
    // (blank context is a weak anchor), but worth confirming.
    if (searchStart && contextBefore.length > 0) {
      let contextMatches = true;
      const contextStartIndex = i - contextBefore.length;

      // Only enforce context when there is room for it before position i;
      // near the top of the file the check is skipped rather than failed.
      if (contextStartIndex >= 0) {
        for (let c = 0; c < contextBefore.length; c++) {
          if (normalize(fileLines[contextStartIndex + c]) !== normalize(contextBefore[c])) {
            contextMatches = false;
            break;
          }
        }

        if (!contextMatches) continue;
      }
    }

    // Check if search lines match exactly (normalized).
    let match = true;
    for (let j = 0; j < searchLines.length; j++) {
      if (normalize(fileLines[i + j]) !== normalize(searchLines[j])) {
        match = false;
        break;
      }
    }

    if (match) {
      return i;
    }
  }

  // Fallback: try partial matching (80% threshold), ignoring context.
  for (let i = 0; i <= fileLines.length - searchLines.length; i++) {
    let matchCount = 0;

    for (let j = 0; j < searchLines.length; j++) {
      const fileLine = normalize(fileLines[i + j]);
      const searchLine = normalize(searchLines[j]);

      if (fileLine === searchLine) {
        matchCount++;
      } else if (fileLine.includes(searchLine) || searchLine.includes(fileLine)) {
        // NOTE(review): empty normalized lines substring-match anything,
        // which inflates scores for blank-heavy regions — confirm acceptable.
        matchCount += 0.5;
      }
    }

    if (matchCount / searchLines.length >= 0.8) {
      return i;
    }
  }

  return -1;
}
|
|
329
|
+
|
|
330
|
+
/**
 * Find the best insertion point for a pure-addition hunk using its
 * surrounding context lines. Anchors on the leading context first (insert
 * right after it), then on the trailing context (insert right before it).
 *
 * @param {string[]} fileLines - Current working copy of the file's lines.
 * @param {string[]} contextBefore - Context lines preceding the addition.
 * @param {string[]} contextAfter - Context lines following the addition.
 * @param {number} offset - Accepted for interface compatibility; not consulted.
 * @returns {number} Insertion index, or -1 when neither context is found.
 */
function findInsertionPoint(fileLines, contextBefore, contextAfter, offset) {
  // True when every wanted line matches the file (normalized) starting at `start`.
  const windowMatches = (start, wanted) =>
    wanted.every((line, j) => normalize(fileLines[start + j]) === normalize(line));

  // Strategy 1: anchor on the leading context block.
  if (contextBefore.length > 0) {
    for (let start = 0; start <= fileLines.length - contextBefore.length; start++) {
      if (windowMatches(start, contextBefore)) {
        return start + contextBefore.length;
      }
    }
  }

  // Strategy 2: anchor on the trailing context block.
  if (contextAfter.length > 0) {
    for (let start = 0; start <= fileLines.length - contextAfter.length; start++) {
      if (windowMatches(start, contextAfter)) {
        return start;
      }
    }
  }

  return -1;
}
|
|
368
|
+
|
|
369
|
+
/**
 * Strip invisible Unicode characters so AI-generated diffs and on-disk
 * file content compare equal despite encoding differences.
 * Falsy input (null / undefined / empty string) is returned as-is.
 *
 * @param {string} text - Text to clean.
 * @returns {string} Cleaned, NFC-normalized text.
 */
function stripInvisibleChars(text) {
  if (!text) return text;
  const withoutBom = text.replace(/^\uFEFF/, '');
  const withoutZeroWidth = withoutBom.replace(/[\u200B\u200C\u200D\u200E\u200F]/g, '');
  const withPlainSpaces = withoutZeroWidth.replace(/\u00A0/g, ' ');
  // NFC so composed and decomposed accent forms compare equal.
  return withPlainSpaces.normalize('NFC');
}
|
|
388
|
+
|
|
389
|
+
/**
 * Canonicalize a single line for fuzzy comparison: drop BOM, zero-width
 * characters and non-breaking spaces, NFC-normalize, replace each tab with
 * one space, and trim both ends. Nullish input becomes the empty string.
 * (Runs of multiple spaces are deliberately left as-is.)
 *
 * @param {string|null|undefined} line - Raw line from a file or hunk.
 * @returns {string} The normalized line.
 */
function normalize(line) {
  let text = line || '';
  text = text.replace(/^\uFEFF/, '');
  text = text.replace(/[\u200B\u200C\u200D\u200E\u200F\uFEFF\u00A0]/g, '');
  text = text.normalize('NFC');
  text = text.replace(/\t/g, ' ');
  return text.trim();
}
|
|
401
|
+
|
|
402
|
+
/**
 * Build a human-readable explanation for a failed patch: what the first
 * hunk expected to find at its starting line versus what the file holds.
 *
 * @param {string[]} originalLines - The original file split into lines.
 * @param {object} patch - Parsed patch whose first hunk is reported on.
 * @returns {string} One-sentence mismatch description.
 */
function buildErrorDetails(originalLines, patch) {
  const hunks = patch.hunks;
  if (!hunks || hunks.length === 0) {
    return "Patch has no hunks.";
  }

  const [firstHunk] = hunks;
  const expectedStart = firstHunk.oldStart;

  // The first couple of lines the hunk expected (context + deletions).
  const expected = firstHunk.lines
    .filter((l) => l.startsWith(' ') || l.startsWith('-'))
    .slice(0, 2)
    .map((l) => l.substring(1).trim());

  // What actually sits at that (1-based) position in the file.
  const actual = originalLines
    .slice(Math.max(0, expectedStart - 1), expectedStart + 1)
    .map((l) => l.trim());

  return (
    `At line ${expectedStart}: expected "${expected[0] || 'N/A'}" ` +
    `but found "${actual[0] || 'N/A'}". Context mismatch.`
  );
}
|
|
427
|
+
|
|
428
|
+
/**
 * Return a deep copy of `patch` whose hunk start positions are shifted by
 * `offset`, clamped so they never drop below line 1. The input patch is
 * left untouched (JSON round-trip clone).
 *
 * @param {object} patch - Parsed patch to shift.
 * @param {number} offset - Line delta (may be negative).
 * @returns {object} The shifted copy.
 */
function adjustPatchLineNumbers(patch, offset) {
  const clone = JSON.parse(JSON.stringify(patch));

  const shiftHunk = (hunk) => ({
    ...hunk,
    oldStart: Math.max(1, hunk.oldStart + offset),
    newStart: Math.max(1, hunk.newStart + offset),
  });

  if (clone.hunks) {
    clone.hunks = clone.hunks.map(shiftHunk);
  }

  return clone;
}
|
|
444
|
+
|
|
445
|
+
/**
 * STRATEGY 3: Content-based matching.
 * For each hunk, locates the best-scoring context window in the file
 * (via findBestContextMatch) and applies the hunk's deletions/additions
 * there (via applyChangesAtLocation). Bails out with null on the first
 * hunk that cannot be placed.
 *
 * @param {string} original - Full original file content.
 * @param {object} patch - Parsed patch (unidiff parsePatch shape).
 * @returns {Promise<string|null>} Patched content, or null on failure.
 */
async function tryContentMatch(original, patch) {
  const originalLines = original.split('\n');

  if (!patch.hunks || !patch.hunks.length) return null;

  let resultLines = [...originalLines];
  // Net line-count drift across hunks.
  // NOTE(review): accumulated but never read afterwards — confirm it can go.
  let totalOffset = 0;

  for (const hunk of patch.hunks) {
    // Partition the hunk body into context / additions / deletions.
    const contextLines = [];
    const additions = [];
    const deletions = [];

    for (const line of hunk.lines) {
      if (line.startsWith(' ')) {
        contextLines.push(line.substring(1));
      } else if (line.startsWith('+')) {
        additions.push(line.substring(1));
      } else if (line.startsWith('-')) {
        deletions.push(line.substring(1));
      }
    }

    console.log(" Context:", contextLines.length, "Add:", additions.length, "Del:", deletions.length);

    // Best matching window for this hunk's context (>= 60% score), against
    // the already-partially-patched working copy.
    const matchIndex = findBestContextMatch(resultLines, contextLines);

    if (matchIndex !== -1) {
      console.log(` ✅ Found context at line ${matchIndex + 1}`);

      const newResult = applyChangesAtLocation(
        resultLines,
        matchIndex,
        deletions,
        additions,
        contextLines
      );

      if (newResult) {
        resultLines = newResult;
        totalOffset += additions.length - deletions.length;
      } else {
        // applyChangesAtLocation returned a falsy result — give up on this strategy.
        return null;
      }
    } else {
      console.log(" ❌ No context match found");
      return null;
    }
  }

  return resultLines.join('\n');
}
|
|
500
|
+
|
|
501
|
+
/**
 * Scan the file for the window that best matches the hunk's context lines.
 * Exact (normalized) line matches score 1 point, substring overlaps 0.5.
 * Returns the start index of the best window scoring at least 60%, or -1
 * when no window clears that threshold.
 *
 * @param {string[]} fileLines - Current working copy of the file's lines.
 * @param {string[]} contextLines - The hunk's context lines.
 * @returns {number} Best matching start index, or -1.
 */
function findBestContextMatch(fileLines, contextLines) {
  const windowSize = contextLines.length;
  if (windowSize === 0) return -1;

  // Per-line similarity: 1 for an exact normalized match, 0.5 for a
  // non-empty substring relation, 0 otherwise.
  const lineScore = (a, b) => {
    if (a === b) return 1;
    if (a && b && (a.includes(b) || b.includes(a))) return 0.5;
    return 0;
  };

  let bestIndex = -1;
  let bestRatio = 0;

  for (let start = 0; start <= fileLines.length - windowSize; start++) {
    let score = 0;
    for (let j = 0; j < windowSize; j++) {
      score += lineScore(normalize(fileLines[start + j]), normalize(contextLines[j]));
    }

    const ratio = score / windowSize;
    if (ratio > bestRatio && ratio >= 0.6) {
      bestRatio = ratio;
      bestIndex = start;
    }
  }

  return bestIndex;
}
|
|
534
|
+
|
|
535
|
+
/**
 * Apply a hunk's deletions and additions around a matched context window.
 *
 * Works on a copy: first locates where the deletions actually begin (the
 * context match index is only approximate), then removes each deletion by
 * content within a small forward window, and finally inserts the additions
 * at the deletion start point.
 *
 * @param {string[]} fileLines - Current working copy of the file's lines.
 * @param {number} startIndex - Index where the hunk's context matched.
 * @param {string[]} deletions - Lines the hunk removes.
 * @param {string[]} additions - Lines the hunk adds.
 * @param {string[]} contextLines - The hunk's context (bounds the search window).
 * @returns {string[]} New array of lines with the changes applied.
 */
function applyChangesAtLocation(fileLines, startIndex, deletions, additions, contextLines) {
  const result = [...fileLines];

  // Find where deletions actually are: scan forward from the context match
  // (up to contextLines.length + 10 lines) for the first deletion line.
  // Falls back to startIndex itself when not found.
  let deleteStartIndex = startIndex;

  if (deletions.length > 0) {
    for (let i = startIndex; i < Math.min(result.length, startIndex + contextLines.length + 10); i++) {
      if (normalize(result[i]) === normalize(deletions[0])) {
        deleteStartIndex = i;
        break;
      }
    }
  }

  // Remove deletion lines one at a time, each searched by content within a
  // 10-line window from deleteStartIndex.
  // NOTE(review): result is spliced inside the loop, so indices shift as
  // lines are removed; the fixed window bound relies on that — confirm
  // behavior for hunks with widely separated deletions.
  for (let i = 0; i < deletions.length; i++) {
    const deletionNorm = normalize(deletions[i]);

    for (let j = deleteStartIndex; j < Math.min(result.length, deleteStartIndex + 10); j++) {
      if (normalize(result[j]) === deletionNorm) {
        result.splice(j, 1);
        break;
      }
    }
  }

  // Add new lines at the point where the deletions began.
  result.splice(deleteStartIndex, 0, ...additions);

  return result;
}
|
|
570
|
+
|
|
571
|
+
/**
 * Validate that a diff is structurally sound without touching the
 * filesystem: it parses, and every file patch carries a file name and at
 * least one hunk.
 *
 * @param {string} root - Workspace root (unused here; kept for interface parity).
 * @param {string} diffText - Unified diff text to check.
 * @returns {Promise<{valid: boolean, errors: string[]}>}
 */
export async function validateDiff(root, diffText) {
  try {
    const patches = parsePatch(diffText);

    if (!patches || patches.length === 0) {
      return {
        valid: false,
        errors: ["No patches found in diff"]
      };
    }

    const errors = [];
    for (const patch of patches) {
      const hasFileName = patch.oldFileName || patch.newFileName;
      if (!hasFileName) {
        errors.push("Patch missing file names");
      }

      const hasHunks = patch.hunks && patch.hunks.length > 0;
      if (!hasHunks) {
        errors.push("Patch has no hunks");
      }
    }

    return {
      valid: errors.length === 0,
      errors
    };
  } catch (error) {
    // parsePatch throws on malformed diff text; report it as invalid.
    return {
      valid: false,
      errors: [error.message]
    };
  }
}
|
package/ports.js
ADDED
|
@@ -0,0 +1,69 @@
|
|
|
1
|
+
import path from "path";
|
|
2
|
+
import yaml from "js-yaml";
|
|
3
|
+
import PropertiesReader from "properties-reader";
|
|
4
|
+
import { exists, readFile } from "./fs-utils.js";
|
|
5
|
+
|
|
6
|
+
/**
 * Best-effort detection of the HTTP port a project is configured to use.
 * Checks Spring Boot YAML config, then Spring properties files, then a
 * Node-style .env file. Parse/read errors on individual candidates are
 * deliberately ignored so the scan can continue.
 *
 * @param {string} root - Project root directory to search under.
 * @returns {Promise<number|null>} The configured port, or null when none was found.
 */
export async function detectPort(root) {
  // Spring Boot: YAML configuration candidates.
  const ymlCandidates = [
    "src/main/resources/application.yml",
    "src/main/resources/application.yaml",
    "application.yml",
    "application.yaml"
  ];
  for (const rel of ymlCandidates) {
    const full = path.join(root, rel);
    if (await exists(full)) {
      try {
        const content = await readFile(full, "utf8");
        const doc = yaml.load(content);

        // Try to read the port from the parsed object (either key casing).
        let port = doc?.server?.port ?? doc?.SERVER?.PORT;

        // If it is a ${PORT:8090}-style placeholder string, extract the default value.
        if (typeof port === 'string') {
          const match = port.match(/\$\{PORT:(\d+)\}/);
          if (match) port = match[1];
        }

        if (port) return Number(port);
      } catch {} // best-effort: unreadable/unparsable config — keep scanning
    }
  }

  // Spring Boot: .properties configuration candidates.
  const propCandidates = [
    "src/main/resources/application.properties",
    "application.properties"
  ];
  for (const rel of propCandidates) {
    const full = path.join(root, rel);
    if (await exists(full)) {
      try {
        const props = PropertiesReader(full);
        let port = props.get("server.port");

        // If it is a ${PORT:8090}-style placeholder string, extract the default value.
        if (typeof port === 'string') {
          const match = port.match(/\$\{PORT:(\d+)\}/);
          if (match) port = match[1];
        }

        if (port) return Number(port);
      } catch {} // best-effort: unreadable/unparsable properties — keep scanning
    }
  }

  // Node projects (with a standard .env file): first PORT=NNNN wins.
  const envCandidates = [".env", "src/.env"];
  for (const rel of envCandidates) {
    const full = path.join(root, rel);
    if (await exists(full)) {
      const content = await readFile(full, "utf8");
      const m = content.match(/PORT\s*=\s*(\d+)/i);
      if (m) return Number(m[1]);
    }
  }

  // Nothing configured that we recognize.
  return null;
}
|