@coderule/mcp 2.0.3 → 2.2.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/cli.cjs +176 -48
- package/dist/cli.cjs.map +1 -1
- package/dist/cli.js +176 -48
- package/dist/cli.js.map +1 -1
- package/dist/index.cjs +176 -48
- package/dist/index.cjs.map +1 -1
- package/dist/index.js +176 -48
- package/dist/index.js.map +1 -1
- package/dist/mcp-cli.cjs +393 -109
- package/dist/mcp-cli.cjs.map +1 -1
- package/dist/mcp-cli.js +395 -111
- package/dist/mcp-cli.js.map +1 -1
- package/package.json +2 -2
package/dist/mcp-cli.cjs
CHANGED
@@ -543,7 +543,10 @@ var FilesRepo = class {
       `UPDATE files
        SET content_sha256 = ?, service_file_hash = ?, hash_state = 'clean',
            hash_owner = NULL, hash_lease_expires_at = NULL, hash_started_at = NULL
-       WHERE id =
+       WHERE id = ?
+         AND hash_state = 'hashing'
+         AND hash_owner = ?
+         AND hash_started_at = ?`
     );
     this.requeueExpiredHashingStmt = this.db.prepare(
       `UPDATE files
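
The widened WHERE clause turns the apply step into a compare-and-set: a hash result only lands if the row is still in 'hashing' state, still held by the same owner, and still carries the same start timestamp. A minimal standalone sketch of that guard, assuming a better-sqlite3-style prepared-statement API (which the bundle's .prepare()/.run() calls suggest); the schema below is a hypothetical mirror of the files table, not the package's actual DDL:

// compare-and-set.js -- illustrative only
const Database = require("better-sqlite3");
const db = new Database(":memory:");
db.exec(`CREATE TABLE files (
  id INTEGER PRIMARY KEY,
  hash_state TEXT,
  hash_owner TEXT,
  hash_started_at INTEGER,
  content_sha256 TEXT
)`);
db.prepare(
  "INSERT INTO files (id, hash_state, hash_owner, hash_started_at) VALUES (1, 'hashing', 'worker-a', 100)"
).run();
const apply = db.prepare(`UPDATE files
  SET content_sha256 = ?, hash_state = 'clean', hash_owner = NULL
  WHERE id = ? AND hash_state = 'hashing' AND hash_owner = ? AND hash_started_at = ?`);
// A stale owner (or a restarted hashing pass) matches zero rows and is silently skipped.
console.log(apply.run("abc", 1, "worker-b", 100).changes); // 0
// The owner that actually started this hashing pass wins.
console.log(apply.run("abc", 1, "worker-a", 100).changes); // 1
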
@@ -674,13 +677,30 @@ var FilesRepo = class {
     tx(ids);
   }
   applyHashResults(results) {
-    if (!results.length) return;
+    if (!results.length) return 0;
+    let applied = 0;
     const tx = this.db.transaction((batch) => {
-      for (const {
-
+      for (const {
+        id,
+        contentSha256,
+        serviceFileHash,
+        hashOwner,
+        hashStartedAt
+      } of batch) {
+        const result = this.applyHashesStmt.run(
+          contentSha256,
+          serviceFileHash,
+          id,
+          hashOwner,
+          hashStartedAt
+        );
+        if ((result.changes ?? 0) > 0) {
+          applied += 1;
+        }
       }
     });
     tx(results);
+    return applied;
   }
   getCleanFilesForSnapshot() {
     return this.selectCleanSnapshotStmt.all();
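
applyHashResults now reports how many guarded updates actually stuck, counting via a closure so the existing tx(results) call stays unchanged. Worth noting that better-sqlite3-style transaction functions also pass through the wrapped callback's return value, so the count could be returned directly. Continuing the compare-and-set sketch above (illustrative only):

// tx-return.js -- continues the sketch above
const countApplied = db.transaction((batch) => {
  let n = 0;
  for (const b of batch) {
    n += apply.run(b.sha, b.id, b.owner, b.startedAt).changes;
  }
  return n; // db.transaction() returns the callback's return value
});
// Prints 0 here: the earlier run already flipped row 1 to 'clean', so the guard skips it.
console.log(countApplied([{ sha: "abc", id: 1, owner: "worker-a", startedAt: 100 }]));
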
@@ -730,7 +750,7 @@ var Outbox = class {
     this.log = logger2.child({ scope: "outbox" });
     this.queue = new qulite.Qulite(db, {
       logger: this.log,
-      defaultLeaseMs:
+      defaultLeaseMs: 6e5,
       defaultMaxAttempts: 10
     });
     this.markKindStmt = db.prepare(
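
For readers skimming the numeric literals: 6e5 is 600000 ms, so the fs_control lease is an explicit 10 minutes both here and in claimFsControlJob below.

// lease-units.js -- quick sanity check of the literal
console.log(6e5 === 600000, 6e5 / 60000); // true 10
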
@@ -788,7 +808,7 @@ var Outbox = class {
       this.log.debug({ rootId }, "Enqueued heartbeat job");
     }
   }
-  claimFsControlJob(leaseOwner, leaseMs =
+  claimFsControlJob(leaseOwner, leaseMs = 6e5) {
     return this.queue.claimNext({ type: "fs_control", leaseOwner, leaseMs });
   }
   ack(jobId, leaseOwner) {
@@ -1104,7 +1124,9 @@ var Hasher = class {
         successes.push({
           id: record.id,
           contentSha256: result.contentSha256,
-          serviceFileHash: result.serviceFileHash
+          serviceFileHash: result.serviceFileHash,
+          hashOwner: record.hash_owner ?? this.ownerId,
+          hashStartedAt: record.hash_started_at ?? 0
         });
       } catch (error) {
         if (error?.code === "ENOENT") {
@@ -1263,6 +1285,42 @@ async function runInventory(opts) {
 async function sleep(ms) {
   return new Promise((resolve) => setTimeout(resolve, ms));
 }
+var SnapshotStaleError = class extends Error {
+  constructor(message = "snapshot became stale") {
+    super(message);
+    this.name = "SnapshotStaleError";
+  }
+};
+function assertNotStale(opts) {
+  if (opts?.isStale?.()) {
+    throw new SnapshotStaleError();
+  }
+}
+function assertBeforeDeadline(opts) {
+  if (opts?.deadlineMs !== void 0 && Date.now() >= opts.deadlineMs) {
+    throw new Error("snapshot publish deadline exceeded");
+  }
+}
+function assertValid(opts) {
+  assertNotStale(opts);
+  assertBeforeDeadline(opts);
+}
+function withDeadline(promise, opts) {
+  if (opts?.deadlineMs === void 0) return promise;
+  const remaining = opts.deadlineMs - Date.now();
+  if (remaining <= 0) {
+    return Promise.reject(new Error("snapshot publish deadline exceeded"));
+  }
+  return Promise.race([
+    promise,
+    new Promise(
+      (_, reject) => setTimeout(
+        () => reject(new Error("snapshot publish deadline exceeded")),
+        remaining
+      )
+    )
+  ]);
+}
 function computeSnapshot(filesRepo) {
   const files = filesRepo.getCleanFilesForSnapshot();
   const hashes = files.map((file) => file.service_file_hash).filter((hash) => typeof hash === "string");
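
withDeadline is the classic Promise.race timeout: the race rejects once the remaining time elapses, but it neither cancels the underlying operation nor clears the timer. A standalone sketch of the same pattern with the timer cleaned up, which matters in long-lived processes (names here are mine, not the package's):

// deadline-race.js -- illustrative only
function raceDeadline(promise, deadlineMs) {
  const remaining = deadlineMs - Date.now();
  if (remaining <= 0) return Promise.reject(new Error("deadline exceeded"));
  let timer;
  const timeout = new Promise((_, reject) => {
    timer = setTimeout(() => reject(new Error("deadline exceeded")), remaining);
  });
  // Clearing the timer on settle keeps the event loop from being held open.
  return Promise.race([promise, timeout]).finally(() => clearTimeout(timer));
}

// A 50 ms task against a 1 s deadline resolves normally; flip the numbers to see the rejection.
raceDeadline(new Promise((r) => setTimeout(() => r("ok"), 50)), Date.now() + 1000)
  .then(console.log, console.error);
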
@@ -1278,12 +1336,17 @@ function computeSnapshot(filesRepo) {
     totalSize
   };
 }
-async function withRetries(op, logger2, context, maxAttempts) {
+async function withRetries(op, logger2, context, maxAttempts, opts) {
   let attempt = 0;
   while (true) {
+    assertValid(opts);
     try {
-      return await op();
+      return await withDeadline(op(), opts);
     } catch (err) {
+      if (err instanceof SnapshotStaleError) throw err;
+      if (typeof err?.message === "string" && err.message.includes("deadline exceeded")) {
+        throw err;
+      }
       attempt += 1;
       if (attempt >= maxAttempts) {
         logger2.error(
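
The retry loop now distinguishes fatal errors (staleness, deadline) from transient ones: fatal classes are rethrown immediately instead of burning attempts. Matching the deadline error by message substring, as the bundle does, works but is brittle; a dedicated error class, like SnapshotStaleError, is the sturdier variant. The general shape, as a sketch (names are mine):

// fatal-passthrough.js -- illustrative only
async function retrying(op, maxAttempts, isFatal) {
  for (let attempt = 1; ; attempt++) {
    try {
      return await op();
    } catch (err) {
      if (isFatal(err) || attempt >= maxAttempts) throw err;
      // transient: fall through and try again (backoff elided)
    }
  }
}

// Example: errors flagged fatal skip the remaining attempts.
retrying(async () => { throw Object.assign(new Error("boom"), { fatal: true }); }, 3, (e) => e.fatal)
  .catch((e) => console.error(e.message)); // "boom" after a single attempt
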
@@ -1301,7 +1364,14 @@ async function withRetries(op, logger2, context, maxAttempts) {
     }
   }
 }
-
+function computeServiceFileHash(relPath, content) {
+  const hash = crypto.createHash("sha256");
+  hash.update(relPath);
+  hash.update("\n");
+  hash.update(content);
+  return hash.digest("hex");
+}
+async function uploadMissing(rootPath, missing, syncClient, logger2, maxAttempts, chunkSize = 1, opts) {
   if (!missing || missing.length === 0) return;
   const total = missing.length;
   const chunks = [];
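
computeServiceFileHash mixes the relative path into the SHA-256 digest, separated from the content by a newline, so identical bytes at two different paths produce different service hashes. An equivalent one-shot check, assuming Node's built-in crypto:

// service-hash.js -- illustrative only
const crypto = require("node:crypto");
function serviceFileHash(relPath, content) {
  return crypto.createHash("sha256").update(relPath).update("\n").update(content).digest("hex");
}
const buf = Buffer.from("hello");
// Streaming updates hash the same bytes as one concatenated buffer.
console.log(serviceFileHash("a.txt", buf) ===
  crypto.createHash("sha256").update(Buffer.concat([Buffer.from("a.txt\n"), buf])).digest("hex")); // true
// Same content, different path => different service hash.
console.log(serviceFileHash("a.txt", buf) !== serviceFileHash("b.txt", buf)); // true
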
@@ -1309,17 +1379,36 @@ async function uploadMissing(rootPath, missing, syncClient, logger2, maxAttempts
     chunks.push(missing.slice(i, i + chunkSize));
   }
   for (let idx = 0; idx < chunks.length; idx++) {
+    assertValid(opts);
     const list = chunks[idx];
     const map = /* @__PURE__ */ new Map();
     for (const missingFile of list) {
       const absPath = path2__default.default.join(rootPath, missingFile.file_path);
       try {
         const buffer = await fs5__default.default.readFile(absPath);
+        const serviceHash = computeServiceFileHash(
+          missingFile.file_path,
+          buffer
+        );
+        if (serviceHash !== missingFile.file_hash) {
+          logger2.warn(
+            {
+              relPath: missingFile.file_path,
+              expected: missingFile.file_hash,
+              got: serviceHash
+            },
+            "File content changed during upload; snapshot is stale"
+          );
+          throw new SnapshotStaleError(
+            `file hash mismatch for ${missingFile.file_path}`
+          );
+        }
         map.set(missingFile.file_hash, {
           path: missingFile.file_path,
           content: buffer
         });
       } catch (error) {
+        if (error instanceof SnapshotStaleError) throw error;
         logger2.warn(
           { err: error, relPath: missingFile.file_path },
           "Failed to read missing file content"
@@ -1327,6 +1416,7 @@ async function uploadMissing(rootPath, missing, syncClient, logger2, maxAttempts
       }
     }
     if (map.size === 0) continue;
+    assertValid(opts);
     await withRetries(
       () => syncClient.uploadFileContent(map),
       logger2,
@@ -1336,65 +1426,76 @@ async function uploadMissing(rootPath, missing, syncClient, logger2, maxAttempts
         chunks: chunks.length,
         files: map.size
       },
-      maxAttempts
+      maxAttempts,
+      opts
     );
   }
 }
-async function ensureSnapshotCreated(rootPath, computation, syncClient, logger2,
+async function ensureSnapshotCreated(rootPath, computation, syncClient, logger2, opts) {
   const { snapshotHash, files } = computation;
-  const maxAttempts =
-  const uploadChunkSize =
+  const maxAttempts = opts?.maxAttempts ?? 5;
+  const uploadChunkSize = opts?.uploadChunkSize ?? 1;
+  assertValid(opts);
   let status = await withRetries(
     () => syncClient.checkSnapshotStatus(snapshotHash),
    logger2,
     { op: "checkSnapshotStatus", snapshotHash },
-    maxAttempts
+    maxAttempts,
+    opts
   );
   if (status.status === "READY") {
     logger2.info({ snapshotHash }, "Snapshot already READY");
     return;
   }
-  if (status.status === "NOT_FOUND" || status.status === "MISSING_CONTENT") {
-    status = await withRetries(
-      () => syncClient.createSnapshot(snapshotHash, files),
-      logger2,
-      { op: "createSnapshot", snapshotHash },
-      maxAttempts
-    );
-  }
-  if (status.status === "MISSING_CONTENT" && status.missing_files?.length) {
-    logger2.info(
-      { missing: status.missing_files.length },
-      "Uploading missing file content"
-    );
-    await uploadMissing(
-      rootPath,
-      status.missing_files,
-      syncClient,
-      logger2,
-      maxAttempts,
-      uploadChunkSize
-    );
-    status = await withRetries(
-      () => syncClient.createSnapshot(snapshotHash, files),
-      logger2,
-      { op: "createSnapshot", snapshotHash },
-      maxAttempts
-    );
-  }
   let attempt = 0;
   while (status.status !== "READY") {
+    assertValid(opts);
     if (status.status === "FAILED") {
       throw new Error(`Snapshot failed processing: ${JSON.stringify(status)}`);
     }
+    if (status.status === "NOT_FOUND" || status.status === "MISSING_CONTENT") {
+      status = await withRetries(
+        () => syncClient.createSnapshot(snapshotHash, files),
+        logger2,
+        { op: "createSnapshot", snapshotHash },
+        maxAttempts,
+        opts
+      );
+    }
+    if (status.status === "MISSING_CONTENT" && status.missing_files?.length) {
+      logger2.info(
+        { missing: status.missing_files.length },
+        "Uploading missing file content"
+      );
+      await uploadMissing(
+        rootPath,
+        status.missing_files,
+        syncClient,
+        logger2,
+        maxAttempts,
+        uploadChunkSize,
+        opts
+      );
+      status = await withRetries(
+        () => syncClient.createSnapshot(snapshotHash, files),
+        logger2,
+        { op: "createSnapshot", snapshotHash },
+        maxAttempts,
+        opts
+      );
+      continue;
+    }
+    if (status.status === "READY") break;
     const delay = Math.min(5e3, 1e3 * Math.max(1, 2 ** attempt));
     await sleep(delay);
     attempt += 1;
+    assertValid(opts);
     status = await withRetries(
       () => syncClient.checkSnapshotStatus(snapshotHash),
       logger2,
       { op: "checkSnapshotStatus", snapshotHash },
-      maxAttempts
+      maxAttempts,
+      opts
     );
   }
   logger2.info({ snapshotHash }, "Snapshot READY");
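
The restructuring above moves the create/upload handling inside the polling loop, so MISSING_CONTENT reported on any iteration (not just the first probe) triggers an upload and an immediate re-create. Distilled to its control flow, under the same status values the diff uses (transport, retries, and logging elided; names mine):

// ensure-ready.js -- illustrative only
async function ensureReady({ check, create, upload, sleep }) {
  let status = await check();
  let attempt = 0;
  while (status.status !== "READY") {
    if (status.status === "FAILED") throw new Error("snapshot failed");
    if (status.status === "NOT_FOUND" || status.status === "MISSING_CONTENT") {
      status = await create();
    }
    if (status.status === "MISSING_CONTENT" && status.missing_files?.length) {
      await upload(status.missing_files);
      status = await create(); // re-create now that content is present
      continue;                // re-evaluate without sleeping
    }
    if (status.status === "READY") break;
    await sleep(Math.min(5000, 1000 * 2 ** attempt));
    attempt += 1;
    status = await check();
  }
}
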
@@ -1561,12 +1662,14 @@ async function sendHeartbeat(rootId, snapshotsRepo, syncClient, logger2) {
 // src/service/State.ts
 var ServiceState = class {
   constructor() {
+    this.changeSeq = 0;
     this.lastChangeAt = Date.now();
     this.lastSnapshotReadyAt = Date.now();
     this.lastHeartbeatEnqueuedAt = 0;
   }
   updateChange(timestamp = Date.now()) {
     this.lastChangeAt = timestamp;
+    this.changeSeq += 1;
   }
   updateSnapshotReady(timestamp = Date.now()) {
     this.lastSnapshotReadyAt = timestamp;
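
changeSeq is a monotonic write counter: a publisher captures it when it starts and treats any later mismatch as proof that the filesystem moved underneath it. The capture-and-compare idiom in isolation:

// change-seq.js -- illustrative only
const state = { changeSeq: 0, updateChange() { this.changeSeq += 1; } };
const publishSeq = state.changeSeq; // captured at publish start
state.updateChange();               // a watcher records a file change mid-publish
console.log(state.changeSeq !== publishSeq); // true => the in-flight snapshot is stale
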
@@ -1592,6 +1695,7 @@ function computeBackoff(attempts) {
   const delay = 1e3 * 2 ** base;
   return Math.min(delay, 6e4);
 }
+var PUBLISH_MAX_MS = 5 * 6e4;
 async function readSymlinkTarget2(absPath) {
   try {
     return await fs5__default.default.readlink(absPath);
@@ -1870,9 +1974,10 @@ var ServiceRunner = class {
     if (this.runtime.filesRepo.countByState("dirty") > 0 || this.runtime.filesRepo.countByState("hashing") > 0) {
       const delay = computeBackoff(job.attempts);
       this.runtime.outbox.retry(job.id, this.fsControlLeaseOwner, delay);
-      await sleep2(200);
       return;
     }
+    const publishSeq = this.state.changeSeq;
+    const isStale = () => this.state.changeSeq !== publishSeq || this.runtime.filesRepo.countByState("dirty") > 0 || this.runtime.filesRepo.countByState("hashing") > 0;
     try {
       const result = await publishSnapshot(
         this.runtime.config.rootPath,
@@ -1882,17 +1987,40 @@ var ServiceRunner = class {
         log,
         {
           maxAttempts: this.runtime.config.maxSnapshotAttempts,
-          uploadChunkSize: this.runtime.config.uploadChunkSize
+          uploadChunkSize: this.runtime.config.uploadChunkSize,
+          deadlineMs: Date.now() + PUBLISH_MAX_MS,
+          isStale
         }
       );
-      this.runtime.outbox.ack(job.id, this.fsControlLeaseOwner);
+      const acked = this.runtime.outbox.ack(job.id, this.fsControlLeaseOwner);
+      if (!acked) {
+        log.warn(
+          { jobId: job.id },
+          "Snapshot job ack failed (lease may have expired)"
+        );
+      }
       this.state.updateSnapshotReady(result.createdAt);
       log.info({ snapshotHash: result.snapshotHash }, "Snapshot job completed");
+      if (this.runtime.filesRepo.countByState("dirty") > 0 || this.runtime.filesRepo.countByState("hashing") > 0) {
+        this.runtime.outbox.enqueueSnapshot(this.runtime.config.rootId, 0);
+        return;
+      }
+      const nowHash = computeSnapshot(this.runtime.filesRepo).snapshotHash;
+      if (nowHash !== result.snapshotHash) {
+        this.runtime.outbox.enqueueSnapshot(this.runtime.config.rootId, 0);
+      }
     } catch (error) {
+      if (error instanceof SnapshotStaleError) {
+        log.debug(
+          { err: error },
+          "Snapshot publish aborted (stale); quick retry"
+        );
+        this.runtime.outbox.retry(job.id, this.fsControlLeaseOwner, 250);
+        return;
+      }
      log.warn({ err: error }, "Snapshot job failed");
       const delay = computeBackoff(job.attempts);
       this.runtime.outbox.retry(job.id, this.fsControlLeaseOwner, delay);
-      await sleep2(delay);
     }
   }
   async handleHeartbeatJob(job, log) {
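
Two behavioral notes on this hunk: the ack result is now checked (a false return is taken to mean the lease lapsed while publishing), and each publish carries a hard deadline of PUBLISH_MAX_MS (5 * 6e4 ms, i.e. five minutes). After a successful publish the runner also re-checks the dirty/hashing counters and the freshly recomputed snapshot hash, enqueueing a follow-up job with zero delay if either drifted. That post-publish check, reduced to a sketch (names mine):

// post-publish.js -- illustrative only
function afterPublish({ publishedHash, currentHash, pendingWork, enqueue }) {
  // Changes that landed mid-publish get a new snapshot job right away
  // instead of waiting for the next filesystem event.
  if (pendingWork || currentHash !== publishedHash) enqueue(0);
}
afterPublish({ publishedHash: "h1", currentHash: "h2", pendingWork: false, enqueue: (d) => console.log("requeue in", d, "ms") });
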
@@ -2101,33 +2229,20 @@ function createMcpServer({
   async function sleep3(ms) {
     return new Promise((resolve) => setTimeout(resolve, ms));
   }
-
-
-    while (!latest && Date.now() < deadlineMs) {
-      await sleep3(250);
-      latest = runtime.snapshotsRepo.getLatest();
-    }
-    return latest;
+  function isLocalStable() {
+    return runtime.filesRepo.countByState("dirty") === 0 && runtime.filesRepo.countByState("hashing") === 0;
   }
-  async function
-
-
-
-      const status = await runtime.clients.sync.checkSnapshotStatus(currentHash);
-      if (status.status === "READY") {
-        return currentHash;
-      }
-      if (status.status === "FAILED") {
-      }
-    } catch {
-    }
-    await sleep3(500);
-    const latest = runtime.snapshotsRepo.getLatest();
-    if (latest && latest.snapshot_hash !== currentHash) {
-      currentHash = latest.snapshot_hash;
-    }
+  async function withDeadline2(promise, deadlineMs) {
+    const remaining = deadlineMs - Date.now();
+    if (remaining <= 0) {
+      throw new Error("deadline exceeded");
     }
-    return
+    return Promise.race([
+      promise,
+      new Promise(
+        (_, reject) => setTimeout(() => reject(new Error("deadline exceeded")), remaining)
+      )
+    ]);
   }
   const server = new mcp_js.McpServer({
     name: SERVER_NAME,
@@ -2161,20 +2276,26 @@ function createMcpServer({
       title: "Semantic Code Retrieval",
       description: "USE THIS before code exploration. Graph-based RAG for semantic code search using AST relationships and embeddings. Returns 30-50 code segments with file paths and line numbers. After initial query, you may query again with refined keywords or use conventional tools (Read, Glob, Grep) for specific files.",
       inputSchema: {
-        query: zod.z.string().min(1, "Query text is required").describe(`
+        query: zod.z.string().min(1, "Query text is required").describe(`Short docstring-style description of what the code does + specific technical terms and identifiers (NOT natural language questions, NOT bare keyword lists).

-STRATEGY:
+STRATEGY: Start with a mini-docstring (what the code does), then append concrete identifiers, API names, error types, and domain terms.

 \u2705 GOOD EXAMPLES:
- - "JWT
- - "
- - "
- - "
+- "Validate JWT access token and extract user claims FastAPI dependency AuthMiddleware jose jwt decode RS256 audience issuer"
+- "Index repository files into embedding store Celery worker chunker AST parse imports calls graph propagate batch upsert vector_index"
+- "Create Alembic migration for SQLAlchemy relationship add foreign key backfill existing rows cascade ondelete constraint naming"
+- "Capture exception with traceback and structured context logging logger.exception request_id correlation_id retryable errors"
+- "Load cache from file using shelve.open handle OSError fallback recreate cache file path validation"
+- "Find call sites of verify_jwt decode_jwt get_current_user where used FastAPI router dependency tests pytest"

 \u274C BAD EXAMPLES:
 - "How does authentication work?" (natural language question)
 - "Show me the login code" (conversational)
 - "auth" (too vague, needs more context)
+- "JWT authentication middleware token validation security handler FastAPI" (bare keyword list without intent \u2014 add a docstring prefix describing what the code does)
+
+NON-ENGLISH QUERIES: Translate to English before querying.
+- "validate JWT signature and extract claims FastAPI jose RS256 issuer audience"

 RETRIEVAL MECHANISM:
 Combines multiple strategies: Seeds (semantic similarity), Flood (AST graph propagation through imports/calls/inheritance), Neighbors (adjacent chunks), and Calls (function relationships).
@@ -2202,53 +2323,216 @@ WHEN TO USE THIS TOOL:
       budgetTokens
     }) => {
       const deadline = Date.now() + runtime.config.maxQueryWaitMs;
-
-
-
+      while (Date.now() < deadline) {
+        if (isLocalStable()) break;
+        await sleep3(250);
+      }
+      if (!isLocalStable()) {
+        const statusText2 = formatStatus(
          await collectIndexingStatus(runtime, runner)
         );
-        const
+        const text2 = `Indexer not ready (local hashing/dirty). Current status:

-${
-        return { content: [{ type: "text", text }] };
+${statusText2}`;
+        return { content: [{ type: "text", text: text2 }] };
       }
-
-
-
+      let currentHash = computeSnapshot(runtime.filesRepo).snapshotHash;
+      let lastStatus;
+      while (Date.now() < deadline) {
+        if (!isLocalStable()) {
+          while (Date.now() < deadline) {
+            if (isLocalStable()) break;
+            await sleep3(250);
+          }
+          if (!isLocalStable()) break;
+          currentHash = computeSnapshot(runtime.filesRepo).snapshotHash;
+          continue;
+        }
+        try {
+          const status = await withDeadline2(
+            runtime.clients.sync.checkSnapshotStatus(currentHash),
+            deadline
+          );
+          lastStatus = status.status;
+          if (status.status === "READY") {
+            const effectiveBudget = Math.max(100, budgetTokens ?? 1e4);
+            try {
+              const result = await withDeadline2(
+                runtime.clients.retrieval.query(
+                  currentHash,
+                  query,
+                  effectiveBudget,
+                  { formatter: runtime.config.retrievalFormatter }
+                ),
+                deadline
+              );
+              return {
+                content: [
+                  {
+                    type: "text",
+                    text: result.formatted_output ?? "(no formatted output)"
+                  }
+                ]
+              };
+            } catch (error) {
+              const message = error instanceof Error ? error.message : "Unknown retrieval error";
+              runtime.logger.error({ err: error }, "Retrieval query failed");
+              return {
+                content: [
+                  { type: "text", text: `Retrieval error: ${message}` }
+                ],
+                isError: true
+              };
+            }
+          }
+          if (status.status === "FAILED") {
+            while (Date.now() < deadline) {
+              if (isLocalStable()) break;
+              await sleep3(250);
+            }
+            if (!isLocalStable()) break;
+            currentHash = computeSnapshot(runtime.filesRepo).snapshotHash;
+            continue;
+          }
+          runtime.outbox.enqueueSnapshot(runtime.config.rootId, 0);
+        } catch {
+        }
+        const newHash = computeSnapshot(runtime.filesRepo).snapshotHash;
+        if (newHash !== currentHash) {
+          currentHash = newHash;
+          continue;
+        }
+        await sleep3(500);
+      }
+      const statusText = formatStatus(
+        await collectIndexingStatus(runtime, runner)
       );
-
-      const statusText = formatStatus(
-        await collectIndexingStatus(runtime, runner)
-      );
-      const text = `Snapshot not ready on server. Current status:
+      const text = `Snapshot not READY before deadline (last status: ${lastStatus ?? "unknown"}). Current status:

 ${statusText}`;
-
+      return { content: [{ type: "text", text }] };
+    }
+  );
+  server.registerTool(
+    "dump_bush",
+    {
+      title: "Dump Raw Index (Debug)",
+      description: "Debug tool. Downloads the raw HDF5 bush file for the current snapshot and saves it to the specified path. Returns an HDF5 bush reader skill with format documentation and Python code examples.",
+      inputSchema: {
+        filePath: zod.z.string().min(1, "File path is required").refine((p) => p.endsWith(".h5"), "File must have .h5 extension").describe(
+          "Full file name to save (use base dir + any name with .h5 extension), e.g. /tmp/snapshot.h5"
+        )
+      },
+      annotations: {
+        readOnlyHint: false,
+        destructiveHint: false,
+        idempotentHint: true,
+        openWorldHint: true
       }
-
-
-
-
-
-
-
+    },
+    async ({ filePath }) => {
+      const deadline = Date.now() + runtime.config.maxQueryWaitMs;
+      while (Date.now() < deadline) {
+        if (isLocalStable()) break;
+        await sleep3(250);
+      }
+      if (!isLocalStable()) {
+        const statusText2 = formatStatus(
+          await collectIndexingStatus(runtime, runner)
         );
         return {
           content: [
             {
               type: "text",
-              text:
+              text: `Indexer not ready (local hashing/dirty). Current status:
+
+${statusText2}`
             }
-          ]
-        };
-      } catch (error) {
-        const message = error instanceof Error ? error.message : "Unknown retrieval error";
-        runtime.logger.error({ err: error }, "Retrieval query failed");
-        return {
-          content: [{ type: "text", text: `Retrieval error: ${message}` }],
+          ],
           isError: true
         };
       }
+      let currentHash = computeSnapshot(runtime.filesRepo).snapshotHash;
+      let lastStatus;
+      while (Date.now() < deadline) {
+        if (!isLocalStable()) {
+          while (Date.now() < deadline) {
+            if (isLocalStable()) break;
+            await sleep3(250);
+          }
+          if (!isLocalStable()) break;
+          currentHash = computeSnapshot(runtime.filesRepo).snapshotHash;
+          continue;
+        }
+        try {
+          const status = await withDeadline2(
+            runtime.clients.sync.checkSnapshotStatus(currentHash),
+            deadline
+          );
+          lastStatus = status.status;
+          if (status.status === "READY") {
+            const bushBuffer = await withDeadline2(
+              runtime.clients.sync.downloadSnapshotBush(currentHash),
+              deadline
+            );
+            await fs5.mkdir(path2.dirname(filePath), { recursive: true });
+            await fs5.writeFile(filePath, Buffer.from(bushBuffer));
+            runtime.logger.info(
+              {
+                filePath,
+                snapshotHash: currentHash,
+                bytes: bushBuffer.byteLength
+              },
+              "HDF5 bush file saved"
+            );
+            const skill = await withDeadline2(
+              runtime.clients.sync.getHdf5BushReaderSkill(),
+              deadline
+            );
+            return {
+              content: [
+                {
+                  type: "text",
+                  text: `HDF5 bush saved to ${filePath} (${bushBuffer.byteLength} bytes, snapshot ${currentHash}).
+
+${skill}`
+                }
+              ]
+            };
+          }
+          if (status.status === "FAILED") {
+            while (Date.now() < deadline) {
+              if (isLocalStable()) break;
+              await sleep3(250);
+            }
+            if (!isLocalStable()) break;
+            currentHash = computeSnapshot(runtime.filesRepo).snapshotHash;
+            continue;
+          }
+          runtime.outbox.enqueueSnapshot(runtime.config.rootId, 0);
+        } catch {
+        }
+        const newHash = computeSnapshot(runtime.filesRepo).snapshotHash;
+        if (newHash !== currentHash) {
+          currentHash = newHash;
+          continue;
+        }
+        await sleep3(500);
+      }
+      const statusText = formatStatus(
+        await collectIndexingStatus(runtime, runner)
+      );
+      return {
+        content: [
+          {
+            type: "text",
+            text: `Snapshot not READY before deadline (last status: ${lastStatus ?? "unknown"}). Current status:
+
+${statusText}`
+          }
+        ],
+        isError: true
+      };
     }
   );
   return server;