@coderule/mcp 2.0.3 → 2.2.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/cli.cjs +176 -48
- package/dist/cli.cjs.map +1 -1
- package/dist/cli.js +176 -48
- package/dist/cli.js.map +1 -1
- package/dist/index.cjs +176 -48
- package/dist/index.cjs.map +1 -1
- package/dist/index.js +176 -48
- package/dist/index.js.map +1 -1
- package/dist/mcp-cli.cjs +393 -109
- package/dist/mcp-cli.cjs.map +1 -1
- package/dist/mcp-cli.js +395 -111
- package/dist/mcp-cli.js.map +1 -1
- package/package.json +2 -2
package/dist/mcp-cli.js
CHANGED
@@ -1,6 +1,6 @@
 #!/usr/bin/env node
-import fs5 from 'fs/promises';
-import path2 from 'path';
+import fs5, { mkdir, writeFile } from 'fs/promises';
+import path2, { dirname } from 'path';
 import { StdioServerTransport } from '@modelcontextprotocol/sdk/server/stdio.js';
 import { createHash } from 'crypto';
 import envPaths from 'env-paths';
@@ -527,7 +527,10 @@ var FilesRepo = class {
       `UPDATE files
        SET content_sha256 = ?, service_file_hash = ?, hash_state = 'clean',
            hash_owner = NULL, hash_lease_expires_at = NULL, hash_started_at = NULL
-       WHERE id =
+       WHERE id = ?
+         AND hash_state = 'hashing'
+         AND hash_owner = ?
+         AND hash_started_at = ?`
     );
     this.requeueExpiredHashingStmt = this.db.prepare(
       `UPDATE files
@@ -658,13 +661,30 @@ var FilesRepo = class {
     tx(ids);
   }
   applyHashResults(results) {
-    if (!results.length) return;
+    if (!results.length) return 0;
+    let applied = 0;
     const tx = this.db.transaction((batch) => {
-      for (const {
-
+      for (const {
+        id,
+        contentSha256,
+        serviceFileHash,
+        hashOwner,
+        hashStartedAt
+      } of batch) {
+        const result = this.applyHashesStmt.run(
+          contentSha256,
+          serviceFileHash,
+          id,
+          hashOwner,
+          hashStartedAt
+        );
+        if ((result.changes ?? 0) > 0) {
+          applied += 1;
+        }
       }
     });
     tx(results);
+    return applied;
   }
   getCleanFilesForSnapshot() {
     return this.selectCleanSnapshotStmt.all();
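Reviewer note: the two FilesRepo hunks above form one change. The UPDATE now carries a compare-and-swap style guard (the row must still be in hash_state = 'hashing', owned by the same worker, and carry the same hash_started_at), and applyHashResults counts how many rows the guard actually let through. A minimal sketch of the pattern with better-sqlite3 (abbreviated schema, not the package's actual one):

```ts
import Database from "better-sqlite3";

const db = new Database(":memory:");
db.exec(`CREATE TABLE files (
  id INTEGER PRIMARY KEY,
  content_sha256 TEXT,
  hash_state TEXT,
  hash_owner TEXT,
  hash_started_at INTEGER
)`);

// Guarded UPDATE: succeeds only if this worker still owns the in-flight hash.
const apply = db.prepare(`
  UPDATE files
     SET content_sha256 = ?, hash_state = 'clean', hash_owner = NULL
   WHERE id = ? AND hash_state = 'hashing' AND hash_owner = ? AND hash_started_at = ?
`);

// db.transaction() wraps the loop in a single SQLite transaction;
// info.changes is 0 when another worker re-claimed the row in the meantime.
const applyAll = db.transaction(
  (batch: Array<{ id: number; sha: string; owner: string; startedAt: number }>) => {
    let applied = 0;
    for (const r of batch) {
      const info = apply.run(r.sha, r.id, r.owner, r.startedAt);
      if (info.changes > 0) applied += 1;
    }
    return applied;
  }
);

// usage: const applied = applyAll(results);
```

A result that loses the race simply produces changes === 0 and is discarded, so a worker whose lease expired can no longer overwrite a newer claim.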
@@ -714,7 +734,7 @@ var Outbox = class {
     this.log = logger2.child({ scope: "outbox" });
     this.queue = new Qulite(db, {
       logger: this.log,
-      defaultLeaseMs:
+      defaultLeaseMs: 6e5,
       defaultMaxAttempts: 10
     });
     this.markKindStmt = db.prepare(
@@ -772,7 +792,7 @@ var Outbox = class {
       this.log.debug({ rootId }, "Enqueued heartbeat job");
     }
   }
-  claimFsControlJob(leaseOwner, leaseMs =
+  claimFsControlJob(leaseOwner, leaseMs = 6e5) {
     return this.queue.claimNext({ type: "fs_control", leaseOwner, leaseMs });
   }
   ack(jobId, leaseOwner) {
@@ -1088,7 +1108,9 @@ var Hasher = class {
           successes.push({
             id: record.id,
             contentSha256: result.contentSha256,
-            serviceFileHash: result.serviceFileHash
+            serviceFileHash: result.serviceFileHash,
+            hashOwner: record.hash_owner ?? this.ownerId,
+            hashStartedAt: record.hash_started_at ?? 0
           });
         } catch (error) {
           if (error?.code === "ENOENT") {
@@ -1247,6 +1269,42 @@ async function runInventory(opts) {
 async function sleep(ms) {
   return new Promise((resolve) => setTimeout(resolve, ms));
 }
+var SnapshotStaleError = class extends Error {
+  constructor(message = "snapshot became stale") {
+    super(message);
+    this.name = "SnapshotStaleError";
+  }
+};
+function assertNotStale(opts) {
+  if (opts?.isStale?.()) {
+    throw new SnapshotStaleError();
+  }
+}
+function assertBeforeDeadline(opts) {
+  if (opts?.deadlineMs !== void 0 && Date.now() >= opts.deadlineMs) {
+    throw new Error("snapshot publish deadline exceeded");
+  }
+}
+function assertValid(opts) {
+  assertNotStale(opts);
+  assertBeforeDeadline(opts);
+}
+function withDeadline(promise, opts) {
+  if (opts?.deadlineMs === void 0) return promise;
+  const remaining = opts.deadlineMs - Date.now();
+  if (remaining <= 0) {
+    return Promise.reject(new Error("snapshot publish deadline exceeded"));
+  }
+  return Promise.race([
+    promise,
+    new Promise(
+      (_, reject) => setTimeout(
+        () => reject(new Error("snapshot publish deadline exceeded")),
+        remaining
+      )
+    )
+  ]);
+}
 function computeSnapshot(filesRepo) {
   const files = filesRepo.getCleanFilesForSnapshot();
   const hashes = files.map((file) => file.service_file_hash).filter((hash) => typeof hash === "string");
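The added withDeadline helper is the usual Promise.race timeout idiom: reject after the remaining time unless the wrapped promise settles first. Worth knowing when reading it: the race does not cancel the losing side, and as written the rejection timer stays scheduled after a win. A sketch of the same idiom with an explicit timer cleanup (my variation, not what the package ships):

```ts
function withDeadline<T>(promise: Promise<T>, deadlineMs?: number): Promise<T> {
  if (deadlineMs === undefined) return promise;
  const remaining = deadlineMs - Date.now();
  if (remaining <= 0) {
    return Promise.reject(new Error("deadline exceeded"));
  }
  let timer: NodeJS.Timeout | undefined;
  const timeout = new Promise<never>((_, reject) => {
    timer = setTimeout(() => reject(new Error("deadline exceeded")), remaining);
  });
  // finally clears the timer so a resolved race doesn't keep the process alive.
  return Promise.race([promise, timeout]).finally(() => clearTimeout(timer));
}
```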
@@ -1262,12 +1320,17 @@ function computeSnapshot(filesRepo) {
     totalSize
   };
 }
-async function withRetries(op, logger2, context, maxAttempts) {
+async function withRetries(op, logger2, context, maxAttempts, opts) {
   let attempt = 0;
   while (true) {
+    assertValid(opts);
     try {
-      return await op();
+      return await withDeadline(op(), opts);
     } catch (err) {
+      if (err instanceof SnapshotStaleError) throw err;
+      if (typeof err?.message === "string" && err.message.includes("deadline exceeded")) {
+        throw err;
+      }
       attempt += 1;
       if (attempt >= maxAttempts) {
         logger2.error(
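withRetries now lets two error classes escape the retry loop immediately: staleness (an instanceof check) and deadline expiry (a substring match on err.message, since withDeadline rejects with a plain Error). Matching on message text works but is brittle if the wording ever changes; a sketch of the same classification with a dedicated error type instead (hypothetical names, not the package's):

```ts
class DeadlineExceededError extends Error {
  constructor() {
    super("deadline exceeded");
    this.name = "DeadlineExceededError";
  }
}

class SnapshotStaleError extends Error {}

// Errors that should abort the whole publish rather than be retried.
function isNonRetryable(err: unknown): boolean {
  return err instanceof SnapshotStaleError || err instanceof DeadlineExceededError;
}

async function withRetries<T>(op: () => Promise<T>, maxAttempts: number): Promise<T> {
  for (let attempt = 1; ; attempt++) {
    try {
      return await op();
    } catch (err) {
      if (isNonRetryable(err) || attempt >= maxAttempts) throw err;
      await new Promise((r) => setTimeout(r, 1000 * attempt)); // linear backoff for brevity
    }
  }
}
```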
@@ -1285,7 +1348,14 @@ async function withRetries(op, logger2, context, maxAttempts) {
     }
   }
 }
-
+function computeServiceFileHash(relPath, content) {
+  const hash = createHash("sha256");
+  hash.update(relPath);
+  hash.update("\n");
+  hash.update(content);
+  return hash.digest("hex");
+}
+async function uploadMissing(rootPath, missing, syncClient, logger2, maxAttempts, chunkSize = 1, opts) {
   if (!missing || missing.length === 0) return;
   const total = missing.length;
   const chunks = [];
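computeServiceFileHash folds the relative path into the digest (sha256 over relPath, a newline, then the content), so identical bytes at two different paths produce two different service hashes, and a rename alone is enough to make a file entry mismatch. A quick check of that property:

```ts
import { createHash } from "crypto";

function computeServiceFileHash(relPath: string, content: Buffer): string {
  const hash = createHash("sha256");
  hash.update(relPath);
  hash.update("\n");
  hash.update(content);
  return hash.digest("hex");
}

const body = Buffer.from("export const x = 1;\n");
const a = computeServiceFileHash("src/a.ts", body);
const b = computeServiceFileHash("src/b.ts", body);
console.log(a === b); // false: identical content, different path
```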
@@ -1293,17 +1363,36 @@ async function uploadMissing(rootPath, missing, syncClient, logger2, maxAttempts
     chunks.push(missing.slice(i, i + chunkSize));
   }
   for (let idx = 0; idx < chunks.length; idx++) {
+    assertValid(opts);
     const list = chunks[idx];
     const map = /* @__PURE__ */ new Map();
     for (const missingFile of list) {
       const absPath = path2.join(rootPath, missingFile.file_path);
       try {
         const buffer = await fs5.readFile(absPath);
+        const serviceHash = computeServiceFileHash(
+          missingFile.file_path,
+          buffer
+        );
+        if (serviceHash !== missingFile.file_hash) {
+          logger2.warn(
+            {
+              relPath: missingFile.file_path,
+              expected: missingFile.file_hash,
+              got: serviceHash
+            },
+            "File content changed during upload; snapshot is stale"
+          );
+          throw new SnapshotStaleError(
+            `file hash mismatch for ${missingFile.file_path}`
+          );
+        }
         map.set(missingFile.file_hash, {
           path: missingFile.file_path,
           content: buffer
         });
       } catch (error) {
+        if (error instanceof SnapshotStaleError) throw error;
         logger2.warn(
           { err: error, relPath: missingFile.file_path },
           "Failed to read missing file content"
@@ -1311,6 +1400,7 @@ async function uploadMissing(rootPath, missing, syncClient, logger2, maxAttempts
       }
     }
     if (map.size === 0) continue;
+    assertValid(opts);
     await withRetries(
       () => syncClient.uploadFileContent(map),
       logger2,
@@ -1320,65 +1410,76 @@ async function uploadMissing(rootPath, missing, syncClient, logger2, maxAttempts
         chunks: chunks.length,
         files: map.size
       },
-      maxAttempts
+      maxAttempts,
+      opts
     );
   }
 }
-async function ensureSnapshotCreated(rootPath, computation, syncClient, logger2,
+async function ensureSnapshotCreated(rootPath, computation, syncClient, logger2, opts) {
   const { snapshotHash, files } = computation;
-  const maxAttempts =
-  const uploadChunkSize =
+  const maxAttempts = opts?.maxAttempts ?? 5;
+  const uploadChunkSize = opts?.uploadChunkSize ?? 1;
+  assertValid(opts);
   let status = await withRetries(
     () => syncClient.checkSnapshotStatus(snapshotHash),
     logger2,
     { op: "checkSnapshotStatus", snapshotHash },
-    maxAttempts
+    maxAttempts,
+    opts
   );
   if (status.status === "READY") {
    logger2.info({ snapshotHash }, "Snapshot already READY");
     return;
   }
-  if (status.status === "NOT_FOUND" || status.status === "MISSING_CONTENT") {
-    status = await withRetries(
-      () => syncClient.createSnapshot(snapshotHash, files),
-      logger2,
-      { op: "createSnapshot", snapshotHash },
-      maxAttempts
-    );
-  }
-  if (status.status === "MISSING_CONTENT" && status.missing_files?.length) {
-    logger2.info(
-      { missing: status.missing_files.length },
-      "Uploading missing file content"
-    );
-    await uploadMissing(
-      rootPath,
-      status.missing_files,
-      syncClient,
-      logger2,
-      maxAttempts,
-      uploadChunkSize
-    );
-    status = await withRetries(
-      () => syncClient.createSnapshot(snapshotHash, files),
-      logger2,
-      { op: "createSnapshot", snapshotHash },
-      maxAttempts
-    );
-  }
   let attempt = 0;
   while (status.status !== "READY") {
+    assertValid(opts);
     if (status.status === "FAILED") {
       throw new Error(`Snapshot failed processing: ${JSON.stringify(status)}`);
     }
+    if (status.status === "NOT_FOUND" || status.status === "MISSING_CONTENT") {
+      status = await withRetries(
+        () => syncClient.createSnapshot(snapshotHash, files),
+        logger2,
+        { op: "createSnapshot", snapshotHash },
+        maxAttempts,
+        opts
+      );
+    }
+    if (status.status === "MISSING_CONTENT" && status.missing_files?.length) {
+      logger2.info(
+        { missing: status.missing_files.length },
+        "Uploading missing file content"
+      );
+      await uploadMissing(
+        rootPath,
+        status.missing_files,
+        syncClient,
+        logger2,
+        maxAttempts,
+        uploadChunkSize,
+        opts
+      );
+      status = await withRetries(
+        () => syncClient.createSnapshot(snapshotHash, files),
+        logger2,
+        { op: "createSnapshot", snapshotHash },
+        maxAttempts,
+        opts
+      );
+    }
+    if (status.status === "READY") break;
     const delay = Math.min(5e3, 1e3 * Math.max(1, 2 ** attempt));
     await sleep(delay);
     attempt += 1;
+    assertValid(opts);
     status = await withRetries(
       () => syncClient.checkSnapshotStatus(snapshotHash),
       logger2,
       { op: "checkSnapshotStatus", snapshotHash },
-      maxAttempts
+      maxAttempts,
+      opts
    );
   }
   logger2.info({ snapshotHash }, "Snapshot READY");
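Two things changed in ensureSnapshotCreated: the NOT_FOUND / MISSING_CONTENT handling moved inside the polling loop, so missing content reported on a later status check still triggers createSnapshot and a re-upload, and every await is now bracketed by assertValid and the opts deadline. The poll delay, Math.min(5e3, 1e3 * Math.max(1, 2 ** attempt)), walks 1s, 2s, 4s, then stays capped at 5s:

```ts
const delays = Array.from({ length: 6 }, (_, attempt) =>
  Math.min(5e3, 1e3 * Math.max(1, 2 ** attempt))
);
console.log(delays); // [1000, 2000, 4000, 5000, 5000, 5000]
```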
@@ -1545,12 +1646,14 @@ async function sendHeartbeat(rootId, snapshotsRepo, syncClient, logger2) {
 // src/service/State.ts
 var ServiceState = class {
   constructor() {
+    this.changeSeq = 0;
     this.lastChangeAt = Date.now();
     this.lastSnapshotReadyAt = Date.now();
     this.lastHeartbeatEnqueuedAt = 0;
   }
   updateChange(timestamp = Date.now()) {
     this.lastChangeAt = timestamp;
+    this.changeSeq += 1;
   }
   updateSnapshotReady(timestamp = Date.now()) {
     this.lastSnapshotReadyAt = timestamp;
@@ -1576,6 +1679,7 @@ function computeBackoff(attempts) {
   const delay = 1e3 * 2 ** base;
   return Math.min(delay, 6e4);
 }
+var PUBLISH_MAX_MS = 5 * 6e4;
 async function readSymlinkTarget2(absPath) {
   try {
     return await fs5.readlink(absPath);
@@ -1854,9 +1958,10 @@ var ServiceRunner = class {
     if (this.runtime.filesRepo.countByState("dirty") > 0 || this.runtime.filesRepo.countByState("hashing") > 0) {
       const delay = computeBackoff(job.attempts);
       this.runtime.outbox.retry(job.id, this.fsControlLeaseOwner, delay);
-      await sleep2(200);
       return;
     }
+    const publishSeq = this.state.changeSeq;
+    const isStale = () => this.state.changeSeq !== publishSeq || this.runtime.filesRepo.countByState("dirty") > 0 || this.runtime.filesRepo.countByState("hashing") > 0;
     try {
       const result = await publishSnapshot(
         this.runtime.config.rootPath,
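publishSeq / isStale builds on the changeSeq counter added to ServiceState above: a monotonically increasing sequence bumped on every observed file change. Capturing it when the publish starts and comparing inside the closure catches any change that lands mid-publish, even one that would leave timestamps or dirty counts looking unchanged by the time they are re-read. A reduced sketch of the idea:

```ts
class ServiceState {
  changeSeq = 0;
  updateChange() {
    this.changeSeq += 1; // bumped on every observed file change
  }
}

const state = new ServiceState();

// Capture the sequence number at publish start...
const publishSeq = state.changeSeq;
const isStale = () => state.changeSeq !== publishSeq;

state.updateChange(); // a file changes mid-publish
console.log(isStale()); // true: abort and retry with a fresh snapshot
```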
@@ -1866,17 +1971,40 @@ var ServiceRunner = class {
         log,
         {
           maxAttempts: this.runtime.config.maxSnapshotAttempts,
-          uploadChunkSize: this.runtime.config.uploadChunkSize
+          uploadChunkSize: this.runtime.config.uploadChunkSize,
+          deadlineMs: Date.now() + PUBLISH_MAX_MS,
+          isStale
         }
       );
-      this.runtime.outbox.ack(job.id, this.fsControlLeaseOwner);
+      const acked = this.runtime.outbox.ack(job.id, this.fsControlLeaseOwner);
+      if (!acked) {
+        log.warn(
+          { jobId: job.id },
+          "Snapshot job ack failed (lease may have expired)"
+        );
+      }
       this.state.updateSnapshotReady(result.createdAt);
       log.info({ snapshotHash: result.snapshotHash }, "Snapshot job completed");
+      if (this.runtime.filesRepo.countByState("dirty") > 0 || this.runtime.filesRepo.countByState("hashing") > 0) {
+        this.runtime.outbox.enqueueSnapshot(this.runtime.config.rootId, 0);
+        return;
+      }
+      const nowHash = computeSnapshot(this.runtime.filesRepo).snapshotHash;
+      if (nowHash !== result.snapshotHash) {
+        this.runtime.outbox.enqueueSnapshot(this.runtime.config.rootId, 0);
+      }
     } catch (error) {
+      if (error instanceof SnapshotStaleError) {
+        log.debug(
+          { err: error },
+          "Snapshot publish aborted (stale); quick retry"
+        );
+        this.runtime.outbox.retry(job.id, this.fsControlLeaseOwner, 250);
+        return;
+      }
       log.warn({ err: error }, "Snapshot job failed");
       const delay = computeBackoff(job.attempts);
       this.runtime.outbox.retry(job.id, this.fsControlLeaseOwner, delay);
-      await sleep2(delay);
     }
   }
   async handleHeartbeatJob(job, log) {
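The catch branch now splits retry policy by cause: a SnapshotStaleError is expected churn and gets an almost immediate 250 ms retry, while real failures keep the exponential computeBackoff delay (and the blocking await sleep2(...) calls are gone, so the runner loop is no longer held up in either case). A sketch of that dispatch in isolation (self-contained names that mirror, but do not copy, the package code):

```ts
class SnapshotStaleError extends Error {}

function retryDelayMs(err: unknown, attempts: number): number {
  // Stale aborts are expected churn: retry almost immediately.
  if (err instanceof SnapshotStaleError) return 250;
  // Real failures back off exponentially, capped at 60s (mirrors computeBackoff).
  return Math.min(1000 * 2 ** attempts, 60_000);
}
```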
@@ -2085,33 +2213,20 @@ function createMcpServer({
   async function sleep3(ms) {
     return new Promise((resolve) => setTimeout(resolve, ms));
   }
-
-
-    while (!latest && Date.now() < deadlineMs) {
-      await sleep3(250);
-      latest = runtime.snapshotsRepo.getLatest();
-    }
-    return latest;
+  function isLocalStable() {
+    return runtime.filesRepo.countByState("dirty") === 0 && runtime.filesRepo.countByState("hashing") === 0;
   }
-  async function
-
-
-
-      const status = await runtime.clients.sync.checkSnapshotStatus(currentHash);
-      if (status.status === "READY") {
-        return currentHash;
-      }
-      if (status.status === "FAILED") {
-      }
-    } catch {
-    }
-    await sleep3(500);
-    const latest = runtime.snapshotsRepo.getLatest();
-    if (latest && latest.snapshot_hash !== currentHash) {
-      currentHash = latest.snapshot_hash;
-    }
+  async function withDeadline2(promise, deadlineMs) {
+    const remaining = deadlineMs - Date.now();
+    if (remaining <= 0) {
+      throw new Error("deadline exceeded");
     }
-  return
+    return Promise.race([
+      promise,
+      new Promise(
+        (_, reject) => setTimeout(() => reject(new Error("deadline exceeded")), remaining)
+      )
+    ]);
   }
   const server = new McpServer({
     name: SERVER_NAME,
@@ -2145,20 +2260,26 @@ function createMcpServer({
       title: "Semantic Code Retrieval",
       description: "USE THIS before code exploration. Graph-based RAG for semantic code search using AST relationships and embeddings. Returns 30-50 code segments with file paths and line numbers. After initial query, you may query again with refined keywords or use conventional tools (Read, Glob, Grep) for specific files.",
       inputSchema: {
-        query: z.string().min(1, "Query text is required").describe(`
+        query: z.string().min(1, "Query text is required").describe(`Short docstring-style description of what the code does + specific technical terms and identifiers (NOT natural language questions, NOT bare keyword lists).
 
-STRATEGY:
+STRATEGY: Start with a mini-docstring (what the code does), then append concrete identifiers, API names, error types, and domain terms.
 
 \u2705 GOOD EXAMPLES:
-- "JWT
-- "
-- "
-- "
+- "Validate JWT access token and extract user claims FastAPI dependency AuthMiddleware jose jwt decode RS256 audience issuer"
+- "Index repository files into embedding store Celery worker chunker AST parse imports calls graph propagate batch upsert vector_index"
+- "Create Alembic migration for SQLAlchemy relationship add foreign key backfill existing rows cascade ondelete constraint naming"
+- "Capture exception with traceback and structured context logging logger.exception request_id correlation_id retryable errors"
+- "Load cache from file using shelve.open handle OSError fallback recreate cache file path validation"
+- "Find call sites of verify_jwt decode_jwt get_current_user where used FastAPI router dependency tests pytest"
 
 \u274C BAD EXAMPLES:
 - "How does authentication work?" (natural language question)
 - "Show me the login code" (conversational)
 - "auth" (too vague, needs more context)
+- "JWT authentication middleware token validation security handler FastAPI" (bare keyword list without intent \u2014 add a docstring prefix describing what the code does)
+
+NON-ENGLISH QUERIES: Translate to English before querying.
+- "validate JWT signature and extract claims FastAPI jose RS256 issuer audience"
 
 RETRIEVAL MECHANISM:
 Combines multiple strategies: Seeds (semantic similarity), Flood (AST graph propagation through imports/calls/inheritance), Neighbors (adjacent chunks), and Calls (function relationships).
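The expanded query guidance lives entirely inside zod's .describe() string on the tool's input schema, which the MCP SDK surfaces to clients as the parameter documentation. A minimal sketch of that schema shape (zod API as published; the budgetTokens constraints here are illustrative, not the package's exact schema):

```ts
import { z } from "zod";

const inputSchema = {
  query: z
    .string()
    .min(1, "Query text is required")
    .describe(
      "Short docstring-style description of what the code does " +
        "+ specific technical terms and identifiers"
    ),
  // budgetTokens appears in the handler below; its schema here is assumed.
  budgetTokens: z
    .number()
    .int()
    .positive()
    .optional()
    .describe("Approximate token budget for returned code segments")
};
```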
@@ -2186,53 +2307,216 @@ WHEN TO USE THIS TOOL:
       budgetTokens
     }) => {
       const deadline = Date.now() + runtime.config.maxQueryWaitMs;
-
-
-
+      while (Date.now() < deadline) {
+        if (isLocalStable()) break;
+        await sleep3(250);
+      }
+      if (!isLocalStable()) {
+        const statusText2 = formatStatus(
           await collectIndexingStatus(runtime, runner)
         );
-      const
+        const text2 = `Indexer not ready (local hashing/dirty). Current status:
 
-${
-      return { content: [{ type: "text", text }] };
+${statusText2}`;
+        return { content: [{ type: "text", text: text2 }] };
       }
-
-
-
+      let currentHash = computeSnapshot(runtime.filesRepo).snapshotHash;
+      let lastStatus;
+      while (Date.now() < deadline) {
+        if (!isLocalStable()) {
+          while (Date.now() < deadline) {
+            if (isLocalStable()) break;
+            await sleep3(250);
+          }
+          if (!isLocalStable()) break;
+          currentHash = computeSnapshot(runtime.filesRepo).snapshotHash;
+          continue;
+        }
+        try {
+          const status = await withDeadline2(
+            runtime.clients.sync.checkSnapshotStatus(currentHash),
+            deadline
+          );
+          lastStatus = status.status;
+          if (status.status === "READY") {
+            const effectiveBudget = Math.max(100, budgetTokens ?? 1e4);
+            try {
+              const result = await withDeadline2(
+                runtime.clients.retrieval.query(
+                  currentHash,
+                  query,
+                  effectiveBudget,
+                  { formatter: runtime.config.retrievalFormatter }
+                ),
+                deadline
+              );
+              return {
+                content: [
+                  {
+                    type: "text",
+                    text: result.formatted_output ?? "(no formatted output)"
+                  }
+                ]
+              };
+            } catch (error) {
+              const message = error instanceof Error ? error.message : "Unknown retrieval error";
+              runtime.logger.error({ err: error }, "Retrieval query failed");
+              return {
+                content: [
+                  { type: "text", text: `Retrieval error: ${message}` }
+                ],
+                isError: true
+              };
+            }
+          }
+          if (status.status === "FAILED") {
+            while (Date.now() < deadline) {
+              if (isLocalStable()) break;
+              await sleep3(250);
+            }
+            if (!isLocalStable()) break;
+            currentHash = computeSnapshot(runtime.filesRepo).snapshotHash;
+            continue;
+          }
+          runtime.outbox.enqueueSnapshot(runtime.config.rootId, 0);
+        } catch {
+        }
+        const newHash = computeSnapshot(runtime.filesRepo).snapshotHash;
+        if (newHash !== currentHash) {
+          currentHash = newHash;
+          continue;
+        }
+        await sleep3(500);
+      }
+      const statusText = formatStatus(
+        await collectIndexingStatus(runtime, runner)
       );
-
-      const statusText = formatStatus(
-        await collectIndexingStatus(runtime, runner)
-      );
-      const text = `Snapshot not ready on server. Current status:
+      const text = `Snapshot not READY before deadline (last status: ${lastStatus ?? "unknown"}). Current status:
 
 ${statusText}`;
-
+      return { content: [{ type: "text", text }] };
+    }
+  );
+  server.registerTool(
+    "dump_bush",
+    {
+      title: "Dump Raw Index (Debug)",
+      description: "Debug tool. Downloads the raw HDF5 bush file for the current snapshot and saves it to the specified path. Returns an HDF5 bush reader skill with format documentation and Python code examples.",
+      inputSchema: {
+        filePath: z.string().min(1, "File path is required").refine((p) => p.endsWith(".h5"), "File must have .h5 extension").describe(
+          "Full file name to save (use base dir + any name with .h5 extension), e.g. /tmp/snapshot.h5"
+        )
+      },
+      annotations: {
+        readOnlyHint: false,
+        destructiveHint: false,
+        idempotentHint: true,
+        openWorldHint: true
       }
-
-
-
-
-
-
-
+    },
+    async ({ filePath }) => {
+      const deadline = Date.now() + runtime.config.maxQueryWaitMs;
+      while (Date.now() < deadline) {
+        if (isLocalStable()) break;
+        await sleep3(250);
+      }
+      if (!isLocalStable()) {
+        const statusText2 = formatStatus(
+          await collectIndexingStatus(runtime, runner)
         );
         return {
           content: [
             {
               type: "text",
-              text:
+              text: `Indexer not ready (local hashing/dirty). Current status:
+
+${statusText2}`
             }
-          ]
-        };
-      } catch (error) {
-        const message = error instanceof Error ? error.message : "Unknown retrieval error";
-        runtime.logger.error({ err: error }, "Retrieval query failed");
-        return {
-          content: [{ type: "text", text: `Retrieval error: ${message}` }],
+          ],
           isError: true
         };
       }
+      let currentHash = computeSnapshot(runtime.filesRepo).snapshotHash;
+      let lastStatus;
+      while (Date.now() < deadline) {
+        if (!isLocalStable()) {
+          while (Date.now() < deadline) {
+            if (isLocalStable()) break;
+            await sleep3(250);
+          }
+          if (!isLocalStable()) break;
+          currentHash = computeSnapshot(runtime.filesRepo).snapshotHash;
+          continue;
+        }
+        try {
+          const status = await withDeadline2(
+            runtime.clients.sync.checkSnapshotStatus(currentHash),
+            deadline
+          );
+          lastStatus = status.status;
+          if (status.status === "READY") {
+            const bushBuffer = await withDeadline2(
+              runtime.clients.sync.downloadSnapshotBush(currentHash),
+              deadline
+            );
+            await mkdir(dirname(filePath), { recursive: true });
+            await writeFile(filePath, Buffer.from(bushBuffer));
+            runtime.logger.info(
+              {
+                filePath,
+                snapshotHash: currentHash,
+                bytes: bushBuffer.byteLength
+              },
+              "HDF5 bush file saved"
+            );
+            const skill = await withDeadline2(
+              runtime.clients.sync.getHdf5BushReaderSkill(),
+              deadline
+            );
+            return {
+              content: [
+                {
+                  type: "text",
+                  text: `HDF5 bush saved to ${filePath} (${bushBuffer.byteLength} bytes, snapshot ${currentHash}).
+
+${skill}`
+                }
+              ]
+            };
+          }
+          if (status.status === "FAILED") {
+            while (Date.now() < deadline) {
+              if (isLocalStable()) break;
+              await sleep3(250);
+            }
+            if (!isLocalStable()) break;
+            currentHash = computeSnapshot(runtime.filesRepo).snapshotHash;
+            continue;
+          }
+          runtime.outbox.enqueueSnapshot(runtime.config.rootId, 0);
+        } catch {
+        }
+        const newHash = computeSnapshot(runtime.filesRepo).snapshotHash;
+        if (newHash !== currentHash) {
+          currentHash = newHash;
+          continue;
+        }
+        await sleep3(500);
+      }
+      const statusText = formatStatus(
+        await collectIndexingStatus(runtime, runner)
+      );
+      return {
+        content: [
+          {
+            type: "text",
+            text: `Snapshot not READY before deadline (last status: ${lastStatus ?? "unknown"}). Current status:
+
+${statusText}`
+          }
+        ],
+        isError: true
+      };
     }
   );
   return server;