@coderule/mcp 2.0.3 → 2.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/cli.cjs +176 -48
- package/dist/cli.cjs.map +1 -1
- package/dist/cli.js +176 -48
- package/dist/cli.js.map +1 -1
- package/dist/index.cjs +176 -48
- package/dist/index.cjs.map +1 -1
- package/dist/index.js +176 -48
- package/dist/index.js.map +1 -1
- package/dist/mcp-cli.cjs +281 -119
- package/dist/mcp-cli.cjs.map +1 -1
- package/dist/mcp-cli.js +281 -119
- package/dist/mcp-cli.js.map +1 -1
- package/package.json +1 -1
package/dist/index.cjs CHANGED
@@ -539,7 +539,10 @@ var FilesRepo = class {
       `UPDATE files
        SET content_sha256 = ?, service_file_hash = ?, hash_state = 'clean',
            hash_owner = NULL, hash_lease_expires_at = NULL, hash_started_at = NULL
-       WHERE id = ?`
+       WHERE id = ?
+         AND hash_state = 'hashing'
+         AND hash_owner = ?
+         AND hash_started_at = ?`
     );
     this.requeueExpiredHashingStmt = this.db.prepare(
       `UPDATE files
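The new WHERE predicates turn the hash-apply UPDATE into a compare-and-set: a result lands only if the row is still in the 'hashing' state and still carries the same owner and start timestamp, so a worker whose lease expired and was requeued cannot overwrite newer state. A minimal sketch of the pattern; the better-sqlite3-style API is an assumption inferred from the prepare/run/transaction/changes calls in this diff, and the schema below is illustrative, not the package's real one:

const Database = require("better-sqlite3"); // assumed driver, based on the API shape in the diff

const db = new Database(":memory:");
db.exec(`CREATE TABLE files (
  id INTEGER PRIMARY KEY,
  content_sha256 TEXT,
  service_file_hash TEXT,
  hash_state TEXT,
  hash_owner TEXT,
  hash_started_at INTEGER,
  hash_lease_expires_at INTEGER
)`);
db.prepare(
  "INSERT INTO files (id, hash_state, hash_owner, hash_started_at) VALUES (1, 'hashing', 'worker-a', 100)"
).run();

const applyHashes = db.prepare(`UPDATE files
  SET content_sha256 = ?, service_file_hash = ?, hash_state = 'clean',
      hash_owner = NULL, hash_lease_expires_at = NULL, hash_started_at = NULL
  WHERE id = ?
    AND hash_state = 'hashing'
    AND hash_owner = ?
    AND hash_started_at = ?`);

// Same owner and start timestamp: the write lands.
console.log(applyHashes.run("sha", "svc", 1, "worker-a", 100).changes); // 1
// A second, stale attempt no longer matches the guard: silent no-op.
console.log(applyHashes.run("sha2", "svc2", 1, "worker-a", 100).changes); // 0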
@@ -670,13 +673,30 @@ var FilesRepo = class {
     tx(ids);
   }
   applyHashResults(results) {
-    if (!results.length) return;
+    if (!results.length) return 0;
+    let applied = 0;
     const tx = this.db.transaction((batch) => {
-      for (const {
-
+      for (const {
+        id,
+        contentSha256,
+        serviceFileHash,
+        hashOwner,
+        hashStartedAt
+      } of batch) {
+        const result = this.applyHashesStmt.run(
+          contentSha256,
+          serviceFileHash,
+          id,
+          hashOwner,
+          hashStartedAt
+        );
+        if ((result.changes ?? 0) > 0) {
+          applied += 1;
+        }
       }
     });
     tx(results);
+    return applied;
   }
   getCleanFilesForSnapshot() {
     return this.selectCleanSnapshotStmt.all();
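Counting result.changes lets applyHashResults return how many rows actually applied, so a caller can distinguish wins from silent compare-and-set losses. A hypothetical caller; filesRepo, successes, and log are stand-ins, not names from this package:

// Hypothetical usage; the real call site lives elsewhere in the bundle.
const applied = filesRepo.applyHashResults(successes);
if (applied < successes.length) {
  log.debug(
    { applied, attempted: successes.length },
    "Some hash results were stale and skipped"
  );
}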
@@ -726,7 +746,7 @@ var Outbox = class {
     this.log = logger2.child({ scope: "outbox" });
     this.queue = new qulite.Qulite(db, {
       logger: this.log,
-      defaultLeaseMs:
+      defaultLeaseMs: 6e5,
       defaultMaxAttempts: 10
     });
     this.markKindStmt = db.prepare(
@@ -784,7 +804,7 @@ var Outbox = class {
       this.log.debug({ rootId }, "Enqueued heartbeat job");
     }
   }
-  claimFsControlJob(leaseOwner, leaseMs =
+  claimFsControlJob(leaseOwner, leaseMs = 6e5) {
    return this.queue.claimNext({ type: "fs_control", leaseOwner, leaseMs });
  }
  ack(jobId, leaseOwner) {
@@ -1100,7 +1120,9 @@ var Hasher = class {
         successes.push({
           id: record.id,
           contentSha256: result.contentSha256,
-          serviceFileHash: result.serviceFileHash
+          serviceFileHash: result.serviceFileHash,
+          hashOwner: record.hash_owner ?? this.ownerId,
+          hashStartedAt: record.hash_started_at ?? 0
         });
       } catch (error) {
         if (error?.code === "ENOENT") {
@@ -1259,6 +1281,42 @@ async function runInventory(opts) {
 async function sleep(ms) {
   return new Promise((resolve) => setTimeout(resolve, ms));
 }
+var SnapshotStaleError = class extends Error {
+  constructor(message = "snapshot became stale") {
+    super(message);
+    this.name = "SnapshotStaleError";
+  }
+};
+function assertNotStale(opts) {
+  if (opts?.isStale?.()) {
+    throw new SnapshotStaleError();
+  }
+}
+function assertBeforeDeadline(opts) {
+  if (opts?.deadlineMs !== void 0 && Date.now() >= opts.deadlineMs) {
+    throw new Error("snapshot publish deadline exceeded");
+  }
+}
+function assertValid(opts) {
+  assertNotStale(opts);
+  assertBeforeDeadline(opts);
+}
+function withDeadline(promise, opts) {
+  if (opts?.deadlineMs === void 0) return promise;
+  const remaining = opts.deadlineMs - Date.now();
+  if (remaining <= 0) {
+    return Promise.reject(new Error("snapshot publish deadline exceeded"));
+  }
+  return Promise.race([
+    promise,
+    new Promise(
+      (_, reject) => setTimeout(
+        () => reject(new Error("snapshot publish deadline exceeded")),
+        remaining
+      )
+    )
+  ]);
+}
 function computeSnapshot(filesRepo) {
   const files = filesRepo.getCleanFilesForSnapshot();
   const hashes = files.map((file) => file.service_file_hash).filter((hash) => typeof hash === "string");
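One caveat worth knowing: withDeadline races the operation against a timer, so a deadline rejection stops the caller from waiting but does not cancel the in-flight promise, which runs to completion in the background. A small usage sketch, assuming the withDeadline defined in the hunk above is in scope:

// The 100 ms deadline wins the race; slowOp keeps running unobserved.
const slowOp = () => new Promise((resolve) => setTimeout(() => resolve("done"), 1e3));
withDeadline(slowOp(), { deadlineMs: Date.now() + 100 }).catch((err) => {
  console.log(err.message); // "snapshot publish deadline exceeded"
});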
@@ -1274,12 +1332,17 @@ function computeSnapshot(filesRepo) {
     totalSize
   };
 }
-async function withRetries(op, logger2, context, maxAttempts) {
+async function withRetries(op, logger2, context, maxAttempts, opts) {
   let attempt = 0;
   while (true) {
+    assertValid(opts);
     try {
-      return await op();
+      return await withDeadline(op(), opts);
     } catch (err) {
+      if (err instanceof SnapshotStaleError) throw err;
+      if (typeof err?.message === "string" && err.message.includes("deadline exceeded")) {
+        throw err;
+      }
       attempt += 1;
       if (attempt >= maxAttempts) {
         logger2.error(
@@ -1297,7 +1360,14 @@ async function withRetries(op, logger2, context, maxAttempts) {
       }
     }
   }
-
+function computeServiceFileHash(relPath, content) {
+  const hash = crypto.createHash("sha256");
+  hash.update(relPath);
+  hash.update("\n");
+  hash.update(content);
+  return hash.digest("hex");
+}
+async function uploadMissing(rootPath, missing, syncClient, logger2, maxAttempts, chunkSize = 1, opts) {
   if (!missing || missing.length === 0) return;
   const total = missing.length;
   const chunks = [];
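computeServiceFileHash commits to the relative path, a newline separator, and the raw bytes, so a rename changes the hash even when the content is byte-identical; this is what lets uploadMissing (below) detect files edited mid-upload by recomputing the hash. An equivalent standalone computation; the require line is an assumption, since the bundle resolves crypto through its own imports:

const crypto = require("node:crypto");

function serviceFileHash(relPath, content) {
  const hash = crypto.createHash("sha256");
  hash.update(relPath); // the path is part of the identity
  hash.update("\n");    // separator keeps ("a", "bc") distinct from ("ab", "c")
  hash.update(content);
  return hash.digest("hex");
}

// Same bytes, different path: different service hash.
console.log(
  serviceFileHash("src/a.ts", Buffer.from("x")) !==
    serviceFileHash("src/b.ts", Buffer.from("x"))
); // true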
@@ -1305,17 +1375,36 @@ async function uploadMissing(rootPath, missing, syncClient, logger2, maxAttempts
     chunks.push(missing.slice(i, i + chunkSize));
   }
   for (let idx = 0; idx < chunks.length; idx++) {
+    assertValid(opts);
     const list = chunks[idx];
     const map = /* @__PURE__ */ new Map();
     for (const missingFile of list) {
       const absPath = path2__default.default.join(rootPath, missingFile.file_path);
       try {
         const buffer = await fs5__default.default.readFile(absPath);
+        const serviceHash = computeServiceFileHash(
+          missingFile.file_path,
+          buffer
+        );
+        if (serviceHash !== missingFile.file_hash) {
+          logger2.warn(
+            {
+              relPath: missingFile.file_path,
+              expected: missingFile.file_hash,
+              got: serviceHash
+            },
+            "File content changed during upload; snapshot is stale"
+          );
+          throw new SnapshotStaleError(
+            `file hash mismatch for ${missingFile.file_path}`
+          );
+        }
         map.set(missingFile.file_hash, {
           path: missingFile.file_path,
           content: buffer
         });
       } catch (error) {
+        if (error instanceof SnapshotStaleError) throw error;
         logger2.warn(
           { err: error, relPath: missingFile.file_path },
           "Failed to read missing file content"
@@ -1323,6 +1412,7 @@ async function uploadMissing(rootPath, missing, syncClient, logger2, maxAttempts
       }
     }
     if (map.size === 0) continue;
+    assertValid(opts);
     await withRetries(
       () => syncClient.uploadFileContent(map),
       logger2,
@@ -1332,65 +1422,76 @@ async function uploadMissing(rootPath, missing, syncClient, logger2, maxAttempts
       chunks: chunks.length,
       files: map.size
     },
-    maxAttempts
+    maxAttempts,
+    opts
   );
  }
}
-async function ensureSnapshotCreated(rootPath, computation, syncClient, logger2,
+async function ensureSnapshotCreated(rootPath, computation, syncClient, logger2, opts) {
   const { snapshotHash, files } = computation;
-  const maxAttempts =
-  const uploadChunkSize =
+  const maxAttempts = opts?.maxAttempts ?? 5;
+  const uploadChunkSize = opts?.uploadChunkSize ?? 1;
+  assertValid(opts);
   let status = await withRetries(
     () => syncClient.checkSnapshotStatus(snapshotHash),
     logger2,
     { op: "checkSnapshotStatus", snapshotHash },
-    maxAttempts
+    maxAttempts,
+    opts
   );
   if (status.status === "READY") {
     logger2.info({ snapshotHash }, "Snapshot already READY");
     return;
   }
-  if (status.status === "NOT_FOUND" || status.status === "MISSING_CONTENT") {
-    status = await withRetries(
-      () => syncClient.createSnapshot(snapshotHash, files),
-      logger2,
-      { op: "createSnapshot", snapshotHash },
-      maxAttempts
-    );
-  }
-  if (status.status === "MISSING_CONTENT" && status.missing_files?.length) {
-    logger2.info(
-      { missing: status.missing_files.length },
-      "Uploading missing file content"
-    );
-    await uploadMissing(
-      rootPath,
-      status.missing_files,
-      syncClient,
-      logger2,
-      maxAttempts,
-      uploadChunkSize
-    );
-    status = await withRetries(
-      () => syncClient.createSnapshot(snapshotHash, files),
-      logger2,
-      { op: "createSnapshot", snapshotHash },
-      maxAttempts
-    );
-  }
   let attempt = 0;
   while (status.status !== "READY") {
+    assertValid(opts);
     if (status.status === "FAILED") {
       throw new Error(`Snapshot failed processing: ${JSON.stringify(status)}`);
     }
+    if (status.status === "NOT_FOUND" || status.status === "MISSING_CONTENT") {
+      status = await withRetries(
+        () => syncClient.createSnapshot(snapshotHash, files),
+        logger2,
+        { op: "createSnapshot", snapshotHash },
+        maxAttempts,
+        opts
+      );
+    }
+    if (status.status === "MISSING_CONTENT" && status.missing_files?.length) {
+      logger2.info(
+        { missing: status.missing_files.length },
+        "Uploading missing file content"
+      );
+      await uploadMissing(
+        rootPath,
+        status.missing_files,
+        syncClient,
+        logger2,
+        maxAttempts,
+        uploadChunkSize,
+        opts
+      );
+      status = await withRetries(
+        () => syncClient.createSnapshot(snapshotHash, files),
+        logger2,
+        { op: "createSnapshot", snapshotHash },
+        maxAttempts,
+        opts
+      );
+      continue;
+    }
+    if (status.status === "READY") break;
     const delay = Math.min(5e3, 1e3 * Math.max(1, 2 ** attempt));
     await sleep(delay);
     attempt += 1;
+    assertValid(opts);
     status = await withRetries(
       () => syncClient.checkSnapshotStatus(snapshotHash),
       logger2,
       { op: "checkSnapshotStatus", snapshotHash },
-      maxAttempts
+      maxAttempts,
+      opts
    );
  }
  logger2.info({ snapshotHash }, "Snapshot READY");
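Folding the NOT_FOUND/MISSING_CONTENT handling into the while (status.status !== "READY") loop turns the publish flow into a polled state machine: every pass re-validates staleness and the deadline, handles whatever status the server last returned, and the continue after an upload retries immediately instead of sleeping. A runnable toy model of that loop against a mock server that needs one upload before turning READY (all names here are stand-ins, not the package's API):

async function toyPublish(server) {
  let status = await server.check();
  let attempt = 0;
  while (status.status !== "READY") {
    if (status.status === "FAILED") throw new Error("snapshot failed");
    if (status.status === "NOT_FOUND" || status.status === "MISSING_CONTENT") {
      status = await server.create(); // (re)declare the snapshot
    }
    if (status.status === "MISSING_CONTENT" && status.missing_files?.length) {
      await server.upload(status.missing_files);
      status = await server.create();
      continue; // progress was made; skip the backoff sleep
    }
    if (status.status === "READY") break;
    const delay = Math.min(5e3, 1e3 * Math.max(1, 2 ** attempt));
    await new Promise((r) => setTimeout(r, delay));
    attempt += 1;
    status = await server.check();
  }
  return status;
}

const mock = {
  uploaded: false,
  async check() { return { status: this.uploaded ? "READY" : "NOT_FOUND" }; },
  async create() {
    return this.uploaded ? { status: "READY" } : { status: "MISSING_CONTENT", missing_files: ["a"] };
  },
  async upload() { this.uploaded = true; }
};
toyPublish(mock).then((s) => console.log(s.status)); // READY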
@@ -1557,12 +1658,14 @@ async function sendHeartbeat(rootId, snapshotsRepo, syncClient, logger2) {
 // src/service/State.ts
 var ServiceState = class {
   constructor() {
+    this.changeSeq = 0;
     this.lastChangeAt = Date.now();
     this.lastSnapshotReadyAt = Date.now();
     this.lastHeartbeatEnqueuedAt = 0;
   }
   updateChange(timestamp = Date.now()) {
     this.lastChangeAt = timestamp;
+    this.changeSeq += 1;
   }
   updateSnapshotReady(timestamp = Date.now()) {
     this.lastSnapshotReadyAt = timestamp;
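changeSeq gives the publisher a cheap staleness token: capture the counter when a publish starts, and any change event that lands mid-publish bumps it and invalidates the capture. A monotonic counter avoids the pitfall of comparing lastChangeAt timestamps, which can collide within the same millisecond. A minimal model of the pattern:

// Minimal model of the sequence token behind ServiceRunner's isStale closure.
class State {
  constructor() { this.changeSeq = 0; }
  updateChange() { this.changeSeq += 1; }
}

const state = new State();
const publishSeq = state.changeSeq;                   // captured at publish start
const isStale = () => state.changeSeq !== publishSeq;

console.log(isStale()); // false: nothing has changed yet
state.updateChange();   // a watcher event arrives mid-publish
console.log(isStale()); // true: the in-flight snapshot no longer matches disk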
@@ -1588,6 +1691,7 @@ function computeBackoff(attempts) {
   const delay = 1e3 * 2 ** base;
   return Math.min(delay, 6e4);
 }
+var PUBLISH_MAX_MS = 5 * 6e4;
 async function readSymlinkTarget2(absPath) {
   try {
     return await fs5__default.default.readlink(absPath);
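PUBLISH_MAX_MS is 5 * 6e4 = 300000 ms, i.e. five minutes. The runner (next hunks) passes Date.now() + PUBLISH_MAX_MS as deadlineMs, so every retry, upload, and poll inside a single publish shares one absolute cutoff rather than each step getting its own timeout:

// Five minutes, spelled the way the minified bundle writes it.
var PUBLISH_MAX_MS = 5 * 6e4; // 5 * 60000 = 300000 ms
const deadlineMs = Date.now() + PUBLISH_MAX_MS; // one shared absolute deadline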
@@ -1866,9 +1970,10 @@ var ServiceRunner = class {
     if (this.runtime.filesRepo.countByState("dirty") > 0 || this.runtime.filesRepo.countByState("hashing") > 0) {
       const delay = computeBackoff(job.attempts);
       this.runtime.outbox.retry(job.id, this.fsControlLeaseOwner, delay);
-      await sleep2(200);
       return;
     }
+    const publishSeq = this.state.changeSeq;
+    const isStale = () => this.state.changeSeq !== publishSeq || this.runtime.filesRepo.countByState("dirty") > 0 || this.runtime.filesRepo.countByState("hashing") > 0;
     try {
       const result = await publishSnapshot(
         this.runtime.config.rootPath,
@@ -1878,17 +1983,40 @@ var ServiceRunner = class {
         log,
         {
           maxAttempts: this.runtime.config.maxSnapshotAttempts,
-          uploadChunkSize: this.runtime.config.uploadChunkSize
+          uploadChunkSize: this.runtime.config.uploadChunkSize,
+          deadlineMs: Date.now() + PUBLISH_MAX_MS,
+          isStale
         }
       );
-      this.runtime.outbox.ack(job.id, this.fsControlLeaseOwner);
+      const acked = this.runtime.outbox.ack(job.id, this.fsControlLeaseOwner);
+      if (!acked) {
+        log.warn(
+          { jobId: job.id },
+          "Snapshot job ack failed (lease may have expired)"
+        );
+      }
       this.state.updateSnapshotReady(result.createdAt);
       log.info({ snapshotHash: result.snapshotHash }, "Snapshot job completed");
+      if (this.runtime.filesRepo.countByState("dirty") > 0 || this.runtime.filesRepo.countByState("hashing") > 0) {
+        this.runtime.outbox.enqueueSnapshot(this.runtime.config.rootId, 0);
+        return;
+      }
+      const nowHash = computeSnapshot(this.runtime.filesRepo).snapshotHash;
+      if (nowHash !== result.snapshotHash) {
+        this.runtime.outbox.enqueueSnapshot(this.runtime.config.rootId, 0);
+      }
     } catch (error) {
+      if (error instanceof SnapshotStaleError) {
+        log.debug(
+          { err: error },
+          "Snapshot publish aborted (stale); quick retry"
+        );
+        this.runtime.outbox.retry(job.id, this.fsControlLeaseOwner, 250);
+        return;
+      }
       log.warn({ err: error }, "Snapshot job failed");
       const delay = computeBackoff(job.attempts);
       this.runtime.outbox.retry(job.id, this.fsControlLeaseOwner, delay);
-      await sleep2(delay);
     }
   }
   async handleHeartbeatJob(job, log) {