@coderule/mcp 2.0.3 → 2.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/cli.cjs +176 -48
- package/dist/cli.cjs.map +1 -1
- package/dist/cli.js +176 -48
- package/dist/cli.js.map +1 -1
- package/dist/index.cjs +176 -48
- package/dist/index.cjs.map +1 -1
- package/dist/index.js +176 -48
- package/dist/index.js.map +1 -1
- package/dist/mcp-cli.cjs +281 -119
- package/dist/mcp-cli.cjs.map +1 -1
- package/dist/mcp-cli.js +281 -119
- package/dist/mcp-cli.js.map +1 -1
- package/package.json +1 -1
package/dist/cli.cjs
CHANGED
@@ -540,7 +540,10 @@ var FilesRepo = class {
       `UPDATE files
        SET content_sha256 = ?, service_file_hash = ?, hash_state = 'clean',
            hash_owner = NULL, hash_lease_expires_at = NULL, hash_started_at = NULL
-       WHERE id =
+       WHERE id = ?
+         AND hash_state = 'hashing'
+         AND hash_owner = ?
+         AND hash_started_at = ?`
     );
     this.requeueExpiredHashingStmt = this.db.prepare(
       `UPDATE files
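The new WHERE clause turns the hash-apply statement into a compare-and-set: the row is updated only if it is still in the 'hashing' state, still owned by the same worker, and still on the same hashing attempt. A minimal sketch of the pattern, assuming a better-sqlite3-style driver (prepared statements whose run() returns { changes }); the SQL mirrors the diff, everything around it is illustrative:

// Compare-and-set apply, sketched against a better-sqlite3-style driver.
function applyHashResult(db, { id, contentSha256, serviceFileHash, owner, startedAt }) {
  const stmt = db.prepare(`
    UPDATE files
       SET content_sha256 = ?, service_file_hash = ?, hash_state = 'clean',
           hash_owner = NULL, hash_lease_expires_at = NULL, hash_started_at = NULL
     WHERE id = ?
       AND hash_state = 'hashing'   -- not re-dirtied since hashing began
       AND hash_owner = ?           -- still owned by this worker
       AND hash_started_at = ?      -- same attempt, not a later one
  `);
  const info = stmt.run(contentSha256, serviceFileHash, id, owner, startedAt);
  return info.changes > 0; // false: lost the race, digest is discarded
}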
@@ -671,13 +674,30 @@ var FilesRepo = class {
     tx(ids);
   }
   applyHashResults(results) {
-    if (!results.length) return;
+    if (!results.length) return 0;
+    let applied = 0;
     const tx = this.db.transaction((batch) => {
-      for (const {
-
+      for (const {
+        id,
+        contentSha256,
+        serviceFileHash,
+        hashOwner,
+        hashStartedAt
+      } of batch) {
+        const result = this.applyHashesStmt.run(
+          contentSha256,
+          serviceFileHash,
+          id,
+          hashOwner,
+          hashStartedAt
+        );
+        if ((result.changes ?? 0) > 0) {
+          applied += 1;
+        }
       }
     });
     tx(results);
+    return applied;
   }
   getCleanFilesForSnapshot() {
     return this.selectCleanSnapshotStmt.all();
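applyHashResults now reports how many results actually landed: the early return yields 0 instead of undefined, and each run() is checked for changes > 0 inside the transaction, so callers can tell applied digests apart from ones the compare-and-set guard above rejected.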
@@ -727,7 +747,7 @@ var Outbox = class {
     this.log = logger2.child({ scope: "outbox" });
     this.queue = new qulite.Qulite(db, {
       logger: this.log,
-      defaultLeaseMs:
+      defaultLeaseMs: 6e5,
       defaultMaxAttempts: 10
     });
     this.markKindStmt = db.prepare(
@@ -785,7 +805,7 @@ var Outbox = class {
       this.log.debug({ rootId }, "Enqueued heartbeat job");
     }
   }
-  claimFsControlJob(leaseOwner, leaseMs =
+  claimFsControlJob(leaseOwner, leaseMs = 6e5) {
    return this.queue.claimNext({ type: "fs_control", leaseOwner, leaseMs });
   }
   ack(jobId, leaseOwner) {
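Both lease defaults now read 6e5, i.e. 600,000 ms: outbox jobs and fs_control claims default to a ten-minute lease.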
@@ -1101,7 +1121,9 @@ var Hasher = class {
         successes.push({
           id: record.id,
           contentSha256: result.contentSha256,
-          serviceFileHash: result.serviceFileHash
+          serviceFileHash: result.serviceFileHash,
+          hashOwner: record.hash_owner ?? this.ownerId,
+          hashStartedAt: record.hash_started_at ?? 0
         });
       } catch (error) {
         if (error?.code === "ENOENT") {
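Each hash success now carries the owner and start timestamp under which the digest was produced (falling back to the hasher's own id and 0), supplying the guard values that applyHashResults passes to the compare-and-set UPDATE.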
@@ -1260,6 +1282,42 @@ async function runInventory(opts) {
 async function sleep(ms) {
   return new Promise((resolve) => setTimeout(resolve, ms));
 }
+var SnapshotStaleError = class extends Error {
+  constructor(message = "snapshot became stale") {
+    super(message);
+    this.name = "SnapshotStaleError";
+  }
+};
+function assertNotStale(opts) {
+  if (opts?.isStale?.()) {
+    throw new SnapshotStaleError();
+  }
+}
+function assertBeforeDeadline(opts) {
+  if (opts?.deadlineMs !== void 0 && Date.now() >= opts.deadlineMs) {
+    throw new Error("snapshot publish deadline exceeded");
+  }
+}
+function assertValid(opts) {
+  assertNotStale(opts);
+  assertBeforeDeadline(opts);
+}
+function withDeadline(promise, opts) {
+  if (opts?.deadlineMs === void 0) return promise;
+  const remaining = opts.deadlineMs - Date.now();
+  if (remaining <= 0) {
+    return Promise.reject(new Error("snapshot publish deadline exceeded"));
+  }
+  return Promise.race([
+    promise,
+    new Promise(
+      (_, reject) => setTimeout(
+        () => reject(new Error("snapshot publish deadline exceeded")),
+        remaining
+      )
+    )
+  ]);
+}
 function computeSnapshot(filesRepo) {
   const files = filesRepo.getCleanFilesForSnapshot();
   const hashes = files.map((file) => file.service_file_hash).filter((hash) => typeof hash === "string");
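withDeadline races the wrapped operation against a timer. As written, the losing setTimeout stays pending until it fires. A sketch of the same pattern that also clears the timer once the race settles; the name and shape here are illustrative, not part of the package:

// Race-against-a-timer with cleanup once either side settles.
function withDeadlineCleared(promise, deadlineMs) {
  if (deadlineMs === undefined) return promise;
  const remaining = deadlineMs - Date.now();
  if (remaining <= 0) {
    return Promise.reject(new Error("snapshot publish deadline exceeded"));
  }
  let timer;
  const timeout = new Promise((_, reject) => {
    timer = setTimeout(
      () => reject(new Error("snapshot publish deadline exceeded")),
      remaining
    );
  });
  return Promise.race([promise, timeout]).finally(() => clearTimeout(timer));
}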
@@ -1275,12 +1333,17 @@ function computeSnapshot(filesRepo) {
     totalSize
   };
 }
-async function withRetries(op, logger2, context, maxAttempts) {
+async function withRetries(op, logger2, context, maxAttempts, opts) {
   let attempt = 0;
   while (true) {
+    assertValid(opts);
     try {
-      return await op();
+      return await withDeadline(op(), opts);
     } catch (err) {
+      if (err instanceof SnapshotStaleError) throw err;
+      if (typeof err?.message === "string" && err.message.includes("deadline exceeded")) {
+        throw err;
+      }
       attempt += 1;
       if (attempt >= maxAttempts) {
         logger2.error(
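withRetries now treats two classes of failure as non-retryable: SnapshotStaleError instances rethrow immediately, and deadline errors are recognized by a substring match on the message (they are plain Error objects, so there is no subclass to check). Everything else still counts against maxAttempts, and each iteration re-checks staleness and the deadline via assertValid before trying again.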
@@ -1298,7 +1361,14 @@ async function withRetries(op, logger2, context, maxAttempts) {
     }
   }
 }
-
+function computeServiceFileHash(relPath, content) {
+  const hash = crypto.createHash("sha256");
+  hash.update(relPath);
+  hash.update("\n");
+  hash.update(content);
+  return hash.digest("hex");
+}
+async function uploadMissing(rootPath, missing, syncClient, logger2, maxAttempts, chunkSize = 1, opts) {
   if (!missing || missing.length === 0) return;
   const total = missing.length;
   const chunks = [];
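computeServiceFileHash binds the content digest to the file's relative path: sha256 over relPath, a newline separator, then the raw bytes, hex-encoded. A standalone sketch of the same derivation using Node's crypto module (the function name and example input are illustrative):

// sha256(relPath + "\n" + content), hex-encoded, as in the diff above.
const crypto = require("node:crypto");

function serviceFileHash(relPath, content) {
  return crypto
    .createHash("sha256")
    .update(relPath)
    .update("\n")
    .update(content)
    .digest("hex");
}

// Example (illustrative input):
// serviceFileHash("src/index.ts", Buffer.from("export {};\n"))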
@@ -1306,17 +1376,36 @@ async function uploadMissing(rootPath, missing, syncClient, logger2, maxAttempts
     chunks.push(missing.slice(i, i + chunkSize));
   }
   for (let idx = 0; idx < chunks.length; idx++) {
+    assertValid(opts);
     const list = chunks[idx];
     const map = /* @__PURE__ */ new Map();
     for (const missingFile of list) {
       const absPath = path2__default.default.join(rootPath, missingFile.file_path);
       try {
         const buffer = await fs5__default.default.readFile(absPath);
+        const serviceHash = computeServiceFileHash(
+          missingFile.file_path,
+          buffer
+        );
+        if (serviceHash !== missingFile.file_hash) {
+          logger2.warn(
+            {
+              relPath: missingFile.file_path,
+              expected: missingFile.file_hash,
+              got: serviceHash
+            },
+            "File content changed during upload; snapshot is stale"
+          );
+          throw new SnapshotStaleError(
+            `file hash mismatch for ${missingFile.file_path}`
+          );
+        }
         map.set(missingFile.file_hash, {
           path: missingFile.file_path,
           content: buffer
         });
       } catch (error) {
+        if (error instanceof SnapshotStaleError) throw error;
         logger2.warn(
           { err: error, relPath: missingFile.file_path },
           "Failed to read missing file content"
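Before a chunk is uploaded, each file is re-read and re-hashed; if the fresh digest no longer matches the file_hash recorded for the snapshot, the publish aborts with SnapshotStaleError rather than uploading bytes the manifest does not describe. The catch block rethrows that error, so only genuine read failures are logged and skipped.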
@@ -1324,6 +1413,7 @@ async function uploadMissing(rootPath, missing, syncClient, logger2, maxAttempts
       }
     }
     if (map.size === 0) continue;
+    assertValid(opts);
     await withRetries(
       () => syncClient.uploadFileContent(map),
       logger2,
@@ -1333,65 +1423,76 @@ async function uploadMissing(rootPath, missing, syncClient, logger2, maxAttempts
         chunks: chunks.length,
         files: map.size
       },
-      maxAttempts
+      maxAttempts,
+      opts
     );
   }
 }
-async function ensureSnapshotCreated(rootPath, computation, syncClient, logger2,
+async function ensureSnapshotCreated(rootPath, computation, syncClient, logger2, opts) {
   const { snapshotHash, files } = computation;
-  const maxAttempts =
-  const uploadChunkSize =
+  const maxAttempts = opts?.maxAttempts ?? 5;
+  const uploadChunkSize = opts?.uploadChunkSize ?? 1;
+  assertValid(opts);
   let status = await withRetries(
     () => syncClient.checkSnapshotStatus(snapshotHash),
     logger2,
     { op: "checkSnapshotStatus", snapshotHash },
-    maxAttempts
+    maxAttempts,
+    opts
   );
   if (status.status === "READY") {
     logger2.info({ snapshotHash }, "Snapshot already READY");
     return;
   }
-  if (status.status === "NOT_FOUND" || status.status === "MISSING_CONTENT") {
-    status = await withRetries(
-      () => syncClient.createSnapshot(snapshotHash, files),
-      logger2,
-      { op: "createSnapshot", snapshotHash },
-      maxAttempts
-    );
-  }
-  if (status.status === "MISSING_CONTENT" && status.missing_files?.length) {
-    logger2.info(
-      { missing: status.missing_files.length },
-      "Uploading missing file content"
-    );
-    await uploadMissing(
-      rootPath,
-      status.missing_files,
-      syncClient,
-      logger2,
-      maxAttempts,
-      uploadChunkSize
-    );
-    status = await withRetries(
-      () => syncClient.createSnapshot(snapshotHash, files),
-      logger2,
-      { op: "createSnapshot", snapshotHash },
-      maxAttempts
-    );
-  }
   let attempt = 0;
   while (status.status !== "READY") {
+    assertValid(opts);
     if (status.status === "FAILED") {
       throw new Error(`Snapshot failed processing: ${JSON.stringify(status)}`);
     }
+    if (status.status === "NOT_FOUND" || status.status === "MISSING_CONTENT") {
+      status = await withRetries(
+        () => syncClient.createSnapshot(snapshotHash, files),
+        logger2,
+        { op: "createSnapshot", snapshotHash },
+        maxAttempts,
+        opts
+      );
+    }
+    if (status.status === "MISSING_CONTENT" && status.missing_files?.length) {
+      logger2.info(
+        { missing: status.missing_files.length },
+        "Uploading missing file content"
+      );
+      await uploadMissing(
+        rootPath,
+        status.missing_files,
+        syncClient,
+        logger2,
+        maxAttempts,
+        uploadChunkSize,
+        opts
+      );
+      status = await withRetries(
+        () => syncClient.createSnapshot(snapshotHash, files),
+        logger2,
+        { op: "createSnapshot", snapshotHash },
+        maxAttempts,
+        opts
+      );
+      continue;
+    }
+    if (status.status === "READY") break;
     const delay = Math.min(5e3, 1e3 * Math.max(1, 2 ** attempt));
     await sleep(delay);
     attempt += 1;
+    assertValid(opts);
     status = await withRetries(
       () => syncClient.checkSnapshotStatus(snapshotHash),
       logger2,
       { op: "checkSnapshotStatus", snapshotHash },
-      maxAttempts
+      maxAttempts,
+      opts
    );
   }
   logger2.info({ snapshotHash }, "Snapshot READY");
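The createSnapshot / upload-missing handling moved inside the polling loop, so MISSING_CONTENT reported by a later status check is handled too (previously it was handled only once, before the loop), and every pass re-validates staleness and the deadline. Poll delays follow Math.min(5e3, 1e3 * Math.max(1, 2 ** attempt)): 1 s, 2 s, 4 s, then a 5 s cap.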
@@ -1558,12 +1659,14 @@ async function sendHeartbeat(rootId, snapshotsRepo, syncClient, logger2) {
 // src/service/State.ts
 var ServiceState = class {
   constructor() {
+    this.changeSeq = 0;
     this.lastChangeAt = Date.now();
     this.lastSnapshotReadyAt = Date.now();
     this.lastHeartbeatEnqueuedAt = 0;
   }
   updateChange(timestamp = Date.now()) {
     this.lastChangeAt = timestamp;
+    this.changeSeq += 1;
   }
   updateSnapshotReady(timestamp = Date.now()) {
     this.lastSnapshotReadyAt = timestamp;
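ServiceState gains changeSeq, a monotonic counter bumped on every recorded change. Comparing the value captured at the start of a publish with the current value is a cheap, timestamp-independent way to detect that the tree moved under an in-flight publish.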
@@ -1589,6 +1692,7 @@ function computeBackoff(attempts) {
   const delay = 1e3 * 2 ** base;
   return Math.min(delay, 6e4);
 }
+var PUBLISH_MAX_MS = 5 * 6e4;
 async function readSymlinkTarget2(absPath) {
   try {
     return await fs5__default.default.readlink(absPath);
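PUBLISH_MAX_MS is 5 * 6e4 = 300,000 ms: a five-minute ceiling on a single publish attempt, half the new ten-minute fs_control lease.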
@@ -1867,9 +1971,10 @@ var ServiceRunner = class {
     if (this.runtime.filesRepo.countByState("dirty") > 0 || this.runtime.filesRepo.countByState("hashing") > 0) {
       const delay = computeBackoff(job.attempts);
       this.runtime.outbox.retry(job.id, this.fsControlLeaseOwner, delay);
-      await sleep2(200);
       return;
     }
+    const publishSeq = this.state.changeSeq;
+    const isStale = () => this.state.changeSeq !== publishSeq || this.runtime.filesRepo.countByState("dirty") > 0 || this.runtime.filesRepo.countByState("hashing") > 0;
     try {
       const result = await publishSnapshot(
         this.runtime.config.rootPath,
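The isStale closure captures the changeSeq observed when the job was picked up; any later change (or any dirty/hashing row) makes every subsequent assertValid call inside the publish throw SnapshotStaleError, aborting between steps. The core of the pattern, with hypothetical names:

// Staleness as a closure over a sequence number (hypothetical names).
let changeSeq = 0;
const recordChange = () => { changeSeq += 1; }; // called per filesystem event

function beginPublish() {
  const seqAtStart = changeSeq;
  // True as soon as any change lands after the publish began.
  return () => changeSeq !== seqAtStart;
}

const isStale = beginPublish();
recordChange();
console.log(isStale()); // true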
@@ -1879,17 +1984,40 @@ var ServiceRunner = class {
         log,
         {
           maxAttempts: this.runtime.config.maxSnapshotAttempts,
-          uploadChunkSize: this.runtime.config.uploadChunkSize
+          uploadChunkSize: this.runtime.config.uploadChunkSize,
+          deadlineMs: Date.now() + PUBLISH_MAX_MS,
+          isStale
         }
       );
-      this.runtime.outbox.ack(job.id, this.fsControlLeaseOwner);
+      const acked = this.runtime.outbox.ack(job.id, this.fsControlLeaseOwner);
+      if (!acked) {
+        log.warn(
+          { jobId: job.id },
+          "Snapshot job ack failed (lease may have expired)"
+        );
+      }
       this.state.updateSnapshotReady(result.createdAt);
       log.info({ snapshotHash: result.snapshotHash }, "Snapshot job completed");
+      if (this.runtime.filesRepo.countByState("dirty") > 0 || this.runtime.filesRepo.countByState("hashing") > 0) {
+        this.runtime.outbox.enqueueSnapshot(this.runtime.config.rootId, 0);
+        return;
+      }
+      const nowHash = computeSnapshot(this.runtime.filesRepo).snapshotHash;
+      if (nowHash !== result.snapshotHash) {
+        this.runtime.outbox.enqueueSnapshot(this.runtime.config.rootId, 0);
+      }
     } catch (error) {
+      if (error instanceof SnapshotStaleError) {
+        log.debug(
+          { err: error },
+          "Snapshot publish aborted (stale); quick retry"
+        );
+        this.runtime.outbox.retry(job.id, this.fsControlLeaseOwner, 250);
+        return;
+      }
       log.warn({ err: error }, "Snapshot job failed");
       const delay = computeBackoff(job.attempts);
       this.runtime.outbox.retry(job.id, this.fsControlLeaseOwner, delay);
-      await sleep2(delay);
     }
   }
   async handleHeartbeatJob(job, log) {
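Taken together, these changes make the snapshot job handler non-blocking: a stale publish requeues after 250 ms instead of waiting out a full backoff, failures no longer await sleep2(delay) inside the handler (the outbox retry delay already schedules the next attempt), acks are checked so an expired lease is at least logged, and a successful publish re-checks for dirty or hashing rows and recomputes the snapshot hash, enqueueing an immediate follow-up job if the tree changed during the publish.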