@coderule/mcp 1.3.0 → 1.4.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/cli.cjs +135 -35
- package/dist/cli.cjs.map +1 -1
- package/dist/cli.js +136 -36
- package/dist/cli.js.map +1 -1
- package/dist/index.cjs +135 -35
- package/dist/index.cjs.map +1 -1
- package/dist/index.js +136 -36
- package/dist/index.js.map +1 -1
- package/dist/mcp-cli.cjs +135 -35
- package/dist/mcp-cli.cjs.map +1 -1
- package/dist/mcp-cli.js +136 -36
- package/dist/mcp-cli.js.map +1 -1
- package/package.json +1 -1
package/dist/cli.cjs
CHANGED
@@ -44,7 +44,7 @@ var DEFAULT_HEARTBEAT_CHECK_INTERVAL_MS = 5e3;
 var DEFAULT_QUEUE_POLL_INTERVAL_MS = 500;
 var DEFAULT_HASH_BATCH_SIZE = 32;
 var DEFAULT_MAX_SNAPSHOT_ATTEMPTS = 5;
-var DEFAULT_HTTP_TIMEOUT_MS =
+var DEFAULT_HTTP_TIMEOUT_MS = 12e4;
 
 // src/config/Configurator.ts
 var DEFAULT_RETRIEVAL_FORMATTER = "standard";
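The functional change here is the new DEFAULT_HTTP_TIMEOUT_MS value: 12e4 is JavaScript exponent notation for 120 000 ms, i.e. a two-minute HTTP timeout. As a hedged illustration only, this is how such a constant is commonly wired into request code; the fetchWithTimeout helper below is hypothetical and not part of the package:

// Hypothetical sketch, not @coderule/mcp code: a typical way a
// DEFAULT_HTTP_TIMEOUT_MS-style constant feeds an HTTP call.
const DEFAULT_HTTP_TIMEOUT_MS = 12e4; // 12e4 === 120_000 ms === 2 minutes

async function fetchWithTimeout(url: string, timeoutMs: number = DEFAULT_HTTP_TIMEOUT_MS): Promise<Response> {
  // AbortSignal.timeout(ms) aborts the request if it has not settled in time
  // (available in Node 18+ and modern browsers).
  return fetch(url, { signal: AbortSignal.timeout(timeoutMs) });
}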
@@ -549,6 +549,16 @@ var Outbox = class {
     if (purged > 0) {
       this.log.warn({ purged }, "Purged legacy fs_control jobs without kind");
     }
+    try {
+      const counts = {
+        pending: this.queue.countByStatus(qulite.JobStatus.Pending),
+        processing: this.queue.countByStatus(qulite.JobStatus.Processing),
+        done: this.queue.countByStatus(qulite.JobStatus.Done),
+        failed: this.queue.countByStatus(qulite.JobStatus.Failed)
+      };
+      this.log.debug({ counts }, "Outbox initialized");
+    } catch {
+    }
   }
   getQueue() {
     return this.queue;
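This hunk adds a best-effort diagnostic to Outbox initialization: job counts per status are gathered and logged at debug level, and the empty catch guarantees that telemetry can never fail startup. A typed reading of the addition, with Queue and Logger shapes inferred from the call sites rather than taken from qulite's published types:

// Stand-in types inferred from the dist code; qulite's real API and the
// enum's actual values are not visible in this diff.
enum JobStatus { Pending = "pending", Processing = "processing", Done = "done", Failed = "failed" }
interface Queue { countByStatus(status: JobStatus): number }
interface Logger { debug(obj: object, msg: string): void }

function logQueueCounts(queue: Queue, log: Logger): void {
  try {
    const counts = {
      pending: queue.countByStatus(JobStatus.Pending),
      processing: queue.countByStatus(JobStatus.Processing),
      done: queue.countByStatus(JobStatus.Done),
      failed: queue.countByStatus(JobStatus.Failed)
    };
    log.debug({ counts }, "Outbox initialized");
  } catch {
    // Diagnostics are optional: never let them break initialization.
  }
}

Swallowing the error is deliberate here: a broken counter query should not prevent the queue from coming up.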
@@ -1060,44 +1070,109 @@ function computeSnapshot(filesRepo) {
     totalSize
   };
 }
-async function
-  for (const missingFile of missing) {
-    const absPath = path__default.default.join(rootPath, missingFile.file_path);
+async function withRetries(op, logger2, context, maxAttempts) {
+  let attempt = 0;
+  while (true) {
     try {
+      return await op();
+    } catch (err) {
+      attempt += 1;
+      if (attempt >= maxAttempts) {
+        logger2.error(
+          { err, ...context, attempt },
+          "Operation failed after retries"
+        );
+        throw err;
+      }
+      const delay = Math.min(15e3, 1e3 * 2 ** (attempt - 1));
       logger2.warn(
-        { err
-        "
+        { err, ...context, attempt, delay },
+        "Operation failed; retrying"
       );
+      await sleep(delay);
+    }
+  }
+}
+async function uploadMissing(rootPath, missing, syncClient, logger2, maxAttempts, chunkSize = 64) {
+  if (!missing || missing.length === 0) return;
+  const total = missing.length;
+  const chunks = [];
+  for (let i = 0; i < total; i += chunkSize) {
+    chunks.push(missing.slice(i, i + chunkSize));
+  }
+  for (let idx = 0; idx < chunks.length; idx++) {
+    const list = chunks[idx];
+    const map = /* @__PURE__ */ new Map();
+    for (const missingFile of list) {
+      const absPath = path__default.default.join(rootPath, missingFile.file_path);
+      try {
+        const buffer = await fs4__default.default.readFile(absPath);
+        map.set(missingFile.file_hash, {
+          path: missingFile.file_path,
+          content: buffer
+        });
+      } catch (error) {
+        logger2.warn(
+          { err: error, relPath: missingFile.file_path },
+          "Failed to read missing file content"
+        );
+      }
     }
+    if (map.size === 0) continue;
+    await withRetries(
+      () => syncClient.uploadFileContent(map),
+      logger2,
+      {
+        op: "uploadFileContent",
+        chunkIndex: idx + 1,
+        chunks: chunks.length,
+        files: map.size
+      },
+      maxAttempts
+    );
   }
-  if (map.size === 0) return;
-  await syncClient.uploadFileContent(map);
 }
-async function ensureSnapshotCreated(rootPath, computation, syncClient, logger2) {
+async function ensureSnapshotCreated(rootPath, computation, syncClient, logger2, options) {
   const { snapshotHash, files } = computation;
+  const maxAttempts = options?.maxAttempts ?? 5;
+  const uploadChunkSize = options?.uploadChunkSize ?? 64;
+  let status = await withRetries(
+    () => syncClient.checkSnapshotStatus(snapshotHash),
+    logger2,
+    { op: "checkSnapshotStatus", snapshotHash },
+    maxAttempts
+  );
   if (status.status === "READY") {
     logger2.info({ snapshotHash }, "Snapshot already READY");
     return;
   }
   if (status.status === "NOT_FOUND" || status.status === "MISSING_CONTENT") {
-    status = await
+    status = await withRetries(
+      () => syncClient.createSnapshot(snapshotHash, files),
+      logger2,
+      { op: "createSnapshot", snapshotHash },
+      maxAttempts
+    );
   }
   if (status.status === "MISSING_CONTENT" && status.missing_files?.length) {
     logger2.info(
       { missing: status.missing_files.length },
       "Uploading missing file content"
     );
-    await uploadMissing(
+    await uploadMissing(
+      rootPath,
+      status.missing_files,
+      syncClient,
+      logger2,
+      maxAttempts,
+      uploadChunkSize
+    );
+    status = await withRetries(
+      () => syncClient.createSnapshot(snapshotHash, files),
+      logger2,
+      { op: "createSnapshot", snapshotHash },
+      maxAttempts
+    );
   }
   let attempt = 0;
   while (status.status !== "READY") {
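This hunk carries the substance of 1.4.0. A generic withRetries helper adds capped exponential backoff around sync-client calls; uploadMissing is rewritten to slice missing files into chunks (default 64) and retry each chunk's upload independently, skipping files that fail to read locally instead of aborting; and ensureSnapshotCreated gains an options bag and re-issues createSnapshot after uploads so the server can re-verify content. For reference, a typed reading of the retry helper; the types are inferred from the dist JavaScript, not from published typings:

// Typed reading of the withRetries helper in the dist output; the Logger
// shape is inferred from the call sites.
type RetryLogger = {
  warn(obj: object, msg: string): void;
  error(obj: object, msg: string): void;
};

const sleep = (ms: number) => new Promise<void>((resolve) => setTimeout(resolve, ms));

async function withRetries<T>(
  op: () => Promise<T>,
  logger: RetryLogger,
  context: Record<string, unknown>,
  maxAttempts: number
): Promise<T> {
  let attempt = 0;
  while (true) {
    try {
      return await op();
    } catch (err) {
      attempt += 1;
      if (attempt >= maxAttempts) {
        logger.error({ err, ...context, attempt }, "Operation failed after retries");
        throw err;
      }
      // Delays double per attempt and cap at 15 s: 1s, 2s, 4s, 8s, 15s, 15s, ...
      const delay = Math.min(15_000, 1000 * 2 ** (attempt - 1));
      logger.warn({ err, ...context, attempt, delay }, "Operation failed; retrying");
      await sleep(delay);
    }
  }
}

With the defaults, a snapshot missing 200 files is uploaded as four batches (three of 64 plus one of 8), each with its own retry budget of maxAttempts.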
@@ -1107,13 +1182,24 @@ async function ensureSnapshotCreated(rootPath, computation, syncClient, logger2)
     const delay = Math.min(5e3, 1e3 * Math.max(1, 2 ** attempt));
     await sleep(delay);
     attempt += 1;
-    status = await
+    status = await withRetries(
+      () => syncClient.checkSnapshotStatus(snapshotHash),
+      logger2,
+      { op: "checkSnapshotStatus", snapshotHash },
+      maxAttempts
+    );
   }
   logger2.info({ snapshotHash }, "Snapshot READY");
 }
-async function publishSnapshot(rootPath, filesRepo, snapshotsRepo, syncClient, logger2) {
+async function publishSnapshot(rootPath, filesRepo, snapshotsRepo, syncClient, logger2, options) {
   const computation = computeSnapshot(filesRepo);
-  await ensureSnapshotCreated(
+  await ensureSnapshotCreated(
+    rootPath,
+    computation,
+    syncClient,
+    logger2,
+    options
+  );
   const createdAt = Date.now();
   snapshotsRepo.insert(
     computation.snapshotHash,
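The READY wait loop keeps its capped poll delay (1 s, 2 s, 4 s, then flat 5 s), but each poll now goes through withRetries as well, separating transient request failures (retried with the 15 s-capped backoff) from slow server-side snapshot processing (paced by the 5 s-capped poll delay). A condensed sketch of the loop, where checkStatus stands in for the wrapped syncClient.checkSnapshotStatus call:

// Condensed sketch; checkStatus is a stand-in for the withRetries-wrapped
// syncClient.checkSnapshotStatus call. The visible hunk shows no upper bound
// on the loop itself; any bound would live outside the shown context.
async function waitUntilReady(checkStatus: () => Promise<{ status: string }>): Promise<void> {
  let attempt = 0;
  let status = await checkStatus();
  while (status.status !== "READY") {
    // Poll delay grows 1 s, 2 s, 4 s, then stays at the 5 s cap.
    const delay = Math.min(5000, 1000 * Math.max(1, 2 ** attempt));
    await new Promise((resolve) => setTimeout(resolve, delay));
    attempt += 1;
    status = await checkStatus();
  }
}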
@@ -1153,7 +1239,8 @@ async function runInitialSyncPipeline(runtime) {
     runtime.filesRepo,
     runtime.snapshotsRepo,
     runtime.clients.sync,
-    syncLogger
+    syncLogger,
+    { maxAttempts: runtime.config.maxSnapshotAttempts }
   );
   return result;
 }
@@ -1556,7 +1643,8 @@ var ServiceRunner = class {
       this.runtime.filesRepo,
       this.runtime.snapshotsRepo,
       this.runtime.clients.sync,
-      log
+      log,
+      { maxAttempts: this.runtime.config.maxSnapshotAttempts }
     );
     this.runtime.outbox.ack(job.id, this.fsControlLeaseOwner);
     this.state.updateSnapshotReady(result.createdAt);
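Both publishSnapshot call sites, the initial sync pipeline and ServiceRunner's job handler, now thread the configured retry budget through the new options parameter. Read off the defaults in ensureSnapshotCreated, the options shape is as follows (the interface name is ours, chosen for illustration):

// Shape inferred from `options?.maxAttempts ?? 5` and
// `options?.uploadChunkSize ?? 64`; the interface name is hypothetical.
interface PublishSnapshotOptions {
  maxAttempts?: number;     // retry budget per sync-client call (default 5)
  uploadChunkSize?: number; // files per upload batch (default 64)
}

Only maxAttempts is wired up to config (backed by DEFAULT_MAX_SNAPSHOT_ATTEMPTS = 5 above); both call sites leave uploadChunkSize at its default of 64.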
@@ -1637,15 +1725,27 @@ async function runService(params) {
   try {
     runner = new ServiceRunner(runtime);
     await runner.prepareWatcher(true);
+    let initialCreatedAt;
+    try {
+      const initial = await runInitialSyncPipeline(runtime);
+      runtime.logger.info(
+        {
+          snapshotHash: initial.snapshotHash,
+          filesCount: initial.filesCount
+        },
+        "Initial sync completed; entering continuous mode"
+      );
+      initialCreatedAt = initial.createdAt;
+    } catch (error) {
+      runtime.logger.warn(
+        { err: error },
+        "Initial sync failed; enqueuing snapshot job and continuing"
+      );
+      runtime.outbox.enqueueSnapshot(runtime.config.rootId, 0);
+    }
+    if (initialCreatedAt) {
+      runner.recordInitialSnapshot(initialCreatedAt);
+    }
     await runner.startLoops();
     await runner.enableWatcherProcessing();
     runtime.logger.info("Coderule scanner service is running");
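With this change, the initial sync runs inside its own try/catch: on failure the service logs a warning, enqueues a snapshot job on the outbox, and still enters continuous mode, where the background loops can retry. A condensed sketch of the fallback with minimal stand-in types; the package's real runtime interface is not shown in this diff:

// Minimal stand-in types for illustration; not the package's real interfaces.
interface Runtime {
  logger: { info(o: object, m: string): void; warn(o: object, m: string): void };
  outbox: { enqueueSnapshot(rootId: string, arg: number): void };
  config: { rootId: string };
}

async function initialSyncWithFallback(
  runtime: Runtime,
  runPipeline: () => Promise<{ snapshotHash: string; filesCount: number; createdAt: number }>
): Promise<number | undefined> {
  try {
    const initial = await runPipeline();
    runtime.logger.info(
      { snapshotHash: initial.snapshotHash, filesCount: initial.filesCount },
      "Initial sync completed; entering continuous mode"
    );
    return initial.createdAt; // later recorded via runner.recordInitialSnapshot
  } catch (error) {
    // Degrade gracefully: hand the snapshot off to the outbox instead of
    // failing startup. The second argument mirrors the dist call's literal 0;
    // its meaning is not visible in this diff.
    runtime.logger.warn({ err: error }, "Initial sync failed; enqueuing snapshot job and continuing");
    runtime.outbox.enqueueSnapshot(runtime.config.rootId, 0);
    return undefined;
  }
}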