@coderule/mcp 1.3.0 → 1.4.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/cli.cjs +135 -35
- package/dist/cli.cjs.map +1 -1
- package/dist/cli.js +136 -36
- package/dist/cli.js.map +1 -1
- package/dist/index.cjs +135 -35
- package/dist/index.cjs.map +1 -1
- package/dist/index.js +136 -36
- package/dist/index.js.map +1 -1
- package/dist/mcp-cli.cjs +135 -35
- package/dist/mcp-cli.cjs.map +1 -1
- package/dist/mcp-cli.js +136 -36
- package/dist/mcp-cli.js.map +1 -1
- package/package.json +1 -1
package/dist/index.cjs
CHANGED
@@ -43,7 +43,7 @@ var DEFAULT_HEARTBEAT_CHECK_INTERVAL_MS = 5e3;
 var DEFAULT_QUEUE_POLL_INTERVAL_MS = 500;
 var DEFAULT_HASH_BATCH_SIZE = 32;
 var DEFAULT_MAX_SNAPSHOT_ATTEMPTS = 5;
-var DEFAULT_HTTP_TIMEOUT_MS = …
+var DEFAULT_HTTP_TIMEOUT_MS = 12e4;
 
 // src/config/Configurator.ts
 var DEFAULT_RETRIEVAL_FORMATTER = "standard";
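The only substantive change in this hunk is the HTTP timeout default: `12e4` is scientific notation for 120000, so the new default request budget is 120000 ms, i.e. two minutes. (The previous value was truncated in the rendered diff and is left elided above.)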
@@ -548,6 +548,16 @@ var Outbox = class {
     if (purged > 0) {
       this.log.warn({ purged }, "Purged legacy fs_control jobs without kind");
     }
+    try {
+      const counts = {
+        pending: this.queue.countByStatus(qulite.JobStatus.Pending),
+        processing: this.queue.countByStatus(qulite.JobStatus.Processing),
+        done: this.queue.countByStatus(qulite.JobStatus.Done),
+        failed: this.queue.countByStatus(qulite.JobStatus.Failed)
+      };
+      this.log.debug({ counts }, "Outbox initialized");
+    } catch {
+    }
   }
   getQueue() {
     return this.queue;
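The added block is deliberately best-effort: the counts are gathered inside a `try`/`catch` whose empty handler guarantees the diagnostics can never abort Outbox startup. A rough source-form sketch of the same pattern (the structural queue type and the pino-style `Logger` type are assumptions, not confirmed by the bundle):

```ts
import { JobStatus } from "qulite";  // JobStatus enum is referenced by the bundle
import type { Logger } from "pino";  // logger type assumed from the call style

// Best-effort startup diagnostics: report queue depth per job status,
// but swallow any error so bookkeeping cannot break initialization.
function logQueueCounts(queue: { countByStatus(s: JobStatus): number }, log: Logger): void {
  try {
    const counts = {
      pending: queue.countByStatus(JobStatus.Pending),
      processing: queue.countByStatus(JobStatus.Processing),
      done: queue.countByStatus(JobStatus.Done),
      failed: queue.countByStatus(JobStatus.Failed)
    };
    log.debug({ counts }, "Outbox initialized");
  } catch {
    // Counting is diagnostics only; ignore failures.
  }
}
```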
@@ -1059,44 +1069,109 @@ function computeSnapshot(filesRepo) {
     totalSize
   };
 }
-async function …
-…
-…
-  for (const missingFile of missing) {
-    const absPath = path__default.default.join(rootPath, missingFile.file_path);
+async function withRetries(op, logger2, context, maxAttempts) {
+  let attempt = 0;
+  while (true) {
     try {
-…
-…
-…
-…
-…
-…
+      return await op();
+    } catch (err) {
+      attempt += 1;
+      if (attempt >= maxAttempts) {
+        logger2.error(
+          { err, ...context, attempt },
+          "Operation failed after retries"
+        );
+        throw err;
+      }
+      const delay = Math.min(15e3, 1e3 * 2 ** (attempt - 1));
       logger2.warn(
-        { err …
-        "…
+        { err, ...context, attempt, delay },
+        "Operation failed; retrying"
       );
+      await sleep(delay);
+    }
+  }
+}
+async function uploadMissing(rootPath, missing, syncClient, logger2, maxAttempts, chunkSize = 64) {
+  if (!missing || missing.length === 0) return;
+  const total = missing.length;
+  const chunks = [];
+  for (let i = 0; i < total; i += chunkSize) {
+    chunks.push(missing.slice(i, i + chunkSize));
+  }
+  for (let idx = 0; idx < chunks.length; idx++) {
+    const list = chunks[idx];
+    const map = /* @__PURE__ */ new Map();
+    for (const missingFile of list) {
+      const absPath = path__default.default.join(rootPath, missingFile.file_path);
+      try {
+        const buffer = await fs4__default.default.readFile(absPath);
+        map.set(missingFile.file_hash, {
+          path: missingFile.file_path,
+          content: buffer
+        });
+      } catch (error) {
+        logger2.warn(
+          { err: error, relPath: missingFile.file_path },
+          "Failed to read missing file content"
+        );
+      }
     }
+    if (map.size === 0) continue;
+    await withRetries(
+      () => syncClient.uploadFileContent(map),
+      logger2,
+      {
+        op: "uploadFileContent",
+        chunkIndex: idx + 1,
+        chunks: chunks.length,
+        files: map.size
+      },
+      maxAttempts
+    );
   }
-  if (map.size === 0) return;
-  await syncClient.uploadFileContent(map);
 }
-async function ensureSnapshotCreated(rootPath, computation, syncClient, logger2) {
+async function ensureSnapshotCreated(rootPath, computation, syncClient, logger2, options) {
   const { snapshotHash, files } = computation;
-…
+  const maxAttempts = options?.maxAttempts ?? 5;
+  const uploadChunkSize = options?.uploadChunkSize ?? 64;
+  let status = await withRetries(
+    () => syncClient.checkSnapshotStatus(snapshotHash),
+    logger2,
+    { op: "checkSnapshotStatus", snapshotHash },
+    maxAttempts
+  );
   if (status.status === "READY") {
     logger2.info({ snapshotHash }, "Snapshot already READY");
     return;
   }
   if (status.status === "NOT_FOUND" || status.status === "MISSING_CONTENT") {
-    status = await …
+    status = await withRetries(
+      () => syncClient.createSnapshot(snapshotHash, files),
+      logger2,
+      { op: "createSnapshot", snapshotHash },
+      maxAttempts
+    );
   }
   if (status.status === "MISSING_CONTENT" && status.missing_files?.length) {
     logger2.info(
       { missing: status.missing_files.length },
       "Uploading missing file content"
     );
-    await uploadMissing(
-…
+    await uploadMissing(
+      rootPath,
+      status.missing_files,
+      syncClient,
+      logger2,
+      maxAttempts,
+      uploadChunkSize
+    );
+    status = await withRetries(
+      () => syncClient.createSnapshot(snapshotHash, files),
+      logger2,
+      { op: "createSnapshot", snapshotHash },
+      maxAttempts
+    );
  }
   let attempt = 0;
   while (status.status !== "READY") {
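The core addition in this hunk is the `withRetries` helper: capped exponential backoff of `min(15000, 1000 * 2^(attempt-1))` ms, i.e. 1 s, 2 s, 4 s, 8 s, then a flat 15 s between attempts, rethrowing once the attempt budget is spent. A self-contained TypeScript rendering of the same logic (the pino `Logger` type and the `sleep` helper here are assumptions; the bundle defines its own):

```ts
import type { Logger } from "pino"; // logger library assumed from the call style

const sleep = (ms: number) => new Promise<void>((resolve) => setTimeout(resolve, ms));

// Retry an async operation with capped exponential backoff:
// waits 1s, 2s, 4s, 8s, then 15s between attempts; rethrows after maxAttempts.
async function withRetries<T>(
  op: () => Promise<T>,
  logger: Logger,
  context: Record<string, unknown>,
  maxAttempts: number
): Promise<T> {
  let attempt = 0;
  while (true) {
    try {
      return await op();
    } catch (err) {
      attempt += 1;
      if (attempt >= maxAttempts) {
        logger.error({ err, ...context, attempt }, "Operation failed after retries");
        throw err;
      }
      const delay = Math.min(15_000, 1_000 * 2 ** (attempt - 1));
      logger.warn({ err, ...context, attempt, delay }, "Operation failed; retrying");
      await sleep(delay);
    }
  }
}
```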
@@ -1106,13 +1181,24 @@ async function ensureSnapshotCreated(rootPath, computation, syncClient, logger2)
     const delay = Math.min(5e3, 1e3 * Math.max(1, 2 ** attempt));
     await sleep(delay);
     attempt += 1;
-    status = await …
+    status = await withRetries(
+      () => syncClient.checkSnapshotStatus(snapshotHash),
+      logger2,
+      { op: "checkSnapshotStatus", snapshotHash },
+      maxAttempts
+    );
   }
   logger2.info({ snapshotHash }, "Snapshot READY");
 }
-async function publishSnapshot(rootPath, filesRepo, snapshotsRepo, syncClient, logger2) {
+async function publishSnapshot(rootPath, filesRepo, snapshotsRepo, syncClient, logger2, options) {
   const computation = computeSnapshot(filesRepo);
-  await ensureSnapshotCreated(…
+  await ensureSnapshotCreated(
+    rootPath,
+    computation,
+    syncClient,
+    logger2,
+    options
+  );
   const createdAt = Date.now();
   snapshotsRepo.insert(
     computation.snapshotHash,
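`ensureSnapshotCreated` and `publishSnapshot` now accept an optional trailing `options` bag; when omitted, the defaults visible above apply (`maxAttempts: 5`, `uploadChunkSize: 64`). A hypothetical call site, with placeholder arguments:

```ts
// Hypothetical call; every argument except the options shape is a placeholder.
await publishSnapshot(rootPath, filesRepo, snapshotsRepo, syncClient, logger, {
  maxAttempts: 5,      // retry budget forwarded to withRetries
  uploadChunkSize: 64  // missing files uploaded per uploadFileContent batch
});
```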
@@ -1152,7 +1238,8 @@ async function runInitialSyncPipeline(runtime) {
     runtime.filesRepo,
     runtime.snapshotsRepo,
     runtime.clients.sync,
-    syncLogger
+    syncLogger,
+    { maxAttempts: runtime.config.maxSnapshotAttempts }
   );
   return result;
 }
@@ -1555,7 +1642,8 @@ var ServiceRunner = class {
       this.runtime.filesRepo,
       this.runtime.snapshotsRepo,
       this.runtime.clients.sync,
-      log
+      log,
+      { maxAttempts: this.runtime.config.maxSnapshotAttempts }
     );
     this.runtime.outbox.ack(job.id, this.fsControlLeaseOwner);
     this.state.updateSnapshotReady(result.createdAt);
@@ -1636,15 +1724,27 @@ async function runService(params) {
   try {
     runner = new ServiceRunner(runtime);
     await runner.prepareWatcher(true);
-…
-…
-…
-…
-…
-…
-…
-…
-…
+    let initialCreatedAt;
+    try {
+      const initial = await runInitialSyncPipeline(runtime);
+      runtime.logger.info(
+        {
+          snapshotHash: initial.snapshotHash,
+          filesCount: initial.filesCount
+        },
+        "Initial sync completed; entering continuous mode"
+      );
+      initialCreatedAt = initial.createdAt;
+    } catch (error) {
+      runtime.logger.warn(
+        { err: error },
+        "Initial sync failed; enqueuing snapshot job and continuing"
+      );
+      runtime.outbox.enqueueSnapshot(runtime.config.rootId, 0);
+    }
+    if (initialCreatedAt) {
+      runner.recordInitialSnapshot(initialCreatedAt);
+    }
     await runner.startLoops();
     await runner.enableWatcherProcessing();
     runtime.logger.info("Coderule scanner service is running");
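Net effect of this final hunk, taken together with the ServiceRunner hunk above: startup no longer fails hard when the first sync cannot complete. The error is logged, a snapshot job is enqueued via `runtime.outbox.enqueueSnapshot(runtime.config.rootId, 0)`, and the ServiceRunner job handler later retries the publish under the same `maxSnapshotAttempts` budget.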
|