@lark-apaas/openclaw-scripts-diagnose-cli 0.1.1-alpha.9 → 0.1.1-beta.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +7 -1
- package/dist/index.cjs +2028 -295
- package/package.json +3 -1
- package/template/openclaw.json +49 -32
package/dist/index.cjs
CHANGED
|
@@ -31,6 +31,14 @@ let node_path = require("node:path");
|
|
|
31
31
|
node_path = __toESM(node_path);
|
|
32
32
|
let node_child_process = require("node:child_process");
|
|
33
33
|
let node_crypto = require("node:crypto");
|
|
34
|
+
node_crypto = __toESM(node_crypto);
|
|
35
|
+
let node_os = require("node:os");
|
|
36
|
+
node_os = __toESM(node_os);
|
|
37
|
+
let node_stream = require("node:stream");
|
|
38
|
+
let node_stream_promises = require("node:stream/promises");
|
|
39
|
+
let node_assert = require("node:assert");
|
|
40
|
+
node_assert = __toESM(node_assert);
|
|
41
|
+
let _lark_apaas_http_client = require("@lark-apaas/http-client");
|
|
34
42
|
//#region src/rule-engine/base.ts
|
|
35
43
|
/** Abstract base class for all diagnose rules */
|
|
36
44
|
var DiagnoseRule = class {
|
|
@@ -81,6 +89,17 @@ function topoSort(rules) {
|
|
|
81
89
|
//#endregion
|
|
82
90
|
//#region src/utils.ts
|
|
83
91
|
/**
|
|
92
|
+
* Canonical provider-ref for the feishu app secret. Both
|
|
93
|
+
* `feishu_default_account` (multi-agent path) and `feishu_channel`
|
|
94
|
+
* (single-agent path) use this as the source-of-truth `appSecret`
|
|
95
|
+
* value when repairing.
|
|
96
|
+
*/
|
|
97
|
+
const DEFAULT_FEISHU_APP_SECRET = {
|
|
98
|
+
source: "file",
|
|
99
|
+
provider: "miaoda-secret-provider",
|
|
100
|
+
id: "/channels_feishu_app_secret"
|
|
101
|
+
};
|
|
102
|
+
/**
|
|
84
103
|
* Navigate nested object by keys, returning the value if it's a non-array object,
|
|
85
104
|
* or undefined otherwise.
|
|
86
105
|
*/
|
|
@@ -127,6 +146,14 @@ function isValidJWT(token) {
|
|
|
127
146
|
return false;
|
|
128
147
|
}
|
|
129
148
|
}
|
|
149
|
+
/**
|
|
150
|
+
* Return `val` as a plain-object record (non-null, non-array object), or
|
|
151
|
+
* `undefined` otherwise. Cheaper than `getNestedMap` when the value is already
|
|
152
|
+
* at hand.
|
|
153
|
+
*/
|
|
154
|
+
function asRecord(val) {
|
|
155
|
+
return val != null && typeof val === "object" && !Array.isArray(val) ? val : void 0;
|
|
156
|
+
}
|
|
130
157
|
/** Set a deeply nested value, creating intermediate objects as needed. */
|
|
131
158
|
function setNestedValue(obj, keys, value) {
|
|
132
159
|
let current = obj;
|
|
@@ -137,6 +164,25 @@ function setNestedValue(obj, keys, value) {
|
|
|
137
164
|
}
|
|
138
165
|
current[keys[keys.length - 1]] = value;
|
|
139
166
|
}
|
|
167
|
+
/**
|
|
168
|
+
* Locate the "main" agent in `agents.list`. Preference order:
|
|
169
|
+
* 1. Explicit `default: true` entry.
|
|
170
|
+
* 2. Entry with `id === 'main'` (project naming convention).
|
|
171
|
+
* 3. First entry in the list (positional fallback).
|
|
172
|
+
* Returns `undefined` when `agents.list` is missing or empty.
|
|
173
|
+
*/
|
|
174
|
+
function findMainAgent(config) {
|
|
175
|
+
const agents = getNestedMap(config, "agents");
|
|
176
|
+
if (!agents) return void 0;
|
|
177
|
+
const list = agents.list;
|
|
178
|
+
if (!Array.isArray(list) || list.length === 0) return void 0;
|
|
179
|
+
const isObj = (a) => a != null && typeof a === "object" && !Array.isArray(a);
|
|
180
|
+
const explicit = list.find((a) => isObj(a) && a.default === true);
|
|
181
|
+
if (explicit) return explicit;
|
|
182
|
+
const namedMain = list.find((a) => isObj(a) && a.id === "main");
|
|
183
|
+
if (namedMain) return namedMain;
|
|
184
|
+
return isObj(list[0]) ? list[0] : void 0;
|
|
185
|
+
}
|
|
140
186
|
/** Analyze which miaoda providers the config references. */
|
|
141
187
|
function analyzeProviderDeps(config) {
|
|
142
188
|
const deps = {
|
|
@@ -192,82 +238,14 @@ function fileExists(filePath) {
|
|
|
192
238
|
return node_fs.default.existsSync(filePath);
|
|
193
239
|
}
|
|
194
240
|
/** Execute a shell command, return stdout. Throws on failure. */
|
|
195
|
-
function shell(cmd, timeoutMs =
|
|
241
|
+
function shell(cmd, timeoutMs = 6e4) {
|
|
196
242
|
return (0, node_child_process.execSync)(cmd, {
|
|
197
243
|
encoding: "utf-8",
|
|
198
244
|
timeout: timeoutMs
|
|
199
245
|
}).trim();
|
|
200
246
|
}
|
|
201
|
-
/**
|
|
202
|
-
* Run a long-lived command and kill it only when it goes idle (no stdout/stderr
|
|
203
|
-
* for `idleMs`) or hits the total deadline `maxTotalMs`. Suited to commands like
|
|
204
|
-
* `npm install` that may be slow but keep streaming progress — we don't want to
|
|
205
|
-
* kill them just because they exceeded some fixed wall-clock budget, only when
|
|
206
|
-
* they actually stall (e.g. blocked on a cache / lockfile held by another npm).
|
|
207
|
-
*/
|
|
208
|
-
function shellUntilIdle(cmd, opts = {}) {
|
|
209
|
-
const idleMs = opts.idleMs ?? 9e4;
|
|
210
|
-
const maxTotalMs = opts.maxTotalMs ?? 9e5;
|
|
211
|
-
return new Promise((resolve, reject) => {
|
|
212
|
-
const proc = (0, node_child_process.spawn)("bash", ["-c", cmd], { stdio: [
|
|
213
|
-
"ignore",
|
|
214
|
-
"pipe",
|
|
215
|
-
"pipe"
|
|
216
|
-
] });
|
|
217
|
-
const startedAt = Date.now();
|
|
218
|
-
let lastOutputAt = Date.now();
|
|
219
|
-
let killed = null;
|
|
220
|
-
const bump = () => {
|
|
221
|
-
lastOutputAt = Date.now();
|
|
222
|
-
};
|
|
223
|
-
proc.stdout.on("data", bump);
|
|
224
|
-
proc.stderr.on("data", bump);
|
|
225
|
-
const timer = setInterval(() => {
|
|
226
|
-
const now = Date.now();
|
|
227
|
-
if (now - lastOutputAt > idleMs) {
|
|
228
|
-
killed = `idle > ${idleMs}ms`;
|
|
229
|
-
proc.kill("SIGKILL");
|
|
230
|
-
} else if (now - startedAt > maxTotalMs) {
|
|
231
|
-
killed = `total > ${maxTotalMs}ms`;
|
|
232
|
-
proc.kill("SIGKILL");
|
|
233
|
-
}
|
|
234
|
-
}, 5e3);
|
|
235
|
-
proc.on("exit", (code, signal) => {
|
|
236
|
-
clearInterval(timer);
|
|
237
|
-
if (killed) return reject(/* @__PURE__ */ new Error(`shellUntilIdle killed (${killed}): ${cmd}`));
|
|
238
|
-
if (code === 0) return resolve();
|
|
239
|
-
reject(/* @__PURE__ */ new Error(`shellUntilIdle exit code=${code} signal=${signal}: ${cmd}`));
|
|
240
|
-
});
|
|
241
|
-
proc.on("error", (err) => {
|
|
242
|
-
clearInterval(timer);
|
|
243
|
-
reject(err);
|
|
244
|
-
});
|
|
245
|
-
});
|
|
246
|
-
}
|
|
247
|
-
/**
|
|
248
|
-
* Retry a Promise-returning runner until it succeeds or the overall deadline
|
|
249
|
-
* passes. Designed to pair with `shellUntilIdle` for npm-style operations that
|
|
250
|
-
* may need several attempts if a concurrent npm is holding a lock.
|
|
251
|
-
*/
|
|
252
|
-
async function retryUntilDeadline(run, opts) {
|
|
253
|
-
const betweenAttemptsMs = opts.betweenAttemptsMs ?? 1e4;
|
|
254
|
-
const deadline = Date.now() + opts.maxTotalMs;
|
|
255
|
-
let lastErr;
|
|
256
|
-
let attempt = 0;
|
|
257
|
-
while (Date.now() < deadline) {
|
|
258
|
-
attempt++;
|
|
259
|
-
try {
|
|
260
|
-
return await run();
|
|
261
|
-
} catch (e) {
|
|
262
|
-
lastErr = e;
|
|
263
|
-
if (Date.now() + betweenAttemptsMs >= deadline) break;
|
|
264
|
-
await new Promise((r) => setTimeout(r, betweenAttemptsMs));
|
|
265
|
-
}
|
|
266
|
-
}
|
|
267
|
-
throw new Error(`retryUntilDeadline gave up after ${attempt} attempt(s): ${lastErr?.message ?? lastErr}`);
|
|
268
|
-
}
|
|
269
247
|
//#endregion
|
|
270
|
-
//#region \0@oxc-project+runtime@0.
|
|
248
|
+
//#region \0@oxc-project+runtime@0.115.0/helpers/decorate.js
|
|
271
249
|
function __decorate(decorators, target, key, desc) {
|
|
272
250
|
var c = arguments.length, r = c < 3 ? target : desc === null ? desc = Object.getOwnPropertyDescriptor(target, key) : desc, d;
|
|
273
251
|
if (typeof Reflect === "object" && typeof Reflect.decorate === "function") r = Reflect.decorate(decorators, target, key, desc);
|
|
@@ -313,11 +291,15 @@ function findBackupFiles(configPath) {
|
|
|
313
291
|
}
|
|
314
292
|
/**
|
|
315
293
|
* Among backup files, find the one with the highest numeric suffix.
|
|
316
|
-
*
|
|
294
|
+
* Supports all three naming styles used by the current backup code and its
|
|
295
|
+
* older variants:
|
|
296
|
+
* `.bak` → n = 0 (legacy single-slot backup)
|
|
297
|
+
* `.bakN` → n = N (older style, dot-less)
|
|
298
|
+
* `.bak.N` → n = N (current style written by reset Step 1)
|
|
317
299
|
*/
|
|
318
300
|
function findHighestBackup(backupFiles) {
|
|
319
301
|
if (backupFiles.length === 0) return null;
|
|
320
|
-
const bakRegex = /\.bak(\d*)$/;
|
|
302
|
+
const bakRegex = /\.bak\.?(\d*)$/;
|
|
321
303
|
let best = null;
|
|
322
304
|
for (const f of backupFiles) {
|
|
323
305
|
const match = bakRegex.exec(f);
|
|
@@ -332,7 +314,7 @@ function findHighestBackup(backupFiles) {
|
|
|
332
314
|
}
|
|
333
315
|
let ConfigFileBackupRule = class ConfigFileBackupRule extends DiagnoseRule {
|
|
334
316
|
validate(ctx) {
|
|
335
|
-
const configPath = ctx
|
|
317
|
+
const { configPath } = ctx;
|
|
336
318
|
if (!configPath) return {
|
|
337
319
|
pass: false,
|
|
338
320
|
message: "configPath not provided"
|
|
@@ -345,7 +327,7 @@ let ConfigFileBackupRule = class ConfigFileBackupRule extends DiagnoseRule {
|
|
|
345
327
|
return { pass: true };
|
|
346
328
|
}
|
|
347
329
|
repair(ctx) {
|
|
348
|
-
const configPath = ctx
|
|
330
|
+
const { configPath } = ctx;
|
|
349
331
|
if (!configPath) return;
|
|
350
332
|
const best = findHighestBackup(findBackupFiles(configPath));
|
|
351
333
|
if (!best) return;
|
|
@@ -374,7 +356,7 @@ function hasBackupFiles(configPath) {
|
|
|
374
356
|
}
|
|
375
357
|
let ConfigFileMissingRule = class ConfigFileMissingRule extends DiagnoseRule {
|
|
376
358
|
validate(ctx) {
|
|
377
|
-
const configPath = ctx
|
|
359
|
+
const { configPath } = ctx;
|
|
378
360
|
if (!configPath) return {
|
|
379
361
|
pass: false,
|
|
380
362
|
message: "configPath not provided"
|
|
@@ -396,7 +378,7 @@ ConfigFileMissingRule = __decorate([Rule({
|
|
|
396
378
|
//#region src/rules/config-syntax.ts
|
|
397
379
|
let ConfigSyntaxRule = class ConfigSyntaxRule extends DiagnoseRule {
|
|
398
380
|
validate(ctx) {
|
|
399
|
-
const configPath = ctx
|
|
381
|
+
const { configPath } = ctx;
|
|
400
382
|
if (!fileExists(configPath)) return { pass: true };
|
|
401
383
|
try {
|
|
402
384
|
loadJSON5().parse(readFile(configPath));
|
|
@@ -415,6 +397,74 @@ ConfigSyntaxRule = __decorate([Rule({
|
|
|
415
397
|
repairMode: "ai"
|
|
416
398
|
})], ConfigSyntaxRule);
|
|
417
399
|
//#endregion
|
|
400
|
+
//#region src/rules/template-vars-unreplaced.ts
|
|
401
|
+
/**
|
|
402
|
+
* Placeholder format used by miaoda-openclaw-template and Go-side templateVars,
|
|
403
|
+
* e.g. `$$__FEISHU_APP_ID__`. Double underscores on both sides act as a natural
|
|
404
|
+
* boundary so split-join replacement can't accidentally overlap between keys.
|
|
405
|
+
*/
|
|
406
|
+
const PLACEHOLDER_RE = /\$\$__[A-Z0-9_]+__/g;
|
|
407
|
+
let TemplateVarsUnreplacedRule = class TemplateVarsUnreplacedRule extends DiagnoseRule {
|
|
408
|
+
validate(ctx) {
|
|
409
|
+
const found = /* @__PURE__ */ new Set();
|
|
410
|
+
collectPlaceholders(ctx.config, found);
|
|
411
|
+
if (found.size === 0) return { pass: true };
|
|
412
|
+
return {
|
|
413
|
+
pass: false,
|
|
414
|
+
message: "存在未替换的模板占位符: " + [...found].sort().join(", ")
|
|
415
|
+
};
|
|
416
|
+
}
|
|
417
|
+
repair(ctx) {
|
|
418
|
+
const map = ctx.templateVars;
|
|
419
|
+
if (!map || Object.keys(map).length === 0) return;
|
|
420
|
+
replaceInPlace(ctx.config, Object.entries(map));
|
|
421
|
+
}
|
|
422
|
+
};
|
|
423
|
+
TemplateVarsUnreplacedRule = __decorate([Rule({
|
|
424
|
+
key: "template_vars_unreplaced",
|
|
425
|
+
dependsOn: ["config_syntax_check"],
|
|
426
|
+
repairMode: "standard"
|
|
427
|
+
})], TemplateVarsUnreplacedRule);
|
|
428
|
+
function collectPlaceholders(value, found) {
|
|
429
|
+
if (typeof value === "string") {
|
|
430
|
+
const matches = value.match(PLACEHOLDER_RE);
|
|
431
|
+
if (matches) for (const m of matches) found.add(m);
|
|
432
|
+
return;
|
|
433
|
+
}
|
|
434
|
+
if (Array.isArray(value)) {
|
|
435
|
+
for (const v of value) collectPlaceholders(v, found);
|
|
436
|
+
return;
|
|
437
|
+
}
|
|
438
|
+
if (value && typeof value === "object") for (const v of Object.values(value)) collectPlaceholders(v, found);
|
|
439
|
+
}
|
|
440
|
+
function replaceInPlace(value, entries) {
|
|
441
|
+
if (Array.isArray(value)) {
|
|
442
|
+
for (let i = 0; i < value.length; i++) {
|
|
443
|
+
const el = value[i];
|
|
444
|
+
if (typeof el === "string") value[i] = applyVars(el, entries);
|
|
445
|
+
else replaceInPlace(el, entries);
|
|
446
|
+
}
|
|
447
|
+
return;
|
|
448
|
+
}
|
|
449
|
+
if (value && typeof value === "object") {
|
|
450
|
+
const obj = value;
|
|
451
|
+
for (const key of Object.keys(obj)) {
|
|
452
|
+
const v = obj[key];
|
|
453
|
+
if (typeof v === "string") obj[key] = applyVars(v, entries);
|
|
454
|
+
else replaceInPlace(v, entries);
|
|
455
|
+
}
|
|
456
|
+
}
|
|
457
|
+
}
|
|
458
|
+
/** Split-join replacement — matches the algorithm in reset.ts:120 and avoids regex-escaping `$$`. */
|
|
459
|
+
function applyVars(str, entries) {
|
|
460
|
+
let out = str;
|
|
461
|
+
for (const [placeholder, value] of entries) {
|
|
462
|
+
if (!value) continue;
|
|
463
|
+
if (out.includes(placeholder)) out = out.split(placeholder).join(value);
|
|
464
|
+
}
|
|
465
|
+
return out;
|
|
466
|
+
}
|
|
467
|
+
//#endregion
|
|
418
468
|
//#region src/rules/model-provider.ts
|
|
419
469
|
var _ModelProviderRule;
|
|
420
470
|
let ModelProviderRule = class ModelProviderRule extends DiagnoseRule {
|
|
@@ -598,16 +648,11 @@ SecretProviderRule = _SecretProviderRule = __decorate([Rule({
|
|
|
598
648
|
})], SecretProviderRule);
|
|
599
649
|
//#endregion
|
|
600
650
|
//#region src/rules/feishu-channel.ts
|
|
601
|
-
|
|
651
|
+
/**
|
|
652
|
+
* Owns `channels.feishu.enabled` + single-agent top-level appId/appSecret.
|
|
653
|
+
* Multi-agent shape (`accounts` present) belongs to `feishu_default_account`.
|
|
654
|
+
*/
|
|
602
655
|
let FeishuChannelRule = class FeishuChannelRule extends DiagnoseRule {
|
|
603
|
-
static {
|
|
604
|
-
_FeishuChannelRule = this;
|
|
605
|
-
}
|
|
606
|
-
static DEFAULT_APP_SECRET = {
|
|
607
|
-
source: "file",
|
|
608
|
-
provider: "miaoda-secret-provider",
|
|
609
|
-
id: "/channels_feishu_app_secret"
|
|
610
|
-
};
|
|
611
656
|
validate(ctx) {
|
|
612
657
|
const feishu = getNestedMap(ctx.config, "channels", "feishu");
|
|
613
658
|
if (!feishu) return {
|
|
@@ -618,16 +663,16 @@ let FeishuChannelRule = class FeishuChannelRule extends DiagnoseRule {
|
|
|
618
663
|
pass: false,
|
|
619
664
|
message: "channels.feishu.enabled mismatch: got " + feishu.enabled + ", expected true"
|
|
620
665
|
};
|
|
666
|
+
if (asRecord(feishu.accounts)) return { pass: true };
|
|
621
667
|
if (feishu.appId !== ctx.vars.feishuAppID) return {
|
|
622
668
|
pass: false,
|
|
623
|
-
message:
|
|
669
|
+
message: `channels.feishu.appId mismatch: got ${feishu.appId}, expected ${ctx.vars.feishuAppID}`
|
|
624
670
|
};
|
|
625
|
-
const expectedSecret = _FeishuChannelRule.DEFAULT_APP_SECRET;
|
|
626
671
|
const secret = feishu.appSecret;
|
|
627
672
|
if (typeof secret === "object" && secret !== null && !Array.isArray(secret)) {
|
|
628
|
-
if (!matchMap(secret,
|
|
673
|
+
if (!matchMap(secret, DEFAULT_FEISHU_APP_SECRET)) return {
|
|
629
674
|
pass: false,
|
|
630
|
-
message:
|
|
675
|
+
message: `channels.feishu.appSecret object mismatch: got ${JSON.stringify(secret)}`
|
|
631
676
|
};
|
|
632
677
|
} else if (typeof secret === "string") {
|
|
633
678
|
if (secret !== ctx.vars.feishuAppSecret) return {
|
|
@@ -646,6 +691,7 @@ let FeishuChannelRule = class FeishuChannelRule extends DiagnoseRule {
|
|
|
646
691
|
"feishu",
|
|
647
692
|
"enabled"
|
|
648
693
|
], true);
|
|
694
|
+
if (asRecord(getNestedMap(ctx.config, "channels", "feishu").accounts)) return;
|
|
649
695
|
setNestedValue(ctx.config, [
|
|
650
696
|
"channels",
|
|
651
697
|
"feishu",
|
|
@@ -655,15 +701,167 @@ let FeishuChannelRule = class FeishuChannelRule extends DiagnoseRule {
|
|
|
655
701
|
"channels",
|
|
656
702
|
"feishu",
|
|
657
703
|
"appSecret"
|
|
658
|
-
],
|
|
704
|
+
], DEFAULT_FEISHU_APP_SECRET);
|
|
659
705
|
}
|
|
660
706
|
};
|
|
661
|
-
FeishuChannelRule =
|
|
707
|
+
FeishuChannelRule = __decorate([Rule({
|
|
662
708
|
key: "feishu_channel",
|
|
663
|
-
dependsOn: ["config_syntax_check"],
|
|
709
|
+
dependsOn: ["config_syntax_check", "feishu_default_account"],
|
|
664
710
|
repairMode: "standard"
|
|
665
711
|
})], FeishuChannelRule);
|
|
666
712
|
//#endregion
|
|
713
|
+
//#region src/rules/feishu-default-account.ts
|
|
714
|
+
/**
|
|
715
|
+
* Owner of the multi-agent feishu channel shape: migrates legacy v1/v2
|
|
716
|
+
* (top-level appId + defaultAccount/default) into v3 (`bot-<appId>` account),
|
|
717
|
+
* detects + fixes drift on the main bot's appId/appSecret. Single-agent
|
|
718
|
+
* configs (no `accounts`) are out of scope — handled by `feishu_channel`.
|
|
719
|
+
*/
|
|
720
|
+
let FeishuDefaultAccountRule = class FeishuDefaultAccountRule extends DiagnoseRule {
|
|
721
|
+
validate(ctx) {
|
|
722
|
+
const feishu = getNestedMap(ctx.config, "channels", "feishu");
|
|
723
|
+
if (!feishu) return { pass: true };
|
|
724
|
+
const accounts = asRecord(feishu.accounts);
|
|
725
|
+
if (!accounts) return { pass: true };
|
|
726
|
+
const topAppId = feishu.appId;
|
|
727
|
+
if (typeof topAppId === "string" && topAppId !== "") return {
|
|
728
|
+
pass: false,
|
|
729
|
+
message: "channels.feishu has legacy shape; needs migration to accounts.bot-<appId>"
|
|
730
|
+
};
|
|
731
|
+
const mainBot = findMainBotAccount(ctx.config, accounts);
|
|
732
|
+
if (mainBot) {
|
|
733
|
+
const expectedAppId = ctx.vars?.feishuAppID;
|
|
734
|
+
if (typeof expectedAppId === "string" && expectedAppId !== "" && mainBot.acc.appId !== expectedAppId) return {
|
|
735
|
+
pass: false,
|
|
736
|
+
message: `accounts.${mainBot.accountId}.appId mismatch: got ${mainBot.acc.appId}, expected ${expectedAppId}`
|
|
737
|
+
};
|
|
738
|
+
if (!secretMatchesCanonical(mainBot.acc.appSecret)) return {
|
|
739
|
+
pass: false,
|
|
740
|
+
message: `accounts.${mainBot.accountId}.appSecret drift`
|
|
741
|
+
};
|
|
742
|
+
}
|
|
743
|
+
return { pass: true };
|
|
744
|
+
}
|
|
745
|
+
repair(ctx) {
|
|
746
|
+
const feishu = getNestedMap(ctx.config, "channels", "feishu");
|
|
747
|
+
if (!feishu) return;
|
|
748
|
+
const accounts = asRecord(feishu.accounts);
|
|
749
|
+
if (!accounts) return;
|
|
750
|
+
const topAppId = feishu.appId;
|
|
751
|
+
if (typeof topAppId === "string" && topAppId !== "") {
|
|
752
|
+
this.migrate(ctx, feishu, accounts, topAppId);
|
|
753
|
+
return;
|
|
754
|
+
}
|
|
755
|
+
this.enforceMainBotValues(ctx, accounts);
|
|
756
|
+
}
|
|
757
|
+
migrate(ctx, feishu, accounts, topAppId) {
|
|
758
|
+
const effectiveAppId = nonEmpty(ctx.vars?.feishuAppID) ?? topAppId;
|
|
759
|
+
const expectedKey = `bot-${effectiveAppId}`;
|
|
760
|
+
const existingBot = asRecord(accounts[expectedKey]) ?? {};
|
|
761
|
+
const defaultAccount = asRecord(accounts.defaultAccount) ?? {};
|
|
762
|
+
const defaultAcc = asRecord(accounts.default) ?? {};
|
|
763
|
+
const merged = {
|
|
764
|
+
...existingBot,
|
|
765
|
+
...defaultAccount,
|
|
766
|
+
...defaultAcc,
|
|
767
|
+
appId: effectiveAppId,
|
|
768
|
+
appSecret: DEFAULT_FEISHU_APP_SECRET
|
|
769
|
+
};
|
|
770
|
+
const chatID = ctx.vars?.teamChatID;
|
|
771
|
+
if (typeof chatID === "string" && chatID !== "") {
|
|
772
|
+
const existingGroups = asRecord(merged.groups) ?? {};
|
|
773
|
+
if (!(chatID in existingGroups)) merged.groups = {
|
|
774
|
+
...existingGroups,
|
|
775
|
+
[chatID]: { requireMention: false }
|
|
776
|
+
};
|
|
777
|
+
}
|
|
778
|
+
accounts[expectedKey] = merged;
|
|
779
|
+
delete accounts.defaultAccount;
|
|
780
|
+
delete accounts.default;
|
|
781
|
+
delete feishu.appId;
|
|
782
|
+
delete feishu.appSecret;
|
|
783
|
+
this.rewireBindings(ctx.config, expectedKey);
|
|
784
|
+
}
|
|
785
|
+
enforceMainBotValues(ctx, accounts) {
|
|
786
|
+
const mainBot = findMainBotAccount(ctx.config, accounts);
|
|
787
|
+
if (!mainBot) return;
|
|
788
|
+
const acc = accounts[mainBot.accountId];
|
|
789
|
+
const expectedAppId = nonEmpty(ctx.vars?.feishuAppID);
|
|
790
|
+
if (expectedAppId !== void 0 && acc.appId !== expectedAppId) acc.appId = expectedAppId;
|
|
791
|
+
if (!secretMatchesCanonical(acc.appSecret)) acc.appSecret = DEFAULT_FEISHU_APP_SECRET;
|
|
792
|
+
}
|
|
793
|
+
rewireBindings(config, expectedKey) {
|
|
794
|
+
if (!Array.isArray(config.bindings)) return;
|
|
795
|
+
const bindings = config.bindings;
|
|
796
|
+
for (const b of bindings) {
|
|
797
|
+
const match = asRecord(asRecord(b)?.match);
|
|
798
|
+
if (match && match.channel === "feishu" && (match.accountId === "defaultAccount" || match.accountId === "default")) match.accountId = expectedKey;
|
|
799
|
+
}
|
|
800
|
+
const seen = /* @__PURE__ */ new Set();
|
|
801
|
+
const deduped = [];
|
|
802
|
+
for (const b of bindings) {
|
|
803
|
+
const rec = asRecord(b);
|
|
804
|
+
if (!rec) continue;
|
|
805
|
+
const match = asRecord(rec.match);
|
|
806
|
+
const key = JSON.stringify([
|
|
807
|
+
rec.agentId,
|
|
808
|
+
match?.channel,
|
|
809
|
+
match?.accountId
|
|
810
|
+
]);
|
|
811
|
+
if (seen.has(key)) continue;
|
|
812
|
+
seen.add(key);
|
|
813
|
+
deduped.push(rec);
|
|
814
|
+
}
|
|
815
|
+
const mainId = findMainAgent(config)?.id;
|
|
816
|
+
if (typeof mainId === "string" && mainId !== "") {
|
|
817
|
+
if (!deduped.some((b) => {
|
|
818
|
+
const m = asRecord(b.match);
|
|
819
|
+
return b.agentId === mainId && m?.channel === "feishu" && m?.accountId === expectedKey;
|
|
820
|
+
})) deduped.push({
|
|
821
|
+
type: "route",
|
|
822
|
+
agentId: mainId,
|
|
823
|
+
match: {
|
|
824
|
+
channel: "feishu",
|
|
825
|
+
accountId: expectedKey
|
|
826
|
+
}
|
|
827
|
+
});
|
|
828
|
+
}
|
|
829
|
+
config.bindings = deduped;
|
|
830
|
+
}
|
|
831
|
+
};
|
|
832
|
+
FeishuDefaultAccountRule = __decorate([Rule({
|
|
833
|
+
key: "feishu_default_account",
|
|
834
|
+
dependsOn: ["config_syntax_check"],
|
|
835
|
+
repairMode: "standard"
|
|
836
|
+
})], FeishuDefaultAccountRule);
|
|
837
|
+
function nonEmpty(v) {
|
|
838
|
+
return typeof v === "string" && v !== "" ? v : void 0;
|
|
839
|
+
}
|
|
840
|
+
function findMainBotAccount(config, accounts) {
|
|
841
|
+
const mainId = findMainAgent(config)?.id;
|
|
842
|
+
if (typeof mainId !== "string" || mainId === "") return void 0;
|
|
843
|
+
const bindings = Array.isArray(config.bindings) ? config.bindings : [];
|
|
844
|
+
for (const b of bindings) {
|
|
845
|
+
const rec = asRecord(b);
|
|
846
|
+
const match = asRecord(rec?.match);
|
|
847
|
+
if (rec && match && rec.agentId === mainId && match.channel === "feishu") {
|
|
848
|
+
const accountId = match.accountId;
|
|
849
|
+
if (typeof accountId === "string") {
|
|
850
|
+
const acc = asRecord(accounts[accountId]);
|
|
851
|
+
if (acc) return {
|
|
852
|
+
accountId,
|
|
853
|
+
acc
|
|
854
|
+
};
|
|
855
|
+
}
|
|
856
|
+
}
|
|
857
|
+
}
|
|
858
|
+
}
|
|
859
|
+
/** Bot accounts must carry the canonical provider-ref `appSecret`. */
|
|
860
|
+
function secretMatchesCanonical(secret) {
|
|
861
|
+
if (typeof secret !== "object" || secret === null || Array.isArray(secret)) return false;
|
|
862
|
+
return matchMap(secret, DEFAULT_FEISHU_APP_SECRET);
|
|
863
|
+
}
|
|
864
|
+
//#endregion
|
|
667
865
|
//#region src/rules/gateway.ts
|
|
668
866
|
var _GatewayRule;
|
|
669
867
|
let GatewayRule = class GatewayRule extends DiagnoseRule {
|
|
@@ -671,12 +869,17 @@ let GatewayRule = class GatewayRule extends DiagnoseRule {
|
|
|
671
869
|
_GatewayRule = this;
|
|
672
870
|
}
|
|
673
871
|
static DEFAULT_PORT = 18789;
|
|
872
|
+
static DEFAULT_MODE = "local";
|
|
873
|
+
static DEFAULT_BIND = "loopback";
|
|
674
874
|
static DEFAULT_AUTH_MODE = "token";
|
|
675
875
|
static DEFAULT_AUTH_TOKEN = {
|
|
676
876
|
source: "file",
|
|
677
877
|
provider: "miaoda-secret-provider",
|
|
678
878
|
id: "/gateway_auth_token"
|
|
679
879
|
};
|
|
880
|
+
/** Required entries in gateway.trustedProxies. Repair appends any missing
|
|
881
|
+
* entries while preserving caller-added extras (no overwrite). */
|
|
882
|
+
static DEFAULT_TRUSTED_PROXIES = ["::1", "127.0.0.1"];
|
|
680
883
|
validate(ctx) {
|
|
681
884
|
const gateway = ctx.config.gateway;
|
|
682
885
|
if (!gateway || typeof gateway !== "object") return {
|
|
@@ -688,6 +891,14 @@ let GatewayRule = class GatewayRule extends DiagnoseRule {
|
|
|
688
891
|
pass: false,
|
|
689
892
|
message: "gateway.port mismatch: got " + gw.port + ", expected " + _GatewayRule.DEFAULT_PORT
|
|
690
893
|
};
|
|
894
|
+
if (gw.mode !== _GatewayRule.DEFAULT_MODE) return {
|
|
895
|
+
pass: false,
|
|
896
|
+
message: "gateway.mode mismatch: got " + gw.mode + ", expected " + _GatewayRule.DEFAULT_MODE
|
|
897
|
+
};
|
|
898
|
+
if (gw.bind !== _GatewayRule.DEFAULT_BIND) return {
|
|
899
|
+
pass: false,
|
|
900
|
+
message: "gateway.bind mismatch: got " + gw.bind + ", expected " + _GatewayRule.DEFAULT_BIND
|
|
901
|
+
};
|
|
691
902
|
const auth = gw.auth;
|
|
692
903
|
if (!auth || typeof auth !== "object") return {
|
|
693
904
|
pass: false,
|
|
@@ -722,10 +933,22 @@ let GatewayRule = class GatewayRule extends DiagnoseRule {
|
|
|
722
933
|
pass: false,
|
|
723
934
|
message: "gateway.controlUi.dangerouslyDisableDeviceAuth must be true, got " + controlUi.dangerouslyDisableDeviceAuth
|
|
724
935
|
};
|
|
936
|
+
const proxies = gw.trustedProxies;
|
|
937
|
+
if (!Array.isArray(proxies)) return {
|
|
938
|
+
pass: false,
|
|
939
|
+
message: "gateway.trustedProxies missing or not an array"
|
|
940
|
+
};
|
|
941
|
+
const missing = _GatewayRule.DEFAULT_TRUSTED_PROXIES.filter((p) => !proxies.includes(p));
|
|
942
|
+
if (missing.length > 0) return {
|
|
943
|
+
pass: false,
|
|
944
|
+
message: "gateway.trustedProxies missing: " + JSON.stringify(missing)
|
|
945
|
+
};
|
|
725
946
|
return { pass: true };
|
|
726
947
|
}
|
|
727
948
|
repair(ctx) {
|
|
728
949
|
setNestedValue(ctx.config, ["gateway", "port"], _GatewayRule.DEFAULT_PORT);
|
|
950
|
+
setNestedValue(ctx.config, ["gateway", "mode"], _GatewayRule.DEFAULT_MODE);
|
|
951
|
+
setNestedValue(ctx.config, ["gateway", "bind"], _GatewayRule.DEFAULT_BIND);
|
|
729
952
|
setNestedValue(ctx.config, [
|
|
730
953
|
"gateway",
|
|
731
954
|
"auth",
|
|
@@ -741,6 +964,14 @@ let GatewayRule = class GatewayRule extends DiagnoseRule {
|
|
|
741
964
|
"controlUi",
|
|
742
965
|
"dangerouslyDisableDeviceAuth"
|
|
743
966
|
], true);
|
|
967
|
+
const gw = ctx.config.gateway ?? {};
|
|
968
|
+
const current = Array.isArray(gw.trustedProxies) ? gw.trustedProxies.slice() : [];
|
|
969
|
+
const seen = new Set(current.map((v) => String(v)));
|
|
970
|
+
for (const p of _GatewayRule.DEFAULT_TRUSTED_PROXIES) if (!seen.has(p)) {
|
|
971
|
+
current.push(p);
|
|
972
|
+
seen.add(p);
|
|
973
|
+
}
|
|
974
|
+
setNestedValue(ctx.config, ["gateway", "trustedProxies"], current);
|
|
744
975
|
}
|
|
745
976
|
};
|
|
746
977
|
GatewayRule = _GatewayRule = __decorate([Rule({
|
|
@@ -754,7 +985,9 @@ let AllowedOriginsRule = class AllowedOriginsRule extends DiagnoseRule {
|
|
|
754
985
|
validate(ctx) {
|
|
755
986
|
const expected = getExpectedOrigins(ctx.vars);
|
|
756
987
|
if (expected.length === 0) return { pass: true };
|
|
757
|
-
const
|
|
988
|
+
const current = getCurrentOrigins(ctx.config);
|
|
989
|
+
if (hasWildcard(current)) return { pass: true };
|
|
990
|
+
const missing = findMissing(current, expected);
|
|
758
991
|
if (missing.length === 0) return { pass: true };
|
|
759
992
|
return {
|
|
760
993
|
pass: false,
|
|
@@ -764,6 +997,7 @@ let AllowedOriginsRule = class AllowedOriginsRule extends DiagnoseRule {
|
|
|
764
997
|
repair(ctx) {
|
|
765
998
|
const expected = getExpectedOrigins(ctx.vars);
|
|
766
999
|
const current = getCurrentOrigins(ctx.config);
|
|
1000
|
+
if (hasWildcard(current)) return;
|
|
767
1001
|
const missing = findMissing(current, expected);
|
|
768
1002
|
if (missing.length > 0) {
|
|
769
1003
|
const seen = /* @__PURE__ */ new Set();
|
|
@@ -803,6 +1037,10 @@ function findMissing(current, expected) {
|
|
|
803
1037
|
const set = new Set(current);
|
|
804
1038
|
return expected.filter((o) => !set.has(o));
|
|
805
1039
|
}
|
|
1040
|
+
/** Exact "*" entry means allow-all; pattern globs like "https://*.example.com" don't count. */
|
|
1041
|
+
function hasWildcard(origins) {
|
|
1042
|
+
return origins.includes("*");
|
|
1043
|
+
}
|
|
806
1044
|
//#endregion
|
|
807
1045
|
//#region src/rules/jwt-token.ts
|
|
808
1046
|
let JwtTokenRule = class JwtTokenRule extends DiagnoseRule {
|
|
@@ -920,6 +1158,217 @@ SecretsRule = __decorate([Rule({
|
|
|
920
1158
|
skipWhen: ({ hasMiaoda, deps }) => !hasMiaoda || !deps.usesMiaodaSecretProvider
|
|
921
1159
|
})], SecretsRule);
|
|
922
1160
|
//#endregion
|
|
1161
|
+
//#region src/rules/cleanup-install-backup-dirs.ts
|
|
1162
|
+
const DIR_PREFIX = ".openclaw-install-";
|
|
1163
|
+
function resolveExtensionsDir(configPath) {
|
|
1164
|
+
return node_path.default.join(node_path.default.dirname(configPath), "extensions");
|
|
1165
|
+
}
|
|
1166
|
+
function findLeftoverDirs(extensionsDir) {
|
|
1167
|
+
if (!fileExists(extensionsDir)) return [];
|
|
1168
|
+
let entries;
|
|
1169
|
+
try {
|
|
1170
|
+
entries = node_fs.default.readdirSync(extensionsDir, { withFileTypes: true });
|
|
1171
|
+
} catch {
|
|
1172
|
+
return [];
|
|
1173
|
+
}
|
|
1174
|
+
return entries.filter((e) => e.isDirectory() && e.name.startsWith(DIR_PREFIX)).map((e) => node_path.default.join(extensionsDir, e.name));
|
|
1175
|
+
}
|
|
1176
|
+
let CleanupInstallBackupDirsRule = class CleanupInstallBackupDirsRule extends DiagnoseRule {
|
|
1177
|
+
validate(ctx) {
|
|
1178
|
+
const { configPath } = ctx;
|
|
1179
|
+
if (!configPath) return { pass: true };
|
|
1180
|
+
const dirs = findLeftoverDirs(resolveExtensionsDir(configPath));
|
|
1181
|
+
if (dirs.length === 0) return { pass: true };
|
|
1182
|
+
return {
|
|
1183
|
+
pass: false,
|
|
1184
|
+
message: `extensions 目录下发现 ${dirs.length} 个 ${DIR_PREFIX}* 脏目录需要清理`
|
|
1185
|
+
};
|
|
1186
|
+
}
|
|
1187
|
+
repair(ctx) {
|
|
1188
|
+
const { configPath } = ctx;
|
|
1189
|
+
if (!configPath) return;
|
|
1190
|
+
const dirs = findLeftoverDirs(resolveExtensionsDir(configPath));
|
|
1191
|
+
const failures = [];
|
|
1192
|
+
for (const dir of dirs) try {
|
|
1193
|
+
node_fs.default.rmSync(dir, {
|
|
1194
|
+
recursive: true,
|
|
1195
|
+
force: true
|
|
1196
|
+
});
|
|
1197
|
+
} catch (e) {
|
|
1198
|
+
failures.push(`${node_path.default.basename(dir)}: ${e.message}`);
|
|
1199
|
+
}
|
|
1200
|
+
if (dirs.length > 0 && failures.length === dirs.length) throw new Error(`cleanup_install_backup_dirs: 全部清理失败: ${failures.join("; ")}`);
|
|
1201
|
+
}
|
|
1202
|
+
};
|
|
1203
|
+
CleanupInstallBackupDirsRule = __decorate([Rule({
|
|
1204
|
+
key: "cleanup_install_backup_dirs",
|
|
1205
|
+
repairMode: "standard"
|
|
1206
|
+
})], CleanupInstallBackupDirsRule);
|
|
1207
|
+
//#endregion
|
|
1208
|
+
//#region src/rules/miaoda-official-plugins-install-spec-unlock.ts
|
|
1209
|
+
/**
 * Official miaoda-side plugins that must track the manifest — a
 * version-locked spec on any of these blocks upgrades. Third-party /
 * user-installed plugins are intentionally out of scope (users may pin
 * them deliberately).
 */
const OFFICIAL_PLUGIN_NAMES = new Set([
	"openclaw-extension-miaoda",
	"openclaw-extension-miaoda-coding",
	"openclaw-guardian-plugin",
	"openclaw-mem0-plugin",
	"openclaw-lark"
]);
/** Matches a pinned npm spec: optional @scope/ then name@<version-ish>. */
const LOCKED_NPM_SPEC = /^(@[a-z0-9][\w.-]*\/)?[a-z0-9][\w.-]*@[^@/:#\s]+$/i;
/** True when `spec` is a string of the pinned `pkg@version` form. */
function isLockedNpmSpec(spec) {
	if (typeof spec !== "string") return false;
	return LOCKED_NPM_SPEC.test(spec);
}
|
|
1225
|
+
/**
 * Strip the version suffix from a locked npm spec, returning the bare
 * package name. For scoped specs (`@scope/name@1.2.3`) the version
 * separator is the first `@` AFTER the `/`, not the leading scope `@`.
 *
 * Robustness fix: if no version separator is found (spec is already a
 * bare name), return the spec unchanged. Previously `cut` was `-1` in
 * that case and `slice(0, -1)` silently dropped the last character.
 *
 * @param {string} spec - npm install spec, normally of the locked form.
 * @returns {string} the package name without the version suffix.
 */
function unlockSpec(spec) {
	const slash = spec.indexOf("/");
	const cut = slash === -1 ? spec.indexOf("@") : spec.indexOf("@", slash + 1);
	if (cut === -1) return spec;
	return spec.slice(0, cut);
}
|
|
1230
|
+
/** Yield `[key, lockedSpec]` for every official-plugin install whose `spec` is locked. */
function* iterLockedOfficialInstalls(config) {
	const installs = getNestedMap(config, "plugins", "installs");
	if (installs == null) return;
	for (const key of Object.keys(installs)) {
		if (!OFFICIAL_PLUGIN_NAMES.has(key)) continue;
		const spec = asRecord(installs[key])?.spec;
		if (!isLockedNpmSpec(spec)) continue;
		yield [key, spec];
	}
}
|
|
1240
|
+
/** Detects official plugins pinned to an exact version under
 * plugins.installs and rewrites each spec to the bare package name so
 * manifest-driven upgrades can flow again. */
let MiaodaOfficialPluginsInstallSpecUnlockRule = class MiaodaOfficialPluginsInstallSpecUnlockRule extends DiagnoseRule {
	validate(ctx) {
		const lockedKeys = [];
		for (const [key] of iterLockedOfficialInstalls(ctx.config)) lockedKeys.push(key);
		if (lockedKeys.length === 0) return { pass: true };
		return {
			pass: false,
			message: "plugins.installs 中官方插件存在锁版本的 spec: " + lockedKeys.sort().join(",")
		};
	}
	repair(ctx) {
		for (const [key, spec] of iterLockedOfficialInstalls(ctx.config)) {
			setNestedValue(ctx.config, [
				"plugins",
				"installs",
				key,
				"spec"
			], unlockSpec(spec));
		}
	}
};
MiaodaOfficialPluginsInstallSpecUnlockRule = __decorate([Rule({
	key: "miaoda_official_plugins_install_spec_unlock",
	dependsOn: ["config_syntax_check"],
	repairMode: "standard"
})], MiaodaOfficialPluginsInstallSpecUnlockRule);
|
|
1263
|
+
//#endregion
|
|
1264
|
+
//#region src/rules/old-miaoda-plugins-cleanup.ts
|
|
1265
|
+
// Replacement plugin: once this is referenced by the config, the legacy
// plugins listed below are pure residue and safe to remove.
const NEW_MIAODA = "openclaw-extension-miaoda";
// Legacy plugin identifiers (both prefixed and bare forms) superseded by
// NEW_MIAODA. Frozen to guard against accidental mutation.
const OLD_PLUGIN_NAMES = Object.freeze([
	"openclaw-feishu-greeting",
	"openclaw-miaoda-keepalive",
	"feishu-greeting",
	"miaoda-keepalive"
]);
|
|
1272
|
+
/** Collect the three plugin-related views of the config: the entries map,
 * the installs map, and the allow list (`undefined` when absent or not an
 * array). */
function getPluginMaps(config) {
	const entries = getNestedMap(config, "plugins", "entries");
	const installs = getNestedMap(config, "plugins", "installs");
	const rawAllow = asRecord(config.plugins)?.allow;
	const allow = Array.isArray(rawAllow) ? rawAllow : void 0;
	return { entries, installs, allow };
}
|
|
1280
|
+
/** The extensions directory lives next to the openclaw config file. */
function getExtensionsDir(configPath) {
	const configDir = node_path.default.dirname(configPath);
	return node_path.default.join(configDir, "extensions");
}
|
|
1283
|
+
/** True when the new miaoda extension is referenced anywhere: as a record
 * in entries, a record in installs, or a member of the allow list. */
function hasNewMiaoda({ entries, installs, allow }) {
	if (asRecord(entries?.[NEW_MIAODA]) != null) return true;
	if (asRecord(installs?.[NEW_MIAODA]) != null) return true;
	return allow?.includes(NEW_MIAODA) ?? false;
}
|
|
1286
|
+
/** Old plugin names that still appear in the config maps, the allow list,
 * or as an on-disk directory under `extensionsDir`. */
function findResiduals({ entries, installs, allow }, extensionsDir) {
	const isPresent = (name) =>
		entries?.[name] != null ||
		installs?.[name] != null ||
		(allow?.includes(name) ?? false) ||
		node_fs.default.existsSync(node_path.default.join(extensionsDir, name));
	return OLD_PLUGIN_NAMES.filter(isPresent);
}
|
|
1289
|
+
/** Removes residue of the legacy miaoda plugins (config entries, installs,
 * allow-list members, on-disk extension dirs) once the new miaoda
 * extension is present. Does nothing while the new plugin is absent. */
let OldMiaodaPluginsCleanupRule = class OldMiaodaPluginsCleanupRule extends DiagnoseRule {
	validate(ctx) {
		const maps = getPluginMaps(ctx.config);
		// Only flag residue when the replacement plugin is already installed.
		if (!hasNewMiaoda(maps)) return { pass: true };
		const residuals = findResiduals(maps, getExtensionsDir(ctx.configPath));
		if (residuals.length === 0) return { pass: true };
		return {
			pass: false,
			message: "旧 miaoda 插件残留: " + residuals.sort().join(",")
		};
	}
	repair(ctx) {
		const maps = getPluginMaps(ctx.config);
		if (!hasNewMiaoda(maps)) return;
		const extensionsDir = getExtensionsDir(ctx.configPath);
		const { entries, installs, allow } = maps;
		const oldSet = new Set(OLD_PLUGIN_NAMES);
		// Iterate backwards so splice() doesn't shift unvisited indices.
		if (allow) for (let i = allow.length - 1; i >= 0; i--) {
			const v = allow[i];
			if (typeof v === "string" && oldSet.has(v)) allow.splice(i, 1);
		}
		for (const name of OLD_PLUGIN_NAMES) {
			if (entries && name in entries) delete entries[name];
			if (installs && name in installs) delete installs[name];
			const target = node_path.default.join(extensionsDir, name);
			// Path-traversal guard: never rm anything that resolves outside
			// extensionsDir (defensive; names are a fixed list today).
			const rel = node_path.default.relative(extensionsDir, target);
			if (!rel || rel.startsWith("..") || node_path.default.isAbsolute(rel)) continue;
			// Best-effort removal: log and continue on failure so one stuck
			// dir doesn't block cleaning the rest.
			try {
				node_fs.default.rmSync(target, {
					recursive: true,
					force: true
				});
			} catch (e) {
				console.error(`[old_miaoda_plugins_cleanup] rmSync ${target} failed: ${e.message}`);
			}
		}
	}
};
OldMiaodaPluginsCleanupRule = __decorate([Rule({
	key: "old_miaoda_plugins_cleanup",
	dependsOn: ["config_syntax_check"],
	repairMode: "standard"
})], OldMiaodaPluginsCleanupRule);
|
|
1332
|
+
//#endregion
|
|
1333
|
+
//#region src/rules/lark-plugin-allow.ts
|
|
1334
|
+
const LARK_PLUGIN = "openclaw-lark";
// Either name satisfies the rule; LARK_PLUGIN is what repair adds.
const LARK_PLUGIN_NAMES = [LARK_PLUGIN, "feishu-openclaw-plugin"];
/** Ensures plugins.allow contains a feishu/lark plugin entry. */
let LarkPluginAllowRule = class LarkPluginAllowRule extends DiagnoseRule {
	validate(ctx) {
		const allow = getAllow(ctx.config);
		const found = LARK_PLUGIN_NAMES.some((name) => allow.includes(name));
		if (found) return { pass: true };
		return {
			pass: false,
			message: `plugins.allow 缺少飞书插件 (expected one of: ${LARK_PLUGIN_NAMES.join(", ")})`
		};
	}
	repair(ctx) {
		const plugins = ctx.config.plugins;
		const pluginsIsRecord = plugins != null && typeof plugins === "object" && !Array.isArray(plugins);
		if (!pluginsIsRecord) {
			// Replace a missing/malformed plugins section wholesale.
			ctx.config.plugins = { allow: [LARK_PLUGIN] };
			return;
		}
		const rawAllow = plugins.allow;
		// Keep the existing array instance (and any non-string members);
		// only append when no lark plugin name is present yet.
		const allowList = Array.isArray(rawAllow) ? rawAllow : [];
		const names = allowList.filter((e) => typeof e === "string");
		if (LARK_PLUGIN_NAMES.some((name) => names.includes(name))) return;
		allowList.push(LARK_PLUGIN);
		plugins.allow = allowList;
	}
};
LarkPluginAllowRule = __decorate([Rule({
	key: "lark_plugin_allow",
	dependsOn: ["config_syntax_check"],
	repairMode: "standard"
})], LarkPluginAllowRule);
|
|
1364
|
+
/** Safely read plugins.allow as a string array; any malformed shape
 * (missing, non-object plugins, non-array allow) yields []. */
function getAllow(config) {
	const plugins = config.plugins;
	const pluginsIsRecord = plugins != null && typeof plugins === "object" && !Array.isArray(plugins);
	if (!pluginsIsRecord) return [];
	const allow = plugins.allow;
	if (!Array.isArray(allow)) return [];
	return allow.filter((e) => typeof e === "string");
}
|
|
1371
|
+
//#endregion
|
|
923
1372
|
//#region src/check.ts
|
|
924
1373
|
function runCheck(input) {
|
|
925
1374
|
const result = { failedRules: {
|
|
@@ -932,12 +1381,14 @@ function runCheck(input) {
|
|
|
932
1381
|
const failedKeys = /* @__PURE__ */ new Set();
|
|
933
1382
|
let configParsed = false;
|
|
934
1383
|
let ctx = {
|
|
935
|
-
config: {
|
|
1384
|
+
config: {},
|
|
1385
|
+
configPath: input.configPath,
|
|
936
1386
|
vars: input.vars,
|
|
937
1387
|
providerDeps: {
|
|
938
1388
|
usesMiaodaProvider: false,
|
|
939
1389
|
usesMiaodaSecretProvider: false
|
|
940
|
-
}
|
|
1390
|
+
},
|
|
1391
|
+
templateVars: input.templateVars
|
|
941
1392
|
};
|
|
942
1393
|
for (const rule of rules) {
|
|
943
1394
|
const meta = rule.meta;
|
|
@@ -948,8 +1399,10 @@ function runCheck(input) {
|
|
|
948
1399
|
const deps = analyzeProviderDeps(parsed);
|
|
949
1400
|
ctx = {
|
|
950
1401
|
config: parsed,
|
|
1402
|
+
configPath: input.configPath,
|
|
951
1403
|
vars: input.vars,
|
|
952
|
-
providerDeps: deps
|
|
1404
|
+
providerDeps: deps,
|
|
1405
|
+
templateVars: input.templateVars
|
|
953
1406
|
};
|
|
954
1407
|
configParsed = true;
|
|
955
1408
|
} catch {
|
|
@@ -1001,12 +1454,14 @@ function runRepair(input) {
|
|
|
1001
1454
|
if (rule.meta.repairMode !== "standard") continue;
|
|
1002
1455
|
if (rule.meta.dependsOn?.includes("config_syntax_check")) continue;
|
|
1003
1456
|
rule.repair({
|
|
1004
|
-
config: {
|
|
1457
|
+
config: {},
|
|
1458
|
+
configPath: input.configPath,
|
|
1005
1459
|
vars: input.vars,
|
|
1006
1460
|
providerDeps: {
|
|
1007
1461
|
usesMiaodaProvider: false,
|
|
1008
1462
|
usesMiaodaSecretProvider: false
|
|
1009
|
-
}
|
|
1463
|
+
},
|
|
1464
|
+
templateVars: input.templateVars
|
|
1010
1465
|
});
|
|
1011
1466
|
}
|
|
1012
1467
|
const JSON5 = loadJSON5();
|
|
@@ -1022,8 +1477,10 @@ function runRepair(input) {
|
|
|
1022
1477
|
const deps = analyzeProviderDeps(config);
|
|
1023
1478
|
const ctx = {
|
|
1024
1479
|
config,
|
|
1480
|
+
configPath: input.configPath,
|
|
1025
1481
|
vars: input.vars,
|
|
1026
|
-
providerDeps: deps
|
|
1482
|
+
providerDeps: deps,
|
|
1483
|
+
templateVars: input.templateVars
|
|
1027
1484
|
};
|
|
1028
1485
|
let configDirty = false;
|
|
1029
1486
|
for (const rule of rules) {
|
|
@@ -1053,44 +1510,223 @@ function runRepair(input) {
|
|
|
1053
1510
|
}
|
|
1054
1511
|
}
|
|
1055
1512
|
//#endregion
|
|
1056
|
-
//#region src/
|
|
1057
|
-
|
|
1058
|
-
|
|
1513
|
+
//#region src/paths.ts
|
|
1514
|
+
/**
 * Central directory for all ephemeral diagnose/reset artifacts: task status
 * files (`reset-<taskId>.json`) and human-readable step logs
 * (`reset-<taskId>.log`). Keeping everything under one dir makes debugging
 * a stuck reset much easier — `ls /tmp/openclaw-diagnose/` shows every
 * recent run, and each run's log sits right next to its state.
 */
const DIAGNOSE_DIR = "/tmp/openclaw-diagnose";
// Shared builder for the per-task artifact paths below.
const resetArtifact = (taskId, ext) => `${DIAGNOSE_DIR}/reset-${taskId}.${ext}`;
/** Status JSON for one reset task. */
function resetResultFile(taskId) {
	return resetArtifact(taskId, "json");
}
/** Human-readable step log for one reset task. */
function resetLogFile(taskId) {
	return resetArtifact(taskId, "log");
}
/** Sandbox workspace root where openclaw config + agent state lives. */
const WORKSPACE_DIR = "/home/gem/workspace/agent";
/** File containing the provider key used by the openclaw miaoda provider. */
const PROVIDER_FILE_PATH = "/home/gem/workspace/.force/openclaw/miaoda-provider-key";
/** File containing the miaoda openclaw secrets JSON. */
const SECRETS_FILE_PATH = "/home/gem/workspace/.force/openclaw/miaoda-openclaw-secrets.json";
/** Absolute path to the openclaw config JSON. */
const CONFIG_PATH = `${WORKSPACE_DIR}/openclaw.json`;
|
|
1536
|
+
//#endregion
|
|
1537
|
+
//#region src/logger.ts
|
|
1538
|
+
/**
 * Shared CLI log file. Every log line the CLI emits — whether through
 * `console.error` (rules, helpers, errors) or through the per-task
 * `makeLogger` (reset worker) — is tee'd here so operators have a single
 * file to tail when diagnosing a sandbox.
 *
 * `/tmp` is ephemeral on sandbox restart; we rely on that for rotation
 * (no size-based rotation implemented).
 */
const CLI_LOG_FILE = "/tmp/openclaw-diagnose/cli.log";
/** Append one line to the shared cli.log. Any fs error is deliberately
 * swallowed — logging must never break the business flow. */
function appendCliLog(line) {
	try {
		const parent = node_path.default.dirname(CLI_LOG_FILE);
		if (!node_fs.default.existsSync(parent)) node_fs.default.mkdirSync(parent, { recursive: true });
		node_fs.default.appendFileSync(CLI_LOG_FILE, line);
	} catch {}
}
|
|
1557
|
+
let stderrMirrorInstalled = false;
/**
 * Install a process-wide `console.error` interceptor that mirrors each
 * line to BOTH the original stderr AND cli.log. Call once at CLI entry
 * before any subcommand dispatch; idempotent.
 *
 * Why console.error and not console.log: the CLI's stdout carries the
 * structured JSON result protocol consumed by sandbox_console and other
 * callers — any log line on stdout would corrupt JSON parsing. Rules,
 * helpers, and error paths therefore must route debug output through
 * console.error (stderr).
 */
function installStderrMirror() {
	if (stderrMirrorInstalled) return;
	stderrMirrorInstalled = true;
	const forward = console.error.bind(console);
	console.error = (...args) => {
		forward(...args);
		const rendered = args.map((a) => typeof a === "string" ? a : safeStringify(a)).join(" ");
		appendCliLog(`[${new Date().toISOString()}] ${rendered}\n`);
	};
}
|
|
1579
|
+
/** JSON.stringify with a String() fallback for values that cannot be
 * serialized (circular structures, BigInt, ...). */
function safeStringify(v) {
	let rendered;
	try {
		rendered = JSON.stringify(v);
	} catch {
		rendered = String(v);
	}
	return rendered;
}
|
|
1586
|
+
/** Build a per-task line logger that appends timestamped lines to
 * `logFile` and mirrors every line into the shared cli.log. Directory
 * creation and writes are best-effort; logging failures are swallowed. */
function makeLogger(logFile) {
	try {
		const parent = node_path.default.dirname(logFile);
		if (!node_fs.default.existsSync(parent)) node_fs.default.mkdirSync(parent, { recursive: true });
	} catch {}
	return (msg) => {
		const line = `[${new Date().toISOString()}] ${msg}\n`;
		try {
			node_fs.default.appendFileSync(logFile, line);
		} catch {}
		appendCliLog(line);
	};
}
|
|
1599
|
+
//#endregion
|
|
1600
|
+
//#region src/fs-utils.ts
|
|
1601
|
+
/**
|
|
1602
|
+
* Rename src → dst, falling back to `mv` (which handles cross-device copy)
|
|
1603
|
+
* when the kernel returns EXDEV.
|
|
1604
|
+
*
|
|
1605
|
+
* Sandbox filesystems can put sibling paths on different "devices" from
|
|
1606
|
+
* rename(2)'s point of view: bind mounts, overlayfs copy-up, and
|
|
1607
|
+
* mount-point children inside a single directory all trip EXDEV. Seen in
|
|
1608
|
+
* production when reset's atomic swap did
|
|
1609
|
+
* /home/gem/.npm-global/lib/node_modules/openclaw → openclaw.bak
|
|
1610
|
+
* and the openclaw subdir was a bind-mounted volume.
|
|
1611
|
+
*
|
|
1612
|
+
* Behavior:
|
|
1613
|
+
* - Happy path hits rename(2) — atomic, single syscall, microseconds.
|
|
1614
|
+
* - EXDEV path shells out to `mv`, which does rename() then copy+unlink
|
|
1615
|
+
* on failure. Non-atomic but correct; callers already have rollback
|
|
1616
|
+
* logic (install-openclaw restores from .bak) so loss of atomicity
|
|
1617
|
+
* only matters if the process dies mid-copy, and that's survivable.
|
|
1618
|
+
* - Any other error (ENOENT, EACCES, EBUSY...) rethrows as-is so callers
|
|
1619
|
+
* see the real problem instead of a misleading `mv` fallback failure.
|
|
1620
|
+
*/
|
|
1621
|
+
/** rename(2) with an `mv` fallback on EXDEV — full rationale in the
 * comment block above. Any non-EXDEV error rethrows untouched. */
function moveSafe(src, dst) {
	try {
		node_fs.default.renameSync(src, dst);
		return;
	} catch (e) {
		if (e?.code !== "EXDEV") throw e;
	}
	execCaptureErr(`mv ${shellQuote(src)} ${shellQuote(dst)}`);
}
|
|
1629
|
+
/**
|
|
1630
|
+
* Run a shell command, re-throwing with stderr attached on failure.
|
|
1631
|
+
*
|
|
1632
|
+
* Node's `execSync(..., { stdio: 'ignore' })` swallows stderr entirely —
|
|
1633
|
+
* callers only see "Command failed: <cmd>" with no hint of the real error
|
|
1634
|
+
* (ENOSPC, EROFS, "unrecognized option", etc.). Production debugging on
|
|
1635
|
+
* sandboxed boxes is painful without the underlying message, so we pipe
|
|
1636
|
+
* stderr, capture it, and embed it in the thrown Error. stdout stays
|
|
1637
|
+
* suppressed because the commands we run here (tar/mv) are silent on
|
|
1638
|
+
* success.
|
|
1639
|
+
*/
|
|
1640
|
+
/** Run `cmd` via the shell with stdout suppressed; on failure rethrow
 * with the captured stderr appended so callers see the real underlying
 * error (full rationale in the comment block above). */
function execCaptureErr(cmd) {
	try {
		(0, node_child_process.execSync)(cmd, { stdio: [
			"ignore",
			"ignore",
			"pipe"
		] });
		return;
	} catch (e) {
		const raw = e?.stderr;
		const detail = (typeof raw === "string" ? raw : raw?.toString("utf8") ?? "").trim();
		const base = e?.message ?? "command failed";
		if (detail) throw new Error(`${base}\nstderr: ${detail}`);
		throw new Error(base);
	}
}
|
|
1654
|
+
/** POSIX single-quote shell escape. Paths with embedded quotes are rare
 * but the token-file path conventions in sandboxes don't guarantee
 * cleanliness. */
function shellQuote(s) {
	const escaped = s.replace(/'/g, `'\\''`);
	return `'${escaped}'`;
}
|
|
1659
|
+
/**
|
|
1660
|
+
* Extract an npm-packed gzipped tarball.
|
|
1661
|
+
*
|
|
1662
|
+
* ## The problem this works around
|
|
1663
|
+
*
|
|
1664
|
+
* Some tarballs (openclaw's among them — they're not packed by vanilla
|
|
1665
|
+
* `npm pack`) include relative symlinks inside nested .bin/ dirs whose
|
|
1666
|
+
* targets contain `..`, e.g.
|
|
1667
|
+
* node_modules/<pkg>/node_modules/.bin/foo -> ../foo/bin/cli.js
|
|
1668
|
+
*
|
|
1669
|
+
* GNU tar classifies any symlink target with `..` or a leading `/` as
|
|
1670
|
+
* "dangerous" and defers its extraction to a post-files pass, while also
|
|
1671
|
+
* needing a post-files pass to restore directory permissions/mtimes. The
|
|
1672
|
+
* two passes race: the deferred-symlink handling mutates parent-dir inodes,
|
|
1673
|
+
* then the directory stat-restore pass does `fstatat()` and the recorded
|
|
1674
|
+
* inode doesn't match, firing
|
|
1675
|
+
*
|
|
1676
|
+
* tar: <path>: Directory renamed before its status could be extracted
|
|
1677
|
+
*
|
|
1678
|
+
* from `apply_nonancestor_delayed_set_stat()` in extract.c. This is an
|
|
1679
|
+
* `ERROR` (hard-fail, exit 2) — the `--warning=no-rename-directory`
|
|
1680
|
+
* keyword controls a different, incremental-archive code path and does
|
|
1681
|
+
* NOT silence this. Reference: Paul Eggert, bug-tar 2004-04:
|
|
1682
|
+
* https://lists.gnu.org/archive/html/bug-tar/2004-04/msg00021.html
|
|
1683
|
+
*
|
|
1684
|
+
* ## The fix
|
|
1685
|
+
*
|
|
1686
|
+
* Pass `--absolute-names` (aka `-P`). Per GNU tar docs, this disables the
|
|
1687
|
+
* "normalize dangerous names" logic — including the deferred-symlink pass
|
|
1688
|
+
* that's racing us. Also stops stripping leading `/`, but our tarballs
|
|
1689
|
+
* only contain relative (`./node_modules/...`) paths so there's nothing
|
|
1690
|
+
* to strip. Safe because:
|
|
1691
|
+
* - The tarball is sha512-verified upstream (downloadWithCache)
|
|
1692
|
+
* - All entry paths are relative, no absolute-path escape risk
|
|
1693
|
+
* - All dangerous symlink targets resolve within the extracted tree
|
|
1694
|
+
*
|
|
1695
|
+
* ## Belt-and-suspenders
|
|
1696
|
+
*
|
|
1697
|
+
* If some tar variant still emits the error despite -P, we fall through
|
|
1698
|
+
* to checking the stderr pattern: if every error line is the benign
|
|
1699
|
+
* "Directory renamed …" text (no real failures like ENOSPC/EACCES/gzip
|
|
1700
|
+
* CRC/etc.), swallow exit 2. Callers MUST still verify extraction
|
|
1701
|
+
* (e.g. `fs.existsSync(path.join(dest, 'package.json'))`) — tar's
|
|
1702
|
+
* `skip_this_one = 1` after the error means some dirs missed their
|
|
1703
|
+
* mtime/mode restore, but content was written.
|
|
1704
|
+
*/
|
|
1705
|
+
// See the large comment block above for the full rationale (-P flag,
// GNU tar "Directory renamed" false alarm, and the tolerance rules).
function extractTarballTolerant(tarball, destDir, opts = {}) {
	const strip = opts.stripComponents ?? 0;
	const stripFlag = strip > 0 ? ` --strip-components=${strip}` : "";
	// -P (--absolute-names) disables tar's dangerous-name handling, which
	// is what causes the racy deferred-symlink pass described above.
	const cmd = `tar -xzf ${shellQuote(tarball)} -C ${shellQuote(destDir)}${stripFlag} -P`;
	try {
		execCaptureErr(cmd);
		return;
	} catch (e) {
		const msg = e?.message ?? "";
		// The one benign failure we tolerate (content is still written).
		const hasFalseAlarm = msg.includes("Directory renamed before its status could be extracted");
		// Real failures that must never be swallowed, even if the benign
		// message also appears in the combined stderr.
		const hasFatal = [
			/Cannot open/i,
			/Cannot mkdir/i,
			/Cannot create/i,
			/No space left on device/i,
			/Disk quota exceeded/i,
			/Permission denied/i,
			/Read-only file system/i,
			/unrecognized option/i,
			/gzip:/i,
			/Unexpected EOF/i,
			/Invalid argument/i
		].some((r) => r.test(msg));
		if (!hasFalseAlarm || hasFatal) throw e;
		// Benign-only failure: swallow, but callers must verify content.
		console.error(`[tar] -P did not suppress "Directory renamed" on ${tarball}; tolerating (content must be verified by caller)`);
	}
}
|
|
1096
1732
|
//#endregion
|
|
@@ -1102,7 +1738,9 @@ function runBackup(input) {
|
|
|
1102
1738
|
*/
|
|
1103
1739
|
function startAsyncReset(ctxBase64) {
|
|
1104
1740
|
const taskId = (0, node_crypto.randomUUID)();
|
|
1105
|
-
const resultFile =
|
|
1741
|
+
const resultFile = resetResultFile(taskId);
|
|
1742
|
+
const log = makeLogger(resetLogFile(taskId));
|
|
1743
|
+
log(`=== startAsyncReset spawning worker for taskId=${taskId} ===`);
|
|
1106
1744
|
const initial = {
|
|
1107
1745
|
status: "running",
|
|
1108
1746
|
step: 0,
|
|
@@ -1114,7 +1752,7 @@ function startAsyncReset(ctxBase64) {
|
|
|
1114
1752
|
const dir = node_path.default.dirname(resultFile);
|
|
1115
1753
|
if (!node_fs.default.existsSync(dir)) node_fs.default.mkdirSync(dir, { recursive: true });
|
|
1116
1754
|
node_fs.default.writeFileSync(tmpPath, JSON.stringify(initial), "utf-8");
|
|
1117
|
-
|
|
1755
|
+
moveSafe(tmpPath, resultFile);
|
|
1118
1756
|
const child = (0, node_child_process.spawn)(process.execPath, [
|
|
1119
1757
|
process.argv[1],
|
|
1120
1758
|
"reset",
|
|
@@ -1126,6 +1764,7 @@ function startAsyncReset(ctxBase64) {
|
|
|
1126
1764
|
stdio: "ignore"
|
|
1127
1765
|
});
|
|
1128
1766
|
child.on("error", (err) => {
|
|
1767
|
+
log(`FATAL worker failed to start: ${err.message}`);
|
|
1129
1768
|
const failResult = {
|
|
1130
1769
|
status: "failed",
|
|
1131
1770
|
step: 0,
|
|
@@ -1137,39 +1776,285 @@ function startAsyncReset(ctxBase64) {
|
|
|
1137
1776
|
};
|
|
1138
1777
|
const errTmpPath = resultFile + ".tmp";
|
|
1139
1778
|
node_fs.default.writeFileSync(errTmpPath, JSON.stringify(failResult));
|
|
1140
|
-
|
|
1779
|
+
moveSafe(errTmpPath, resultFile);
|
|
1141
1780
|
});
|
|
1142
1781
|
child.unref();
|
|
1782
|
+
log(`spawned worker pid=${child.pid}`);
|
|
1143
1783
|
return { taskId };
|
|
1144
1784
|
}
|
|
1145
1785
|
//#endregion
|
|
1786
|
+
//#region src/oss/fetchManifest.ts
|
|
1787
|
+
const MANIFEST_PREFIX = "builtin/manifests/openclaw/recommended/";
const MANIFEST_SUFFIX = ".json";
/**
 * Fetch and parse the recommended-manifest JSON for `tag` via its signed
 * URL in `ossFileMap`.
 *
 * @param {Record<string, string>} ossFileMap - OSS key → signed URL map.
 * @param {string} tag - manifest tag (a supported version label).
 * @returns {Promise<any>} the parsed manifest JSON.
 * @throws when no signed URL exists for the tag (the message lists the
 *   tags that ARE available) or when the HTTP fetch fails.
 */
async function fetchManifest(ossFileMap, tag) {
	const key = `${MANIFEST_PREFIX}${tag}${MANIFEST_SUFFIX}`;
	const url = ossFileMap[key];
	if (!url) {
		// Derive each available tag structurally from the key. The previous
		// hard-coded `k.slice(39, -5)` duplicated the prefix/suffix lengths
		// and would silently produce garbage if either constant changed.
		const available = Object.keys(ossFileMap).filter((k) => k.startsWith(MANIFEST_PREFIX) && k.endsWith(MANIFEST_SUFFIX)).map((k) => k.slice(MANIFEST_PREFIX.length, -MANIFEST_SUFFIX.length));
		const availStr = available.length ? available.join(", ") : "(none)";
		throw new Error(`manifest signed URL missing for tag "${tag}" (key ${key}). Available tags in ossFileMap: ${availStr}. Either pass an available tag or update the studio_server TCC openclaw_upgrade_config supported_versions.`);
	}
	const res = await fetch(url);
	if (!res.ok) throw new Error(`fetch manifest failed: HTTP ${res.status} ${res.statusText}`);
	return await res.json();
}
|
|
1801
|
+
/**
 * Download `pkg`'s tarball into a content-addressed cache
 * (`<cacheRoot>/<first-16-chars-of-shasum>/<basename(ossKey)>`), verifying
 * sha512 integrity while streaming. Returns the cached file path; a cache
 * hit skips the network entirely.
 *
 * Integrity bypass (opts.skipIntegrity or OPENCLAW_DEBUG_SKIP_INTEGRITY=1)
 * logs loudly instead of throwing — debug-only escape hatch.
 */
async function downloadWithCache(pkg, ossFileMap, opts = {}) {
	const cacheRoot = opts.cacheRoot ?? "/tmp/openclaw-diagnose/resources";
	const shortHash = pkg.shasum.slice(0, 16);
	const destDir = node_path.default.join(cacheRoot, shortHash);
	// posix.basename: ossKey is an OSS object key, always '/'-separated.
	const destFile = node_path.default.join(destDir, node_path.default.posix.basename(pkg.ossKey));
	node_fs.default.mkdirSync(destDir, { recursive: true });
	// Cache hit: the dir is keyed by shasum, so an existing file is trusted.
	if (node_fs.default.existsSync(destFile)) return destFile;
	const url = ossFileMap[pkg.ossKey];
	if (!url) throw new Error(`signed URL missing for ${pkg.ossKey}`);
	if (!pkg.integrity.startsWith("sha512-")) throw new Error(`unsupported integrity format: ${pkg.integrity}`);
	const expected = pkg.integrity.slice(7);
	// pid + random suffix keeps concurrent downloads from clobbering each
	// other; the final moveSafe publishes atomically (same dir).
	const tmpFile = node_path.default.join(destDir, `.tmp.${process.pid}.${node_crypto.default.randomBytes(4).toString("hex")}`);
	try {
		const res = await fetch(url);
		if (!res.ok) throw new Error(`download failed: HTTP ${res.status}`);
		if (!res.body) throw new Error(`download failed: empty body for ${pkg.ossKey}`);
		const hasher = node_crypto.default.createHash("sha512");
		const source = node_stream.Readable.fromWeb(res.body);
		// Hash each chunk as it streams through — no second read pass.
		async function* teeAndHash(src) {
			for await (const chunk of src) {
				hasher.update(chunk);
				yield chunk;
			}
		}
		await (0, node_stream_promises.pipeline)(source, teeAndHash, node_fs.default.createWriteStream(tmpFile));
		// npm integrity strings carry the base64 digest after "sha512-".
		const actual = hasher.digest("base64");
		if (actual !== expected) {
			const envBypass = process.env.OPENCLAW_DEBUG_SKIP_INTEGRITY === "1";
			if (opts.skipIntegrity || envBypass) {
				const sourceLabel = opts.skipIntegrity ? "skipIntegrity=true" : "OPENCLAW_DEBUG_SKIP_INTEGRITY=1";
				console.error(`⚠ [downloadWithCache] INTEGRITY BYPASS for ${pkg.ossKey}: expected ${expected.slice(0, 12)}… got ${actual.slice(0, 12)}… — ${sourceLabel}. DO NOT use this flag in production.`);
			} else throw new Error(`integrity mismatch for ${pkg.ossKey}: expected ${expected} got ${actual}`);
		}
		moveSafe(tmpFile, destFile);
		return destFile;
	} catch (e) {
		// Clean up the partial temp file; never leave junk in the cache dir.
		try {
			node_fs.default.unlinkSync(tmpFile);
		} catch {}
		throw e;
	}
}
|
|
1843
|
+
/**
 * Install the openclaw CLI package for `openclawTag`: resolve the cli
 * package from the manifest, download (cached + integrity-checked),
 * extract to a staging dir, then swap into place with a .bak rollback.
 *
 * Swap protocol: stage → <target>.new, current → <target>.bak,
 * .new → <target>; on failure, .bak is restored and .new removed.
 */
async function installOpenclaw(openclawTag, ossFileMap, opts = {}) {
	const homeBase = opts.homeBase ?? "/home/gem";
	const t0 = Date.now();
	const pkg = (await fetchManifest(ossFileMap, openclawTag)).packages.find((p) => p.role === "cli" && p.name === "openclaw");
	if (!pkg) throw new Error("install-openclaw: role=cli,name=openclaw not found in manifest");
	const targetDir = opts.targetDir ?? node_path.default.join(homeBase, pkg.installPath);
	const bakDir = targetDir + ".bak";
	const newDir = targetDir + ".new";
	const tarball = await downloadWithCache(pkg, ossFileMap, opts);
	console.error(`[install-openclaw] tag=${openclawTag} shasum=${pkg.shasum.slice(0, 12)}...`);
	// Clear leftovers from a previous crashed install before staging.
	if (node_fs.default.existsSync(newDir)) node_fs.default.rmSync(newDir, {
		recursive: true,
		force: true
	});
	if (node_fs.default.existsSync(bakDir)) node_fs.default.rmSync(bakDir, {
		recursive: true,
		force: true
	});
	node_fs.default.mkdirSync(node_path.default.dirname(targetDir), { recursive: true });
	const tmpStage = node_fs.default.mkdtempSync(node_path.default.join(opts.tmpRoot ?? node_os.default.tmpdir(), "openclaw-install-"));
	try {
		extractTarballTolerant(tarball, tmpStage, { stripComponents: 1 });
		// Sanity check required because extractTarballTolerant may swallow
		// a benign tar error — verify content actually landed.
		if (!node_fs.default.existsSync(node_path.default.join(tmpStage, "package.json"))) throw new Error("extracted tarball missing package.json");
		moveSafe(tmpStage, newDir);
		const hadExisting = node_fs.default.existsSync(targetDir);
		try {
			// The swap: back up the live dir, then promote the staged one.
			if (hadExisting) moveSafe(targetDir, bakDir);
			moveSafe(newDir, targetDir);
		} catch (e) {
			// Rollback: restore .bak if the live dir is gone, drop .new.
			if (hadExisting && !node_fs.default.existsSync(targetDir) && node_fs.default.existsSync(bakDir)) try {
				moveSafe(bakDir, targetDir);
			} catch {}
			try {
				node_fs.default.rmSync(newDir, {
					recursive: true,
					force: true
				});
			} catch {}
			throw e;
		}
		// Success: the backup is no longer needed.
		if (hadExisting && node_fs.default.existsSync(bakDir)) node_fs.default.rmSync(bakDir, {
			recursive: true,
			force: true
		});
	} finally {
		// tmpStage still exists only when moveSafe(tmpStage, newDir) never
		// ran or failed; best-effort cleanup either way.
		if (node_fs.default.existsSync(tmpStage)) try {
			node_fs.default.rmSync(tmpStage, {
				recursive: true,
				force: true
			});
		} catch {}
	}
	console.error(`[install-openclaw] done in ${Date.now() - t0}ms`);
}
|
|
1897
|
+
/**
 * Install extension packages for manifest `tag`. Exactly one of
 * `opts.all` (every role=extension package) or `opts.names` (explicit
 * list, matched against both manifest `name` and `packageName`) must be
 * given. Downloads run in parallel; installs run sequentially; unless
 * `opts.skipConfigUpdate`, plugins.installs in openclaw.json is updated.
 */
async function installExtension(tag, ossFileMap, opts = {}) {
	const homeBase = opts.homeBase ?? "/home/gem";
	const hasAll = !!opts.all;
	const hasNames = (opts.names?.length ?? 0) > 0;
	if (hasAll && hasNames) throw new Error("install-extension: --all and --extension are mutually exclusive");
	if (!hasAll && !hasNames) throw new Error("install-extension: must provide --all or --extension=<name>");
	const allExts = (await fetchManifest(ossFileMap, tag)).packages.filter((p) => p.role === "extension");
	let targets;
	if (hasAll) targets = allExts;
	else {
		const wanted = new Set(opts.names);
		// A requested name may be either the manifest name or packageName.
		targets = allExts.filter((p) => wanted.has(p.name) || p.packageName != null && wanted.has(p.packageName));
		const foundKeys = /* @__PURE__ */ new Set();
		for (const t of targets) {
			foundKeys.add(t.name);
			if (t.packageName) foundKeys.add(t.packageName);
		}
		// Fail fast when any requested extension isn't in the manifest.
		const missing = opts.names.filter((n) => !foundKeys.has(n));
		if (missing.length > 0) throw new Error(`install-extension: not found in manifest: ${missing.join(", ")}`);
	}
	console.error(`[install-extension] tag=${tag} targets=${targets.length}`);
	const t0 = Date.now();
	// Parallel downloads (cached + integrity-checked) before any install.
	const tarballs = await Promise.all(targets.map(async (p) => {
		const tb = await downloadWithCache(p, ossFileMap, opts);
		console.error(`[install-extension] ${p.name}: downloaded`);
		return {
			pkg: p,
			tarball: tb
		};
	}));
	// Sequential installs so failures are attributable to one package.
	for (const { pkg, tarball } of tarballs) {
		installOne(pkg, tarball, homeBase);
		console.error(`[install-extension] ${pkg.name}: installed`);
	}
	if (!opts.skipConfigUpdate) updatePluginInstalls(opts.configPath ?? node_path.default.join(homeBase, "workspace/agent/openclaw.json"), targets);
	else console.error(`[install-extension] skipConfigUpdate=true — not touching openclaw.json`);
	console.error(`[install-extension] done ${targets.length}/${targets.length} in ${Date.now() - t0}ms`);
}
|
|
1935
|
+
/**
 * Merge each installed extension's installMetadata into openclaw.json's
 * plugins.installs[<pkg.name>]. The write is atomic (tmp file + rename).
 *
 * - No openclaw.json → log + return (not an error; some install contexts don't have it yet)
 * - Extension without installMetadata in manifest → that entry is skipped (counted + logged)
 * - Existing plugins.installs entries for other extensions are left untouched
 */
function updatePluginInstalls(configPath, installedPkgs) {
  if (!node_fs.default.existsSync(configPath)) {
    console.error(`[install-extension] no config at ${configPath} — skip plugins.installs update`);
    return;
  }
  const JSON5 = loadJSON5();
  const config = JSON5.parse(node_fs.default.readFileSync(configPath, "utf-8"));
  // Ensure the plugins.installs container exists and is object-shaped.
  if (!config.plugins || typeof config.plugins !== "object") config.plugins = {};
  if (!config.plugins.installs || typeof config.plugins.installs !== "object") config.plugins.installs = {};
  const installs = config.plugins.installs;
  let updated = 0;
  let skipped = 0;
  for (const pkg of installedPkgs) {
    if (!pkg.installMetadata) {
      skipped++;
      continue;
    }
    installs[pkg.name] = pkg.installMetadata;
    updated++;
  }
  // Atomic replace: write sibling tmp file, then rename over the original.
  const tmpPath = configPath + ".installs-tmp";
  node_fs.default.writeFileSync(tmpPath, JSON.stringify(config, null, 2), "utf-8");
  moveSafe(tmpPath, configPath);
  const skippedNote = skipped > 0 ? ` (${skipped} package(s) without installMetadata skipped)` : "";
  console.error(`[install-extension] plugins.installs updated: ${updated} entry(ies) in ${configPath}` + skippedNote);
}
|
|
1966
|
+
/**
 * Install a single extension tarball with an atomic directory swap:
 * extract into <dest>.new, validate it carries a package.json, move the
 * previous install aside as <dest>.old, promote the staging dir, then
 * delete the retired copy. On extraction/validation failure the staging
 * dir is cleaned up best-effort and the error rethrown.
 */
function installOne(pkg, tarball, homeBase) {
  const fs = node_fs.default;
  const destDir = node_path.default.join(homeBase, pkg.installPath);
  const stagingDir = `${destDir}.new`;
  const retiredDir = `${destDir}.old`;
  fs.mkdirSync(node_path.default.dirname(destDir), { recursive: true });
  // Always start from a clean staging directory.
  if (fs.existsSync(stagingDir)) {
    fs.rmSync(stagingDir, { recursive: true, force: true });
  }
  fs.mkdirSync(stagingDir);
  try {
    extractTarballTolerant(tarball, stagingDir, { stripComponents: 1 });
    if (!fs.existsSync(node_path.default.join(stagingDir, "package.json"))) {
      throw new Error(`extension tarball missing package.json: ${pkg.name}`);
    }
  } catch (e) {
    // Best-effort cleanup of the half-extracted staging dir before rethrowing.
    try {
      fs.rmSync(stagingDir, { recursive: true, force: true });
    } catch {}
    throw e;
  }
  const hadPrevious = fs.existsSync(destDir);
  if (hadPrevious) moveSafe(destDir, retiredDir);
  moveSafe(stagingDir, destDir);
  if (hadPrevious && fs.existsSync(retiredDir)) {
    fs.rmSync(retiredDir, { recursive: true, force: true });
  }
}
|
|
1996
|
+
/**
 * Download + extract a config/template package to its install destination.
 *
 * The manifest entry is located by (role, name). Tarball payloads (format
 * "tgz" or a .tgz/.tar.gz ossKey) are extracted without --strip-components
 * into `opts.dir` or `dirname(homeBase + pkg.installPath)`; any other
 * payload is copied in verbatim under its ossKey basename, so the final
 * artefact lands at exactly `homeBase + pkg.installPath`.
 */
async function downloadResource(tag, ossFileMap, opts) {
  const homeBase = opts.homeBase ?? "/home/gem";
  const manifest = await fetchManifest(ossFileMap, tag);
  const pkg = manifest.packages.find((p) => p.role === opts.role && p.name === opts.name);
  if (!pkg) throw new Error(`download-resource: not found in manifest: role=${opts.role} name=${opts.name}`);
  const file = await downloadWithCache(pkg, ossFileMap, opts);
  const fullInstallPath = node_path.default.join(homeBase, pkg.installPath);
  const extractDir = opts.dir ?? node_path.default.dirname(fullInstallPath);
  node_fs.default.mkdirSync(extractDir, { recursive: true });
  const fmt = (pkg.format ?? "").toLowerCase();
  const keyLower = pkg.ossKey.toLowerCase();
  const isTarball = fmt === "tgz" || keyLower.endsWith(".tgz") || keyLower.endsWith(".tar.gz");
  if (isTarball) {
    extractTarballTolerant(file, extractDir);
    console.error(`[download-resource] ${opts.role}/${opts.name}: extracted to ${extractDir}`);
    return;
  }
  // Non-archive resource: copy the raw file under its OSS basename.
  const basename = node_path.default.posix.basename(pkg.ossKey);
  node_fs.default.copyFileSync(file, node_path.default.join(extractDir, basename));
  console.error(`[download-resource] ${opts.role}/${opts.name}: copied ${basename} to ${extractDir}`);
}
|
|
2023
|
+
//#endregion
//#region src/oss/getOpenclawTag.ts
/**
 * Extracts the openclaw tag from the manifest key present in ossFileMap.
 * Avoids passing an extra ctx field — we already know the tag from the
 * well-known manifest key studio_server included.
 *
 * Manifest key shape: builtin/manifests/openclaw/recommended/<tag>.json
 *
 * @param {Record<string, unknown>} ossFileMap keyed by OSS object key
 * @returns {string} the <tag> portion of the first matching key
 * @throws {Error} when no key matches the expected prefix/suffix
 */
function getOpenclawTagFromOssFileMap(ossFileMap) {
  const prefix = "builtin/manifests/openclaw/recommended/";
  const suffix = ".json";
  for (const key of Object.keys(ossFileMap)) {
    // Derive the slice bounds from the declared prefix/suffix instead of the
    // previous hard-coded slice(39, -5), which silently breaks if either
    // literal is ever edited.
    if (key.startsWith(prefix) && key.endsWith(suffix)) {
      return key.slice(prefix.length, -suffix.length);
    }
  }
  throw new Error("cannot resolve openclaw tag: ossFileMap missing manifest key");
}
//#endregion
|
|
1146
2039
|
//#region src/reset.ts
/** Ordered human-readable labels for the 9 reset steps; step numbers passed to step(n) are 1-based indexes into this list. */
const STEPS = [
  "备份当前配置",
  "生成默认配置",
  "杀掉 openclaw 进程",
  "等待沙箱初始化完成",
  "确认 openclaw 版本",
  "合并核心备份配置",
  "检查启动脚本",
  "安装扩展",
  "启动并验证"
];
const TOTAL_STEPS = STEPS.length;
1167
2052
|
/** Atomically persist `result` as JSON at `resultFile` (tmp write + rename via moveSafe). */
function writeResultFile(resultFile, result) {
  const parentDir = node_path.default.dirname(resultFile);
  if (!node_fs.default.existsSync(parentDir)) {
    node_fs.default.mkdirSync(parentDir, { recursive: true });
  }
  const tmpPath = `${resultFile}.tmp`;
  node_fs.default.writeFileSync(tmpPath, JSON.stringify(result), "utf-8");
  moveSafe(tmpPath, resultFile);
}
|
|
1174
2059
|
function updateProgress(resultFile, step, startedAt) {
|
|
1175
2060
|
writeResultFile(resultFile, {
|
|
@@ -1201,9 +2086,39 @@ function markFailed(resultFile, step, error, startedAt) {
|
|
|
1201
2086
|
completedAt: (/* @__PURE__ */ new Date()).toISOString()
|
|
1202
2087
|
});
|
|
1203
2088
|
}
|
|
2089
|
+
/**
 * Download the template assets (config/openclaw.json + template/scripts) from
 * OSS into a scratch directory so step 2 (generateDefaultConfig) and step 7
 * (copyStartupScripts) can consume them as local files — the rest of the
 * orchestrator stays untouched.
 *
 * Called once before step 1. The caller owns stagedDir cleanup: it must
 * rm -rf it in a finally{} after the reset completes (or fails).
 */
async function stageTemplate(openclawTag, ossFileMap, stagedDir, configDir, log) {
  const fs = node_fs.default;
  // Rebuild the scratch dir from scratch on every run.
  if (fs.existsSync(stagedDir)) {
    fs.rmSync(stagedDir, { recursive: true, force: true });
  }
  fs.mkdirSync(stagedDir, { recursive: true });
  // openclaw.json goes to the scratch dir; scripts land straight in configDir.
  await downloadResource(openclawTag, ossFileMap, {
    role: "config",
    name: "openclaw.json",
    dir: stagedDir
  });
  await downloadResource(openclawTag, ossFileMap, {
    role: "template",
    name: "scripts",
    dir: configDir
  });
  log(`staged openclaw.json to ${stagedDir}, scripts directly to ${configDir}/scripts`);
}
|
|
1204
2116
|
/** Step 1: Backup current config as openclaw.json.bak.N */
|
|
1205
|
-
function backupCurrentConfig(configPath) {
|
|
1206
|
-
if (!fileExists(configPath))
|
|
2117
|
+
function backupCurrentConfig(configPath, log) {
|
|
2118
|
+
if (!fileExists(configPath)) {
|
|
2119
|
+
log("no existing config, skip backup");
|
|
2120
|
+
return;
|
|
2121
|
+
}
|
|
1207
2122
|
const dir = node_path.default.dirname(configPath);
|
|
1208
2123
|
let maxN = 0;
|
|
1209
2124
|
try {
|
|
@@ -1215,169 +2130,305 @@ function backupCurrentConfig(configPath) {
|
|
|
1215
2130
|
}
|
|
1216
2131
|
}
|
|
1217
2132
|
} catch {}
|
|
1218
|
-
|
|
2133
|
+
const bakPath = configPath + ".bak." + (maxN + 1);
|
|
2134
|
+
node_fs.default.copyFileSync(configPath, bakPath);
|
|
2135
|
+
log(`backed up to ${bakPath}`);
|
|
1219
2136
|
}
|
|
1220
2137
|
/** Step 2: Replace $$__XXX__ placeholders in the staged template and write the default config, logging how many substitutions occurred. */
function generateDefaultConfig(srcDir, configPath, templateVars, log) {
  const srcConfigPath = node_path.default.join(srcDir, "openclaw.json");
  if (!fileExists(srcConfigPath)) throw new Error("staged openclaw.json not found at " + srcConfigPath);
  let content = node_fs.default.readFileSync(srcConfigPath, "utf-8");
  let replaced = 0;
  for (const [placeholder, value] of Object.entries(templateVars)) {
    // split/join replaces every literal occurrence without regex escaping issues.
    const pieces = content.split(placeholder);
    replaced += pieces.length - 1;
    content = pieces.join(value);
  }
  node_fs.default.writeFileSync(configPath, content, "utf-8");
  log(`wrote ${configPath} (${replaced} placeholder(s) replaced, ${Object.keys(templateVars).length} provided)`);
}
|
|
1228
2151
|
/** Step 3: Kill all openclaw-gateway processes (best-effort), then pause 2s to let them exit. */
function killOpenclawProcesses(log) {
  try {
    // `|| true` plus the catch make this a no-op when nothing is running.
    shell("pkill -f openclaw-gateway || true", 5e3);
  } catch {}
  shell("sleep 2", 5e3);
  log("killed openclaw-gateway processes");
}
|
|
1235
2159
|
/**
 * Step 4: Wait for the sandbox's own init (init_sandbox.sh / concurrent npm
 * install) to finish before we start our own work. Two processes sharing
 * ~/.npm cache + competing for disk/network just makes everything crawl;
 * letting init finish first is the cleanest way to get exclusive access.
 * Polls every 10s up to `maxWaitMs`. If the deadline is hit we fall through
 * anyway — better to try than to fail the reset outright.
 *
 * Kept even after we switched off `npm install` because the sandbox init
 * script still runs `npm install` for other packages and holds cache locks.
 */
function waitForInitNpm(maxWaitMs, log) {
  const deadline = Date.now() + maxWaitMs;
  const ownPid = String(process.pid);
  let polls = 0;
  while (Date.now() < deadline) {
    polls++;
    let running = 0;
    try {
      // Count init/npm processes, excluding any pgrep line mentioning our own pid.
      const out = shell(`pgrep -af "init_sandbox.sh|npm install|npm i " | grep -v -- "${ownPid}" | wc -l`, 1e4);
      running = parseInt(out.trim(), 10) || 0;
    } catch {
      // A failing pipeline is treated the same as "nothing running".
      log(`poll ${polls}: no concurrent npm, proceeding`);
      return;
    }
    if (running === 0) {
      log(`poll ${polls}: no concurrent npm, proceeding`);
      return;
    }
    log(`poll ${polls}: ${running} concurrent npm/init process(es) still running, waiting 10s`);
    try {
      shell("sleep 10", 12e3);
    } catch {}
  }
  log(`deadline (${maxWaitMs}ms) hit after ${polls} poll(s), proceeding anyway`);
}
|
|
1260
|
-
/**
 * Step 5: Install openclaw from the OSS-provided tarball at the target tag,
 * then verify `openclaw --version` output contains that tag. No npm involved.
 */
async function step5InstallOpenclaw(openclawTag, ossFileMap, log) {
  log(`install-openclaw tag=${openclawTag}`);
  await installOpenclaw(openclawTag, ossFileMap);
  // `|| true` keeps shell() from throwing; the substring check below is the gate.
  const out = shell("openclaw --version 2>&1 || true", 1e4).trim();
  if (!out.includes(openclawTag)) {
    throw new Error(`openclaw version verify failed: got "${out}"`);
  }
  log(`openclaw version verified: ${out}`);
}
|
|
2206
|
+
/**
 * Step 6: Merge coreBackup from resetData + ensure allowedOrigins.
 *
 * Two independent passes over the freshly generated config file:
 *  1. If resetData.coreBackup exists: re-enrich backed-up agents into
 *     config.agents.list, force the "main" agent into team mode, derive a
 *     feishu accounts.default from top-level feishu policy keys, restore
 *     bindings and backed-up feishu accounts, then rewrite the file.
 *  2. Merge vars.expectedOrigins into gateway.controlUi.allowedOrigins
 *     (deduplicated; skipped entirely when the list already contains "*"),
 *     rewriting the file again.
 * Each pass re-reads the file so the second write includes the first.
 */
function mergeCoreBackupAndOrigins(configPath, vars, resetData, log) {
  const JSON5 = loadJSON5();
  const backup = resetData.coreBackup;
  if (backup) {
    // Pass 1: merge the core backup into the freshly generated config.
    const config = JSON5.parse(node_fs.default.readFileSync(configPath, "utf-8"));
    // Human-readable record of what got merged, for the log line at the end.
    const merged = [];
    if (backup.agents && backup.agents.length > 0) {
      if (!config.agents) config.agents = {};
      const agents = config.agents;
      if (!Array.isArray(agents.list)) agents.list = [];
      const configDir = node_path.default.dirname(configPath);
      for (const agent of backup.agents) {
        // Rebuild each backed-up agent entry; name mirrors id, and agentDir
        // is derived from the config directory layout.
        const enriched = {
          id: agent.id,
          name: agent.id,
          workspace: agent.workspace,
          agentDir: configDir + "/agents/" + agent.id + "/agent"
        };
        agents.list.push(enriched);
      }
      merged.push(`agents(+${backup.agents.length})`);
      const list = agents.list;
      // Ensure a "main" agent exists, make it the default, and allow it to
      // dispatch to every other agent (team mode).
      let mainIdx = list.findIndex((a) => a.id === "main");
      if (mainIdx < 0) {
        list.unshift({ id: "main" });
        mainIdx = 0;
      }
      list[mainIdx].subagents = { allowAgents: ["*"] };
      list[mainIdx].default = true;
      merged.push("main-team-mode");
      // Lift top-level feishu DM/group policy keys into accounts.default so
      // the multi-account path sees the same policies.
      const feishu = config.channels?.feishu;
      if (feishu) {
        if (!feishu.accounts) feishu.accounts = {};
        const accounts = feishu.accounts;
        const defaultAccount = {};
        for (const key of [
          "dmPolicy",
          "allowFrom",
          "groupPolicy",
          "groupAllowFrom"
        ]) if (feishu[key] !== void 0) defaultAccount[key] = feishu[key];
        if (Object.keys(defaultAccount).length > 0) {
          accounts.default = defaultAccount;
          merged.push("accounts.default");
        }
      }
    }
    if (backup.bindings && backup.bindings.length > 0) {
      // Bindings are restored wholesale (replace, not merge).
      config.bindings = backup.bindings;
      merged.push("bindings");
    }
    const backupAccounts = backup.channels?.feishu?.accounts;
    if (backupAccounts && Object.keys(backupAccounts).length > 0) {
      // Backed-up feishu accounts overlay any accounts already present
      // (Object.assign: backup wins on key collision).
      if (!config.channels) config.channels = {};
      const ch = config.channels;
      if (!ch.feishu) ch.feishu = {};
      const feishu = ch.feishu;
      if (!feishu.accounts) feishu.accounts = {};
      Object.assign(feishu.accounts, backupAccounts);
      merged.push("channels.feishu.accounts");
    }
    node_fs.default.writeFileSync(configPath, JSON.stringify(config, null, 2), "utf-8");
    log(`merged from coreBackup: [${merged.join(", ") || "nothing"}]`);
  } else log("no coreBackup in resetData, skip multi-agent merge");
  // Pass 2: merge expected CORS origins into gateway.controlUi.allowedOrigins.
  const expectedOrigins = Array.isArray(vars.expectedOrigins) ? vars.expectedOrigins : [];
  if (expectedOrigins.length === 0) {
    log("no expectedOrigins provided");
    return;
  }
  // Re-read so this pass sees pass 1's write.
  const config = JSON5.parse(node_fs.default.readFileSync(configPath, "utf-8"));
  if (!config.gateway) config.gateway = {};
  const gw = config.gateway;
  if (!gw.controlUi) gw.controlUi = {};
  const cui = gw.controlUi;
  // Keep only string entries from whatever is currently configured.
  const current = Array.isArray(cui.allowedOrigins) ? cui.allowedOrigins.filter((o) => typeof o === "string") : [];
  if (current.includes("*")) {
    // Wildcard already admits everything; adding specific origins is pointless.
    log("allowedOrigins already contains \"*\", skip origin merge");
    return;
  }
  // Append only origins not already present, preserving existing order.
  const seen = new Set(current);
  const added = [];
  const mergedOrigins = [...current];
  for (const o of expectedOrigins) if (!seen.has(o)) {
    mergedOrigins.push(o);
    seen.add(o);
    added.push(o);
  }
  cui.allowedOrigins = mergedOrigins;
  node_fs.default.writeFileSync(configPath, JSON.stringify(config, null, 2), "utf-8");
  log(`allowedOrigins: added ${added.length} (${JSON.stringify(added)}), total now ${mergedOrigins.length}`);
}
|
|
1295
|
-
/**
 * Step 7: Verify startup scripts landed in configDir/scripts/.
 *
 * stageTemplate extracts the scripts directly into configDir/scripts — there
 * is no intermediate copy any more. This step is therefore a verification
 * gate (not a copy action), keeping the step count at 9 and failing early
 * if the template tgz didn't carry a scripts/ dir.
 */
function verifyStartupScripts(configDir, log) {
  const targetScriptsDir = node_path.default.join(configDir, "scripts");
  const present = node_fs.default.existsSync(targetScriptsDir);
  if (!present) {
    throw new Error(`scripts dir missing at ${targetScriptsDir} — template download failed?`);
  }
  log(`scripts dir present at ${targetScriptsDir}`);
}
|
|
1303
2311
|
/**
 * Step 8: Install all extensions listed in the OSS manifest at `openclawTag`.
 * Replaces the old `plugins update --all` / pre-packed tar.gz flow — the
 * manifest is now the single source of truth for which extensions ship.
 * skipConfigUpdate is set because the reset flow manages openclaw.json itself.
 */
async function step8InstallExtensions(openclawTag, ossFileMap, log) {
  log(`install-extension --all tag=${openclawTag}`);
  const installOpts = {
    all: true,
    skipConfigUpdate: true
  };
  await installExtension(openclawTag, ossFileMap, installOpts);
  log("extensions installed");
}
|
|
1320
|
-
/** Step 9: Write secrets/provider key files (when both content and target path are supplied) and run scripts/restart.sh if present. */
function writeSecretsAndRestart(vars, resetData, configDir, log) {
  if (resetData.secretsContent && vars.secretsFilePath) {
    writeFile(vars.secretsFilePath, resetData.secretsContent);
    log(`wrote secrets to ${vars.secretsFilePath}`);
  }
  if (resetData.providerKeyContent && vars.providerFilePath) {
    writeFile(vars.providerFilePath, resetData.providerKeyContent);
    log(`wrote provider key to ${vars.providerFilePath}`);
  }
  const restartScript = node_path.default.join(configDir, "scripts", "restart.sh");
  if (!fileExists(restartScript)) {
    log(`no restart.sh at ${restartScript}, skip`);
    return;
  }
  const t = Date.now();
  shell(`bash '${restartScript}'`, 3e4);
  log(`restart.sh done in ${Date.now() - t}ms`);
}
|
|
1327
2341
|
/**
 * Run the 9-step reset process. Called from the worker entry point.
 *
 * Each step is an independent function. The orchestrator handles progress
 * reporting, error handling, and process-level exception guards.
 *
 * Template assets (openclaw.json + scripts/) are downloaded from OSS into a
 * scratch dir via `stageTemplate` before step 1 — there is no bundled
 * `template/` directory at runtime any more.
 *
 * Failure semantics: every failure path writes a "failed" result via
 * markFailed and then process.exit(1)s; the finally{} removes the staged
 * template dir in all cases.
 */
async function runReset(input, taskId, resultFile) {
  const startedAt = (/* @__PURE__ */ new Date()).toISOString();
  const { configPath, vars, resetData } = input;
  const configDir = node_path.default.dirname(configPath);
  // Per-task scratch dir for staged template assets; removed in finally{}.
  const stagedDir = node_path.default.join(DIAGNOSE_DIR, `reset-${taskId}-template`);
  let currentStep = 0;
  let stepStartedAt = Date.now();
  const log = makeLogger(resetLogFile(taskId));
  log(`=== reset started, taskId=${taskId}, pid=${process.pid} ===`);
  log(`configPath=${configPath}, configDir=${configDir}, stagedDir=${stagedDir}`);
  // ossFileMap is mandatory: every download below resolves through it.
  const ossFileMap = resetData.ossFileMap;
  if (!ossFileMap || Object.keys(ossFileMap).length === 0) {
    const err = "resetData.ossFileMap missing or empty";
    log(`ERROR: ${err}`);
    markFailed(resultFile, 0, err, startedAt);
    process.exit(1);
  }
  // Prefer an explicit tag from resetData; otherwise derive it from the
  // well-known manifest key inside ossFileMap.
  let openclawTag;
  if (resetData.openclawTag) openclawTag = resetData.openclawTag;
  else try {
    openclawTag = getOpenclawTagFromOssFileMap(ossFileMap);
  } catch (e) {
    const err = e.message;
    log(`ERROR: ${err}`);
    markFailed(resultFile, 0, err, startedAt);
    process.exit(1);
  }
  log(`openclawTag=${openclawTag}`);
  // Process-level guards: any escaped exception/rejection marks the current
  // step failed before exiting, so the result file is never left "running".
  process.on("uncaughtException", (err) => {
    log(`FATAL uncaughtException: ${err.message}\n${err.stack ?? ""}`);
    markFailed(resultFile, currentStep, `uncaught exception: ${err.message}`, startedAt);
    process.exit(1);
  });
  process.on("unhandledRejection", (reason) => {
    log(`FATAL unhandledRejection: ${String(reason)}`);
    markFailed(resultFile, currentStep, `unhandled rejection: ${reason}`, startedAt);
    process.exit(1);
  });
  /** Advance to the next step, updating the progress file and logging a boundary. */
  const step = (n) => {
    if (currentStep > 0) log(`step ${currentStep} "${STEPS[currentStep - 1]}" done in ${Date.now() - stepStartedAt}ms`);
    currentStep = n;
    stepStartedAt = Date.now();
    log(`--- step ${n}/${TOTAL_STEPS}: ${STEPS[n - 1]} ---`);
    updateProgress(resultFile, n, startedAt);
  };
  try {
    // Pre-step: fetch template assets (openclaw.json → stagedDir, scripts → configDir).
    await stageTemplate(openclawTag, ossFileMap, stagedDir, configDir, log);
    step(1);
    backupCurrentConfig(configPath, log);
    step(2);
    generateDefaultConfig(stagedDir, configPath, resetData.templateVars, log);
    step(3);
    killOpenclawProcesses(log);
    step(4);
    // Wait up to 10 minutes for the sandbox init's npm to release cache locks.
    waitForInitNpm(10 * 6e4, log);
    step(5);
    await step5InstallOpenclaw(openclawTag, ossFileMap, log);
    step(6);
    mergeCoreBackupAndOrigins(configPath, vars, resetData, log);
    step(7);
    verifyStartupScripts(configDir, log);
    step(8);
    await step8InstallExtensions(openclawTag, ossFileMap, log);
    step(9);
    writeSecretsAndRestart(vars, resetData, configDir, log);
    // step() only logs completion of the *previous* step, so step 9's
    // completion is logged explicitly here.
    log(`step 9 "${STEPS[8]}" done in ${Date.now() - stepStartedAt}ms`);
    log("=== reset completed successfully ===");
    markDone(resultFile, startedAt);
  } catch (e) {
    const err = e.message;
    log(`ERROR in step ${currentStep} "${STEPS[currentStep - 1] ?? "init"}" after ${Date.now() - stepStartedAt}ms: ${err}\n${e.stack ?? ""}`);
    markFailed(resultFile, currentStep, err, startedAt);
    process.exit(1);
  } finally {
    // Always drop the staged template scratch dir, success or failure.
    try {
      node_fs.default.rmSync(stagedDir, {
        recursive: true,
        force: true
      });
    } catch {}
  }
}
|
|
1383
2434
|
//#endregion
|
|
@@ -1388,7 +2439,7 @@ async function runReset(input, taskId, resultFile) {
|
|
|
1388
2439
|
* Returns immediately on terminal states (done/failed).
|
|
1389
2440
|
*/
|
|
1390
2441
|
function getResetTask(taskId) {
|
|
1391
|
-
const resultFile =
|
|
2442
|
+
const resultFile = resetResultFile(taskId);
|
|
1392
2443
|
const deadline = Date.now() + 3e4;
|
|
1393
2444
|
while (Date.now() < deadline) {
|
|
1394
2445
|
if (!node_fs.default.existsSync(resultFile)) {
|
|
@@ -1419,68 +2470,750 @@ function sleepSync(ms) {
|
|
|
1419
2470
|
Atomics.wait(arr, 0, 0, ms);
|
|
1420
2471
|
}
|
|
1421
2472
|
//#endregion
|
|
2473
|
+
//#region src/oss/resolveOssFileMap.ts
/**
 * Pick an OssFileMap in the order of decreasing specificity:
 * 1. `--oss_file_map=` flag — operator override (manual invocations, tests)
 * 2. `ctx.install.ossFileMap` — new shape (innerapi-driven DoctorCtx)
 * 3. `ctx.resetData.ossFileMap` — legacy shape (sandbox_console push path)
 *
 * Throws when none of the three yields a non-empty map. Empty maps are
 * treated as missing — an empty map is useless downstream and almost always
 * indicates a ctx wiring bug. This now applies to the flag path as well: a
 * flag that decodes to an empty map falls through to the ctx sources
 * instead of being returned verbatim.
 *
 * @param {{ossFileMapFlag?: string, installOssFileMap?: object, resetDataOssFileMap?: object}} args
 * @returns {object} the first non-empty OssFileMap
 * @throws {Error} when no source yields a non-empty map (or base64/JSON decoding of the flag fails)
 */
function resolveOssFileMap(args) {
  if (args.ossFileMapFlag) {
    const fromFlag = JSON.parse(Buffer.from(args.ossFileMapFlag, "base64").toString("utf-8"));
    // Previously an empty flag map was returned as-is, contradicting the
    // documented "empty maps are treated as missing" contract.
    if (fromFlag && Object.keys(fromFlag).length > 0) return fromFlag;
  }
  if (args.installOssFileMap && Object.keys(args.installOssFileMap).length > 0) return args.installOssFileMap;
  if (args.resetDataOssFileMap && Object.keys(args.resetDataOssFileMap).length > 0) return args.resetDataOssFileMap;
  throw new Error("ossFileMap missing: provide --oss_file_map flag, ctx.install.ossFileMap, or resetData.ossFileMap");
}
//#endregion
|
|
2491
|
+
//#region src/innerapi/fetchCtx.ts
|
|
2492
|
+
/**
|
|
2493
|
+
* CLI-side client for studio_server's `openclaw.get_doctor_ctx` inner API.
|
|
2494
|
+
*
|
|
2495
|
+
* Mirrors the proven pattern in
|
|
2496
|
+
* `packages/openclaw/extensions/miaoda/src/shared/innerapi-client.ts`:
|
|
2497
|
+
*
|
|
2498
|
+
* - `baseURL` from env `FORCE_AUTHN_INNERAPI_DOMAIN` (injected into every
|
|
2499
|
+
* openclaw sandbox).
|
|
2500
|
+
* - `platform: { enabled, tokenProvider: { type: 'file' } }` — the platform
|
|
2501
|
+
* plugin auto-attaches the sandbox's identity JWT loaded from the
|
|
2502
|
+
* rootfs token file. Same auth that the miaoda extension already uses.
|
|
2503
|
+
* - POST `/api/v1/studio/innerapi/integration_apis/call`
|
|
2504
|
+
* body = { apiName: 'openclaw.get_doctor_ctx', input: {}, bizType: 'openclaw' }
|
|
2505
|
+
* — the server-side APICall dispatches by `apiName` to
|
|
2506
|
+
* `GetDoctorCtxAPICall.Execute` whose `Name()` returns that string.
|
|
2507
|
+
* - Response envelope: { status_code, error_msg?, data: { success, output, ... } }.
|
|
2508
|
+
* `status_code` is a *string* ('0' = success).
|
|
2509
|
+
* Actual DoctorCtx lives in `data.output`.
|
|
2510
|
+
* - `x-tt-logid` header is logged on every failure path for cross-service
|
|
2511
|
+
* traceability.
|
|
2512
|
+
*
|
|
2513
|
+
* On HTTP 401 (sandbox identity token expired/invalid) we `process.exit(77)`
|
|
2514
|
+
* instead of throwing — the outer catch in `index.ts` cannot then mask auth
|
|
2515
|
+
* failure as a generic "Error: ...". Caller (e.g. sandbox_console) sees the
|
|
2516
|
+
* exit code and can refresh the token + retry.
|
|
2517
|
+
*/
|
|
2518
|
+
// Path of studio_server's generic inner-API dispatcher endpoint.
const INNERAPI_CALL_PATH = "/api/v1/studio/innerapi/integration_apis/call";
// Dispatch key: the server-side APICall whose Name() returns this string.
const API_NAME = "openclaw.get_doctor_ctx";
const BIZ_TYPE = "openclaw";
// Per-request timeout in milliseconds (3e4 = 30 000 ms).
const API_TIMEOUT_MS = 3e4;
// Max characters of a failed response body echoed into error messages.
const MAX_LOG_BODY = 500;
// Lazily-created singleton HttpClient; populated by getHttpClient().
let clientInstance = null;
|
|
2524
|
+
/**
 * Return the process-wide HttpClient singleton, creating it on first use.
 *
 * The base URL comes from env FORCE_AUTHN_INNERAPI_DOMAIN (asserted to be
 * present — the openclaw sandbox runtime is expected to inject it). The
 * platform plugin is enabled with a file-based token provider so the
 * sandbox identity JWT is attached to every request.
 */
function getHttpClient() {
	if (clientInstance) return clientInstance;
	const apiUrl = process.env.FORCE_AUTHN_INNERAPI_DOMAIN;
	(0, node_assert.default)(apiUrl, "missing env: FORCE_AUTHN_INNERAPI_DOMAIN (openclaw sandbox runtime must expose this)");
	clientInstance = new _lark_apaas_http_client.HttpClient({
		baseURL: apiUrl,
		timeout: API_TIMEOUT_MS,
		platform: {
			enabled: true,
			tokenProvider: { type: "file" }
		}
	});
	return clientInstance;
}
|
|
2539
|
+
/**
|
|
2540
|
+
* Fetch the sandbox's DoctorCtx by calling the innerapi's generic
|
|
2541
|
+
* `integration_apis/call` dispatcher with apiName=openclaw.get_doctor_ctx.
|
|
2542
|
+
*
|
|
2543
|
+
* Throws on HTTP (non-401) / decode / business errors. On 401 calls
|
|
2544
|
+
* `process.exit(77)` directly.
|
|
2545
|
+
*/
|
|
2546
|
+
/**
 * Fetch the sandbox's DoctorCtx from studio_server's generic
 * `integration_apis/call` dispatcher (apiName=openclaw.get_doctor_ctx).
 *
 * Returns the `data.output` object of the response envelope.
 * Throws on non-401 HTTP errors, decode failures, and business errors.
 * On HTTP 401 it calls `process.exit(77)` directly so the outer catch in
 * index.ts cannot mask an auth failure as a generic error.
 */
async function fetchCtxViaInnerApi() {
	const client = getHttpClient();
	// Request body for the generic dispatcher; input is intentionally empty.
	const body = {
		apiName: API_NAME,
		input: {},
		bizType: BIZ_TYPE
	};
	const start = Date.now();
	const headers = { "Content-Type": "application/json" };
	// Propagate the boe/ppe lane header when the caller's env sets it.
	const ttEnv = process.env.X_TT_ENV;
	if (ttEnv) headers["x-tt-env"] = ttEnv;
	let response;
	try {
		response = await client.post(INNERAPI_CALL_PATH, body, { headers });
	} catch (e) {
		const durationMs = Date.now() - start;
		// HttpError with a response attached: an HTTP-level failure we can classify.
		if (e instanceof _lark_apaas_http_client.HttpError && e.response) {
			const status = e.response.status;
			// x-tt-logid is logged on every failure path for cross-service tracing.
			const logId = e.response.headers.get("x-tt-logid") ?? "";
			if (status === 401) {
				console.error(`[CLI] innerapi 401 (logID: ${logId}) — sandbox identity token expired/invalid; exiting 77`);
				process.exit(77);
			}
			throw new Error(`fetchCtxViaInnerApi HTTP ${status} ${e.response.statusText} (logID: ${logId}, durationMs: ${durationMs})`);
		}
		// No response object: transport-level (network/timeout) failure.
		const msg = e instanceof Error ? e.message : String(e);
		throw new Error(`fetchCtxViaInnerApi network error: ${msg} (durationMs: ${durationMs})`);
	}
	const logId = response.headers.get("x-tt-logid") ?? "";
	const durationMs = Date.now() - start;
	if (!response.ok) {
		// Same 401 → exit(77) contract on the non-throwing response path.
		if (response.status === 401) {
			console.error(`[CLI] innerapi 401 (logID: ${logId}) — sandbox identity token expired/invalid; exiting 77`);
			process.exit(77);
		}
		// Best-effort body preview, truncated, for the error message.
		let preview = "";
		try {
			preview = (await response.text()).slice(0, MAX_LOG_BODY);
		} catch {}
		throw new Error(`fetchCtxViaInnerApi HTTP ${response.status} ${response.statusText} (logID: ${logId}, durationMs: ${durationMs})${preview ? ` body=${preview}` : ""}`);
	}
	let envelope;
	try {
		envelope = await response.json();
	} catch {
		throw new Error(`fetchCtxViaInnerApi decode error (logID: ${logId}, durationMs: ${durationMs})`);
	}
	// Envelope status_code is a *string*; '0' means success.
	if (envelope.status_code !== "0") throw new Error(`fetchCtxViaInnerApi API error (logID: ${logId}, durationMs: ${durationMs}): code=${envelope.status_code}, message=${envelope.error_msg ?? ""}`);
	// Transport succeeded but the API itself reported a business failure.
	if (envelope.data && envelope.data.success === false) throw new Error(`fetchCtxViaInnerApi business error (logID: ${logId}, durationMs: ${durationMs}): ${envelope.error_msg ?? JSON.stringify(envelope.data)}`);
	// The actual DoctorCtx payload lives in data.output.
	const output = envelope.data?.output;
	if (!output || typeof output !== "object") throw new Error(`fetchCtxViaInnerApi empty/invalid output (logID: ${logId}, durationMs: ${durationMs})`);
	return output;
}
|
|
2599
|
+
//#endregion
|
|
2600
|
+
//#region src/ctx/normalize.ts
|
|
2601
|
+
/**
|
|
2602
|
+
* Accept raw ctx from any of these sources and produce a uniform view:
|
|
2603
|
+
* - New shape (DoctorCtx): `{ app, install, secrets, reset }` — from innerapi.
|
|
2604
|
+
* - Old shape (ResetInput): `{ configPath, vars, resetData }` — from
|
|
2605
|
+
* sandbox_console push path.
|
|
2606
|
+
* Detection is structural: if the top-level has all four new-shape groups we
|
|
2607
|
+
* pass through; otherwise we remap from the old shape.
|
|
2608
|
+
*
|
|
2609
|
+
* Missing fields fall back to safe empty defaults (empty strings / arrays /
|
|
2610
|
+
* maps) so every downstream consumer can read e.g. `ctx.app.feishuAppID`
|
|
2611
|
+
* without an extra nullish guard.
|
|
2612
|
+
*/
|
|
2613
|
+
/**
 * Normalize a raw ctx object into the uniform { app, install, secrets, reset }
 * view, accepting either the new DoctorCtx shape (from innerapi) or the old
 * ResetInput shape (from the sandbox_console push path).
 *
 * Detection is purely structural: when all four new-shape groups are present
 * as objects the input is passed through; otherwise it is remapped from the
 * legacy fields. Missing values fall back to safe empties so downstream
 * readers need no extra nullish guards.
 */
function normalizeCtx(raw) {
	const r = raw ?? {};
	const isObj = (v) => Boolean(v) && typeof v === "object";
	// New shape: all four top-level groups present as objects → pass through.
	if (isObj(r.app) && isObj(r.install) && isObj(r.secrets) && isObj(r.reset)) {
		return {
			app: fillApp(r.app),
			install: {
				openclawTag: r.install.openclawTag,
				ossFileMap: r.install.ossFileMap ?? {}
			},
			secrets: {
				secretsContent: r.secrets.secretsContent ?? "",
				providerKeyContent: r.secrets.providerKeyContent ?? ""
			},
			reset: {
				templateVars: r.reset.templateVars ?? {},
				coreBackup: r.reset.coreBackup
			}
		};
	}
	// Old shape: remap from vars / resetData / repairData.
	const vars = r.vars ?? {};
	const resetData = r.resetData ?? {};
	const repairData = r.repairData ?? {};
	const install = {
		openclawTag: r.install?.openclawTag ?? r.openclawTag,
		ossFileMap: r.install?.ossFileMap ?? resetData.ossFileMap ?? r.ossFileMap ?? {}
	};
	const secrets = {
		secretsContent: resetData.secretsContent ?? repairData.secretsContent ?? "",
		providerKeyContent: resetData.providerKeyContent ?? repairData.providerKeyContent ?? ""
	};
	const reset = {
		templateVars: resetData.templateVars ?? {},
		coreBackup: resetData.coreBackup
	};
	return {
		app: fillApp(vars),
		install,
		secrets,
		reset
	};
}
|
|
2649
|
+
/**
 * Build the normalized `app` group from a source object, defaulting every
 * missing string field to "" and expectedOrigins to []. teamChatID is kept
 * only when it is a non-empty string; anything else becomes undefined.
 */
function fillApp(src) {
	const orDefault = (v) => v ?? "";
	const chat = src.teamChatID;
	const hasChat = typeof chat === "string" && chat.length > 0;
	return {
		feishuAppID: orDefault(src.feishuAppID),
		feishuAppSecret: orDefault(src.feishuAppSecret),
		teamChatID: hasChat ? chat : void 0,
		feishuOpenID: orDefault(src.feishuOpenID),
		openClawName: orDefault(src.openClawName),
		gatewayToken: orDefault(src.gatewayToken),
		innerAPIKey: orDefault(src.innerAPIKey),
		baseURL: orDefault(src.baseURL),
		miaodaDomain: orDefault(src.miaodaDomain),
		miaodaOrigin: orDefault(src.miaodaOrigin),
		expectedOrigins: Array.isArray(src.expectedOrigins) ? src.expectedOrigins : []
	};
}
|
|
2664
|
+
//#endregion
|
|
2665
|
+
//#region src/ctx-input.ts
|
|
2666
|
+
/**
|
|
2667
|
+
* Build legacy Check/Repair/Reset input shapes from a raw ctx object. Shared
|
|
2668
|
+
* by both the top-level CLI dispatcher (`index.ts`) and the new `doctor`
|
|
2669
|
+
* subcommand (`doctor.ts`), which need identical input synthesis.
|
|
2670
|
+
*
|
|
2671
|
+
* Behavior:
|
|
2672
|
+
* - If `raw` already carries the legacy `configPath + vars` shape (the one
|
|
2673
|
+
* sandbox_console push emits), it's trusted and returned as-is. This
|
|
2674
|
+
* keeps the existing sandbox_console push contract working.
|
|
2675
|
+
* - Otherwise `raw` is treated as the new-shape DoctorCtx (or anything
|
|
2676
|
+
* structurally close — `normalizeCtx` fills the gaps with safe empties)
|
|
2677
|
+
* and the legacy Vars shape is synthesised using the hardcoded sandbox
|
|
2678
|
+
* path invariants from `paths.ts`.
|
|
2679
|
+
*
|
|
2680
|
+
* The optional `configPathOverride` lets unit tests point the builder at a
|
|
2681
|
+
* tmp file; production callers should leave it undefined so the sandbox
|
|
2682
|
+
* invariant from `paths.ts` is used.
|
|
2683
|
+
*/
|
|
2684
|
+
/**
 * Build the legacy Check input shape from a raw ctx object.
 *
 * When `raw` already carries the legacy `configPath + vars` contract (as
 * emitted by sandbox_console push) it is returned as-is, with only
 * `configPath` swapped when an override is given. Otherwise the ctx is
 * normalized and the legacy Vars shape is synthesised using the sandbox
 * path invariants (CONFIG_PATH / PROVIDER_FILE_PATH / SECRETS_FILE_PATH).
 */
function buildCheckInput(raw, configPathOverride) {
	const r = raw ?? {};
	// Legacy push shape: trust it verbatim.
	if (r.configPath && r.vars) {
		return configPathOverride ? {
			...r,
			configPath: configPathOverride
		} : r;
	}
	const ctx = normalizeCtx(raw);
	const vars = {
		feishuAppID: ctx.app.feishuAppID,
		feishuAppSecret: ctx.app.feishuAppSecret,
		teamChatID: ctx.app.teamChatID,
		innerAPIKey: ctx.app.innerAPIKey,
		gatewayToken: ctx.app.gatewayToken,
		baseURL: ctx.app.baseURL,
		expectedOrigins: ctx.app.expectedOrigins,
		providerFilePath: PROVIDER_FILE_PATH,
		secretsFilePath: SECRETS_FILE_PATH
	};
	return {
		configPath: configPathOverride ?? CONFIG_PATH,
		vars,
		templateVars: ctx.reset.templateVars
	};
}
|
|
2710
|
+
/**
 * Build the legacy Repair input shape from a raw ctx object.
 *
 * Mirrors buildCheckInput, additionally carrying `repairData` with the
 * secrets/provider-key file contents used by repair rules.
 */
function buildRepairInput(raw, configPathOverride) {
	const r = raw ?? {};
	// Legacy push shape: trust it verbatim (only configPath may be overridden).
	if (r.configPath && r.vars) {
		return configPathOverride ? {
			...r,
			configPath: configPathOverride
		} : r;
	}
	const ctx = normalizeCtx(raw);
	const vars = {
		feishuAppID: ctx.app.feishuAppID,
		feishuAppSecret: ctx.app.feishuAppSecret,
		teamChatID: ctx.app.teamChatID,
		innerAPIKey: ctx.app.innerAPIKey,
		gatewayToken: ctx.app.gatewayToken,
		baseURL: ctx.app.baseURL,
		expectedOrigins: ctx.app.expectedOrigins,
		providerFilePath: PROVIDER_FILE_PATH,
		secretsFilePath: SECRETS_FILE_PATH
	};
	return {
		configPath: configPathOverride ?? CONFIG_PATH,
		vars,
		repairData: {
			secretsContent: ctx.secrets.secretsContent,
			providerKeyContent: ctx.secrets.providerKeyContent
		},
		templateVars: ctx.reset.templateVars
	};
}
|
|
2740
|
+
/**
 * Build the legacy Reset input shape from a raw ctx object.
 *
 * The legacy passthrough additionally requires `resetData` to be present;
 * otherwise the ctx is normalized and resetData is synthesised from the
 * reset / secrets / install groups.
 */
function buildResetInput(raw, configPathOverride) {
	const r = raw ?? {};
	// Legacy push shape (must carry resetData too): trust it verbatim.
	if (r.configPath && r.vars && r.resetData) {
		return configPathOverride ? {
			...r,
			configPath: configPathOverride
		} : r;
	}
	const ctx = normalizeCtx(raw);
	const vars = {
		feishuAppID: ctx.app.feishuAppID,
		feishuAppSecret: ctx.app.feishuAppSecret,
		teamChatID: ctx.app.teamChatID,
		innerAPIKey: ctx.app.innerAPIKey,
		gatewayToken: ctx.app.gatewayToken,
		baseURL: ctx.app.baseURL,
		expectedOrigins: ctx.app.expectedOrigins,
		providerFilePath: PROVIDER_FILE_PATH,
		secretsFilePath: SECRETS_FILE_PATH
	};
	return {
		configPath: configPathOverride ?? CONFIG_PATH,
		vars,
		resetData: {
			templateVars: ctx.reset.templateVars,
			secretsContent: ctx.secrets.secretsContent,
			providerKeyContent: ctx.secrets.providerKeyContent,
			coreBackup: ctx.reset.coreBackup,
			ossFileMap: ctx.install.ossFileMap,
			openclawTag: ctx.install.openclawTag
		}
	};
}
|
|
2773
|
+
//#endregion
|
|
2774
|
+
//#region src/doctor.ts
|
|
2775
|
+
/**
 * Doctor subcommand driver.
 *
 * Three modes, selected by opts:
 * - fix + explicit rules: targeted repair only (no check pass); returns
 *   { repair }. restartCommand is forced to "" in repairData.
 * - no fix: check only; returns { failedRules }.
 * - fix without rules: check, then repair every failing standard-mode rule;
 *   returns { check, repair }.
 */
async function runDoctor(rawCtx, opts) {
	const targeted = opts.fix && opts.rules.length > 0;
	if (targeted) {
		const input = buildRepairInput(rawCtx, opts.configPath);
		input.failedRules = opts.rules;
		// Preserve any synthesised repairData but blank the restart command.
		input.repairData = {
			...(input.repairData ?? {}),
			restartCommand: ""
		};
		return { repair: runRepair(input) };
	}
	const check = runCheck(buildCheckInput(rawCtx, opts.configPath));
	if (!opts.fix) return { failedRules: check.failedRules };
	const input = buildRepairInput(rawCtx, opts.configPath);
	input.failedRules = check.failedRules.standard;
	return {
		check,
		repair: runRepair(input)
	};
}
|
|
2794
|
+
//#endregion
|
|
2795
|
+
//#region src/help.ts
|
|
2796
|
+
// Binary name used throughout the help texts.
const BIN = "mclaw-diagnose";
/** One-line version banner shown at the top of the help output. */
function versionBanner() {
	return "v0.1.1-beta.1";
}
|
|
2800
|
+
const COMMANDS = [
|
|
2801
|
+
{
|
|
2802
|
+
name: "doctor",
|
|
2803
|
+
hidden: false,
|
|
2804
|
+
summary: "Diagnose openclaw config; apply repairs with --fix",
|
|
2805
|
+
help: `USAGE
|
|
2806
|
+
${BIN} doctor [--fix] [--rule=<key>]...
|
|
2807
|
+
|
|
2808
|
+
DESCRIPTION
|
|
2809
|
+
Fetches DoctorCtx via innerapi, then runs one of three modes depending
|
|
2810
|
+
on the flags. Output is a single JSON object on stdout.
|
|
2811
|
+
|
|
2812
|
+
MODES
|
|
2813
|
+
(no flags) Check-only. Runs the rule engine against the
|
|
2814
|
+
sandbox's current openclaw config and returns
|
|
2815
|
+
{ failedRules: { standard, ai, reset } }
|
|
2816
|
+
No files are mutated. Use this when you just
|
|
2817
|
+
want to know what's wrong.
|
|
2818
|
+
|
|
2819
|
+
--fix Check + repair-all. First runs the rule engine,
|
|
2820
|
+
then repairs every failing standard-mode rule.
|
|
2821
|
+
Returns
|
|
2822
|
+
{ check: {...}, repair: {...} }
|
|
2823
|
+
Use this as the default "fix everything" action.
|
|
2824
|
+
|
|
2825
|
+
--fix --rule=<key>... Targeted repair. Skips the check pass entirely
|
|
2826
|
+
and runs repair against the listed rule keys
|
|
2827
|
+
only. Unknown keys are silently ignored.
|
|
2828
|
+
Returns { repair: {...} } with only those
|
|
2829
|
+
rules' outcomes. Use this when you already
|
|
2830
|
+
know which rules need fixing.
|
|
2831
|
+
|
|
2832
|
+
OPTIONS
|
|
2833
|
+
--fix Enable repair. See MODES above.
|
|
2834
|
+
--rule=<key> Repair only this rule key. Repeatable. Only
|
|
2835
|
+
meaningful together with --fix.
|
|
2836
|
+
|
|
2837
|
+
EXAMPLES
|
|
2838
|
+
${BIN} doctor # check only
|
|
2839
|
+
${BIN} doctor --fix # check then repair all
|
|
2840
|
+
${BIN} doctor --fix --rule=gateway # repair 'gateway' only
|
|
2841
|
+
${BIN} doctor --fix --rule=gateway --rule=jwt_token # repair multiple
|
|
2842
|
+
|
|
2843
|
+
EXIT CODES
|
|
2844
|
+
0 success
|
|
2845
|
+
1 generic error
|
|
2846
|
+
77 innerapi authentication failed (sandbox JWT expired/invalid)
|
|
2847
|
+
`
|
|
2848
|
+
},
|
|
2849
|
+
{
|
|
2850
|
+
name: "check",
|
|
2851
|
+
hidden: true,
|
|
2852
|
+
summary: "Run rule-engine check only",
|
|
2853
|
+
help: `USAGE
|
|
2854
|
+
${BIN} check [--ctx=<base64>]
|
|
2855
|
+
|
|
2856
|
+
DESCRIPTION
|
|
2857
|
+
Runs the rule engine against the sandbox's current openclaw config and
|
|
2858
|
+
returns { failedRules }. Used by sandbox_console's push-style callers
|
|
2859
|
+
that already own the ctx — end-users should prefer \`doctor\`.
|
|
2860
|
+
|
|
2861
|
+
OPTIONS
|
|
2862
|
+
--ctx=<base64> Opaque ctx JSON (base64). When absent, fetched from
|
|
2863
|
+
innerapi (same path as doctor).
|
|
2864
|
+
`
|
|
2865
|
+
},
|
|
2866
|
+
{
|
|
2867
|
+
name: "repair",
|
|
2868
|
+
hidden: true,
|
|
2869
|
+
summary: "Apply standard-mode repairs",
|
|
2870
|
+
help: `USAGE
|
|
2871
|
+
${BIN} repair [--ctx=<base64>]
|
|
2872
|
+
|
|
2873
|
+
DESCRIPTION
|
|
2874
|
+
Runs repair for the failing rules listed inside the ctx's repairData.
|
|
2875
|
+
Intended for sandbox_console's push path — end-users should use
|
|
2876
|
+
\`doctor --fix\` instead.
|
|
2877
|
+
|
|
2878
|
+
OPTIONS
|
|
2879
|
+
--ctx=<base64> Opaque ctx JSON (base64). When absent, fetched from
|
|
2880
|
+
innerapi.
|
|
2881
|
+
`
|
|
2882
|
+
},
|
|
2883
|
+
{
|
|
2884
|
+
name: "reset",
|
|
2885
|
+
hidden: true,
|
|
2886
|
+
summary: "Re-initialize sandbox via the 9-step reset pipeline",
|
|
2887
|
+
help: `USAGE
|
|
2888
|
+
${BIN} reset --async [--ctx=<base64>]
|
|
2889
|
+
${BIN} reset --worker --task-id=<id> [--ctx=<base64>]
|
|
2890
|
+
|
|
2891
|
+
DESCRIPTION
|
|
2892
|
+
Two-phase pipeline driven asynchronously: the --async invocation spawns
|
|
2893
|
+
a detached worker and returns { taskId } immediately; the --worker
|
|
2894
|
+
invocation (spawned by --async) runs the actual 9 steps and writes
|
|
2895
|
+
progress to /tmp/openclaw-diagnose/reset-<taskId>.json.
|
|
2896
|
+
|
|
2897
|
+
Poll progress with \`${BIN} get_reset_task --task-id=<id>\`.
|
|
2898
|
+
|
|
2899
|
+
OPTIONS
|
|
2900
|
+
--async Start a detached worker and return taskId on stdout.
|
|
2901
|
+
--worker Internal — run the 9-step pipeline (launched by --async).
|
|
2902
|
+
--task-id=<id> Required with --worker; identifies the progress file.
|
|
2903
|
+
--ctx=<base64> Opaque ctx JSON; fetched from innerapi when absent.
|
|
2904
|
+
`
|
|
2905
|
+
},
|
|
2906
|
+
{
|
|
2907
|
+
name: "get_reset_task",
|
|
2908
|
+
hidden: true,
|
|
2909
|
+
summary: "Poll progress of an async reset task",
|
|
2910
|
+
help: `USAGE
|
|
2911
|
+
${BIN} get_reset_task --task-id=<id>
|
|
2912
|
+
|
|
2913
|
+
DESCRIPTION
|
|
2914
|
+
Reads /tmp/openclaw-diagnose/reset-<taskId>.json and prints its content
|
|
2915
|
+
as JSON on stdout. Safe to call repeatedly while reset is in progress.
|
|
2916
|
+
|
|
2917
|
+
OPTIONS
|
|
2918
|
+
--task-id=<id> Required. Matches the id returned by \`reset --async\`.
|
|
2919
|
+
`
|
|
2920
|
+
},
|
|
2921
|
+
{
|
|
2922
|
+
name: "install-openclaw",
|
|
2923
|
+
hidden: true,
|
|
2924
|
+
summary: "Download + install the openclaw tarball",
|
|
2925
|
+
help: `USAGE
|
|
2926
|
+
${BIN} install-openclaw <tag> [--ctx=<base64> | --oss_file_map=<base64>]
|
|
2927
|
+
|
|
2928
|
+
DESCRIPTION
|
|
2929
|
+
Downloads the openclaw@<tag> tgz via the signed OSS URL found in the
|
|
2930
|
+
ctx's install.ossFileMap, extracts it into a tmpfs staging dir, and
|
|
2931
|
+
atomically swaps it into /home/gem/.npm-global/lib/node_modules/openclaw.
|
|
2932
|
+
Used by step 5 of reset.
|
|
2933
|
+
|
|
2934
|
+
ARGUMENTS
|
|
2935
|
+
<tag> Openclaw version tag, e.g. 2026.4.11.
|
|
2936
|
+
|
|
2937
|
+
OPTIONS
|
|
2938
|
+
--ctx=<base64> Opaque ctx; ossFileMap is extracted from it.
|
|
2939
|
+
--oss_file_map=... Pre-built OSS URL map (base64 JSON); skips innerapi
|
|
2940
|
+
entirely. Wins over --ctx when both provided.
|
|
2941
|
+
`
|
|
2942
|
+
},
|
|
2943
|
+
{
|
|
2944
|
+
name: "install-extension",
|
|
2945
|
+
hidden: true,
|
|
2946
|
+
summary: "Install openclaw extension package(s)",
|
|
2947
|
+
help: `USAGE
|
|
2948
|
+
${BIN} install-extension <tag> (--all | --extension=<name>...) [options]
|
|
2949
|
+
|
|
2950
|
+
DESCRIPTION
|
|
2951
|
+
Downloads + installs one or more openclaw extension tarballs
|
|
2952
|
+
(feishu, miaoda, etc.) into <home_base>/workspace/agent/extensions/,
|
|
2953
|
+
then splices installMetadata into openclaw.json's plugins.installs
|
|
2954
|
+
unless --skip-config-update is passed.
|
|
2955
|
+
|
|
2956
|
+
ARGUMENTS
|
|
2957
|
+
<tag> Openclaw version tag; extension versions resolved
|
|
2958
|
+
against the matching manifest.
|
|
2959
|
+
|
|
2960
|
+
OPTIONS
|
|
2961
|
+
--all Install every extension in the manifest.
|
|
2962
|
+
--extension=<name> Install a specific extension (repeatable).
|
|
2963
|
+
--home_base=<dir> Override the /home/gem base (tests).
|
|
2964
|
+
--config_path=<p> Override the openclaw.json path (tests).
|
|
2965
|
+
--skip-config-update Leave plugins.installs in openclaw.json untouched.
|
|
2966
|
+
--ctx=<base64> Opaque ctx; see install-openclaw for semantics.
|
|
2967
|
+
--oss_file_map=... Pre-built OSS URL map (base64 JSON).
|
|
2968
|
+
`
|
|
2969
|
+
},
|
|
2970
|
+
{
|
|
2971
|
+
name: "download-resource",
|
|
2972
|
+
hidden: true,
|
|
2973
|
+
summary: "Download + extract a single OSS resource",
|
|
2974
|
+
help: `USAGE
|
|
2975
|
+
${BIN} download-resource <tag> --role=<role> --name=<name> [--dir=<dir>] [options]
|
|
2976
|
+
|
|
2977
|
+
DESCRIPTION
|
|
2978
|
+
Downloads one resource (template, config asset, etc.) identified by
|
|
2979
|
+
(role, name) from the manifest and extracts/copies it to <dir>.
|
|
2980
|
+
|
|
2981
|
+
ARGUMENTS
|
|
2982
|
+
<tag> Openclaw version tag.
|
|
2983
|
+
|
|
2984
|
+
OPTIONS
|
|
2985
|
+
--role=<role> Package role (e.g. template, config).
|
|
2986
|
+
--name=<name> Package name within the role.
|
|
2987
|
+
--dir=<dir> Target dir (defaults to dirname(pkg.installPath)).
|
|
2988
|
+
--ctx=<base64> Opaque ctx; ossFileMap is extracted from it.
|
|
2989
|
+
--oss_file_map=... Pre-built OSS URL map (base64 JSON).
|
|
2990
|
+
`
|
|
2991
|
+
}
|
|
2992
|
+
];
|
|
2993
|
+
/**
 * Extract the help-related flags from argv.
 * `help` is set by --help / -h; `expert` by -x / --expert (reveals hidden
 * commands in the top-level help listing).
 */
function parseHelpFlags(args) {
	const has = (flag) => args.includes(flag);
	return {
		help: has("--help") || has("-h"),
		expert: has("-x") || has("--expert")
	};
}
|
|
2999
|
+
/**
|
|
3000
|
+
* Render the top-level help to the given stream. When `expert` is true,
|
|
3001
|
+
* hidden commands are listed alongside the user-facing ones.
|
|
3002
|
+
*/
|
|
3003
|
+
function formatTopLevelHelp(expert) {
	// Split the command table into user-facing and internal groups.
	const visible = COMMANDS.filter((c) => !c.hidden);
	const hidden = COMMANDS.filter((c) => c.hidden);
	// Right-pad `s` to width `w` so summaries line up in one column.
	const pad = (s, w) => s + " ".repeat(Math.max(0, w - s.length));
	// Column width = longest command name + a 2-space gutter.
	const w = Math.max(...COMMANDS.map((c) => c.name.length)) + 2;
	const lines = [];
	lines.push(`${BIN} — OpenClaw config diagnose / repair CLI`);
	lines.push(versionBanner());
	lines.push("");
	lines.push("USAGE");
	lines.push(`  ${BIN} <command> [options]`);
	lines.push(`  ${BIN} <command> --help    per-command help`);
	lines.push(`  ${BIN} --help              this message`);
	lines.push("");
	lines.push("COMMANDS");
	for (const c of visible) lines.push(`  ${pad(c.name, w)}${c.summary}`);
	// Hidden (internal) commands only appear in expert (-x) mode.
	if (expert && hidden.length > 0) {
		lines.push("");
		lines.push("INTERNAL COMMANDS (revealed by -x)");
		for (const c of hidden) lines.push(`  ${pad(c.name, w)}${c.summary}`);
	}
	lines.push("");
	return lines.join("\n");
}
|
|
3027
|
+
/** Render per-command help. Returns undefined when the name is unknown. */
|
|
3028
|
+
/** Render per-command help. Returns undefined when the name is unknown. */
function formatCommandHelp(name) {
	return COMMANDS.find((entry) => entry.name === name)?.help;
}
|
|
3033
|
+
//#endregion
|
|
1422
3034
|
//#region src/index.ts
// Raw CLI argv (everything after `node <script>`).
const args = node_process.default.argv.slice(2);
// First non-flag token is the subcommand name; undefined when only flags given.
const mode = args.find((a) => !a.startsWith("-"));
|
|
3037
|
+
/**
|
|
3038
|
+
* Decode `--ctx=<base64>` into an opaque JSON object. Returns undefined when
|
|
3039
|
+
* the flag isn't present — the caller decides whether to fall back to the
|
|
3040
|
+
* innerapi or to error out.
|
|
3041
|
+
*
|
|
3042
|
+
* The object's shape is not enforced here; downstream code consumes it via
|
|
3043
|
+
* either `normalizeCtx()` (new path) or direct field access for the legacy
|
|
3044
|
+
* check/repair/reset contract still used by sandbox_console push.
|
|
3045
|
+
*/
|
|
3046
|
+
/**
 * Decode `--ctx=<base64>` into an opaque JSON object, or undefined when the
 * flag is absent (the caller then decides between the innerapi fallback and
 * an error). The shape is not validated here; downstream code consumes it
 * via normalizeCtx() or the legacy check/repair/reset contract.
 */
function parseCtxFlag(args) {
	const hit = args.find((a) => a.startsWith("--ctx="));
	if (hit === undefined) return void 0;
	const encoded = hit.slice("--ctx=".length);
	const json = Buffer.from(encoded, "base64").toString("utf-8");
	return JSON.parse(json);
}
|
|
3052
|
+
/**
|
|
3053
|
+
* Pull the first non-flag positional after the mode name.
|
|
3054
|
+
* (The mode itself is args[0] in the filtered set, so we skip index 0.)
|
|
3055
|
+
*/
|
|
3056
|
+
/**
 * Pull the first non-flag positional after the mode name.
 * (The mode itself is args[0] in the filtered set, so index 0 is skipped.)
 *
 * Fix: exclude anything starting with "-", not just "--". The mode
 * detector (`!a.startsWith("-")`) and parseHelpFlags both treat single-dash
 * tokens (-h, -x) as flags, so the old `"--"` test could return "-x" as the
 * tag when a short flag preceded the real positional.
 */
function getPositionalTag(args, modeName) {
	return args.find((a, i) => i > 0 && !a.startsWith("-") && a !== modeName);
}
|
|
3059
|
+
/**
 * Return the value of the first `--<name>=<value>` flag in argv, or
 * undefined when the flag is absent. An empty value ("--name=") yields "".
 */
function getFlag(args, name) {
	const prefix = `--${name}=`;
	const hit = args.find((a) => a.startsWith(prefix));
	return hit === undefined ? void 0 : hit.slice(prefix.length);
}
|
|
3063
|
+
/**
 * Collect every value of a repeatable `--<name>=<value>` flag, preserving
 * argv order. Returns [] when the flag never appears.
 */
function getMultiFlag(args, name) {
	const prefix = `--${name}=`;
	const values = [];
	for (const a of args) {
		if (a.startsWith(prefix)) values.push(a.slice(prefix.length));
	}
	return values;
}
|
|
3067
|
+
/**
 * CLI entry point: dispatches on the subcommand in `mode`.
 *
 * Help handling runs first (per-command help, then top-level help / usage
 * error when no command is given). Each command path prints a single JSON
 * object on stdout; failures exit(1) with a message on stderr (401 auth
 * failures inside fetchCtxViaInnerApi exit(77) before reaching here).
 */
async function main() {
	installStderrMirror();
	const helpFlags = parseHelpFlags(args);
	// `<cmd> --help`: print that command's help, or usage error if unknown.
	if (mode && helpFlags.help) {
		const body = formatCommandHelp(mode);
		if (body) {
			node_process.default.stdout.write(body);
			return;
		}
		node_process.default.stderr.write(`Unknown command: ${mode}\n\n`);
		node_process.default.stderr.write(formatTopLevelHelp(helpFlags.expert));
		node_process.default.exit(1);
	}
	// No command: `--help` prints help (exit 0); bare invocation is an error.
	if (!mode) {
		if (helpFlags.help) {
			node_process.default.stdout.write(formatTopLevelHelp(helpFlags.expert));
			return;
		}
		node_process.default.stderr.write(formatTopLevelHelp(helpFlags.expert));
		node_process.default.exit(1);
	}
	switch (mode) {
		// Legacy push-path commands: ctx from --ctx or fetched via innerapi.
		case "check":
		case "repair": {
			const raw = parseCtxFlag(args) ?? await fetchCtxViaInnerApi();
			if (mode === "check") console.log(JSON.stringify(runCheck(buildCheckInput(raw))));
			else console.log(JSON.stringify(runRepair(buildRepairInput(raw))));
			break;
		}
		// User-facing diagnose/repair command; ctx always fetched via innerapi.
		case "doctor": {
			const fix = args.includes("--fix");
			const rules = getMultiFlag(args, "rule");
			const result = await runDoctor(await fetchCtxViaInnerApi(), {
				fix,
				rules
			});
			console.log(JSON.stringify(result));
			break;
		}
		// Two-phase reset: --async spawns a detached worker and returns a
		// taskId; --worker (spawned by --async) runs the actual pipeline.
		case "reset":
			if (args.includes("--async")) {
				const ctxArg = args.find((a) => a.startsWith("--ctx="));
				let ctxBase64;
				// Keep the original base64 when provided; otherwise fetch and
				// re-encode so the worker gets the ctx on its command line.
				if (ctxArg) ctxBase64 = ctxArg.slice(6);
				else {
					const fetched = await fetchCtxViaInnerApi();
					ctxBase64 = Buffer.from(JSON.stringify(fetched), "utf-8").toString("base64");
				}
				console.log(JSON.stringify(startAsyncReset(ctxBase64)));
			} else if (args.includes("--worker")) {
				const taskId = args.find((a) => a.startsWith("--task-id="))?.slice(10);
				if (!taskId) {
					console.error("Error: --task-id=<id> is required for worker");
					node_process.default.exit(1);
				}
				// Progress is written to the per-task result file while running.
				const resultFile = resetResultFile(taskId);
				await runReset(buildResetInput(parseCtxFlag(args) ?? await fetchCtxViaInnerApi()), taskId, resultFile);
			} else {
				console.error("Usage: reset --async [--ctx=<base64>] | reset --worker --task-id=<id> [--ctx=<base64>]");
				node_process.default.exit(1);
			}
			break;
		// Poll the progress file produced by `reset --worker`.
		case "get_reset_task": {
			const taskId = args.find((a) => a.startsWith("--task-id="))?.slice(10);
			if (!taskId) {
				console.error("Error: --task-id=<id> is required");
				node_process.default.exit(1);
			}
			console.log(JSON.stringify(getResetTask(taskId)));
			break;
		}
		// Download + atomically install the openclaw tarball for <tag>.
		case "install-openclaw": {
			const tag = getPositionalTag(args, "install-openclaw");
			if (!tag) {
				console.error("Usage: install-openclaw <tag> [--ctx=<base64> | --oss_file_map=<base64>]");
				node_process.default.exit(1);
			}
			const ossFileMapFlag = getFlag(args, "oss_file_map");
			let installOssFileMap;
			// Only resolve a ctx when no pre-built OSS map flag is supplied.
			if (!ossFileMapFlag) installOssFileMap = normalizeCtx(parseCtxFlag(args) ?? await fetchCtxViaInnerApi()).install.ossFileMap;
			await installOpenclaw(tag, resolveOssFileMap({
				ossFileMapFlag,
				installOssFileMap
			}));
			console.log(JSON.stringify({ ok: true }));
			break;
		}
		// Download + install extension tarball(s), optionally updating config.
		case "install-extension": {
			const tag = getPositionalTag(args, "install-extension");
			if (!tag) {
				console.error("Usage: install-extension <tag> (--all | --extension=<name>...) [--home_base=<dir>] [--config_path=<path>] [--skip-config-update] [--ctx=<base64> | --oss_file_map=<base64>]");
				node_process.default.exit(1);
			}
			const all = args.includes("--all");
			const names = getMultiFlag(args, "extension");
			const homeBase = getFlag(args, "home_base");
			const configPath = getFlag(args, "config_path");
			const skipConfigUpdate = args.includes("--skip-config-update");
			const ossFileMapFlag = getFlag(args, "oss_file_map");
			let installOssFileMap;
			if (!ossFileMapFlag) installOssFileMap = normalizeCtx(parseCtxFlag(args) ?? await fetchCtxViaInnerApi()).install.ossFileMap;
			await installExtension(tag, resolveOssFileMap({
				ossFileMapFlag,
				installOssFileMap
			}), {
				all,
				// Empty selection list means "not specified", not "none".
				names: names.length > 0 ? names : void 0,
				homeBase,
				configPath,
				skipConfigUpdate
			});
			console.log(JSON.stringify({ ok: true }));
			break;
		}
		// Download + extract one (role, name) resource from the manifest.
		case "download-resource": {
			const tag = getPositionalTag(args, "download-resource");
			if (!tag) {
				console.error("Usage: download-resource <tag> --role=<role> --name=<name> [--dir=<dir>] [--ctx=<base64> | --oss_file_map=<base64>]");
				node_process.default.exit(1);
			}
			const role = getFlag(args, "role");
			const name = getFlag(args, "name");
			const dir = getFlag(args, "dir");
			if (!role || !name) {
				console.error("Usage: download-resource <tag> --role=<role> --name=<name> [--dir=<dir>]");
				node_process.default.exit(1);
			}
			const ossFileMapFlag = getFlag(args, "oss_file_map");
			let installOssFileMap;
			if (!ossFileMapFlag) installOssFileMap = normalizeCtx(parseCtxFlag(args) ?? await fetchCtxViaInnerApi()).install.ossFileMap;
			await downloadResource(tag, resolveOssFileMap({
				ossFileMapFlag,
				installOssFileMap
			}), {
				role,
				name,
				dir
			});
			console.log(JSON.stringify({ ok: true }));
			break;
		}
		// Unknown command: usage error + top-level help on stderr.
		default:
			node_process.default.stderr.write(`Unknown command: ${mode}\n\n`);
			node_process.default.stderr.write(formatTopLevelHelp(helpFlags.expert));
			node_process.default.exit(1);
	}
}
|
|
3214
|
+
// Top-level rejection handler: report the failure on stderr and exit(1).
// (innerapi 401s never reach here — fetchCtxViaInnerApi exits 77 itself.)
main().catch((failure) => {
	let msg;
	if (failure instanceof Error) msg = failure.message;
	else msg = String(failure);
	console.error(`Error: ${msg}`);
	node_process.default.exit(1);
});
|
|
1486
3219
|
//#endregion
|