openclawsetup 2.4.7 → 2.4.8
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/bin/cli.mjs +80 -4
- package/package.json +1 -1
package/bin/cli.mjs
CHANGED
|
@@ -12,6 +12,7 @@
|
|
|
12
12
|
|
|
13
13
|
import { execSync, spawnSync } from 'child_process';
|
|
14
14
|
import { existsSync, accessSync, constants as fsConstants, rmSync, readFileSync } from 'fs';
|
|
15
|
+
import http from 'http';
|
|
15
16
|
import { homedir, platform } from 'os';
|
|
16
17
|
import { join } from 'path';
|
|
17
18
|
import { createInterface } from 'readline';
|
|
@@ -1236,6 +1237,48 @@ async function runHealthCheck(cliName, autoFix = false) {
|
|
|
1236
1237
|
|
|
1237
1238
|
// ============ 交互式菜单 ============
|
|
1238
1239
|
|
|
1240
|
+
/**
 * Fire a single chat request at the local gateway to confirm the configured
 * model actually answers.
 *
 * Resolves (never rejects) with a result object:
 *   { success: true,  message: <model reply text> }  on a parseable reply, or
 *   { success: false, error:   <human-readable reason> }  on any failure
 * (gateway error payload, non-JSON body, timeout, or connection error).
 *
 * @param {number|string} port  - local gateway port (host is 127.0.0.1)
 * @param {string} token        - bearer token for the Authorization header
 * @param {string} model        - model identifier to exercise
 * @returns {Promise<{success: boolean, message?: string, error?: string}>}
 */
function testModelChat(port, token, model) {
  return new Promise((resolve) => {
    const body = JSON.stringify({ model, input: '请用一句话回复你的模型名称' });
    const options = {
      hostname: '127.0.0.1',
      port,
      path: '/v1/responses',
      method: 'POST',
      timeout: 30000, // hard cap; the timeout handler below resolves a failure
      headers: {
        'Content-Type': 'application/json',
        'Authorization': `Bearer ${token}`,
        'Content-Length': Buffer.byteLength(body),
      },
    };

    const req = http.request(options, (res) => {
      const chunks = [];
      res.on('data', (chunk) => chunks.push(chunk));
      res.on('end', () => {
        const raw = chunks.join('');
        // Shared fallback for "body wasn't the JSON shape we expected".
        const fallback = {
          success: false,
          error: `HTTP ${res.statusCode}: ${raw.substring(0, 200)}`,
        };
        try {
          const parsed = JSON.parse(raw);
          const reply = parsed.output?.[0]?.content?.[0]?.text;
          if (reply) {
            resolve({ success: true, message: reply });
          } else if (parsed.error) {
            resolve({
              success: false,
              error: parsed.error.message || JSON.stringify(parsed.error),
            });
          } else {
            resolve(fallback);
          }
        } catch {
          resolve(fallback);
        }
      });
    });

    req.on('timeout', () => {
      req.destroy();
      resolve({ success: false, error: '请求超时 (30s)' });
    });
    req.on('error', (err) => {
      resolve({
        success: false,
        error: err.code === 'ECONNREFUSED' ? 'Gateway 未响应' : err.message,
      });
    });

    req.write(body);
    req.end();
  });
}
|
|
1281
|
+
|
|
1239
1282
|
async function showStatusInfo(cliName) {
|
|
1240
1283
|
const config = getConfigInfo();
|
|
1241
1284
|
const port = config.port || 18789;
|
|
@@ -1271,12 +1314,45 @@ async function showStatusInfo(cliName) {
|
|
|
1271
1314
|
}
|
|
1272
1315
|
|
|
1273
1316
|
// 模型配置
|
|
1317
|
+
let primaryModel = '';
|
|
1274
1318
|
if (config.raw) {
|
|
1275
|
-
|
|
1276
|
-
|
|
1277
|
-
|
|
1319
|
+
try {
|
|
1320
|
+
const json = JSON.parse(config.raw);
|
|
1321
|
+
const hasProviders = json.models?.providers && Object.keys(json.models.providers).length > 0;
|
|
1322
|
+
primaryModel = json.agents?.defaults?.model?.primary || '';
|
|
1323
|
+
if (hasProviders) {
|
|
1324
|
+
console.log(colors.green(' ✓ 已配置 AI 模型'));
|
|
1325
|
+
if (primaryModel) {
|
|
1326
|
+
console.log(colors.gray(` 主模型: ${primaryModel}`));
|
|
1327
|
+
}
|
|
1328
|
+
} else {
|
|
1329
|
+
console.log(colors.yellow(' ⚠ 未配置模型,请先选择「配置模型」'));
|
|
1330
|
+
}
|
|
1331
|
+
} catch {
|
|
1332
|
+
const hasProviders = config.raw.includes('"providers"');
|
|
1333
|
+
if (hasProviders) {
|
|
1334
|
+
console.log(colors.green(' ✓ 已配置 AI 模型'));
|
|
1335
|
+
} else {
|
|
1336
|
+
console.log(colors.yellow(' ⚠ 未配置模型,请先选择「配置模型」'));
|
|
1337
|
+
}
|
|
1338
|
+
}
|
|
1339
|
+
}
|
|
1340
|
+
|
|
1341
|
+
// 模型对话测试
|
|
1342
|
+
if (config.token && config.token !== '<未配置>' && primaryModel) {
|
|
1343
|
+
console.log(colors.gray(' … 正在测试模型对话...'));
|
|
1344
|
+
const testResult = await testModelChat(port, config.token, primaryModel);
|
|
1345
|
+
if (testResult.success) {
|
|
1346
|
+
console.log(colors.green(' ✓ 模型对话正常'));
|
|
1347
|
+
if (testResult.message) {
|
|
1348
|
+
const reply = testResult.message.length > 60
|
|
1349
|
+
? testResult.message.substring(0, 60) + '...'
|
|
1350
|
+
: testResult.message;
|
|
1351
|
+
console.log(colors.gray(` 回复: ${reply}`));
|
|
1352
|
+
}
|
|
1278
1353
|
} else {
|
|
1279
|
-
console.log(colors.
|
|
1354
|
+
console.log(colors.red(` ✗ 模型对话失败: ${testResult.error}`));
|
|
1355
|
+
console.log(colors.yellow(' → 请选择「配置模型」检查 API Key 和节点配置'));
|
|
1280
1356
|
}
|
|
1281
1357
|
}
|
|
1282
1358
|
|