metheus-governance-mcp-cli 0.2.71 → 0.2.72

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md CHANGED
@@ -231,7 +231,8 @@ Behavior:
231
231
  - For Telegram, the local env key is auto-generated from the matched server bot name or verified username, so you do not have to invent a separate local nickname first.
232
232
  - For Telegram, the CLI tries to match the verified bot identity against the server `me/bots` list first. If the server exposes one logical bot name with multiple roles such as `approval`, `worker`, `review`, and `monitor`, the CLI does not ask you to choose one UUID. It binds by server bot name and keeps the local role/AI fields empty so runtime can use the server bot role for each route.
233
233
  - When the Telegram username matches exactly one server bot role, the CLI still auto-fills the local `role_profile` and blank AI defaults from your local `bot-runner.json` `role_profiles` mapping.
234
- - `bot edit` without flags now uses the same sequential flow every time: provider -> bot entry -> username/token review -> AI field choices -> default choice -> save.
234
+ - `bot edit` without flags now uses the same sequential flow every time: provider -> bot entry -> username/token review -> grouped server-role review when needed -> AI field choices -> default choice -> save.
235
+ - If one server bot name maps to multiple server roles, `bot edit` keeps the Telegram env entry bound to the server identity and lets you review the local `role_profiles` for each detected role instead of forcing one role/profile UUID choice up front.
235
236
  - In the normal Telegram edit path, the CLI keeps or re-resolves the server bot binding automatically. It no longer asks you to pick `approval / worker / review / monitor` or a server bot UUID first.
236
237
  - `bot set-default` without flags starts a guided numbered flow: provider -> bot entry -> confirm default change.
237
238
  - `bot verify` without flags starts a guided numbered flow: provider -> bot entry -> output format.
package/cli.mjs CHANGED
@@ -3362,6 +3362,7 @@ function buildBotCommandDeps() {
3362
3362
  loadProviderEnvConfig,
3363
3363
  verifyLocalProviderToken,
3364
3364
  loadBotRunnerConfig,
3365
+ saveBotRunnerConfig,
3365
3366
  listServerBots: async ({ provider, baseURL, timeoutSeconds }) => {
3366
3367
  const authFlowDeps = buildAuthFlowDeps();
3367
3368
  const resolved = resolveCurrentAccessToken(authFlowDeps);
@@ -595,6 +595,8 @@ function buildDerivedTelegramBotKey(parsedEnv, deps, preferredValues, { excludeK
595
595
 
596
596
  async function editTelegramBotGuided(ui, parsed, selected, current, flags, deps) {
597
597
  let serverRoleAutoResolved = "";
598
+ let groupedServerRoles = [];
599
+ let groupedServerName = "";
598
600
  if (current.serverBotID || current.__preferServerIdentity) {
599
601
  current.__preferServerIdentity = true;
600
602
  serverRoleAutoResolved = String(current.roleProfile || "").trim() || "__server_binding__";
@@ -641,9 +643,15 @@ async function editTelegramBotGuided(ui, parsed, selected, current, flags, deps)
641
643
  current.serverBotID = "";
642
644
  current.__preferServerIdentity = true;
643
645
  current.roleProfile = "";
646
+ current.client = "";
647
+ current.model = "";
648
+ current.permissionMode = "";
649
+ current.reasoningEffort = "";
644
650
  serverRoleAutoResolved = "__server_role_group__";
651
+ groupedServerRoles = preferredRoleSort(serverBot.roles);
652
+ groupedServerName = String(serverBot.name || current.username || current.key).trim();
645
653
  process.stdout.write(
646
- `Using server Telegram bot "${serverBot.name || current.username || current.key}" with roles: ${ensureArray(serverBot.roles).join(", ")}. Runtime will use the server role automatically.\n`,
654
+ `Using server Telegram bot "${groupedServerName}" with roles: ${groupedServerRoles.join(", ")}. Runtime will use the server role automatically.\n`,
647
655
  );
648
656
  } else if (String(serverBot.botID || "").trim()) {
649
657
  current.serverBotID = String(serverBot.botID || "").trim();
@@ -672,53 +680,64 @@ async function editTelegramBotGuided(ui, parsed, selected, current, flags, deps)
672
680
  }
673
681
  }
674
682
 
675
- const clientAction = await promptKeepChangeClear(ui, "AI client", {
676
- allowClear: true,
677
- defaultValue: current.client ? "keep" : "change",
678
- });
679
- if (clientAction === "change") {
680
- current.client = requireDependency(deps, "normalizeLocalAIClientName")(
681
- await promptAIClient(ui, deps, current.client),
682
- "",
683
+ if (serverRoleAutoResolved === "__server_role_group__") {
684
+ await maybePromptGroupedServerRoleProfiles(
685
+ ui,
686
+ {
687
+ name: groupedServerName,
688
+ roles: groupedServerRoles,
689
+ },
690
+ deps,
683
691
  );
684
- } else if (clientAction === "clear") {
685
- current.client = "";
686
- }
692
+ } else {
693
+ const clientAction = await promptKeepChangeClear(ui, "AI client", {
694
+ allowClear: true,
695
+ defaultValue: current.client ? "keep" : "change",
696
+ });
697
+ if (clientAction === "change") {
698
+ current.client = requireDependency(deps, "normalizeLocalAIClientName")(
699
+ await promptAIClient(ui, deps, current.client),
700
+ "",
701
+ );
702
+ } else if (clientAction === "clear") {
703
+ current.client = "";
704
+ }
687
705
 
688
- const modelAction = await promptKeepChangeClear(ui, "AI model", {
689
- allowClear: true,
690
- defaultValue: current.model ? "keep" : "change",
691
- });
692
- if (modelAction === "change") {
693
- current.model = await promptLine(ui, "AI model", current.model);
694
- } else if (modelAction === "clear") {
695
- current.model = "";
696
- }
706
+ const modelAction = await promptKeepChangeClear(ui, "AI model", {
707
+ allowClear: true,
708
+ defaultValue: current.model ? "keep" : "change",
709
+ });
710
+ if (modelAction === "change") {
711
+ current.model = await promptLine(ui, "AI model", current.model);
712
+ } else if (modelAction === "clear") {
713
+ current.model = "";
714
+ }
697
715
 
698
- const permissionAction = await promptKeepChangeClear(ui, "AI permission mode", {
699
- allowClear: true,
700
- defaultValue: current.permissionMode ? "keep" : "change",
701
- });
702
- if (permissionAction === "change") {
703
- current.permissionMode = requireDependency(deps, "normalizeLocalAIPermissionMode")(
704
- await promptPermissionMode(ui, current.permissionMode),
705
- "",
706
- );
707
- } else if (permissionAction === "clear") {
708
- current.permissionMode = "";
709
- }
716
+ const permissionAction = await promptKeepChangeClear(ui, "AI permission mode", {
717
+ allowClear: true,
718
+ defaultValue: current.permissionMode ? "keep" : "change",
719
+ });
720
+ if (permissionAction === "change") {
721
+ current.permissionMode = requireDependency(deps, "normalizeLocalAIPermissionMode")(
722
+ await promptPermissionMode(ui, current.permissionMode),
723
+ "",
724
+ );
725
+ } else if (permissionAction === "clear") {
726
+ current.permissionMode = "";
727
+ }
710
728
 
711
- const reasoningAction = await promptKeepChangeClear(ui, "AI reasoning effort", {
712
- allowClear: true,
713
- defaultValue: current.reasoningEffort ? "keep" : "change",
714
- });
715
- if (reasoningAction === "change") {
716
- current.reasoningEffort = requireDependency(deps, "normalizeLocalAIReasoningEffort")(
717
- await promptReasoningEffort(ui, current.reasoningEffort),
718
- "",
719
- );
720
- } else if (reasoningAction === "clear") {
721
- current.reasoningEffort = "";
729
+ const reasoningAction = await promptKeepChangeClear(ui, "AI reasoning effort", {
730
+ allowClear: true,
731
+ defaultValue: current.reasoningEffort ? "keep" : "change",
732
+ });
733
+ if (reasoningAction === "change") {
734
+ current.reasoningEffort = requireDependency(deps, "normalizeLocalAIReasoningEffort")(
735
+ await promptReasoningEffort(ui, current.reasoningEffort),
736
+ "",
737
+ );
738
+ } else if (reasoningAction === "clear") {
739
+ current.reasoningEffort = "";
740
+ }
722
741
  }
723
742
 
724
743
  const defaultChoice = await promptChoice(
@@ -1285,6 +1304,144 @@ function applyRoleProfileDefaults(entry, defaults, { overwrite = false } = {}) {
1285
1304
  }
1286
1305
  }
1287
1306
 
1307
+ function preferredRoleSort(roles) {
1308
+ const order = ["monitor", "review", "worker", "approval"];
1309
+ return ensureArray(roles).slice().sort((left, right) => {
1310
+ const leftText = String(left || "").trim();
1311
+ const rightText = String(right || "").trim();
1312
+ const leftIndex = order.indexOf(leftText);
1313
+ const rightIndex = order.indexOf(rightText);
1314
+ if (leftIndex >= 0 && rightIndex >= 0) return leftIndex - rightIndex;
1315
+ if (leftIndex >= 0) return -1;
1316
+ if (rightIndex >= 0) return 1;
1317
+ return leftText.localeCompare(rightText);
1318
+ });
1319
+ }
1320
+
1321
+ function currentRoleProfileState(roleName, deps) {
1322
+ const normalizedRole = requireDependency(deps, "normalizeRunnerRoleProfileName")(roleName || "");
1323
+ const defaults = resolveRoleProfileDefaults(normalizedRole, deps);
1324
+ const config = requireDependency(deps, "loadBotRunnerConfig")({ persistIfNeeded: true });
1325
+ const profile = safeObject(safeObject(config.roleProfiles || {})[normalizedRole]);
1326
+ return {
1327
+ role: normalizedRole,
1328
+ client: requireDependency(deps, "normalizeLocalAIClientName")(profile.client || defaults.client || "", ""),
1329
+ model: String(profile.model || defaults.model || "").trim(),
1330
+ permissionMode: requireDependency(deps, "normalizeLocalAIPermissionMode")(
1331
+ profile.permission_mode || profile.permissionMode || defaults.permissionMode || "",
1332
+ "",
1333
+ ),
1334
+ reasoningEffort: requireDependency(deps, "normalizeLocalAIReasoningEffort")(
1335
+ profile.reasoning_effort || profile.reasoningEffort || defaults.reasoningEffort || "",
1336
+ "",
1337
+ ),
1338
+ };
1339
+ }
1340
+
1341
+ function formatRoleProfileSummary(profile) {
1342
+ const current = safeObject(profile);
1343
+ return [
1344
+ current.client ? `client:${current.client}` : "client:(blank)",
1345
+ current.model ? `model:${current.model}` : "model:(blank)",
1346
+ current.permissionMode ? `permission:${current.permissionMode}` : "permission:(blank)",
1347
+ current.reasoningEffort ? `reasoning:${current.reasoningEffort}` : "reasoning:(blank)",
1348
+ ].join(" | ");
1349
+ }
1350
+
1351
+ function persistRoleProfileState(profile, deps) {
1352
+ const current = safeObject(profile);
1353
+ const role = requireDependency(deps, "normalizeRunnerRoleProfileName")(current.role || "");
1354
+ if (!role) return null;
1355
+ const config = requireDependency(deps, "loadBotRunnerConfig")({ persistIfNeeded: true });
1356
+ const nextRoleProfiles = {
1357
+ ...safeObject(config.roleProfiles || {}),
1358
+ [role]: {
1359
+ client: String(current.client || "").trim(),
1360
+ model: String(current.model || "").trim(),
1361
+ permission_mode: String(current.permissionMode || "").trim(),
1362
+ reasoning_effort: String(current.reasoningEffort || "").trim(),
1363
+ },
1364
+ };
1365
+ const nextConfig = {
1366
+ ...config,
1367
+ roleProfiles: nextRoleProfiles,
1368
+ };
1369
+ return requireDependency(deps, "saveBotRunnerConfig")(nextConfig, config.filePath);
1370
+ }
1371
+
1372
+ async function promptRoleExecutionProfile(ui, roleName, deps) {
1373
+ const current = currentRoleProfileState(roleName, deps);
1374
+ const action = await promptChoice(
1375
+ ui,
1376
+ `Role "${current.role}" local execution profile`,
1377
+ [
1378
+ {
1379
+ value: "keep",
1380
+ label: "Keep current settings",
1381
+ description: formatRoleProfileSummary(current),
1382
+ },
1383
+ {
1384
+ value: "edit",
1385
+ label: "Edit settings",
1386
+ description: "change AI client, model, permission, and reasoning",
1387
+ },
1388
+ ],
1389
+ { defaultIndex: 0 },
1390
+ );
1391
+ if (action?.value !== "edit") {
1392
+ return { changed: false, filePath: "" };
1393
+ }
1394
+ current.client = requireDependency(deps, "normalizeLocalAIClientName")(
1395
+ await promptAIClient(ui, deps, current.client),
1396
+ "",
1397
+ );
1398
+ current.model = await promptLine(ui, `AI model for role "${current.role}"`, current.model);
1399
+ current.permissionMode = requireDependency(deps, "normalizeLocalAIPermissionMode")(
1400
+ await promptPermissionMode(ui, current.permissionMode),
1401
+ "",
1402
+ );
1403
+ current.reasoningEffort = requireDependency(deps, "normalizeLocalAIReasoningEffort")(
1404
+ await promptReasoningEffort(ui, current.reasoningEffort),
1405
+ "",
1406
+ );
1407
+ const filePath = persistRoleProfileState(current, deps);
1408
+ if (filePath) {
1409
+ process.stdout.write(`Saved local execution profile for role "${current.role}" to ${filePath}\n`);
1410
+ }
1411
+ return { changed: true, filePath };
1412
+ }
1413
+
1414
+ async function maybePromptGroupedServerRoleProfiles(ui, serverBot, deps) {
1415
+ const roles = preferredRoleSort(ensureArray(serverBot?.roles).filter(Boolean));
1416
+ if (roles.length <= 1) {
1417
+ return false;
1418
+ }
1419
+ const editChoice = await promptChoice(
1420
+ ui,
1421
+ `Server bot "${String(serverBot?.name || "").trim() || "telegram"}" uses multiple roles. What should happen to the local execution settings?`,
1422
+ [
1423
+ {
1424
+ value: "keep",
1425
+ label: "Keep current role settings",
1426
+ description: "use the existing role_profiles defaults as-is",
1427
+ },
1428
+ {
1429
+ value: "review",
1430
+ label: "Review role settings",
1431
+ description: "check each role and change AI client/model/permission/reasoning when needed",
1432
+ },
1433
+ ],
1434
+ { defaultIndex: 0 },
1435
+ );
1436
+ if (editChoice?.value !== "review") {
1437
+ return false;
1438
+ }
1439
+ for (const role of roles) {
1440
+ await promptRoleExecutionProfile(ui, role, deps);
1441
+ }
1442
+ return true;
1443
+ }
1444
+
1288
1445
  function buildTemporaryTelegramEnvConfig({ token, apiBaseURL }) {
1289
1446
  return {
1290
1447
  ok: true,
@@ -1300,8 +1457,11 @@ async function verifyTelegramTokenCandidate(provider, token, apiBaseURL, timeout
1300
1457
 
1301
1458
  async function autoResolveTelegramServerBot(current, flags, deps) {
1302
1459
  const existingBotID = String(current?.serverBotID || "").trim();
1303
- const preferredUsername = String(current?.username || "").trim();
1304
- if (!preferredUsername) {
1460
+ const preferredIdentity = firstNonEmptyString([
1461
+ current?.username,
1462
+ current?.key,
1463
+ ]);
1464
+ if (!preferredIdentity) {
1305
1465
  return {
1306
1466
  botID: existingBotID,
1307
1467
  role: String(current?.roleProfile || "").trim(),
@@ -1314,14 +1474,14 @@ async function autoResolveTelegramServerBot(current, flags, deps) {
1314
1474
  return await resolveServerBotForNonInteractive(
1315
1475
  "telegram",
1316
1476
  {
1317
- "bot-name": preferredUsername,
1477
+ "bot-name": preferredIdentity,
1318
1478
  "base-url": flags["base-url"] || deps.defaultSiteURL,
1319
1479
  "timeout-seconds": intFromRaw(flags["timeout-seconds"], 15) || 15,
1320
1480
  },
1321
1481
  deps,
1322
1482
  {
1323
- preferredUsername,
1324
- preferredName: preferredUsername,
1483
+ preferredUsername: preferredIdentity,
1484
+ preferredName: preferredIdentity,
1325
1485
  },
1326
1486
  );
1327
1487
  } catch {
@@ -341,6 +341,51 @@ export async function runSelftestBotCommands(push, deps) {
341
341
  && String(groupedState.TELEGRAM_BOT_RYOAI_BOT_AI_PERMISSION_MODE || "") === "",
342
342
  `username=${String(groupedState.TELEGRAM_BOT_RYOAI_BOT_USERNAME || "")} server_bot_id=${String(groupedState.TELEGRAM_BOT_RYOAI_BOT_SERVER_BOT_ID || "")} role=${String(groupedState.TELEGRAM_BOT_RYOAI_BOT_ROLE_PROFILE || "")}`,
343
343
  );
344
+
345
+ await runCLI({
346
+ cliPath,
347
+ args: [
348
+ "bot", "edit",
349
+ "--base-url", `http://127.0.0.1:${groupedMock.port}`,
350
+ "--timeout-seconds", "5",
351
+ ],
352
+ env: {
353
+ ...env,
354
+ METHEUS_SCRIPTED_PROMPT_ANSWERS: JSON.stringify([
355
+ "1", // provider: telegram
356
+ "2", // bot entry: ryoai_bot
357
+ "1", // keep username
358
+ "1", // keep token
359
+ "2", // review grouped server role settings
360
+ "1", // monitor: keep current role profile settings
361
+ "1", // review: keep current role profile settings
362
+ "2", // worker: edit settings
363
+ "3", // worker AI client: claude
364
+ "worker-sonnet-4",
365
+ "4", // worker permission: danger_full_access
366
+ "4", // worker reasoning: high
367
+ "2", // approval: edit settings
368
+ "4", // approval AI client: gemini
369
+ "approval-pro-2",
370
+ "4", // approval permission: danger_full_access
371
+ "4", // approval reasoning: high
372
+ "1", // keep current default setting
373
+ "y", // save
374
+ ]),
375
+ },
376
+ });
377
+ const groupedRunnerConfigPath = path.join(tempHome, ".metheus", "bot-runner.json");
378
+ const groupedRunnerConfig = readJSON(fs.readFileSync(groupedRunnerConfigPath, "utf8"));
379
+ push(
380
+ "bot_edit_grouped_server_roles_updates_role_profiles",
381
+ String(safeObject(safeObject(groupedRunnerConfig.role_profiles || {}).worker).client || "") === "claude"
382
+ && String(safeObject(safeObject(groupedRunnerConfig.role_profiles || {}).worker).model || "") === "worker-sonnet-4"
383
+ && String(safeObject(safeObject(groupedRunnerConfig.role_profiles || {}).worker).permission_mode || "") === "danger_full_access"
384
+ && String(safeObject(safeObject(groupedRunnerConfig.role_profiles || {}).worker).reasoning_effort || "") === "high"
385
+ && String(safeObject(safeObject(groupedRunnerConfig.role_profiles || {}).approval).client || "") === "gemini"
386
+ && String(safeObject(safeObject(groupedRunnerConfig.role_profiles || {}).approval).model || "") === "approval-pro-2",
387
+ `worker=${JSON.stringify(safeObject(safeObject(groupedRunnerConfig.role_profiles || {}).worker))} approval=${JSON.stringify(safeObject(safeObject(groupedRunnerConfig.role_profiles || {}).approval))}`,
388
+ );
344
389
  } finally {
345
390
  await groupedMock.close();
346
391
  await runCLI({
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "metheus-governance-mcp-cli",
3
- "version": "0.2.71",
3
+ "version": "0.2.72",
4
4
  "description": "Metheus Governance MCP CLI (setup + stdio proxy)",
5
5
  "type": "module",
6
6
  "files": [