metheus-governance-mcp-cli 0.2.79 → 0.2.80

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md CHANGED
@@ -226,12 +226,13 @@ metheus-governance-mcp-cli bot verify --provider telegram --bot-key main
226
226
  Behavior:
227
227
 
228
228
  - `bot setup` asks for `Telegram / Slack / KakaoTalk` first, then prompts with numbered actions.
229
- - `bot add` without flags now uses the shortest practical guided flow: provider -> token -> verify -> optional save-anyway only when verify fails -> optional username fallback only when verify cannot discover it -> optional default bot.
229
+ - `bot add` without flags now uses the shortest practical guided flow: provider -> token -> verify -> optional save-anyway only when verify fails -> optional username fallback only when verify cannot discover it -> optional AI model selection when the resolved defaults leave it blank -> optional default bot.
230
230
  - In the normal Telegram path, `bot add` does not ask for a local bot key, a server bot UUID, or an approval / worker / review / monitor choice.
231
231
  - For Telegram, the local env key is auto-generated from the matched server bot name or verified username, so you do not have to invent a separate local nickname first.
232
232
  - For Telegram, the CLI tries to match the verified bot identity against the server `me/bots` list first. If the server exposes one logical bot name with multiple roles such as `approval`, `worker`, `review`, and `monitor`, the CLI does not ask you to choose one UUID. It binds by server bot name and keeps the local role/AI fields empty so runtime can use the server bot role for each route.
233
233
  - When the Telegram username matches exactly one server bot role, the CLI still auto-fills the local `role_profile` and blank AI defaults from your local `bot-runner.json` `role_profiles` mapping.
234
234
  - `bot edit` without flags now uses the same sequential flow every time: provider -> bot entry -> username/token review -> grouped server-role review when needed -> AI field choices -> default choice -> save.
235
+ - When the CLI asks for `AI model`, it now shows client-specific model choices first and still allows manual entry when you need a custom model name.
235
236
  - If one server bot name maps to multiple server roles, `bot edit` keeps the Telegram env entry bound to the server identity and lets you review the local `role_profiles` for each detected role instead of forcing one role/profile UUID choice up front.
236
237
  - In the normal Telegram edit path, the CLI keeps or re-resolves the server bot binding automatically. It no longer asks you to pick `approval / worker / review / monitor` or a server bot UUID first.
237
238
  - `bot set-default` without flags starts a guided numbered flow: provider -> bot entry -> confirm default change.
@@ -832,7 +832,7 @@ async function editTelegramBotGuided(ui, parsed, selected, current, flags, deps)
832
832
  defaultValue: current.model ? "keep" : "change",
833
833
  });
834
834
  if (modelAction === "change") {
835
- current.model = await promptLine(ui, "AI model", current.model);
835
+ current.model = await promptAIModel(ui, deps, current.client, current.model);
836
836
  } else if (modelAction === "clear") {
837
837
  current.model = "";
838
838
  }
@@ -1449,6 +1449,60 @@ async function promptAIClient(ui, deps, defaultValue = "") {
1449
1449
  return String(selected?.value || "").trim();
1450
1450
  }
1451
1451
 
1452
+ function suggestedAIModelsForClient(clientName) {
1453
+ const normalizedClient = String(clientName || "").trim().toLowerCase();
1454
+ if (normalizedClient === "codex") {
1455
+ return [
1456
+ { value: "gpt-5-codex", label: "gpt-5-codex", description: "recommended Codex model" },
1457
+ ];
1458
+ }
1459
+ if (normalizedClient === "claude") {
1460
+ return [
1461
+ { value: "claude-sonnet-4", label: "claude-sonnet-4", description: "recommended Claude model" },
1462
+ { value: "claude-3.7-sonnet", label: "claude-3.7-sonnet", description: "older Claude Sonnet variant" },
1463
+ ];
1464
+ }
1465
+ if (normalizedClient === "gemini") {
1466
+ return [
1467
+ { value: "gemini-2.5-pro", label: "gemini-2.5-pro", description: "recommended Gemini model" },
1468
+ ];
1469
+ }
1470
+ if (normalizedClient === "sample") {
1471
+ return [
1472
+ { value: "sample", label: "sample", description: "sample adapter model placeholder" },
1473
+ ];
1474
+ }
1475
+ return [];
1476
+ }
1477
+
1478
+ async function promptAIModel(ui, deps, clientName, defaultValue = "", title = "Select AI model") {
1479
+ const normalizedClient = requireDependency(deps, "normalizeLocalAIClientName")(clientName || "", "");
1480
+ const currentValue = String(defaultValue || "").trim();
1481
+ const suggestions = suggestedAIModelsForClient(normalizedClient);
1482
+ const options = [
1483
+ { value: "", label: "(blank)", description: "leave AI model empty" },
1484
+ ];
1485
+ if (currentValue && !suggestions.some((item) => item.value === currentValue)) {
1486
+ options.push({
1487
+ value: currentValue,
1488
+ label: currentValue,
1489
+ description: "current saved model",
1490
+ });
1491
+ }
1492
+ options.push(...suggestions);
1493
+ options.push({
1494
+ value: "__manual__",
1495
+ label: "Manual entry",
1496
+ description: "type the model name yourself",
1497
+ });
1498
+ const selectedIndex = Math.max(0, options.findIndex((item) => item.value === currentValue));
1499
+ const selected = await promptChoice(ui, title, options, { defaultIndex: selectedIndex >= 0 ? selectedIndex : 0 });
1500
+ if (String(selected?.value || "") === "__manual__") {
1501
+ return String(await promptRequiredLine(ui, "AI model", currentValue)).trim();
1502
+ }
1503
+ return String(selected?.value || "").trim();
1504
+ }
1505
+
1452
1506
  async function promptPermissionMode(ui, defaultValue = "") {
1453
1507
  const options = [
1454
1508
  { value: "", label: "(blank)", description: "leave permission mode empty" },
@@ -1632,7 +1686,7 @@ async function promptRoleExecutionProfile(ui, roleName, deps) {
1632
1686
  await promptAIClient(ui, deps, current.client),
1633
1687
  "",
1634
1688
  );
1635
- current.model = await promptLine(ui, `AI model for role "${current.role}"`, current.model);
1689
+ current.model = await promptAIModel(ui, deps, current.client, current.model, `Select AI model for role "${current.role}"`);
1636
1690
  current.permissionMode = requireDependency(deps, "normalizeLocalAIPermissionMode")(
1637
1691
  await promptPermissionMode(ui, current.permissionMode),
1638
1692
  "",
@@ -2086,8 +2140,8 @@ async function addTelegramBot(ui, flags, deps) {
2086
2140
  : (autoUseServerRoleRouting
2087
2141
  ? getAIModelFlag(flags)
2088
2142
  : (autoApplyRoleDefaults
2089
- ? firstNonEmptyString([getAIModelFlag(flags), roleProfileDefaults.model])
2090
- : await promptLine(ui, "AI model", firstNonEmptyString([getAIModelFlag(flags), roleProfileDefaults.model]))));
2143
+ ? await promptAIModel(ui, deps, client, firstNonEmptyString([getAIModelFlag(flags), roleProfileDefaults.model]))
2144
+ : await promptAIModel(ui, deps, client, firstNonEmptyString([getAIModelFlag(flags), roleProfileDefaults.model]))));
2091
2145
  const permissionMode = requireDependency(deps, "normalizeLocalAIPermissionMode")(
2092
2146
  nonInteractive
2093
2147
  ? firstNonEmptyString([getAIPermissionModeFlag(flags), roleProfileDefaults.permissionMode])
@@ -2109,6 +2163,17 @@ async function addTelegramBot(ui, flags, deps) {
2109
2163
  "",
2110
2164
  );
2111
2165
 
2166
+ if (serverGroupMatched) {
2167
+ await maybePromptGroupedServerRoleProfiles(
2168
+ ui,
2169
+ {
2170
+ name: String(serverBot.name || username || botKey).trim(),
2171
+ roles: preferredRoleSort(serverBot.roles),
2172
+ },
2173
+ deps,
2174
+ );
2175
+ }
2176
+
2112
2177
  const nextParsed = upsertTelegramEntry(parsed, {
2113
2178
  key: botKey,
2114
2179
  serverBotID: String(getServerBotIDFlag(flags) || serverBot.botID || "").trim(),
@@ -364,14 +364,14 @@ export async function runSelftestBotCommands(push, deps) {
364
364
  "3", // select role to edit: worker
365
365
  "2", // worker: edit settings
366
366
  "3", // worker AI client: claude
367
- "worker-sonnet-4",
367
+ "2", // worker AI model: claude-sonnet-4
368
368
  "4", // worker permission: danger_full_access
369
369
  "4", // worker reasoning: high
370
370
  "y", // edit another role
371
371
  "3", // select role to edit: approval
372
372
  "2", // approval: edit settings
373
373
  "4", // approval AI client: gemini
374
- "approval-pro-2",
374
+ "2", // approval AI model: gemini-2.5-pro
375
375
  "4", // approval permission: danger_full_access
376
376
  "4", // approval reasoning: high
377
377
  "n", // stop editing roles
@@ -390,11 +390,11 @@ export async function runSelftestBotCommands(push, deps) {
390
390
  push(
391
391
  "bot_edit_grouped_server_roles_updates_role_profiles",
392
392
  String(safeObject(safeObject(groupedRunnerConfig.role_profiles || {}).worker).client || "") === "claude"
393
- && String(safeObject(safeObject(groupedRunnerConfig.role_profiles || {}).worker).model || "") === "worker-sonnet-4"
393
+ && String(safeObject(safeObject(groupedRunnerConfig.role_profiles || {}).worker).model || "") === "claude-sonnet-4"
394
394
  && String(safeObject(safeObject(groupedRunnerConfig.role_profiles || {}).worker).permission_mode || "") === "danger_full_access"
395
395
  && String(safeObject(safeObject(groupedRunnerConfig.role_profiles || {}).worker).reasoning_effort || "") === "high"
396
396
  && String(safeObject(safeObject(groupedRunnerConfig.role_profiles || {}).approval).client || "") === "gemini"
397
- && String(safeObject(safeObject(groupedRunnerConfig.role_profiles || {}).approval).model || "") === "approval-pro-2",
397
+ && String(safeObject(safeObject(groupedRunnerConfig.role_profiles || {}).approval).model || "") === "gemini-2.5-pro",
398
398
  `worker=${JSON.stringify(safeObject(safeObject(groupedRunnerConfig.role_profiles || {}).worker))} approval=${JSON.stringify(safeObject(safeObject(groupedRunnerConfig.role_profiles || {}).approval))}`,
399
399
  );
400
400
 
@@ -503,7 +503,7 @@ export async function runSelftestBotCommands(push, deps) {
503
503
  "2", // change AI client
504
504
  "4", // gemini
505
505
  "2", // change AI model
506
- "guided-gemini-pro",
506
+ "2", // gemini model: gemini-2.5-pro
507
507
  "2", // change permission mode
508
508
  "3", // workspace_write
509
509
  "2", // change reasoning effort
@@ -522,7 +522,7 @@ export async function runSelftestBotCommands(push, deps) {
522
522
  push(
523
523
  "bot_edit_guided_prompts_update_ai_binding_fields",
524
524
  String(guidedState.TELEGRAM_BOT_MONITORSELFTESTBOT_AI_CLIENT || "") === "gemini"
525
- && String(guidedState.TELEGRAM_BOT_MONITORSELFTESTBOT_AI_MODEL || "") === "guided-gemini-pro"
525
+ && String(guidedState.TELEGRAM_BOT_MONITORSELFTESTBOT_AI_MODEL || "") === "gemini-2.5-pro"
526
526
  && String(guidedState.TELEGRAM_BOT_MONITORSELFTESTBOT_AI_PERMISSION_MODE || "") === "workspace_write"
527
527
  && String(guidedState.TELEGRAM_BOT_MONITORSELFTESTBOT_AI_REASONING_EFFORT || "") === "medium",
528
528
  `client=${String(guidedState.TELEGRAM_BOT_MONITORSELFTESTBOT_AI_CLIENT || "")} model=${String(guidedState.TELEGRAM_BOT_MONITORSELFTESTBOT_AI_MODEL || "")}`,
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "metheus-governance-mcp-cli",
3
- "version": "0.2.79",
3
+ "version": "0.2.80",
4
4
  "description": "Metheus Governance MCP CLI (setup + stdio proxy)",
5
5
  "type": "module",
6
6
  "files": [