metheus-governance-mcp-cli 0.2.82 → 0.2.83

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md CHANGED
@@ -232,7 +232,15 @@ Behavior:
232
232
  - For Telegram, the CLI tries to match the verified bot identity against the server `me/bots` list first. If the server exposes one logical bot name with multiple roles such as `approval`, `worker`, `review`, and `monitor`, the CLI does not ask you to choose one UUID. It binds by server bot name and keeps the local role/AI fields empty so runtime can use the server bot role for each route.
233
233
  - When the Telegram username matches exactly one server bot role, the CLI still auto-fills the local `role_profile` and blank AI defaults from your local `bot-runner.json` `role_profiles` mapping.
234
234
  - `bot edit` without flags now uses the same sequential flow every time: provider -> bot entry -> username/token review -> grouped server-role review when needed -> AI field choices -> default choice -> save.
235
- - when the CLI asks for `AI model`, it now shows client-specific model choices first and still allows manual entry when you need a custom model name.
235
+ - when the CLI asks for `AI model`, it now shows client-specific display labels first and still allows manual entry when you need a custom model name.
236
+ - display labels are converted to tested execution model IDs at runtime:
237
+ - `GPT-5.4` -> `gpt-5.2`
238
+ - `GPT-5.3-CODEX` -> `gpt-5.2-codex`
239
+ - `GPT-5.3-CODEX-Spark` -> `gpt-5.1-codex-mini`
240
+ - `Sonnet 4.6r` -> `sonnet`
241
+ - `Haiku 4.5` -> `haiku`
242
+ - `Opus 4.6` -> `opus`
243
+ - `Gemini 3.1 Pro` -> `gemini-2.5-pro`
236
244
  - if one server bot name maps to multiple server roles, `bot edit` keeps the Telegram env entry bound to the server identity and lets you review the local `role_profiles` for each detected role instead of forcing one role/profile UUID choice up front.
237
245
  - In the normal Telegram edit path, the CLI keeps or re-resolves the server bot binding automatically. It no longer asks you to pick `approval / worker / review / monitor` or a server bot UUID first.
238
246
  - `bot set-default` without flags starts a guided numbered flow: provider -> bot entry -> confirm default change.
package/cli.mjs CHANGED
@@ -10,6 +10,7 @@ import http from "node:http";
10
10
  import https from "node:https";
11
11
  import {
12
12
  DEFAULT_LOCAL_AI_CLIENT,
13
+ resolveLocalAIExecutionModel,
13
14
  SUPPORTED_LOCAL_AI_CLIENTS,
14
15
  normalizeLocalAIClientName,
15
16
  normalizeLocalAIPermissionMode,
@@ -5207,6 +5208,7 @@ TELEGRAM_BOT_REVIEW_TOKEN=review-token
5207
5208
  await runSelftestBotCommands(push, {
5208
5209
  cliPath: fileURLToPath(import.meta.url),
5209
5210
  parseSimpleEnvText,
5211
+ resolveLocalAIExecutionModel,
5210
5212
  });
5211
5213
 
5212
5214
  const payload = buildSelftestPayload(checks);
@@ -1462,21 +1462,21 @@ function suggestedAIModelsForClient(clientName) {
1462
1462
  const normalizedClient = String(clientName || "").trim().toLowerCase();
1463
1463
  if (normalizedClient === "codex") {
1464
1464
  return [
1465
- { value: "GPT-5.4", label: "GPT-5.4", description: "recommended GPT model" },
1466
- { value: "GPT-5.3-CODEX", label: "GPT-5.3-CODEX", description: "stable GPT codex-style model" },
1467
- { value: "GPT-5.3-CODEX-Spark", label: "GPT-5.3-CODEX-Spark", description: "faster GPT codex-style model" },
1465
+ { value: "GPT-5.4", label: "GPT-5.4", description: "display label; runs as gpt-5.2" },
1466
+ { value: "GPT-5.3-CODEX", label: "GPT-5.3-CODEX", description: "display label; runs as gpt-5.2-codex" },
1467
+ { value: "GPT-5.3-CODEX-Spark", label: "GPT-5.3-CODEX-Spark", description: "display label; runs as gpt-5.1-codex-mini" },
1468
1468
  ];
1469
1469
  }
1470
1470
  if (normalizedClient === "claude") {
1471
1471
  return [
1472
- { value: "Sonnet 4.6r", label: "Sonnet 4.6r", description: "recommended Claude Sonnet model" },
1473
- { value: "Haiku 4.5", label: "Haiku 4.5", description: "faster Claude Haiku model" },
1474
- { value: "Opus 4.6", label: "Opus 4.6", description: "highest-capability Claude model" },
1472
+ { value: "Sonnet 4.6r", label: "Sonnet 4.6r", description: "display label; runs as sonnet" },
1473
+ { value: "Haiku 4.5", label: "Haiku 4.5", description: "display label; runs as haiku" },
1474
+ { value: "Opus 4.6", label: "Opus 4.6", description: "display label; runs as opus" },
1475
1475
  ];
1476
1476
  }
1477
1477
  if (normalizedClient === "gemini") {
1478
1478
  return [
1479
- { value: "Gemini 3.1 Pro", label: "Gemini 3.1 Pro", description: "recommended Gemini model" },
1479
+ { value: "Gemini 3.1 Pro", label: "Gemini 3.1 Pro", description: "display label; runs as gemini-2.5-pro" },
1480
1480
  ];
1481
1481
  }
1482
1482
  if (normalizedClient === "sample") {
@@ -8,6 +8,50 @@ export const DEFAULT_LOCAL_AI_CLIENT = "codex";
8
8
 
9
9
  const SUPPORTED_PERMISSION_MODES = ["read_only", "workspace_write", "danger_full_access"];
10
10
  const SUPPORTED_REASONING_EFFORTS = ["low", "medium", "high"];
11
// Lookup table of user-facing display labels -> tested execution model IDs,
// keyed by local AI client name ("codex", "claude", "gemini", "sample").
// Each entry carries:
//   display   - the label shown to the user in CLI model pickers
//   execution - the model ID actually passed to the client adapter
//   aliases   - extra lowercase spellings accepted from manual input
// Matching against this table is case/whitespace/underscore-insensitive
// (see resolveLocalAIExecutionModel); unknown values pass through unchanged.
const LOCAL_AI_MODEL_MAPPINGS = {
  codex: [
    {
      display: "GPT-5.4",
      execution: "gpt-5.2",
      aliases: ["gpt-5.4", "gpt 5.4", "gpt-5.2", "gpt 5.2", "gpt-5"],
    },
    {
      display: "GPT-5.3-CODEX",
      execution: "gpt-5.2-codex",
      aliases: ["gpt-5.3-codex", "gpt 5.3 codex", "gpt-5.2-codex", "gpt 5.2 codex", "gpt-5-codex"],
    },
    {
      display: "GPT-5.3-CODEX-Spark",
      execution: "gpt-5.1-codex-mini",
      aliases: ["gpt-5.3-codex-spark", "gpt 5.3 codex spark", "gpt-5.1-codex-mini", "gpt 5.1 codex mini"],
    },
  ],
  claude: [
    {
      display: "Sonnet 4.6r",
      execution: "sonnet",
      aliases: ["sonnet 4.6r", "claude-sonnet-4", "claude-3.7-sonnet", "sonnet"],
    },
    {
      display: "Haiku 4.5",
      execution: "haiku",
      aliases: ["haiku 4.5", "haiku"],
    },
    {
      display: "Opus 4.6",
      execution: "opus",
      aliases: ["opus 4.6", "opus"],
    },
  ],
  gemini: [
    {
      display: "Gemini 3.1 Pro",
      execution: "gemini-2.5-pro",
      aliases: ["gemini 3.1 pro", "gemini-3.1-pro", "gemini-2.5-pro", "gemini 2.5 pro"],
    },
  ],
  // The "sample" client runs no real model, so it has no label mappings.
  sample: [],
};
11
55
 
12
56
  function tryJsonParse(text) {
13
57
  try {
@@ -25,6 +69,10 @@ function firstNonEmptyString(values) {
25
69
  return "";
26
70
  }
27
71
 
72
// Coerce a possibly-missing value to an array; any non-array becomes [].
function ensureArray(value) {
  if (Array.isArray(value)) {
    return value;
  }
  return [];
}
75
+
28
76
  function intFromRawAllowZero(rawValue, fallback = 0) {
29
77
  const parsed = Number.parseInt(String(rawValue ?? "").trim(), 10);
30
78
  if (!Number.isFinite(parsed) || parsed < 0) {
@@ -176,6 +224,30 @@ function normalizeCliOutput(rawText) {
176
224
  };
177
225
  }
178
226
 
227
// Canonicalize a model name for alias comparison: trim, lowercase, and
// collapse every run of whitespace/underscores into a single hyphen.
// Falsy input (null, undefined, "") normalizes to "".
function normalizeModelAliasText(rawValue) {
  const text = rawValue || "";
  return String(text).trim().toLowerCase().replace(/[\s_]+/g, "-");
}
233
+
234
// Return the alias-mapping list for a client after normalizing its name.
// Unknown clients, or table entries that are not arrays, yield an empty list.
function localAIModelMappingsForClient(clientName) {
  const clientKey = normalizeLocalAIClientName(clientName, "");
  const mappings = LOCAL_AI_MODEL_MAPPINGS[clientKey];
  return Array.isArray(mappings) ? mappings : [];
}
238
+
239
+ export function resolveLocalAIExecutionModel(clientName, rawModelValue = "") {
240
+ const modelValue = String(rawModelValue || "").trim();
241
+ if (!modelValue) return "";
242
+ const normalizedModel = normalizeModelAliasText(modelValue);
243
+ const match = localAIModelMappingsForClient(clientName).find((item) => (
244
+ normalizeModelAliasText(item.display) === normalizedModel
245
+ || normalizeModelAliasText(item.execution) === normalizedModel
246
+ || ensureArray(item.aliases).some((alias) => normalizeModelAliasText(alias) === normalizedModel)
247
+ ));
248
+ return match ? String(match.execution || "").trim() : modelValue;
249
+ }
250
+
179
251
  function buildCodexArgs({ workspaceDir, model, permissionMode, reasoningEffort, outputPath }) {
180
252
  const args = ["exec"];
181
253
  if (model) {
@@ -487,6 +559,7 @@ export function runLocalAIClient({
487
559
  const normalizedClient = normalizeLocalAIClientName(client);
488
560
  const normalizedPermissionMode = normalizeLocalAIPermissionMode(permissionMode);
489
561
  const normalizedReasoningEffort = normalizeLocalAIReasoningEffort(reasoningEffort);
562
+ const resolvedExecutionModel = resolveLocalAIExecutionModel(normalizedClient, model);
490
563
  const resolvedWorkspaceDir = ensureWorkspaceDir(workspaceDir);
491
564
  const promptText = buildLocalBotPrompt(inputPayload);
492
565
  if (normalizedClient === "sample") {
@@ -497,6 +570,7 @@ export function runLocalAIClient({
497
570
  ...env,
498
571
  METHEUS_AI_RUNNER_CLIENT: normalizedClient,
499
572
  METHEUS_AI_RUNNER_MODEL: String(model || "").trim(),
573
+ METHEUS_AI_RUNNER_EXECUTION_MODEL: resolvedExecutionModel,
500
574
  METHEUS_AI_RUNNER_PERMISSION_MODE: normalizedPermissionMode,
501
575
  METHEUS_AI_RUNNER_REASONING_EFFORT: normalizedReasoningEffort,
502
576
  METHEUS_RUNNER_WORKSPACE_DIR: resolvedWorkspaceDir,
@@ -505,7 +579,7 @@ export function runLocalAIClient({
505
579
  return runCodexAdapter({
506
580
  promptText,
507
581
  workspaceDir: resolvedWorkspaceDir,
508
- model: String(model || "").trim(),
582
+ model: resolvedExecutionModel,
509
583
  permissionMode: normalizedPermissionMode,
510
584
  reasoningEffort: normalizedReasoningEffort,
511
585
  env: nextEnv,
@@ -515,7 +589,7 @@ export function runLocalAIClient({
515
589
  return runClaudeAdapter({
516
590
  promptText,
517
591
  workspaceDir: resolvedWorkspaceDir,
518
- model: String(model || "").trim(),
592
+ model: resolvedExecutionModel,
519
593
  permissionMode: normalizedPermissionMode,
520
594
  reasoningEffort: normalizedReasoningEffort,
521
595
  env: nextEnv,
@@ -525,7 +599,7 @@ export function runLocalAIClient({
525
599
  return runGeminiAdapter({
526
600
  promptText,
527
601
  workspaceDir: resolvedWorkspaceDir,
528
- model: String(model || "").trim(),
602
+ model: resolvedExecutionModel,
529
603
  permissionMode: normalizedPermissionMode,
530
604
  env: nextEnv,
531
605
  });
@@ -228,9 +228,28 @@ function runCLI({ cliPath, args, env, input = "" }) {
228
228
  export async function runSelftestBotCommands(push, deps) {
229
229
  const cliPath = String(requireDependency(deps, "cliPath") || "").trim();
230
230
  const parseSimpleEnvText = requireDependency(deps, "parseSimpleEnvText");
231
+ const resolveLocalAIExecutionModel = requireDependency(deps, "resolveLocalAIExecutionModel");
231
232
  let tempHome = "";
232
233
  let mock = null;
233
234
  try {
235
+ push(
236
+ "display_model_labels_map_to_tested_execution_ids",
237
+ resolveLocalAIExecutionModel("codex", "GPT-5.4") === "gpt-5.2"
238
+ && resolveLocalAIExecutionModel("codex", "GPT-5.3-CODEX") === "gpt-5.2-codex"
239
+ && resolveLocalAIExecutionModel("codex", "GPT-5.3-CODEX-Spark") === "gpt-5.1-codex-mini"
240
+ && resolveLocalAIExecutionModel("claude", "Sonnet 4.6r") === "sonnet"
241
+ && resolveLocalAIExecutionModel("claude", "Haiku 4.5") === "haiku"
242
+ && resolveLocalAIExecutionModel("claude", "Opus 4.6") === "opus"
243
+ && resolveLocalAIExecutionModel("gemini", "Gemini 3.1 Pro") === "gemini-2.5-pro",
244
+ [
245
+ `gpt54=${resolveLocalAIExecutionModel("codex", "GPT-5.4")}`,
246
+ `gpt53codex=${resolveLocalAIExecutionModel("codex", "GPT-5.3-CODEX")}`,
247
+ `spark=${resolveLocalAIExecutionModel("codex", "GPT-5.3-CODEX-Spark")}`,
248
+ `claude=${resolveLocalAIExecutionModel("claude", "Sonnet 4.6r")}`,
249
+ `gemini=${resolveLocalAIExecutionModel("gemini", "Gemini 3.1 Pro")}`,
250
+ ].join(" "),
251
+ );
252
+
234
253
  tempHome = fs.mkdtempSync(path.join(os.tmpdir(), "metheus-bot-selftest-"));
235
254
  mock = await createMockServer().listen();
236
255
  const env = buildSpawnEnv(tempHome);
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "metheus-governance-mcp-cli",
3
- "version": "0.2.82",
3
+ "version": "0.2.83",
4
4
  "description": "Metheus Governance MCP CLI (setup + stdio proxy)",
5
5
  "type": "module",
6
6
  "files": [