@rely-ai/caliber 1.42.0 → 1.44.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (2)
  1. package/dist/bin.js +200 -28
  2. package/package.json +1 -1
package/dist/bin.js CHANGED
@@ -64,6 +64,14 @@ function resolveFromEnv() {
64
64
  baseUrl: process.env.OPENAI_BASE_URL
65
65
  };
66
66
  }
67
+ if (process.env.MINIMAX_API_KEY) {
68
+ return {
69
+ provider: "minimax",
70
+ apiKey: process.env.MINIMAX_API_KEY,
71
+ model: process.env.CALIBER_MODEL || DEFAULT_MODELS.minimax,
72
+ baseUrl: process.env.MINIMAX_BASE_URL
73
+ };
74
+ }
67
75
  if (process.env.CALIBER_USE_CURSOR_SEAT === "1" || process.env.CALIBER_USE_CURSOR_SEAT === "true") {
68
76
  return {
69
77
  provider: "cursor",
@@ -83,7 +91,9 @@ function readConfigFile() {
83
91
  if (!fs4.existsSync(CONFIG_FILE)) return null;
84
92
  const raw = fs4.readFileSync(CONFIG_FILE, "utf-8");
85
93
  const parsed = JSON.parse(raw);
86
- if (!parsed.provider || !["anthropic", "vertex", "openai", "cursor", "claude-cli"].includes(parsed.provider)) {
94
+ if (!parsed.provider || !["anthropic", "vertex", "openai", "minimax", "cursor", "claude-cli"].includes(
95
+ parsed.provider
96
+ )) {
87
97
  return null;
88
98
  }
89
99
  return parsed;
@@ -131,6 +141,7 @@ var init_config = __esm({
131
141
  anthropic: "claude-sonnet-4-6",
132
142
  vertex: "claude-sonnet-4-6",
133
143
  openai: "gpt-5.4-mini",
144
+ minimax: "MiniMax-M2.7",
134
145
  cursor: "sonnet-4.6",
135
146
  "claude-cli": "default"
136
147
  };
@@ -142,7 +153,9 @@ var init_config = __esm({
142
153
  "gpt-5.4-mini": 1e6,
143
154
  "gpt-4o": 128e3,
144
155
  "gpt-4o-mini": 128e3,
145
- "sonnet-4.6": 2e5
156
+ "sonnet-4.6": 2e5,
157
+ "MiniMax-M2.7": 1e6,
158
+ "MiniMax-M2.7-highspeed": 1e6
146
159
  };
147
160
  DEFAULT_CONTEXT_WINDOW = 2e5;
148
161
  INPUT_BUDGET_FRACTION = 0.6;
@@ -152,6 +165,7 @@ var init_config = __esm({
152
165
  anthropic: "claude-haiku-4-5-20251001",
153
166
  vertex: "claude-haiku-4-5-20251001",
154
167
  openai: "gpt-5.4-mini",
168
+ minimax: "MiniMax-M2.7-highspeed",
155
169
  cursor: "gpt-5.3-codex-fast"
156
170
  };
157
171
  }
@@ -262,6 +276,7 @@ var pre_commit_block_exports = {};
262
276
  __export(pre_commit_block_exports, {
263
277
  appendLearningsBlock: () => appendLearningsBlock,
264
278
  appendManagedBlocks: () => appendManagedBlocks,
279
+ appendModelBlock: () => appendModelBlock,
265
280
  appendPreCommitBlock: () => appendPreCommitBlock,
266
281
  appendSyncBlock: () => appendSyncBlock,
267
282
  getCursorLearningsRule: () => getCursorLearningsRule,
@@ -269,6 +284,7 @@ __export(pre_commit_block_exports, {
269
284
  getCursorSetupRule: () => getCursorSetupRule,
270
285
  getCursorSyncRule: () => getCursorSyncRule,
271
286
  hasLearningsBlock: () => hasLearningsBlock,
287
+ hasModelBlock: () => hasModelBlock,
272
288
  hasPreCommitBlock: () => hasPreCommitBlock,
273
289
  hasSyncBlock: () => hasSyncBlock,
274
290
  stripManagedBlocks: () => stripManagedBlocks
@@ -345,6 +361,25 @@ function appendLearningsBlock(content) {
345
361
  function getCursorLearningsRule() {
346
362
  return { filename: CURSOR_LEARNINGS_FILENAME, content: CURSOR_LEARNINGS_CONTENT };
347
363
  }
364
+ function buildManagedModelBlock() {
365
+ const m = DEFAULT_MODELS.anthropic;
366
+ return `${MODEL_BLOCK_START}
367
+ ## Model Configuration
368
+
369
+ Recommended default: \`${m}\` with high effort (stronger reasoning; higher cost and latency than smaller models).
370
+ Smaller/faster models trade quality for speed and cost \u2014 pick what fits the task.
371
+ Pin your choice (\`/model\` in Claude Code, or \`CALIBER_MODEL\` when using Caliber with an API provider) so upstream default changes do not silently change behavior.
372
+
373
+ ${MODEL_BLOCK_END}`;
374
+ }
375
+ function hasModelBlock(content) {
376
+ return content.includes(MODEL_BLOCK_START);
377
+ }
378
+ function appendModelBlock(content) {
379
+ if (hasModelBlock(content)) return content;
380
+ const trimmed = content.trimEnd();
381
+ return trimmed + "\n\n" + buildManagedModelBlock() + "\n";
382
+ }
348
383
  function getSyncSetupInstruction(platform) {
349
384
  switch (platform) {
350
385
  case "claude":
@@ -379,7 +414,10 @@ function appendSyncBlock(content, platform = "claude") {
379
414
  return trimmed + "\n\n" + getSyncBlock(platform) + "\n";
380
415
  }
381
416
  function appendManagedBlocks(content, platform = "claude") {
382
- return appendSyncBlock(appendLearningsBlock(appendPreCommitBlock(content, platform)), platform);
417
+ return appendSyncBlock(
418
+ appendModelBlock(appendLearningsBlock(appendPreCommitBlock(content, platform))),
419
+ platform
420
+ );
383
421
  }
384
422
  function getCursorSyncContent() {
385
423
  const bin = resolveCaliber();
@@ -407,7 +445,7 @@ grep -q "caliber" .git/hooks/pre-commit 2>/dev/null && echo "SYNCED" || echo "NO
407
445
  \`\`\`
408
446
 
409
447
  If SYNCED: proceed normally.
410
- If NOT_SYNCED: Tell the user: "This project uses Caliber to keep agent configs in sync. It's not set up on your machine yet \u2014 want me to set it up? (~30 seconds)". If they agree, read \`.cursor/skills/setup-caliber/SKILL.md\` and follow its instructions.
448
+ If NOT_SYNCED: Tell the user: "This project uses Caliber to keep agent configs in sync \u2014 it is not set up yet. Want me to run /setup-caliber? (~30 seconds)". If they agree, read \`.cursor/skills/setup-caliber/SKILL.md\` and follow its instructions.
411
449
  `;
412
450
  }
413
451
  function getCursorSetupRule() {
@@ -424,11 +462,12 @@ function stripManagedBlocks(content) {
424
462
  }
425
463
  return result.replace(/\n{3,}/g, "\n\n").trim() + "\n";
426
464
  }
427
- var BLOCK_START, BLOCK_END, MANAGED_DOC_PATHS, CURSOR_RULE_FILENAME, LEARNINGS_BLOCK_START, LEARNINGS_BLOCK_END, LEARNINGS_BLOCK, CURSOR_LEARNINGS_FILENAME, CURSOR_LEARNINGS_CONTENT, SYNC_BLOCK_START, SYNC_BLOCK_END, CURSOR_SYNC_FILENAME, CURSOR_SETUP_FILENAME, MANAGED_BLOCK_PAIRS;
465
+ var BLOCK_START, BLOCK_END, MANAGED_DOC_PATHS, CURSOR_RULE_FILENAME, LEARNINGS_BLOCK_START, LEARNINGS_BLOCK_END, LEARNINGS_BLOCK, CURSOR_LEARNINGS_FILENAME, CURSOR_LEARNINGS_CONTENT, MODEL_BLOCK_START, MODEL_BLOCK_END, SYNC_BLOCK_START, SYNC_BLOCK_END, CURSOR_SYNC_FILENAME, CURSOR_SETUP_FILENAME, MANAGED_BLOCK_PAIRS;
428
466
  var init_pre_commit_block = __esm({
429
467
  "src/writers/pre-commit-block.ts"() {
430
468
  "use strict";
431
469
  init_resolve_caliber();
470
+ init_config();
432
471
  BLOCK_START = "<!-- caliber:managed:pre-commit -->";
433
472
  BLOCK_END = "<!-- /caliber:managed:pre-commit -->";
434
473
  MANAGED_DOC_PATHS = "CLAUDE.md .claude/ .cursor/ .cursorrules .github/copilot-instructions.md .github/instructions/ AGENTS.md CALIBER_LEARNINGS.md .agents/ .opencode/";
@@ -449,6 +488,8 @@ alwaysApply: true
449
488
  Read \`CALIBER_LEARNINGS.md\` for patterns and anti-patterns learned from previous sessions.
450
489
  These are auto-extracted from real tool usage \u2014 treat them as project-specific rules.
451
490
  `;
491
+ MODEL_BLOCK_START = "<!-- caliber:managed:model-config -->";
492
+ MODEL_BLOCK_END = "<!-- /caliber:managed:model-config -->";
452
493
  SYNC_BLOCK_START = "<!-- caliber:managed:sync -->";
453
494
  SYNC_BLOCK_END = "<!-- /caliber:managed:sync -->";
454
495
  CURSOR_SYNC_FILENAME = "caliber-sync.mdc";
@@ -456,6 +497,7 @@ These are auto-extracted from real tool usage \u2014 treat them as project-speci
456
497
  MANAGED_BLOCK_PAIRS = [
457
498
  [BLOCK_START, BLOCK_END],
458
499
  [LEARNINGS_BLOCK_START, LEARNINGS_BLOCK_END],
500
+ [MODEL_BLOCK_START, MODEL_BLOCK_END],
459
501
  [SYNC_BLOCK_START, SYNC_BLOCK_END]
460
502
  ];
461
503
  }
@@ -2317,17 +2359,20 @@ import OpenAI from "openai";
2317
2359
  var OpenAICompatProvider = class {
2318
2360
  client;
2319
2361
  defaultModel;
2320
- constructor(config) {
2362
+ temperature;
2363
+ constructor(config, options) {
2321
2364
  this.client = new OpenAI({
2322
2365
  apiKey: config.apiKey,
2323
2366
  ...config.baseUrl && { baseURL: config.baseUrl }
2324
2367
  });
2325
2368
  this.defaultModel = config.model;
2369
+ this.temperature = options?.temperature;
2326
2370
  }
2327
2371
  async call(options) {
2328
2372
  const response = await this.client.chat.completions.create({
2329
2373
  model: options.model || this.defaultModel,
2330
2374
  max_tokens: options.maxTokens || 4096,
2375
+ ...this.temperature !== void 0 && { temperature: this.temperature },
2331
2376
  messages: [
2332
2377
  { role: "system", content: options.system },
2333
2378
  { role: "user", content: options.prompt }
@@ -2362,6 +2407,7 @@ var OpenAICompatProvider = class {
2362
2407
  const stream = await this.client.chat.completions.create({
2363
2408
  model: options.model || this.defaultModel,
2364
2409
  max_tokens: options.maxTokens || 10240,
2410
+ ...this.temperature !== void 0 && { temperature: this.temperature },
2365
2411
  messages,
2366
2412
  stream: true
2367
2413
  });
@@ -2390,6 +2436,26 @@ var OpenAICompatProvider = class {
2390
2436
  }
2391
2437
  };
2392
2438
 
2439
+ // src/llm/minimax.ts
2440
+ var MINIMAX_DEFAULT_BASE_URL = "https://api.minimax.io/v1";
2441
+ var MiniMaxProvider = class extends OpenAICompatProvider {
2442
+ constructor(config) {
2443
+ super(
2444
+ {
2445
+ ...config,
2446
+ apiKey: config.apiKey ?? process.env.MINIMAX_API_KEY,
2447
+ baseUrl: config.baseUrl ?? process.env.MINIMAX_BASE_URL ?? MINIMAX_DEFAULT_BASE_URL
2448
+ },
2449
+ // MiniMax requires temperature in (0.0, 1.0] — 1.0 is the only safe default.
2450
+ { temperature: 1 }
2451
+ );
2452
+ }
2453
+ // MiniMax API doesn't support model listing; return known models statically.
2454
+ async listModels() {
2455
+ return ["MiniMax-M2.7", "MiniMax-M2.7-highspeed"];
2456
+ }
2457
+ };
2458
+
2393
2459
  // src/llm/cursor-acp.ts
2394
2460
  import { spawn, execSync as execSync5, execFileSync } from "child_process";
2395
2461
  import os3 from "os";
@@ -3026,6 +3092,7 @@ var KNOWN_MODELS = {
3026
3092
  "claude-opus-4-1-20250620"
3027
3093
  ],
3028
3094
  openai: ["gpt-5.4-mini", "gpt-4o", "gpt-4o-mini", "o3-mini"],
3095
+ minimax: ["MiniMax-M2.7", "MiniMax-M2.7-highspeed"],
3029
3096
  cursor: ["auto", "composer-1.5"],
3030
3097
  "claude-cli": []
3031
3098
  };
@@ -3129,6 +3196,8 @@ function createProvider(config) {
3129
3196
  return new VertexProvider(config);
3130
3197
  case "openai":
3131
3198
  return new OpenAICompatProvider(config);
3199
+ case "minimax":
3200
+ return new MiniMaxProvider(config);
3132
3201
  case "cursor": {
3133
3202
  if (!isCursorAgentAvailable()) {
3134
3203
  throw new Error(
@@ -3164,7 +3233,7 @@ function getProvider() {
3164
3233
  const config = loadConfig();
3165
3234
  if (!config) {
3166
3235
  throw new Error(
3167
- `No LLM provider configured. Set ANTHROPIC_API_KEY, OPENAI_API_KEY, or VERTEX_PROJECT_ID; or run \`${resolveCaliber()} config\` and choose Cursor or Claude Code; or set CALIBER_USE_CURSOR_SEAT=1 / CALIBER_USE_CLAUDE_CLI=1.`
3236
+ `No LLM provider configured. Set ANTHROPIC_API_KEY, OPENAI_API_KEY, MINIMAX_API_KEY, or VERTEX_PROJECT_ID; or run \`${resolveCaliber()} config\` and choose Cursor or Claude Code; or set CALIBER_USE_CURSOR_SEAT=1 / CALIBER_USE_CLAUDE_CLI=1.`
3168
3237
  );
3169
3238
  }
3170
3239
  cachedConfig = config;
@@ -3175,7 +3244,13 @@ function resetProvider() {
3175
3244
  cachedProvider = null;
3176
3245
  cachedConfig = null;
3177
3246
  }
3178
- var TRANSIENT_ERRORS = ["terminated", "ECONNRESET", "ETIMEDOUT", "socket hang up", "other side closed"];
3247
+ var TRANSIENT_ERRORS = [
3248
+ "terminated",
3249
+ "ECONNRESET",
3250
+ "ETIMEDOUT",
3251
+ "socket hang up",
3252
+ "other side closed"
3253
+ ];
3179
3254
  var MAX_RETRIES = 3;
3180
3255
  function isTransientError(error) {
3181
3256
  const msg = error.message.toLowerCase();
@@ -5504,6 +5579,13 @@ var LIMITS = {
5504
5579
  SKILL_CHARS: 3e3,
5505
5580
  RULES_MAX: 10
5506
5581
  };
5582
+ var BUILD_GENERATE_PROMPT_MAX_TOKENS = 12e4;
5583
+ var PROJECT_FILES_HEADER_RESERVE_TOKENS = 160;
5584
+ function maxCharsForCodeFileContent(runningJoinedLen, pathLine, budgetTokens) {
5585
+ const maxTotalChars = budgetTokens * 4;
5586
+ const overhead = runningJoinedLen + 1 + pathLine.length + 1;
5587
+ return Math.max(0, maxTotalChars - overhead);
5588
+ }
5507
5589
  function truncate(text, maxChars) {
5508
5590
  if (text.length <= maxChars) return text;
5509
5591
  return text.slice(0, maxChars) + `
@@ -5709,9 +5791,12 @@ User instructions: ${prompt}`);
5709
5791
  if (fingerprint.codeAnalysis) {
5710
5792
  const ca = fingerprint.codeAnalysis;
5711
5793
  const basePrompt = parts.join("\n");
5712
- const maxPromptTokens = getMaxPromptTokens();
5794
+ const effectiveMaxTokens = Math.min(getMaxPromptTokens(), BUILD_GENERATE_PROMPT_MAX_TOKENS);
5713
5795
  const baseTokens = estimateTokens(basePrompt);
5714
- const tokenBudgetForCode = Math.max(0, maxPromptTokens - baseTokens);
5796
+ const tokenBudgetForCode = Math.max(
5797
+ 0,
5798
+ effectiveMaxTokens - baseTokens - PROJECT_FILES_HEADER_RESERVE_TOKENS
5799
+ );
5715
5800
  const codeLines = [];
5716
5801
  let codeChars = 0;
5717
5802
  const introLine = "Study these files to extract patterns for skills. Use the exact code patterns you see here.\n";
@@ -5720,13 +5805,24 @@ User instructions: ${prompt}`);
5720
5805
  const sortedFiles = [...ca.files].sort((a, b) => (b.priority ?? 0) - (a.priority ?? 0));
5721
5806
  let includedFiles = 0;
5722
5807
  for (const f of sortedFiles) {
5723
- const entry = `[${f.path}]
5724
- ${f.content}
5808
+ const pathLine = `[${f.path}]
5809
+ `;
5810
+ const maxContent = maxCharsForCodeFileContent(runningCodeLen, pathLine, tokenBudgetForCode);
5811
+ if (maxContent < 1) {
5812
+ if (includedFiles > 0) break;
5813
+ continue;
5814
+ }
5815
+ const content = f.content.slice(0, Math.min(f.content.length, maxContent));
5816
+ const entry = `${pathLine}${content}
5725
5817
  `;
5726
5818
  const projectedLen = runningCodeLen + 1 + entry.length;
5727
- if (Math.ceil(projectedLen / 4) > tokenBudgetForCode && includedFiles > 0) break;
5819
+ const projectedTokens = Math.ceil(projectedLen / 4);
5820
+ if (projectedTokens > tokenBudgetForCode) {
5821
+ if (includedFiles > 0) break;
5822
+ continue;
5823
+ }
5728
5824
  codeLines.push(entry);
5729
- codeChars += f.content.length;
5825
+ codeChars += content.length;
5730
5826
  runningCodeLen = projectedLen;
5731
5827
  includedFiles++;
5732
5828
  }
@@ -5764,7 +5860,7 @@ function writeClaudeConfig(config) {
5764
5860
  const written = [];
5765
5861
  fs13.writeFileSync(
5766
5862
  "CLAUDE.md",
5767
- appendSyncBlock(appendLearningsBlock(appendPreCommitBlock(config.claudeMd)))
5863
+ appendManagedBlocks(config.claudeMd)
5768
5864
  );
5769
5865
  written.push("CLAUDE.md");
5770
5866
  if (config.rules?.length) {
@@ -5874,7 +5970,7 @@ function writeCodexConfig(config) {
5874
5970
  const written = [];
5875
5971
  fs15.writeFileSync(
5876
5972
  "AGENTS.md",
5877
- appendLearningsBlock(appendPreCommitBlock(config.agentsMd, "codex"))
5973
+ appendManagedBlocks(config.agentsMd, "codex")
5878
5974
  );
5879
5975
  written.push("AGENTS.md");
5880
5976
  if (config.skills?.length) {
@@ -5906,7 +6002,7 @@ function writeGithubCopilotConfig(config) {
5906
6002
  fs16.mkdirSync(".github", { recursive: true });
5907
6003
  fs16.writeFileSync(
5908
6004
  path15.join(".github", "copilot-instructions.md"),
5909
- appendSyncBlock(appendLearningsBlock(appendPreCommitBlock(config.instructions, "copilot")))
6005
+ appendManagedBlocks(config.instructions, "copilot")
5910
6006
  );
5911
6007
  written.push(".github/copilot-instructions.md");
5912
6008
  }
@@ -6529,7 +6625,8 @@ var PROVIDER_CHOICES = [
6529
6625
  { name: "Cursor \u2014 use your existing subscription (no API key)", value: "cursor" },
6530
6626
  { name: "Anthropic \u2014 API key from console.anthropic.com", value: "anthropic" },
6531
6627
  { name: "Google Vertex AI \u2014 Claude models via GCP", value: "vertex" },
6532
- { name: "OpenAI \u2014 or any OpenAI-compatible endpoint", value: "openai" }
6628
+ { name: "OpenAI \u2014 or any OpenAI-compatible endpoint", value: "openai" },
6629
+ { name: "MiniMax \u2014 API key from platform.minimax.io", value: "minimax" }
6533
6630
  ];
6534
6631
  async function runInteractiveProviderSetup(options) {
6535
6632
  const message = options?.selectMessage ?? "Select LLM provider";
@@ -6543,17 +6640,27 @@ async function runInteractiveProviderSetup(options) {
6543
6640
  config.model = "default";
6544
6641
  if (!isClaudeCliAvailable()) {
6545
6642
  console.log(chalk3.yellow("\n Claude Code CLI not found."));
6546
- console.log(chalk3.dim(" Install it: ") + chalk3.hex("#83D1EB")("npm install -g @anthropic-ai/claude-code"));
6547
- console.log(chalk3.dim(" Then run ") + chalk3.hex("#83D1EB")("claude") + chalk3.dim(" once to log in.\n"));
6643
+ console.log(
6644
+ chalk3.dim(" Install it: ") + chalk3.hex("#83D1EB")("npm install -g @anthropic-ai/claude-code")
6645
+ );
6646
+ console.log(
6647
+ chalk3.dim(" Then run ") + chalk3.hex("#83D1EB")("claude") + chalk3.dim(" once to log in.\n")
6648
+ );
6548
6649
  const proceed = await confirm({ message: "Continue anyway?" });
6549
6650
  if (!proceed) throw new Error("__exit__");
6550
6651
  } else if (!isClaudeCliLoggedIn()) {
6551
6652
  console.log(chalk3.yellow("\n Claude Code CLI found but not logged in."));
6552
- console.log(chalk3.dim(" Run ") + chalk3.hex("#83D1EB")("claude") + chalk3.dim(" once to log in.\n"));
6653
+ console.log(
6654
+ chalk3.dim(" Run ") + chalk3.hex("#83D1EB")("claude") + chalk3.dim(" once to log in.\n")
6655
+ );
6553
6656
  const proceed = await confirm({ message: "Continue anyway?" });
6554
6657
  if (!proceed) throw new Error("__exit__");
6555
6658
  } else {
6556
- console.log(chalk3.dim(" Run `claude` once and log in with your Pro/Max/Team account if you haven't."));
6659
+ console.log(
6660
+ chalk3.dim(
6661
+ " Run `claude` once and log in with your Pro/Max/Team account if you haven't."
6662
+ )
6663
+ );
6557
6664
  }
6558
6665
  break;
6559
6666
  }
@@ -6561,17 +6668,27 @@ async function runInteractiveProviderSetup(options) {
6561
6668
  if (!isCursorAgentAvailable()) {
6562
6669
  console.log(chalk3.yellow("\n Cursor Agent CLI not found."));
6563
6670
  if (IS_WINDOWS3) {
6564
- console.log(chalk3.dim(" Install it from: ") + chalk3.hex("#83D1EB")("https://www.cursor.com/downloads"));
6565
- console.log(chalk3.dim(" Then run ") + chalk3.hex("#83D1EB")("agent login") + chalk3.dim(" in PowerShell to authenticate.\n"));
6671
+ console.log(
6672
+ chalk3.dim(" Install it from: ") + chalk3.hex("#83D1EB")("https://www.cursor.com/downloads")
6673
+ );
6674
+ console.log(
6675
+ chalk3.dim(" Then run ") + chalk3.hex("#83D1EB")("agent login") + chalk3.dim(" in PowerShell to authenticate.\n")
6676
+ );
6566
6677
  } else {
6567
- console.log(chalk3.dim(" Install it: ") + chalk3.hex("#83D1EB")("curl https://cursor.com/install -fsS | bash"));
6568
- console.log(chalk3.dim(" Then run ") + chalk3.hex("#83D1EB")("agent login") + chalk3.dim(" to authenticate.\n"));
6678
+ console.log(
6679
+ chalk3.dim(" Install it: ") + chalk3.hex("#83D1EB")("curl https://cursor.com/install -fsS | bash")
6680
+ );
6681
+ console.log(
6682
+ chalk3.dim(" Then run ") + chalk3.hex("#83D1EB")("agent login") + chalk3.dim(" to authenticate.\n")
6683
+ );
6569
6684
  }
6570
6685
  const proceed = await confirm({ message: "Continue anyway?" });
6571
6686
  if (!proceed) throw new Error("__exit__");
6572
6687
  } else if (!isCursorLoggedIn()) {
6573
6688
  console.log(chalk3.yellow("\n Cursor Agent CLI found but not logged in."));
6574
- console.log(chalk3.dim(" Run ") + chalk3.hex("#83D1EB")("agent login") + chalk3.dim(" to authenticate.\n"));
6689
+ console.log(
6690
+ chalk3.dim(" Run ") + chalk3.hex("#83D1EB")("agent login") + chalk3.dim(" to authenticate.\n")
6691
+ );
6575
6692
  const proceed = await confirm({ message: "Continue anyway?" });
6576
6693
  if (!proceed) throw new Error("__exit__");
6577
6694
  }
@@ -6579,7 +6696,11 @@ async function runInteractiveProviderSetup(options) {
6579
6696
  break;
6580
6697
  }
6581
6698
  case "anthropic": {
6582
- console.log(chalk3.dim(" Get a key at https://console.anthropic.com (same account as Claude Pro/Team/Max)."));
6699
+ console.log(
6700
+ chalk3.dim(
6701
+ " Get a key at https://console.anthropic.com (same account as Claude Pro/Team/Max)."
6702
+ )
6703
+ );
6583
6704
  config.apiKey = await promptInput("Anthropic API key:");
6584
6705
  if (!config.apiKey) {
6585
6706
  console.log(chalk3.red("API key is required."));
@@ -6609,6 +6730,16 @@ async function runInteractiveProviderSetup(options) {
6609
6730
  config.model = await promptInput(`Model (default: ${DEFAULT_MODELS.openai}):`) || DEFAULT_MODELS.openai;
6610
6731
  break;
6611
6732
  }
6733
+ case "minimax": {
6734
+ console.log(chalk3.dim(" Get a key at https://platform.minimax.io"));
6735
+ config.apiKey = await promptInput("MiniMax API key:");
6736
+ if (!config.apiKey) {
6737
+ console.log(chalk3.red("API key is required."));
6738
+ throw new Error("__exit__");
6739
+ }
6740
+ config.model = await promptInput(`Model (default: ${DEFAULT_MODELS.minimax}):`) || DEFAULT_MODELS.minimax;
6741
+ break;
6742
+ }
6612
6743
  }
6613
6744
  writeConfigFile(config);
6614
6745
  return config;
@@ -6649,6 +6780,7 @@ var POINTS_FRESHNESS = 4;
6649
6780
  var POINTS_NO_SECRETS = 4;
6650
6781
  var POINTS_PERMISSIONS = 2;
6651
6782
  var POINTS_HOOKS = 2;
6783
+ var POINTS_MODEL_PINNED = 2;
6652
6784
  var POINTS_AGENTS_MD = 1;
6653
6785
  var POINTS_OPEN_SKILLS_FORMAT = 2;
6654
6786
  var POINTS_LEARNED_CONTENT = 2;
@@ -7433,6 +7565,19 @@ import { execSync as execSync12 } from "child_process";
7433
7565
  import { join as join7 } from "path";
7434
7566
  init_resolve_caliber();
7435
7567
  init_pre_commit_block();
7568
+
7569
+ // src/scoring/model-pinning.ts
7570
+ function configContentSuggestsPinnedModel(lower) {
7571
+ if (/\bcaliber_model\b/.test(lower) || /\bcaliber_fast_model\b/.test(lower)) return true;
7572
+ if (/(?:^|[\s`'"\n])\/model(?:[\s`'"\n]|$)/.test(lower)) return true;
7573
+ if (/claude-(sonnet|opus|haiku)([-.@\d]|\b)/.test(lower)) return true;
7574
+ if (/\bgpt-[45]([-._\d]|\b)/.test(lower)) return true;
7575
+ if (/\bsonnet-4\.[\d.]+\b/.test(lower)) return true;
7576
+ if (/\b(high|medium|low)\s+effort\b/.test(lower)) return true;
7577
+ return false;
7578
+ }
7579
+
7580
+ // src/scoring/checks/bonus.ts
7436
7581
  function hasPreCommitHook(dir) {
7437
7582
  try {
7438
7583
  const gitDir = execSync12("git rev-parse --git-dir", {
@@ -7553,6 +7698,33 @@ function checkBonus(dir) {
7553
7698
  detail: hasLearned ? "Session learnings found in CALIBER_LEARNINGS.md" : "No learned content",
7554
7699
  suggestion: hasLearned ? void 0 : `Session learnings capture patterns from your coding sessions so the agent improves over time. Run \`${resolveCaliber()} learn install\``
7555
7700
  });
7701
+ const configContent = (() => {
7702
+ const parts = [];
7703
+ for (const rel of ["CLAUDE.md", "AGENTS.md"]) {
7704
+ const c = readFileOrNull(join7(dir, rel));
7705
+ if (c) parts.push(c);
7706
+ }
7707
+ try {
7708
+ const rulesDir = join7(dir, ".cursor", "rules");
7709
+ for (const f of readdirSync3(rulesDir).filter((x) => x.endsWith(".mdc"))) {
7710
+ const content = readFileOrNull(join7(rulesDir, f));
7711
+ if (content) parts.push(content);
7712
+ }
7713
+ } catch {
7714
+ }
7715
+ return parts.join("\n").toLowerCase();
7716
+ })();
7717
+ const hasModelRef = configContentSuggestsPinnedModel(configContent);
7718
+ checks.push({
7719
+ id: "model_pinned",
7720
+ name: "Model & effort pinned",
7721
+ category: "bonus",
7722
+ maxPoints: POINTS_MODEL_PINNED,
7723
+ earnedPoints: hasModelRef ? POINTS_MODEL_PINNED : 0,
7724
+ passed: hasModelRef,
7725
+ detail: hasModelRef ? "Model or effort level explicitly set in config" : "Config doesn't pin model or effort level \u2014 behavior may change when defaults are updated",
7726
+ suggestion: hasModelRef ? void 0 : "Add model/effort to config: CALIBER_MODEL env var, or /model in Claude Code, or a Model Configuration section in CLAUDE.md"
7727
+ });
7556
7728
  return checks;
7557
7729
  }
7558
7730
 
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@rely-ai/caliber",
3
- "version": "1.42.0",
3
+ "version": "1.44.0",
4
4
  "description": "AI context infrastructure for coding agents — keeps CLAUDE.md, Cursor rules, and skills in sync as your codebase evolves",
5
5
  "type": "module",
6
6
  "bin": {