code-agent-auto-commit 1.2.0 → 1.3.1

This diff compares the contents of two publicly released versions of the package as published to a supported registry. It is provided for informational purposes only and reflects the package versions exactly as they appear in the public registry.
package/README.md CHANGED
@@ -21,7 +21,7 @@
  ## Installation

  ```bash
- pnpm add -g code-agent-auto-commit
+ pnpm add -g code-agent-auto-commit@latest
  ```

  To update to the latest version:
package/dist/cli.js CHANGED
@@ -9,6 +9,7 @@ const node_path_1 = __importDefault(require("node:path"));
  const claude_1 = require("./adapters/claude");
  const codex_1 = require("./adapters/codex");
  const opencode_1 = require("./adapters/opencode");
+ const ai_1 = require("./core/ai");
  const config_1 = require("./core/config");
  const fs_1 = require("./core/fs");
  const run_1 = require("./core/run");
@@ -89,6 +90,7 @@ Usage:
  cac status [--scope project|global] [--worktree <path>] [--config <path>]
  cac run [--tool opencode|codex|claude|manual] [--worktree <path>] [--config <path>] [--event-json <json>] [--event-stdin]
  cac set-worktree <path> [--config <path>]
+ cac ai <message> [--config <path>]
  cac version
  `);
  }
@@ -243,6 +245,37 @@ async function commandRun(flags, positionals) {
  if (result.tokenUsage) {
  console.log(`AI tokens: ${result.tokenUsage.totalTokens} (prompt: ${result.tokenUsage.promptTokens}, completion: ${result.tokenUsage.completionTokens})`);
  }
+ if (result.aiWarning) {
+ console.warn(`\nWarning: AI commit message failed — ${result.aiWarning}`);
+ console.warn(`Using fallback prefix instead. Run "cac ai hello" to test your AI config.`);
+ }
+ }
+ async function commandAI(flags, positionals) {
+ const message = positionals.join(" ").trim();
+ if (!message) {
+ console.error(`Usage: cac ai <message>`);
+ console.error(`Example: cac ai "hello, are you there?"`);
+ process.exitCode = 1;
+ return;
+ }
+ const worktree = node_path_1.default.resolve(getStringFlag(flags, "worktree") ?? process.cwd());
+ const explicitConfig = getStringFlag(flags, "config");
+ const loaded = (0, config_1.loadConfig)({ explicitPath: explicitConfig, worktree });
+ console.log(`Provider: ${loaded.config.ai.defaultProvider}`);
+ console.log(`Model: ${loaded.config.ai.model}`);
+ console.log(`Sending: "${message}"`);
+ console.log();
+ const result = await (0, ai_1.testAI)(loaded.config.ai, message);
+ if (!result.ok) {
+ console.error(`AI test failed: ${result.error}`);
+ process.exitCode = 1;
+ return;
+ }
+ console.log(`Reply: ${result.reply}`);
+ if (result.usage) {
+ console.log(`Tokens: ${result.usage.totalTokens} (prompt: ${result.usage.promptTokens}, completion: ${result.usage.completionTokens})`);
+ }
+ console.log(`\nAI is configured correctly.`);
  }
  async function main() {
  const argv = process.argv.slice(2);
@@ -281,6 +314,10 @@ async function main() {
  await commandRun(parsed.flags, parsed.positionals);
  return;
  }
+ if (command === "ai") {
+ await commandAI(parsed.flags, parsed.positionals);
+ return;
+ }
  throw new Error(`Unknown command: ${command}`);
  }
  main().catch((error) => {
package/dist/core/ai.d.ts CHANGED
@@ -1,2 +1,3 @@
- import type { AIConfig, AIGenerateResult, CommitSummary } from "../types";
+ import type { AIConfig, AIGenerateResult, AITestResult, CommitSummary } from "../types";
  export declare function generateCommitMessage(ai: AIConfig, summary: CommitSummary, maxLength: number): Promise<AIGenerateResult>;
+ export declare function testAI(ai: AIConfig, userMessage: string): Promise<AITestResult>;
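For a quick sanity check outside the CLI, the new export can be called directly from Node. The sketch below is a hypothetical example: the deep `dist/` require paths and the `loadConfig` options are inferred from the compiled files in this diff rather than from a documented public entry point.

```js
// Hypothetical smoke test for the new testAI export; require paths are
// inferred from the package layout shown in this diff, not a documented API.
const { loadConfig } = require("code-agent-auto-commit/dist/core/config");
const { testAI } = require("code-agent-auto-commit/dist/core/ai");

async function main() {
  // Mirror what commandAI in dist/cli.js does: load config for the current worktree.
  const loaded = loadConfig({ worktree: process.cwd() });
  const result = await testAI(loaded.config.ai, "hello, are you there?");
  if (!result.ok) {
    console.error(`AI test failed: ${result.error}`);
    process.exitCode = 1;
    return;
  }
  console.log(`Reply: ${result.reply}`);
  if (result.usage) {
    console.log(`Tokens: ${result.usage.totalTokens}`);
  }
}

main();
```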
package/dist/core/ai.js CHANGED
@@ -1,6 +1,7 @@
  "use strict";
  Object.defineProperty(exports, "__esModule", { value: true });
  exports.generateCommitMessage = generateCommitMessage;
+ exports.testAI = testAI;
  const VALID_TYPES = new Set([
  "feat", "fix", "refactor", "docs", "style", "test",
  "chore", "perf", "ci", "build", "revert",
@@ -12,6 +13,32 @@ const TYPE_ALIASES = {
  refactoring: "refactor",
  refector: "refactor",
  };
+ const MINIMAX_MODEL_ALIASES = {
+ "minimax-m2.5": "MiniMax-M2.5",
+ "minimax-m2.5-highspeed": "MiniMax-M2.5-highspeed",
+ "minimax-m2.1": "MiniMax-M2.1",
+ "minimax-m2.1-highspeed": "MiniMax-M2.1-highspeed",
+ "minimax-m2": "MiniMax-M2",
+ "minimax-text-01": "MiniMax-Text-01",
+ "text-01": "MiniMax-Text-01",
+ };
+ function normalizeProviderModel(provider, model) {
+ const trimmed = model.trim();
+ const raw = trimmed.includes("/") ? trimmed.slice(trimmed.lastIndexOf("/") + 1) : trimmed;
+ if (provider !== "minimax") {
+ return raw;
+ }
+ return MINIMAX_MODEL_ALIASES[raw.toLowerCase()] ?? raw;
+ }
+ function minimaxFallbackModel(model) {
+ return model === "MiniMax-Text-01" ? undefined : "MiniMax-Text-01";
+ }
+ function isUnknownModelError(status, body) {
+ if (status < 400 || status >= 500) {
+ return false;
+ }
+ return /unknown\s+model|invalid\s+model|model.*not\s+found|does\s+not\s+exist|not\s+supported/i.test(body);
+ }
  function normalizeCommitType(raw) {
  const value = raw.trim().toLowerCase();
  if (VALID_TYPES.has(value)) {
@@ -111,7 +138,29 @@ function buildUserPrompt(summary, maxLength) {
  summary.patch || "(none)",
  ].join("\n");
  }
- async function generateOpenAiStyleMessage(provider, model, summary, maxLength, signal) {
+ function validateAIConfig(ai) {
+ if (!ai.enabled) {
+ return "ai.enabled is false";
+ }
+ const { provider, model } = splitModelRef(ai.model, ai.defaultProvider);
+ if (!provider || !model) {
+ return `invalid ai.model "${ai.model}" — expected "provider/model" format`;
+ }
+ const providerConfig = ai.providers[provider];
+ if (!providerConfig) {
+ return `provider "${provider}" not found in ai.providers (available: ${Object.keys(ai.providers).join(", ") || "none"})`;
+ }
+ const apiKey = getApiKey(providerConfig);
+ if (!apiKey) {
+ const envName = providerConfig.apiKeyEnv;
+ if (envName) {
+ return `API key not found — env var "${envName}" is not set. Run: export ${envName}='your-key'`;
+ }
+ return `no API key configured for provider "${provider}" — set apiKeyEnv or apiKey in config`;
+ }
+ return undefined;
+ }
+ async function generateOpenAiStyleMessage(providerName, provider, model, summary, maxLength, signal) {
  const apiKey = getApiKey(provider);
  const headers = {
  "Content-Type": "application/json",
@@ -120,42 +169,64 @@ async function generateOpenAiStyleMessage(provider, model, summary, maxLength, s
  if (apiKey) {
  headers.Authorization = `Bearer ${apiKey}`;
  }
- const response = await fetch(`${provider.baseUrl.replace(/\/$/, "")}/chat/completions`, {
- method: "POST",
- headers,
- body: JSON.stringify({
- model,
- temperature: 0.2,
- messages: [
- {
- role: "system",
- content: "You generate exactly one conventional commit message. Format: '<type>(<scope>): <description>'. Scope is optional. Allowed types: feat, fix, refactor, docs, style, test, chore, perf, ci, build. Description must be imperative, lowercase, no period. Describe the actual change, not just 'update <file>'. No quotes. No code block.",
- },
- {
- role: "user",
- content: buildUserPrompt(summary, maxLength),
- },
- ],
- }),
- signal,
- });
- if (!response.ok) {
- return { content: undefined, usage: undefined };
+ async function requestModel(modelName) {
+ const response = await fetch(`${provider.baseUrl.replace(/\/$/, "")}/chat/completions`, {
+ method: "POST",
+ headers,
+ body: JSON.stringify({
+ model: modelName,
+ temperature: 0.2,
+ messages: [
+ {
+ role: "system",
+ content: "You generate exactly one conventional commit message. Format: '<type>(<scope>): <description>'. Scope is optional. Allowed types: feat, fix, refactor, docs, style, test, chore, perf, ci, build. Description must be imperative, lowercase, no period. Describe the actual change, not just 'update <file>'. No quotes. No code block.",
+ },
+ {
+ role: "user",
+ content: buildUserPrompt(summary, maxLength),
+ },
+ ],
+ }),
+ signal,
+ });
+ if (!response.ok) {
+ const body = await response.text().catch(() => "");
+ return { ok: false, status: response.status, body };
+ }
+ const payload = (await response.json());
+ const usage = payload.usage
+ ? {
+ promptTokens: payload.usage.prompt_tokens ?? 0,
+ completionTokens: payload.usage.completion_tokens ?? 0,
+ totalTokens: payload.usage.total_tokens ?? 0,
+ }
+ : undefined;
+ return { ok: true, content: payload.choices?.[0]?.message?.content, usage };
  }
- const payload = (await response.json());
- const usage = payload.usage
- ? {
- promptTokens: payload.usage.prompt_tokens ?? 0,
- completionTokens: payload.usage.completion_tokens ?? 0,
- totalTokens: payload.usage.total_tokens ?? 0,
+ const first = await requestModel(model);
+ if (first.ok) {
+ return { content: first.content, usage: first.usage };
+ }
+ if (providerName === "minimax" && isUnknownModelError(first.status, first.body)) {
+ const fallback = minimaxFallbackModel(model);
+ if (fallback) {
+ const retry = await requestModel(fallback);
+ if (retry.ok) {
+ return { content: retry.content, usage: retry.usage };
+ }
+ return {
+ content: undefined,
+ usage: undefined,
+ error: `HTTP ${first.status}: ${first.body.slice(0, 200)} | retry(${fallback}) HTTP ${retry.status}: ${retry.body.slice(0, 120)}`,
+ };
  }
- : undefined;
- return { content: payload.choices?.[0]?.message?.content, usage };
+ }
+ return { content: undefined, usage: undefined, error: `HTTP ${first.status}: ${first.body.slice(0, 200)}` };
  }
  async function generateAnthropicStyleMessage(provider, model, summary, maxLength, signal) {
  const apiKey = getApiKey(provider);
  if (!apiKey) {
- return { content: undefined, usage: undefined };
+ return { content: undefined, usage: undefined, error: "no API key" };
  }
  const headers = {
  "Content-Type": "application/json",
@@ -181,7 +252,8 @@ async function generateAnthropicStyleMessage(provider, model, summary, maxLength
  signal,
  });
  if (!response.ok) {
- return { content: undefined, usage: undefined };
+ const body = await response.text().catch(() => "");
+ return { content: undefined, usage: undefined, error: `HTTP ${response.status}: ${body.slice(0, 200)}` };
  }
  const payload = (await response.json());
  const firstText = payload.content?.find((item) => item.type === "text")?.text;
@@ -195,33 +267,135 @@ async function generateAnthropicStyleMessage(provider, model, summary, maxLength
  return { content: firstText, usage };
  }
  async function generateCommitMessage(ai, summary, maxLength) {
- const empty = { message: undefined, usage: undefined };
- if (!ai.enabled) {
- return empty;
+ const configError = validateAIConfig(ai);
+ if (configError) {
+ return { message: undefined, usage: undefined, warning: configError };
  }
  const { provider, model } = splitModelRef(ai.model, ai.defaultProvider);
- if (!provider || !model) {
- return empty;
- }
+ const resolvedModel = normalizeProviderModel(provider, model);
  const providerConfig = ai.providers[provider];
- if (!providerConfig) {
- return empty;
- }
  const controller = new AbortController();
  const timeout = setTimeout(() => controller.abort(), ai.timeoutMs);
  try {
  let result;
  if (providerConfig.api === "openai-completions") {
- result = await generateOpenAiStyleMessage(providerConfig, model, summary, maxLength, controller.signal);
+ result = await generateOpenAiStyleMessage(provider, providerConfig, resolvedModel, summary, maxLength, controller.signal);
  }
  else {
- result = await generateAnthropicStyleMessage(providerConfig, model, summary, maxLength, controller.signal);
+ result = await generateAnthropicStyleMessage(providerConfig, resolvedModel, summary, maxLength, controller.signal);
+ }
+ if (result.error) {
+ return { message: undefined, usage: result.usage, warning: result.error };
  }
  const normalized = normalizeMessage(result.content ?? "", maxLength);
  return { message: normalized || undefined, usage: result.usage };
  }
- catch {
- return empty;
+ catch (err) {
+ const msg = err instanceof Error && err.name === "AbortError"
+ ? `AI request timed out after ${ai.timeoutMs}ms`
+ : `AI request failed: ${err instanceof Error ? err.message : String(err)}`;
+ return { message: undefined, usage: undefined, warning: msg };
+ }
+ finally {
+ clearTimeout(timeout);
+ }
+ }
+ async function testAI(ai, userMessage) {
+ const configError = validateAIConfig(ai);
+ if (configError) {
+ return { ok: false, error: configError };
+ }
+ const { provider, model } = splitModelRef(ai.model, ai.defaultProvider);
+ const resolvedModel = normalizeProviderModel(provider, model);
+ const providerConfig = ai.providers[provider];
+ const apiKey = getApiKey(providerConfig);
+ const controller = new AbortController();
+ const timeout = setTimeout(() => controller.abort(), ai.timeoutMs);
+ try {
+ if (providerConfig.api === "openai-completions") {
+ const headers = {
+ "Content-Type": "application/json",
+ Authorization: `Bearer ${apiKey}`,
+ ...(providerConfig.headers ?? {}),
+ };
+ async function requestModel(modelName) {
+ const response = await fetch(`${providerConfig.baseUrl.replace(/\/$/, "")}/chat/completions`, {
+ method: "POST",
+ headers,
+ body: JSON.stringify({
+ model: modelName,
+ temperature: 0.2,
+ messages: [{ role: "user", content: userMessage }],
+ }),
+ signal: controller.signal,
+ });
+ if (!response.ok) {
+ const body = await response.text().catch(() => "");
+ return { ok: false, status: response.status, body };
+ }
+ const payload = (await response.json());
+ const usage = payload.usage
+ ? {
+ promptTokens: payload.usage.prompt_tokens ?? 0,
+ completionTokens: payload.usage.completion_tokens ?? 0,
+ totalTokens: payload.usage.total_tokens ?? 0,
+ }
+ : undefined;
+ return { ok: true, reply: payload.choices?.[0]?.message?.content ?? "", usage };
+ }
+ const first = await requestModel(resolvedModel);
+ if (first.ok) {
+ return { ok: true, reply: first.reply, usage: first.usage };
+ }
+ if (provider === "minimax" && isUnknownModelError(first.status, first.body)) {
+ const fallback = minimaxFallbackModel(resolvedModel);
+ if (fallback) {
+ const retry = await requestModel(fallback);
+ if (retry.ok) {
+ return { ok: true, reply: retry.reply, usage: retry.usage };
+ }
+ return {
+ ok: false,
+ error: `HTTP ${first.status}: ${first.body.slice(0, 300)} | retry(${fallback}) HTTP ${retry.status}: ${retry.body.slice(0, 200)}`,
+ };
+ }
+ }
+ return { ok: false, error: `HTTP ${first.status}: ${first.body.slice(0, 300)}` };
+ }
+ else {
+ const headers = {
+ "Content-Type": "application/json",
+ "x-api-key": apiKey,
+ "anthropic-version": "2023-06-01",
+ ...(providerConfig.headers ?? {}),
+ };
+ const response = await fetch(`${providerConfig.baseUrl.replace(/\/$/, "")}/messages`, {
+ method: "POST",
+ headers,
+ body: JSON.stringify({
+ model: resolvedModel,
+ max_tokens: 256,
+ messages: [{ role: "user", content: userMessage }],
+ }),
+ signal: controller.signal,
+ });
+ if (!response.ok) {
+ const body = await response.text().catch(() => "");
+ return { ok: false, error: `HTTP ${response.status}: ${body.slice(0, 300)}` };
+ }
+ const payload = (await response.json());
+ const reply = payload.content?.find((item) => item.type === "text")?.text ?? "";
+ const usage = payload.usage
+ ? { promptTokens: payload.usage.input_tokens ?? 0, completionTokens: payload.usage.output_tokens ?? 0, totalTokens: (payload.usage.input_tokens ?? 0) + (payload.usage.output_tokens ?? 0) }
+ : undefined;
+ return { ok: true, reply, usage };
+ }
+ }
+ catch (err) {
+ const msg = err instanceof Error && err.name === "AbortError"
+ ? `request timed out after ${ai.timeoutMs}ms`
+ : `request failed: ${err instanceof Error ? err.message : String(err)}`;
+ return { ok: false, error: msg };
  }
  finally {
  clearTimeout(timeout);
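To make the new MiniMax handling above easier to follow, here is a condensed restatement of `normalizeProviderModel` and `minimaxFallbackModel` (with a shortened alias table) plus sample inputs; it is a sketch for illustration, not code imported from the package.

```js
// Condensed restatement of the MiniMax model handling added to dist/core/ai.js.
const MINIMAX_MODEL_ALIASES = {
  "minimax-m2.5": "MiniMax-M2.5",
  "minimax-m2": "MiniMax-M2",
  "text-01": "MiniMax-Text-01",
};

function normalizeProviderModel(provider, model) {
  // Strip any "provider/" prefix, then map MiniMax names to their canonical casing.
  const raw = model.trim().split("/").pop();
  if (provider !== "minimax") return raw;
  return MINIMAX_MODEL_ALIASES[raw.toLowerCase()] ?? raw;
}

console.log(normalizeProviderModel("minimax", "minimax/MiniMax-M2.5")); // "MiniMax-M2.5"
console.log(normalizeProviderModel("minimax", "text-01"));              // "MiniMax-Text-01"
console.log(normalizeProviderModel("openai", "openai/gpt-4o-mini"));    // "gpt-4o-mini"

// When a MiniMax request fails with a 4xx "unknown model" style error,
// the request is retried once with MiniMax-Text-01 (unless that was already the model).
function minimaxFallbackModel(model) {
  return model === "MiniMax-Text-01" ? undefined : "MiniMax-Text-01";
}

console.log(minimaxFallbackModel("MiniMax-M2.5"));    // "MiniMax-Text-01"
console.log(minimaxFallbackModel("MiniMax-Text-01")); // undefined
```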
package/dist/core/run.js CHANGED
@@ -67,7 +67,7 @@ async function buildMessage(prefix, maxLength, aiConfig, stagedPath, fallback, w
  const msg = fallback.length <= maxLength
  ? fallback
  : `${normalizeFallbackType(prefix)}: update changes`;
- return { message: msg, usage: result.usage };
+ return { message: msg, usage: result.usage, warning: result.warning };
  }
  async function runAutoCommit(context, configOptions) {
  const { config } = (0, config_1.loadConfig)(configOptions);
@@ -94,6 +94,7 @@ async function runAutoCommit(context, configOptions) {
  }
  const commits = [];
  const totalUsage = { promptTokens: 0, completionTokens: 0, totalTokens: 0 };
+ let firstWarning;
  function addUsage(usage) {
  if (!usage)
  return;
@@ -117,6 +118,8 @@ async function runAutoCommit(context, configOptions) {
  const fallback = fallbackSingleMessage(config.commit.fallbackPrefix, changed.length);
  const result = await buildMessage(config.commit.fallbackPrefix, config.commit.maxMessageLength, config.ai, undefined, fallback, worktree);
  addUsage(result.usage);
+ if (result.warning && !firstWarning)
+ firstWarning = result.warning;
  const hash = (0, git_1.commit)(worktree, result.message);
  commits.push({
  hash,
@@ -136,6 +139,8 @@ async function runAutoCommit(context, configOptions) {
  const fallback = fallbackPerFileMessage(config.commit.fallbackPrefix, file);
  const result = await buildMessage(config.commit.fallbackPrefix, config.commit.maxMessageLength, config.ai, file.path, fallback, worktree);
  addUsage(result.usage);
+ if (result.warning && !firstWarning)
+ firstWarning = result.warning;
  const hash = (0, git_1.commit)(worktree, result.message);
  commits.push({
  hash,
@@ -157,5 +162,6 @@ async function runAutoCommit(context, configOptions) {
  committed: commits,
  pushed,
  tokenUsage: hasUsage ? totalUsage : undefined,
+ aiWarning: firstWarning,
  };
  }
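The run.js change wires AI failures through as a single warning: each `buildMessage` result may carry a `warning`, and only the first one encountered is reported back as `RunResult.aiWarning`, which the CLI then prints (see the cli.js hunk above). A minimal sketch of that first-warning-wins accumulation, using made-up stand-in data rather than the package's real commit loop:

```js
// Sketch of the "first warning wins" accumulation used in runAutoCommit.
// The results array is hypothetical stand-in data; in the real code each
// entry comes from buildMessage() while files are being committed.
const results = [
  { message: "feat: add ai test command", warning: undefined },
  { message: "chore: update changes", warning: "API key not found" },
  { message: "chore: update changes", warning: "HTTP 401: unauthorized" },
];

let firstWarning;
for (const result of results) {
  if (result.warning && !firstWarning) {
    firstWarning = result.warning;
  }
}

// Mirrors the new RunResult.aiWarning field: only the first failure reason surfaces.
console.log({ aiWarning: firstWarning }); // { aiWarning: "API key not found" }
```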
package/dist/types.d.ts CHANGED
@@ -65,6 +65,13 @@ export interface TokenUsage {
  export interface AIGenerateResult {
  message: string | undefined;
  usage: TokenUsage | undefined;
+ warning?: string;
+ }
+ export interface AITestResult {
+ ok: boolean;
+ reply?: string;
+ usage?: TokenUsage;
+ error?: string;
  }
  export interface RunResult {
  skipped: boolean;
@@ -73,6 +80,7 @@ export interface RunResult {
  committed: CommitRecord[];
  pushed: boolean;
  tokenUsage?: TokenUsage;
+ aiWarning?: string;
  }
  export interface RunContext {
  tool: ToolName;
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
  "name": "code-agent-auto-commit",
- "version": "1.2.0",
+ "version": "1.3.1",
  "description": "CAC provides configurable AI auto-commit(using your git account) for OpenCode, Claude Code, Codex, and other AI code agents",
  "license": "MIT",
  "type": "commonjs",