code-agent-auto-commit 1.3.0 → 1.3.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (2)
  1. package/dist/core/ai.js +125 -53
  2. package/package.json +1 -1
package/dist/core/ai.js CHANGED
@@ -13,6 +13,32 @@ const TYPE_ALIASES = {
13
13
  refactoring: "refactor",
14
14
  refector: "refactor",
15
15
  };
16
+ const MINIMAX_MODEL_ALIASES = {
17
+ "minimax-m2.5": "MiniMax-M2.5",
18
+ "minimax-m2.5-highspeed": "MiniMax-M2.5-highspeed",
19
+ "minimax-m2.1": "MiniMax-M2.1",
20
+ "minimax-m2.1-highspeed": "MiniMax-M2.1-highspeed",
21
+ "minimax-m2": "MiniMax-M2",
22
+ "minimax-text-01": "MiniMax-Text-01",
23
+ "text-01": "MiniMax-Text-01",
24
+ };
25
+ function normalizeProviderModel(provider, model) {
26
+ const trimmed = model.trim();
27
+ const raw = trimmed.includes("/") ? trimmed.slice(trimmed.lastIndexOf("/") + 1) : trimmed;
28
+ if (provider !== "minimax") {
29
+ return raw;
30
+ }
31
+ return MINIMAX_MODEL_ALIASES[raw.toLowerCase()] ?? raw;
32
+ }
33
+ function minimaxFallbackModel(model) {
34
+ return model === "MiniMax-Text-01" ? undefined : "MiniMax-Text-01";
35
+ }
36
+ function isUnknownModelError(status, body) {
37
+ if (status < 400 || status >= 500) {
38
+ return false;
39
+ }
40
+ return /unknown\s+model|invalid\s+model|model.*not\s+found|does\s+not\s+exist|not\s+supported/i.test(body);
41
+ }
16
42
  function normalizeCommitType(raw) {
17
43
  const value = raw.trim().toLowerCase();
18
44
  if (VALID_TYPES.has(value)) {
@@ -134,7 +160,7 @@ function validateAIConfig(ai) {
134
160
  }
135
161
  return undefined;
136
162
  }
137
- async function generateOpenAiStyleMessage(provider, model, summary, maxLength, signal) {
163
+ async function generateOpenAiStyleMessage(providerName, provider, model, summary, maxLength, signal) {
138
164
  const apiKey = getApiKey(provider);
139
165
  const headers = {
140
166
  "Content-Type": "application/json",
@@ -143,38 +169,59 @@ async function generateOpenAiStyleMessage(provider, model, summary, maxLength, s
143
169
  if (apiKey) {
144
170
  headers.Authorization = `Bearer ${apiKey}`;
145
171
  }
146
- const response = await fetch(`${provider.baseUrl.replace(/\/$/, "")}/chat/completions`, {
147
- method: "POST",
148
- headers,
149
- body: JSON.stringify({
150
- model,
151
- temperature: 0.2,
152
- messages: [
153
- {
154
- role: "system",
155
- content: "You generate exactly one conventional commit message. Format: '<type>(<scope>): <description>'. Scope is optional. Allowed types: feat, fix, refactor, docs, style, test, chore, perf, ci, build. Description must be imperative, lowercase, no period. Describe the actual change, not just 'update <file>'. No quotes. No code block.",
156
- },
157
- {
158
- role: "user",
159
- content: buildUserPrompt(summary, maxLength),
160
- },
161
- ],
162
- }),
163
- signal,
164
- });
165
- if (!response.ok) {
166
- const body = await response.text().catch(() => "");
167
- return { content: undefined, usage: undefined, error: `HTTP ${response.status}: ${body.slice(0, 200)}` };
172
+ async function requestModel(modelName) {
173
+ const response = await fetch(`${provider.baseUrl.replace(/\/$/, "")}/chat/completions`, {
174
+ method: "POST",
175
+ headers,
176
+ body: JSON.stringify({
177
+ model: modelName,
178
+ temperature: 0.2,
179
+ messages: [
180
+ {
181
+ role: "system",
182
+ content: "You generate exactly one conventional commit message. Format: '<type>(<scope>): <description>'. Scope is optional. Allowed types: feat, fix, refactor, docs, style, test, chore, perf, ci, build. Description must be imperative, lowercase, no period. Describe the actual change, not just 'update <file>'. No quotes. No code block.",
183
+ },
184
+ {
185
+ role: "user",
186
+ content: buildUserPrompt(summary, maxLength),
187
+ },
188
+ ],
189
+ }),
190
+ signal,
191
+ });
192
+ if (!response.ok) {
193
+ const body = await response.text().catch(() => "");
194
+ return { ok: false, status: response.status, body };
195
+ }
196
+ const payload = (await response.json());
197
+ const usage = payload.usage
198
+ ? {
199
+ promptTokens: payload.usage.prompt_tokens ?? 0,
200
+ completionTokens: payload.usage.completion_tokens ?? 0,
201
+ totalTokens: payload.usage.total_tokens ?? 0,
202
+ }
203
+ : undefined;
204
+ return { ok: true, content: payload.choices?.[0]?.message?.content, usage };
168
205
  }
169
- const payload = (await response.json());
170
- const usage = payload.usage
171
- ? {
172
- promptTokens: payload.usage.prompt_tokens ?? 0,
173
- completionTokens: payload.usage.completion_tokens ?? 0,
174
- totalTokens: payload.usage.total_tokens ?? 0,
206
+ const first = await requestModel(model);
207
+ if (first.ok) {
208
+ return { content: first.content, usage: first.usage };
209
+ }
210
+ if (providerName === "minimax" && isUnknownModelError(first.status, first.body)) {
211
+ const fallback = minimaxFallbackModel(model);
212
+ if (fallback) {
213
+ const retry = await requestModel(fallback);
214
+ if (retry.ok) {
215
+ return { content: retry.content, usage: retry.usage };
216
+ }
217
+ return {
218
+ content: undefined,
219
+ usage: undefined,
220
+ error: `HTTP ${first.status}: ${first.body.slice(0, 200)} | retry(${fallback}) HTTP ${retry.status}: ${retry.body.slice(0, 120)}`,
221
+ };
175
222
  }
176
- : undefined;
177
- return { content: payload.choices?.[0]?.message?.content, usage };
223
+ }
224
+ return { content: undefined, usage: undefined, error: `HTTP ${first.status}: ${first.body.slice(0, 200)}` };
178
225
  }
179
226
  async function generateAnthropicStyleMessage(provider, model, summary, maxLength, signal) {
180
227
  const apiKey = getApiKey(provider);
@@ -225,16 +272,17 @@ async function generateCommitMessage(ai, summary, maxLength) {
225
272
  return { message: undefined, usage: undefined, warning: configError };
226
273
  }
227
274
  const { provider, model } = splitModelRef(ai.model, ai.defaultProvider);
275
+ const resolvedModel = normalizeProviderModel(provider, model);
228
276
  const providerConfig = ai.providers[provider];
229
277
  const controller = new AbortController();
230
278
  const timeout = setTimeout(() => controller.abort(), ai.timeoutMs);
231
279
  try {
232
280
  let result;
233
281
  if (providerConfig.api === "openai-completions") {
234
- result = await generateOpenAiStyleMessage(providerConfig, model, summary, maxLength, controller.signal);
282
+ result = await generateOpenAiStyleMessage(provider, providerConfig, resolvedModel, summary, maxLength, controller.signal);
235
283
  }
236
284
  else {
237
- result = await generateAnthropicStyleMessage(providerConfig, model, summary, maxLength, controller.signal);
285
+ result = await generateAnthropicStyleMessage(providerConfig, resolvedModel, summary, maxLength, controller.signal);
238
286
  }
239
287
  if (result.error) {
240
288
  return { message: undefined, usage: result.usage, warning: result.error };
@@ -258,6 +306,7 @@ async function testAI(ai, userMessage) {
258
306
  return { ok: false, error: configError };
259
307
  }
260
308
  const { provider, model } = splitModelRef(ai.model, ai.defaultProvider);
309
+ const resolvedModel = normalizeProviderModel(provider, model);
261
310
  const providerConfig = ai.providers[provider];
262
311
  const apiKey = getApiKey(providerConfig);
263
312
  const controller = new AbortController();
@@ -269,26 +318,49 @@ async function testAI(ai, userMessage) {
269
318
  Authorization: `Bearer ${apiKey}`,
270
319
  ...(providerConfig.headers ?? {}),
271
320
  };
272
- const response = await fetch(`${providerConfig.baseUrl.replace(/\/$/, "")}/chat/completions`, {
273
- method: "POST",
274
- headers,
275
- body: JSON.stringify({
276
- model,
277
- temperature: 0.2,
278
- messages: [{ role: "user", content: userMessage }],
279
- }),
280
- signal: controller.signal,
281
- });
282
- if (!response.ok) {
283
- const body = await response.text().catch(() => "");
284
- return { ok: false, error: `HTTP ${response.status}: ${body.slice(0, 300)}` };
321
+ async function requestModel(modelName) {
322
+ const response = await fetch(`${providerConfig.baseUrl.replace(/\/$/, "")}/chat/completions`, {
323
+ method: "POST",
324
+ headers,
325
+ body: JSON.stringify({
326
+ model: modelName,
327
+ temperature: 0.2,
328
+ messages: [{ role: "user", content: userMessage }],
329
+ }),
330
+ signal: controller.signal,
331
+ });
332
+ if (!response.ok) {
333
+ const body = await response.text().catch(() => "");
334
+ return { ok: false, status: response.status, body };
335
+ }
336
+ const payload = (await response.json());
337
+ const usage = payload.usage
338
+ ? {
339
+ promptTokens: payload.usage.prompt_tokens ?? 0,
340
+ completionTokens: payload.usage.completion_tokens ?? 0,
341
+ totalTokens: payload.usage.total_tokens ?? 0,
342
+ }
343
+ : undefined;
344
+ return { ok: true, reply: payload.choices?.[0]?.message?.content ?? "", usage };
285
345
  }
286
- const payload = (await response.json());
287
- const reply = payload.choices?.[0]?.message?.content ?? "";
288
- const usage = payload.usage
289
- ? { promptTokens: payload.usage.prompt_tokens ?? 0, completionTokens: payload.usage.completion_tokens ?? 0, totalTokens: payload.usage.total_tokens ?? 0 }
290
- : undefined;
291
- return { ok: true, reply, usage };
346
+ const first = await requestModel(resolvedModel);
347
+ if (first.ok) {
348
+ return { ok: true, reply: first.reply, usage: first.usage };
349
+ }
350
+ if (provider === "minimax" && isUnknownModelError(first.status, first.body)) {
351
+ const fallback = minimaxFallbackModel(resolvedModel);
352
+ if (fallback) {
353
+ const retry = await requestModel(fallback);
354
+ if (retry.ok) {
355
+ return { ok: true, reply: retry.reply, usage: retry.usage };
356
+ }
357
+ return {
358
+ ok: false,
359
+ error: `HTTP ${first.status}: ${first.body.slice(0, 300)} | retry(${fallback}) HTTP ${retry.status}: ${retry.body.slice(0, 200)}`,
360
+ };
361
+ }
362
+ }
363
+ return { ok: false, error: `HTTP ${first.status}: ${first.body.slice(0, 300)}` };
292
364
  }
293
365
  else {
294
366
  const headers = {
@@ -301,7 +373,7 @@ async function testAI(ai, userMessage) {
301
373
  method: "POST",
302
374
  headers,
303
375
  body: JSON.stringify({
304
- model,
376
+ model: resolvedModel,
305
377
  max_tokens: 256,
306
378
  messages: [{ role: "user", content: userMessage }],
307
379
  }),
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "code-agent-auto-commit",
3
- "version": "1.3.0",
3
+ "version": "1.3.1",
4
4
  "description": "CAC provides configurable AI auto-commit(using your git account) for OpenCode, Claude Code, Codex, and other AI code agents",
5
5
  "license": "MIT",
6
6
  "type": "commonjs",