@ax-llm/ax 12.0.2 → 12.0.4

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/index.cjs CHANGED
@@ -3215,8 +3215,9 @@ var AxAIDeepSeek = class extends AxAIOpenAIBase {
 
  // ai/google-gemini/types.ts
  var AxAIGoogleGeminiModel = /* @__PURE__ */ ((AxAIGoogleGeminiModel2) => {
- AxAIGoogleGeminiModel2["Gemini25Pro"] = "gemini-2.5-pro-preview-05-06";
- AxAIGoogleGeminiModel2["Gemini25Flash"] = "gemini-2.5-flash-preview-04-17";
+ AxAIGoogleGeminiModel2["Gemini25Pro"] = "gemini-2.5-pro";
+ AxAIGoogleGeminiModel2["Gemini25Flash"] = "gemini-2.5-flash";
+ AxAIGoogleGeminiModel2["Gemini25FlashLite"] = "gemini-2.5-flash-lite-preview-06-17";
  AxAIGoogleGeminiModel2["Gemini20Flash"] = "gemini-2.0-flash";
  AxAIGoogleGeminiModel2["Gemini20FlashLite"] = "gemini-2.0-flash-lite-preview-02-05";
  AxAIGoogleGeminiModel2["Gemini1Pro"] = "gemini-1.0-pro";
@@ -3227,7 +3228,7 @@ var AxAIGoogleGeminiModel = /* @__PURE__ */ ((AxAIGoogleGeminiModel2) => {
  return AxAIGoogleGeminiModel2;
  })(AxAIGoogleGeminiModel || {});
  var AxAIGoogleGeminiEmbedModel = /* @__PURE__ */ ((AxAIGoogleGeminiEmbedModel2) => {
- AxAIGoogleGeminiEmbedModel2["GeminiEmbedding"] = "gemini-embedding-exp-03-07";
+ AxAIGoogleGeminiEmbedModel2["GeminiEmbedding"] = "gemini-embedding-exp";
  AxAIGoogleGeminiEmbedModel2["TextEmbeddingLarge"] = "text-embedding-large-exp-03-07";
  AxAIGoogleGeminiEmbedModel2["TextEmbedding004"] = "text-embedding-004";
  AxAIGoogleGeminiEmbedModel2["TextEmbedding005"] = "text-embedding-005";
@@ -3263,7 +3264,7 @@ var AxAIGoogleGeminiEmbedTypes = /* @__PURE__ */ ((AxAIGoogleGeminiEmbedTypes2)
  // ai/google-gemini/info.ts
  var axModelInfoGoogleGemini = [
  {
- name: "gemini-2.5-pro-preview-05-06" /* Gemini25Pro */,
+ name: "gemini-2.5-pro" /* Gemini25Pro */,
  currency: "usd",
  characterIsToken: false,
  promptTokenCostPer1M: 2.5,
@@ -3272,7 +3273,7 @@ var axModelInfoGoogleGemini = [
  hasShowThoughts: true
  },
  {
- name: "gemini-2.5-flash-preview-04-17" /* Gemini25Flash */,
+ name: "gemini-2.5-flash" /* Gemini25Flash */,
  currency: "usd",
  characterIsToken: false,
  promptTokenCostPer1M: 15,
@@ -3280,6 +3281,15 @@ var axModelInfoGoogleGemini = [
  hasThinkingBudget: true,
  hasShowThoughts: true
  },
+ {
+ name: "gemini-2.5-flash-lite-preview-06-17" /* Gemini25FlashLite */,
+ currency: "usd",
+ characterIsToken: false,
+ promptTokenCostPer1M: 0.1,
+ completionTokenCostPer1M: 0.4,
+ hasThinkingBudget: true,
+ hasShowThoughts: true
+ },
  {
  name: "gemini-2.0-flash" /* Gemini20Flash */,
  currency: "usd",
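
The new Gemini25FlashLite entry above carries pricing metadata (promptTokenCostPer1M: 0.1, completionTokenCostPer1M: 0.4). Below is a minimal sketch of consuming that metadata to estimate per-call cost; it assumes axModelInfoGoogleGemini and AxAIGoogleGeminiModel are importable from @ax-llm/ax, and the token counts are placeholders.

```ts
// Look up the new Flash Lite entry and estimate a request's cost.
// Assumption: both symbols are exported from '@ax-llm/ax'.
import { axModelInfoGoogleGemini, AxAIGoogleGeminiModel } from '@ax-llm/ax';

const info = axModelInfoGoogleGemini.find(
  (m) => m.name === AxAIGoogleGeminiModel.Gemini25FlashLite
);

if (info) {
  const promptTokens = 12_000; // illustrative prompt size
  const completionTokens = 1_500; // illustrative completion size
  const usd =
    (promptTokens / 1e6) * (info.promptTokenCostPer1M ?? 0) +
    (completionTokens / 1e6) * (info.completionTokenCostPer1M ?? 0);
  console.log(`~$${usd.toFixed(4)} per call`); // ≈ $0.0018 at 0.1 / 0.4 per 1M
}
```
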
@@ -3344,15 +3354,29 @@ var safetySettings = [
  }
  ];
  var axAIGoogleGeminiDefaultConfig = () => structuredClone({
- model: "gemini-2.5-flash-preview-04-17" /* Gemini25Flash */,
+ model: "gemini-2.5-flash" /* Gemini25Flash */,
  embedModel: "text-embedding-005" /* TextEmbedding005 */,
  safetySettings,
+ thinkingTokenBudgetLevels: {
+ minimal: 200,
+ low: 800,
+ medium: 5e3,
+ high: 1e4,
+ highest: 24500
+ },
  ...axBaseAIDefaultConfig()
  });
  var axAIGoogleGeminiDefaultCreativeConfig = () => structuredClone({
  model: "gemini-2.0-flash" /* Gemini20Flash */,
  embedModel: "text-embedding-005" /* TextEmbedding005 */,
  safetySettings,
+ thinkingTokenBudgetLevels: {
+ minimal: 200,
+ low: 800,
+ medium: 5e3,
+ high: 1e4,
+ highest: 24500
+ },
  ...axBaseAIDefaultCreativeConfig()
  });
  var AxAIGoogleGeminiImpl = class {
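
Both Gemini default configs now ship a thinkingTokenBudgetLevels map with minimal/low/medium/high/highest entries. Because later properties win in an object spread, a caller can take the default config and swap in their own levels. A minimal sketch, assuming axAIGoogleGeminiDefaultConfig is exported under this name from @ax-llm/ax:

```ts
// Override the shipped thinking budgets while keeping the other defaults.
// Assumption: axAIGoogleGeminiDefaultConfig is exported from '@ax-llm/ax'.
import { axAIGoogleGeminiDefaultConfig } from '@ax-llm/ax';

const config = {
  ...axAIGoogleGeminiDefaultConfig(),
  // Properties written after the spread win, so these replace the
  // shipped 200 / 800 / 5000 / 10000 / 24500 defaults.
  thinkingTokenBudgetLevels: {
    minimal: 100,
    low: 400,
    medium: 2_000,
    high: 6_000,
    highest: 12_000,
  },
};
```
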
@@ -3539,25 +3563,26 @@ var AxAIGoogleGeminiImpl = class {
  thinkingConfig.thinkingBudget = this.config.thinking.thinkingTokenBudget;
  }
  if (config?.thinkingTokenBudget) {
+ const levels = this.config.thinkingTokenBudgetLevels;
  switch (config.thinkingTokenBudget) {
  case "none":
  thinkingConfig.thinkingBudget = 0;
  thinkingConfig.includeThoughts = false;
  break;
  case "minimal":
- thinkingConfig.thinkingBudget = 200;
+ thinkingConfig.thinkingBudget = levels?.minimal ?? 200;
  break;
  case "low":
- thinkingConfig.thinkingBudget = 800;
+ thinkingConfig.thinkingBudget = levels?.low ?? 800;
  break;
  case "medium":
- thinkingConfig.thinkingBudget = 5e3;
+ thinkingConfig.thinkingBudget = levels?.medium ?? 5e3;
  break;
  case "high":
- thinkingConfig.thinkingBudget = 1e4;
+ thinkingConfig.thinkingBudget = levels?.high ?? 1e4;
  break;
  case "highest":
- thinkingConfig.thinkingBudget = 24500;
+ thinkingConfig.thinkingBudget = levels?.highest ?? 24500;
  break;
  }
  }
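
The switch now resolves a named thinkingTokenBudget through the configurable levels, keeping the former literals as fallbacks. A standalone sketch of that resolution order follows; resolveThinkingBudget is a hypothetical helper written for illustration, not an Ax export.

```ts
// Mirror of the resolution logic above: named level -> configured value,
// else the hard-coded fallback; "none" always yields a zero budget.
type AxThinkingLevels = Partial<
  Record<'minimal' | 'low' | 'medium' | 'high' | 'highest', number>
>;

const FALLBACKS = {
  minimal: 200,
  low: 800,
  medium: 5_000,
  high: 10_000,
  highest: 24_500,
} as const;

function resolveThinkingBudget(
  level: keyof typeof FALLBACKS | 'none',
  levels?: AxThinkingLevels
): number {
  if (level === 'none') return 0; // the real code also sets includeThoughts = false
  return levels?.[level] ?? FALLBACKS[level];
}

resolveThinkingBudget('medium'); // -> 5000 (fallback)
resolveThinkingBudget('medium', { medium: 2_000 }); // -> 2000 (configured)
```
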
@@ -4139,7 +4164,8 @@ var axModelInfoMistral = [
  // ai/mistral/api.ts
  var axAIMistralDefaultConfig = () => structuredClone({
  model: "mistral-small-latest" /* MistralSmall */,
- ...axBaseAIDefaultConfig()
+ ...axBaseAIDefaultConfig(),
+ topP: 1
  });
  var axAIMistralBestConfig = () => structuredClone({
  ...axAIMistralDefaultConfig(),
@@ -4167,6 +4193,15 @@ var AxAIMistral = class extends AxAIOpenAIBase {
  hasThinkingBudget: false,
  hasShowThoughts: false
  };
+ const chatReqUpdater = (req) => {
+ const { max_completion_tokens, stream_options, messages, ...result } = req;
+ return {
+ // eslint-disable-next-line @typescript-eslint/no-explicit-any
+ ...result,
+ messages: this.updateMessages(messages),
+ max_tokens: max_completion_tokens
+ };
+ };
  super({
  apiKey,
  config: _config,
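
The new chatReqUpdater adapts the OpenAI-shaped request built by AxAIOpenAIBase to what Mistral accepts: stream_options is dropped, max_completion_tokens is renamed to max_tokens, and messages are passed through updateMessages (added in the next hunk). A before/after sketch with an illustrative request literal:

```ts
// Illustrative OpenAI-style request, roughly as the base class would build it.
const openAIStyleReq = {
  model: 'mistral-small-latest',
  max_completion_tokens: 1024,
  stream_options: { include_usage: true },
  messages: [{ role: 'user', content: 'Hello' }],
};

// Same rewrite the updater performs: stream_options stays behind in the
// destructuring, max_completion_tokens comes back as max_tokens.
const { max_completion_tokens, stream_options, messages, ...rest } = openAIStyleReq;
const mistralReq = {
  ...rest,
  messages, // AxAIMistral additionally runs these through updateMessages()
  max_tokens: max_completion_tokens,
};
// mistralReq -> { model: 'mistral-small-latest', messages: [...], max_tokens: 1024 }
```
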
@@ -4174,10 +4209,32 @@ var AxAIMistral = class extends AxAIOpenAIBase {
  apiURL: "https://api.mistral.ai/v1",
  modelInfo,
  models,
- supportFor
+ supportFor,
+ // eslint-disable-next-line @typescript-eslint/no-explicit-any
+ chatReqUpdater
  });
  super.setName("Mistral");
  }
+ updateMessages(messages) {
+ const messagesUpdated = [];
+ if (!Array.isArray(messages)) {
+ return messages;
+ }
+ for (const message of messages) {
+ if (message.role === "user" && Array.isArray(message.content)) {
+ const contentUpdated = message.content.map((item) => {
+ if (typeof item === "object" && item !== null && item.type === "image_url") {
+ return { type: "image_url", image_url: item.image_url?.url };
+ }
+ return item;
+ });
+ messagesUpdated.push({ ...message, content: contentUpdated });
+ } else {
+ messagesUpdated.push(message);
+ }
+ }
+ return messagesUpdated;
+ }
  };
 
  // ai/ollama/api.ts
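
updateMessages flattens OpenAI-style image parts into the shape Mistral expects: the nested { image_url: { url } } object becomes a plain URL string, while text parts and non-user messages pass through unchanged. A small sketch of that transformation on an illustrative message:

```ts
type ContentPart =
  | { type: 'text'; text: string }
  | { type: 'image_url'; image_url: { url: string } | string };

const before: { role: string; content: ContentPart[] } = {
  role: 'user',
  content: [
    { type: 'text', text: 'Describe this image' },
    { type: 'image_url', image_url: { url: 'https://example.com/cat.png' } },
  ],
};

// Same mapping updateMessages applies to user messages with array content.
const after = {
  ...before,
  content: before.content.map((item) =>
    item.type === 'image_url' && typeof item.image_url === 'object'
      ? { type: 'image_url' as const, image_url: item.image_url.url }
      : item
  ),
};
// after.content[1] -> { type: 'image_url', image_url: 'https://example.com/cat.png' }
```
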