@mariozechner/pi-ai 0.61.0 → 0.62.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (36)
  1. package/dist/index.d.ts +1 -0
  2. package/dist/index.d.ts.map +1 -1
  3. package/dist/index.js.map +1 -1
  4. package/dist/models.generated.d.ts +36 -2
  5. package/dist/models.generated.d.ts.map +1 -1
  6. package/dist/models.generated.js +68 -34
  7. package/dist/models.generated.js.map +1 -1
  8. package/dist/providers/amazon-bedrock.d.ts +5 -0
  9. package/dist/providers/amazon-bedrock.d.ts.map +1 -1
  10. package/dist/providers/amazon-bedrock.js +1 -0
  11. package/dist/providers/amazon-bedrock.js.map +1 -1
  12. package/dist/providers/anthropic.d.ts.map +1 -1
  13. package/dist/providers/anthropic.js +19 -13
  14. package/dist/providers/anthropic.js.map +1 -1
  15. package/dist/providers/azure-openai-responses.d.ts.map +1 -1
  16. package/dist/providers/azure-openai-responses.js +1 -12
  17. package/dist/providers/azure-openai-responses.js.map +1 -1
  18. package/dist/providers/google-gemini-cli.d.ts.map +1 -1
  19. package/dist/providers/google-gemini-cli.js +16 -0
  20. package/dist/providers/google-gemini-cli.js.map +1 -1
  21. package/dist/providers/google-vertex.d.ts.map +1 -1
  22. package/dist/providers/google-vertex.js +17 -0
  23. package/dist/providers/google-vertex.js.map +1 -1
  24. package/dist/providers/google.d.ts.map +1 -1
  25. package/dist/providers/google.js +16 -0
  26. package/dist/providers/google.js.map +1 -1
  27. package/dist/providers/openai-completions.d.ts.map +1 -1
  28. package/dist/providers/openai-completions.js +12 -5
  29. package/dist/providers/openai-completions.js.map +1 -1
  30. package/dist/providers/openai-responses-shared.d.ts.map +1 -1
  31. package/dist/providers/openai-responses-shared.js +8 -3
  32. package/dist/providers/openai-responses-shared.js.map +1 -1
  33. package/dist/providers/openai-responses.d.ts.map +1 -1
  34. package/dist/providers/openai-responses.js +1 -12
  35. package/dist/providers/openai-responses.js.map +1 -1
  36. package/package.json +1 -1
@@ -2575,7 +2575,7 @@ export const MODELS = {
2575
2575
  cacheRead: 0,
2576
2576
  cacheWrite: 0,
2577
2577
  },
2578
- contextWindow: 128000,
2578
+ contextWindow: 144000,
2579
2579
  maxTokens: 32000,
2580
2580
  },
2581
2581
  "claude-opus-4.5": {
@@ -2593,7 +2593,7 @@ export const MODELS = {
2593
2593
  cacheRead: 0,
2594
2594
  cacheWrite: 0,
2595
2595
  },
2596
- contextWindow: 128000,
2596
+ contextWindow: 160000,
2597
2597
  maxTokens: 32000,
2598
2598
  },
2599
2599
  "claude-opus-4.6": {
@@ -2629,7 +2629,7 @@ export const MODELS = {
2629
2629
  cacheRead: 0,
2630
2630
  cacheWrite: 0,
2631
2631
  },
2632
- contextWindow: 128000,
2632
+ contextWindow: 216000,
2633
2633
  maxTokens: 16000,
2634
2634
  },
2635
2635
  "claude-sonnet-4.5": {
@@ -2647,7 +2647,7 @@ export const MODELS = {
2647
2647
  cacheRead: 0,
2648
2648
  cacheWrite: 0,
2649
2649
  },
2650
- contextWindow: 128000,
2650
+ contextWindow: 144000,
2651
2651
  maxTokens: 32000,
2652
2652
  },
2653
2653
  "claude-sonnet-4.6": {
@@ -2760,7 +2760,7 @@ export const MODELS = {
2760
2760
  cacheRead: 0,
2761
2761
  cacheWrite: 0,
2762
2762
  },
2763
- contextWindow: 64000,
2763
+ contextWindow: 128000,
2764
2764
  maxTokens: 16384,
2765
2765
  },
2766
2766
  "gpt-4o": {
@@ -2779,8 +2779,8 @@ export const MODELS = {
2779
2779
  cacheRead: 0,
2780
2780
  cacheWrite: 0,
2781
2781
  },
2782
- contextWindow: 64000,
2783
- maxTokens: 16384,
2782
+ contextWindow: 128000,
2783
+ maxTokens: 4096,
2784
2784
  },
2785
2785
  "gpt-5": {
2786
2786
  id: "gpt-5",
@@ -2815,7 +2815,7 @@ export const MODELS = {
2815
2815
  cacheRead: 0,
2816
2816
  cacheWrite: 0,
2817
2817
  },
2818
- contextWindow: 128000,
2818
+ contextWindow: 264000,
2819
2819
  maxTokens: 64000,
2820
2820
  },
2821
2821
  "gpt-5.1": {
@@ -2833,7 +2833,7 @@ export const MODELS = {
2833
2833
  cacheRead: 0,
2834
2834
  cacheWrite: 0,
2835
2835
  },
2836
- contextWindow: 128000,
2836
+ contextWindow: 264000,
2837
2837
  maxTokens: 64000,
2838
2838
  },
2839
2839
  "gpt-5.1-codex": {
@@ -2851,7 +2851,7 @@ export const MODELS = {
2851
2851
  cacheRead: 0,
2852
2852
  cacheWrite: 0,
2853
2853
  },
2854
- contextWindow: 128000,
2854
+ contextWindow: 400000,
2855
2855
  maxTokens: 128000,
2856
2856
  },
2857
2857
  "gpt-5.1-codex-max": {
@@ -2869,7 +2869,7 @@ export const MODELS = {
2869
2869
  cacheRead: 0,
2870
2870
  cacheWrite: 0,
2871
2871
  },
2872
- contextWindow: 128000,
2872
+ contextWindow: 400000,
2873
2873
  maxTokens: 128000,
2874
2874
  },
2875
2875
  "gpt-5.1-codex-mini": {
@@ -2887,7 +2887,7 @@ export const MODELS = {
2887
2887
  cacheRead: 0,
2888
2888
  cacheWrite: 0,
2889
2889
  },
2890
- contextWindow: 128000,
2890
+ contextWindow: 400000,
2891
2891
  maxTokens: 128000,
2892
2892
  },
2893
2893
  "gpt-5.2": {
@@ -4509,8 +4509,8 @@ export const MODELS = {
4509
4509
  cacheRead: 0,
4510
4510
  cacheWrite: 0,
4511
4511
  },
4512
- contextWindow: 196608,
4513
- maxTokens: 128000,
4512
+ contextWindow: 204800,
4513
+ maxTokens: 131072,
4514
4514
  },
4515
4515
  "MiniMax-M2.1": {
4516
4516
  id: "MiniMax-M2.1",
@@ -4529,6 +4529,23 @@ export const MODELS = {
4529
4529
  contextWindow: 204800,
4530
4530
  maxTokens: 131072,
4531
4531
  },
4532
+ "MiniMax-M2.1-highspeed": {
4533
+ id: "MiniMax-M2.1-highspeed",
4534
+ name: "MiniMax-M2.1-highspeed",
4535
+ api: "anthropic-messages",
4536
+ provider: "minimax",
4537
+ baseUrl: "https://api.minimax.io/anthropic",
4538
+ reasoning: true,
4539
+ input: ["text"],
4540
+ cost: {
4541
+ input: 0.6,
4542
+ output: 2.4,
4543
+ cacheRead: 0,
4544
+ cacheWrite: 0,
4545
+ },
4546
+ contextWindow: 204800,
4547
+ maxTokens: 131072,
4548
+ },
4532
4549
  "MiniMax-M2.5": {
4533
4550
  id: "MiniMax-M2.5",
4534
4551
  name: "MiniMax-M2.5",
@@ -4613,8 +4630,8 @@ export const MODELS = {
4613
4630
  cacheRead: 0,
4614
4631
  cacheWrite: 0,
4615
4632
  },
4616
- contextWindow: 196608,
4617
- maxTokens: 128000,
4633
+ contextWindow: 204800,
4634
+ maxTokens: 131072,
4618
4635
  },
4619
4636
  "MiniMax-M2.1": {
4620
4637
  id: "MiniMax-M2.1",
@@ -4633,6 +4650,23 @@ export const MODELS = {
4633
4650
  contextWindow: 204800,
4634
4651
  maxTokens: 131072,
4635
4652
  },
4653
+ "MiniMax-M2.1-highspeed": {
4654
+ id: "MiniMax-M2.1-highspeed",
4655
+ name: "MiniMax-M2.1-highspeed",
4656
+ api: "anthropic-messages",
4657
+ provider: "minimax-cn",
4658
+ baseUrl: "https://api.minimaxi.com/anthropic",
4659
+ reasoning: true,
4660
+ input: ["text"],
4661
+ cost: {
4662
+ input: 0.6,
4663
+ output: 2.4,
4664
+ cacheRead: 0,
4665
+ cacheWrite: 0,
4666
+ },
4667
+ contextWindow: 204800,
4668
+ maxTokens: 131072,
4669
+ },
4636
4670
  "MiniMax-M2.5": {
4637
4671
  id: "MiniMax-M2.5",
4638
4672
  name: "MiniMax-M2.5",
@@ -7256,12 +7290,12 @@ export const MODELS = {
7256
7290
  input: ["text"],
7257
7291
  cost: {
7258
7292
  input: 0.21,
7259
- output: 0.78,
7260
- cacheRead: 0.105,
7293
+ output: 0.7899999999999999,
7294
+ cacheRead: 0.1300000002,
7261
7295
  cacheWrite: 0,
7262
7296
  },
7263
7297
  contextWindow: 163840,
7264
- maxTokens: 65536,
7298
+ maxTokens: 4096,
7265
7299
  },
7266
7300
  "deepseek/deepseek-v3.2": {
7267
7301
  id: "deepseek/deepseek-v3.2",
@@ -7800,12 +7834,12 @@ export const MODELS = {
7800
7834
  input: ["text"],
7801
7835
  cost: {
7802
7836
  input: 0.19999999999999998,
7803
- output: 1.2,
7804
- cacheRead: 0,
7837
+ output: 1.17,
7838
+ cacheRead: 0.09999999999999999,
7805
7839
  cacheWrite: 0,
7806
7840
  },
7807
7841
  contextWindow: 196608,
7808
- maxTokens: 196608,
7842
+ maxTokens: 65536,
7809
7843
  },
7810
7844
  "minimax/minimax-m2.5:free": {
7811
7845
  id: "minimax/minimax-m2.5:free",
@@ -9228,12 +9262,12 @@ export const MODELS = {
9228
9262
  input: ["text"],
9229
9263
  cost: {
9230
9264
  input: 0.03,
9231
- output: 0.14,
9232
- cacheRead: 0,
9265
+ output: 0.11,
9266
+ cacheRead: 0.015,
9233
9267
  cacheWrite: 0,
9234
9268
  },
9235
9269
  contextWindow: 131072,
9236
- maxTokens: 4096,
9270
+ maxTokens: 131072,
9237
9271
  },
9238
9272
  "openai/gpt-oss-20b:free": {
9239
9273
  id: "openai/gpt-oss-20b:free",
@@ -10349,9 +10383,9 @@ export const MODELS = {
10349
10383
  reasoning: true,
10350
10384
  input: ["text"],
10351
10385
  cost: {
10352
- input: 0.25,
10353
- output: 0.85,
10354
- cacheRead: 0.125,
10386
+ input: 0.3,
10387
+ output: 1.1,
10388
+ cacheRead: 0.15,
10355
10389
  cacheWrite: 0,
10356
10390
  },
10357
10391
  contextWindow: 163840,
@@ -13591,9 +13625,9 @@ export const MODELS = {
13591
13625
  contextWindow: 2000000,
13592
13626
  maxTokens: 30000,
13593
13627
  },
13594
- "grok-4.20-beta-latest-non-reasoning": {
13595
- id: "grok-4.20-beta-latest-non-reasoning",
13596
- name: "Grok 4.20 Beta (Non-Reasoning)",
13628
+ "grok-4.20-0309-non-reasoning": {
13629
+ id: "grok-4.20-0309-non-reasoning",
13630
+ name: "Grok 4.20 (Non-Reasoning)",
13597
13631
  api: "openai-completions",
13598
13632
  provider: "xai",
13599
13633
  baseUrl: "https://api.x.ai/v1",
@@ -13608,9 +13642,9 @@ export const MODELS = {
13608
13642
  contextWindow: 2000000,
13609
13643
  maxTokens: 30000,
13610
13644
  },
13611
- "grok-4.20-beta-latest-reasoning": {
13612
- id: "grok-4.20-beta-latest-reasoning",
13613
- name: "Grok 4.20 Beta (Reasoning)",
13645
+ "grok-4.20-0309-reasoning": {
13646
+ id: "grok-4.20-0309-reasoning",
13647
+ name: "Grok 4.20 (Reasoning)",
13614
13648
  api: "openai-completions",
13615
13649
  provider: "xai",
13616
13650
  baseUrl: "https://api.x.ai/v1",