@mariozechner/pi-ai 0.61.1 → 0.63.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (36)
  1. package/dist/index.d.ts +1 -0
  2. package/dist/index.d.ts.map +1 -1
  3. package/dist/index.js.map +1 -1
  4. package/dist/models.generated.d.ts +142 -189
  5. package/dist/models.generated.d.ts.map +1 -1
  6. package/dist/models.generated.js +182 -232
  7. package/dist/models.generated.js.map +1 -1
  8. package/dist/providers/amazon-bedrock.d.ts +5 -0
  9. package/dist/providers/amazon-bedrock.d.ts.map +1 -1
  10. package/dist/providers/amazon-bedrock.js +1 -0
  11. package/dist/providers/amazon-bedrock.js.map +1 -1
  12. package/dist/providers/anthropic.d.ts.map +1 -1
  13. package/dist/providers/anthropic.js +19 -13
  14. package/dist/providers/anthropic.js.map +1 -1
  15. package/dist/providers/azure-openai-responses.d.ts.map +1 -1
  16. package/dist/providers/azure-openai-responses.js +1 -12
  17. package/dist/providers/azure-openai-responses.js.map +1 -1
  18. package/dist/providers/google-gemini-cli.d.ts.map +1 -1
  19. package/dist/providers/google-gemini-cli.js +16 -0
  20. package/dist/providers/google-gemini-cli.js.map +1 -1
  21. package/dist/providers/google-vertex.d.ts.map +1 -1
  22. package/dist/providers/google-vertex.js +18 -1
  23. package/dist/providers/google-vertex.js.map +1 -1
  24. package/dist/providers/google.d.ts.map +1 -1
  25. package/dist/providers/google.js +17 -1
  26. package/dist/providers/google.js.map +1 -1
  27. package/dist/providers/openai-completions.d.ts.map +1 -1
  28. package/dist/providers/openai-completions.js +12 -5
  29. package/dist/providers/openai-completions.js.map +1 -1
  30. package/dist/providers/openai-responses-shared.d.ts.map +1 -1
  31. package/dist/providers/openai-responses-shared.js +8 -3
  32. package/dist/providers/openai-responses-shared.js.map +1 -1
  33. package/dist/providers/openai-responses.d.ts.map +1 -1
  34. package/dist/providers/openai-responses.js +2 -13
  35. package/dist/providers/openai-responses.js.map +1 -1
  36. package/package.json +1 -1
@@ -801,6 +801,23 @@ export const MODELS = {
801
801
  contextWindow: 204800,
802
802
  maxTokens: 131072,
803
803
  },
804
+ "minimax.minimax-m2.5": {
805
+ id: "minimax.minimax-m2.5",
806
+ name: "MiniMax M2.5",
807
+ api: "bedrock-converse-stream",
808
+ provider: "amazon-bedrock",
809
+ baseUrl: "https://bedrock-runtime.us-east-1.amazonaws.com",
810
+ reasoning: true,
811
+ input: ["text"],
812
+ cost: {
813
+ input: 0.3,
814
+ output: 1.2,
815
+ cacheRead: 0,
816
+ cacheWrite: 0,
817
+ },
818
+ contextWindow: 196608,
819
+ maxTokens: 98304,
820
+ },
804
821
  "mistral.devstral-2-123b": {
805
822
  id: "mistral.devstral-2-123b",
806
823
  name: "Devstral 2 123B",
@@ -1039,6 +1056,23 @@ export const MODELS = {
1039
1056
  contextWindow: 128000,
1040
1057
  maxTokens: 4096,
1041
1058
  },
1059
+ "nvidia.nemotron-super-3-120b": {
1060
+ id: "nvidia.nemotron-super-3-120b",
1061
+ name: "NVIDIA Nemotron 3 Super 120B A12B",
1062
+ api: "bedrock-converse-stream",
1063
+ provider: "amazon-bedrock",
1064
+ baseUrl: "https://bedrock-runtime.us-east-1.amazonaws.com",
1065
+ reasoning: true,
1066
+ input: ["text"],
1067
+ cost: {
1068
+ input: 0.15,
1069
+ output: 0.65,
1070
+ cacheRead: 0,
1071
+ cacheWrite: 0,
1072
+ },
1073
+ contextWindow: 262144,
1074
+ maxTokens: 131072,
1075
+ },
1042
1076
  "openai.gpt-oss-120b-1:0": {
1043
1077
  id: "openai.gpt-oss-120b-1:0",
1044
1078
  name: "gpt-oss-120b",
@@ -1413,6 +1447,23 @@ export const MODELS = {
1413
1447
  contextWindow: 200000,
1414
1448
  maxTokens: 131072,
1415
1449
  },
1450
+ "zai.glm-5": {
1451
+ id: "zai.glm-5",
1452
+ name: "GLM-5",
1453
+ api: "bedrock-converse-stream",
1454
+ provider: "amazon-bedrock",
1455
+ baseUrl: "https://bedrock-runtime.us-east-1.amazonaws.com",
1456
+ reasoning: true,
1457
+ input: ["text"],
1458
+ cost: {
1459
+ input: 1,
1460
+ output: 3.2,
1461
+ cacheRead: 0,
1462
+ cacheWrite: 0,
1463
+ },
1464
+ contextWindow: 202752,
1465
+ maxTokens: 101376,
1466
+ },
1416
1467
  },
1417
1468
  "anthropic": {
1418
1469
  "claude-3-5-haiku-20241022": {
@@ -2575,7 +2626,7 @@ export const MODELS = {
2575
2626
  cacheRead: 0,
2576
2627
  cacheWrite: 0,
2577
2628
  },
2578
- contextWindow: 128000,
2629
+ contextWindow: 144000,
2579
2630
  maxTokens: 32000,
2580
2631
  },
2581
2632
  "claude-opus-4.5": {
@@ -2593,7 +2644,7 @@ export const MODELS = {
2593
2644
  cacheRead: 0,
2594
2645
  cacheWrite: 0,
2595
2646
  },
2596
- contextWindow: 128000,
2647
+ contextWindow: 160000,
2597
2648
  maxTokens: 32000,
2598
2649
  },
2599
2650
  "claude-opus-4.6": {
@@ -2629,7 +2680,7 @@ export const MODELS = {
2629
2680
  cacheRead: 0,
2630
2681
  cacheWrite: 0,
2631
2682
  },
2632
- contextWindow: 128000,
2683
+ contextWindow: 216000,
2633
2684
  maxTokens: 16000,
2634
2685
  },
2635
2686
  "claude-sonnet-4.5": {
@@ -2647,7 +2698,7 @@ export const MODELS = {
2647
2698
  cacheRead: 0,
2648
2699
  cacheWrite: 0,
2649
2700
  },
2650
- contextWindow: 128000,
2701
+ contextWindow: 144000,
2651
2702
  maxTokens: 32000,
2652
2703
  },
2653
2704
  "claude-sonnet-4.6": {
@@ -2760,7 +2811,7 @@ export const MODELS = {
2760
2811
  cacheRead: 0,
2761
2812
  cacheWrite: 0,
2762
2813
  },
2763
- contextWindow: 64000,
2814
+ contextWindow: 128000,
2764
2815
  maxTokens: 16384,
2765
2816
  },
2766
2817
  "gpt-4o": {
@@ -2779,8 +2830,8 @@ export const MODELS = {
2779
2830
  cacheRead: 0,
2780
2831
  cacheWrite: 0,
2781
2832
  },
2782
- contextWindow: 64000,
2783
- maxTokens: 16384,
2833
+ contextWindow: 128000,
2834
+ maxTokens: 4096,
2784
2835
  },
2785
2836
  "gpt-5": {
2786
2837
  id: "gpt-5",
@@ -2815,7 +2866,7 @@ export const MODELS = {
2815
2866
  cacheRead: 0,
2816
2867
  cacheWrite: 0,
2817
2868
  },
2818
- contextWindow: 128000,
2869
+ contextWindow: 264000,
2819
2870
  maxTokens: 64000,
2820
2871
  },
2821
2872
  "gpt-5.1": {
@@ -2833,7 +2884,7 @@ export const MODELS = {
2833
2884
  cacheRead: 0,
2834
2885
  cacheWrite: 0,
2835
2886
  },
2836
- contextWindow: 128000,
2887
+ contextWindow: 264000,
2837
2888
  maxTokens: 64000,
2838
2889
  },
2839
2890
  "gpt-5.1-codex": {
@@ -2851,7 +2902,7 @@ export const MODELS = {
2851
2902
  cacheRead: 0,
2852
2903
  cacheWrite: 0,
2853
2904
  },
2854
- contextWindow: 128000,
2905
+ contextWindow: 400000,
2855
2906
  maxTokens: 128000,
2856
2907
  },
2857
2908
  "gpt-5.1-codex-max": {
@@ -2869,7 +2920,7 @@ export const MODELS = {
2869
2920
  cacheRead: 0,
2870
2921
  cacheWrite: 0,
2871
2922
  },
2872
- contextWindow: 128000,
2923
+ contextWindow: 400000,
2873
2924
  maxTokens: 128000,
2874
2925
  },
2875
2926
  "gpt-5.1-codex-mini": {
@@ -2887,7 +2938,7 @@ export const MODELS = {
2887
2938
  cacheRead: 0,
2888
2939
  cacheWrite: 0,
2889
2940
  },
2890
- contextWindow: 128000,
2941
+ contextWindow: 400000,
2891
2942
  maxTokens: 128000,
2892
2943
  },
2893
2944
  "gpt-5.2": {
@@ -4495,91 +4546,6 @@ export const MODELS = {
4495
4546
  },
4496
4547
  },
4497
4548
  "minimax": {
4498
- "MiniMax-M2": {
4499
- id: "MiniMax-M2",
4500
- name: "MiniMax-M2",
4501
- api: "anthropic-messages",
4502
- provider: "minimax",
4503
- baseUrl: "https://api.minimax.io/anthropic",
4504
- reasoning: true,
4505
- input: ["text"],
4506
- cost: {
4507
- input: 0.3,
4508
- output: 1.2,
4509
- cacheRead: 0,
4510
- cacheWrite: 0,
4511
- },
4512
- contextWindow: 204800,
4513
- maxTokens: 131072,
4514
- },
4515
- "MiniMax-M2.1": {
4516
- id: "MiniMax-M2.1",
4517
- name: "MiniMax-M2.1",
4518
- api: "anthropic-messages",
4519
- provider: "minimax",
4520
- baseUrl: "https://api.minimax.io/anthropic",
4521
- reasoning: true,
4522
- input: ["text"],
4523
- cost: {
4524
- input: 0.3,
4525
- output: 1.2,
4526
- cacheRead: 0,
4527
- cacheWrite: 0,
4528
- },
4529
- contextWindow: 204800,
4530
- maxTokens: 131072,
4531
- },
4532
- "MiniMax-M2.1-highspeed": {
4533
- id: "MiniMax-M2.1-highspeed",
4534
- name: "MiniMax-M2.1-highspeed",
4535
- api: "anthropic-messages",
4536
- provider: "minimax",
4537
- baseUrl: "https://api.minimax.io/anthropic",
4538
- reasoning: true,
4539
- input: ["text"],
4540
- cost: {
4541
- input: 0.6,
4542
- output: 2.4,
4543
- cacheRead: 0,
4544
- cacheWrite: 0,
4545
- },
4546
- contextWindow: 204800,
4547
- maxTokens: 131072,
4548
- },
4549
- "MiniMax-M2.5": {
4550
- id: "MiniMax-M2.5",
4551
- name: "MiniMax-M2.5",
4552
- api: "anthropic-messages",
4553
- provider: "minimax",
4554
- baseUrl: "https://api.minimax.io/anthropic",
4555
- reasoning: true,
4556
- input: ["text"],
4557
- cost: {
4558
- input: 0.3,
4559
- output: 1.2,
4560
- cacheRead: 0.03,
4561
- cacheWrite: 0.375,
4562
- },
4563
- contextWindow: 204800,
4564
- maxTokens: 131072,
4565
- },
4566
- "MiniMax-M2.5-highspeed": {
4567
- id: "MiniMax-M2.5-highspeed",
4568
- name: "MiniMax-M2.5-highspeed",
4569
- api: "anthropic-messages",
4570
- provider: "minimax",
4571
- baseUrl: "https://api.minimax.io/anthropic",
4572
- reasoning: true,
4573
- input: ["text"],
4574
- cost: {
4575
- input: 0.6,
4576
- output: 2.4,
4577
- cacheRead: 0.06,
4578
- cacheWrite: 0.375,
4579
- },
4580
- contextWindow: 204800,
4581
- maxTokens: 131072,
4582
- },
4583
4549
  "MiniMax-M2.7": {
4584
4550
  id: "MiniMax-M2.7",
4585
4551
  name: "MiniMax-M2.7",
@@ -4616,91 +4582,6 @@ export const MODELS = {
4616
4582
  },
4617
4583
  },
4618
4584
  "minimax-cn": {
4619
- "MiniMax-M2": {
4620
- id: "MiniMax-M2",
4621
- name: "MiniMax-M2",
4622
- api: "anthropic-messages",
4623
- provider: "minimax-cn",
4624
- baseUrl: "https://api.minimaxi.com/anthropic",
4625
- reasoning: true,
4626
- input: ["text"],
4627
- cost: {
4628
- input: 0.3,
4629
- output: 1.2,
4630
- cacheRead: 0,
4631
- cacheWrite: 0,
4632
- },
4633
- contextWindow: 204800,
4634
- maxTokens: 131072,
4635
- },
4636
- "MiniMax-M2.1": {
4637
- id: "MiniMax-M2.1",
4638
- name: "MiniMax-M2.1",
4639
- api: "anthropic-messages",
4640
- provider: "minimax-cn",
4641
- baseUrl: "https://api.minimaxi.com/anthropic",
4642
- reasoning: true,
4643
- input: ["text"],
4644
- cost: {
4645
- input: 0.3,
4646
- output: 1.2,
4647
- cacheRead: 0,
4648
- cacheWrite: 0,
4649
- },
4650
- contextWindow: 204800,
4651
- maxTokens: 131072,
4652
- },
4653
- "MiniMax-M2.1-highspeed": {
4654
- id: "MiniMax-M2.1-highspeed",
4655
- name: "MiniMax-M2.1-highspeed",
4656
- api: "anthropic-messages",
4657
- provider: "minimax-cn",
4658
- baseUrl: "https://api.minimaxi.com/anthropic",
4659
- reasoning: true,
4660
- input: ["text"],
4661
- cost: {
4662
- input: 0.6,
4663
- output: 2.4,
4664
- cacheRead: 0,
4665
- cacheWrite: 0,
4666
- },
4667
- contextWindow: 204800,
4668
- maxTokens: 131072,
4669
- },
4670
- "MiniMax-M2.5": {
4671
- id: "MiniMax-M2.5",
4672
- name: "MiniMax-M2.5",
4673
- api: "anthropic-messages",
4674
- provider: "minimax-cn",
4675
- baseUrl: "https://api.minimaxi.com/anthropic",
4676
- reasoning: true,
4677
- input: ["text"],
4678
- cost: {
4679
- input: 0.3,
4680
- output: 1.2,
4681
- cacheRead: 0.03,
4682
- cacheWrite: 0.375,
4683
- },
4684
- contextWindow: 204800,
4685
- maxTokens: 131072,
4686
- },
4687
- "MiniMax-M2.5-highspeed": {
4688
- id: "MiniMax-M2.5-highspeed",
4689
- name: "MiniMax-M2.5-highspeed",
4690
- api: "anthropic-messages",
4691
- provider: "minimax-cn",
4692
- baseUrl: "https://api.minimaxi.com/anthropic",
4693
- reasoning: true,
4694
- input: ["text"],
4695
- cost: {
4696
- input: 0.6,
4697
- output: 2.4,
4698
- cacheRead: 0.06,
4699
- cacheWrite: 0.375,
4700
- },
4701
- contextWindow: 204800,
4702
- maxTokens: 131072,
4703
- },
4704
4585
  "MiniMax-M2.7": {
4705
4586
  id: "MiniMax-M2.7",
4706
4587
  name: "MiniMax-M2.7",
@@ -7290,12 +7171,12 @@ export const MODELS = {
7290
7171
  input: ["text"],
7291
7172
  cost: {
7292
7173
  input: 0.21,
7293
- output: 0.78,
7294
- cacheRead: 0.105,
7174
+ output: 0.7899999999999999,
7175
+ cacheRead: 0.1300000002,
7295
7176
  cacheWrite: 0,
7296
7177
  },
7297
7178
  contextWindow: 163840,
7298
- maxTokens: 65536,
7179
+ maxTokens: 4096,
7299
7180
  },
7300
7181
  "deepseek/deepseek-v3.2": {
7301
7182
  id: "deepseek/deepseek-v3.2",
@@ -7501,23 +7382,6 @@ export const MODELS = {
7501
7382
  contextWindow: 1048576,
7502
7383
  maxTokens: 65536,
7503
7384
  },
7504
- "google/gemini-3-pro-preview": {
7505
- id: "google/gemini-3-pro-preview",
7506
- name: "Google: Gemini 3 Pro Preview",
7507
- api: "openai-completions",
7508
- provider: "openrouter",
7509
- baseUrl: "https://openrouter.ai/api/v1",
7510
- reasoning: true,
7511
- input: ["text", "image"],
7512
- cost: {
7513
- input: 2,
7514
- output: 12,
7515
- cacheRead: 0.19999999999999998,
7516
- cacheWrite: 0.375,
7517
- },
7518
- contextWindow: 1048576,
7519
- maxTokens: 65536,
7520
- },
7521
7385
  "google/gemini-3.1-flash-lite-preview": {
7522
7386
  id: "google/gemini-3.1-flash-lite-preview",
7523
7387
  name: "Google: Gemini 3.1 Flash Lite Preview",
@@ -8292,13 +8156,13 @@ export const MODELS = {
8292
8156
  reasoning: false,
8293
8157
  input: ["text"],
8294
8158
  cost: {
8295
- input: 0.55,
8296
- output: 2.2,
8159
+ input: 0.5700000000000001,
8160
+ output: 2.3,
8297
8161
  cacheRead: 0,
8298
8162
  cacheWrite: 0,
8299
8163
  },
8300
- contextWindow: 131000,
8301
- maxTokens: 4096,
8164
+ contextWindow: 131072,
8165
+ maxTokens: 131072,
8302
8166
  },
8303
8167
  "moonshotai/kimi-k2-0905": {
8304
8168
  id: "moonshotai/kimi-k2-0905",
@@ -8360,8 +8224,8 @@ export const MODELS = {
8360
8224
  reasoning: false,
8361
8225
  input: ["text"],
8362
8226
  cost: {
8363
- input: 0.27,
8364
- output: 1,
8227
+ input: 0.135,
8228
+ output: 0.5,
8365
8229
  cacheRead: 0,
8366
8230
  cacheWrite: 0,
8367
8231
  },
@@ -8447,7 +8311,7 @@ export const MODELS = {
8447
8311
  cost: {
8448
8312
  input: 0.09999999999999999,
8449
8313
  output: 0.5,
8450
- cacheRead: 0.04,
8314
+ cacheRead: 0.09999999999999999,
8451
8315
  cacheWrite: 0,
8452
8316
  },
8453
8317
  contextWindow: 262144,
@@ -9262,12 +9126,12 @@ export const MODELS = {
9262
9126
  input: ["text"],
9263
9127
  cost: {
9264
9128
  input: 0.03,
9265
- output: 0.14,
9266
- cacheRead: 0,
9129
+ output: 0.11,
9130
+ cacheRead: 0.015,
9267
9131
  cacheWrite: 0,
9268
9132
  },
9269
9133
  contextWindow: 131072,
9270
- maxTokens: 4096,
9134
+ maxTokens: 131072,
9271
9135
  },
9272
9136
  "openai/gpt-oss-20b:free": {
9273
9137
  id: "openai/gpt-oss-20b:free",
@@ -9539,7 +9403,7 @@ export const MODELS = {
9539
9403
  cacheWrite: 0,
9540
9404
  },
9541
9405
  contextWindow: 32768,
9542
- maxTokens: 4096,
9406
+ maxTokens: 32768,
9543
9407
  },
9544
9408
  "qwen/qwen-max": {
9545
9409
  id: "qwen/qwen-max",
@@ -10202,7 +10066,7 @@ export const MODELS = {
10202
10066
  cacheWrite: 0,
10203
10067
  },
10204
10068
  contextWindow: 256000,
10205
- maxTokens: 4096,
10069
+ maxTokens: 65536,
10206
10070
  },
10207
10071
  "qwen/qwen3.5-flash-02-23": {
10208
10072
  id: "qwen/qwen3.5-flash-02-23",
@@ -10255,6 +10119,23 @@ export const MODELS = {
10255
10119
  contextWindow: 131072,
10256
10120
  maxTokens: 131072,
10257
10121
  },
10122
+ "reka/reka-edge": {
10123
+ id: "reka/reka-edge",
10124
+ name: "Reka Edge",
10125
+ api: "openai-completions",
10126
+ provider: "openrouter",
10127
+ baseUrl: "https://openrouter.ai/api/v1",
10128
+ reasoning: false,
10129
+ input: ["text", "image"],
10130
+ cost: {
10131
+ input: 0.09999999999999999,
10132
+ output: 0.09999999999999999,
10133
+ cacheRead: 0,
10134
+ cacheWrite: 0,
10135
+ },
10136
+ contextWindow: 16384,
10137
+ maxTokens: 16384,
10138
+ },
10258
10139
  "relace/relace-search": {
10259
10140
  id: "relace/relace-search",
10260
10141
  name: "Relace: Relace Search",
@@ -10320,8 +10201,8 @@ export const MODELS = {
10320
10201
  cacheRead: 0.02,
10321
10202
  cacheWrite: 0,
10322
10203
  },
10323
- contextWindow: 256000,
10324
- maxTokens: 256000,
10204
+ contextWindow: 262144,
10205
+ maxTokens: 4096,
10325
10206
  },
10326
10207
  "stepfun/step-3.5-flash:free": {
10327
10208
  id: "stepfun/step-3.5-flash:free",
@@ -10383,9 +10264,9 @@ export const MODELS = {
10383
10264
  reasoning: true,
10384
10265
  input: ["text"],
10385
10266
  cost: {
10386
- input: 0.25,
10387
- output: 0.85,
10388
- cacheRead: 0.125,
10267
+ input: 0.3,
10268
+ output: 1.1,
10269
+ cacheRead: 0.15,
10389
10270
  cacheWrite: 0,
10390
10271
  },
10391
10272
  contextWindow: 163840,
@@ -10791,9 +10672,9 @@ export const MODELS = {
10791
10672
  reasoning: true,
10792
10673
  input: ["text"],
10793
10674
  cost: {
10794
- input: 0.96,
10795
- output: 3.1999999999999997,
10796
- cacheRead: 0.192,
10675
+ input: 1.2,
10676
+ output: 4,
10677
+ cacheRead: 0.24,
10797
10678
  cacheWrite: 0,
10798
10679
  },
10799
10680
  contextWindow: 202752,
@@ -12799,8 +12680,8 @@ export const MODELS = {
12799
12680
  reasoning: false,
12800
12681
  input: ["text", "image"],
12801
12682
  cost: {
12802
- input: 1,
12803
- output: 1,
12683
+ input: 0,
12684
+ output: 0,
12804
12685
  cacheRead: 0,
12805
12686
  cacheWrite: 0,
12806
12687
  },
@@ -12816,8 +12697,8 @@ export const MODELS = {
12816
12697
  reasoning: false,
12817
12698
  input: ["text", "image"],
12818
12699
  cost: {
12819
- input: 3,
12820
- output: 15,
12700
+ input: 0,
12701
+ output: 0,
12821
12702
  cacheRead: 0,
12822
12703
  cacheWrite: 0,
12823
12704
  },
@@ -13011,6 +12892,23 @@ export const MODELS = {
13011
12892
  contextWindow: 2000000,
13012
12893
  maxTokens: 30000,
13013
12894
  },
12895
+ "xai/grok-4.20-multi-agent": {
12896
+ id: "xai/grok-4.20-multi-agent",
12897
+ name: "Grok 4.20 Multi-Agent",
12898
+ api: "anthropic-messages",
12899
+ provider: "vercel-ai-gateway",
12900
+ baseUrl: "https://ai-gateway.vercel.sh",
12901
+ reasoning: true,
12902
+ input: ["text"],
12903
+ cost: {
12904
+ input: 2,
12905
+ output: 6,
12906
+ cacheRead: 0.19999999999999998,
12907
+ cacheWrite: 0,
12908
+ },
12909
+ contextWindow: 2000000,
12910
+ maxTokens: 2000000,
12911
+ },
13014
12912
  "xai/grok-4.20-multi-agent-beta": {
13015
12913
  id: "xai/grok-4.20-multi-agent-beta",
13016
12914
  name: "Grok 4.20 Multi Agent Beta",
@@ -13028,6 +12926,23 @@ export const MODELS = {
13028
12926
  contextWindow: 2000000,
13029
12927
  maxTokens: 2000000,
13030
12928
  },
12929
+ "xai/grok-4.20-non-reasoning": {
12930
+ id: "xai/grok-4.20-non-reasoning",
12931
+ name: "Grok 4.20 Non-Reasoning",
12932
+ api: "anthropic-messages",
12933
+ provider: "vercel-ai-gateway",
12934
+ baseUrl: "https://ai-gateway.vercel.sh",
12935
+ reasoning: false,
12936
+ input: ["text", "image"],
12937
+ cost: {
12938
+ input: 2,
12939
+ output: 6,
12940
+ cacheRead: 0.19999999999999998,
12941
+ cacheWrite: 0,
12942
+ },
12943
+ contextWindow: 2000000,
12944
+ maxTokens: 2000000,
12945
+ },
13031
12946
  "xai/grok-4.20-non-reasoning-beta": {
13032
12947
  id: "xai/grok-4.20-non-reasoning-beta",
13033
12948
  name: "Grok 4.20 Beta Non-Reasoning",
@@ -13045,6 +12960,23 @@ export const MODELS = {
13045
12960
  contextWindow: 2000000,
13046
12961
  maxTokens: 2000000,
13047
12962
  },
12963
+ "xai/grok-4.20-reasoning": {
12964
+ id: "xai/grok-4.20-reasoning",
12965
+ name: "Grok 4.20 Reasoning",
12966
+ api: "anthropic-messages",
12967
+ provider: "vercel-ai-gateway",
12968
+ baseUrl: "https://ai-gateway.vercel.sh",
12969
+ reasoning: true,
12970
+ input: ["text", "image"],
12971
+ cost: {
12972
+ input: 2,
12973
+ output: 6,
12974
+ cacheRead: 0.19999999999999998,
12975
+ cacheWrite: 0,
12976
+ },
12977
+ contextWindow: 2000000,
12978
+ maxTokens: 2000000,
12979
+ },
13048
12980
  "xai/grok-4.20-reasoning-beta": {
13049
12981
  id: "xai/grok-4.20-reasoning-beta",
13050
12982
  name: "Grok 4.20 Beta Reasoning",
@@ -13625,9 +13557,9 @@ export const MODELS = {
13625
13557
  contextWindow: 2000000,
13626
13558
  maxTokens: 30000,
13627
13559
  },
13628
- "grok-4.20-beta-latest-non-reasoning": {
13629
- id: "grok-4.20-beta-latest-non-reasoning",
13630
- name: "Grok 4.20 Beta (Non-Reasoning)",
13560
+ "grok-4.20-0309-non-reasoning": {
13561
+ id: "grok-4.20-0309-non-reasoning",
13562
+ name: "Grok 4.20 (Non-Reasoning)",
13631
13563
  api: "openai-completions",
13632
13564
  provider: "xai",
13633
13565
  baseUrl: "https://api.x.ai/v1",
@@ -13642,9 +13574,9 @@ export const MODELS = {
13642
13574
  contextWindow: 2000000,
13643
13575
  maxTokens: 30000,
13644
13576
  },
13645
- "grok-4.20-beta-latest-reasoning": {
13646
- id: "grok-4.20-beta-latest-reasoning",
13647
- name: "Grok 4.20 Beta (Reasoning)",
13577
+ "grok-4.20-0309-reasoning": {
13578
+ id: "grok-4.20-0309-reasoning",
13579
+ name: "Grok 4.20 (Reasoning)",
13648
13580
  api: "openai-completions",
13649
13581
  provider: "xai",
13650
13582
  baseUrl: "https://api.x.ai/v1",
@@ -13856,6 +13788,24 @@ export const MODELS = {
13856
13788
  contextWindow: 200000,
13857
13789
  maxTokens: 131072,
13858
13790
  },
13791
+ "glm-4.7-flashx": {
13792
+ id: "glm-4.7-flashx",
13793
+ name: "GLM-4.7-FlashX",
13794
+ api: "openai-completions",
13795
+ provider: "zai",
13796
+ baseUrl: "https://api.z.ai/api/coding/paas/v4",
13797
+ compat: { "supportsDeveloperRole": false, "thinkingFormat": "zai" },
13798
+ reasoning: true,
13799
+ input: ["text"],
13800
+ cost: {
13801
+ input: 0.07,
13802
+ output: 0.4,
13803
+ cacheRead: 0.01,
13804
+ cacheWrite: 0,
13805
+ },
13806
+ contextWindow: 200000,
13807
+ maxTokens: 131072,
13808
+ },
13859
13809
  "glm-5": {
13860
13810
  id: "glm-5",
13861
13811
  name: "GLM-5",