@mariozechner/pi-ai 0.52.9 → 0.52.10

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (30)
  1. package/dist/models.generated.d.ts +288 -170
  2. package/dist/models.generated.d.ts.map +1 -1
  3. package/dist/models.generated.js +374 -241
  4. package/dist/models.generated.js.map +1 -1
  5. package/dist/providers/amazon-bedrock.d.ts.map +1 -1
  6. package/dist/providers/amazon-bedrock.js +2 -2
  7. package/dist/providers/amazon-bedrock.js.map +1 -1
  8. package/dist/providers/anthropic.d.ts.map +1 -1
  9. package/dist/providers/anthropic.js +51 -19
  10. package/dist/providers/anthropic.js.map +1 -1
  11. package/dist/providers/github-copilot-headers.d.ts +8 -0
  12. package/dist/providers/github-copilot-headers.d.ts.map +1 -0
  13. package/dist/providers/github-copilot-headers.js +29 -0
  14. package/dist/providers/github-copilot-headers.js.map +1 -0
  15. package/dist/providers/openai-completions.d.ts.map +1 -1
  16. package/dist/providers/openai-completions.js +7 -25
  17. package/dist/providers/openai-completions.js.map +1 -1
  18. package/dist/providers/openai-responses-shared.d.ts.map +1 -1
  19. package/dist/providers/openai-responses-shared.js +2 -2
  20. package/dist/providers/openai-responses-shared.js.map +1 -1
  21. package/dist/providers/openai-responses.d.ts.map +1 -1
  22. package/dist/providers/openai-responses.js +6 -20
  23. package/dist/providers/openai-responses.js.map +1 -1
  24. package/dist/providers/simple-options.d.ts.map +1 -1
  25. package/dist/providers/simple-options.js +1 -0
  26. package/dist/providers/simple-options.js.map +1 -1
  27. package/dist/types.d.ts +6 -0
  28. package/dist/types.d.ts.map +1 -1
  29. package/dist/types.js.map +1 -1
  30. package/package.json +1 -1
@@ -799,6 +799,23 @@ export declare const MODELS: {
799
799
  contextWindow: number;
800
800
  maxTokens: number;
801
801
  };
802
+ readonly "minimax.minimax-m2.1": {
803
+ id: string;
804
+ name: string;
805
+ api: "bedrock-converse-stream";
806
+ provider: string;
807
+ baseUrl: string;
808
+ reasoning: true;
809
+ input: "text"[];
810
+ cost: {
811
+ input: number;
812
+ output: number;
813
+ cacheRead: number;
814
+ cacheWrite: number;
815
+ };
816
+ contextWindow: number;
817
+ maxTokens: number;
818
+ };
802
819
  readonly "mistral.ministral-3-14b-instruct": {
803
820
  id: string;
804
821
  name: string;
@@ -901,6 +918,23 @@ export declare const MODELS: {
901
918
  contextWindow: number;
902
919
  maxTokens: number;
903
920
  };
921
+ readonly "moonshotai.kimi-k2.5": {
922
+ id: string;
923
+ name: string;
924
+ api: "bedrock-converse-stream";
925
+ provider: string;
926
+ baseUrl: string;
927
+ reasoning: true;
928
+ input: ("image" | "text")[];
929
+ cost: {
930
+ input: number;
931
+ output: number;
932
+ cacheRead: number;
933
+ cacheWrite: number;
934
+ };
935
+ contextWindow: number;
936
+ maxTokens: number;
937
+ };
904
938
  readonly "nvidia.nemotron-nano-12b-v2": {
905
939
  id: string;
906
940
  name: string;
@@ -1224,6 +1258,40 @@ export declare const MODELS: {
1224
1258
  contextWindow: number;
1225
1259
  maxTokens: number;
1226
1260
  };
1261
+ readonly "zai.glm-4.7": {
1262
+ id: string;
1263
+ name: string;
1264
+ api: "bedrock-converse-stream";
1265
+ provider: string;
1266
+ baseUrl: string;
1267
+ reasoning: true;
1268
+ input: "text"[];
1269
+ cost: {
1270
+ input: number;
1271
+ output: number;
1272
+ cacheRead: number;
1273
+ cacheWrite: number;
1274
+ };
1275
+ contextWindow: number;
1276
+ maxTokens: number;
1277
+ };
1278
+ readonly "zai.glm-4.7-flash": {
1279
+ id: string;
1280
+ name: string;
1281
+ api: "bedrock-converse-stream";
1282
+ provider: string;
1283
+ baseUrl: string;
1284
+ reasoning: true;
1285
+ input: "text"[];
1286
+ cost: {
1287
+ input: number;
1288
+ output: number;
1289
+ cacheRead: number;
1290
+ cacheWrite: number;
1291
+ };
1292
+ contextWindow: number;
1293
+ maxTokens: number;
1294
+ };
1227
1295
  };
1228
1296
  readonly anthropic: {
1229
1297
  readonly "claude-3-5-haiku-20241022": {
@@ -2061,6 +2129,23 @@ export declare const MODELS: {
2061
2129
  contextWindow: number;
2062
2130
  maxTokens: number;
2063
2131
  };
2132
+ readonly "gpt-5.3-codex-spark": {
2133
+ id: string;
2134
+ name: string;
2135
+ api: "azure-openai-responses";
2136
+ provider: string;
2137
+ baseUrl: string;
2138
+ reasoning: true;
2139
+ input: ("image" | "text")[];
2140
+ cost: {
2141
+ input: number;
2142
+ output: number;
2143
+ cacheRead: number;
2144
+ cacheWrite: number;
2145
+ };
2146
+ contextWindow: number;
2147
+ maxTokens: number;
2148
+ };
2064
2149
  readonly o1: {
2065
2150
  id: string;
2066
2151
  name: string;
@@ -2216,6 +2301,23 @@ export declare const MODELS: {
2216
2301
  contextWindow: number;
2217
2302
  maxTokens: number;
2218
2303
  };
2304
+ readonly "llama3.1-8b": {
2305
+ id: string;
2306
+ name: string;
2307
+ api: "openai-completions";
2308
+ provider: string;
2309
+ baseUrl: string;
2310
+ reasoning: false;
2311
+ input: "text"[];
2312
+ cost: {
2313
+ input: number;
2314
+ output: number;
2315
+ cacheRead: number;
2316
+ cacheWrite: number;
2317
+ };
2318
+ contextWindow: number;
2319
+ maxTokens: number;
2320
+ };
2219
2321
  readonly "qwen-3-235b-a22b-instruct-2507": {
2220
2322
  id: string;
2221
2323
  name: string;
@@ -2255,7 +2357,7 @@ export declare const MODELS: {
2255
2357
  readonly "claude-haiku-4.5": {
2256
2358
  id: string;
2257
2359
  name: string;
2258
- api: "openai-completions";
2360
+ api: "anthropic-messages";
2259
2361
  provider: string;
2260
2362
  baseUrl: string;
2261
2363
  headers: {
@@ -2264,11 +2366,6 @@ export declare const MODELS: {
2264
2366
  "Editor-Plugin-Version": string;
2265
2367
  "Copilot-Integration-Id": string;
2266
2368
  };
2267
- compat: {
2268
- supportsStore: false;
2269
- supportsDeveloperRole: false;
2270
- supportsReasoningEffort: false;
2271
- };
2272
2369
  reasoning: true;
2273
2370
  input: ("image" | "text")[];
2274
2371
  cost: {
@@ -2283,7 +2380,7 @@ export declare const MODELS: {
2283
2380
  readonly "claude-opus-4.5": {
2284
2381
  id: string;
2285
2382
  name: string;
2286
- api: "openai-completions";
2383
+ api: "anthropic-messages";
2287
2384
  provider: string;
2288
2385
  baseUrl: string;
2289
2386
  headers: {
@@ -2292,11 +2389,6 @@ export declare const MODELS: {
2292
2389
  "Editor-Plugin-Version": string;
2293
2390
  "Copilot-Integration-Id": string;
2294
2391
  };
2295
- compat: {
2296
- supportsStore: false;
2297
- supportsDeveloperRole: false;
2298
- supportsReasoningEffort: false;
2299
- };
2300
2392
  reasoning: true;
2301
2393
  input: ("image" | "text")[];
2302
2394
  cost: {
@@ -2311,7 +2403,7 @@ export declare const MODELS: {
2311
2403
  readonly "claude-opus-4.6": {
2312
2404
  id: string;
2313
2405
  name: string;
2314
- api: "openai-completions";
2406
+ api: "anthropic-messages";
2315
2407
  provider: string;
2316
2408
  baseUrl: string;
2317
2409
  headers: {
@@ -2320,11 +2412,6 @@ export declare const MODELS: {
2320
2412
  "Editor-Plugin-Version": string;
2321
2413
  "Copilot-Integration-Id": string;
2322
2414
  };
2323
- compat: {
2324
- supportsStore: false;
2325
- supportsDeveloperRole: false;
2326
- supportsReasoningEffort: false;
2327
- };
2328
2415
  reasoning: true;
2329
2416
  input: ("image" | "text")[];
2330
2417
  cost: {
@@ -2339,7 +2426,7 @@ export declare const MODELS: {
2339
2426
  readonly "claude-sonnet-4": {
2340
2427
  id: string;
2341
2428
  name: string;
2342
- api: "openai-completions";
2429
+ api: "anthropic-messages";
2343
2430
  provider: string;
2344
2431
  baseUrl: string;
2345
2432
  headers: {
@@ -2348,11 +2435,6 @@ export declare const MODELS: {
2348
2435
  "Editor-Plugin-Version": string;
2349
2436
  "Copilot-Integration-Id": string;
2350
2437
  };
2351
- compat: {
2352
- supportsStore: false;
2353
- supportsDeveloperRole: false;
2354
- supportsReasoningEffort: false;
2355
- };
2356
2438
  reasoning: true;
2357
2439
  input: ("image" | "text")[];
2358
2440
  cost: {
@@ -2367,7 +2449,7 @@ export declare const MODELS: {
2367
2449
  readonly "claude-sonnet-4.5": {
2368
2450
  id: string;
2369
2451
  name: string;
2370
- api: "openai-completions";
2452
+ api: "anthropic-messages";
2371
2453
  provider: string;
2372
2454
  baseUrl: string;
2373
2455
  headers: {
@@ -2376,11 +2458,6 @@ export declare const MODELS: {
2376
2458
  "Editor-Plugin-Version": string;
2377
2459
  "Copilot-Integration-Id": string;
2378
2460
  };
2379
- compat: {
2380
- supportsStore: false;
2381
- supportsDeveloperRole: false;
2382
- supportsReasoningEffort: false;
2383
- };
2384
2461
  reasoning: true;
2385
2462
  input: ("image" | "text")[];
2386
2463
  cost: {
@@ -4039,6 +4116,26 @@ export declare const MODELS: {
4039
4116
  contextWindow: number;
4040
4117
  maxTokens: number;
4041
4118
  };
4119
+ readonly "zai-org/GLM-5": {
4120
+ id: string;
4121
+ name: string;
4122
+ api: "openai-completions";
4123
+ provider: string;
4124
+ baseUrl: string;
4125
+ compat: {
4126
+ supportsDeveloperRole: false;
4127
+ };
4128
+ reasoning: true;
4129
+ input: "text"[];
4130
+ cost: {
4131
+ input: number;
4132
+ output: number;
4133
+ cacheRead: number;
4134
+ cacheWrite: number;
4135
+ };
4136
+ contextWindow: number;
4137
+ maxTokens: number;
4138
+ };
4042
4139
  };
4043
4140
  readonly "kimi-coding": {
4044
4141
  readonly k2p5: {
@@ -5035,6 +5132,23 @@ export declare const MODELS: {
5035
5132
  contextWindow: number;
5036
5133
  maxTokens: number;
5037
5134
  };
5135
+ readonly "gpt-5.3-codex-spark": {
5136
+ id: string;
5137
+ name: string;
5138
+ api: "openai-responses";
5139
+ provider: string;
5140
+ baseUrl: string;
5141
+ reasoning: true;
5142
+ input: ("image" | "text")[];
5143
+ cost: {
5144
+ input: number;
5145
+ output: number;
5146
+ cacheRead: number;
5147
+ cacheWrite: number;
5148
+ };
5149
+ contextWindow: number;
5150
+ maxTokens: number;
5151
+ };
5038
5152
  readonly o1: {
5039
5153
  id: string;
5040
5154
  name: string;
@@ -5275,6 +5389,23 @@ export declare const MODELS: {
5275
5389
  contextWindow: number;
5276
5390
  maxTokens: number;
5277
5391
  };
5392
+ readonly "gpt-5.3-codex-spark": {
5393
+ id: string;
5394
+ name: string;
5395
+ api: "openai-codex-responses";
5396
+ provider: string;
5397
+ baseUrl: string;
5398
+ reasoning: true;
5399
+ input: "text"[];
5400
+ cost: {
5401
+ input: number;
5402
+ output: number;
5403
+ cacheRead: number;
5404
+ cacheWrite: number;
5405
+ };
5406
+ contextWindow: number;
5407
+ maxTokens: number;
5408
+ };
5278
5409
  };
5279
5410
  readonly opencode: {
5280
5411
  readonly "big-pickle": {
@@ -5481,23 +5612,6 @@ export declare const MODELS: {
5481
5612
  contextWindow: number;
5482
5613
  maxTokens: number;
5483
5614
  };
5484
- readonly "glm-4.7-free": {
5485
- id: string;
5486
- name: string;
5487
- api: "openai-completions";
5488
- provider: string;
5489
- baseUrl: string;
5490
- reasoning: true;
5491
- input: "text"[];
5492
- cost: {
5493
- input: number;
5494
- output: number;
5495
- cacheRead: number;
5496
- cacheWrite: number;
5497
- };
5498
- contextWindow: number;
5499
- maxTokens: number;
5500
- };
5501
5615
  readonly "gpt-5": {
5502
5616
  id: string;
5503
5617
  name: string;
@@ -5736,47 +5850,13 @@ export declare const MODELS: {
5736
5850
  contextWindow: number;
5737
5851
  maxTokens: number;
5738
5852
  };
5739
- readonly "minimax-m2.1-free": {
5740
- id: string;
5741
- name: string;
5742
- api: "anthropic-messages";
5743
- provider: string;
5744
- baseUrl: string;
5745
- reasoning: true;
5746
- input: "text"[];
5747
- cost: {
5748
- input: number;
5749
- output: number;
5750
- cacheRead: number;
5751
- cacheWrite: number;
5752
- };
5753
- contextWindow: number;
5754
- maxTokens: number;
5755
- };
5756
- readonly "qwen3-coder": {
5853
+ readonly "minimax-m2.5-free": {
5757
5854
  id: string;
5758
5855
  name: string;
5759
5856
  api: "openai-completions";
5760
5857
  provider: string;
5761
5858
  baseUrl: string;
5762
- reasoning: false;
5763
- input: "text"[];
5764
- cost: {
5765
- input: number;
5766
- output: number;
5767
- cacheRead: number;
5768
- cacheWrite: number;
5769
- };
5770
- contextWindow: number;
5771
- maxTokens: number;
5772
- };
5773
- readonly "trinity-large-preview-free": {
5774
- id: string;
5775
- name: string;
5776
- api: "openai-completions";
5777
- provider: string;
5778
- baseUrl: string;
5779
- reasoning: false;
5859
+ reasoning: true;
5780
5860
  input: "text"[];
5781
5861
  cost: {
5782
5862
  input: number;
@@ -5806,23 +5886,6 @@ export declare const MODELS: {
5806
5886
  contextWindow: number;
5807
5887
  maxTokens: number;
5808
5888
  };
5809
- readonly "ai21/jamba-mini-1.7": {
5810
- id: string;
5811
- name: string;
5812
- api: "openai-completions";
5813
- provider: string;
5814
- baseUrl: string;
5815
- reasoning: false;
5816
- input: "text"[];
5817
- cost: {
5818
- input: number;
5819
- output: number;
5820
- cacheRead: number;
5821
- cacheWrite: number;
5822
- };
5823
- contextWindow: number;
5824
- maxTokens: number;
5825
- };
5826
5889
  readonly "alibaba/tongyi-deepresearch-30b-a3b": {
5827
5890
  id: string;
5828
5891
  name: string;
@@ -6945,13 +7008,13 @@ export declare const MODELS: {
6945
7008
  contextWindow: number;
6946
7009
  maxTokens: number;
6947
7010
  };
6948
- readonly "mistralai/codestral-2508": {
7011
+ readonly "minimax/minimax-m2.5": {
6949
7012
  id: string;
6950
7013
  name: string;
6951
7014
  api: "openai-completions";
6952
7015
  provider: string;
6953
7016
  baseUrl: string;
6954
- reasoning: false;
7017
+ reasoning: true;
6955
7018
  input: "text"[];
6956
7019
  cost: {
6957
7020
  input: number;
@@ -6962,7 +7025,7 @@ export declare const MODELS: {
6962
7025
  contextWindow: number;
6963
7026
  maxTokens: number;
6964
7027
  };
6965
- readonly "mistralai/devstral-2512": {
7028
+ readonly "mistralai/codestral-2508": {
6966
7029
  id: string;
6967
7030
  name: string;
6968
7031
  api: "openai-completions";
@@ -6979,7 +7042,7 @@ export declare const MODELS: {
6979
7042
  contextWindow: number;
6980
7043
  maxTokens: number;
6981
7044
  };
6982
- readonly "mistralai/devstral-medium": {
7045
+ readonly "mistralai/devstral-2512": {
6983
7046
  id: string;
6984
7047
  name: string;
6985
7048
  api: "openai-completions";
@@ -6996,7 +7059,7 @@ export declare const MODELS: {
6996
7059
  contextWindow: number;
6997
7060
  maxTokens: number;
6998
7061
  };
6999
- readonly "mistralai/devstral-small": {
7062
+ readonly "mistralai/devstral-medium": {
7000
7063
  id: string;
7001
7064
  name: string;
7002
7065
  api: "openai-completions";
@@ -7013,24 +7076,7 @@ export declare const MODELS: {
7013
7076
  contextWindow: number;
7014
7077
  maxTokens: number;
7015
7078
  };
7016
- readonly "mistralai/ministral-14b-2512": {
7017
- id: string;
7018
- name: string;
7019
- api: "openai-completions";
7020
- provider: string;
7021
- baseUrl: string;
7022
- reasoning: false;
7023
- input: ("image" | "text")[];
7024
- cost: {
7025
- input: number;
7026
- output: number;
7027
- cacheRead: number;
7028
- cacheWrite: number;
7029
- };
7030
- contextWindow: number;
7031
- maxTokens: number;
7032
- };
7033
- readonly "mistralai/ministral-3b": {
7079
+ readonly "mistralai/devstral-small": {
7034
7080
  id: string;
7035
7081
  name: string;
7036
7082
  api: "openai-completions";
@@ -7047,7 +7093,7 @@ export declare const MODELS: {
7047
7093
  contextWindow: number;
7048
7094
  maxTokens: number;
7049
7095
  };
7050
- readonly "mistralai/ministral-3b-2512": {
7096
+ readonly "mistralai/ministral-14b-2512": {
7051
7097
  id: string;
7052
7098
  name: string;
7053
7099
  api: "openai-completions";
@@ -7064,14 +7110,14 @@ export declare const MODELS: {
7064
7110
  contextWindow: number;
7065
7111
  maxTokens: number;
7066
7112
  };
7067
- readonly "mistralai/ministral-8b": {
7113
+ readonly "mistralai/ministral-3b-2512": {
7068
7114
  id: string;
7069
7115
  name: string;
7070
7116
  api: "openai-completions";
7071
7117
  provider: string;
7072
7118
  baseUrl: string;
7073
7119
  reasoning: false;
7074
- input: "text"[];
7120
+ input: ("image" | "text")[];
7075
7121
  cost: {
7076
7122
  input: number;
7077
7123
  output: number;
@@ -7319,23 +7365,6 @@ export declare const MODELS: {
7319
7365
  contextWindow: number;
7320
7366
  maxTokens: number;
7321
7367
  };
7322
- readonly "mistralai/mistral-tiny": {
7323
- id: string;
7324
- name: string;
7325
- api: "openai-completions";
7326
- provider: string;
7327
- baseUrl: string;
7328
- reasoning: false;
7329
- input: "text"[];
7330
- cost: {
7331
- input: number;
7332
- output: number;
7333
- cacheRead: number;
7334
- cacheWrite: number;
7335
- };
7336
- contextWindow: number;
7337
- maxTokens: number;
7338
- };
7339
7368
  readonly "mistralai/mixtral-8x22b-instruct": {
7340
7369
  id: string;
7341
7370
  name: string;
@@ -7370,23 +7399,6 @@ export declare const MODELS: {
7370
7399
  contextWindow: number;
7371
7400
  maxTokens: number;
7372
7401
  };
7373
- readonly "mistralai/pixtral-12b": {
7374
- id: string;
7375
- name: string;
7376
- api: "openai-completions";
7377
- provider: string;
7378
- baseUrl: string;
7379
- reasoning: false;
7380
- input: ("image" | "text")[];
7381
- cost: {
7382
- input: number;
7383
- output: number;
7384
- cacheRead: number;
7385
- cacheWrite: number;
7386
- };
7387
- contextWindow: number;
7388
- maxTokens: number;
7389
- };
7390
7402
  readonly "mistralai/pixtral-large-2411": {
7391
7403
  id: string;
7392
7404
  name: string;
@@ -8526,14 +8538,14 @@ export declare const MODELS: {
8526
8538
  contextWindow: number;
8527
8539
  maxTokens: number;
8528
8540
  };
8529
- readonly "openrouter/auto": {
8541
+ readonly "openrouter/aurora-alpha": {
8530
8542
  id: string;
8531
8543
  name: string;
8532
8544
  api: "openai-completions";
8533
8545
  provider: string;
8534
8546
  baseUrl: string;
8535
8547
  reasoning: true;
8536
- input: ("image" | "text")[];
8548
+ input: "text"[];
8537
8549
  cost: {
8538
8550
  input: number;
8539
8551
  output: number;
@@ -8543,7 +8555,7 @@ export declare const MODELS: {
8543
8555
  contextWindow: number;
8544
8556
  maxTokens: number;
8545
8557
  };
8546
- readonly "openrouter/free": {
8558
+ readonly "openrouter/auto": {
8547
8559
  id: string;
8548
8560
  name: string;
8549
8561
  api: "openai-completions";
@@ -8560,14 +8572,14 @@ export declare const MODELS: {
8560
8572
  contextWindow: number;
8561
8573
  maxTokens: number;
8562
8574
  };
8563
- readonly "openrouter/pony-alpha": {
8575
+ readonly "openrouter/free": {
8564
8576
  id: string;
8565
8577
  name: string;
8566
8578
  api: "openai-completions";
8567
8579
  provider: string;
8568
8580
  baseUrl: string;
8569
8581
  reasoning: true;
8570
- input: "text"[];
8582
+ input: ("image" | "text")[];
8571
8583
  cost: {
8572
8584
  input: number;
8573
8585
  output: number;
@@ -8906,7 +8918,7 @@ export declare const MODELS: {
8906
8918
  api: "openai-completions";
8907
8919
  provider: string;
8908
8920
  baseUrl: string;
8909
- reasoning: true;
8921
+ reasoning: false;
8910
8922
  input: "text"[];
8911
8923
  cost: {
8912
8924
  input: number;
@@ -9036,6 +9048,23 @@ export declare const MODELS: {
9036
9048
  contextWindow: number;
9037
9049
  maxTokens: number;
9038
9050
  };
9051
+ readonly "qwen/qwen3-max-thinking": {
9052
+ id: string;
9053
+ name: string;
9054
+ api: "openai-completions";
9055
+ provider: string;
9056
+ baseUrl: string;
9057
+ reasoning: true;
9058
+ input: "text"[];
9059
+ cost: {
9060
+ input: number;
9061
+ output: number;
9062
+ cacheRead: number;
9063
+ cacheWrite: number;
9064
+ };
9065
+ contextWindow: number;
9066
+ maxTokens: number;
9067
+ };
9039
9068
  readonly "qwen/qwen3-next-80b-a3b-instruct": {
9040
9069
  id: string;
9041
9070
  name: string;
@@ -9155,6 +9184,23 @@ export declare const MODELS: {
9155
9184
  contextWindow: number;
9156
9185
  maxTokens: number;
9157
9186
  };
9187
+ readonly "qwen/qwen3-vl-32b-instruct": {
9188
+ id: string;
9189
+ name: string;
9190
+ api: "openai-completions";
9191
+ provider: string;
9192
+ baseUrl: string;
9193
+ reasoning: false;
9194
+ input: ("image" | "text")[];
9195
+ cost: {
9196
+ input: number;
9197
+ output: number;
9198
+ cacheRead: number;
9199
+ cacheWrite: number;
9200
+ };
9201
+ contextWindow: number;
9202
+ maxTokens: number;
9203
+ };
9158
9204
  readonly "qwen/qwen3-vl-8b-instruct": {
9159
9205
  id: string;
9160
9206
  name: string;
@@ -9257,14 +9303,14 @@ export declare const MODELS: {
9257
9303
  contextWindow: number;
9258
9304
  maxTokens: number;
9259
9305
  };
9260
- readonly "stepfun-ai/step3": {
9306
+ readonly "stepfun/step-3.5-flash": {
9261
9307
  id: string;
9262
9308
  name: string;
9263
9309
  api: "openai-completions";
9264
9310
  provider: string;
9265
9311
  baseUrl: string;
9266
9312
  reasoning: true;
9267
- input: ("image" | "text")[];
9313
+ input: "text"[];
9268
9314
  cost: {
9269
9315
  input: number;
9270
9316
  output: number;
@@ -9716,6 +9762,23 @@ export declare const MODELS: {
9716
9762
  contextWindow: number;
9717
9763
  maxTokens: number;
9718
9764
  };
9765
+ readonly "z-ai/glm-5": {
9766
+ id: string;
9767
+ name: string;
9768
+ api: "openai-completions";
9769
+ provider: string;
9770
+ baseUrl: string;
9771
+ reasoning: true;
9772
+ input: "text"[];
9773
+ cost: {
9774
+ input: number;
9775
+ output: number;
9776
+ cacheRead: number;
9777
+ cacheWrite: number;
9778
+ };
9779
+ contextWindow: number;
9780
+ maxTokens: number;
9781
+ };
9719
9782
  };
9720
9783
  readonly "vercel-ai-gateway": {
9721
9784
  readonly "alibaba/qwen-3-14b": {
@@ -10228,13 +10291,13 @@ export declare const MODELS: {
10228
10291
  contextWindow: number;
10229
10292
  maxTokens: number;
10230
10293
  };
10231
- readonly "deepseek/deepseek-v3.2-exp": {
10294
+ readonly "deepseek/deepseek-v3.2": {
10232
10295
  id: string;
10233
10296
  name: string;
10234
10297
  api: "anthropic-messages";
10235
10298
  provider: string;
10236
10299
  baseUrl: string;
10237
- reasoning: true;
10300
+ reasoning: false;
10238
10301
  input: "text"[];
10239
10302
  cost: {
10240
10303
  input: number;
@@ -10602,6 +10665,23 @@ export declare const MODELS: {
10602
10665
  contextWindow: number;
10603
10666
  maxTokens: number;
10604
10667
  };
10668
+ readonly "minimax/minimax-m2.5": {
10669
+ id: string;
10670
+ name: string;
10671
+ api: "anthropic-messages";
10672
+ provider: string;
10673
+ baseUrl: string;
10674
+ reasoning: true;
10675
+ input: "text"[];
10676
+ cost: {
10677
+ input: number;
10678
+ output: number;
10679
+ cacheRead: number;
10680
+ cacheWrite: number;
10681
+ };
10682
+ contextWindow: number;
10683
+ maxTokens: number;
10684
+ };
10605
10685
  readonly "mistral/codestral": {
10606
10686
  id: string;
10607
10687
  name: string;
@@ -11843,6 +11923,23 @@ export declare const MODELS: {
11843
11923
  contextWindow: number;
11844
11924
  maxTokens: number;
11845
11925
  };
11926
+ readonly "zai/glm-5": {
11927
+ id: string;
11928
+ name: string;
11929
+ api: "anthropic-messages";
11930
+ provider: string;
11931
+ baseUrl: string;
11932
+ reasoning: true;
11933
+ input: "text"[];
11934
+ cost: {
11935
+ input: number;
11936
+ output: number;
11937
+ cacheRead: number;
11938
+ cacheWrite: number;
11939
+ };
11940
+ contextWindow: number;
11941
+ maxTokens: number;
11942
+ };
11846
11943
  };
11847
11944
  readonly xai: {
11848
11945
  readonly "grok-2": {
@@ -12389,6 +12486,27 @@ export declare const MODELS: {
12389
12486
  contextWindow: number;
12390
12487
  maxTokens: number;
12391
12488
  };
12489
+ readonly "glm-5": {
12490
+ id: string;
12491
+ name: string;
12492
+ api: "openai-completions";
12493
+ provider: string;
12494
+ baseUrl: string;
12495
+ compat: {
12496
+ supportsDeveloperRole: false;
12497
+ thinkingFormat: "zai";
12498
+ };
12499
+ reasoning: true;
12500
+ input: "text"[];
12501
+ cost: {
12502
+ input: number;
12503
+ output: number;
12504
+ cacheRead: number;
12505
+ cacheWrite: number;
12506
+ };
12507
+ contextWindow: number;
12508
+ maxTokens: number;
12509
+ };
12392
12510
  };
12393
12511
  };
12394
12512
  //# sourceMappingURL=models.generated.d.ts.map