@mariozechner/pi-ai 0.63.2 → 0.65.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1549,23 +1549,6 @@ export declare const MODELS: {
  contextWindow: number;
  maxTokens: number;
  };
- readonly "claude-3-7-sonnet-latest": {
- id: string;
- name: string;
- api: "anthropic-messages";
- provider: string;
- baseUrl: string;
- reasoning: true;
- input: ("image" | "text")[];
- cost: {
- input: number;
- output: number;
- cacheRead: number;
- cacheWrite: number;
- };
- contextWindow: number;
- maxTokens: number;
- };
  readonly "claude-3-haiku-20240307": {
  id: string;
  name: string;
@@ -2299,6 +2282,23 @@ export declare const MODELS: {
  contextWindow: number;
  maxTokens: number;
  };
+ readonly "gpt-5.3-chat-latest": {
+ id: string;
+ name: string;
+ api: "azure-openai-responses";
+ provider: string;
+ baseUrl: string;
+ reasoning: false;
+ input: ("image" | "text")[];
+ cost: {
+ input: number;
+ output: number;
+ cacheRead: number;
+ cacheWrite: number;
+ };
+ contextWindow: number;
+ maxTokens: number;
+ };
  readonly "gpt-5.3-codex": {
  id: string;
  name: string;
@@ -5208,13 +5208,30 @@ export declare const MODELS: {
  contextWindow: number;
  maxTokens: number;
  };
+ readonly "mistral-small-2603": {
+ id: string;
+ name: string;
+ api: "mistral-conversations";
+ provider: string;
+ baseUrl: string;
+ reasoning: true;
+ input: ("image" | "text")[];
+ cost: {
+ input: number;
+ output: number;
+ cacheRead: number;
+ cacheWrite: number;
+ };
+ contextWindow: number;
+ maxTokens: number;
+ };
  readonly "mistral-small-latest": {
  id: string;
  name: string;
  api: "mistral-conversations";
  provider: string;
  baseUrl: string;
- reasoning: false;
+ reasoning: true;
  input: ("image" | "text")[];
  cost: {
  input: number;
@@ -5754,6 +5771,23 @@ export declare const MODELS: {
  contextWindow: number;
  maxTokens: number;
  };
+ readonly "gpt-5.3-chat-latest": {
+ id: string;
+ name: string;
+ api: "openai-responses";
+ provider: string;
+ baseUrl: string;
+ reasoning: false;
+ input: ("image" | "text")[];
+ cost: {
+ input: number;
+ output: number;
+ cacheRead: number;
+ cacheWrite: number;
+ };
+ contextWindow: number;
+ maxTokens: number;
+ };
  readonly "gpt-5.3-codex": {
  id: string;
  name: string;
@@ -6608,14 +6642,14 @@ export declare const MODELS: {
  contextWindow: number;
  maxTokens: number;
  };
- readonly "mimo-v2-omni-free": {
+ readonly "minimax-m2.5": {
  id: string;
  name: string;
  api: "openai-completions";
  provider: string;
  baseUrl: string;
  reasoning: true;
- input: ("image" | "text")[];
+ input: "text"[];
  cost: {
  input: number;
  output: number;
@@ -6625,10 +6659,10 @@ export declare const MODELS: {
  contextWindow: number;
  maxTokens: number;
  };
- readonly "mimo-v2-pro-free": {
+ readonly "minimax-m2.5-free": {
  id: string;
  name: string;
- api: "openai-completions";
+ api: "anthropic-messages";
  provider: string;
  baseUrl: string;
  reasoning: true;
@@ -6642,7 +6676,7 @@ export declare const MODELS: {
  contextWindow: number;
  maxTokens: number;
  };
- readonly "minimax-m2.5": {
+ readonly "nemotron-3-super-free": {
  id: string;
  name: string;
  api: "openai-completions";
@@ -6659,10 +6693,10 @@ export declare const MODELS: {
  contextWindow: number;
  maxTokens: number;
  };
- readonly "minimax-m2.5-free": {
+ readonly "qwen3.6-plus-free": {
  id: string;
  name: string;
- api: "anthropic-messages";
+ api: "openai-completions";
  provider: string;
  baseUrl: string;
  reasoning: true;
@@ -6676,7 +6710,9 @@ export declare const MODELS: {
  contextWindow: number;
  maxTokens: number;
  };
- readonly "nemotron-3-super-free": {
+ };
+ readonly "opencode-go": {
+ readonly "glm-5": {
  id: string;
  name: string;
  api: "openai-completions";
@@ -6693,16 +6729,14 @@ export declare const MODELS: {
  contextWindow: number;
  maxTokens: number;
  };
- };
- readonly "opencode-go": {
- readonly "glm-5": {
+ readonly "kimi-k2.5": {
  id: string;
  name: string;
  api: "openai-completions";
  provider: string;
  baseUrl: string;
  reasoning: true;
- input: "text"[];
+ input: ("image" | "text")[];
  cost: {
  input: number;
  output: number;
@@ -6712,7 +6746,7 @@ export declare const MODELS: {
  contextWindow: number;
  maxTokens: number;
  };
- readonly "kimi-k2.5": {
+ readonly "mimo-v2-omni": {
  id: string;
  name: string;
  api: "openai-completions";
@@ -6729,10 +6763,27 @@ export declare const MODELS: {
  contextWindow: number;
  maxTokens: number;
  };
+ readonly "mimo-v2-pro": {
+ id: string;
+ name: string;
+ api: "openai-completions";
+ provider: string;
+ baseUrl: string;
+ reasoning: true;
+ input: "text"[];
+ cost: {
+ input: number;
+ output: number;
+ cacheRead: number;
+ cacheWrite: number;
+ };
+ contextWindow: number;
+ maxTokens: number;
+ };
  readonly "minimax-m2.5": {
  id: string;
  name: string;
- api: "anthropic-messages";
+ api: "openai-completions";
  provider: string;
  baseUrl: string;
  reasoning: true;
@@ -7139,6 +7190,23 @@ export declare const MODELS: {
  contextWindow: number;
  maxTokens: number;
  };
+ readonly "arcee-ai/trinity-large-thinking": {
+ id: string;
+ name: string;
+ api: "openai-completions";
+ provider: string;
+ baseUrl: string;
+ reasoning: true;
+ input: "text"[];
+ cost: {
+ input: number;
+ output: number;
+ cacheRead: number;
+ cacheWrite: number;
+ };
+ contextWindow: number;
+ maxTokens: number;
+ };
  readonly "arcee-ai/trinity-mini": {
  id: string;
  name: string;
@@ -7700,14 +7768,14 @@ export declare const MODELS: {
  contextWindow: number;
  maxTokens: number;
  };
- readonly "inception/mercury": {
+ readonly "google/gemma-4-26b-a4b-it": {
  id: string;
  name: string;
  api: "openai-completions";
  provider: string;
  baseUrl: string;
- reasoning: false;
- input: "text"[];
+ reasoning: true;
+ input: ("image" | "text")[];
  cost: {
  input: number;
  output: number;
@@ -7717,14 +7785,14 @@ export declare const MODELS: {
  contextWindow: number;
  maxTokens: number;
  };
- readonly "inception/mercury-2": {
+ readonly "google/gemma-4-31b-it": {
  id: string;
  name: string;
  api: "openai-completions";
  provider: string;
  baseUrl: string;
  reasoning: true;
- input: "text"[];
+ input: ("image" | "text")[];
  cost: {
  input: number;
  output: number;
@@ -7734,7 +7802,7 @@ export declare const MODELS: {
  contextWindow: number;
  maxTokens: number;
  };
- readonly "inception/mercury-coder": {
+ readonly "inception/mercury": {
  id: string;
  name: string;
  api: "openai-completions";
@@ -7751,7 +7819,24 @@ export declare const MODELS: {
  contextWindow: number;
  maxTokens: number;
  };
- readonly "kwaipilot/kat-coder-pro": {
+ readonly "inception/mercury-2": {
+ id: string;
+ name: string;
+ api: "openai-completions";
+ provider: string;
+ baseUrl: string;
+ reasoning: true;
+ input: "text"[];
+ cost: {
+ input: number;
+ output: number;
+ cacheRead: number;
+ cacheWrite: number;
+ };
+ contextWindow: number;
+ maxTokens: number;
+ };
+ readonly "inception/mercury-coder": {
  id: string;
  name: string;
  api: "openai-completions";
@@ -8278,23 +8363,6 @@ export declare const MODELS: {
  contextWindow: number;
  maxTokens: number;
  };
- readonly "mistralai/mistral-small-24b-instruct-2501": {
- id: string;
- name: string;
- api: "openai-completions";
- provider: string;
- baseUrl: string;
- reasoning: false;
- input: "text"[];
- cost: {
- input: number;
- output: number;
- cacheRead: number;
- cacheWrite: number;
- };
- contextWindow: number;
- maxTokens: number;
- };
  readonly "mistralai/mistral-small-2603": {
  id: string;
  name: string;
@@ -9349,6 +9417,40 @@ export declare const MODELS: {
  contextWindow: number;
  maxTokens: number;
  };
+ readonly "openai/gpt-audio": {
+ id: string;
+ name: string;
+ api: "openai-completions";
+ provider: string;
+ baseUrl: string;
+ reasoning: false;
+ input: "text"[];
+ cost: {
+ input: number;
+ output: number;
+ cacheRead: number;
+ cacheWrite: number;
+ };
+ contextWindow: number;
+ maxTokens: number;
+ };
+ readonly "openai/gpt-audio-mini": {
+ id: string;
+ name: string;
+ api: "openai-completions";
+ provider: string;
+ baseUrl: string;
+ reasoning: false;
+ input: "text"[];
+ cost: {
+ input: number;
+ output: number;
+ cacheRead: number;
+ cacheWrite: number;
+ };
+ contextWindow: number;
+ maxTokens: number;
+ };
  readonly "openai/gpt-oss-120b": {
  id: string;
  name: string;
@@ -10352,6 +10454,23 @@ export declare const MODELS: {
  contextWindow: number;
  maxTokens: number;
  };
+ readonly "qwen/qwen3.6-plus:free": {
+ id: string;
+ name: string;
+ api: "openai-completions";
+ provider: string;
+ baseUrl: string;
+ reasoning: true;
+ input: ("image" | "text")[];
+ cost: {
+ input: number;
+ output: number;
+ cacheRead: number;
+ cacheWrite: number;
+ };
+ contextWindow: number;
+ maxTokens: number;
+ };
  readonly "qwen/qwq-32b": {
  id: string;
  name: string;
@@ -10369,7 +10488,7 @@ export declare const MODELS: {
  contextWindow: number;
  maxTokens: number;
  };
- readonly "reka/reka-edge": {
+ readonly "rekaai/reka-edge": {
  id: string;
  name: string;
  api: "openai-completions";
@@ -10658,7 +10777,7 @@ export declare const MODELS: {
  contextWindow: number;
  maxTokens: number;
  };
- readonly "x-ai/grok-4.20-beta": {
+ readonly "x-ai/grok-4.20": {
  id: string;
  name: string;
  api: "openai-completions";
@@ -10930,6 +11049,23 @@ export declare const MODELS: {
  contextWindow: number;
  maxTokens: number;
  };
+ readonly "z-ai/glm-5v-turbo": {
+ id: string;
+ name: string;
+ api: "openai-completions";
+ provider: string;
+ baseUrl: string;
+ reasoning: true;
+ input: ("image" | "text")[];
+ cost: {
+ input: number;
+ output: number;
+ cacheRead: number;
+ cacheWrite: number;
+ };
+ contextWindow: number;
+ maxTokens: number;
+ };
  };
  readonly "vercel-ai-gateway": {
  readonly "alibaba/qwen-3-14b": {
@@ -11187,6 +11323,23 @@ export declare const MODELS: {
  contextWindow: number;
  maxTokens: number;
  };
+ readonly "alibaba/qwen3.6-plus": {
+ id: string;
+ name: string;
+ api: "anthropic-messages";
+ provider: string;
+ baseUrl: string;
+ reasoning: true;
+ input: ("image" | "text")[];
+ cost: {
+ input: number;
+ output: number;
+ cacheRead: number;
+ cacheWrite: number;
+ };
+ contextWindow: number;
+ maxTokens: number;
+ };
  readonly "anthropic/claude-3-haiku": {
  id: string;
  name: string;
@@ -11425,6 +11578,23 @@ export declare const MODELS: {
  contextWindow: number;
  maxTokens: number;
  };
+ readonly "arcee-ai/trinity-large-thinking": {
+ id: string;
+ name: string;
+ api: "anthropic-messages";
+ provider: string;
+ baseUrl: string;
+ reasoning: true;
+ input: "text"[];
+ cost: {
+ input: number;
+ output: number;
+ cacheRead: number;
+ cacheWrite: number;
+ };
+ contextWindow: number;
+ maxTokens: number;
+ };
  readonly "bytedance/seed-1.6": {
  id: string;
  name: string;
@@ -11714,6 +11884,40 @@ export declare const MODELS: {
  contextWindow: number;
  maxTokens: number;
  };
+ readonly "google/gemma-4-26b-a4b-it": {
+ id: string;
+ name: string;
+ api: "anthropic-messages";
+ provider: string;
+ baseUrl: string;
+ reasoning: true;
+ input: ("image" | "text")[];
+ cost: {
+ input: number;
+ output: number;
+ cacheRead: number;
+ cacheWrite: number;
+ };
+ contextWindow: number;
+ maxTokens: number;
+ };
+ readonly "google/gemma-4-31b-it": {
+ id: string;
+ name: string;
+ api: "anthropic-messages";
+ provider: string;
+ baseUrl: string;
+ reasoning: true;
+ input: ("image" | "text")[];
+ cost: {
+ input: number;
+ output: number;
+ cacheRead: number;
+ cacheWrite: number;
+ };
+ contextWindow: number;
+ maxTokens: number;
+ };
  readonly "inception/mercury-2": {
  id: string;
  name: string;
@@ -12802,6 +13006,23 @@ export declare const MODELS: {
  contextWindow: number;
  maxTokens: number;
  };
+ readonly "openai/gpt-oss-120b": {
+ id: string;
+ name: string;
+ api: "anthropic-messages";
+ provider: string;
+ baseUrl: string;
+ reasoning: true;
+ input: "text"[];
+ cost: {
+ input: number;
+ output: number;
+ cacheRead: number;
+ cacheWrite: number;
+ };
+ contextWindow: number;
+ maxTokens: number;
+ };
  readonly "openai/gpt-oss-20b": {
  id: string;
  name: string;
@@ -13465,6 +13686,23 @@ export declare const MODELS: {
  contextWindow: number;
  maxTokens: number;
  };
+ readonly "zai/glm-5": {
+ id: string;
+ name: string;
+ api: "anthropic-messages";
+ provider: string;
+ baseUrl: string;
+ reasoning: true;
+ input: "text"[];
+ cost: {
+ input: number;
+ output: number;
+ cacheRead: number;
+ cacheWrite: number;
+ };
+ contextWindow: number;
+ maxTokens: number;
+ };
  readonly "zai/glm-5-turbo": {
  id: string;
  name: string;
@@ -13482,6 +13720,23 @@ export declare const MODELS: {
  contextWindow: number;
  maxTokens: number;
  };
+ readonly "zai/glm-5v-turbo": {
+ id: string;
+ name: string;
+ api: "anthropic-messages";
+ provider: string;
+ baseUrl: string;
+ reasoning: true;
+ input: ("image" | "text")[];
+ cost: {
+ input: number;
+ output: number;
+ cacheRead: number;
+ cacheWrite: number;
+ };
+ contextWindow: number;
+ maxTokens: number;
+ };
  };
  readonly xai: {
  readonly "grok-2": {
@@ -13987,6 +14242,7 @@ export declare const MODELS: {
  compat: {
  supportsDeveloperRole: false;
  thinkingFormat: "zai";
+ zaiToolStream: true;
  };
  reasoning: true;
  input: "text"[];
@@ -14008,6 +14264,7 @@ export declare const MODELS: {
  compat: {
  supportsDeveloperRole: false;
  thinkingFormat: "zai";
+ zaiToolStream: true;
  };
  reasoning: true;
  input: ("image" | "text")[];
@@ -14029,6 +14286,7 @@ export declare const MODELS: {
  compat: {
  supportsDeveloperRole: false;
  thinkingFormat: "zai";
+ zaiToolStream: true;
  };
  reasoning: true;
  input: "text"[];
@@ -14050,6 +14308,7 @@ export declare const MODELS: {
  compat: {
  supportsDeveloperRole: false;
  thinkingFormat: "zai";
+ zaiToolStream: true;
  };
  reasoning: true;
  input: "text"[];
@@ -14071,6 +14330,7 @@ export declare const MODELS: {
  compat: {
  supportsDeveloperRole: false;
  thinkingFormat: "zai";
+ zaiToolStream: true;
  };
  reasoning: true;
  input: "text"[];
@@ -14092,6 +14352,7 @@ export declare const MODELS: {
  compat: {
  supportsDeveloperRole: false;
  thinkingFormat: "zai";
+ zaiToolStream: true;
  };
  reasoning: true;
  input: "text"[];
@@ -14113,6 +14374,7 @@ export declare const MODELS: {
  compat: {
  supportsDeveloperRole: false;
  thinkingFormat: "zai";
+ zaiToolStream: true;
  };
  reasoning: true;
  input: "text"[];
@@ -14125,6 +14387,28 @@ export declare const MODELS: {
  contextWindow: number;
  maxTokens: number;
  };
+ readonly "glm-5v-turbo": {
+ id: string;
+ name: string;
+ api: "openai-completions";
+ provider: string;
+ baseUrl: string;
+ compat: {
+ supportsDeveloperRole: false;
+ thinkingFormat: "zai";
+ zaiToolStream: true;
+ };
+ reasoning: true;
+ input: ("image" | "text")[];
+ cost: {
+ input: number;
+ output: number;
+ cacheRead: number;
+ cacheWrite: number;
+ };
+ contextWindow: number;
+ maxTokens: number;
+ };
  };
  };
  //# sourceMappingURL=models.generated.d.ts.map
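
For TypeScript consumers, the practical effect of this release is renamed and removed keys in the MODELS map (for example "claude-3-7-sonnet-latest" is gone and "x-ai/grok-4.20-beta" became "x-ai/grok-4.20"), new entries such as "zai/glm-5" under the "vercel-ai-gateway" provider, and a new zaiToolStream compat flag on the zai models. A minimal usage sketch, assuming MODELS is exported from the package root (the import path is an assumption; this diff only shows the declaration file):

import { MODELS } from "@mariozechner/pi-ai";

// New in 0.65.0: glm-5 is now listed under the vercel-ai-gateway provider.
const glm5 = MODELS["vercel-ai-gateway"]["zai/glm-5"];
console.log(glm5.reasoning); // true, per the declaration above

// Removed in 0.65.0: lookups against dropped keys now fail to type-check,
// e.g. "claude-3-7-sonnet-latest" or the old "x-ai/grok-4.20-beta" id.

Because MODELS is declared with readonly literal keys, these additions and removals surface at compile time rather than as runtime lookup failures.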