@mariozechner/pi-ai 0.50.1 → 0.50.3

This diff compares the contents of two publicly released versions of the package as they appear in the supported public registries, and is provided for informational purposes only.
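The bulk of this release touches the generated model registry (models.generated.js). Every entry added or changed in the hunks below follows the same shape; the sketch here is a hand-written TypeScript approximation of that shape, inferred from the fields visible in this diff. The interface name and the assumed unit of the cost fields (USD per million tokens) are not stated by the package and are assumptions made for readability.

```ts
// Approximate shape of one entry in the generated MODELS map, inferred from
// the fields visible in this diff. The interface name and the cost unit
// (assumed: USD per million tokens) are assumptions, not part of the
// package's public API.
interface ModelEntry {
  id: string;                      // e.g. "MiniMaxAI/MiniMax-M2.1"
  name: string;                    // human-readable label
  api: "openai-completions" | "anthropic-messages";
  provider: string;                // e.g. "huggingface", "openrouter", "zai"
  baseUrl: string;                 // endpoint the provider is reached at
  compat?: { supportsDeveloperRole?: boolean; thinkingFormat?: string };
  reasoning: boolean;              // whether the model emits reasoning/thinking output
  input: ("text" | "image")[];     // accepted input modalities
  cost: { input: number; output: number; cacheRead: number; cacheWrite: number };
  contextWindow: number;           // context size in tokens
  maxTokens: number;               // maximum output tokens
}
```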
@@ -3279,6 +3279,296 @@ export const MODELS = {
  maxTokens: 16384,
  },
  },
+ "huggingface": {
+ "MiniMaxAI/MiniMax-M2.1": {
+ id: "MiniMaxAI/MiniMax-M2.1",
+ name: "MiniMax-M2.1",
+ api: "openai-completions",
+ provider: "huggingface",
+ baseUrl: "https://router.huggingface.co/v1",
+ compat: { "supportsDeveloperRole": false },
+ reasoning: true,
+ input: ["text"],
+ cost: {
+ input: 0.3,
+ output: 1.2,
+ cacheRead: 0,
+ cacheWrite: 0,
+ },
+ contextWindow: 204800,
+ maxTokens: 131072,
+ },
+ "Qwen/Qwen3-235B-A22B-Thinking-2507": {
+ id: "Qwen/Qwen3-235B-A22B-Thinking-2507",
+ name: "Qwen3-235B-A22B-Thinking-2507",
+ api: "openai-completions",
+ provider: "huggingface",
+ baseUrl: "https://router.huggingface.co/v1",
+ compat: { "supportsDeveloperRole": false },
+ reasoning: true,
+ input: ["text"],
+ cost: {
+ input: 0.3,
+ output: 3,
+ cacheRead: 0,
+ cacheWrite: 0,
+ },
+ contextWindow: 262144,
+ maxTokens: 131072,
+ },
+ "Qwen/Qwen3-Coder-480B-A35B-Instruct": {
+ id: "Qwen/Qwen3-Coder-480B-A35B-Instruct",
+ name: "Qwen3-Coder-480B-A35B-Instruct",
+ api: "openai-completions",
+ provider: "huggingface",
+ baseUrl: "https://router.huggingface.co/v1",
+ compat: { "supportsDeveloperRole": false },
+ reasoning: false,
+ input: ["text"],
+ cost: {
+ input: 2,
+ output: 2,
+ cacheRead: 0,
+ cacheWrite: 0,
+ },
+ contextWindow: 262144,
+ maxTokens: 66536,
+ },
+ "Qwen/Qwen3-Next-80B-A3B-Instruct": {
+ id: "Qwen/Qwen3-Next-80B-A3B-Instruct",
+ name: "Qwen3-Next-80B-A3B-Instruct",
+ api: "openai-completions",
+ provider: "huggingface",
+ baseUrl: "https://router.huggingface.co/v1",
+ compat: { "supportsDeveloperRole": false },
+ reasoning: false,
+ input: ["text"],
+ cost: {
+ input: 0.25,
+ output: 1,
+ cacheRead: 0,
+ cacheWrite: 0,
+ },
+ contextWindow: 262144,
+ maxTokens: 66536,
+ },
+ "Qwen/Qwen3-Next-80B-A3B-Thinking": {
+ id: "Qwen/Qwen3-Next-80B-A3B-Thinking",
+ name: "Qwen3-Next-80B-A3B-Thinking",
+ api: "openai-completions",
+ provider: "huggingface",
+ baseUrl: "https://router.huggingface.co/v1",
+ compat: { "supportsDeveloperRole": false },
+ reasoning: false,
+ input: ["text"],
+ cost: {
+ input: 0.3,
+ output: 2,
+ cacheRead: 0,
+ cacheWrite: 0,
+ },
+ contextWindow: 262144,
+ maxTokens: 131072,
+ },
+ "XiaomiMiMo/MiMo-V2-Flash": {
+ id: "XiaomiMiMo/MiMo-V2-Flash",
+ name: "MiMo-V2-Flash",
+ api: "openai-completions",
+ provider: "huggingface",
+ baseUrl: "https://router.huggingface.co/v1",
+ compat: { "supportsDeveloperRole": false },
+ reasoning: true,
+ input: ["text"],
+ cost: {
+ input: 0.1,
+ output: 0.3,
+ cacheRead: 0,
+ cacheWrite: 0,
+ },
+ contextWindow: 262144,
+ maxTokens: 4096,
+ },
+ "deepseek-ai/DeepSeek-R1-0528": {
+ id: "deepseek-ai/DeepSeek-R1-0528",
+ name: "DeepSeek-R1-0528",
+ api: "openai-completions",
+ provider: "huggingface",
+ baseUrl: "https://router.huggingface.co/v1",
+ compat: { "supportsDeveloperRole": false },
+ reasoning: true,
+ input: ["text"],
+ cost: {
+ input: 3,
+ output: 5,
+ cacheRead: 0,
+ cacheWrite: 0,
+ },
+ contextWindow: 163840,
+ maxTokens: 163840,
+ },
+ "deepseek-ai/DeepSeek-V3.2": {
+ id: "deepseek-ai/DeepSeek-V3.2",
+ name: "DeepSeek-V3.2",
+ api: "openai-completions",
+ provider: "huggingface",
+ baseUrl: "https://router.huggingface.co/v1",
+ compat: { "supportsDeveloperRole": false },
+ reasoning: true,
+ input: ["text"],
+ cost: {
+ input: 0.28,
+ output: 0.4,
+ cacheRead: 0,
+ cacheWrite: 0,
+ },
+ contextWindow: 163840,
+ maxTokens: 65536,
+ },
+ "moonshotai/Kimi-K2-Instruct": {
+ id: "moonshotai/Kimi-K2-Instruct",
+ name: "Kimi-K2-Instruct",
+ api: "openai-completions",
+ provider: "huggingface",
+ baseUrl: "https://router.huggingface.co/v1",
+ compat: { "supportsDeveloperRole": false },
+ reasoning: false,
+ input: ["text"],
+ cost: {
+ input: 1,
+ output: 3,
+ cacheRead: 0,
+ cacheWrite: 0,
+ },
+ contextWindow: 131072,
+ maxTokens: 16384,
+ },
+ "moonshotai/Kimi-K2-Instruct-0905": {
+ id: "moonshotai/Kimi-K2-Instruct-0905",
+ name: "Kimi-K2-Instruct-0905",
+ api: "openai-completions",
+ provider: "huggingface",
+ baseUrl: "https://router.huggingface.co/v1",
+ compat: { "supportsDeveloperRole": false },
+ reasoning: false,
+ input: ["text"],
+ cost: {
+ input: 1,
+ output: 3,
+ cacheRead: 0,
+ cacheWrite: 0,
+ },
+ contextWindow: 262144,
+ maxTokens: 16384,
+ },
+ "moonshotai/Kimi-K2-Thinking": {
+ id: "moonshotai/Kimi-K2-Thinking",
+ name: "Kimi-K2-Thinking",
+ api: "openai-completions",
+ provider: "huggingface",
+ baseUrl: "https://router.huggingface.co/v1",
+ compat: { "supportsDeveloperRole": false },
+ reasoning: true,
+ input: ["text"],
+ cost: {
+ input: 0.6,
+ output: 2.5,
+ cacheRead: 0.15,
+ cacheWrite: 0,
+ },
+ contextWindow: 262144,
+ maxTokens: 262144,
+ },
+ "moonshotai/Kimi-K2.5": {
+ id: "moonshotai/Kimi-K2.5",
+ name: "Kimi-K2.5",
+ api: "openai-completions",
+ provider: "huggingface",
+ baseUrl: "https://router.huggingface.co/v1",
+ compat: { "supportsDeveloperRole": false },
+ reasoning: true,
+ input: ["text", "image"],
+ cost: {
+ input: 0.6,
+ output: 3,
+ cacheRead: 0.1,
+ cacheWrite: 0,
+ },
+ contextWindow: 262144,
+ maxTokens: 262144,
+ },
+ "zai-org/GLM-4.7": {
+ id: "zai-org/GLM-4.7",
+ name: "GLM-4.7",
+ api: "openai-completions",
+ provider: "huggingface",
+ baseUrl: "https://router.huggingface.co/v1",
+ compat: { "supportsDeveloperRole": false },
+ reasoning: true,
+ input: ["text"],
+ cost: {
+ input: 0.6,
+ output: 2.2,
+ cacheRead: 0.11,
+ cacheWrite: 0,
+ },
+ contextWindow: 204800,
+ maxTokens: 131072,
+ },
+ "zai-org/GLM-4.7-Flash": {
+ id: "zai-org/GLM-4.7-Flash",
+ name: "GLM-4.7-Flash",
+ api: "openai-completions",
+ provider: "huggingface",
+ baseUrl: "https://router.huggingface.co/v1",
+ compat: { "supportsDeveloperRole": false },
+ reasoning: true,
+ input: ["text"],
+ cost: {
+ input: 0,
+ output: 0,
+ cacheRead: 0,
+ cacheWrite: 0,
+ },
+ contextWindow: 200000,
+ maxTokens: 128000,
+ },
+ },
+ "kimi-coding": {
+ "k2p5": {
+ id: "k2p5",
+ name: "Kimi K2.5",
+ api: "anthropic-messages",
+ provider: "kimi-coding",
+ baseUrl: "https://api.kimi.com/coding",
+ reasoning: true,
+ input: ["text", "image"],
+ cost: {
+ input: 0,
+ output: 0,
+ cacheRead: 0,
+ cacheWrite: 0,
+ },
+ contextWindow: 262144,
+ maxTokens: 32768,
+ },
+ "kimi-k2-thinking": {
+ id: "kimi-k2-thinking",
+ name: "Kimi K2 Thinking",
+ api: "anthropic-messages",
+ provider: "kimi-coding",
+ baseUrl: "https://api.kimi.com/coding",
+ reasoning: true,
+ input: ["text"],
+ cost: {
+ input: 0,
+ output: 0,
+ cacheRead: 0,
+ cacheWrite: 0,
+ },
+ contextWindow: 262144,
+ maxTokens: 32768,
+ },
+ },
  "minimax": {
  "MiniMax-M2": {
  id: "MiniMax-M2",
@@ -4820,6 +5110,40 @@ export const MODELS = {
  contextWindow: 262144,
  maxTokens: 262144,
  },
+ "kimi-k2.5": {
+ id: "kimi-k2.5",
+ name: "Kimi K2.5",
+ api: "openai-completions",
+ provider: "opencode",
+ baseUrl: "https://opencode.ai/zen/v1",
+ reasoning: true,
+ input: ["text", "image"],
+ cost: {
+ input: 0.6,
+ output: 3,
+ cacheRead: 0.1,
+ cacheWrite: 0,
+ },
+ contextWindow: 262144,
+ maxTokens: 262144,
+ },
+ "minimax-m2.1": {
+ id: "minimax-m2.1",
+ name: "MiniMax M2.1",
+ api: "openai-completions",
+ provider: "opencode",
+ baseUrl: "https://opencode.ai/zen/v1",
+ reasoning: true,
+ input: ["text"],
+ cost: {
+ input: 0.3,
+ output: 1.2,
+ cacheRead: 0.1,
+ cacheWrite: 0,
+ },
+ contextWindow: 204800,
+ maxTokens: 131072,
+ },
  "qwen3-coder": {
  id: "qwen3-coder",
  name: "Qwen3 Coder",
@@ -5179,6 +5503,23 @@ export const MODELS = {
  contextWindow: 1000000,
  maxTokens: 64000,
  },
+ "arcee-ai/trinity-large-preview:free": {
+ id: "arcee-ai/trinity-large-preview:free",
+ name: "Arcee AI: Trinity Large Preview (free)",
+ api: "openai-completions",
+ provider: "openrouter",
+ baseUrl: "https://openrouter.ai/api/v1",
+ reasoning: false,
+ input: ["text"],
+ cost: {
+ input: 0,
+ output: 0,
+ cacheRead: 0,
+ cacheWrite: 0,
+ },
+ contextWindow: 131000,
+ maxTokens: 4096,
+ },
  "arcee-ai/trinity-mini": {
  id: "arcee-ai/trinity-mini",
  name: "Arcee AI: Trinity Mini",
@@ -5547,7 +5888,7 @@ export const MODELS = {
  cost: {
  input: 0.21,
  output: 0.32,
- cacheRead: 0,
+ cacheRead: 0.21,
  cacheWrite: 0,
  },
  contextWindow: 163840,
@@ -5670,7 +6011,7 @@ export const MODELS = {
  cacheWrite: 0.08333333333333334,
  },
  contextWindow: 1048576,
- maxTokens: 65535,
+ maxTokens: 65536,
  },
  "google/gemini-2.5-pro": {
  id: "google/gemini-2.5-pro",
@@ -6063,23 +6404,6 @@ export const MODELS = {
  contextWindow: 262144,
  maxTokens: 65536,
  },
- "mistralai/devstral-2512:free": {
- id: "mistralai/devstral-2512:free",
- name: "Mistral: Devstral 2 2512 (free)",
- api: "openai-completions",
- provider: "openrouter",
- baseUrl: "https://openrouter.ai/api/v1",
- reasoning: false,
- input: ["text"],
- cost: {
- input: 0,
- output: 0,
- cacheRead: 0,
- cacheWrite: 0,
- },
- contextWindow: 262144,
- maxTokens: 4096,
- },
  "mistralai/devstral-medium": {
  id: "mistralai/devstral-medium",
  name: "Mistral: Devstral Medium",
@@ -6590,6 +6914,23 @@ export const MODELS = {
  contextWindow: 262144,
  maxTokens: 65535,
  },
+ "moonshotai/kimi-k2.5": {
+ id: "moonshotai/kimi-k2.5",
+ name: "MoonshotAI: Kimi K2.5",
+ api: "openai-completions",
+ provider: "openrouter",
+ baseUrl: "https://openrouter.ai/api/v1",
+ reasoning: true,
+ input: ["text", "image"],
+ cost: {
+ input: 0.5700000000000001,
+ output: 2.8499999999999996,
+ cacheRead: 0,
+ cacheWrite: 0,
+ },
+ contextWindow: 262144,
+ maxTokens: 262144,
+ },
  "nex-agi/deepseek-v3.1-nex-n1": {
  id: "nex-agi/deepseek-v3.1-nex-n1",
  name: "Nex AGI: DeepSeek V3.1 Nex N1",
@@ -6684,13 +7025,13 @@ export const MODELS = {
  reasoning: true,
  input: ["text"],
  cost: {
- input: 0.06,
- output: 0.24,
+ input: 0.049999999999999996,
+ output: 0.19999999999999998,
  cacheRead: 0,
  cacheWrite: 0,
  },
  contextWindow: 262144,
- maxTokens: 262144,
+ maxTokens: 4096,
  },
  "nvidia/nemotron-3-nano-30b-a3b:free": {
  id: "nvidia/nemotron-3-nano-30b-a3b:free",
@@ -7944,7 +8285,7 @@ export const MODELS = {
  cost: {
  input: 0.049999999999999996,
  output: 0.25,
- cacheRead: 0,
+ cacheRead: 0.049999999999999996,
  cacheWrite: 0,
  },
  contextWindow: 32000,
@@ -8392,6 +8733,23 @@ export const MODELS = {
  contextWindow: 163840,
  maxTokens: 65536,
  },
+ "upstage/solar-pro-3:free": {
+ id: "upstage/solar-pro-3:free",
+ name: "Upstage: Solar Pro 3 (free)",
+ api: "openai-completions",
+ provider: "openrouter",
+ baseUrl: "https://openrouter.ai/api/v1",
+ reasoning: true,
+ input: ["text"],
+ cost: {
+ input: 0,
+ output: 0,
+ cacheRead: 0,
+ cacheWrite: 0,
+ },
+ contextWindow: 128000,
+ maxTokens: 4096,
+ },
  "x-ai/grok-3": {
  id: "x-ai/grok-3",
  name: "xAI: Grok 3",
@@ -8887,6 +9245,23 @@ export const MODELS = {
  contextWindow: 262144,
  maxTokens: 32768,
  },
+ "alibaba/qwen3-max-thinking": {
+ id: "alibaba/qwen3-max-thinking",
+ name: "Qwen 3 Max Thinking",
+ api: "anthropic-messages",
+ provider: "vercel-ai-gateway",
+ baseUrl: "https://ai-gateway.vercel.sh",
+ reasoning: true,
+ input: ["text"],
+ cost: {
+ input: 1.2,
+ output: 6,
+ cacheRead: 0.24,
+ cacheWrite: 0,
+ },
+ contextWindow: 256000,
+ maxTokens: 256000,
+ },
  "anthropic/claude-3-haiku": {
  id: "anthropic/claude-3-haiku",
  name: "Claude 3 Haiku",
@@ -9074,6 +9449,23 @@ export const MODELS = {
  contextWindow: 1000000,
  maxTokens: 64000,
  },
+ "arcee-ai/trinity-large-preview": {
+ id: "arcee-ai/trinity-large-preview",
+ name: "Trinity Large Preview",
+ api: "anthropic-messages",
+ provider: "vercel-ai-gateway",
+ baseUrl: "https://ai-gateway.vercel.sh",
+ reasoning: false,
+ input: ["text"],
+ cost: {
+ input: 0.25,
+ output: 1,
+ cacheRead: 0,
+ cacheWrite: 0,
+ },
+ contextWindow: 131000,
+ maxTokens: 131000,
+ },
  "bytedance/seed-1.6": {
  id: "bytedance/seed-1.6",
  name: "Seed 1.6",
@@ -9771,6 +10163,23 @@ export const MODELS = {
  contextWindow: 256000,
  maxTokens: 16384,
  },
+ "moonshotai/kimi-k2.5": {
+ id: "moonshotai/kimi-k2.5",
+ name: "Kimi K2.5",
+ api: "anthropic-messages",
+ provider: "vercel-ai-gateway",
+ baseUrl: "https://ai-gateway.vercel.sh",
+ reasoning: true,
+ input: ["text", "image"],
+ cost: {
+ input: 1.2,
+ output: 1.2,
+ cacheRead: 0.6,
+ cacheWrite: 0,
+ },
+ contextWindow: 256000,
+ maxTokens: 256000,
+ },
  "nvidia/nemotron-nano-12b-v2-vl": {
  id: "nvidia/nemotron-nano-12b-v2-vl",
  name: "Nvidia Nemotron Nano 12B V2 VL",
@@ -11295,6 +11704,24 @@ export const MODELS = {
  contextWindow: 204800,
  maxTokens: 131072,
  },
+ "glm-4.7-flash": {
+ id: "glm-4.7-flash",
+ name: "GLM-4.7-Flash",
+ api: "openai-completions",
+ provider: "zai",
+ baseUrl: "https://api.z.ai/api/coding/paas/v4",
+ compat: { "supportsDeveloperRole": false, "thinkingFormat": "zai" },
+ reasoning: true,
+ input: ["text"],
+ cost: {
+ input: 0,
+ output: 0,
+ cacheRead: 0,
+ cacheWrite: 0,
+ },
+ contextWindow: 200000,
+ maxTokens: 131072,
+ },
  },
  };
  //# sourceMappingURL=models.generated.js.map
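As a reading aid for the cost fields in the entries above: assuming they are priced in USD per million tokens (the diff itself does not state the unit), a rough per-request estimate could be computed as in the following sketch. The estimateCost helper is hypothetical and not part of pi-ai.

```ts
// Hypothetical helper: estimate the price of one request against a MODELS
// entry from this diff, assuming cost fields are USD per 1,000,000 tokens.
function estimateCost(
  entry: { cost: { input: number; output: number; cacheRead: number } },
  usage: { inputTokens: number; outputTokens: number; cachedTokens?: number },
): number {
  const perToken = (rate: number) => rate / 1_000_000; // USD per single token
  const cached = usage.cachedTokens ?? 0;
  return (
    (usage.inputTokens - cached) * perToken(entry.cost.input) + // uncached input
    cached * perToken(entry.cost.cacheRead) +                   // cache reads
    usage.outputTokens * perToken(entry.cost.output)            // output
  );
}

// Example with the new huggingface "moonshotai/Kimi-K2.5" entry
// (input: 0.6, output: 3, cacheRead: 0.1):
// estimateCost(entry, { inputTokens: 100_000, outputTokens: 2_000, cachedTokens: 50_000 })
// ≈ 0.03 + 0.005 + 0.006 = 0.041 (USD, under the per-million-token assumption).
```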