@mariozechner/pi-ai 0.69.0 → 0.70.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (38)
  1. package/README.md +4 -1
  2. package/dist/env-api-keys.d.ts +9 -0
  3. package/dist/env-api-keys.d.ts.map +1 -1
  4. package/dist/env-api-keys.js +42 -31
  5. package/dist/env-api-keys.js.map +1 -1
  6. package/dist/models.d.ts +1 -1
  7. package/dist/models.d.ts.map +1 -1
  8. package/dist/models.generated.d.ts +282 -19
  9. package/dist/models.generated.d.ts.map +1 -1
  10. package/dist/models.generated.js +278 -47
  11. package/dist/models.generated.js.map +1 -1
  12. package/dist/models.js +5 -2
  13. package/dist/models.js.map +1 -1
  14. package/dist/providers/anthropic.d.ts.map +1 -1
  15. package/dist/providers/anthropic.js +33 -12
  16. package/dist/providers/anthropic.js.map +1 -1
  17. package/dist/providers/azure-openai-responses.d.ts.map +1 -1
  18. package/dist/providers/azure-openai-responses.js +5 -1
  19. package/dist/providers/azure-openai-responses.js.map +1 -1
  20. package/dist/providers/google-vertex.d.ts.map +1 -1
  21. package/dist/providers/google-vertex.js +34 -13
  22. package/dist/providers/google-vertex.js.map +1 -1
  23. package/dist/providers/openai-codex-responses.d.ts.map +1 -1
  24. package/dist/providers/openai-codex-responses.js +8 -7
  25. package/dist/providers/openai-codex-responses.js.map +1 -1
  26. package/dist/providers/openai-completions.d.ts.map +1 -1
  27. package/dist/providers/openai-completions.js +95 -44
  28. package/dist/providers/openai-completions.js.map +1 -1
  29. package/dist/providers/openai-responses.d.ts.map +1 -1
  30. package/dist/providers/openai-responses.js +24 -20
  31. package/dist/providers/openai-responses.js.map +1 -1
  32. package/dist/providers/simple-options.d.ts.map +1 -1
  33. package/dist/providers/simple-options.js +2 -0
  34. package/dist/providers/simple-options.js.map +1 -1
  35. package/dist/types.d.ts +35 -4
  36. package/dist/types.d.ts.map +1 -1
  37. package/dist/types.js.map +1 -1
  38. package/package.json +1 -1
@@ -325,6 +325,40 @@ export const MODELS = {
325
325
  contextWindow: 1000000,
326
326
  maxTokens: 64000,
327
327
  },
328
+ "au.anthropic.claude-opus-4-6-v1": {
329
+ id: "au.anthropic.claude-opus-4-6-v1",
330
+ name: "AU Anthropic Claude Opus 4.6",
331
+ api: "bedrock-converse-stream",
332
+ provider: "amazon-bedrock",
333
+ baseUrl: "https://bedrock-runtime.us-east-1.amazonaws.com",
334
+ reasoning: true,
335
+ input: ["text", "image"],
336
+ cost: {
337
+ input: 16.5,
338
+ output: 82.5,
339
+ cacheRead: 0.5,
340
+ cacheWrite: 6.25,
341
+ },
342
+ contextWindow: 1000000,
343
+ maxTokens: 128000,
344
+ },
345
+ "au.anthropic.claude-sonnet-4-6": {
346
+ id: "au.anthropic.claude-sonnet-4-6",
347
+ name: "AU Anthropic Claude Sonnet 4.6",
348
+ api: "bedrock-converse-stream",
349
+ provider: "amazon-bedrock",
350
+ baseUrl: "https://bedrock-runtime.us-east-1.amazonaws.com",
351
+ reasoning: true,
352
+ input: ["text", "image"],
353
+ cost: {
354
+ input: 3.3,
355
+ output: 16.5,
356
+ cacheRead: 0.33,
357
+ cacheWrite: 4.125,
358
+ },
359
+ contextWindow: 1000000,
360
+ maxTokens: 128000,
361
+ },
328
362
  "deepseek.r1-v1:0": {
329
363
  id: "deepseek.r1-v1:0",
330
364
  name: "DeepSeek-R1",
@@ -2488,6 +2522,23 @@ export const MODELS = {
2488
2522
  contextWindow: 1050000,
2489
2523
  maxTokens: 128000,
2490
2524
  },
2525
+ "gpt-5.5": {
2526
+ id: "gpt-5.5",
2527
+ name: "GPT-5.5",
2528
+ api: "azure-openai-responses",
2529
+ provider: "azure-openai-responses",
2530
+ baseUrl: "",
2531
+ reasoning: true,
2532
+ input: ["text", "image"],
2533
+ cost: {
2534
+ input: 5,
2535
+ output: 30,
2536
+ cacheRead: 0.5,
2537
+ cacheWrite: 0,
2538
+ },
2539
+ contextWindow: 272000,
2540
+ maxTokens: 128000,
2541
+ },
2491
2542
  "o1": {
2492
2543
  id: "o1",
2493
2544
  name: "o1",
@@ -2695,6 +2746,44 @@ export const MODELS = {
2695
2746
  maxTokens: 40000,
2696
2747
  },
2697
2748
  },
2749
+ "deepseek": {
2750
+ "deepseek-v4-flash": {
2751
+ id: "deepseek-v4-flash",
2752
+ name: "DeepSeek V4 Flash",
2753
+ api: "openai-completions",
2754
+ provider: "deepseek",
2755
+ baseUrl: "https://api.deepseek.com",
2756
+ compat: { "requiresReasoningContentOnAssistantMessages": true, "thinkingFormat": "deepseek", "reasoningEffortMap": { "minimal": "high", "low": "high", "medium": "high", "high": "high", "xhigh": "max" } },
2757
+ reasoning: true,
2758
+ input: ["text"],
2759
+ cost: {
2760
+ input: 0.14,
2761
+ output: 0.28,
2762
+ cacheRead: 0.028,
2763
+ cacheWrite: 0,
2764
+ },
2765
+ contextWindow: 1000000,
2766
+ maxTokens: 384000,
2767
+ },
2768
+ "deepseek-v4-pro": {
2769
+ id: "deepseek-v4-pro",
2770
+ name: "DeepSeek V4 Pro",
2771
+ api: "openai-completions",
2772
+ provider: "deepseek",
2773
+ baseUrl: "https://api.deepseek.com",
2774
+ compat: { "requiresReasoningContentOnAssistantMessages": true, "thinkingFormat": "deepseek", "reasoningEffortMap": { "minimal": "high", "low": "high", "medium": "high", "high": "high", "xhigh": "max" } },
2775
+ reasoning: true,
2776
+ input: ["text"],
2777
+ cost: {
2778
+ input: 1.74,
2779
+ output: 3.48,
2780
+ cacheRead: 0.145,
2781
+ cacheWrite: 0,
2782
+ },
2783
+ contextWindow: 1000000,
2784
+ maxTokens: 384000,
2785
+ },
2786
+ },
2698
2787
  "fireworks": {
2699
2788
  "accounts/fireworks/models/deepseek-v3p1": {
2700
2789
  id: "accounts/fireworks/models/deepseek-v3p1",
@@ -3011,6 +3100,7 @@ export const MODELS = {
3011
3100
  provider: "github-copilot",
3012
3101
  baseUrl: "https://api.individual.githubcopilot.com",
3013
3102
  headers: { "User-Agent": "GitHubCopilotChat/0.35.0", "Editor-Version": "vscode/1.107.0", "Editor-Plugin-Version": "copilot-chat/0.35.0", "Copilot-Integration-Id": "vscode-chat" },
3103
+ compat: { "supportsEagerToolInputStreaming": false },
3014
3104
  reasoning: true,
3015
3105
  input: ["text", "image"],
3016
3106
  cost: {
@@ -3083,6 +3173,7 @@ export const MODELS = {
3083
3173
  provider: "github-copilot",
3084
3174
  baseUrl: "https://api.individual.githubcopilot.com",
3085
3175
  headers: { "User-Agent": "GitHubCopilotChat/0.35.0", "Editor-Version": "vscode/1.107.0", "Editor-Plugin-Version": "copilot-chat/0.35.0", "Copilot-Integration-Id": "vscode-chat" },
3176
+ compat: { "supportsEagerToolInputStreaming": false },
3086
3177
  reasoning: true,
3087
3178
  input: ["text", "image"],
3088
3179
  cost: {
@@ -3101,6 +3192,7 @@ export const MODELS = {
3101
3192
  provider: "github-copilot",
3102
3193
  baseUrl: "https://api.individual.githubcopilot.com",
3103
3194
  headers: { "User-Agent": "GitHubCopilotChat/0.35.0", "Editor-Version": "vscode/1.107.0", "Editor-Plugin-Version": "copilot-chat/0.35.0", "Copilot-Integration-Id": "vscode-chat" },
3195
+ compat: { "supportsEagerToolInputStreaming": false },
3104
3196
  reasoning: true,
3105
3197
  input: ["text", "image"],
3106
3198
  cost: {
@@ -3888,8 +3980,8 @@ export const MODELS = {
3888
3980
  contextWindow: 131072,
3889
3981
  maxTokens: 8192,
3890
3982
  },
3891
- "gemma-4-26b-it": {
3892
- id: "gemma-4-26b-it",
3983
+ "gemma-4-26b-a4b-it": {
3984
+ id: "gemma-4-26b-a4b-it",
3893
3985
  name: "Gemma 4 26B",
3894
3986
  api: "google-generative-ai",
3895
3987
  provider: "google",
@@ -5117,6 +5209,7 @@ export const MODELS = {
5117
5209
  api: "anthropic-messages",
5118
5210
  provider: "kimi-coding",
5119
5211
  baseUrl: "https://api.kimi.com/coding",
5212
+ headers: { "User-Agent": "KimiCLI/1.5" },
5120
5213
  reasoning: true,
5121
5214
  input: ["text", "image"],
5122
5215
  cost: {
@@ -5134,6 +5227,7 @@ export const MODELS = {
5134
5227
  api: "anthropic-messages",
5135
5228
  provider: "kimi-coding",
5136
5229
  baseUrl: "https://api.kimi.com/coding",
5230
+ headers: { "User-Agent": "KimiCLI/1.5" },
5137
5231
  reasoning: true,
5138
5232
  input: ["text", "image"],
5139
5233
  cost: {
@@ -5151,6 +5245,7 @@ export const MODELS = {
5151
5245
  api: "anthropic-messages",
5152
5246
  provider: "kimi-coding",
5153
5247
  baseUrl: "https://api.kimi.com/coding",
5248
+ headers: { "User-Agent": "KimiCLI/1.5" },
5154
5249
  reasoning: true,
5155
5250
  input: ["text"],
5156
5251
  cost: {
@@ -6224,6 +6319,23 @@ export const MODELS = {
6224
6319
  contextWindow: 1050000,
6225
6320
  maxTokens: 128000,
6226
6321
  },
6322
+ "gpt-5.5": {
6323
+ id: "gpt-5.5",
6324
+ name: "GPT-5.5",
6325
+ api: "openai-responses",
6326
+ provider: "openai",
6327
+ baseUrl: "https://api.openai.com/v1",
6328
+ reasoning: true,
6329
+ input: ["text", "image"],
6330
+ cost: {
6331
+ input: 5,
6332
+ output: 30,
6333
+ cacheRead: 0.5,
6334
+ cacheWrite: 0,
6335
+ },
6336
+ contextWindow: 272000,
6337
+ maxTokens: 128000,
6338
+ },
6227
6339
  "o1": {
6228
6340
  id: "o1",
6229
6341
  name: "o1",
@@ -6515,6 +6627,23 @@ export const MODELS = {
6515
6627
  contextWindow: 272000,
6516
6628
  maxTokens: 128000,
6517
6629
  },
6630
+ "gpt-5.5": {
6631
+ id: "gpt-5.5",
6632
+ name: "GPT-5.5",
6633
+ api: "openai-codex-responses",
6634
+ provider: "openai-codex",
6635
+ baseUrl: "https://chatgpt.com/backend-api",
6636
+ reasoning: true,
6637
+ input: ["text", "image"],
6638
+ cost: {
6639
+ input: 5,
6640
+ output: 30,
6641
+ cacheRead: 0.5,
6642
+ cacheWrite: 0,
6643
+ },
6644
+ contextWindow: 272000,
6645
+ maxTokens: 128000,
6646
+ },
6518
6647
  },
6519
6648
  "opencode": {
6520
6649
  "big-pickle": {
@@ -6993,6 +7122,23 @@ export const MODELS = {
6993
7122
  contextWindow: 1050000,
6994
7123
  maxTokens: 128000,
6995
7124
  },
7125
+ "hy3-preview-free": {
7126
+ id: "hy3-preview-free",
7127
+ name: "Hy3 preview Free",
7128
+ api: "openai-completions",
7129
+ provider: "opencode",
7130
+ baseUrl: "https://opencode.ai/zen/v1",
7131
+ reasoning: true,
7132
+ input: ["text"],
7133
+ cost: {
7134
+ input: 0,
7135
+ output: 0,
7136
+ cacheRead: 0,
7137
+ cacheWrite: 0,
7138
+ },
7139
+ contextWindow: 256000,
7140
+ maxTokens: 64000,
7141
+ },
6996
7142
  "kimi-k2.5": {
6997
7143
  id: "kimi-k2.5",
6998
7144
  name: "Kimi K2.5",
@@ -7745,23 +7891,6 @@ export const MODELS = {
7745
7891
  contextWindow: 131000,
7746
7892
  maxTokens: 4096,
7747
7893
  },
7748
- "arcee-ai/trinity-large-preview:free": {
7749
- id: "arcee-ai/trinity-large-preview:free",
7750
- name: "Arcee AI: Trinity Large Preview (free)",
7751
- api: "openai-completions",
7752
- provider: "openrouter",
7753
- baseUrl: "https://openrouter.ai/api/v1",
7754
- reasoning: false,
7755
- input: ["text"],
7756
- cost: {
7757
- input: 0,
7758
- output: 0,
7759
- cacheRead: 0,
7760
- cacheWrite: 0,
7761
- },
7762
- contextWindow: 131000,
7763
- maxTokens: 4096,
7764
- },
7765
7894
  "arcee-ai/trinity-large-thinking": {
7766
7895
  id: "arcee-ai/trinity-large-thinking",
7767
7896
  name: "Arcee AI: Trinity Large Thinking",
@@ -8083,7 +8212,7 @@ export const MODELS = {
8083
8212
  cacheWrite: 0,
8084
8213
  },
8085
8214
  contextWindow: 131072,
8086
- maxTokens: 32768,
8215
+ maxTokens: 65536,
8087
8216
  },
8088
8217
  "deepseek/deepseek-v3.2-exp": {
8089
8218
  id: "deepseek/deepseek-v3.2-exp",
@@ -8102,6 +8231,40 @@ export const MODELS = {
8102
8231
  contextWindow: 163840,
8103
8232
  maxTokens: 65536,
8104
8233
  },
8234
+ "deepseek/deepseek-v4-flash": {
8235
+ id: "deepseek/deepseek-v4-flash",
8236
+ name: "DeepSeek: DeepSeek V4 Flash",
8237
+ api: "openai-completions",
8238
+ provider: "openrouter",
8239
+ baseUrl: "https://openrouter.ai/api/v1",
8240
+ reasoning: true,
8241
+ input: ["text"],
8242
+ cost: {
8243
+ input: 0.14,
8244
+ output: 0.28,
8245
+ cacheRead: 0.028,
8246
+ cacheWrite: 0,
8247
+ },
8248
+ contextWindow: 1048576,
8249
+ maxTokens: 384000,
8250
+ },
8251
+ "deepseek/deepseek-v4-pro": {
8252
+ id: "deepseek/deepseek-v4-pro",
8253
+ name: "DeepSeek: DeepSeek V4 Pro",
8254
+ api: "openai-completions",
8255
+ provider: "openrouter",
8256
+ baseUrl: "https://openrouter.ai/api/v1",
8257
+ reasoning: true,
8258
+ input: ["text"],
8259
+ cost: {
8260
+ input: 1.74,
8261
+ output: 3.48,
8262
+ cacheRead: 0.145,
8263
+ cacheWrite: 0,
8264
+ },
8265
+ contextWindow: 1048576,
8266
+ maxTokens: 384000,
8267
+ },
8105
8268
  "essentialai/rnj-1-instruct": {
8106
8269
  id: "essentialai/rnj-1-instruct",
8107
8270
  name: "EssentialAI: Rnj 1 Instruct",
@@ -8332,13 +8495,13 @@ export const MODELS = {
8332
8495
  reasoning: true,
8333
8496
  input: ["text", "image"],
8334
8497
  cost: {
8335
- input: 0.07,
8336
- output: 0.35,
8337
- cacheRead: 0.04,
8498
+ input: 0.06,
8499
+ output: 0.33,
8500
+ cacheRead: 0,
8338
8501
  cacheWrite: 0,
8339
8502
  },
8340
8503
  contextWindow: 262144,
8341
- maxTokens: 262144,
8504
+ maxTokens: 4096,
8342
8505
  },
8343
8506
  "google/gemma-4-26b-a4b-it:free": {
8344
8507
  id: "google/gemma-4-26b-a4b-it:free",
@@ -8368,7 +8531,7 @@ export const MODELS = {
8368
8531
  cost: {
8369
8532
  input: 0.13,
8370
8533
  output: 0.38,
8371
- cacheRead: 0.019999999499999997,
8534
+ cacheRead: 0,
8372
8535
  cacheWrite: 0,
8373
8536
  },
8374
8537
  contextWindow: 262144,
@@ -8408,6 +8571,23 @@ export const MODELS = {
8408
8571
  contextWindow: 128000,
8409
8572
  maxTokens: 50000,
8410
8573
  },
8574
+ "inclusionai/ling-2.6-1t:free": {
8575
+ id: "inclusionai/ling-2.6-1t:free",
8576
+ name: "inclusionAI: Ling-2.6-1T (free)",
8577
+ api: "openai-completions",
8578
+ provider: "openrouter",
8579
+ baseUrl: "https://openrouter.ai/api/v1",
8580
+ reasoning: false,
8581
+ input: ["text"],
8582
+ cost: {
8583
+ input: 0,
8584
+ output: 0,
8585
+ cacheRead: 0,
8586
+ cacheWrite: 0,
8587
+ },
8588
+ contextWindow: 262144,
8589
+ maxTokens: 32768,
8590
+ },
8411
8591
  "inclusionai/ling-2.6-flash:free": {
8412
8592
  id: "inclusionai/ling-2.6-flash:free",
8413
8593
  name: "inclusionAI: Ling-2.6-flash (free)",
@@ -8605,12 +8785,12 @@ export const MODELS = {
8605
8785
  input: ["text"],
8606
8786
  cost: {
8607
8787
  input: 0.15,
8608
- output: 1.2,
8609
- cacheRead: 0.075,
8788
+ output: 1.15,
8789
+ cacheRead: 0.03,
8610
8790
  cacheWrite: 0,
8611
8791
  },
8612
8792
  contextWindow: 196608,
8613
- maxTokens: 65536,
8793
+ maxTokens: 4096,
8614
8794
  },
8615
8795
  "minimax/minimax-m2.5:free": {
8616
8796
  id: "minimax/minimax-m2.5:free",
@@ -8876,13 +9056,13 @@ export const MODELS = {
8876
9056
  reasoning: false,
8877
9057
  input: ["text"],
8878
9058
  cost: {
8879
- input: 0.02,
8880
- output: 0.04,
9059
+ input: 0.01,
9060
+ output: 0.03,
8881
9061
  cacheRead: 0,
8882
9062
  cacheWrite: 0,
8883
9063
  },
8884
9064
  contextWindow: 131072,
8885
- maxTokens: 16384,
9065
+ maxTokens: 4096,
8886
9066
  },
8887
9067
  "mistralai/mistral-saba": {
8888
9068
  id: "mistralai/mistral-saba",
@@ -9097,13 +9277,13 @@ export const MODELS = {
9097
9277
  reasoning: true,
9098
9278
  input: ["text", "image"],
9099
9279
  cost: {
9100
- input: 0.7999999999999999,
9101
- output: 3.5,
9102
- cacheRead: 0.19999999999999998,
9280
+ input: 0.7448,
9281
+ output: 4.655,
9282
+ cacheRead: 0.1463,
9103
9283
  cacheWrite: 0,
9104
9284
  },
9105
- contextWindow: 262144,
9106
- maxTokens: 262144,
9285
+ contextWindow: 256000,
9286
+ maxTokens: 65536,
9107
9287
  },
9108
9288
  "nex-agi/deepseek-v3.1-nex-n1": {
9109
9289
  id: "nex-agi/deepseek-v3.1-nex-n1",
@@ -10440,13 +10620,13 @@ export const MODELS = {
10440
10620
  reasoning: true,
10441
10621
  input: ["text"],
10442
10622
  cost: {
10443
- input: 0.13,
10444
- output: 0.6,
10623
+ input: 0.14950000000000002,
10624
+ output: 1.495,
10445
10625
  cacheRead: 0,
10446
10626
  cacheWrite: 0,
10447
10627
  },
10448
- contextWindow: 262144,
10449
- maxTokens: 262144,
10628
+ contextWindow: 131072,
10629
+ maxTokens: 4096,
10450
10630
  },
10451
10631
  "qwen/qwen3-30b-a3b": {
10452
10632
  id: "qwen/qwen3-30b-a3b",
@@ -11077,6 +11257,23 @@ export const MODELS = {
11077
11257
  contextWindow: 262144,
11078
11258
  maxTokens: 65536,
11079
11259
  },
11260
+ "tencent/hy3-preview:free": {
11261
+ id: "tencent/hy3-preview:free",
11262
+ name: "Tencent: Hy3 preview (free)",
11263
+ api: "openai-completions",
11264
+ provider: "openrouter",
11265
+ baseUrl: "https://openrouter.ai/api/v1",
11266
+ reasoning: true,
11267
+ input: ["text"],
11268
+ cost: {
11269
+ input: 0,
11270
+ output: 0,
11271
+ cacheRead: 0,
11272
+ cacheWrite: 0,
11273
+ },
11274
+ contextWindow: 262144,
11275
+ maxTokens: 262144,
11276
+ },
11080
11277
  "thedrummer/rocinante-12b": {
11081
11278
  id: "thedrummer/rocinante-12b",
11082
11279
  name: "TheDrummer: Rocinante 12B",
@@ -11907,7 +12104,7 @@ export const MODELS = {
11907
12104
  input: 0.5,
11908
12105
  output: 3,
11909
12106
  cacheRead: 0.09999999999999999,
11910
- cacheWrite: 0,
12107
+ cacheWrite: 0.625,
11911
12108
  },
11912
12109
  contextWindow: 1000000,
11913
12110
  maxTokens: 64000,
@@ -12286,6 +12483,40 @@ export const MODELS = {
12286
12483
  contextWindow: 128000,
12287
12484
  maxTokens: 64000,
12288
12485
  },
12486
+ "deepseek/deepseek-v4-flash": {
12487
+ id: "deepseek/deepseek-v4-flash",
12488
+ name: "DeepSeek V4 Flash",
12489
+ api: "anthropic-messages",
12490
+ provider: "vercel-ai-gateway",
12491
+ baseUrl: "https://ai-gateway.vercel.sh",
12492
+ reasoning: true,
12493
+ input: ["text"],
12494
+ cost: {
12495
+ input: 0.14,
12496
+ output: 0.28,
12497
+ cacheRead: 0.014,
12498
+ cacheWrite: 0,
12499
+ },
12500
+ contextWindow: 1000000,
12501
+ maxTokens: 384000,
12502
+ },
12503
+ "deepseek/deepseek-v4-pro": {
12504
+ id: "deepseek/deepseek-v4-pro",
12505
+ name: "DeepSeek V4 Pro",
12506
+ api: "anthropic-messages",
12507
+ provider: "vercel-ai-gateway",
12508
+ baseUrl: "https://ai-gateway.vercel.sh",
12509
+ reasoning: true,
12510
+ input: ["text"],
12511
+ cost: {
12512
+ input: 1.74,
12513
+ output: 3.48,
12514
+ cacheRead: 0.145,
12515
+ cacheWrite: 0,
12516
+ },
12517
+ contextWindow: 1000000,
12518
+ maxTokens: 384000,
12519
+ },
12289
12520
  "google/gemini-2.0-flash": {
12290
12521
  id: "google/gemini-2.0-flash",
12291
12522
  name: "Gemini 2.0 Flash",
@@ -14029,9 +14260,9 @@ export const MODELS = {
14029
14260
  reasoning: true,
14030
14261
  input: ["text"],
14031
14262
  cost: {
14032
- input: 0.09,
14033
- output: 0.29,
14034
- cacheRead: 0.045,
14263
+ input: 0.09999999999999999,
14264
+ output: 0.3,
14265
+ cacheRead: 0.01,
14035
14266
  cacheWrite: 0,
14036
14267
  },
14037
14268
  contextWindow: 262144,
@@ -14248,15 +14479,15 @@ export const MODELS = {
14248
14479
  provider: "vercel-ai-gateway",
14249
14480
  baseUrl: "https://ai-gateway.vercel.sh",
14250
14481
  reasoning: true,
14251
- input: ["text", "image"],
14482
+ input: ["text"],
14252
14483
  cost: {
14253
14484
  input: 1.4,
14254
14485
  output: 4.4,
14255
14486
  cacheRead: 0.26,
14256
14487
  cacheWrite: 0,
14257
14488
  },
14258
- contextWindow: 202752,
14259
- maxTokens: 202752,
14489
+ contextWindow: 202800,
14490
+ maxTokens: 64000,
14260
14491
  },
14261
14492
  "zai/glm-5v-turbo": {
14262
14493
  id: "zai/glm-5v-turbo",