@mariozechner/pi-ai 0.49.2 → 0.49.3

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (36)
  1. package/dist/models.generated.d.ts +68 -74
  2. package/dist/models.generated.d.ts.map +1 -1
  3. package/dist/models.generated.js +77 -78
  4. package/dist/models.generated.js.map +1 -1
  5. package/dist/providers/anthropic.d.ts.map +1 -1
  6. package/dist/providers/anthropic.js +15 -8
  7. package/dist/providers/anthropic.js.map +1 -1
  8. package/dist/providers/google-gemini-cli.d.ts.map +1 -1
  9. package/dist/providers/google-gemini-cli.js +1 -0
  10. package/dist/providers/google-gemini-cli.js.map +1 -1
  11. package/dist/providers/google-vertex.d.ts.map +1 -1
  12. package/dist/providers/google-vertex.js +4 -4
  13. package/dist/providers/google-vertex.js.map +1 -1
  14. package/dist/providers/google.d.ts.map +1 -1
  15. package/dist/providers/google.js +4 -4
  16. package/dist/providers/google.js.map +1 -1
  17. package/dist/providers/openai-codex-responses.d.ts.map +1 -1
  18. package/dist/providers/openai-codex-responses.js +15 -5
  19. package/dist/providers/openai-codex-responses.js.map +1 -1
  20. package/dist/providers/openai-completions.d.ts.map +1 -1
  21. package/dist/providers/openai-completions.js +6 -2
  22. package/dist/providers/openai-completions.js.map +1 -1
  23. package/dist/providers/openai-responses.d.ts.map +1 -1
  24. package/dist/providers/openai-responses.js +23 -4
  25. package/dist/providers/openai-responses.js.map +1 -1
  26. package/dist/stream.d.ts.map +1 -1
  27. package/dist/stream.js +25 -5
  28. package/dist/stream.js.map +1 -1
  29. package/dist/types.d.ts +6 -0
  30. package/dist/types.d.ts.map +1 -1
  31. package/dist/types.js.map +1 -1
  32. package/dist/utils/oauth/openai-codex.d.ts +5 -0
  33. package/dist/utils/oauth/openai-codex.d.ts.map +1 -1
  34. package/dist/utils/oauth/openai-codex.js +27 -8
  35. package/dist/utils/oauth/openai-codex.js.map +1 -1
  36. package/package.json +1 -1
@@ -1540,24 +1540,6 @@ export const MODELS = {
1540
1540
  contextWindow: 128000,
1541
1541
  maxTokens: 128000,
1542
1542
  },
1543
- "gpt-5-codex": {
1544
- id: "gpt-5-codex",
1545
- name: "GPT-5-Codex",
1546
- api: "openai-responses",
1547
- provider: "github-copilot",
1548
- baseUrl: "https://api.individual.githubcopilot.com",
1549
- headers: { "User-Agent": "GitHubCopilotChat/0.35.0", "Editor-Version": "vscode/1.107.0", "Editor-Plugin-Version": "copilot-chat/0.35.0", "Copilot-Integration-Id": "vscode-chat" },
1550
- reasoning: true,
1551
- input: ["text", "image"],
1552
- cost: {
1553
- input: 0,
1554
- output: 0,
1555
- cacheRead: 0,
1556
- cacheWrite: 0,
1557
- },
1558
- contextWindow: 128000,
1559
- maxTokens: 128000,
1560
- },
1561
1543
  "gpt-5-mini": {
1562
1544
  id: "gpt-5-mini",
1563
1545
  name: "GPT-5-mini",
@@ -3415,7 +3397,7 @@ export const MODELS = {
3415
3397
  cost: {
3416
3398
  input: 1.25,
3417
3399
  output: 10,
3418
- cacheRead: 0.13,
3400
+ cacheRead: 0.125,
3419
3401
  cacheWrite: 0,
3420
3402
  },
3421
3403
  contextWindow: 400000,
@@ -3466,7 +3448,7 @@ export const MODELS = {
3466
3448
  cost: {
3467
3449
  input: 0.25,
3468
3450
  output: 2,
3469
- cacheRead: 0.03,
3451
+ cacheRead: 0.025,
3470
3452
  cacheWrite: 0,
3471
3453
  },
3472
3454
  contextWindow: 400000,
@@ -3483,7 +3465,7 @@ export const MODELS = {
3483
3465
  cost: {
3484
3466
  input: 0.05,
3485
3467
  output: 0.4,
3486
- cacheRead: 0.01,
3468
+ cacheRead: 0.005,
3487
3469
  cacheWrite: 0,
3488
3470
  },
3489
3471
  contextWindow: 400000,
@@ -4802,7 +4784,7 @@ export const MODELS = {
4802
4784
  cacheWrite: 0,
4803
4785
  },
4804
4786
  contextWindow: 262144,
4805
- maxTokens: 16384,
4787
+ maxTokens: 32768,
4806
4788
  },
4807
4789
  "cohere/command-r-08-2024": {
4808
4790
  id: "cohere/command-r-08-2024",
@@ -5159,7 +5141,7 @@ export const MODELS = {
5159
5141
  cacheWrite: 0.0833,
5160
5142
  },
5161
5143
  contextWindow: 1048576,
5162
- maxTokens: 65536,
5144
+ maxTokens: 65535,
5163
5145
  },
5164
5146
  "google/gemini-2.5-flash-preview-09-2025": {
5165
5147
  id: "google/gemini-2.5-flash-preview-09-2025",
@@ -5172,7 +5154,7 @@ export const MODELS = {
5172
5154
  cost: {
5173
5155
  input: 0.3,
5174
5156
  output: 2.5,
5175
- cacheRead: 0.075,
5157
+ cacheRead: 0.03,
5176
5158
  cacheWrite: 0.0833,
5177
5159
  },
5178
5160
  contextWindow: 1048576,
@@ -5822,7 +5804,7 @@ export const MODELS = {
5822
5804
  cacheWrite: 0,
5823
5805
  },
5824
5806
  contextWindow: 131072,
5825
- maxTokens: 131072,
5807
+ maxTokens: 16384,
5826
5808
  },
5827
5809
  "mistralai/mistral-saba": {
5828
5810
  id: "mistralai/mistral-saba",
@@ -7286,6 +7268,23 @@ export const MODELS = {
7286
7268
  contextWindow: 131072,
7287
7269
  maxTokens: 8192,
7288
7270
  },
7271
+ "qwen/qwen2.5-vl-72b-instruct": {
7272
+ id: "qwen/qwen2.5-vl-72b-instruct",
7273
+ name: "Qwen: Qwen2.5 VL 72B Instruct",
7274
+ api: "openai-completions",
7275
+ provider: "openrouter",
7276
+ baseUrl: "https://openrouter.ai/api/v1",
7277
+ reasoning: false,
7278
+ input: ["text", "image"],
7279
+ cost: {
7280
+ input: 0.15,
7281
+ output: 0.6,
7282
+ cacheRead: 0,
7283
+ cacheWrite: 0,
7284
+ },
7285
+ contextWindow: 32768,
7286
+ maxTokens: 32768,
7287
+ },
7289
7288
  "qwen/qwen3-14b": {
7290
7289
  id: "qwen/qwen3-14b",
7291
7290
  name: "Qwen: Qwen3 14B",
@@ -7643,6 +7642,23 @@ export const MODELS = {
7643
7642
  contextWindow: 262144,
7644
7643
  maxTokens: 4096,
7645
7644
  },
7645
+ "qwen/qwen3-vl-235b-a22b-thinking": {
7646
+ id: "qwen/qwen3-vl-235b-a22b-thinking",
7647
+ name: "Qwen: Qwen3 VL 235B A22B Thinking",
7648
+ api: "openai-completions",
7649
+ provider: "openrouter",
7650
+ baseUrl: "https://openrouter.ai/api/v1",
7651
+ reasoning: true,
7652
+ input: ["text", "image"],
7653
+ cost: {
7654
+ input: 0.44999999999999996,
7655
+ output: 3.5,
7656
+ cacheRead: 0,
7657
+ cacheWrite: 0,
7658
+ },
7659
+ contextWindow: 262144,
7660
+ maxTokens: 262144,
7661
+ },
7646
7662
  "qwen/qwen3-vl-30b-a3b-instruct": {
7647
7663
  id: "qwen/qwen3-vl-30b-a3b-instruct",
7648
7664
  name: "Qwen: Qwen3 VL 30B A3B Instruct",
@@ -8204,6 +8220,23 @@ export const MODELS = {
8204
8220
  contextWindow: 202752,
8205
8221
  maxTokens: 65535,
8206
8222
  },
8223
+ "z-ai/glm-4.7-flash": {
8224
+ id: "z-ai/glm-4.7-flash",
8225
+ name: "Z.AI: GLM 4.7 Flash",
8226
+ api: "openai-completions",
8227
+ provider: "openrouter",
8228
+ baseUrl: "https://openrouter.ai/api/v1",
8229
+ reasoning: true,
8230
+ input: ["text"],
8231
+ cost: {
8232
+ input: 0.07,
8233
+ output: 0.39999999999999997,
8234
+ cacheRead: 0.01,
8235
+ cacheWrite: 0,
8236
+ },
8237
+ contextWindow: 200000,
8238
+ maxTokens: 131072,
8239
+ },
8207
8240
  },
8208
8241
  "vercel-ai-gateway": {
8209
8242
  "alibaba/qwen-3-14b": {
@@ -8393,23 +8426,6 @@ export const MODELS = {
8393
8426
  contextWindow: 200000,
8394
8427
  maxTokens: 4096,
8395
8428
  },
8396
- "anthropic/claude-3-opus": {
8397
- id: "anthropic/claude-3-opus",
8398
- name: "Claude 3 Opus",
8399
- api: "anthropic-messages",
8400
- provider: "vercel-ai-gateway",
8401
- baseUrl: "https://ai-gateway.vercel.sh",
8402
- reasoning: false,
8403
- input: ["text", "image"],
8404
- cost: {
8405
- input: 15,
8406
- output: 75,
8407
- cacheRead: 0,
8408
- cacheWrite: 0,
8409
- },
8410
- contextWindow: 200000,
8411
- maxTokens: 8192,
8412
- },
8413
8429
  "anthropic/claude-3.5-haiku": {
8414
8430
  id: "anthropic/claude-3.5-haiku",
8415
8431
  name: "Claude 3.5 Haiku",
@@ -8560,7 +8576,7 @@ export const MODELS = {
8560
8576
  cacheRead: 0.3,
8561
8577
  cacheWrite: 3.75,
8562
8578
  },
8563
- contextWindow: 200000,
8579
+ contextWindow: 1000000,
8564
8580
  maxTokens: 64000,
8565
8581
  },
8566
8582
  "anthropic/claude-sonnet-4.5": {
@@ -8577,7 +8593,7 @@ export const MODELS = {
8577
8593
  cacheRead: 0.3,
8578
8594
  cacheWrite: 3.75,
8579
8595
  },
8580
- contextWindow: 200000,
8596
+ contextWindow: 1000000,
8581
8597
  maxTokens: 64000,
8582
8598
  },
8583
8599
  "bytedance/seed-1.6": {
@@ -8699,40 +8715,6 @@ export const MODELS = {
8699
8715
  contextWindow: 128000,
8700
8716
  maxTokens: 64000,
8701
8717
  },
8702
- "google/gemini-2.0-flash": {
8703
- id: "google/gemini-2.0-flash",
8704
- name: "Gemini 2.0 Flash",
8705
- api: "anthropic-messages",
8706
- provider: "vercel-ai-gateway",
8707
- baseUrl: "https://ai-gateway.vercel.sh",
8708
- reasoning: false,
8709
- input: ["text", "image"],
8710
- cost: {
8711
- input: 0.09999999999999999,
8712
- output: 0.39999999999999997,
8713
- cacheRead: 0.024999999999999998,
8714
- cacheWrite: 0,
8715
- },
8716
- contextWindow: 1000000,
8717
- maxTokens: 8192,
8718
- },
8719
- "google/gemini-2.0-flash-lite": {
8720
- id: "google/gemini-2.0-flash-lite",
8721
- name: "Gemini 2.0 Flash Lite",
8722
- api: "anthropic-messages",
8723
- provider: "vercel-ai-gateway",
8724
- baseUrl: "https://ai-gateway.vercel.sh",
8725
- reasoning: false,
8726
- input: ["text", "image"],
8727
- cost: {
8728
- input: 0.075,
8729
- output: 0.3,
8730
- cacheRead: 0,
8731
- cacheWrite: 0,
8732
- },
8733
- contextWindow: 1048576,
8734
- maxTokens: 8192,
8735
- },
8736
8718
  "google/gemini-2.5-flash": {
8737
8719
  id: "google/gemini-2.5-flash",
8738
8720
  name: "Gemini 2.5 Flash",
@@ -10314,6 +10296,23 @@ export const MODELS = {
10314
10296
  contextWindow: 202752,
10315
10297
  maxTokens: 120000,
10316
10298
  },
10299
+ "zai/glm-4.7-flashx": {
10300
+ id: "zai/glm-4.7-flashx",
10301
+ name: "GLM 4.7 FlashX",
10302
+ api: "anthropic-messages",
10303
+ provider: "vercel-ai-gateway",
10304
+ baseUrl: "https://ai-gateway.vercel.sh",
10305
+ reasoning: true,
10306
+ input: ["text"],
10307
+ cost: {
10308
+ input: 0.06,
10309
+ output: 0.39999999999999997,
10310
+ cacheRead: 0.01,
10311
+ cacheWrite: 0,
10312
+ },
10313
+ contextWindow: 200000,
10314
+ maxTokens: 128000,
10315
+ },
10317
10316
  },
10318
10317
  "xai": {
10319
10318
  "grok-2": {