@mariozechner/pi-ai 0.56.2 → 0.57.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (44)
  1. package/dist/models.generated.d.ts +108 -0
  2. package/dist/models.generated.d.ts.map +1 -1
  3. package/dist/models.generated.js +115 -12
  4. package/dist/models.generated.js.map +1 -1
  5. package/dist/providers/amazon-bedrock.d.ts.map +1 -1
  6. package/dist/providers/amazon-bedrock.js +5 -2
  7. package/dist/providers/amazon-bedrock.js.map +1 -1
  8. package/dist/providers/anthropic.d.ts.map +1 -1
  9. package/dist/providers/anthropic.js +5 -2
  10. package/dist/providers/anthropic.js.map +1 -1
  11. package/dist/providers/azure-openai-responses.d.ts.map +1 -1
  12. package/dist/providers/azure-openai-responses.js +5 -2
  13. package/dist/providers/azure-openai-responses.js.map +1 -1
  14. package/dist/providers/google-gemini-cli.d.ts.map +1 -1
  15. package/dist/providers/google-gemini-cli.js +9 -7
  16. package/dist/providers/google-gemini-cli.js.map +1 -1
  17. package/dist/providers/google-vertex.d.ts.map +1 -1
  18. package/dist/providers/google-vertex.js +5 -2
  19. package/dist/providers/google-vertex.js.map +1 -1
  20. package/dist/providers/google.d.ts.map +1 -1
  21. package/dist/providers/google.js +5 -2
  22. package/dist/providers/google.js.map +1 -1
  23. package/dist/providers/mistral.d.ts.map +1 -1
  24. package/dist/providers/mistral.js +5 -2
  25. package/dist/providers/mistral.js.map +1 -1
  26. package/dist/providers/openai-codex-responses.d.ts.map +1 -1
  27. package/dist/providers/openai-codex-responses.js +5 -2
  28. package/dist/providers/openai-codex-responses.js.map +1 -1
  29. package/dist/providers/openai-completions.d.ts.map +1 -1
  30. package/dist/providers/openai-completions.js +5 -2
  31. package/dist/providers/openai-completions.js.map +1 -1
  32. package/dist/providers/openai-responses-shared.d.ts.map +1 -1
  33. package/dist/providers/openai-responses-shared.js +0 -2
  34. package/dist/providers/openai-responses-shared.js.map +1 -1
  35. package/dist/providers/openai-responses.d.ts.map +1 -1
  36. package/dist/providers/openai-responses.js +5 -2
  37. package/dist/providers/openai-responses.js.map +1 -1
  38. package/dist/providers/register-builtins.d.ts +3 -2
  39. package/dist/providers/register-builtins.d.ts.map +1 -1
  40. package/dist/providers/register-builtins.js.map +1 -1
  41. package/dist/types.d.ts +3 -2
  42. package/dist/types.d.ts.map +1 -1
  43. package/dist/types.js.map +1 -1
  44. package/package.json +1 -1
@@ -2301,6 +2301,23 @@ export const MODELS = {
2301
2301
  contextWindow: 272000,
2302
2302
  maxTokens: 128000,
2303
2303
  },
2304
+ "gpt-5.4-pro": {
2305
+ id: "gpt-5.4-pro",
2306
+ name: "GPT-5.4 Pro",
2307
+ api: "azure-openai-responses",
2308
+ provider: "azure-openai-responses",
2309
+ baseUrl: "",
2310
+ reasoning: true,
2311
+ input: ["text", "image"],
2312
+ cost: {
2313
+ input: 30,
2314
+ output: 180,
2315
+ cacheRead: 0,
2316
+ cacheWrite: 0,
2317
+ },
2318
+ contextWindow: 1050000,
2319
+ maxTokens: 128000,
2320
+ },
2304
2321
  "o1": {
2305
2322
  id: "o1",
2306
2323
  name: "o1",
@@ -2877,7 +2894,7 @@ export const MODELS = {
2877
2894
  },
2878
2895
  "gpt-5.3-codex": {
2879
2896
  id: "gpt-5.3-codex",
2880
- name: "GPT-5.3 Codex",
2897
+ name: "GPT-5.3-Codex",
2881
2898
  api: "openai-responses",
2882
2899
  provider: "github-copilot",
2883
2900
  baseUrl: "https://api.individual.githubcopilot.com",
@@ -2890,7 +2907,25 @@ export const MODELS = {
2890
2907
  cacheRead: 0,
2891
2908
  cacheWrite: 0,
2892
2909
  },
2893
- contextWindow: 272000,
2910
+ contextWindow: 400000,
2911
+ maxTokens: 128000,
2912
+ },
2913
+ "gpt-5.4": {
2914
+ id: "gpt-5.4",
2915
+ name: "GPT-5.4",
2916
+ api: "openai-responses",
2917
+ provider: "github-copilot",
2918
+ baseUrl: "https://api.individual.githubcopilot.com",
2919
+ headers: { "User-Agent": "GitHubCopilotChat/0.35.0", "Editor-Version": "vscode/1.107.0", "Editor-Plugin-Version": "copilot-chat/0.35.0", "Copilot-Integration-Id": "vscode-chat" },
2920
+ reasoning: true,
2921
+ input: ["text", "image"],
2922
+ cost: {
2923
+ input: 0,
2924
+ output: 0,
2925
+ cacheRead: 0,
2926
+ cacheWrite: 0,
2927
+ },
2928
+ contextWindow: 400000,
2894
2929
  maxTokens: 128000,
2895
2930
  },
2896
2931
  "grok-code-fast-1": {
@@ -3392,6 +3427,23 @@ export const MODELS = {
3392
3427
  contextWindow: 200000,
3393
3428
  maxTokens: 64000,
3394
3429
  },
3430
+ "claude-sonnet-4-6": {
3431
+ id: "claude-sonnet-4-6",
3432
+ name: "Claude Sonnet 4.6 (Antigravity)",
3433
+ api: "google-gemini-cli",
3434
+ provider: "google-antigravity",
3435
+ baseUrl: "https://daily-cloudcode-pa.sandbox.googleapis.com",
3436
+ reasoning: true,
3437
+ input: ["text", "image"],
3438
+ cost: {
3439
+ input: 3,
3440
+ output: 15,
3441
+ cacheRead: 0.3,
3442
+ cacheWrite: 3.75,
3443
+ },
3444
+ contextWindow: 200000,
3445
+ maxTokens: 64000,
3446
+ },
3395
3447
  "gemini-3-flash": {
3396
3448
  id: "gemini-3-flash",
3397
3449
  name: "Gemini 3 Flash (Antigravity)",
@@ -5451,6 +5503,23 @@ export const MODELS = {
5451
5503
  contextWindow: 272000,
5452
5504
  maxTokens: 128000,
5453
5505
  },
5506
+ "gpt-5.4-pro": {
5507
+ id: "gpt-5.4-pro",
5508
+ name: "GPT-5.4 Pro",
5509
+ api: "openai-responses",
5510
+ provider: "openai",
5511
+ baseUrl: "https://api.openai.com/v1",
5512
+ reasoning: true,
5513
+ input: ["text", "image"],
5514
+ cost: {
5515
+ input: 30,
5516
+ output: 180,
5517
+ cacheRead: 0,
5518
+ cacheWrite: 0,
5519
+ },
5520
+ contextWindow: 1050000,
5521
+ maxTokens: 128000,
5522
+ },
5454
5523
  "o1": {
5455
5524
  id: "o1",
5456
5525
  name: "o1",
@@ -6169,6 +6238,23 @@ export const MODELS = {
6169
6238
  contextWindow: 272000,
6170
6239
  maxTokens: 128000,
6171
6240
  },
6241
+ "gpt-5.4-pro": {
6242
+ id: "gpt-5.4-pro",
6243
+ name: "GPT-5.4 Pro",
6244
+ api: "openai-responses",
6245
+ provider: "opencode",
6246
+ baseUrl: "https://opencode.ai/zen/v1",
6247
+ reasoning: true,
6248
+ input: ["text", "image"],
6249
+ cost: {
6250
+ input: 30,
6251
+ output: 180,
6252
+ cacheRead: 30,
6253
+ cacheWrite: 0,
6254
+ },
6255
+ contextWindow: 1050000,
6256
+ maxTokens: 128000,
6257
+ },
6172
6258
  "kimi-k2.5": {
6173
6259
  id: "kimi-k2.5",
6174
6260
  name: "Kimi K2.5",
@@ -7544,7 +7630,7 @@ export const MODELS = {
7544
7630
  cost: {
7545
7631
  input: 0.27,
7546
7632
  output: 0.95,
7547
- cacheRead: 0.0299999997,
7633
+ cacheRead: 0.029,
7548
7634
  cacheWrite: 0,
7549
7635
  },
7550
7636
  contextWindow: 196608,
@@ -9344,13 +9430,13 @@ export const MODELS = {
9344
9430
  reasoning: true,
9345
9431
  input: ["text"],
9346
9432
  cost: {
9347
- input: 0,
9348
- output: 0,
9349
- cacheRead: 0,
9433
+ input: 0.11,
9434
+ output: 0.6,
9435
+ cacheRead: 0.055,
9350
9436
  cacheWrite: 0,
9351
9437
  },
9352
- contextWindow: 131072,
9353
- maxTokens: 4096,
9438
+ contextWindow: 262144,
9439
+ maxTokens: 262144,
9354
9440
  },
9355
9441
  "qwen/qwen3-30b-a3b": {
9356
9442
  id: "qwen/qwen3-30b-a3b",
@@ -10347,9 +10433,9 @@ export const MODELS = {
10347
10433
  reasoning: true,
10348
10434
  input: ["text"],
10349
10435
  cost: {
10350
- input: 0.3,
10351
- output: 1.4,
10352
- cacheRead: 0.15,
10436
+ input: 0.38,
10437
+ output: 1.98,
10438
+ cacheRead: 0.19,
10353
10439
  cacheWrite: 0,
10354
10440
  },
10355
10441
  contextWindow: 202752,
@@ -11394,6 +11480,23 @@ export const MODELS = {
11394
11480
  contextWindow: 204800,
11395
11481
  maxTokens: 131000,
11396
11482
  },
11483
+ "minimax/minimax-m2.5-highspeed": {
11484
+ id: "minimax/minimax-m2.5-highspeed",
11485
+ name: "MiniMax M2.5 High Speed",
11486
+ api: "anthropic-messages",
11487
+ provider: "vercel-ai-gateway",
11488
+ baseUrl: "https://ai-gateway.vercel.sh",
11489
+ reasoning: true,
11490
+ input: ["text"],
11491
+ cost: {
11492
+ input: 0.6,
11493
+ output: 2.4,
11494
+ cacheRead: 0.03,
11495
+ cacheWrite: 0.375,
11496
+ },
11497
+ contextWindow: 4096,
11498
+ maxTokens: 4096,
11499
+ },
11397
11500
  "mistral/codestral": {
11398
11501
  id: "mistral/codestral",
11399
11502
  name: "Mistral Codestral",
@@ -12105,7 +12208,7 @@ export const MODELS = {
12105
12208
  cacheRead: 0.25,
12106
12209
  cacheWrite: 0,
12107
12210
  },
12108
- contextWindow: 1050000,
12211
+ contextWindow: 200000,
12109
12212
  maxTokens: 128000,
12110
12213
  },
12111
12214
  "openai/gpt-5.4-pro": {