@hyperspaceng/neural-ai 0.67.3 → 0.67.7

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (58):
  1. package/dist/index.d.ts +2 -2
  2. package/dist/index.d.ts.map +1 -1
  3. package/dist/index.js.map +1 -1
  4. package/dist/models.d.ts +1 -1
  5. package/dist/models.d.ts.map +1 -1
  6. package/dist/models.generated.d.ts +230 -37
  7. package/dist/models.generated.d.ts.map +1 -1
  8. package/dist/models.generated.js +267 -79
  9. package/dist/models.generated.js.map +1 -1
  10. package/dist/models.js +5 -2
  11. package/dist/models.js.map +1 -1
  12. package/dist/providers/amazon-bedrock.d.ts +18 -0
  13. package/dist/providers/amazon-bedrock.d.ts.map +1 -1
  14. package/dist/providers/amazon-bedrock.js +55 -3
  15. package/dist/providers/amazon-bedrock.js.map +1 -1
  16. package/dist/providers/anthropic.d.ts +16 -2
  17. package/dist/providers/anthropic.d.ts.map +1 -1
  18. package/dist/providers/anthropic.js +36 -15
  19. package/dist/providers/anthropic.js.map +1 -1
  20. package/dist/providers/azure-openai-responses.d.ts.map +1 -1
  21. package/dist/providers/azure-openai-responses.js +5 -1
  22. package/dist/providers/azure-openai-responses.js.map +1 -1
  23. package/dist/providers/faux.d.ts.map +1 -1
  24. package/dist/providers/faux.js +1 -0
  25. package/dist/providers/faux.js.map +1 -1
  26. package/dist/providers/google-gemini-cli.d.ts.map +1 -1
  27. package/dist/providers/google-gemini-cli.js +3 -0
  28. package/dist/providers/google-gemini-cli.js.map +1 -1
  29. package/dist/providers/google-vertex.d.ts.map +1 -1
  30. package/dist/providers/google-vertex.js +2 -1
  31. package/dist/providers/google-vertex.js.map +1 -1
  32. package/dist/providers/openai-codex-responses.d.ts.map +1 -1
  33. package/dist/providers/openai-codex-responses.js +10 -7
  34. package/dist/providers/openai-codex-responses.js.map +1 -1
  35. package/dist/providers/openai-completions.d.ts.map +1 -1
  36. package/dist/providers/openai-completions.js +5 -1
  37. package/dist/providers/openai-completions.js.map +1 -1
  38. package/dist/providers/openai-responses-shared.d.ts +1 -0
  39. package/dist/providers/openai-responses-shared.d.ts.map +1 -1
  40. package/dist/providers/openai-responses-shared.js +3 -1
  41. package/dist/providers/openai-responses-shared.js.map +1 -1
  42. package/dist/providers/openai-responses.d.ts.map +1 -1
  43. package/dist/providers/openai-responses.js +6 -2
  44. package/dist/providers/openai-responses.js.map +1 -1
  45. package/dist/providers/simple-options.d.ts.map +1 -1
  46. package/dist/providers/simple-options.js +1 -0
  47. package/dist/providers/simple-options.js.map +1 -1
  48. package/dist/providers/transform-messages.d.ts.map +1 -1
  49. package/dist/providers/transform-messages.js +20 -32
  50. package/dist/providers/transform-messages.js.map +1 -1
  51. package/dist/types.d.ts +9 -0
  52. package/dist/types.d.ts.map +1 -1
  53. package/dist/types.js.map +1 -1
  54. package/dist/utils/headers.d.ts +2 -0
  55. package/dist/utils/headers.d.ts.map +1 -0
  56. package/dist/utils/headers.js +8 -0
  57. package/dist/utils/headers.js.map +1 -0
  58. package/package.json +3 -3
@@ -257,6 +257,23 @@ export const MODELS = {
257
257
  contextWindow: 1000000,
258
258
  maxTokens: 128000,
259
259
  },
260
+ "anthropic.claude-opus-4-7": {
261
+ id: "anthropic.claude-opus-4-7",
262
+ name: "Claude Opus 4.7",
263
+ api: "bedrock-converse-stream",
264
+ provider: "amazon-bedrock",
265
+ baseUrl: "https://bedrock-runtime.us-east-1.amazonaws.com",
266
+ reasoning: true,
267
+ input: ["text", "image"],
268
+ cost: {
269
+ input: 5,
270
+ output: 25,
271
+ cacheRead: 0.5,
272
+ cacheWrite: 6.25,
273
+ },
274
+ contextWindow: 1000000,
275
+ maxTokens: 128000,
276
+ },
260
277
  "anthropic.claude-sonnet-4-20250514-v1:0": {
261
278
  id: "anthropic.claude-sonnet-4-20250514-v1:0",
262
279
  name: "Claude Sonnet 4",
@@ -410,6 +427,23 @@ export const MODELS = {
410
427
  contextWindow: 1000000,
411
428
  maxTokens: 128000,
412
429
  },
430
+ "eu.anthropic.claude-opus-4-7": {
431
+ id: "eu.anthropic.claude-opus-4-7",
432
+ name: "Claude Opus 4.7 (EU)",
433
+ api: "bedrock-converse-stream",
434
+ provider: "amazon-bedrock",
435
+ baseUrl: "https://bedrock-runtime.us-east-1.amazonaws.com",
436
+ reasoning: true,
437
+ input: ["text", "image"],
438
+ cost: {
439
+ input: 5,
440
+ output: 25,
441
+ cacheRead: 0.5,
442
+ cacheWrite: 6.25,
443
+ },
444
+ contextWindow: 1000000,
445
+ maxTokens: 128000,
446
+ },
413
447
  "eu.anthropic.claude-sonnet-4-20250514-v1:0": {
414
448
  id: "eu.anthropic.claude-sonnet-4-20250514-v1:0",
415
449
  name: "Claude Sonnet 4 (EU)",
@@ -512,6 +546,23 @@ export const MODELS = {
512
546
  contextWindow: 1000000,
513
547
  maxTokens: 128000,
514
548
  },
549
+ "global.anthropic.claude-opus-4-7": {
550
+ id: "global.anthropic.claude-opus-4-7",
551
+ name: "Claude Opus 4.7 (Global)",
552
+ api: "bedrock-converse-stream",
553
+ provider: "amazon-bedrock",
554
+ baseUrl: "https://bedrock-runtime.us-east-1.amazonaws.com",
555
+ reasoning: true,
556
+ input: ["text", "image"],
557
+ cost: {
558
+ input: 5,
559
+ output: 25,
560
+ cacheRead: 0.5,
561
+ cacheWrite: 6.25,
562
+ },
563
+ contextWindow: 1000000,
564
+ maxTokens: 128000,
565
+ },
515
566
  "global.anthropic.claude-sonnet-4-20250514-v1:0": {
516
567
  id: "global.anthropic.claude-sonnet-4-20250514-v1:0",
517
568
  name: "Claude Sonnet 4 (Global)",
@@ -1345,6 +1396,23 @@ export const MODELS = {
1345
1396
  contextWindow: 1000000,
1346
1397
  maxTokens: 128000,
1347
1398
  },
1399
+ "us.anthropic.claude-opus-4-7": {
1400
+ id: "us.anthropic.claude-opus-4-7",
1401
+ name: "Claude Opus 4.7 (US)",
1402
+ api: "bedrock-converse-stream",
1403
+ provider: "amazon-bedrock",
1404
+ baseUrl: "https://bedrock-runtime.us-east-1.amazonaws.com",
1405
+ reasoning: true,
1406
+ input: ["text", "image"],
1407
+ cost: {
1408
+ input: 5,
1409
+ output: 25,
1410
+ cacheRead: 0.5,
1411
+ cacheWrite: 6.25,
1412
+ },
1413
+ contextWindow: 1000000,
1414
+ maxTokens: 128000,
1415
+ },
1348
1416
  "us.anthropic.claude-sonnet-4-20250514-v1:0": {
1349
1417
  id: "us.anthropic.claude-sonnet-4-20250514-v1:0",
1350
1418
  name: "Claude Sonnet 4 (US)",
@@ -1772,6 +1840,23 @@ export const MODELS = {
1772
1840
  contextWindow: 1000000,
1773
1841
  maxTokens: 128000,
1774
1842
  },
1843
+ "claude-opus-4-7": {
1844
+ id: "claude-opus-4-7",
1845
+ name: "Claude Opus 4.7",
1846
+ api: "anthropic-messages",
1847
+ provider: "anthropic",
1848
+ baseUrl: "https://api.anthropic.com",
1849
+ reasoning: true,
1850
+ input: ["text", "image"],
1851
+ cost: {
1852
+ input: 5,
1853
+ output: 25,
1854
+ cacheRead: 0.5,
1855
+ cacheWrite: 6.25,
1856
+ },
1857
+ contextWindow: 1000000,
1858
+ maxTokens: 128000,
1859
+ },
1775
1860
  "claude-sonnet-4-0": {
1776
1861
  id: "claude-sonnet-4-0",
1777
1862
  name: "Claude Sonnet 4 (latest)",
@@ -1859,23 +1944,6 @@ export const MODELS = {
1859
1944
  },
1860
1945
  },
1861
1946
  "azure-openai-responses": {
1862
- "codex-mini-latest": {
1863
- id: "codex-mini-latest",
1864
- name: "Codex Mini",
1865
- api: "azure-openai-responses",
1866
- provider: "azure-openai-responses",
1867
- baseUrl: "",
1868
- reasoning: true,
1869
- input: ["text"],
1870
- cost: {
1871
- input: 1.5,
1872
- output: 6,
1873
- cacheRead: 0.375,
1874
- cacheWrite: 0,
1875
- },
1876
- contextWindow: 200000,
1877
- maxTokens: 100000,
1878
- },
1879
1947
  "gpt-4": {
1880
1948
  id: "gpt-4",
1881
1949
  name: "GPT-4",
@@ -2682,6 +2750,24 @@ export const MODELS = {
2682
2750
  contextWindow: 1000000,
2683
2751
  maxTokens: 64000,
2684
2752
  },
2753
+ "claude-opus-4.7": {
2754
+ id: "claude-opus-4.7",
2755
+ name: "Claude Opus 4.7",
2756
+ api: "anthropic-messages",
2757
+ provider: "github-copilot",
2758
+ baseUrl: "https://api.individual.githubcopilot.com",
2759
+ headers: { "User-Agent": "GitHubCopilotChat/0.35.0", "Editor-Version": "vscode/1.107.0", "Editor-Plugin-Version": "copilot-chat/0.35.0", "Copilot-Integration-Id": "vscode-chat" },
2760
+ reasoning: true,
2761
+ input: ["text", "image"],
2762
+ cost: {
2763
+ input: 0,
2764
+ output: 0,
2765
+ cacheRead: 0,
2766
+ cacheWrite: 0,
2767
+ },
2768
+ contextWindow: 144000,
2769
+ maxTokens: 64000,
2770
+ },
2685
2771
  "claude-sonnet-4": {
2686
2772
  id: "claude-sonnet-4",
2687
2773
  name: "Claude Sonnet 4",
@@ -4682,9 +4768,9 @@ export const MODELS = {
4682
4768
  },
4683
4769
  },
4684
4770
  "kimi-coding": {
4685
- "k2p5": {
4686
- id: "k2p5",
4687
- name: "Kimi K2.5",
4771
+ "kimi-for-coding": {
4772
+ id: "kimi-for-coding",
4773
+ name: "Kimi For Coding",
4688
4774
  api: "anthropic-messages",
4689
4775
  provider: "kimi-coding",
4690
4776
  baseUrl: "https://api.kimi.com/coding",
@@ -5234,23 +5320,6 @@ export const MODELS = {
5234
5320
  },
5235
5321
  },
5236
5322
  "openai": {
5237
- "codex-mini-latest": {
5238
- id: "codex-mini-latest",
5239
- name: "Codex Mini",
5240
- api: "openai-responses",
5241
- provider: "openai",
5242
- baseUrl: "https://api.openai.com/v1",
5243
- reasoning: true,
5244
- input: ["text"],
5245
- cost: {
5246
- input: 1.5,
5247
- output: 6,
5248
- cacheRead: 0.375,
5249
- cacheWrite: 0,
5250
- },
5251
- contextWindow: 200000,
5252
- maxTokens: 100000,
5253
- },
5254
5323
  "gpt-4": {
5255
5324
  id: "gpt-4",
5256
5325
  name: "GPT-4",
@@ -6190,6 +6259,23 @@ export const MODELS = {
6190
6259
  contextWindow: 1000000,
6191
6260
  maxTokens: 128000,
6192
6261
  },
6262
+ "claude-opus-4-7": {
6263
+ id: "claude-opus-4-7",
6264
+ name: "Claude Opus 4.7",
6265
+ api: "anthropic-messages",
6266
+ provider: "opencode",
6267
+ baseUrl: "https://opencode.ai/zen",
6268
+ reasoning: true,
6269
+ input: ["text", "image"],
6270
+ cost: {
6271
+ input: 5,
6272
+ output: 25,
6273
+ cacheRead: 0.5,
6274
+ cacheWrite: 6.25,
6275
+ },
6276
+ contextWindow: 1000000,
6277
+ maxTokens: 128000,
6278
+ },
6193
6279
  "claude-sonnet-4": {
6194
6280
  id: "claude-sonnet-4",
6195
6281
  name: "Claude Sonnet 4",
@@ -6615,6 +6701,40 @@ export const MODELS = {
6615
6701
  contextWindow: 204800,
6616
6702
  maxTokens: 128000,
6617
6703
  },
6704
+ "qwen3.5-plus": {
6705
+ id: "qwen3.5-plus",
6706
+ name: "Qwen3.5 Plus",
6707
+ api: "openai-completions",
6708
+ provider: "opencode",
6709
+ baseUrl: "https://opencode.ai/zen/v1",
6710
+ reasoning: true,
6711
+ input: ["text", "image"],
6712
+ cost: {
6713
+ input: 0.2,
6714
+ output: 1.2,
6715
+ cacheRead: 0.02,
6716
+ cacheWrite: 0.25,
6717
+ },
6718
+ contextWindow: 262144,
6719
+ maxTokens: 65536,
6720
+ },
6721
+ "qwen3.6-plus": {
6722
+ id: "qwen3.6-plus",
6723
+ name: "Qwen3.6 Plus",
6724
+ api: "openai-completions",
6725
+ provider: "opencode",
6726
+ baseUrl: "https://opencode.ai/zen/v1",
6727
+ reasoning: true,
6728
+ input: ["text", "image"],
6729
+ cost: {
6730
+ input: 0.5,
6731
+ output: 3,
6732
+ cacheRead: 0.05,
6733
+ cacheWrite: 0.625,
6734
+ },
6735
+ contextWindow: 262144,
6736
+ maxTokens: 65536,
6737
+ },
6618
6738
  },
6619
6739
  "opencode-go": {
6620
6740
  "glm-5": {
@@ -6705,9 +6825,9 @@ export const MODELS = {
6705
6825
  "minimax-m2.5": {
6706
6826
  id: "minimax-m2.5",
6707
6827
  name: "MiniMax M2.5",
6708
- api: "anthropic-messages",
6828
+ api: "openai-completions",
6709
6829
  provider: "opencode-go",
6710
- baseUrl: "https://opencode.ai/zen/go",
6830
+ baseUrl: "https://opencode.ai/zen/go/v1",
6711
6831
  reasoning: true,
6712
6832
  input: ["text"],
6713
6833
  cost: {
@@ -6717,7 +6837,7 @@ export const MODELS = {
6717
6837
  cacheWrite: 0,
6718
6838
  },
6719
6839
  contextWindow: 204800,
6720
- maxTokens: 131072,
6840
+ maxTokens: 65536,
6721
6841
  },
6722
6842
  "minimax-m2.7": {
6723
6843
  id: "minimax-m2.7",
@@ -6736,6 +6856,40 @@ export const MODELS = {
6736
6856
  contextWindow: 204800,
6737
6857
  maxTokens: 131072,
6738
6858
  },
6859
+ "qwen3.5-plus": {
6860
+ id: "qwen3.5-plus",
6861
+ name: "Qwen3.5 Plus",
6862
+ api: "openai-completions",
6863
+ provider: "opencode-go",
6864
+ baseUrl: "https://opencode.ai/zen/go/v1",
6865
+ reasoning: true,
6866
+ input: ["text", "image"],
6867
+ cost: {
6868
+ input: 0.2,
6869
+ output: 1.2,
6870
+ cacheRead: 0.02,
6871
+ cacheWrite: 0.25,
6872
+ },
6873
+ contextWindow: 262144,
6874
+ maxTokens: 65536,
6875
+ },
6876
+ "qwen3.6-plus": {
6877
+ id: "qwen3.6-plus",
6878
+ name: "Qwen3.6 Plus",
6879
+ api: "openai-completions",
6880
+ provider: "opencode-go",
6881
+ baseUrl: "https://opencode.ai/zen/go/v1",
6882
+ reasoning: true,
6883
+ input: ["text", "image"],
6884
+ cost: {
6885
+ input: 0.5,
6886
+ output: 3,
6887
+ cacheRead: 0.05,
6888
+ cacheWrite: 0.625,
6889
+ },
6890
+ contextWindow: 262144,
6891
+ maxTokens: 65536,
6892
+ },
6739
6893
  },
6740
6894
  "openrouter": {
6741
6895
  "ai21/jamba-large-1.7": {
@@ -7044,6 +7198,23 @@ export const MODELS = {
7044
7198
  contextWindow: 1000000,
7045
7199
  maxTokens: 128000,
7046
7200
  },
7201
+ "anthropic/claude-opus-4.7": {
7202
+ id: "anthropic/claude-opus-4.7",
7203
+ name: "Anthropic: Claude Opus 4.7",
7204
+ api: "openai-completions",
7205
+ provider: "openrouter",
7206
+ baseUrl: "https://openrouter.ai/api/v1",
7207
+ reasoning: true,
7208
+ input: ["text", "image"],
7209
+ cost: {
7210
+ input: 5,
7211
+ output: 25,
7212
+ cacheRead: 0.5,
7213
+ cacheWrite: 6.25,
7214
+ },
7215
+ contextWindow: 1000000,
7216
+ maxTokens: 128000,
7217
+ },
7047
7218
  "anthropic/claude-sonnet-4": {
7048
7219
  id: "anthropic/claude-sonnet-4",
7049
7220
  name: "Anthropic: Claude Sonnet 4",
@@ -7339,7 +7510,7 @@ export const MODELS = {
7339
7510
  api: "openai-completions",
7340
7511
  provider: "openrouter",
7341
7512
  baseUrl: "https://openrouter.ai/api/v1",
7342
- reasoning: true,
7513
+ reasoning: false,
7343
7514
  input: ["text"],
7344
7515
  cost: {
7345
7516
  input: 0.19999999999999998,
@@ -7427,13 +7598,13 @@ export const MODELS = {
7427
7598
  reasoning: true,
7428
7599
  input: ["text"],
7429
7600
  cost: {
7430
- input: 0.26,
7431
- output: 0.38,
7432
- cacheRead: 0.13,
7601
+ input: 0.25899999999999995,
7602
+ output: 0.42,
7603
+ cacheRead: 0.135,
7433
7604
  cacheWrite: 0,
7434
7605
  },
7435
7606
  contextWindow: 163840,
7436
- maxTokens: 4096,
7607
+ maxTokens: 163840,
7437
7608
  },
7438
7609
  "deepseek/deepseek-v3.2-exp": {
7439
7610
  id: "deepseek/deepseek-v3.2-exp",
@@ -7483,7 +7654,7 @@ export const MODELS = {
7483
7654
  cacheRead: 0.024999999999999998,
7484
7655
  cacheWrite: 0.08333333333333334,
7485
7656
  },
7486
- contextWindow: 1048576,
7657
+ contextWindow: 1000000,
7487
7658
  maxTokens: 8192,
7488
7659
  },
7489
7660
  "google/gemini-2.0-flash-lite-001": {
@@ -8385,7 +8556,7 @@ export const MODELS = {
8385
8556
  cacheWrite: 0,
8386
8557
  },
8387
8558
  contextWindow: 131072,
8388
- maxTokens: 131072,
8559
+ maxTokens: 32768,
8389
8560
  },
8390
8561
  "moonshotai/kimi-k2-0905": {
8391
8562
  id: "moonshotai/kimi-k2-0905",
@@ -8435,7 +8606,7 @@ export const MODELS = {
8435
8606
  cacheRead: 0.07,
8436
8607
  cacheWrite: 0,
8437
8608
  },
8438
- contextWindow: 262144,
8609
+ contextWindow: 256000,
8439
8610
  maxTokens: 4096,
8440
8611
  },
8441
8612
  "nex-agi/deepseek-v3.1-nex-n1": {
@@ -8532,9 +8703,9 @@ export const MODELS = {
8532
8703
  reasoning: true,
8533
8704
  input: ["text"],
8534
8705
  cost: {
8535
- input: 0.09999999999999999,
8536
- output: 0.5,
8537
- cacheRead: 0.09999999999999999,
8706
+ input: 0.09,
8707
+ output: 0.44999999999999996,
8708
+ cacheRead: 0,
8538
8709
  cacheWrite: 0,
8539
8710
  },
8540
8711
  contextWindow: 262144,
@@ -9708,7 +9879,7 @@ export const MODELS = {
9708
9879
  input: 0.26,
9709
9880
  output: 0.78,
9710
9881
  cacheRead: 0.052000000000000005,
9711
- cacheWrite: 0,
9882
+ cacheWrite: 0.325,
9712
9883
  },
9713
9884
  contextWindow: 1000000,
9714
9885
  maxTokens: 32768,
@@ -9725,7 +9896,7 @@ export const MODELS = {
9725
9896
  input: 0.26,
9726
9897
  output: 0.78,
9727
9898
  cacheRead: 0,
9728
- cacheWrite: 0,
9899
+ cacheWrite: 0.325,
9729
9900
  },
9730
9901
  contextWindow: 1000000,
9731
9902
  maxTokens: 32768,
@@ -9742,7 +9913,7 @@ export const MODELS = {
9742
9913
  input: 0.26,
9743
9914
  output: 0.78,
9744
9915
  cacheRead: 0,
9745
- cacheWrite: 0,
9916
+ cacheWrite: 0.325,
9746
9917
  },
9747
9918
  contextWindow: 1000000,
9748
9919
  maxTokens: 32768,
@@ -9841,13 +10012,13 @@ export const MODELS = {
9841
10012
  reasoning: true,
9842
10013
  input: ["text"],
9843
10014
  cost: {
9844
- input: 0.14950000000000002,
9845
- output: 1.495,
10015
+ input: 0.13,
10016
+ output: 0.6,
9846
10017
  cacheRead: 0,
9847
10018
  cacheWrite: 0,
9848
10019
  },
9849
- contextWindow: 131072,
9850
- maxTokens: 4096,
10020
+ contextWindow: 262144,
10021
+ maxTokens: 262144,
9851
10022
  },
9852
10023
  "qwen/qwen3-30b-a3b": {
9853
10024
  id: "qwen/qwen3-30b-a3b",
@@ -9980,7 +10151,7 @@ export const MODELS = {
9980
10151
  input: 0.195,
9981
10152
  output: 0.975,
9982
10153
  cacheRead: 0.039,
9983
- cacheWrite: 0,
10154
+ cacheWrite: 0.24375,
9984
10155
  },
9985
10156
  contextWindow: 1000000,
9986
10157
  maxTokens: 65536,
@@ -10014,7 +10185,7 @@ export const MODELS = {
10014
10185
  input: 0.65,
10015
10186
  output: 3.25,
10016
10187
  cacheRead: 0.13,
10017
- cacheWrite: 0,
10188
+ cacheWrite: 0.8125,
10018
10189
  },
10019
10190
  contextWindow: 1000000,
10020
10191
  maxTokens: 65536,
@@ -10048,7 +10219,7 @@ export const MODELS = {
10048
10219
  input: 0.78,
10049
10220
  output: 3.9,
10050
10221
  cacheRead: 0.156,
10051
- cacheWrite: 0,
10222
+ cacheWrite: 0.975,
10052
10223
  },
10053
10224
  contextWindow: 262144,
10054
10225
  maxTokens: 32768,
@@ -10302,7 +10473,7 @@ export const MODELS = {
10302
10473
  cost: {
10303
10474
  input: 0.39,
10304
10475
  output: 2.34,
10305
- cacheRead: 0,
10476
+ cacheRead: 0.195,
10306
10477
  cacheWrite: 0,
10307
10478
  },
10308
10479
  contextWindow: 262144,
@@ -10317,13 +10488,13 @@ export const MODELS = {
10317
10488
  reasoning: true,
10318
10489
  input: ["text", "image"],
10319
10490
  cost: {
10320
- input: 0.049999999999999996,
10491
+ input: 0.09999999999999999,
10321
10492
  output: 0.15,
10322
10493
  cacheRead: 0,
10323
10494
  cacheWrite: 0,
10324
10495
  },
10325
- contextWindow: 256000,
10326
- maxTokens: 32768,
10496
+ contextWindow: 262144,
10497
+ maxTokens: 4096,
10327
10498
  },
10328
10499
  "qwen/qwen3.5-flash-02-23": {
10329
10500
  id: "qwen/qwen3.5-flash-02-23",
@@ -10337,7 +10508,7 @@ export const MODELS = {
10337
10508
  input: 0.065,
10338
10509
  output: 0.26,
10339
10510
  cacheRead: 0,
10340
- cacheWrite: 0,
10511
+ cacheWrite: 0.08125,
10341
10512
  },
10342
10513
  contextWindow: 1000000,
10343
10514
  maxTokens: 65536,
@@ -10354,7 +10525,7 @@ export const MODELS = {
10354
10525
  input: 0.26,
10355
10526
  output: 1.56,
10356
10527
  cacheRead: 0,
10357
- cacheWrite: 0,
10528
+ cacheWrite: 0.325,
10358
10529
  },
10359
10530
  contextWindow: 1000000,
10360
10531
  maxTokens: 65536,
@@ -10371,7 +10542,7 @@ export const MODELS = {
10371
10542
  input: 0.325,
10372
10543
  output: 1.95,
10373
10544
  cacheRead: 0,
10374
- cacheWrite: 0,
10545
+ cacheWrite: 0.40625,
10375
10546
  },
10376
10547
  contextWindow: 1000000,
10377
10548
  maxTokens: 65536,
@@ -11381,6 +11552,23 @@ export const MODELS = {
11381
11552
  contextWindow: 1000000,
11382
11553
  maxTokens: 128000,
11383
11554
  },
11555
+ "anthropic/claude-opus-4.7": {
11556
+ id: "anthropic/claude-opus-4.7",
11557
+ name: "Claude Opus 4.7",
11558
+ api: "anthropic-messages",
11559
+ provider: "vercel-ai-gateway",
11560
+ baseUrl: "https://ai-gateway.vercel.sh",
11561
+ reasoning: true,
11562
+ input: ["text", "image"],
11563
+ cost: {
11564
+ input: 5,
11565
+ output: 25,
11566
+ cacheRead: 0.5,
11567
+ cacheWrite: 6.25,
11568
+ },
11569
+ contextWindow: 1000000,
11570
+ maxTokens: 128000,
11571
+ },
11384
11572
  "anthropic/claude-sonnet-4": {
11385
11573
  id: "anthropic/claude-sonnet-4",
11386
11574
  name: "Claude Sonnet 4",
@@ -12862,7 +13050,7 @@ export const MODELS = {
12862
13050
  },
12863
13051
  "openai/gpt-oss-20b": {
12864
13052
  id: "openai/gpt-oss-20b",
12865
- name: "gpt-oss-20b",
13053
+ name: "GPT OSS 120B",
12866
13054
  api: "anthropic-messages",
12867
13055
  provider: "vercel-ai-gateway",
12868
13056
  baseUrl: "https://ai-gateway.vercel.sh",
@@ -12879,7 +13067,7 @@ export const MODELS = {
12879
13067
  },
12880
13068
  "openai/gpt-oss-safeguard-20b": {
12881
13069
  id: "openai/gpt-oss-safeguard-20b",
12882
- name: "gpt-oss-safeguard-20b",
13070
+ name: "GPT OSS Safeguard 20B",
12883
13071
  api: "anthropic-messages",
12884
13072
  provider: "vercel-ai-gateway",
12885
13073
  baseUrl: "https://ai-gateway.vercel.sh",
@@ -13139,7 +13327,7 @@ export const MODELS = {
13139
13327
  provider: "vercel-ai-gateway",
13140
13328
  baseUrl: "https://ai-gateway.vercel.sh",
13141
13329
  reasoning: false,
13142
- input: ["text"],
13330
+ input: ["text", "image"],
13143
13331
  cost: {
13144
13332
  input: 0.19999999999999998,
13145
13333
  output: 0.5,
@@ -13156,7 +13344,7 @@ export const MODELS = {
13156
13344
  provider: "vercel-ai-gateway",
13157
13345
  baseUrl: "https://ai-gateway.vercel.sh",
13158
13346
  reasoning: true,
13159
- input: ["text"],
13347
+ input: ["text", "image"],
13160
13348
  cost: {
13161
13349
  input: 0.19999999999999998,
13162
13350
  output: 0.5,
@@ -13173,7 +13361,7 @@ export const MODELS = {
13173
13361
  provider: "vercel-ai-gateway",
13174
13362
  baseUrl: "https://ai-gateway.vercel.sh",
13175
13363
  reasoning: false,
13176
- input: ["text"],
13364
+ input: ["text", "image"],
13177
13365
  cost: {
13178
13366
  input: 0.19999999999999998,
13179
13367
  output: 0.5,
@@ -13190,7 +13378,7 @@ export const MODELS = {
13190
13378
  provider: "vercel-ai-gateway",
13191
13379
  baseUrl: "https://ai-gateway.vercel.sh",
13192
13380
  reasoning: true,
13193
- input: ["text"],
13381
+ input: ["text", "image"],
13194
13382
  cost: {
13195
13383
  input: 0.19999999999999998,
13196
13384
  output: 0.5,
@@ -13207,7 +13395,7 @@ export const MODELS = {
13207
13395
  provider: "vercel-ai-gateway",
13208
13396
  baseUrl: "https://ai-gateway.vercel.sh",
13209
13397
  reasoning: true,
13210
- input: ["text"],
13398
+ input: ["text", "image"],
13211
13399
  cost: {
13212
13400
  input: 2,
13213
13401
  output: 6,
@@ -13224,7 +13412,7 @@ export const MODELS = {
13224
13412
  provider: "vercel-ai-gateway",
13225
13413
  baseUrl: "https://ai-gateway.vercel.sh",
13226
13414
  reasoning: true,
13227
- input: ["text"],
13415
+ input: ["text", "image"],
13228
13416
  cost: {
13229
13417
  input: 2,
13230
13418
  output: 6,
@@ -13547,15 +13735,15 @@ export const MODELS = {
13547
13735
  provider: "vercel-ai-gateway",
13548
13736
  baseUrl: "https://ai-gateway.vercel.sh",
13549
13737
  reasoning: true,
13550
- input: ["text"],
13738
+ input: ["text", "image"],
13551
13739
  cost: {
13552
13740
  input: 1.4,
13553
13741
  output: 4.4,
13554
13742
  cacheRead: 0.26,
13555
13743
  cacheWrite: 0,
13556
13744
  },
13557
- contextWindow: 202800,
13558
- maxTokens: 64000,
13745
+ contextWindow: 202752,
13746
+ maxTokens: 202752,
13559
13747
  },
13560
13748
  "zai/glm-5v-turbo": {
13561
13749
  id: "zai/glm-5v-turbo",