@hyperspaceng/neural-ai 0.67.7 → 0.68.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (47)
  1. package/README.md +3 -1
  2. package/dist/models.generated.d.ts +186 -81
  3. package/dist/models.generated.d.ts.map +1 -1
  4. package/dist/models.generated.js +222 -119
  5. package/dist/models.generated.js.map +1 -1
  6. package/dist/providers/amazon-bedrock.d.ts.map +1 -1
  7. package/dist/providers/amazon-bedrock.js +28 -38
  8. package/dist/providers/amazon-bedrock.js.map +1 -1
  9. package/dist/providers/anthropic.d.ts.map +1 -1
  10. package/dist/providers/anthropic.js +1 -2
  11. package/dist/providers/anthropic.js.map +1 -1
  12. package/dist/providers/google-shared.d.ts.map +1 -1
  13. package/dist/providers/google-shared.js +30 -4
  14. package/dist/providers/google-shared.js.map +1 -1
  15. package/dist/providers/mistral.d.ts +3 -0
  16. package/dist/providers/mistral.d.ts.map +1 -1
  17. package/dist/providers/mistral.js +31 -4
  18. package/dist/providers/mistral.js.map +1 -1
  19. package/dist/providers/openai-completions.d.ts +5 -1
  20. package/dist/providers/openai-completions.d.ts.map +1 -1
  21. package/dist/providers/openai-completions.js +153 -61
  22. package/dist/providers/openai-completions.js.map +1 -1
  23. package/dist/providers/openai-responses-shared.d.ts.map +1 -1
  24. package/dist/providers/openai-responses-shared.js +2 -5
  25. package/dist/providers/openai-responses-shared.js.map +1 -1
  26. package/dist/providers/simple-options.d.ts.map +1 -1
  27. package/dist/providers/simple-options.js +1 -1
  28. package/dist/providers/simple-options.js.map +1 -1
  29. package/dist/providers/transform-messages.d.ts.map +1 -1
  30. package/dist/providers/transform-messages.js +41 -2
  31. package/dist/providers/transform-messages.js.map +1 -1
  32. package/dist/types.d.ts +4 -0
  33. package/dist/types.d.ts.map +1 -1
  34. package/dist/types.js.map +1 -1
  35. package/dist/utils/oauth/anthropic.d.ts.map +1 -1
  36. package/dist/utils/oauth/anthropic.js +1 -1
  37. package/dist/utils/oauth/anthropic.js.map +1 -1
  38. package/dist/utils/oauth/google-antigravity.d.ts.map +1 -1
  39. package/dist/utils/oauth/google-antigravity.js +2 -1
  40. package/dist/utils/oauth/google-antigravity.js.map +1 -1
  41. package/dist/utils/oauth/google-gemini-cli.d.ts.map +1 -1
  42. package/dist/utils/oauth/google-gemini-cli.js +2 -1
  43. package/dist/utils/oauth/google-gemini-cli.js.map +1 -1
  44. package/dist/utils/oauth/openai-codex.d.ts.map +1 -1
  45. package/dist/utils/oauth/openai-codex.js +3 -2
  46. package/dist/utils/oauth/openai-codex.js.map +1 -1
  47. package/package.json +2 -2
@@ -3251,7 +3251,7 @@ export const MODELS = {
3251
3251
  cost: {
3252
3252
  input: 0.3,
3253
3253
  output: 2.5,
3254
- cacheRead: 0.075,
3254
+ cacheRead: 0.03,
3255
3255
  cacheWrite: 0,
3256
3256
  },
3257
3257
  contextWindow: 1048576,
@@ -3370,7 +3370,7 @@ export const MODELS = {
3370
3370
  cost: {
3371
3371
  input: 1.25,
3372
3372
  output: 10,
3373
- cacheRead: 0.31,
3373
+ cacheRead: 0.125,
3374
3374
  cacheWrite: 0,
3375
3375
  },
3376
3376
  contextWindow: 1048576,
@@ -4694,6 +4694,24 @@ export const MODELS = {
4694
4694
  contextWindow: 262144,
4695
4695
  maxTokens: 262144,
4696
4696
  },
4697
+ "moonshotai/Kimi-K2.6": {
4698
+ id: "moonshotai/Kimi-K2.6",
4699
+ name: "Kimi-K2.6",
4700
+ api: "openai-completions",
4701
+ provider: "huggingface",
4702
+ baseUrl: "https://router.huggingface.co/v1",
4703
+ compat: { "supportsDeveloperRole": false },
4704
+ reasoning: true,
4705
+ input: ["text", "image"],
4706
+ cost: {
4707
+ input: 0.95,
4708
+ output: 4,
4709
+ cacheRead: 0.16,
4710
+ cacheWrite: 0,
4711
+ },
4712
+ contextWindow: 262144,
4713
+ maxTokens: 262144,
4714
+ },
4697
4715
  "zai-org/GLM-4.7": {
4698
4716
  id: "zai-org/GLM-4.7",
4699
4717
  name: "GLM-4.7",
@@ -4768,6 +4786,23 @@ export const MODELS = {
4768
4786
  },
4769
4787
  },
4770
4788
  "kimi-coding": {
4789
+ "k2p6": {
4790
+ id: "k2p6",
4791
+ name: "Kimi K2.6",
4792
+ api: "anthropic-messages",
4793
+ provider: "kimi-coding",
4794
+ baseUrl: "https://api.kimi.com/coding",
4795
+ reasoning: true,
4796
+ input: ["text", "image"],
4797
+ cost: {
4798
+ input: 0,
4799
+ output: 0,
4800
+ cacheRead: 0,
4801
+ cacheWrite: 0,
4802
+ },
4803
+ contextWindow: 262144,
4804
+ maxTokens: 32768,
4805
+ },
4771
4806
  "kimi-for-coding": {
4772
4807
  id: "kimi-for-coding",
4773
4808
  name: "Kimi For Coding",
@@ -6650,6 +6685,40 @@ export const MODELS = {
6650
6685
  contextWindow: 262144,
6651
6686
  maxTokens: 65536,
6652
6687
  },
6688
+ "kimi-k2.6": {
6689
+ id: "kimi-k2.6",
6690
+ name: "Kimi K2.6",
6691
+ api: "openai-completions",
6692
+ provider: "opencode",
6693
+ baseUrl: "https://opencode.ai/zen/v1",
6694
+ reasoning: true,
6695
+ input: ["text", "image"],
6696
+ cost: {
6697
+ input: 0.95,
6698
+ output: 4,
6699
+ cacheRead: 0.16,
6700
+ cacheWrite: 0,
6701
+ },
6702
+ contextWindow: 262144,
6703
+ maxTokens: 65536,
6704
+ },
6705
+ "ling-2.6-flash-free": {
6706
+ id: "ling-2.6-flash-free",
6707
+ name: "Ling 2.6 Flash Free",
6708
+ api: "openai-completions",
6709
+ provider: "opencode",
6710
+ baseUrl: "https://opencode.ai/zen/v1",
6711
+ reasoning: false,
6712
+ input: ["text"],
6713
+ cost: {
6714
+ input: 0,
6715
+ output: 0,
6716
+ cacheRead: 0,
6717
+ cacheWrite: 0,
6718
+ },
6719
+ contextWindow: 262100,
6720
+ maxTokens: 32800,
6721
+ },
6653
6722
  "minimax-m2.5": {
6654
6723
  id: "minimax-m2.5",
6655
6724
  name: "MiniMax M2.5",
@@ -6684,6 +6753,23 @@ export const MODELS = {
6684
6753
  contextWindow: 204800,
6685
6754
  maxTokens: 131072,
6686
6755
  },
6756
+ "minimax-m2.7": {
6757
+ id: "minimax-m2.7",
6758
+ name: "MiniMax M2.7",
6759
+ api: "openai-completions",
6760
+ provider: "opencode",
6761
+ baseUrl: "https://opencode.ai/zen/v1",
6762
+ reasoning: true,
6763
+ input: ["text"],
6764
+ cost: {
6765
+ input: 0.3,
6766
+ output: 1.2,
6767
+ cacheRead: 0.06,
6768
+ cacheWrite: 0,
6769
+ },
6770
+ contextWindow: 204800,
6771
+ maxTokens: 131072,
6772
+ },
6687
6773
  "nemotron-3-super-free": {
6688
6774
  id: "nemotron-3-super-free",
6689
6775
  name: "Nemotron 3 Super Free",
@@ -6704,9 +6790,9 @@ export const MODELS = {
6704
6790
  "qwen3.5-plus": {
6705
6791
  id: "qwen3.5-plus",
6706
6792
  name: "Qwen3.5 Plus",
6707
- api: "openai-completions",
6793
+ api: "anthropic-messages",
6708
6794
  provider: "opencode",
6709
- baseUrl: "https://opencode.ai/zen/v1",
6795
+ baseUrl: "https://opencode.ai/zen",
6710
6796
  reasoning: true,
6711
6797
  input: ["text", "image"],
6712
6798
  cost: {
@@ -6721,9 +6807,9 @@ export const MODELS = {
6721
6807
  "qwen3.6-plus": {
6722
6808
  id: "qwen3.6-plus",
6723
6809
  name: "Qwen3.6 Plus",
6724
- api: "openai-completions",
6810
+ api: "anthropic-messages",
6725
6811
  provider: "opencode",
6726
- baseUrl: "https://opencode.ai/zen/v1",
6812
+ baseUrl: "https://opencode.ai/zen",
6727
6813
  reasoning: true,
6728
6814
  input: ["text", "image"],
6729
6815
  cost: {
@@ -6788,6 +6874,23 @@ export const MODELS = {
6788
6874
  contextWindow: 262144,
6789
6875
  maxTokens: 65536,
6790
6876
  },
6877
+ "kimi-k2.6": {
6878
+ id: "kimi-k2.6",
6879
+ name: "Kimi K2.6 (3x limits)",
6880
+ api: "openai-completions",
6881
+ provider: "opencode-go",
6882
+ baseUrl: "https://opencode.ai/zen/go/v1",
6883
+ reasoning: true,
6884
+ input: ["text", "image"],
6885
+ cost: {
6886
+ input: 0.32,
6887
+ output: 1.34,
6888
+ cacheRead: 0.054,
6889
+ cacheWrite: 0,
6890
+ },
6891
+ contextWindow: 262144,
6892
+ maxTokens: 65536,
6893
+ },
6791
6894
  "mimo-v2-omni": {
6792
6895
  id: "mimo-v2-omni",
6793
6896
  name: "MiMo V2 Omni",
@@ -6859,9 +6962,9 @@ export const MODELS = {
6859
6962
  "qwen3.5-plus": {
6860
6963
  id: "qwen3.5-plus",
6861
6964
  name: "Qwen3.5 Plus",
6862
- api: "openai-completions",
6965
+ api: "anthropic-messages",
6863
6966
  provider: "opencode-go",
6864
- baseUrl: "https://opencode.ai/zen/go/v1",
6967
+ baseUrl: "https://opencode.ai/zen/go",
6865
6968
  reasoning: true,
6866
6969
  input: ["text", "image"],
6867
6970
  cost: {
@@ -6876,9 +6979,9 @@ export const MODELS = {
6876
6979
  "qwen3.6-plus": {
6877
6980
  id: "qwen3.6-plus",
6878
6981
  name: "Qwen3.6 Plus",
6879
- api: "openai-completions",
6982
+ api: "anthropic-messages",
6880
6983
  provider: "opencode-go",
6881
- baseUrl: "https://opencode.ai/zen/go/v1",
6984
+ baseUrl: "https://opencode.ai/zen/go",
6882
6985
  reasoning: true,
6883
6986
  input: ["text", "image"],
6884
6987
  cost: {
@@ -7598,13 +7701,13 @@ export const MODELS = {
7598
7701
  reasoning: true,
7599
7702
  input: ["text"],
7600
7703
  cost: {
7601
- input: 0.25899999999999995,
7602
- output: 0.42,
7603
- cacheRead: 0.135,
7704
+ input: 0.252,
7705
+ output: 0.378,
7706
+ cacheRead: 0.0252,
7604
7707
  cacheWrite: 0,
7605
7708
  },
7606
- contextWindow: 163840,
7607
- maxTokens: 163840,
7709
+ contextWindow: 131072,
7710
+ maxTokens: 32768,
7608
7711
  },
7609
7712
  "deepseek/deepseek-v3.2-exp": {
7610
7713
  id: "deepseek/deepseek-v3.2-exp",
@@ -7654,7 +7757,7 @@ export const MODELS = {
7654
7757
  cacheRead: 0.024999999999999998,
7655
7758
  cacheWrite: 0.08333333333333334,
7656
7759
  },
7657
- contextWindow: 1000000,
7760
+ contextWindow: 1048576,
7658
7761
  maxTokens: 8192,
7659
7762
  },
7660
7763
  "google/gemini-2.0-flash-lite-001": {
@@ -7853,13 +7956,13 @@ export const MODELS = {
7853
7956
  reasoning: true,
7854
7957
  input: ["text", "image"],
7855
7958
  cost: {
7856
- input: 0.08,
7959
+ input: 0.07,
7857
7960
  output: 0.35,
7858
- cacheRead: 0.01,
7961
+ cacheRead: 0.04,
7859
7962
  cacheWrite: 0,
7860
7963
  },
7861
7964
  contextWindow: 262144,
7862
- maxTokens: 4096,
7965
+ maxTokens: 262144,
7863
7966
  },
7864
7967
  "google/gemma-4-26b-a4b-it:free": {
7865
7968
  id: "google/gemma-4-26b-a4b-it:free",
@@ -7929,6 +8032,23 @@ export const MODELS = {
7929
8032
  contextWindow: 128000,
7930
8033
  maxTokens: 50000,
7931
8034
  },
8035
+ "inclusionai/ling-2.6-flash:free": {
8036
+ id: "inclusionai/ling-2.6-flash:free",
8037
+ name: "inclusionAI: Ling-2.6-flash (free)",
8038
+ api: "openai-completions",
8039
+ provider: "openrouter",
8040
+ baseUrl: "https://openrouter.ai/api/v1",
8041
+ reasoning: false,
8042
+ input: ["text"],
8043
+ cost: {
8044
+ input: 0,
8045
+ output: 0,
8046
+ cacheRead: 0,
8047
+ cacheWrite: 0,
8048
+ },
8049
+ contextWindow: 262144,
8050
+ maxTokens: 32768,
8051
+ },
7932
8052
  "kwaipilot/kat-coder-pro-v2": {
7933
8053
  id: "kwaipilot/kat-coder-pro-v2",
7934
8054
  name: "Kwaipilot: KAT-Coder-Pro V2",
@@ -8031,23 +8151,6 @@ export const MODELS = {
8031
8151
  contextWindow: 65536,
8032
8152
  maxTokens: 4096,
8033
8153
  },
8034
- "meta-llama/llama-4-maverick": {
8035
- id: "meta-llama/llama-4-maverick",
8036
- name: "Meta: Llama 4 Maverick",
8037
- api: "openai-completions",
8038
- provider: "openrouter",
8039
- baseUrl: "https://openrouter.ai/api/v1",
8040
- reasoning: false,
8041
- input: ["text", "image"],
8042
- cost: {
8043
- input: 0.15,
8044
- output: 0.6,
8045
- cacheRead: 0,
8046
- cacheWrite: 0,
8047
- },
8048
- contextWindow: 1048576,
8049
- maxTokens: 16384,
8050
- },
8051
8154
  "meta-llama/llama-4-scout": {
8052
8155
  id: "meta-llama/llama-4-scout",
8053
8156
  name: "Meta: Llama 4 Scout",
@@ -8125,9 +8228,9 @@ export const MODELS = {
8125
8228
  reasoning: true,
8126
8229
  input: ["text"],
8127
8230
  cost: {
8128
- input: 0.118,
8129
- output: 0.9900000000000001,
8130
- cacheRead: 0.059,
8231
+ input: 0.15,
8232
+ output: 1.2,
8233
+ cacheRead: 0.075,
8131
8234
  cacheWrite: 0,
8132
8235
  },
8133
8236
  contextWindow: 196608,
@@ -8606,9 +8709,26 @@ export const MODELS = {
8606
8709
  cacheRead: 0.07,
8607
8710
  cacheWrite: 0,
8608
8711
  },
8609
- contextWindow: 256000,
8712
+ contextWindow: 262144,
8610
8713
  maxTokens: 4096,
8611
8714
  },
8715
+ "moonshotai/kimi-k2.6": {
8716
+ id: "moonshotai/kimi-k2.6",
8717
+ name: "MoonshotAI: Kimi K2.6",
8718
+ api: "openai-completions",
8719
+ provider: "openrouter",
8720
+ baseUrl: "https://openrouter.ai/api/v1",
8721
+ reasoning: true,
8722
+ input: ["text", "image"],
8723
+ cost: {
8724
+ input: 0.7999999999999999,
8725
+ output: 3.5,
8726
+ cacheRead: 0.19999999999999998,
8727
+ cacheWrite: 0,
8728
+ },
8729
+ contextWindow: 262144,
8730
+ maxTokens: 262144,
8731
+ },
8612
8732
  "nex-agi/deepseek-v3.1-nex-n1": {
8613
8733
  id: "nex-agi/deepseek-v3.1-nex-n1",
8614
8734
  name: "Nex AGI: DeepSeek V3.1 Nex N1",
@@ -9085,23 +9205,6 @@ export const MODELS = {
9085
9205
  contextWindow: 128000,
9086
9206
  maxTokens: 16384,
9087
9207
  },
9088
- "openai/gpt-4o:extended": {
9089
- id: "openai/gpt-4o:extended",
9090
- name: "OpenAI: GPT-4o (extended)",
9091
- api: "openai-completions",
9092
- provider: "openrouter",
9093
- baseUrl: "https://openrouter.ai/api/v1",
9094
- reasoning: false,
9095
- input: ["text", "image"],
9096
- cost: {
9097
- input: 6,
9098
- output: 18,
9099
- cacheRead: 0,
9100
- cacheWrite: 0,
9101
- },
9102
- contextWindow: 128000,
9103
- maxTokens: 64000,
9104
- },
9105
9208
  "openai/gpt-5": {
9106
9209
  id: "openai/gpt-5",
9107
9210
  name: "OpenAI: GPT-5",
@@ -9136,40 +9239,6 @@ export const MODELS = {
9136
9239
  contextWindow: 400000,
9137
9240
  maxTokens: 128000,
9138
9241
  },
9139
- "openai/gpt-5-image": {
9140
- id: "openai/gpt-5-image",
9141
- name: "OpenAI: GPT-5 Image",
9142
- api: "openai-completions",
9143
- provider: "openrouter",
9144
- baseUrl: "https://openrouter.ai/api/v1",
9145
- reasoning: true,
9146
- input: ["text", "image"],
9147
- cost: {
9148
- input: 10,
9149
- output: 10,
9150
- cacheRead: 1.25,
9151
- cacheWrite: 0,
9152
- },
9153
- contextWindow: 400000,
9154
- maxTokens: 128000,
9155
- },
9156
- "openai/gpt-5-image-mini": {
9157
- id: "openai/gpt-5-image-mini",
9158
- name: "OpenAI: GPT-5 Image Mini",
9159
- api: "openai-completions",
9160
- provider: "openrouter",
9161
- baseUrl: "https://openrouter.ai/api/v1",
9162
- reasoning: true,
9163
- input: ["text", "image"],
9164
- cost: {
9165
- input: 2.5,
9166
- output: 2,
9167
- cacheRead: 0.25,
9168
- cacheWrite: 0,
9169
- },
9170
- contextWindow: 400000,
9171
- maxTokens: 128000,
9172
- },
9173
9242
  "openai/gpt-5-mini": {
9174
9243
  id: "openai/gpt-5-mini",
9175
9244
  name: "OpenAI: GPT-5 Mini",
@@ -9765,23 +9834,6 @@ export const MODELS = {
9765
9834
  contextWindow: 2000000,
9766
9835
  maxTokens: 4096,
9767
9836
  },
9768
- "openrouter/elephant-alpha": {
9769
- id: "openrouter/elephant-alpha",
9770
- name: "Elephant",
9771
- api: "openai-completions",
9772
- provider: "openrouter",
9773
- baseUrl: "https://openrouter.ai/api/v1",
9774
- reasoning: false,
9775
- input: ["text"],
9776
- cost: {
9777
- input: 0,
9778
- output: 0,
9779
- cacheRead: 0,
9780
- cacheWrite: 0,
9781
- },
9782
- contextWindow: 262144,
9783
- maxTokens: 32768,
9784
- },
9785
9837
  "openrouter/free": {
9786
9838
  id: "openrouter/free",
9787
9839
  name: "Free Models Router",
@@ -9992,7 +10044,7 @@ export const MODELS = {
9992
10044
  api: "openai-completions",
9993
10045
  provider: "openrouter",
9994
10046
  baseUrl: "https://openrouter.ai/api/v1",
9995
- reasoning: true,
10047
+ reasoning: false,
9996
10048
  input: ["text"],
9997
10049
  cost: {
9998
10050
  input: 0.071,
@@ -10167,7 +10219,7 @@ export const MODELS = {
10167
10219
  cost: {
10168
10220
  input: 0.15,
10169
10221
  output: 0.7999999999999999,
10170
- cacheRead: 0.12,
10222
+ cacheRead: 0.11,
10171
10223
  cacheWrite: 0,
10172
10224
  },
10173
10225
  contextWindow: 262144,
@@ -11049,13 +11101,13 @@ export const MODELS = {
11049
11101
  reasoning: true,
11050
11102
  input: ["text"],
11051
11103
  cost: {
11052
- input: 0.39,
11053
- output: 1.75,
11054
- cacheRead: 0.195,
11104
+ input: 0.38,
11105
+ output: 1.74,
11106
+ cacheRead: 0,
11055
11107
  cacheWrite: 0,
11056
11108
  },
11057
11109
  contextWindow: 202752,
11058
- maxTokens: 65535,
11110
+ maxTokens: 4096,
11059
11111
  },
11060
11112
  "z-ai/glm-4.7-flash": {
11061
11113
  id: "z-ai/glm-4.7-flash",
@@ -11088,8 +11140,8 @@ export const MODELS = {
11088
11140
  cacheRead: 0.119,
11089
11141
  cacheWrite: 0,
11090
11142
  },
11091
- contextWindow: 80000,
11092
- maxTokens: 131072,
11143
+ contextWindow: 202752,
11144
+ maxTokens: 4096,
11093
11145
  },
11094
11146
  "z-ai/glm-5-turbo": {
11095
11147
  id: "z-ai/glm-5-turbo",
@@ -11117,9 +11169,9 @@ export const MODELS = {
11117
11169
  reasoning: true,
11118
11170
  input: ["text"],
11119
11171
  cost: {
11120
- input: 0.95,
11121
- output: 3.15,
11122
- cacheRead: 0.475,
11172
+ input: 1.0499999999999998,
11173
+ output: 3.5,
11174
+ cacheRead: 0.5249999999999999,
11123
11175
  cacheWrite: 0,
11124
11176
  },
11125
11177
  contextWindow: 202752,
@@ -11142,6 +11194,23 @@ export const MODELS = {
11142
11194
  contextWindow: 202752,
11143
11195
  maxTokens: 131072,
11144
11196
  },
11197
+ "~anthropic/claude-opus-latest": {
11198
+ id: "~anthropic/claude-opus-latest",
11199
+ name: "Anthropic: Claude Opus Latest",
11200
+ api: "openai-completions",
11201
+ provider: "openrouter",
11202
+ baseUrl: "https://openrouter.ai/api/v1",
11203
+ reasoning: true,
11204
+ input: ["text", "image"],
11205
+ cost: {
11206
+ input: 5,
11207
+ output: 25,
11208
+ cacheRead: 0.5,
11209
+ cacheWrite: 6.25,
11210
+ },
11211
+ contextWindow: 1000000,
11212
+ maxTokens: 128000,
11213
+ },
11145
11214
  },
11146
11215
  "vercel-ai-gateway": {
11147
11216
  "alibaba/qwen-3-14b": {
@@ -11212,6 +11281,23 @@ export const MODELS = {
11212
11281
  contextWindow: 128000,
11213
11282
  maxTokens: 8192,
11214
11283
  },
11284
+ "alibaba/qwen-3.6-max-preview": {
11285
+ id: "alibaba/qwen-3.6-max-preview",
11286
+ name: "Qwen 3.6 Max Preview",
11287
+ api: "anthropic-messages",
11288
+ provider: "vercel-ai-gateway",
11289
+ baseUrl: "https://ai-gateway.vercel.sh",
11290
+ reasoning: true,
11291
+ input: ["text", "image"],
11292
+ cost: {
11293
+ input: 1.3,
11294
+ output: 7.8,
11295
+ cacheRead: 0.26,
11296
+ cacheWrite: 1.625,
11297
+ },
11298
+ contextWindow: 240000,
11299
+ maxTokens: 64000,
11300
+ },
11215
11301
  "alibaba/qwen3-235b-a22b-thinking": {
11216
11302
  id: "alibaba/qwen3-235b-a22b-thinking",
11217
11303
  name: "Qwen3 235B A22B Thinking 2507",
@@ -12555,6 +12641,23 @@ export const MODELS = {
12555
12641
  contextWindow: 262114,
12556
12642
  maxTokens: 262114,
12557
12643
  },
12644
+ "moonshotai/kimi-k2.6": {
12645
+ id: "moonshotai/kimi-k2.6",
12646
+ name: "Kimi K2.6",
12647
+ api: "anthropic-messages",
12648
+ provider: "vercel-ai-gateway",
12649
+ baseUrl: "https://ai-gateway.vercel.sh",
12650
+ reasoning: true,
12651
+ input: ["text", "image"],
12652
+ cost: {
12653
+ input: 0.95,
12654
+ output: 4,
12655
+ cacheRead: 0.16,
12656
+ cacheWrite: 0,
12657
+ },
12658
+ contextWindow: 262000,
12659
+ maxTokens: 262000,
12660
+ },
12558
12661
  "nvidia/nemotron-nano-12b-v2-vl": {
12559
12662
  id: "nvidia/nemotron-nano-12b-v2-vl",
12560
12663
  name: "Nvidia Nemotron Nano 12B V2 VL",