@mariozechner/pi-ai 0.5.47 → 0.6.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (55)
  1. package/README.md +47 -11
  2. package/dist/agent/agent-loop.d.ts.map +1 -1
  3. package/dist/agent/agent-loop.js +3 -1
  4. package/dist/agent/agent-loop.js.map +1 -1
  5. package/dist/agent/index.d.ts.map +1 -1
  6. package/dist/agent/tools/calculate.d.ts +6 -3
  7. package/dist/agent/tools/calculate.d.ts.map +1 -1
  8. package/dist/agent/tools/calculate.js +1 -1
  9. package/dist/agent/tools/calculate.js.map +1 -1
  10. package/dist/agent/tools/get-current-time.d.ts.map +1 -1
  11. package/dist/agent/tools/get-current-time.js +8 -6
  12. package/dist/agent/tools/get-current-time.js.map +1 -1
  13. package/dist/agent/tools/index.d.ts.map +1 -1
  14. package/dist/agent/types.d.ts +2 -2
  15. package/dist/agent/types.d.ts.map +1 -1
  16. package/dist/agent/types.js.map +1 -1
  17. package/dist/index.d.ts.map +1 -1
  18. package/dist/models.d.ts.map +1 -1
  19. package/dist/models.generated.d.ts +1628 -302
  20. package/dist/models.generated.d.ts.map +1 -1
  21. package/dist/models.generated.js +1509 -183
  22. package/dist/models.generated.js.map +1 -1
  23. package/dist/models.js.map +1 -1
  24. package/dist/providers/anthropic.d.ts.map +1 -1
  25. package/dist/providers/anthropic.js +56 -9
  26. package/dist/providers/anthropic.js.map +1 -1
  27. package/dist/providers/google.d.ts.map +1 -1
  28. package/dist/providers/google.js +32 -12
  29. package/dist/providers/google.js.map +1 -1
  30. package/dist/providers/openai-completions.d.ts.map +1 -1
  31. package/dist/providers/openai-completions.js +33 -1
  32. package/dist/providers/openai-completions.js.map +1 -1
  33. package/dist/providers/openai-responses.d.ts.map +1 -1
  34. package/dist/providers/openai-responses.js +32 -1
  35. package/dist/providers/openai-responses.js.map +1 -1
  36. package/dist/providers/transorm-messages.d.ts.map +1 -1
  37. package/dist/providers/transorm-messages.js.map +1 -1
  38. package/dist/stream.d.ts.map +1 -1
  39. package/dist/stream.js +2 -2
  40. package/dist/stream.js.map +1 -1
  41. package/dist/types.d.ts +1 -1
  42. package/dist/types.d.ts.map +1 -1
  43. package/dist/types.js.map +1 -1
  44. package/dist/utils/event-stream.d.ts.map +1 -1
  45. package/dist/utils/event-stream.js +7 -3
  46. package/dist/utils/event-stream.js.map +1 -1
  47. package/dist/utils/json-parse.d.ts.map +1 -1
  48. package/dist/utils/json-parse.js.map +1 -1
  49. package/dist/utils/sanitize-unicode.d.ts.map +1 -1
  50. package/dist/utils/sanitize-unicode.js.map +1 -1
  51. package/dist/utils/typebox-helpers.d.ts.map +1 -1
  52. package/dist/utils/typebox-helpers.js.map +1 -1
  53. package/dist/utils/validation.d.ts.map +1 -1
  54. package/dist/utils/validation.js.map +1 -1
  55. package/package.json +5 -4
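
Most of the churn in this release is regenerated model metadata in package/dist/models.generated.js and its .d.ts (hunks below). Every entry in the exported MODELS map carries the same fields; the following TypeScript sketch is an illustrative reconstruction inferred from those hunks — the ModelEntry name, the helper function, and the per-million-token reading of the cost numbers are assumptions for orientation, not the package's published typings.

// A minimal sketch of the entry shape seen throughout the models.generated.js hunks
// below. Field names and the sample values come from the diff; the type name,
// the helper, and the cost-unit interpretation are assumptions.
type ModelEntry = {
  id: string;
  name: string;
  api: string;                      // e.g. "openai-completions", "openai-responses"
  provider: string;                 // e.g. "openai", "openrouter", "cerebras"
  baseUrl: string;
  reasoning: boolean;
  input: Array<"text" | "image">;
  cost: {
    input: number;                  // assumed USD per million input tokens
    output: number;                 // assumed USD per million output tokens
    cacheRead: number;
    cacheWrite: number;
  };
  contextWindow: number;
  maxTokens: number;
};

// Sample entry copied verbatim from the first added hunk ("gpt-5-pro").
const gpt5Pro: ModelEntry = {
  id: "gpt-5-pro",
  name: "GPT-5 Pro",
  api: "openai-responses",
  provider: "openai",
  baseUrl: "https://api.openai.com/v1",
  reasoning: true,
  input: ["text", "image"],
  cost: { input: 15, output: 120, cacheRead: 0, cacheWrite: 0 },
  contextWindow: 400000,
  maxTokens: 272000,
};

// Hypothetical helper: rough request-cost estimate under the per-million-token assumption.
function estimateCostUSD(m: ModelEntry, inputTokens: number, outputTokens: number): number {
  return (inputTokens * m.cost.input + outputTokens * m.cost.output) / 1_000_000;
}

Under that assumption, a 10,000-token prompt with a 2,000-token reply on gpt-5-pro would come to estimateCostUSD(gpt5Pro, 10_000, 2_000) ≈ $0.39.
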
@@ -1043,6 +1043,23 @@ export const MODELS = {
1043
1043
  contextWindow: 400000,
1044
1044
  maxTokens: 128000,
1045
1045
  },
1046
+ "gpt-5-pro": {
1047
+ id: "gpt-5-pro",
1048
+ name: "GPT-5 Pro",
1049
+ api: "openai-responses",
1050
+ provider: "openai",
1051
+ baseUrl: "https://api.openai.com/v1",
1052
+ reasoning: true,
1053
+ input: ["text", "image"],
1054
+ cost: {
1055
+ input: 15,
1056
+ output: 120,
1057
+ cacheRead: 0,
1058
+ cacheWrite: 0,
1059
+ },
1060
+ contextWindow: 400000,
1061
+ maxTokens: 272000,
1062
+ },
1046
1063
  "gpt-5-chat-latest": {
1047
1064
  id: "gpt-5-chat-latest",
1048
1065
  name: "GPT-5 Chat Latest",
@@ -1336,6 +1353,23 @@ export const MODELS = {
1336
1353
  contextWindow: 131000,
1337
1354
  maxTokens: 32000,
1338
1355
  },
1356
+ "zai-glm-4.6": {
1357
+ id: "zai-glm-4.6",
1358
+ name: "Z.AI GLM-4.6",
1359
+ api: "openai-completions",
1360
+ provider: "cerebras",
1361
+ baseUrl: "https://api.cerebras.ai/v1",
1362
+ reasoning: false,
1363
+ input: ["text"],
1364
+ cost: {
1365
+ input: 0,
1366
+ output: 0,
1367
+ cacheRead: 0,
1368
+ cacheWrite: 0,
1369
+ },
1370
+ contextWindow: 131072,
1371
+ maxTokens: 40960,
1372
+ },
1339
1373
  "qwen-3-coder-480b": {
1340
1374
  id: "qwen-3-coder-480b",
1341
1375
  name: "Qwen 3 Coder 480B",
@@ -1801,13 +1835,13 @@ export const MODELS = {
1801
1835
  },
1802
1836
  },
1803
1837
  openrouter: {
1804
- "minimax/minimax-m2:free": {
1805
- id: "minimax/minimax-m2:free",
1806
- name: "MiniMax: MiniMax M2 (free)",
1838
+ "kwaipilot/kat-coder-pro:free": {
1839
+ id: "kwaipilot/kat-coder-pro:free",
1840
+ name: "Kwaipilot: KAT-Coder-Pro V1 (free)",
1807
1841
  api: "openai-completions",
1808
1842
  provider: "openrouter",
1809
1843
  baseUrl: "https://openrouter.ai/api/v1",
1810
- reasoning: true,
1844
+ reasoning: false,
1811
1845
  input: ["text"],
1812
1846
  cost: {
1813
1847
  input: 0,
@@ -1815,12 +1849,97 @@ export const MODELS = {
1815
1849
  cacheRead: 0,
1816
1850
  cacheWrite: 0,
1817
1851
  },
1818
- contextWindow: 204800,
1819
- maxTokens: 131072,
1852
+ contextWindow: 256000,
1853
+ maxTokens: 32000,
1854
+ },
1855
+ "openrouter/polaris-alpha": {
1856
+ id: "openrouter/polaris-alpha",
1857
+ name: "Polaris Alpha",
1858
+ api: "openai-completions",
1859
+ provider: "openrouter",
1860
+ baseUrl: "https://openrouter.ai/api/v1",
1861
+ reasoning: false,
1862
+ input: ["text", "image"],
1863
+ cost: {
1864
+ input: 0,
1865
+ output: 0,
1866
+ cacheRead: 0,
1867
+ cacheWrite: 0,
1868
+ },
1869
+ contextWindow: 256000,
1870
+ maxTokens: 128000,
1871
+ },
1872
+ "moonshotai/kimi-k2-thinking": {
1873
+ id: "moonshotai/kimi-k2-thinking",
1874
+ name: "MoonshotAI: Kimi K2 Thinking",
1875
+ api: "openai-completions",
1876
+ provider: "openrouter",
1877
+ baseUrl: "https://openrouter.ai/api/v1",
1878
+ reasoning: true,
1879
+ input: ["text"],
1880
+ cost: {
1881
+ input: 0.55,
1882
+ output: 2.25,
1883
+ cacheRead: 0,
1884
+ cacheWrite: 0,
1885
+ },
1886
+ contextWindow: 262144,
1887
+ maxTokens: 16384,
1888
+ },
1889
+ "amazon/nova-premier-v1": {
1890
+ id: "amazon/nova-premier-v1",
1891
+ name: "Amazon: Nova Premier 1.0",
1892
+ api: "openai-completions",
1893
+ provider: "openrouter",
1894
+ baseUrl: "https://openrouter.ai/api/v1",
1895
+ reasoning: false,
1896
+ input: ["text", "image"],
1897
+ cost: {
1898
+ input: 2.5,
1899
+ output: 12.5,
1900
+ cacheRead: 0.625,
1901
+ cacheWrite: 0,
1902
+ },
1903
+ contextWindow: 1000000,
1904
+ maxTokens: 32000,
1905
+ },
1906
+ "mistralai/voxtral-small-24b-2507": {
1907
+ id: "mistralai/voxtral-small-24b-2507",
1908
+ name: "Mistral: Voxtral Small 24B 2507",
1909
+ api: "openai-completions",
1910
+ provider: "openrouter",
1911
+ baseUrl: "https://openrouter.ai/api/v1",
1912
+ reasoning: false,
1913
+ input: ["text"],
1914
+ cost: {
1915
+ input: 0.09999999999999999,
1916
+ output: 0.3,
1917
+ cacheRead: 0,
1918
+ cacheWrite: 0,
1919
+ },
1920
+ contextWindow: 32000,
1921
+ maxTokens: 4096,
1922
+ },
1923
+ "openai/gpt-oss-safeguard-20b": {
1924
+ id: "openai/gpt-oss-safeguard-20b",
1925
+ name: "OpenAI: gpt-oss-safeguard-20b",
1926
+ api: "openai-completions",
1927
+ provider: "openrouter",
1928
+ baseUrl: "https://openrouter.ai/api/v1",
1929
+ reasoning: true,
1930
+ input: ["text"],
1931
+ cost: {
1932
+ input: 0.075,
1933
+ output: 0.3,
1934
+ cacheRead: 0.037,
1935
+ cacheWrite: 0,
1936
+ },
1937
+ contextWindow: 131072,
1938
+ maxTokens: 65536,
1820
1939
  },
1821
- "openrouter/andromeda-alpha": {
1822
- id: "openrouter/andromeda-alpha",
1823
- name: "Andromeda Alpha",
1940
+ "nvidia/nemotron-nano-12b-v2-vl:free": {
1941
+ id: "nvidia/nemotron-nano-12b-v2-vl:free",
1942
+ name: "NVIDIA: Nemotron Nano 12B 2 VL (free)",
1824
1943
  api: "openai-completions",
1825
1944
  provider: "openrouter",
1826
1945
  baseUrl: "https://openrouter.ai/api/v1",
@@ -1835,6 +1954,23 @@ export const MODELS = {
1835
1954
  contextWindow: 128000,
1836
1955
  maxTokens: 128000,
1837
1956
  },
1957
+ "minimax/minimax-m2": {
1958
+ id: "minimax/minimax-m2",
1959
+ name: "MiniMax: MiniMax M2",
1960
+ api: "openai-completions",
1961
+ provider: "openrouter",
1962
+ baseUrl: "https://openrouter.ai/api/v1",
1963
+ reasoning: true,
1964
+ input: ["text"],
1965
+ cost: {
1966
+ input: 0.255,
1967
+ output: 1.02,
1968
+ cacheRead: 0,
1969
+ cacheWrite: 0,
1970
+ },
1971
+ contextWindow: 204800,
1972
+ maxTokens: 131072,
1973
+ },
1838
1974
  "deepcogito/cogito-v2-preview-llama-405b": {
1839
1975
  id: "deepcogito/cogito-v2-preview-llama-405b",
1840
1976
  name: "Deep Cogito: Cogito V2 Preview Llama 405B",
@@ -1852,6 +1988,40 @@ export const MODELS = {
1852
1988
  contextWindow: 32768,
1853
1989
  maxTokens: 4096,
1854
1990
  },
1991
+ "openai/gpt-5-image-mini": {
1992
+ id: "openai/gpt-5-image-mini",
1993
+ name: "OpenAI: GPT-5 Image Mini",
1994
+ api: "openai-completions",
1995
+ provider: "openrouter",
1996
+ baseUrl: "https://openrouter.ai/api/v1",
1997
+ reasoning: true,
1998
+ input: ["text", "image"],
1999
+ cost: {
2000
+ input: 2.5,
2001
+ output: 2,
2002
+ cacheRead: 0.25,
2003
+ cacheWrite: 0,
2004
+ },
2005
+ contextWindow: 400000,
2006
+ maxTokens: 128000,
2007
+ },
2008
+ "anthropic/claude-haiku-4.5": {
2009
+ id: "anthropic/claude-haiku-4.5",
2010
+ name: "Anthropic: Claude Haiku 4.5",
2011
+ api: "openai-completions",
2012
+ provider: "openrouter",
2013
+ baseUrl: "https://openrouter.ai/api/v1",
2014
+ reasoning: true,
2015
+ input: ["text", "image"],
2016
+ cost: {
2017
+ input: 1,
2018
+ output: 5,
2019
+ cacheRead: 0.09999999999999999,
2020
+ cacheWrite: 1.25,
2021
+ },
2022
+ contextWindow: 200000,
2023
+ maxTokens: 64000,
2024
+ },
1855
2025
  "qwen/qwen3-vl-8b-thinking": {
1856
2026
  id: "qwen/qwen3-vl-8b-thinking",
1857
2027
  name: "Qwen: Qwen3 VL 8B Thinking",
@@ -1886,6 +2056,23 @@ export const MODELS = {
1886
2056
  contextWindow: 131072,
1887
2057
  maxTokens: 32768,
1888
2058
  },
2059
+ "openai/gpt-5-image": {
2060
+ id: "openai/gpt-5-image",
2061
+ name: "OpenAI: GPT-5 Image",
2062
+ api: "openai-completions",
2063
+ provider: "openrouter",
2064
+ baseUrl: "https://openrouter.ai/api/v1",
2065
+ reasoning: true,
2066
+ input: ["text", "image"],
2067
+ cost: {
2068
+ input: 10,
2069
+ output: 10,
2070
+ cacheRead: 1.25,
2071
+ cacheWrite: 0,
2072
+ },
2073
+ contextWindow: 400000,
2074
+ maxTokens: 128000,
2075
+ },
1889
2076
  "inclusionai/ring-1t": {
1890
2077
  id: "inclusionai/ring-1t",
1891
2078
  name: "inclusionAI: Ring 1T",
@@ -1912,14 +2099,48 @@ export const MODELS = {
1912
2099
  reasoning: false,
1913
2100
  input: ["text"],
1914
2101
  cost: {
1915
- input: 0.39999999999999997,
1916
- output: 2,
2102
+ input: 0.5700000000000001,
2103
+ output: 2.2800000000000002,
1917
2104
  cacheRead: 0,
1918
2105
  cacheWrite: 0,
1919
2106
  },
1920
2107
  contextWindow: 131072,
1921
2108
  maxTokens: 131072,
1922
2109
  },
2110
+ "openai/o3-deep-research": {
2111
+ id: "openai/o3-deep-research",
2112
+ name: "OpenAI: o3 Deep Research",
2113
+ api: "openai-completions",
2114
+ provider: "openrouter",
2115
+ baseUrl: "https://openrouter.ai/api/v1",
2116
+ reasoning: true,
2117
+ input: ["text", "image"],
2118
+ cost: {
2119
+ input: 10,
2120
+ output: 40,
2121
+ cacheRead: 2.5,
2122
+ cacheWrite: 0,
2123
+ },
2124
+ contextWindow: 200000,
2125
+ maxTokens: 100000,
2126
+ },
2127
+ "openai/o4-mini-deep-research": {
2128
+ id: "openai/o4-mini-deep-research",
2129
+ name: "OpenAI: o4 Mini Deep Research",
2130
+ api: "openai-completions",
2131
+ provider: "openrouter",
2132
+ baseUrl: "https://openrouter.ai/api/v1",
2133
+ reasoning: true,
2134
+ input: ["text", "image"],
2135
+ cost: {
2136
+ input: 2,
2137
+ output: 8,
2138
+ cacheRead: 0.5,
2139
+ cacheWrite: 0,
2140
+ },
2141
+ contextWindow: 200000,
2142
+ maxTokens: 100000,
2143
+ },
1923
2144
  "nvidia/llama-3.3-nemotron-super-49b-v1.5": {
1924
2145
  id: "nvidia/llama-3.3-nemotron-super-49b-v1.5",
1925
2146
  name: "NVIDIA: Llama 3.3 Nemotron Super 49B V1.5",
@@ -1963,13 +2184,30 @@ export const MODELS = {
1963
2184
  reasoning: false,
1964
2185
  input: ["text", "image"],
1965
2186
  cost: {
1966
- input: 0.19999999999999998,
1967
- output: 0.7,
2187
+ input: 0.15,
2188
+ output: 0.6,
1968
2189
  cacheRead: 0,
1969
2190
  cacheWrite: 0,
1970
2191
  },
1971
- contextWindow: 131072,
1972
- maxTokens: 32768,
2192
+ contextWindow: 262144,
2193
+ maxTokens: 4096,
2194
+ },
2195
+ "openai/gpt-5-pro": {
2196
+ id: "openai/gpt-5-pro",
2197
+ name: "OpenAI: GPT-5 Pro",
2198
+ api: "openai-completions",
2199
+ provider: "openrouter",
2200
+ baseUrl: "https://openrouter.ai/api/v1",
2201
+ reasoning: true,
2202
+ input: ["text", "image"],
2203
+ cost: {
2204
+ input: 15,
2205
+ output: 120,
2206
+ cacheRead: 0,
2207
+ cacheWrite: 0,
2208
+ },
2209
+ contextWindow: 400000,
2210
+ maxTokens: 128000,
1973
2211
  },
1974
2212
  "z-ai/glm-4.6": {
1975
2213
  id: "z-ai/glm-4.6",
@@ -1980,7 +2218,7 @@ export const MODELS = {
1980
2218
  reasoning: true,
1981
2219
  input: ["text"],
1982
2220
  cost: {
1983
- input: 0.5,
2221
+ input: 0.39999999999999997,
1984
2222
  output: 1.75,
1985
2223
  cacheRead: 0,
1986
2224
  cacheWrite: 0,
@@ -1998,12 +2236,29 @@ export const MODELS = {
1998
2236
  input: ["text"],
1999
2237
  cost: {
2000
2238
  input: 0.6,
2001
- output: 1.9,
2239
+ output: 2.2,
2002
2240
  cacheRead: 0,
2003
2241
  cacheWrite: 0,
2004
2242
  },
2005
- contextWindow: 202752,
2006
- maxTokens: 4096,
2243
+ contextWindow: 204800,
2244
+ maxTokens: 131072,
2245
+ },
2246
+ "anthropic/claude-sonnet-4.5": {
2247
+ id: "anthropic/claude-sonnet-4.5",
2248
+ name: "Anthropic: Claude Sonnet 4.5",
2249
+ api: "openai-completions",
2250
+ provider: "openrouter",
2251
+ baseUrl: "https://openrouter.ai/api/v1",
2252
+ reasoning: true,
2253
+ input: ["text", "image"],
2254
+ cost: {
2255
+ input: 3,
2256
+ output: 15,
2257
+ cacheRead: 0.3,
2258
+ cacheWrite: 3.75,
2259
+ },
2260
+ contextWindow: 1000000,
2261
+ maxTokens: 64000,
2007
2262
  },
2008
2263
  "deepseek/deepseek-v3.2-exp": {
2009
2264
  id: "deepseek/deepseek-v3.2-exp",
@@ -2022,6 +2277,40 @@ export const MODELS = {
2022
2277
  contextWindow: 163840,
2023
2278
  maxTokens: 4096,
2024
2279
  },
2280
+ "google/gemini-2.5-flash-preview-09-2025": {
2281
+ id: "google/gemini-2.5-flash-preview-09-2025",
2282
+ name: "Google: Gemini 2.5 Flash Preview 09-2025",
2283
+ api: "openai-completions",
2284
+ provider: "openrouter",
2285
+ baseUrl: "https://openrouter.ai/api/v1",
2286
+ reasoning: true,
2287
+ input: ["text", "image"],
2288
+ cost: {
2289
+ input: 0.3,
2290
+ output: 2.5,
2291
+ cacheRead: 0.075,
2292
+ cacheWrite: 0.3833,
2293
+ },
2294
+ contextWindow: 1048576,
2295
+ maxTokens: 65536,
2296
+ },
2297
+ "google/gemini-2.5-flash-lite-preview-09-2025": {
2298
+ id: "google/gemini-2.5-flash-lite-preview-09-2025",
2299
+ name: "Google: Gemini 2.5 Flash Lite Preview 09-2025",
2300
+ api: "openai-completions",
2301
+ provider: "openrouter",
2302
+ baseUrl: "https://openrouter.ai/api/v1",
2303
+ reasoning: true,
2304
+ input: ["text", "image"],
2305
+ cost: {
2306
+ input: 0.09999999999999999,
2307
+ output: 0.39999999999999997,
2308
+ cacheRead: 0,
2309
+ cacheWrite: 0,
2310
+ },
2311
+ contextWindow: 1048576,
2312
+ maxTokens: 65536,
2313
+ },
2025
2314
  "qwen/qwen3-vl-235b-a22b-thinking": {
2026
2315
  id: "qwen/qwen3-vl-235b-a22b-thinking",
2027
2316
  name: "Qwen: Qwen3 VL 235B A22B Thinking",
@@ -2048,13 +2337,13 @@ export const MODELS = {
2048
2337
  reasoning: false,
2049
2338
  input: ["text", "image"],
2050
2339
  cost: {
2051
- input: 0.3,
2052
- output: 1.2,
2340
+ input: 0.22,
2341
+ output: 0.88,
2053
2342
  cacheRead: 0,
2054
2343
  cacheWrite: 0,
2055
2344
  },
2056
2345
  contextWindow: 262144,
2057
- maxTokens: 262144,
2346
+ maxTokens: 4096,
2058
2347
  },
2059
2348
  "qwen/qwen3-max": {
2060
2349
  id: "qwen/qwen3-max",
@@ -2090,6 +2379,23 @@ export const MODELS = {
2090
2379
  contextWindow: 128000,
2091
2380
  maxTokens: 65536,
2092
2381
  },
2382
+ "openai/gpt-5-codex": {
2383
+ id: "openai/gpt-5-codex",
2384
+ name: "OpenAI: GPT-5 Codex",
2385
+ api: "openai-completions",
2386
+ provider: "openrouter",
2387
+ baseUrl: "https://openrouter.ai/api/v1",
2388
+ reasoning: true,
2389
+ input: ["text", "image"],
2390
+ cost: {
2391
+ input: 1.25,
2392
+ output: 10,
2393
+ cacheRead: 0.125,
2394
+ cacheWrite: 0,
2395
+ },
2396
+ contextWindow: 400000,
2397
+ maxTokens: 128000,
2398
+ },
2093
2399
  "deepseek/deepseek-v3.1-terminus": {
2094
2400
  id: "deepseek/deepseek-v3.1-terminus",
2095
2401
  name: "DeepSeek: DeepSeek V3.1 Terminus",
@@ -2124,6 +2430,23 @@ export const MODELS = {
2124
2430
  contextWindow: 131072,
2125
2431
  maxTokens: 65536,
2126
2432
  },
2433
+ "x-ai/grok-4-fast": {
2434
+ id: "x-ai/grok-4-fast",
2435
+ name: "xAI: Grok 4 Fast",
2436
+ api: "openai-completions",
2437
+ provider: "openrouter",
2438
+ baseUrl: "https://openrouter.ai/api/v1",
2439
+ reasoning: true,
2440
+ input: ["text", "image"],
2441
+ cost: {
2442
+ input: 0.19999999999999998,
2443
+ output: 0.5,
2444
+ cacheRead: 0.049999999999999996,
2445
+ cacheWrite: 0,
2446
+ },
2447
+ contextWindow: 2000000,
2448
+ maxTokens: 30000,
2449
+ },
2127
2450
  "alibaba/tongyi-deepresearch-30b-a3b:free": {
2128
2451
  id: "alibaba/tongyi-deepresearch-30b-a3b:free",
2129
2452
  name: "Tongyi DeepResearch 30B A3B (free)",
@@ -2388,13 +2711,30 @@ export const MODELS = {
2388
2711
  reasoning: true,
2389
2712
  input: ["text"],
2390
2713
  cost: {
2391
- input: 0.08,
2392
- output: 0.29,
2714
+ input: 0.09,
2715
+ output: 0.3,
2393
2716
  cacheRead: 0,
2394
2717
  cacheWrite: 0,
2395
2718
  },
2396
2719
  contextWindow: 262144,
2397
- maxTokens: 262144,
2720
+ maxTokens: 131072,
2721
+ },
2722
+ "x-ai/grok-code-fast-1": {
2723
+ id: "x-ai/grok-code-fast-1",
2724
+ name: "xAI: Grok Code Fast 1",
2725
+ api: "openai-completions",
2726
+ provider: "openrouter",
2727
+ baseUrl: "https://openrouter.ai/api/v1",
2728
+ reasoning: true,
2729
+ input: ["text"],
2730
+ cost: {
2731
+ input: 0.19999999999999998,
2732
+ output: 1.5,
2733
+ cacheRead: 0.02,
2734
+ cacheWrite: 0,
2735
+ },
2736
+ contextWindow: 256000,
2737
+ maxTokens: 10000,
2398
2738
  },
2399
2739
  "nousresearch/hermes-4-70b": {
2400
2740
  id: "nousresearch/hermes-4-70b",
@@ -2439,13 +2779,30 @@ export const MODELS = {
2439
2779
  reasoning: true,
2440
2780
  input: ["text"],
2441
2781
  cost: {
2442
- input: 0.27,
2443
- output: 1,
2782
+ input: 0.19999999999999998,
2783
+ output: 0.7999999999999999,
2444
2784
  cacheRead: 0,
2445
2785
  cacheWrite: 0,
2446
2786
  },
2447
- contextWindow: 131072,
2448
- maxTokens: 32768,
2787
+ contextWindow: 163840,
2788
+ maxTokens: 163840,
2789
+ },
2790
+ "openai/gpt-4o-audio-preview": {
2791
+ id: "openai/gpt-4o-audio-preview",
2792
+ name: "OpenAI: GPT-4o Audio",
2793
+ api: "openai-completions",
2794
+ provider: "openrouter",
2795
+ baseUrl: "https://openrouter.ai/api/v1",
2796
+ reasoning: false,
2797
+ input: ["text"],
2798
+ cost: {
2799
+ input: 2.5,
2800
+ output: 10,
2801
+ cacheRead: 0,
2802
+ cacheWrite: 0,
2803
+ },
2804
+ contextWindow: 128000,
2805
+ maxTokens: 16384,
2449
2806
  },
2450
2807
  "mistralai/mistral-medium-3.1": {
2451
2808
  id: "mistralai/mistral-medium-3.1",
@@ -2549,6 +2906,142 @@ export const MODELS = {
2549
2906
  contextWindow: 256000,
2550
2907
  maxTokens: 4096,
2551
2908
  },
2909
+ "openai/gpt-5": {
2910
+ id: "openai/gpt-5",
2911
+ name: "OpenAI: GPT-5",
2912
+ api: "openai-completions",
2913
+ provider: "openrouter",
2914
+ baseUrl: "https://openrouter.ai/api/v1",
2915
+ reasoning: true,
2916
+ input: ["text", "image"],
2917
+ cost: {
2918
+ input: 1.25,
2919
+ output: 10,
2920
+ cacheRead: 0.125,
2921
+ cacheWrite: 0,
2922
+ },
2923
+ contextWindow: 400000,
2924
+ maxTokens: 128000,
2925
+ },
2926
+ "openai/gpt-5-mini": {
2927
+ id: "openai/gpt-5-mini",
2928
+ name: "OpenAI: GPT-5 Mini",
2929
+ api: "openai-completions",
2930
+ provider: "openrouter",
2931
+ baseUrl: "https://openrouter.ai/api/v1",
2932
+ reasoning: true,
2933
+ input: ["text", "image"],
2934
+ cost: {
2935
+ input: 0.25,
2936
+ output: 2,
2937
+ cacheRead: 0.024999999999999998,
2938
+ cacheWrite: 0,
2939
+ },
2940
+ contextWindow: 400000,
2941
+ maxTokens: 128000,
2942
+ },
2943
+ "openai/gpt-5-nano": {
2944
+ id: "openai/gpt-5-nano",
2945
+ name: "OpenAI: GPT-5 Nano",
2946
+ api: "openai-completions",
2947
+ provider: "openrouter",
2948
+ baseUrl: "https://openrouter.ai/api/v1",
2949
+ reasoning: true,
2950
+ input: ["text", "image"],
2951
+ cost: {
2952
+ input: 0.049999999999999996,
2953
+ output: 0.39999999999999997,
2954
+ cacheRead: 0.005,
2955
+ cacheWrite: 0,
2956
+ },
2957
+ contextWindow: 400000,
2958
+ maxTokens: 128000,
2959
+ },
2960
+ "openai/gpt-oss-120b": {
2961
+ id: "openai/gpt-oss-120b",
2962
+ name: "OpenAI: gpt-oss-120b",
2963
+ api: "openai-completions",
2964
+ provider: "openrouter",
2965
+ baseUrl: "https://openrouter.ai/api/v1",
2966
+ reasoning: true,
2967
+ input: ["text"],
2968
+ cost: {
2969
+ input: 0.04,
2970
+ output: 0.39999999999999997,
2971
+ cacheRead: 0,
2972
+ cacheWrite: 0,
2973
+ },
2974
+ contextWindow: 131072,
2975
+ maxTokens: 131072,
2976
+ },
2977
+ "openai/gpt-oss-120b:exacto": {
2978
+ id: "openai/gpt-oss-120b:exacto",
2979
+ name: "OpenAI: gpt-oss-120b (exacto)",
2980
+ api: "openai-completions",
2981
+ provider: "openrouter",
2982
+ baseUrl: "https://openrouter.ai/api/v1",
2983
+ reasoning: true,
2984
+ input: ["text"],
2985
+ cost: {
2986
+ input: 0.049999999999999996,
2987
+ output: 0.24,
2988
+ cacheRead: 0,
2989
+ cacheWrite: 0,
2990
+ },
2991
+ contextWindow: 131072,
2992
+ maxTokens: 4096,
2993
+ },
2994
+ "openai/gpt-oss-20b:free": {
2995
+ id: "openai/gpt-oss-20b:free",
2996
+ name: "OpenAI: gpt-oss-20b (free)",
2997
+ api: "openai-completions",
2998
+ provider: "openrouter",
2999
+ baseUrl: "https://openrouter.ai/api/v1",
3000
+ reasoning: true,
3001
+ input: ["text"],
3002
+ cost: {
3003
+ input: 0,
3004
+ output: 0,
3005
+ cacheRead: 0,
3006
+ cacheWrite: 0,
3007
+ },
3008
+ contextWindow: 131072,
3009
+ maxTokens: 131072,
3010
+ },
3011
+ "openai/gpt-oss-20b": {
3012
+ id: "openai/gpt-oss-20b",
3013
+ name: "OpenAI: gpt-oss-20b",
3014
+ api: "openai-completions",
3015
+ provider: "openrouter",
3016
+ baseUrl: "https://openrouter.ai/api/v1",
3017
+ reasoning: true,
3018
+ input: ["text"],
3019
+ cost: {
3020
+ input: 0.03,
3021
+ output: 0.14,
3022
+ cacheRead: 0,
3023
+ cacheWrite: 0,
3024
+ },
3025
+ contextWindow: 131072,
3026
+ maxTokens: 4096,
3027
+ },
3028
+ "anthropic/claude-opus-4.1": {
3029
+ id: "anthropic/claude-opus-4.1",
3030
+ name: "Anthropic: Claude Opus 4.1",
3031
+ api: "openai-completions",
3032
+ provider: "openrouter",
3033
+ baseUrl: "https://openrouter.ai/api/v1",
3034
+ reasoning: true,
3035
+ input: ["text", "image"],
3036
+ cost: {
3037
+ input: 15,
3038
+ output: 75,
3039
+ cacheRead: 1.5,
3040
+ cacheWrite: 18.75,
3041
+ },
3042
+ contextWindow: 200000,
3043
+ maxTokens: 32000,
3044
+ },
2552
3045
  "mistralai/codestral-2508": {
2553
3046
  id: "mistralai/codestral-2508",
2554
3047
  name: "Mistral: Codestral 2508",
@@ -2736,6 +3229,23 @@ export const MODELS = {
2736
3229
  contextWindow: 262144,
2737
3230
  maxTokens: 262144,
2738
3231
  },
3232
+ "google/gemini-2.5-flash-lite": {
3233
+ id: "google/gemini-2.5-flash-lite",
3234
+ name: "Google: Gemini 2.5 Flash Lite",
3235
+ api: "openai-completions",
3236
+ provider: "openrouter",
3237
+ baseUrl: "https://openrouter.ai/api/v1",
3238
+ reasoning: true,
3239
+ input: ["text", "image"],
3240
+ cost: {
3241
+ input: 0.09999999999999999,
3242
+ output: 0.39999999999999997,
3243
+ cacheRead: 0.01,
3244
+ cacheWrite: 0.18330000000000002,
3245
+ },
3246
+ contextWindow: 1048576,
3247
+ maxTokens: 65535,
3248
+ },
2739
3249
  "qwen/qwen3-235b-a22b-2507": {
2740
3250
  id: "qwen/qwen3-235b-a22b-2507",
2741
3251
  name: "Qwen: Qwen3 235B A22B Instruct 2507",
@@ -2762,13 +3272,13 @@ export const MODELS = {
2762
3272
  reasoning: false,
2763
3273
  input: ["text"],
2764
3274
  cost: {
2765
- input: 0.14,
2766
- output: 2.4899999999999998,
3275
+ input: 0.5,
3276
+ output: 2.4,
2767
3277
  cacheRead: 0,
2768
3278
  cacheWrite: 0,
2769
3279
  },
2770
- contextWindow: 63000,
2771
- maxTokens: 63000,
3280
+ contextWindow: 131072,
3281
+ maxTokens: 4096,
2772
3282
  },
2773
3283
  "mistralai/devstral-medium": {
2774
3284
  id: "mistralai/devstral-medium",
@@ -2804,6 +3314,23 @@ export const MODELS = {
2804
3314
  contextWindow: 128000,
2805
3315
  maxTokens: 4096,
2806
3316
  },
3317
+ "x-ai/grok-4": {
3318
+ id: "x-ai/grok-4",
3319
+ name: "xAI: Grok 4",
3320
+ api: "openai-completions",
3321
+ provider: "openrouter",
3322
+ baseUrl: "https://openrouter.ai/api/v1",
3323
+ reasoning: true,
3324
+ input: ["text", "image"],
3325
+ cost: {
3326
+ input: 3,
3327
+ output: 15,
3328
+ cacheRead: 0.75,
3329
+ cacheWrite: 0,
3330
+ },
3331
+ contextWindow: 256000,
3332
+ maxTokens: 4096,
3333
+ },
2807
3334
  "tngtech/deepseek-r1t2-chimera": {
2808
3335
  id: "tngtech/deepseek-r1t2-chimera",
2809
3336
  name: "TNG: DeepSeek R1T2 Chimera",
@@ -2889,57 +3416,176 @@ export const MODELS = {
2889
3416
  contextWindow: 1000000,
2890
3417
  maxTokens: 40000,
2891
3418
  },
2892
- "mistralai/magistral-small-2506": {
2893
- id: "mistralai/magistral-small-2506",
2894
- name: "Mistral: Magistral Small 2506",
3419
+ "google/gemini-2.5-flash-lite-preview-06-17": {
3420
+ id: "google/gemini-2.5-flash-lite-preview-06-17",
3421
+ name: "Google: Gemini 2.5 Flash Lite Preview 06-17",
2895
3422
  api: "openai-completions",
2896
3423
  provider: "openrouter",
2897
3424
  baseUrl: "https://openrouter.ai/api/v1",
2898
3425
  reasoning: true,
2899
- input: ["text"],
3426
+ input: ["text", "image"],
2900
3427
  cost: {
2901
- input: 0.5,
2902
- output: 1.5,
2903
- cacheRead: 0,
2904
- cacheWrite: 0,
3428
+ input: 0.09999999999999999,
3429
+ output: 0.39999999999999997,
3430
+ cacheRead: 0.024999999999999998,
3431
+ cacheWrite: 0.18330000000000002,
2905
3432
  },
2906
- contextWindow: 40000,
2907
- maxTokens: 40000,
3433
+ contextWindow: 1048576,
3434
+ maxTokens: 65535,
2908
3435
  },
2909
- "mistralai/magistral-medium-2506:thinking": {
2910
- id: "mistralai/magistral-medium-2506:thinking",
2911
- name: "Mistral: Magistral Medium 2506 (thinking)",
3436
+ "google/gemini-2.5-flash": {
3437
+ id: "google/gemini-2.5-flash",
3438
+ name: "Google: Gemini 2.5 Flash",
2912
3439
  api: "openai-completions",
2913
3440
  provider: "openrouter",
2914
3441
  baseUrl: "https://openrouter.ai/api/v1",
2915
3442
  reasoning: true,
2916
- input: ["text"],
3443
+ input: ["text", "image"],
2917
3444
  cost: {
2918
- input: 2,
2919
- output: 5,
2920
- cacheRead: 0,
2921
- cacheWrite: 0,
3445
+ input: 0.3,
3446
+ output: 2.5,
3447
+ cacheRead: 0.03,
3448
+ cacheWrite: 0.3833,
2922
3449
  },
2923
- contextWindow: 40960,
2924
- maxTokens: 40000,
3450
+ contextWindow: 1048576,
3451
+ maxTokens: 65535,
2925
3452
  },
2926
- "mistralai/magistral-medium-2506": {
2927
- id: "mistralai/magistral-medium-2506",
2928
- name: "Mistral: Magistral Medium 2506",
3453
+ "google/gemini-2.5-pro": {
3454
+ id: "google/gemini-2.5-pro",
3455
+ name: "Google: Gemini 2.5 Pro",
2929
3456
  api: "openai-completions",
2930
3457
  provider: "openrouter",
2931
3458
  baseUrl: "https://openrouter.ai/api/v1",
2932
3459
  reasoning: true,
2933
- input: ["text"],
3460
+ input: ["text", "image"],
2934
3461
  cost: {
2935
- input: 2,
2936
- output: 5,
2937
- cacheRead: 0,
2938
- cacheWrite: 0,
3462
+ input: 1.25,
3463
+ output: 10,
3464
+ cacheRead: 0.125,
3465
+ cacheWrite: 1.625,
2939
3466
  },
2940
- contextWindow: 40960,
2941
- maxTokens: 40000,
2942
- },
3467
+ contextWindow: 1048576,
3468
+ maxTokens: 65536,
3469
+ },
3470
+ "openai/o3-pro": {
3471
+ id: "openai/o3-pro",
3472
+ name: "OpenAI: o3 Pro",
3473
+ api: "openai-completions",
3474
+ provider: "openrouter",
3475
+ baseUrl: "https://openrouter.ai/api/v1",
3476
+ reasoning: true,
3477
+ input: ["text", "image"],
3478
+ cost: {
3479
+ input: 20,
3480
+ output: 80,
3481
+ cacheRead: 0,
3482
+ cacheWrite: 0,
3483
+ },
3484
+ contextWindow: 200000,
3485
+ maxTokens: 100000,
3486
+ },
3487
+ "x-ai/grok-3-mini": {
3488
+ id: "x-ai/grok-3-mini",
3489
+ name: "xAI: Grok 3 Mini",
3490
+ api: "openai-completions",
3491
+ provider: "openrouter",
3492
+ baseUrl: "https://openrouter.ai/api/v1",
3493
+ reasoning: true,
3494
+ input: ["text"],
3495
+ cost: {
3496
+ input: 0.3,
3497
+ output: 0.5,
3498
+ cacheRead: 0.075,
3499
+ cacheWrite: 0,
3500
+ },
3501
+ contextWindow: 131072,
3502
+ maxTokens: 4096,
3503
+ },
3504
+ "x-ai/grok-3": {
3505
+ id: "x-ai/grok-3",
3506
+ name: "xAI: Grok 3",
3507
+ api: "openai-completions",
3508
+ provider: "openrouter",
3509
+ baseUrl: "https://openrouter.ai/api/v1",
3510
+ reasoning: false,
3511
+ input: ["text"],
3512
+ cost: {
3513
+ input: 3,
3514
+ output: 15,
3515
+ cacheRead: 0.75,
3516
+ cacheWrite: 0,
3517
+ },
3518
+ contextWindow: 131072,
3519
+ maxTokens: 4096,
3520
+ },
3521
+ "mistralai/magistral-small-2506": {
3522
+ id: "mistralai/magistral-small-2506",
3523
+ name: "Mistral: Magistral Small 2506",
3524
+ api: "openai-completions",
3525
+ provider: "openrouter",
3526
+ baseUrl: "https://openrouter.ai/api/v1",
3527
+ reasoning: true,
3528
+ input: ["text"],
3529
+ cost: {
3530
+ input: 0.5,
3531
+ output: 1.5,
3532
+ cacheRead: 0,
3533
+ cacheWrite: 0,
3534
+ },
3535
+ contextWindow: 40000,
3536
+ maxTokens: 40000,
3537
+ },
3538
+ "mistralai/magistral-medium-2506:thinking": {
3539
+ id: "mistralai/magistral-medium-2506:thinking",
3540
+ name: "Mistral: Magistral Medium 2506 (thinking)",
3541
+ api: "openai-completions",
3542
+ provider: "openrouter",
3543
+ baseUrl: "https://openrouter.ai/api/v1",
3544
+ reasoning: true,
3545
+ input: ["text"],
3546
+ cost: {
3547
+ input: 2,
3548
+ output: 5,
3549
+ cacheRead: 0,
3550
+ cacheWrite: 0,
3551
+ },
3552
+ contextWindow: 40960,
3553
+ maxTokens: 40000,
3554
+ },
3555
+ "mistralai/magistral-medium-2506": {
3556
+ id: "mistralai/magistral-medium-2506",
3557
+ name: "Mistral: Magistral Medium 2506",
3558
+ api: "openai-completions",
3559
+ provider: "openrouter",
3560
+ baseUrl: "https://openrouter.ai/api/v1",
3561
+ reasoning: true,
3562
+ input: ["text"],
3563
+ cost: {
3564
+ input: 2,
3565
+ output: 5,
3566
+ cacheRead: 0,
3567
+ cacheWrite: 0,
3568
+ },
3569
+ contextWindow: 40960,
3570
+ maxTokens: 40000,
3571
+ },
3572
+ "google/gemini-2.5-pro-preview": {
3573
+ id: "google/gemini-2.5-pro-preview",
3574
+ name: "Google: Gemini 2.5 Pro Preview 06-05",
3575
+ api: "openai-completions",
3576
+ provider: "openrouter",
3577
+ baseUrl: "https://openrouter.ai/api/v1",
3578
+ reasoning: true,
3579
+ input: ["text", "image"],
3580
+ cost: {
3581
+ input: 1.25,
3582
+ output: 10,
3583
+ cacheRead: 0.31,
3584
+ cacheWrite: 1.625,
3585
+ },
3586
+ contextWindow: 1048576,
3587
+ maxTokens: 65536,
3588
+ },
2943
3589
  "deepseek/deepseek-r1-0528": {
2944
3590
  id: "deepseek/deepseek-r1-0528",
2945
3591
  name: "DeepSeek: R1 0528",
@@ -2957,22 +3603,39 @@ export const MODELS = {
2957
3603
  contextWindow: 163840,
2958
3604
  maxTokens: 163840,
2959
3605
  },
2960
- "mistralai/devstral-small-2505:free": {
2961
- id: "mistralai/devstral-small-2505:free",
2962
- name: "Mistral: Devstral Small 2505 (free)",
3606
+ "anthropic/claude-opus-4": {
3607
+ id: "anthropic/claude-opus-4",
3608
+ name: "Anthropic: Claude Opus 4",
2963
3609
  api: "openai-completions",
2964
3610
  provider: "openrouter",
2965
3611
  baseUrl: "https://openrouter.ai/api/v1",
2966
- reasoning: false,
2967
- input: ["text"],
3612
+ reasoning: true,
3613
+ input: ["text", "image"],
2968
3614
  cost: {
2969
- input: 0,
2970
- output: 0,
2971
- cacheRead: 0,
2972
- cacheWrite: 0,
3615
+ input: 15,
3616
+ output: 75,
3617
+ cacheRead: 1.5,
3618
+ cacheWrite: 18.75,
2973
3619
  },
2974
- contextWindow: 32768,
2975
- maxTokens: 4096,
3620
+ contextWindow: 200000,
3621
+ maxTokens: 32000,
3622
+ },
3623
+ "anthropic/claude-sonnet-4": {
3624
+ id: "anthropic/claude-sonnet-4",
3625
+ name: "Anthropic: Claude Sonnet 4",
3626
+ api: "openai-completions",
3627
+ provider: "openrouter",
3628
+ baseUrl: "https://openrouter.ai/api/v1",
3629
+ reasoning: true,
3630
+ input: ["text", "image"],
3631
+ cost: {
3632
+ input: 3,
3633
+ output: 15,
3634
+ cacheRead: 0.3,
3635
+ cacheWrite: 3.75,
3636
+ },
3637
+ contextWindow: 1000000,
3638
+ maxTokens: 64000,
2976
3639
  },
2977
3640
  "mistralai/devstral-small-2505": {
2978
3641
  id: "mistralai/devstral-small-2505",
@@ -2983,13 +3646,30 @@ export const MODELS = {
2983
3646
  reasoning: false,
2984
3647
  input: ["text"],
2985
3648
  cost: {
2986
- input: 0.049999999999999996,
2987
- output: 0.22,
3649
+ input: 0.06,
3650
+ output: 0.12,
2988
3651
  cacheRead: 0,
2989
3652
  cacheWrite: 0,
2990
3653
  },
2991
- contextWindow: 131072,
2992
- maxTokens: 131072,
3654
+ contextWindow: 128000,
3655
+ maxTokens: 4096,
3656
+ },
3657
+ "openai/codex-mini": {
3658
+ id: "openai/codex-mini",
3659
+ name: "OpenAI: Codex Mini",
3660
+ api: "openai-completions",
3661
+ provider: "openrouter",
3662
+ baseUrl: "https://openrouter.ai/api/v1",
3663
+ reasoning: true,
3664
+ input: ["text", "image"],
3665
+ cost: {
3666
+ input: 1.5,
3667
+ output: 6,
3668
+ cacheRead: 0.375,
3669
+ cacheWrite: 0,
3670
+ },
3671
+ contextWindow: 200000,
3672
+ maxTokens: 100000,
2993
3673
  },
2994
3674
  "meta-llama/llama-3.3-8b-instruct:free": {
2995
3675
  id: "meta-llama/llama-3.3-8b-instruct:free",
@@ -3042,6 +3722,23 @@ export const MODELS = {
3042
3722
  contextWindow: 131072,
3043
3723
  maxTokens: 4096,
3044
3724
  },
3725
+ "google/gemini-2.5-pro-preview-05-06": {
3726
+ id: "google/gemini-2.5-pro-preview-05-06",
3727
+ name: "Google: Gemini 2.5 Pro Preview 05-06",
3728
+ api: "openai-completions",
3729
+ provider: "openrouter",
3730
+ baseUrl: "https://openrouter.ai/api/v1",
3731
+ reasoning: true,
3732
+ input: ["text", "image"],
3733
+ cost: {
3734
+ input: 1.25,
3735
+ output: 10,
3736
+ cacheRead: 0.31,
3737
+ cacheWrite: 1.625,
3738
+ },
3739
+ contextWindow: 1048576,
3740
+ maxTokens: 65535,
3741
+ },
3045
3742
  "arcee-ai/virtuoso-large": {
3046
3743
  id: "arcee-ai/virtuoso-large",
3047
3744
  name: "Arcee AI: Virtuoso Large",
@@ -3195,94 +3892,230 @@ export const MODELS = {
3195
3892
  contextWindow: 40960,
3196
3893
  maxTokens: 40960,
3197
3894
  },
3198
- "meta-llama/llama-4-maverick:free": {
3199
- id: "meta-llama/llama-4-maverick:free",
3200
- name: "Meta: Llama 4 Maverick (free)",
3895
+ "openai/o4-mini-high": {
3896
+ id: "openai/o4-mini-high",
3897
+ name: "OpenAI: o4 Mini High",
3201
3898
  api: "openai-completions",
3202
3899
  provider: "openrouter",
3203
3900
  baseUrl: "https://openrouter.ai/api/v1",
3204
- reasoning: false,
3901
+ reasoning: true,
3205
3902
  input: ["text", "image"],
3206
3903
  cost: {
3207
- input: 0,
3208
- output: 0,
3209
- cacheRead: 0,
3904
+ input: 1.1,
3905
+ output: 4.4,
3906
+ cacheRead: 0.275,
3210
3907
  cacheWrite: 0,
3211
3908
  },
3212
- contextWindow: 128000,
3213
- maxTokens: 4028,
3909
+ contextWindow: 200000,
3910
+ maxTokens: 100000,
3214
3911
  },
3215
- "meta-llama/llama-4-maverick": {
3216
- id: "meta-llama/llama-4-maverick",
3217
- name: "Meta: Llama 4 Maverick",
3912
+ "openai/o3": {
3913
+ id: "openai/o3",
3914
+ name: "OpenAI: o3",
3218
3915
  api: "openai-completions",
3219
3916
  provider: "openrouter",
3220
3917
  baseUrl: "https://openrouter.ai/api/v1",
3221
- reasoning: false,
3918
+ reasoning: true,
3222
3919
  input: ["text", "image"],
3223
3920
  cost: {
3224
- input: 0.15,
3225
- output: 0.6,
3226
- cacheRead: 0,
3921
+ input: 2,
3922
+ output: 8,
3923
+ cacheRead: 0.5,
3227
3924
  cacheWrite: 0,
3228
3925
  },
3229
- contextWindow: 1048576,
3230
- maxTokens: 16384,
3926
+ contextWindow: 200000,
3927
+ maxTokens: 100000,
3231
3928
  },
3232
- "meta-llama/llama-4-scout:free": {
3233
- id: "meta-llama/llama-4-scout:free",
3234
- name: "Meta: Llama 4 Scout (free)",
3929
+ "openai/o4-mini": {
3930
+ id: "openai/o4-mini",
3931
+ name: "OpenAI: o4 Mini",
3932
+ api: "openai-completions",
3933
+ provider: "openrouter",
3934
+ baseUrl: "https://openrouter.ai/api/v1",
3935
+ reasoning: true,
3936
+ input: ["text", "image"],
3937
+ cost: {
3938
+ input: 1.1,
3939
+ output: 4.4,
3940
+ cacheRead: 0.275,
3941
+ cacheWrite: 0,
3942
+ },
3943
+ contextWindow: 200000,
3944
+ maxTokens: 100000,
3945
+ },
3946
+ "openai/gpt-4.1": {
3947
+ id: "openai/gpt-4.1",
3948
+ name: "OpenAI: GPT-4.1",
3235
3949
  api: "openai-completions",
3236
3950
  provider: "openrouter",
3237
3951
  baseUrl: "https://openrouter.ai/api/v1",
3238
3952
  reasoning: false,
3239
3953
  input: ["text", "image"],
3240
3954
  cost: {
3241
- input: 0,
3242
- output: 0,
3243
- cacheRead: 0,
3955
+ input: 2,
3956
+ output: 8,
3957
+ cacheRead: 0.5,
3244
3958
  cacheWrite: 0,
3245
3959
  },
3246
- contextWindow: 128000,
3247
- maxTokens: 4028,
3960
+ contextWindow: 1047576,
3961
+ maxTokens: 32768,
3248
3962
  },
3249
- "meta-llama/llama-4-scout": {
3250
- id: "meta-llama/llama-4-scout",
3251
- name: "Meta: Llama 4 Scout",
3963
+ "openai/gpt-4.1-mini": {
3964
+ id: "openai/gpt-4.1-mini",
3965
+ name: "OpenAI: GPT-4.1 Mini",
3252
3966
  api: "openai-completions",
3253
3967
  provider: "openrouter",
3254
3968
  baseUrl: "https://openrouter.ai/api/v1",
3255
3969
  reasoning: false,
3256
3970
  input: ["text", "image"],
3257
3971
  cost: {
3258
- input: 0.08,
3259
- output: 0.3,
3260
- cacheRead: 0,
3972
+ input: 0.39999999999999997,
3973
+ output: 1.5999999999999999,
3974
+ cacheRead: 0.09999999999999999,
3261
3975
  cacheWrite: 0,
3262
3976
  },
3263
- contextWindow: 327680,
3264
- maxTokens: 16384,
3977
+ contextWindow: 1047576,
3978
+ maxTokens: 32768,
3265
3979
  },
3266
- "deepseek/deepseek-chat-v3-0324:free": {
3267
- id: "deepseek/deepseek-chat-v3-0324:free",
3268
- name: "DeepSeek: DeepSeek V3 0324 (free)",
3980
+ "openai/gpt-4.1-nano": {
3981
+ id: "openai/gpt-4.1-nano",
3982
+ name: "OpenAI: GPT-4.1 Nano",
3269
3983
  api: "openai-completions",
3270
3984
  provider: "openrouter",
3271
3985
  baseUrl: "https://openrouter.ai/api/v1",
3272
3986
  reasoning: false,
3273
- input: ["text"],
3987
+ input: ["text", "image"],
3274
3988
  cost: {
3275
- input: 0,
3276
- output: 0,
3277
- cacheRead: 0,
3989
+ input: 0.09999999999999999,
3990
+ output: 0.39999999999999997,
3991
+ cacheRead: 0.024999999999999998,
3278
3992
  cacheWrite: 0,
3279
3993
  },
3280
- contextWindow: 163840,
3281
- maxTokens: 4096,
3994
+ contextWindow: 1047576,
3995
+ maxTokens: 32768,
3282
3996
  },
3283
- "deepseek/deepseek-chat-v3-0324": {
3284
- id: "deepseek/deepseek-chat-v3-0324",
3285
- name: "DeepSeek: DeepSeek V3 0324",
3997
+ "x-ai/grok-3-mini-beta": {
3998
+ id: "x-ai/grok-3-mini-beta",
3999
+ name: "xAI: Grok 3 Mini Beta",
4000
+ api: "openai-completions",
4001
+ provider: "openrouter",
4002
+ baseUrl: "https://openrouter.ai/api/v1",
4003
+ reasoning: true,
4004
+ input: ["text"],
4005
+ cost: {
4006
+ input: 0.3,
4007
+ output: 0.5,
4008
+ cacheRead: 0.075,
4009
+ cacheWrite: 0,
4010
+ },
4011
+ contextWindow: 131072,
4012
+ maxTokens: 4096,
4013
+ },
4014
+ "x-ai/grok-3-beta": {
4015
+ id: "x-ai/grok-3-beta",
4016
+ name: "xAI: Grok 3 Beta",
4017
+ api: "openai-completions",
4018
+ provider: "openrouter",
4019
+ baseUrl: "https://openrouter.ai/api/v1",
4020
+ reasoning: false,
4021
+ input: ["text"],
4022
+ cost: {
4023
+ input: 3,
4024
+ output: 15,
4025
+ cacheRead: 0.75,
4026
+ cacheWrite: 0,
4027
+ },
4028
+ contextWindow: 131072,
4029
+ maxTokens: 4096,
4030
+ },
4031
+ "meta-llama/llama-4-maverick:free": {
4032
+ id: "meta-llama/llama-4-maverick:free",
4033
+ name: "Meta: Llama 4 Maverick (free)",
4034
+ api: "openai-completions",
4035
+ provider: "openrouter",
4036
+ baseUrl: "https://openrouter.ai/api/v1",
4037
+ reasoning: false,
4038
+ input: ["text", "image"],
4039
+ cost: {
4040
+ input: 0,
4041
+ output: 0,
4042
+ cacheRead: 0,
4043
+ cacheWrite: 0,
4044
+ },
4045
+ contextWindow: 128000,
4046
+ maxTokens: 4028,
4047
+ },
4048
+ "meta-llama/llama-4-maverick": {
4049
+ id: "meta-llama/llama-4-maverick",
4050
+ name: "Meta: Llama 4 Maverick",
4051
+ api: "openai-completions",
4052
+ provider: "openrouter",
4053
+ baseUrl: "https://openrouter.ai/api/v1",
4054
+ reasoning: false,
4055
+ input: ["text", "image"],
4056
+ cost: {
4057
+ input: 0.15,
4058
+ output: 0.6,
4059
+ cacheRead: 0,
4060
+ cacheWrite: 0,
4061
+ },
4062
+ contextWindow: 1048576,
4063
+ maxTokens: 16384,
4064
+ },
4065
+ "meta-llama/llama-4-scout:free": {
4066
+ id: "meta-llama/llama-4-scout:free",
4067
+ name: "Meta: Llama 4 Scout (free)",
4068
+ api: "openai-completions",
4069
+ provider: "openrouter",
4070
+ baseUrl: "https://openrouter.ai/api/v1",
4071
+ reasoning: false,
4072
+ input: ["text", "image"],
4073
+ cost: {
4074
+ input: 0,
4075
+ output: 0,
4076
+ cacheRead: 0,
4077
+ cacheWrite: 0,
4078
+ },
4079
+ contextWindow: 128000,
4080
+ maxTokens: 4028,
4081
+ },
4082
+ "meta-llama/llama-4-scout": {
4083
+ id: "meta-llama/llama-4-scout",
4084
+ name: "Meta: Llama 4 Scout",
4085
+ api: "openai-completions",
4086
+ provider: "openrouter",
4087
+ baseUrl: "https://openrouter.ai/api/v1",
4088
+ reasoning: false,
4089
+ input: ["text", "image"],
4090
+ cost: {
4091
+ input: 0.08,
4092
+ output: 0.3,
4093
+ cacheRead: 0,
4094
+ cacheWrite: 0,
4095
+ },
4096
+ contextWindow: 327680,
4097
+ maxTokens: 16384,
4098
+ },
4099
+ "deepseek/deepseek-chat-v3-0324:free": {
4100
+ id: "deepseek/deepseek-chat-v3-0324:free",
4101
+ name: "DeepSeek: DeepSeek V3 0324 (free)",
4102
+ api: "openai-completions",
4103
+ provider: "openrouter",
4104
+ baseUrl: "https://openrouter.ai/api/v1",
4105
+ reasoning: false,
4106
+ input: ["text"],
4107
+ cost: {
4108
+ input: 0,
4109
+ output: 0,
4110
+ cacheRead: 0,
4111
+ cacheWrite: 0,
4112
+ },
4113
+ contextWindow: 163840,
4114
+ maxTokens: 4096,
4115
+ },
4116
+ "deepseek/deepseek-chat-v3-0324": {
4117
+ id: "deepseek/deepseek-chat-v3-0324",
4118
+ name: "DeepSeek: DeepSeek V3 0324",
3286
4119
  api: "openai-completions",
3287
4120
  provider: "openrouter",
3288
4121
  baseUrl: "https://openrouter.ai/api/v1",
@@ -3324,12 +4157,29 @@ export const MODELS = {
3324
4157
  input: ["text", "image"],
3325
4158
  cost: {
3326
4159
  input: 0.049999999999999996,
3327
- output: 0.09999999999999999,
4160
+ output: 0.22,
3328
4161
  cacheRead: 0,
3329
4162
  cacheWrite: 0,
3330
4163
  },
3331
- contextWindow: 128000,
3332
- maxTokens: 4096,
4164
+ contextWindow: 131072,
4165
+ maxTokens: 131072,
4166
+ },
4167
+ "google/gemma-3-27b-it": {
4168
+ id: "google/gemma-3-27b-it",
4169
+ name: "Google: Gemma 3 27B",
4170
+ api: "openai-completions",
4171
+ provider: "openrouter",
4172
+ baseUrl: "https://openrouter.ai/api/v1",
4173
+ reasoning: false,
4174
+ input: ["text", "image"],
4175
+ cost: {
4176
+ input: 0.09,
4177
+ output: 0.16,
4178
+ cacheRead: 0,
4179
+ cacheWrite: 0,
4180
+ },
4181
+ contextWindow: 131072,
4182
+ maxTokens: 16384,
3333
4183
  },
3334
4184
  "qwen/qwq-32b": {
3335
4185
  id: "qwen/qwq-32b",
@@ -3348,22 +4198,56 @@ export const MODELS = {
3348
4198
  contextWindow: 32768,
3349
4199
  maxTokens: 4096,
3350
4200
  },
3351
- "nousresearch/deephermes-3-llama-3-8b-preview": {
3352
- id: "nousresearch/deephermes-3-llama-3-8b-preview",
3353
- name: "Nous: DeepHermes 3 Llama 3 8B Preview",
4201
+ "google/gemini-2.0-flash-lite-001": {
4202
+ id: "google/gemini-2.0-flash-lite-001",
4203
+ name: "Google: Gemini 2.0 Flash Lite",
3354
4204
  api: "openai-completions",
3355
4205
  provider: "openrouter",
3356
4206
  baseUrl: "https://openrouter.ai/api/v1",
3357
4207
  reasoning: false,
3358
- input: ["text"],
4208
+ input: ["text", "image"],
3359
4209
  cost: {
3360
- input: 0.03,
3361
- output: 0.11,
4210
+ input: 0.075,
4211
+ output: 0.3,
3362
4212
  cacheRead: 0,
3363
4213
  cacheWrite: 0,
3364
4214
  },
3365
- contextWindow: 131072,
3366
- maxTokens: 131072,
4215
+ contextWindow: 1048576,
4216
+ maxTokens: 8192,
4217
+ },
4218
+ "anthropic/claude-3.7-sonnet:thinking": {
4219
+ id: "anthropic/claude-3.7-sonnet:thinking",
4220
+ name: "Anthropic: Claude 3.7 Sonnet (thinking)",
4221
+ api: "openai-completions",
4222
+ provider: "openrouter",
4223
+ baseUrl: "https://openrouter.ai/api/v1",
4224
+ reasoning: true,
4225
+ input: ["text", "image"],
4226
+ cost: {
4227
+ input: 3,
4228
+ output: 15,
4229
+ cacheRead: 0.3,
4230
+ cacheWrite: 3.75,
4231
+ },
4232
+ contextWindow: 200000,
4233
+ maxTokens: 64000,
4234
+ },
4235
+ "anthropic/claude-3.7-sonnet": {
4236
+ id: "anthropic/claude-3.7-sonnet",
4237
+ name: "Anthropic: Claude 3.7 Sonnet",
4238
+ api: "openai-completions",
4239
+ provider: "openrouter",
4240
+ baseUrl: "https://openrouter.ai/api/v1",
4241
+ reasoning: true,
4242
+ input: ["text", "image"],
4243
+ cost: {
4244
+ input: 3,
4245
+ output: 15,
4246
+ cacheRead: 0.3,
4247
+ cacheWrite: 3.75,
4248
+ },
4249
+ contextWindow: 200000,
4250
+ maxTokens: 64000,
3367
4251
  },
3368
4252
  "mistralai/mistral-saba": {
3369
4253
  id: "mistralai/mistral-saba",
@@ -3382,6 +4266,40 @@ export const MODELS = {
3382
4266
  contextWindow: 32768,
3383
4267
  maxTokens: 4096,
3384
4268
  },
4269
+ "openai/o3-mini-high": {
4270
+ id: "openai/o3-mini-high",
4271
+ name: "OpenAI: o3 Mini High",
4272
+ api: "openai-completions",
4273
+ provider: "openrouter",
4274
+ baseUrl: "https://openrouter.ai/api/v1",
4275
+ reasoning: false,
4276
+ input: ["text"],
4277
+ cost: {
4278
+ input: 1.1,
4279
+ output: 4.4,
4280
+ cacheRead: 0.55,
4281
+ cacheWrite: 0,
4282
+ },
4283
+ contextWindow: 200000,
4284
+ maxTokens: 100000,
4285
+ },
4286
+ "google/gemini-2.0-flash-001": {
4287
+ id: "google/gemini-2.0-flash-001",
4288
+ name: "Google: Gemini 2.0 Flash",
4289
+ api: "openai-completions",
4290
+ provider: "openrouter",
4291
+ baseUrl: "https://openrouter.ai/api/v1",
4292
+ reasoning: false,
4293
+ input: ["text", "image"],
4294
+ cost: {
4295
+ input: 0.09999999999999999,
4296
+ output: 0.39999999999999997,
4297
+ cacheRead: 0.024999999999999998,
4298
+ cacheWrite: 0.18330000000000002,
4299
+ },
4300
+ contextWindow: 1048576,
4301
+ maxTokens: 8192,
4302
+ },
3385
4303
  "qwen/qwen-vl-max": {
3386
4304
  id: "qwen/qwen-vl-max",
3387
4305
  name: "Qwen: Qwen VL Max",
@@ -3450,6 +4368,23 @@ export const MODELS = {
3450
4368
  contextWindow: 32768,
3451
4369
  maxTokens: 8192,
3452
4370
  },
4371
+ "openai/o3-mini": {
4372
+ id: "openai/o3-mini",
4373
+ name: "OpenAI: o3 Mini",
4374
+ api: "openai-completions",
4375
+ provider: "openrouter",
4376
+ baseUrl: "https://openrouter.ai/api/v1",
4377
+ reasoning: false,
4378
+ input: ["text"],
4379
+ cost: {
4380
+ input: 1.1,
4381
+ output: 4.4,
4382
+ cacheRead: 0.55,
4383
+ cacheWrite: 0,
4384
+ },
4385
+ contextWindow: 200000,
4386
+ maxTokens: 100000,
4387
+ },
3453
4388
  "mistralai/mistral-small-24b-instruct-2501": {
3454
4389
  id: "mistralai/mistral-small-24b-instruct-2501",
3455
4390
  name: "Mistral: Mistral Small 3",
@@ -3493,13 +4428,13 @@ export const MODELS = {
3493
4428
  reasoning: true,
3494
4429
  input: ["text"],
3495
4430
  cost: {
3496
- input: 0.39999999999999997,
3497
- output: 2,
4431
+ input: 0.3,
4432
+ output: 1.2,
3498
4433
  cacheRead: 0,
3499
4434
  cacheWrite: 0,
3500
4435
  },
3501
4436
  contextWindow: 163840,
3502
- maxTokens: 163840,
4437
+ maxTokens: 4096,
3503
4438
  },
3504
4439
  "mistralai/codestral-2501": {
3505
4440
  id: "mistralai/codestral-2501",
@@ -3515,7 +4450,7 @@ export const MODELS = {
3515
4450
  cacheRead: 0,
3516
4451
  cacheWrite: 0,
3517
4452
  },
3518
- contextWindow: 262144,
4453
+ contextWindow: 256000,
3519
4454
  maxTokens: 4096,
3520
4455
  },
3521
4456
  "deepseek/deepseek-chat": {
@@ -3528,13 +4463,47 @@ export const MODELS = {
3528
4463
  input: ["text"],
3529
4464
  cost: {
3530
4465
  input: 0.3,
3531
- output: 0.85,
4466
+ output: 1.2,
3532
4467
  cacheRead: 0,
3533
4468
  cacheWrite: 0,
3534
4469
  },
3535
4470
  contextWindow: 163840,
3536
4471
  maxTokens: 163840,
3537
4472
  },
4473
+ "openai/o1": {
4474
+ id: "openai/o1",
4475
+ name: "OpenAI: o1",
4476
+ api: "openai-completions",
4477
+ provider: "openrouter",
4478
+ baseUrl: "https://openrouter.ai/api/v1",
4479
+ reasoning: false,
4480
+ input: ["text", "image"],
4481
+ cost: {
4482
+ input: 15,
4483
+ output: 60,
4484
+ cacheRead: 7.5,
4485
+ cacheWrite: 0,
4486
+ },
4487
+ contextWindow: 200000,
4488
+ maxTokens: 100000,
4489
+ },
4490
+ "google/gemini-2.0-flash-exp:free": {
4491
+ id: "google/gemini-2.0-flash-exp:free",
4492
+ name: "Google: Gemini 2.0 Flash Experimental (free)",
4493
+ api: "openai-completions",
4494
+ provider: "openrouter",
4495
+ baseUrl: "https://openrouter.ai/api/v1",
4496
+ reasoning: false,
4497
+ input: ["text", "image"],
4498
+ cost: {
4499
+ input: 0,
4500
+ output: 0,
4501
+ cacheRead: 0,
4502
+ cacheWrite: 0,
4503
+ },
4504
+ contextWindow: 1048576,
4505
+ maxTokens: 8192,
4506
+ },
3538
4507
  "meta-llama/llama-3.3-70b-instruct:free": {
3539
4508
  id: "meta-llama/llama-3.3-70b-instruct:free",
3540
4509
  name: "Meta: Llama 3.3 70B Instruct (free)",
@@ -3550,7 +4519,7 @@ export const MODELS = {
3550
4519
  cacheWrite: 0,
3551
4520
  },
3552
4521
  contextWindow: 131072,
3553
- maxTokens: 2048,
4522
+ maxTokens: 4096,
3554
4523
  },
3555
4524
  "meta-llama/llama-3.3-70b-instruct": {
3556
4525
  id: "meta-llama/llama-3.3-70b-instruct",
@@ -3620,14 +4589,31 @@ export const MODELS = {
3620
4589
  contextWindow: 300000,
3621
4590
  maxTokens: 5120,
3622
4591
  },
3623
- "mistralai/mistral-large-2411": {
3624
- id: "mistralai/mistral-large-2411",
3625
- name: "Mistral Large 2411",
4592
+ "openai/gpt-4o-2024-11-20": {
4593
+ id: "openai/gpt-4o-2024-11-20",
4594
+ name: "OpenAI: GPT-4o (2024-11-20)",
3626
4595
  api: "openai-completions",
3627
4596
  provider: "openrouter",
3628
4597
  baseUrl: "https://openrouter.ai/api/v1",
3629
4598
  reasoning: false,
3630
- input: ["text"],
4599
+ input: ["text", "image"],
4600
+ cost: {
4601
+ input: 2.5,
4602
+ output: 10,
4603
+ cacheRead: 1.25,
4604
+ cacheWrite: 0,
4605
+ },
4606
+ contextWindow: 128000,
4607
+ maxTokens: 16384,
4608
+ },
4609
+ "mistralai/mistral-large-2411": {
4610
+ id: "mistralai/mistral-large-2411",
4611
+ name: "Mistral Large 2411",
4612
+ api: "openai-completions",
4613
+ provider: "openrouter",
4614
+ baseUrl: "https://openrouter.ai/api/v1",
4615
+ reasoning: false,
4616
+ input: ["text"],
3631
4617
  cost: {
3632
4618
  input: 2,
3633
4619
  output: 6,
@@ -3688,6 +4674,74 @@ export const MODELS = {
3688
4674
  contextWindow: 32768,
3689
4675
  maxTokens: 4096,
3690
4676
  },
4677
+ "anthropic/claude-3.5-haiku-20241022": {
4678
+ id: "anthropic/claude-3.5-haiku-20241022",
4679
+ name: "Anthropic: Claude 3.5 Haiku (2024-10-22)",
4680
+ api: "openai-completions",
4681
+ provider: "openrouter",
4682
+ baseUrl: "https://openrouter.ai/api/v1",
4683
+ reasoning: false,
4684
+ input: ["text", "image"],
4685
+ cost: {
4686
+ input: 0.7999999999999999,
4687
+ output: 4,
4688
+ cacheRead: 0.08,
4689
+ cacheWrite: 1,
4690
+ },
4691
+ contextWindow: 200000,
4692
+ maxTokens: 8192,
4693
+ },
4694
+ "anthropic/claude-3.5-haiku": {
4695
+ id: "anthropic/claude-3.5-haiku",
4696
+ name: "Anthropic: Claude 3.5 Haiku",
4697
+ api: "openai-completions",
4698
+ provider: "openrouter",
4699
+ baseUrl: "https://openrouter.ai/api/v1",
4700
+ reasoning: false,
4701
+ input: ["text", "image"],
4702
+ cost: {
4703
+ input: 0.7999999999999999,
4704
+ output: 4,
4705
+ cacheRead: 0.08,
4706
+ cacheWrite: 1,
4707
+ },
4708
+ contextWindow: 200000,
4709
+ maxTokens: 8192,
4710
+ },
4711
+ "anthropic/claude-3.5-sonnet": {
4712
+ id: "anthropic/claude-3.5-sonnet",
4713
+ name: "Anthropic: Claude 3.5 Sonnet",
4714
+ api: "openai-completions",
4715
+ provider: "openrouter",
4716
+ baseUrl: "https://openrouter.ai/api/v1",
4717
+ reasoning: false,
4718
+ input: ["text", "image"],
4719
+ cost: {
4720
+ input: 3,
4721
+ output: 15,
4722
+ cacheRead: 0.3,
4723
+ cacheWrite: 3.75,
4724
+ },
4725
+ contextWindow: 200000,
4726
+ maxTokens: 8192,
4727
+ },
4728
+ "mistralai/ministral-3b": {
4729
+ id: "mistralai/ministral-3b",
4730
+ name: "Mistral: Ministral 3B",
4731
+ api: "openai-completions",
4732
+ provider: "openrouter",
4733
+ baseUrl: "https://openrouter.ai/api/v1",
4734
+ reasoning: false,
4735
+ input: ["text"],
4736
+ cost: {
4737
+ input: 0.04,
4738
+ output: 0.04,
4739
+ cacheRead: 0,
4740
+ cacheWrite: 0,
4741
+ },
4742
+ contextWindow: 131072,
4743
+ maxTokens: 4096,
4744
+ },
3691
4745
  "mistralai/ministral-8b": {
3692
4746
  id: "mistralai/ministral-8b",
3693
4747
  name: "Mistral: Ministral 8B",
@@ -3770,7 +4824,7 @@ export const MODELS = {
3770
4824
  cacheRead: 0,
3771
4825
  cacheWrite: 0,
3772
4826
  },
3773
- contextWindow: 16384,
4827
+ contextWindow: 131072,
3774
4828
  maxTokens: 16384,
3775
4829
  },
3776
4830
  "qwen/qwen-2.5-72b-instruct": {
@@ -3892,22 +4946,39 @@ export const MODELS = {
3892
4946
  contextWindow: 65536,
3893
4947
  maxTokens: 4096,
3894
4948
  },
3895
- "meta-llama/llama-3.1-8b-instruct": {
3896
- id: "meta-llama/llama-3.1-8b-instruct",
3897
- name: "Meta: Llama 3.1 8B Instruct",
4949
+ "openai/gpt-4o-2024-08-06": {
4950
+ id: "openai/gpt-4o-2024-08-06",
4951
+ name: "OpenAI: GPT-4o (2024-08-06)",
4952
+ api: "openai-completions",
4953
+ provider: "openrouter",
4954
+ baseUrl: "https://openrouter.ai/api/v1",
4955
+ reasoning: false,
4956
+ input: ["text", "image"],
4957
+ cost: {
4958
+ input: 2.5,
4959
+ output: 10,
4960
+ cacheRead: 1.25,
4961
+ cacheWrite: 0,
4962
+ },
4963
+ contextWindow: 128000,
4964
+ maxTokens: 16384,
4965
+ },
4966
+ "meta-llama/llama-3.1-405b-instruct": {
4967
+ id: "meta-llama/llama-3.1-405b-instruct",
4968
+ name: "Meta: Llama 3.1 405B Instruct",
3898
4969
  api: "openai-completions",
3899
4970
  provider: "openrouter",
3900
4971
  baseUrl: "https://openrouter.ai/api/v1",
3901
4972
  reasoning: false,
3902
4973
  input: ["text"],
3903
4974
  cost: {
3904
- input: 0.02,
3905
- output: 0.03,
4975
+ input: 3.5,
4976
+ output: 3.5,
3906
4977
  cacheRead: 0,
3907
4978
  cacheWrite: 0,
3908
4979
  },
3909
- contextWindow: 16384,
3910
- maxTokens: 16384,
4980
+ contextWindow: 130815,
4981
+ maxTokens: 4096,
3911
4982
  },
3912
4983
  "meta-llama/llama-3.1-70b-instruct": {
3913
4984
  id: "meta-llama/llama-3.1-70b-instruct",
@@ -3926,21 +4997,21 @@ export const MODELS = {
3926
4997
  contextWindow: 131072,
3927
4998
  maxTokens: 4096,
3928
4999
  },
3929
- "meta-llama/llama-3.1-405b-instruct": {
3930
- id: "meta-llama/llama-3.1-405b-instruct",
3931
- name: "Meta: Llama 3.1 405B Instruct",
5000
+ "meta-llama/llama-3.1-8b-instruct": {
5001
+ id: "meta-llama/llama-3.1-8b-instruct",
5002
+ name: "Meta: Llama 3.1 8B Instruct",
3932
5003
  api: "openai-completions",
3933
5004
  provider: "openrouter",
3934
5005
  baseUrl: "https://openrouter.ai/api/v1",
3935
5006
  reasoning: false,
3936
5007
  input: ["text"],
3937
5008
  cost: {
3938
- input: 0.7999999999999999,
3939
- output: 0.7999999999999999,
5009
+ input: 0.02,
5010
+ output: 0.03,
3940
5011
  cacheRead: 0,
3941
5012
  cacheWrite: 0,
3942
5013
  },
3943
- contextWindow: 32768,
5014
+ contextWindow: 131072,
3944
5015
  maxTokens: 16384,
3945
5016
  },
3946
5017
  "mistralai/mistral-nemo": {
@@ -3960,6 +5031,57 @@ export const MODELS = {
  contextWindow: 131072,
  maxTokens: 16384,
  },
+ "openai/gpt-4o-mini": {
+ id: "openai/gpt-4o-mini",
+ name: "OpenAI: GPT-4o-mini",
+ api: "openai-completions",
+ provider: "openrouter",
+ baseUrl: "https://openrouter.ai/api/v1",
+ reasoning: false,
+ input: ["text", "image"],
+ cost: {
+ input: 0.15,
+ output: 0.6,
+ cacheRead: 0.075,
+ cacheWrite: 0,
+ },
+ contextWindow: 128000,
+ maxTokens: 16384,
+ },
+ "openai/gpt-4o-mini-2024-07-18": {
+ id: "openai/gpt-4o-mini-2024-07-18",
+ name: "OpenAI: GPT-4o-mini (2024-07-18)",
+ api: "openai-completions",
+ provider: "openrouter",
+ baseUrl: "https://openrouter.ai/api/v1",
+ reasoning: false,
+ input: ["text", "image"],
+ cost: {
+ input: 0.15,
+ output: 0.6,
+ cacheRead: 0.075,
+ cacheWrite: 0,
+ },
+ contextWindow: 128000,
+ maxTokens: 16384,
+ },
+ "anthropic/claude-3.5-sonnet-20240620": {
+ id: "anthropic/claude-3.5-sonnet-20240620",
+ name: "Anthropic: Claude 3.5 Sonnet (2024-06-20)",
+ api: "openai-completions",
+ provider: "openrouter",
+ baseUrl: "https://openrouter.ai/api/v1",
+ reasoning: false,
+ input: ["text", "image"],
+ cost: {
+ input: 3,
+ output: 15,
+ cacheRead: 0.3,
+ cacheWrite: 3.75,
+ },
+ contextWindow: 200000,
+ maxTokens: 8192,
+ },
  "sao10k/l3-euryale-70b": {
  id: "sao10k/l3-euryale-70b",
  name: "Sao10k: Llama 3 Euryale 70B v2.1",
@@ -4011,23 +5133,6 @@ export const MODELS = {
  contextWindow: 32768,
  maxTokens: 16384,
  },
- "mistralai/mistral-7b-instruct-v0.3": {
- id: "mistralai/mistral-7b-instruct-v0.3",
- name: "Mistral: Mistral 7B Instruct v0.3",
- api: "openai-completions",
- provider: "openrouter",
- baseUrl: "https://openrouter.ai/api/v1",
- reasoning: false,
- input: ["text"],
- cost: {
- input: 0.028,
- output: 0.054,
- cacheRead: 0,
- cacheWrite: 0,
- },
- contextWindow: 32768,
- maxTokens: 16384,
- },
  "microsoft/phi-3-mini-128k-instruct": {
  id: "microsoft/phi-3-mini-128k-instruct",
  name: "Microsoft: Phi-3 Mini 128K Instruct",
@@ -4062,6 +5167,57 @@ export const MODELS = {
  contextWindow: 128000,
  maxTokens: 4096,
  },
+ "openai/gpt-4o-2024-05-13": {
+ id: "openai/gpt-4o-2024-05-13",
+ name: "OpenAI: GPT-4o (2024-05-13)",
+ api: "openai-completions",
+ provider: "openrouter",
+ baseUrl: "https://openrouter.ai/api/v1",
+ reasoning: false,
+ input: ["text", "image"],
+ cost: {
+ input: 5,
+ output: 15,
+ cacheRead: 0,
+ cacheWrite: 0,
+ },
+ contextWindow: 128000,
+ maxTokens: 4096,
+ },
+ "openai/gpt-4o": {
+ id: "openai/gpt-4o",
+ name: "OpenAI: GPT-4o",
+ api: "openai-completions",
+ provider: "openrouter",
+ baseUrl: "https://openrouter.ai/api/v1",
+ reasoning: false,
+ input: ["text", "image"],
+ cost: {
+ input: 2.5,
+ output: 10,
+ cacheRead: 1.25,
+ cacheWrite: 0,
+ },
+ contextWindow: 128000,
+ maxTokens: 16384,
+ },
+ "openai/gpt-4o:extended": {
+ id: "openai/gpt-4o:extended",
+ name: "OpenAI: GPT-4o (extended)",
+ api: "openai-completions",
+ provider: "openrouter",
+ baseUrl: "https://openrouter.ai/api/v1",
+ reasoning: false,
+ input: ["text", "image"],
+ cost: {
+ input: 6,
+ output: 18,
+ cacheRead: 0,
+ cacheWrite: 0,
+ },
+ contextWindow: 128000,
+ maxTokens: 64000,
+ },
  "meta-llama/llama-3-70b-instruct": {
  id: "meta-llama/llama-3-70b-instruct",
  name: "Meta: Llama 3 70B Instruct",
@@ -4113,6 +5269,57 @@ export const MODELS = {
  contextWindow: 65536,
  maxTokens: 4096,
  },
+ "openai/gpt-4-turbo": {
+ id: "openai/gpt-4-turbo",
+ name: "OpenAI: GPT-4 Turbo",
+ api: "openai-completions",
+ provider: "openrouter",
+ baseUrl: "https://openrouter.ai/api/v1",
+ reasoning: false,
+ input: ["text", "image"],
+ cost: {
+ input: 10,
+ output: 30,
+ cacheRead: 0,
+ cacheWrite: 0,
+ },
+ contextWindow: 128000,
+ maxTokens: 4096,
+ },
+ "anthropic/claude-3-haiku": {
+ id: "anthropic/claude-3-haiku",
+ name: "Anthropic: Claude 3 Haiku",
+ api: "openai-completions",
+ provider: "openrouter",
+ baseUrl: "https://openrouter.ai/api/v1",
+ reasoning: false,
+ input: ["text", "image"],
+ cost: {
+ input: 0.25,
+ output: 1.25,
+ cacheRead: 0.03,
+ cacheWrite: 0.3,
+ },
+ contextWindow: 200000,
+ maxTokens: 4096,
+ },
+ "anthropic/claude-3-opus": {
+ id: "anthropic/claude-3-opus",
+ name: "Anthropic: Claude 3 Opus",
+ api: "openai-completions",
+ provider: "openrouter",
+ baseUrl: "https://openrouter.ai/api/v1",
+ reasoning: false,
+ input: ["text", "image"],
+ cost: {
+ input: 15,
+ output: 75,
+ cacheRead: 1.5,
+ cacheWrite: 18.75,
+ },
+ contextWindow: 200000,
+ maxTokens: 4096,
+ },
  "mistralai/mistral-large": {
  id: "mistralai/mistral-large",
  name: "Mistral Large",
@@ -4130,21 +5337,38 @@ export const MODELS = {
  contextWindow: 128000,
  maxTokens: 4096,
  },
- "mistralai/mistral-tiny": {
- id: "mistralai/mistral-tiny",
- name: "Mistral Tiny",
+ "openai/gpt-3.5-turbo-0613": {
+ id: "openai/gpt-3.5-turbo-0613",
+ name: "OpenAI: GPT-3.5 Turbo (older v0613)",
  api: "openai-completions",
  provider: "openrouter",
  baseUrl: "https://openrouter.ai/api/v1",
  reasoning: false,
  input: ["text"],
  cost: {
- input: 0.25,
- output: 0.25,
+ input: 1,
+ output: 2,
  cacheRead: 0,
  cacheWrite: 0,
  },
- contextWindow: 32768,
+ contextWindow: 4095,
+ maxTokens: 4096,
+ },
+ "openai/gpt-4-turbo-preview": {
+ id: "openai/gpt-4-turbo-preview",
+ name: "OpenAI: GPT-4 Turbo Preview",
+ api: "openai-completions",
+ provider: "openrouter",
+ baseUrl: "https://openrouter.ai/api/v1",
+ reasoning: false,
+ input: ["text"],
+ cost: {
+ input: 10,
+ output: 30,
+ cacheRead: 0,
+ cacheWrite: 0,
+ },
+ contextWindow: 128000,
  maxTokens: 4096,
  },
  "mistralai/mistral-small": {
@@ -4164,6 +5388,23 @@ export const MODELS = {
  contextWindow: 32768,
  maxTokens: 4096,
  },
+ "mistralai/mistral-tiny": {
+ id: "mistralai/mistral-tiny",
+ name: "Mistral Tiny",
+ api: "openai-completions",
+ provider: "openrouter",
+ baseUrl: "https://openrouter.ai/api/v1",
+ reasoning: false,
+ input: ["text"],
+ cost: {
+ input: 0.25,
+ output: 0.25,
+ cacheRead: 0,
+ cacheWrite: 0,
+ },
+ contextWindow: 32768,
+ maxTokens: 4096,
+ },
  "mistralai/mixtral-8x7b-instruct": {
  id: "mistralai/mixtral-8x7b-instruct",
  name: "Mistral: Mixtral 8x7B Instruct",
@@ -4181,6 +5422,23 @@ export const MODELS = {
  contextWindow: 32768,
  maxTokens: 16384,
  },
+ "openai/gpt-4-1106-preview": {
+ id: "openai/gpt-4-1106-preview",
+ name: "OpenAI: GPT-4 Turbo (older v1106)",
+ api: "openai-completions",
+ provider: "openrouter",
+ baseUrl: "https://openrouter.ai/api/v1",
+ reasoning: false,
+ input: ["text"],
+ cost: {
+ input: 10,
+ output: 30,
+ cacheRead: 0,
+ cacheWrite: 0,
+ },
+ contextWindow: 128000,
+ maxTokens: 4096,
+ },
  "mistralai/mistral-7b-instruct-v0.1": {
  id: "mistralai/mistral-7b-instruct-v0.1",
  name: "Mistral: Mistral 7B Instruct v0.1",
@@ -4198,6 +5456,74 @@ export const MODELS = {
  contextWindow: 2824,
  maxTokens: 4096,
  },
+ "openai/gpt-3.5-turbo-16k": {
+ id: "openai/gpt-3.5-turbo-16k",
+ name: "OpenAI: GPT-3.5 Turbo 16k",
+ api: "openai-completions",
+ provider: "openrouter",
+ baseUrl: "https://openrouter.ai/api/v1",
+ reasoning: false,
+ input: ["text"],
+ cost: {
+ input: 3,
+ output: 4,
+ cacheRead: 0,
+ cacheWrite: 0,
+ },
+ contextWindow: 16385,
+ maxTokens: 4096,
+ },
+ "openai/gpt-4": {
+ id: "openai/gpt-4",
+ name: "OpenAI: GPT-4",
+ api: "openai-completions",
+ provider: "openrouter",
+ baseUrl: "https://openrouter.ai/api/v1",
+ reasoning: false,
+ input: ["text"],
+ cost: {
+ input: 30,
+ output: 60,
+ cacheRead: 0,
+ cacheWrite: 0,
+ },
+ contextWindow: 8191,
+ maxTokens: 4096,
+ },
+ "openai/gpt-3.5-turbo": {
+ id: "openai/gpt-3.5-turbo",
+ name: "OpenAI: GPT-3.5 Turbo",
+ api: "openai-completions",
+ provider: "openrouter",
+ baseUrl: "https://openrouter.ai/api/v1",
+ reasoning: false,
+ input: ["text"],
+ cost: {
+ input: 0.5,
+ output: 1.5,
+ cacheRead: 0,
+ cacheWrite: 0,
+ },
+ contextWindow: 16385,
+ maxTokens: 4096,
+ },
+ "openai/gpt-4-0314": {
+ id: "openai/gpt-4-0314",
+ name: "OpenAI: GPT-4 (older v0314)",
+ api: "openai-completions",
+ provider: "openrouter",
+ baseUrl: "https://openrouter.ai/api/v1",
+ reasoning: false,
+ input: ["text"],
+ cost: {
+ input: 30,
+ output: 60,
+ cacheRead: 0,
+ cacheWrite: 0,
+ },
+ contextWindow: 8191,
+ maxTokens: 4096,
+ },
  },
  };
  //# sourceMappingURL=models.generated.js.map
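Note: the bulk of this models.generated.js change adds OpenRouter-hosted entries (openai/*, anthropic/*) and reorders or reprices the meta-llama and mistralai records; every entry shares the same shape (id, name, api, provider, baseUrl, reasoning, input, cost, contextWindow, maxTokens). The sketch below shows how such an entry might be consumed; it is illustrative only and assumes (the diff does not confirm this) that MODELS is importable from the package root, that cost figures are USD per one million tokens, and that the Usage type and cachedInputTokens field are hypothetical names, not the package's own API.

// Sketch only: estimating request cost from the generated MODELS map.
// Assumptions are noted above; none of the helper names below come from the package.
import { MODELS } from "@mariozechner/pi-ai";

interface Usage {
  inputTokens: number;
  outputTokens: number;
  cachedInputTokens?: number; // hypothetical field, for illustration
}

export function estimateCostUsd(modelId: string, usage: Usage): number {
  const model = (MODELS as Record<string, { cost: { input: number; output: number; cacheRead: number } }>)[modelId];
  if (!model) throw new Error(`Unknown model id: ${modelId}`);

  // Convert a per-million-token rate into a per-token rate.
  const perToken = (usdPerMillion: number) => usdPerMillion / 1_000_000;
  const cached = usage.cachedInputTokens ?? 0;
  const fresh = usage.inputTokens - cached;

  return (
    fresh * perToken(model.cost.input) +
    cached * perToken(model.cost.cacheRead) +
    usage.outputTokens * perToken(model.cost.output)
  );
}

// Example with the "openai/gpt-4o" entry added in this version
// (input 2.5, output 10 per million tokens):
// 12,000 prompt tokens + 800 completion tokens ≈ 0.03 + 0.008 = $0.038
console.log(estimateCostUsd("openai/gpt-4o", { inputTokens: 12_000, outputTokens: 800 }));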