@node-llm/core 1.5.4 → 1.6.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (188)
  1. package/README.md +76 -43
  2. package/dist/aliases.d.ts +4 -0
  3. package/dist/aliases.d.ts.map +1 -1
  4. package/dist/aliases.js +4 -0
  5. package/dist/chat/Chat.d.ts +28 -17
  6. package/dist/chat/Chat.d.ts.map +1 -1
  7. package/dist/chat/Chat.js +75 -42
  8. package/dist/chat/ChatOptions.d.ts +8 -9
  9. package/dist/chat/ChatOptions.d.ts.map +1 -1
  10. package/dist/chat/ChatResponse.d.ts +26 -1
  11. package/dist/chat/ChatResponse.d.ts.map +1 -1
  12. package/dist/chat/ChatResponse.js +54 -8
  13. package/dist/chat/ChatStream.d.ts.map +1 -1
  14. package/dist/chat/ChatStream.js +14 -21
  15. package/dist/chat/Content.d.ts +3 -3
  16. package/dist/chat/Content.d.ts.map +1 -1
  17. package/dist/chat/Content.js +3 -6
  18. package/dist/chat/Message.d.ts +3 -1
  19. package/dist/chat/Message.d.ts.map +1 -1
  20. package/dist/chat/Role.d.ts.map +1 -1
  21. package/dist/chat/Tool.d.ts +8 -8
  22. package/dist/chat/Tool.d.ts.map +1 -1
  23. package/dist/chat/Tool.js +9 -7
  24. package/dist/chat/ToolHandler.d.ts +4 -3
  25. package/dist/chat/ToolHandler.d.ts.map +1 -1
  26. package/dist/chat/ToolHandler.js +10 -15
  27. package/dist/chat/Validation.d.ts.map +1 -1
  28. package/dist/chat/Validation.js +9 -3
  29. package/dist/config.d.ts +4 -0
  30. package/dist/config.d.ts.map +1 -1
  31. package/dist/config.js +80 -25
  32. package/dist/constants.js +1 -1
  33. package/dist/errors/index.d.ts +21 -7
  34. package/dist/errors/index.d.ts.map +1 -1
  35. package/dist/errors/index.js +14 -0
  36. package/dist/index.d.ts +1 -1
  37. package/dist/index.d.ts.map +1 -1
  38. package/dist/index.js +1 -1
  39. package/dist/llm.d.ts +44 -46
  40. package/dist/llm.d.ts.map +1 -1
  41. package/dist/llm.js +201 -130
  42. package/dist/model_aliases.d.ts.map +1 -1
  43. package/dist/models/ModelRegistry.d.ts.map +1 -1
  44. package/dist/models/ModelRegistry.js +13 -10
  45. package/dist/models/PricingRegistry.d.ts +31 -0
  46. package/dist/models/PricingRegistry.d.ts.map +1 -0
  47. package/dist/models/PricingRegistry.js +109 -0
  48. package/dist/models/models.d.ts.map +1 -1
  49. package/dist/models/models.js +230 -138
  50. package/dist/models/types.d.ts +37 -34
  51. package/dist/models/types.d.ts.map +1 -1
  52. package/dist/moderation/Moderation.d.ts.map +1 -1
  53. package/dist/moderation/Moderation.js +15 -5
  54. package/dist/providers/BaseProvider.d.ts +12 -8
  55. package/dist/providers/BaseProvider.d.ts.map +1 -1
  56. package/dist/providers/BaseProvider.js +17 -7
  57. package/dist/providers/Provider.d.ts +20 -5
  58. package/dist/providers/Provider.d.ts.map +1 -1
  59. package/dist/providers/anthropic/AnthropicProvider.d.ts +1 -1
  60. package/dist/providers/anthropic/AnthropicProvider.d.ts.map +1 -1
  61. package/dist/providers/anthropic/AnthropicProvider.js +3 -3
  62. package/dist/providers/anthropic/Capabilities.d.ts +2 -1
  63. package/dist/providers/anthropic/Capabilities.d.ts.map +1 -1
  64. package/dist/providers/anthropic/Capabilities.js +3 -20
  65. package/dist/providers/anthropic/Chat.d.ts.map +1 -1
  66. package/dist/providers/anthropic/Chat.js +27 -17
  67. package/dist/providers/anthropic/Errors.d.ts.map +1 -1
  68. package/dist/providers/anthropic/Errors.js +5 -2
  69. package/dist/providers/anthropic/Models.d.ts.map +1 -1
  70. package/dist/providers/anthropic/Models.js +6 -6
  71. package/dist/providers/anthropic/Streaming.d.ts.map +1 -1
  72. package/dist/providers/anthropic/Streaming.js +17 -12
  73. package/dist/providers/anthropic/Utils.js +8 -5
  74. package/dist/providers/anthropic/index.d.ts.map +1 -1
  75. package/dist/providers/anthropic/index.js +4 -3
  76. package/dist/providers/anthropic/types.d.ts +11 -4
  77. package/dist/providers/anthropic/types.d.ts.map +1 -1
  78. package/dist/providers/deepseek/Capabilities.d.ts +7 -5
  79. package/dist/providers/deepseek/Capabilities.d.ts.map +1 -1
  80. package/dist/providers/deepseek/Capabilities.js +9 -5
  81. package/dist/providers/deepseek/Chat.d.ts.map +1 -1
  82. package/dist/providers/deepseek/Chat.js +10 -9
  83. package/dist/providers/deepseek/DeepSeekProvider.d.ts +1 -1
  84. package/dist/providers/deepseek/DeepSeekProvider.d.ts.map +1 -1
  85. package/dist/providers/deepseek/DeepSeekProvider.js +4 -4
  86. package/dist/providers/deepseek/Models.d.ts.map +1 -1
  87. package/dist/providers/deepseek/Models.js +7 -7
  88. package/dist/providers/deepseek/Streaming.d.ts.map +1 -1
  89. package/dist/providers/deepseek/Streaming.js +11 -8
  90. package/dist/providers/deepseek/index.d.ts.map +1 -1
  91. package/dist/providers/deepseek/index.js +5 -4
  92. package/dist/providers/gemini/Capabilities.d.ts +5 -33
  93. package/dist/providers/gemini/Capabilities.d.ts.map +1 -1
  94. package/dist/providers/gemini/Capabilities.js +7 -30
  95. package/dist/providers/gemini/Chat.d.ts.map +1 -1
  96. package/dist/providers/gemini/Chat.js +24 -19
  97. package/dist/providers/gemini/ChatUtils.d.ts.map +1 -1
  98. package/dist/providers/gemini/ChatUtils.js +10 -10
  99. package/dist/providers/gemini/Embeddings.d.ts.map +1 -1
  100. package/dist/providers/gemini/Embeddings.js +2 -2
  101. package/dist/providers/gemini/Errors.d.ts.map +1 -1
  102. package/dist/providers/gemini/Errors.js +5 -2
  103. package/dist/providers/gemini/GeminiProvider.d.ts +1 -1
  104. package/dist/providers/gemini/GeminiProvider.d.ts.map +1 -1
  105. package/dist/providers/gemini/GeminiProvider.js +3 -3
  106. package/dist/providers/gemini/Image.d.ts.map +1 -1
  107. package/dist/providers/gemini/Image.js +7 -7
  108. package/dist/providers/gemini/Models.d.ts.map +1 -1
  109. package/dist/providers/gemini/Models.js +6 -6
  110. package/dist/providers/gemini/Streaming.d.ts.map +1 -1
  111. package/dist/providers/gemini/Streaming.js +18 -14
  112. package/dist/providers/gemini/Transcription.d.ts.map +1 -1
  113. package/dist/providers/gemini/Transcription.js +11 -11
  114. package/dist/providers/gemini/index.d.ts +1 -1
  115. package/dist/providers/gemini/index.d.ts.map +1 -1
  116. package/dist/providers/gemini/index.js +5 -4
  117. package/dist/providers/gemini/types.d.ts +4 -4
  118. package/dist/providers/gemini/types.d.ts.map +1 -1
  119. package/dist/providers/ollama/Capabilities.d.ts.map +1 -1
  120. package/dist/providers/ollama/Capabilities.js +6 -2
  121. package/dist/providers/ollama/Models.d.ts.map +1 -1
  122. package/dist/providers/ollama/Models.js +1 -1
  123. package/dist/providers/ollama/OllamaProvider.d.ts +1 -1
  124. package/dist/providers/ollama/OllamaProvider.d.ts.map +1 -1
  125. package/dist/providers/ollama/OllamaProvider.js +2 -2
  126. package/dist/providers/ollama/index.d.ts +1 -1
  127. package/dist/providers/ollama/index.d.ts.map +1 -1
  128. package/dist/providers/ollama/index.js +7 -3
  129. package/dist/providers/openai/Capabilities.d.ts +2 -1
  130. package/dist/providers/openai/Capabilities.d.ts.map +1 -1
  131. package/dist/providers/openai/Capabilities.js +9 -21
  132. package/dist/providers/openai/Chat.d.ts.map +1 -1
  133. package/dist/providers/openai/Chat.js +18 -15
  134. package/dist/providers/openai/Embedding.d.ts.map +1 -1
  135. package/dist/providers/openai/Embedding.js +11 -7
  136. package/dist/providers/openai/Errors.d.ts.map +1 -1
  137. package/dist/providers/openai/Errors.js +5 -2
  138. package/dist/providers/openai/Image.d.ts.map +1 -1
  139. package/dist/providers/openai/Image.js +6 -6
  140. package/dist/providers/openai/Models.d.ts +1 -1
  141. package/dist/providers/openai/Models.d.ts.map +1 -1
  142. package/dist/providers/openai/Models.js +12 -8
  143. package/dist/providers/openai/Moderation.d.ts.map +1 -1
  144. package/dist/providers/openai/Moderation.js +6 -6
  145. package/dist/providers/openai/OpenAIProvider.d.ts +2 -3
  146. package/dist/providers/openai/OpenAIProvider.d.ts.map +1 -1
  147. package/dist/providers/openai/OpenAIProvider.js +4 -4
  148. package/dist/providers/openai/Streaming.d.ts.map +1 -1
  149. package/dist/providers/openai/Streaming.js +18 -13
  150. package/dist/providers/openai/Transcription.d.ts.map +1 -1
  151. package/dist/providers/openai/Transcription.js +15 -12
  152. package/dist/providers/openai/index.d.ts +1 -1
  153. package/dist/providers/openai/index.d.ts.map +1 -1
  154. package/dist/providers/openai/index.js +6 -5
  155. package/dist/providers/openai/types.d.ts +1 -1
  156. package/dist/providers/openai/utils.js +2 -2
  157. package/dist/providers/openrouter/Capabilities.d.ts +3 -3
  158. package/dist/providers/openrouter/Capabilities.d.ts.map +1 -1
  159. package/dist/providers/openrouter/Capabilities.js +21 -24
  160. package/dist/providers/openrouter/Models.d.ts.map +1 -1
  161. package/dist/providers/openrouter/Models.js +20 -16
  162. package/dist/providers/openrouter/OpenRouterProvider.d.ts.map +1 -1
  163. package/dist/providers/openrouter/OpenRouterProvider.js +1 -1
  164. package/dist/providers/openrouter/index.d.ts +1 -1
  165. package/dist/providers/openrouter/index.d.ts.map +1 -1
  166. package/dist/providers/openrouter/index.js +6 -5
  167. package/dist/providers/registry.d.ts +18 -2
  168. package/dist/providers/registry.d.ts.map +1 -1
  169. package/dist/providers/registry.js +17 -2
  170. package/dist/providers/utils.js +1 -1
  171. package/dist/schema/Schema.d.ts +3 -3
  172. package/dist/schema/Schema.d.ts.map +1 -1
  173. package/dist/schema/Schema.js +2 -2
  174. package/dist/schema/to-json-schema.d.ts +1 -1
  175. package/dist/schema/to-json-schema.d.ts.map +1 -1
  176. package/dist/streaming/Stream.d.ts.map +1 -1
  177. package/dist/streaming/Stream.js +3 -3
  178. package/dist/utils/Binary.d.ts.map +1 -1
  179. package/dist/utils/Binary.js +23 -13
  180. package/dist/utils/FileLoader.d.ts.map +1 -1
  181. package/dist/utils/FileLoader.js +25 -4
  182. package/dist/utils/audio.js +1 -1
  183. package/dist/utils/fetch.d.ts.map +1 -1
  184. package/dist/utils/fetch.js +3 -2
  185. package/dist/utils/logger.d.ts +3 -3
  186. package/dist/utils/logger.d.ts.map +1 -1
  187. package/dist/utils/logger.js +2 -2
  188. package/package.json +1 -1
@@ -1399,7 +1399,7 @@ export const modelsData = [
1399
1399
  "id": "codex-mini-latest",
1400
1400
  "name": "Codex Mini",
1401
1401
  "provider": "openai",
1402
- "family": "codex",
1402
+ "family": "gpt-codex-mini",
1403
1403
  "created_at": "2025-05-16 00:00:00 UTC",
1404
1404
  "context_window": 200000,
1405
1405
  "max_output_tokens": 100000,
@@ -1527,7 +1527,7 @@ export const modelsData = [
1527
1527
  "id": "deepseek-chat",
1528
1528
  "name": "DeepSeek Chat",
1529
1529
  "provider": "deepseek",
1530
- "family": "deepseek-chat",
1530
+ "family": "deepseek",
1531
1531
  "created_at": "2024-12-26 00:00:00 UTC",
1532
1532
  "context_window": 128000,
1533
1533
  "max_output_tokens": 8192,
@@ -1571,7 +1571,7 @@ export const modelsData = [
1571
1571
  "id": "deepseek-reasoner",
1572
1572
  "name": "DeepSeek Reasoner",
1573
1573
  "provider": "deepseek",
1574
- "family": "deepseek",
1574
+ "family": "deepseek-thinking",
1575
1575
  "created_at": "2025-01-20 00:00:00 UTC",
1576
1576
  "context_window": 128000,
1577
1577
  "max_output_tokens": 128000,
@@ -1616,7 +1616,7 @@ export const modelsData = [
1616
1616
  "id": "deepseek/deepseek-chat-v3-0324",
1617
1617
  "name": "DeepSeek V3 0324",
1618
1618
  "provider": "openrouter",
1619
- "family": "deepseek-v3",
1619
+ "family": "deepseek",
1620
1620
  "created_at": "2025-03-24 00:00:00 UTC",
1621
1621
  "context_window": 16384,
1622
1622
  "max_output_tokens": 8192,
@@ -1653,7 +1653,7 @@ export const modelsData = [
1653
1653
  "id": "deepseek/deepseek-chat-v3.1",
1654
1654
  "name": "DeepSeek-V3.1",
1655
1655
  "provider": "openrouter",
1656
- "family": "deepseek-v3",
1656
+ "family": "deepseek",
1657
1657
  "created_at": "2025-08-21 00:00:00 UTC",
1658
1658
  "context_window": 163840,
1659
1659
  "max_output_tokens": 163840,
@@ -1695,7 +1695,7 @@ export const modelsData = [
1695
1695
  "id": "deepseek/deepseek-r1-0528-qwen3-8b:free",
1696
1696
  "name": "Deepseek R1 0528 Qwen3 8B (free)",
1697
1697
  "provider": "openrouter",
1698
- "family": "qwen3",
1698
+ "family": "qwen",
1699
1699
  "created_at": "2025-05-29 00:00:00 UTC",
1700
1700
  "context_window": 131072,
1701
1701
  "max_output_tokens": 131072,
@@ -1737,7 +1737,7 @@ export const modelsData = [
1737
1737
  "id": "deepseek/deepseek-r1-0528:free",
1738
1738
  "name": "R1 0528 (free)",
1739
1739
  "provider": "openrouter",
1740
- "family": "deepseek-r1",
1740
+ "family": "deepseek",
1741
1741
  "created_at": "2025-05-28 00:00:00 UTC",
1742
1742
  "context_window": 163840,
1743
1743
  "max_output_tokens": 163840,
@@ -1779,7 +1779,7 @@ export const modelsData = [
1779
1779
  "id": "deepseek/deepseek-r1-distill-llama-70b",
1780
1780
  "name": "DeepSeek R1 Distill Llama 70B",
1781
1781
  "provider": "openrouter",
1782
- "family": "deepseek-r1-distill-llama",
1782
+ "family": "deepseek-thinking",
1783
1783
  "created_at": "2025-01-23 00:00:00 UTC",
1784
1784
  "context_window": 8192,
1785
1785
  "max_output_tokens": 8192,
@@ -1855,7 +1855,7 @@ export const modelsData = [
1855
1855
  "id": "deepseek/deepseek-r1:free",
1856
1856
  "name": "R1 (free)",
1857
1857
  "provider": "openrouter",
1858
- "family": "deepseek-r1",
1858
+ "family": "deepseek",
1859
1859
  "created_at": "2025-01-20 00:00:00 UTC",
1860
1860
  "context_window": 163840,
1861
1861
  "max_output_tokens": 163840,
@@ -1897,7 +1897,7 @@ export const modelsData = [
1897
1897
  "id": "deepseek/deepseek-v3-base:free",
1898
1898
  "name": "DeepSeek V3 Base (free)",
1899
1899
  "provider": "openrouter",
1900
- "family": "deepseek-v3",
1900
+ "family": "deepseek",
1901
1901
  "created_at": "2025-03-29 00:00:00 UTC",
1902
1902
  "context_window": 163840,
1903
1903
  "max_output_tokens": 163840,
@@ -1934,7 +1934,7 @@ export const modelsData = [
1934
1934
  "id": "deepseek/deepseek-v3.1-terminus",
1935
1935
  "name": "DeepSeek V3.1 Terminus",
1936
1936
  "provider": "openrouter",
1937
- "family": "deepseek-v3",
1937
+ "family": "deepseek",
1938
1938
  "created_at": "2025-09-22 00:00:00 UTC",
1939
1939
  "context_window": 131072,
1940
1940
  "max_output_tokens": 65536,
@@ -1976,7 +1976,7 @@ export const modelsData = [
1976
1976
  "id": "deepseek/deepseek-v3.1-terminus:exacto",
1977
1977
  "name": "DeepSeek V3.1 Terminus (exacto)",
1978
1978
  "provider": "openrouter",
1979
- "family": "deepseek-v3",
1979
+ "family": "deepseek",
1980
1980
  "created_at": "2025-09-22 00:00:00 UTC",
1981
1981
  "context_window": 131072,
1982
1982
  "max_output_tokens": 65536,
@@ -2018,7 +2018,7 @@ export const modelsData = [
2018
2018
  "id": "deepseek/deepseek-v3.2",
2019
2019
  "name": "DeepSeek V3.2",
2020
2020
  "provider": "openrouter",
2021
- "family": "deepseek-v3",
2021
+ "family": "deepseek",
2022
2022
  "created_at": "2025-12-01 00:00:00 UTC",
2023
2023
  "context_window": 163840,
2024
2024
  "max_output_tokens": 65536,
@@ -2060,7 +2060,7 @@ export const modelsData = [
2060
2060
  "id": "deepseek/deepseek-v3.2-speciale",
2061
2061
  "name": "DeepSeek V3.2 Speciale",
2062
2062
  "provider": "openrouter",
2063
- "family": "deepseek-v3",
2063
+ "family": "deepseek",
2064
2064
  "created_at": "2025-12-01 00:00:00 UTC",
2065
2065
  "context_window": 163840,
2066
2066
  "max_output_tokens": 65536,
@@ -2577,7 +2577,7 @@ export const modelsData = [
2577
2577
  "id": "gemini-2.5-flash-image",
2578
2578
  "name": "Gemini 2.5 Flash Image",
2579
2579
  "provider": "gemini",
2580
- "family": "gemini-flash-image",
2580
+ "family": "gemini-flash",
2581
2581
  "created_at": "2025-08-26 00:00:00 UTC",
2582
2582
  "context_window": 32768,
2583
2583
  "max_output_tokens": 32768,
@@ -2621,7 +2621,7 @@ export const modelsData = [
2621
2621
  "id": "gemini-2.5-flash-image-preview",
2622
2622
  "name": "Gemini 2.5 Flash Image (Preview)",
2623
2623
  "provider": "gemini",
2624
- "family": "gemini-flash-image",
2624
+ "family": "gemini-flash",
2625
2625
  "created_at": "2025-08-26 00:00:00 UTC",
2626
2626
  "context_window": 32768,
2627
2627
  "max_output_tokens": 32768,
@@ -3268,7 +3268,7 @@ export const modelsData = [
3268
3268
  "id": "gemini-2.5-flash-preview-tts",
3269
3269
  "name": "Gemini 2.5 Flash Preview TTS",
3270
3270
  "provider": "gemini",
3271
- "family": "gemini-flash-tts",
3271
+ "family": "gemini-flash",
3272
3272
  "created_at": "2025-05-01 00:00:00 UTC",
3273
3273
  "context_window": 8000,
3274
3274
  "max_output_tokens": 16000,
@@ -3606,7 +3606,7 @@ export const modelsData = [
3606
3606
  "id": "gemini-2.5-pro-preview-tts",
3607
3607
  "name": "Gemini 2.5 Pro Preview TTS",
3608
3608
  "provider": "gemini",
3609
- "family": "gemini-flash-tts",
3609
+ "family": "gemini-flash",
3610
3610
  "created_at": "2025-05-01 00:00:00 UTC",
3611
3611
  "context_window": 8000,
3612
3612
  "max_output_tokens": 16000,
@@ -4780,7 +4780,7 @@ export const modelsData = [
4780
4780
  "id": "google/gemma-2-9b-it:free",
4781
4781
  "name": "Gemma 2 9B (free)",
4782
4782
  "provider": "openrouter",
4783
- "family": "gemma-2",
4783
+ "family": "gemma",
4784
4784
  "created_at": "2024-06-28 00:00:00 UTC",
4785
4785
  "context_window": 8192,
4786
4786
  "max_output_tokens": 8192,
@@ -4821,7 +4821,7 @@ export const modelsData = [
4821
4821
  "id": "google/gemma-3-12b-it",
4822
4822
  "name": "Gemma 3 12B IT",
4823
4823
  "provider": "openrouter",
4824
- "family": "gemma-3",
4824
+ "family": "gemma",
4825
4825
  "created_at": "2025-03-13 00:00:00 UTC",
4826
4826
  "context_window": 96000,
4827
4827
  "max_output_tokens": 8192,
@@ -4864,7 +4864,7 @@ export const modelsData = [
4864
4864
  "id": "google/gemma-3-27b-it",
4865
4865
  "name": "Gemma 3 27B IT",
4866
4866
  "provider": "openrouter",
4867
- "family": "gemma-3",
4867
+ "family": "gemma",
4868
4868
  "created_at": "2025-03-12 00:00:00 UTC",
4869
4869
  "context_window": 96000,
4870
4870
  "max_output_tokens": 8192,
@@ -4907,7 +4907,7 @@ export const modelsData = [
4907
4907
  "id": "google/gemma-3n-e4b-it",
4908
4908
  "name": "Gemma 3n E4B IT",
4909
4909
  "provider": "openrouter",
4910
- "family": "gemma-3",
4910
+ "family": "gemma",
4911
4911
  "created_at": "2025-05-20 00:00:00 UTC",
4912
4912
  "context_window": 8192,
4913
4913
  "max_output_tokens": 8192,
@@ -4948,7 +4948,7 @@ export const modelsData = [
4948
4948
  "id": "google/gemma-3n-e4b-it:free",
4949
4949
  "name": "Gemma 3n 4B (free)",
4950
4950
  "provider": "openrouter",
4951
- "family": "gemma-3",
4951
+ "family": "gemma",
4952
4952
  "created_at": "2025-05-20 00:00:00 UTC",
4953
4953
  "context_window": 8192,
4954
4954
  "max_output_tokens": 8192,
@@ -4993,7 +4993,7 @@ export const modelsData = [
4993
4993
  "id": "gpt-3.5-turbo",
4994
4994
  "name": "GPT-3.5-turbo",
4995
4995
  "provider": "openai",
4996
- "family": "gpt-3.5-turbo",
4996
+ "family": "gpt",
4997
4997
  "created_at": "2023-03-01 00:00:00 UTC",
4998
4998
  "context_window": 16385,
4999
4999
  "max_output_tokens": 4096,
@@ -5032,7 +5032,7 @@ export const modelsData = [
5032
5032
  "id": "gpt-4",
5033
5033
  "name": "GPT-4",
5034
5034
  "provider": "openai",
5035
- "family": "gpt-4",
5035
+ "family": "gpt",
5036
5036
  "created_at": "2023-11-06 00:00:00 UTC",
5037
5037
  "context_window": 8192,
5038
5038
  "max_output_tokens": 8192,
@@ -5074,7 +5074,7 @@ export const modelsData = [
5074
5074
  "id": "gpt-4-turbo",
5075
5075
  "name": "GPT-4 Turbo",
5076
5076
  "provider": "openai",
5077
- "family": "gpt-4-turbo",
5077
+ "family": "gpt",
5078
5078
  "created_at": "2023-11-06 00:00:00 UTC",
5079
5079
  "context_window": 128000,
5080
5080
  "max_output_tokens": 4096,
@@ -5117,7 +5117,7 @@ export const modelsData = [
5117
5117
  "id": "gpt-4.1",
5118
5118
  "name": "GPT-4.1",
5119
5119
  "provider": "openai",
5120
- "family": "gpt-4.1",
5120
+ "family": "gpt",
5121
5121
  "created_at": "2025-04-14 00:00:00 UTC",
5122
5122
  "context_window": 1047576,
5123
5123
  "max_output_tokens": 32768,
@@ -5162,7 +5162,7 @@ export const modelsData = [
5162
5162
  "id": "gpt-4.1-mini",
5163
5163
  "name": "GPT-4.1 mini",
5164
5164
  "provider": "openai",
5165
- "family": "gpt-4.1-mini",
5165
+ "family": "gpt-mini",
5166
5166
  "created_at": "2025-04-14 00:00:00 UTC",
5167
5167
  "context_window": 1047576,
5168
5168
  "max_output_tokens": 32768,
@@ -5207,7 +5207,7 @@ export const modelsData = [
5207
5207
  "id": "gpt-4.1-nano",
5208
5208
  "name": "GPT-4.1 nano",
5209
5209
  "provider": "openai",
5210
- "family": "gpt-4.1-nano",
5210
+ "family": "gpt-nano",
5211
5211
  "created_at": "2025-04-14 00:00:00 UTC",
5212
5212
  "context_window": 1047576,
5213
5213
  "max_output_tokens": 32768,
@@ -5252,7 +5252,7 @@ export const modelsData = [
5252
5252
  "id": "gpt-4o",
5253
5253
  "name": "GPT-4o",
5254
5254
  "provider": "openai",
5255
- "family": "gpt-4o",
5255
+ "family": "gpt",
5256
5256
  "created_at": "2024-05-13 00:00:00 UTC",
5257
5257
  "context_window": 128000,
5258
5258
  "max_output_tokens": 16384,
@@ -5297,7 +5297,7 @@ export const modelsData = [
5297
5297
  "id": "gpt-4o-2024-05-13",
5298
5298
  "name": "GPT-4o (2024-05-13)",
5299
5299
  "provider": "openai",
5300
- "family": "gpt-4o",
5300
+ "family": "gpt",
5301
5301
  "created_at": "2024-05-13 00:00:00 UTC",
5302
5302
  "context_window": 128000,
5303
5303
  "max_output_tokens": 4096,
@@ -5340,7 +5340,7 @@ export const modelsData = [
5340
5340
  "id": "gpt-4o-2024-08-06",
5341
5341
  "name": "GPT-4o (2024-08-06)",
5342
5342
  "provider": "openai",
5343
- "family": "gpt-4o",
5343
+ "family": "gpt",
5344
5344
  "created_at": "2024-08-06 00:00:00 UTC",
5345
5345
  "context_window": 128000,
5346
5346
  "max_output_tokens": 16384,
@@ -5385,7 +5385,7 @@ export const modelsData = [
5385
5385
  "id": "gpt-4o-2024-11-20",
5386
5386
  "name": "GPT-4o (2024-11-20)",
5387
5387
  "provider": "openai",
5388
- "family": "gpt-4o",
5388
+ "family": "gpt",
5389
5389
  "created_at": "2024-11-20 00:00:00 UTC",
5390
5390
  "context_window": 128000,
5391
5391
  "max_output_tokens": 16384,
@@ -5430,7 +5430,7 @@ export const modelsData = [
5430
5430
  "id": "gpt-4o-mini",
5431
5431
  "name": "GPT-4o mini",
5432
5432
  "provider": "openai",
5433
- "family": "gpt-4o-mini",
5433
+ "family": "gpt-mini",
5434
5434
  "created_at": "2024-07-18 00:00:00 UTC",
5435
5435
  "context_window": 128000,
5436
5436
  "max_output_tokens": 16384,
@@ -5475,7 +5475,7 @@ export const modelsData = [
5475
5475
  "id": "gpt-5",
5476
5476
  "name": "GPT-5",
5477
5477
  "provider": "openai",
5478
- "family": "gpt-5",
5478
+ "family": "gpt",
5479
5479
  "created_at": "2025-08-07 00:00:00 UTC",
5480
5480
  "context_window": 400000,
5481
5481
  "max_output_tokens": 128000,
@@ -5521,7 +5521,7 @@ export const modelsData = [
5521
5521
  "id": "gpt-5-chat-latest",
5522
5522
  "name": "GPT-5 Chat (latest)",
5523
5523
  "provider": "openai",
5524
- "family": "gpt-5-chat",
5524
+ "family": "gpt-codex",
5525
5525
  "created_at": "2025-08-07 00:00:00 UTC",
5526
5526
  "context_window": 400000,
5527
5527
  "max_output_tokens": 128000,
@@ -5561,7 +5561,7 @@ export const modelsData = [
5561
5561
  "id": "gpt-5-codex",
5562
5562
  "name": "GPT-5-Codex",
5563
5563
  "provider": "openai",
5564
- "family": "gpt-5-codex",
5564
+ "family": "gpt-codex",
5565
5565
  "created_at": "2025-09-15 00:00:00 UTC",
5566
5566
  "context_window": 400000,
5567
5567
  "max_output_tokens": 128000,
@@ -5607,7 +5607,7 @@ export const modelsData = [
5607
5607
  "id": "gpt-5-mini",
5608
5608
  "name": "GPT-5 Mini",
5609
5609
  "provider": "openai",
5610
- "family": "gpt-5-mini",
5610
+ "family": "gpt-mini",
5611
5611
  "created_at": "2025-08-07 00:00:00 UTC",
5612
5612
  "context_window": 400000,
5613
5613
  "max_output_tokens": 128000,
@@ -5653,7 +5653,7 @@ export const modelsData = [
5653
5653
  "id": "gpt-5-nano",
5654
5654
  "name": "GPT-5 Nano",
5655
5655
  "provider": "openai",
5656
- "family": "gpt-5-nano",
5656
+ "family": "gpt-nano",
5657
5657
  "created_at": "2025-08-07 00:00:00 UTC",
5658
5658
  "context_window": 400000,
5659
5659
  "max_output_tokens": 128000,
@@ -5699,7 +5699,7 @@ export const modelsData = [
5699
5699
  "id": "gpt-5-pro",
5700
5700
  "name": "GPT-5 Pro",
5701
5701
  "provider": "openai",
5702
- "family": "gpt-5-pro",
5702
+ "family": "gpt-pro",
5703
5703
  "created_at": "2025-10-06 00:00:00 UTC",
5704
5704
  "context_window": 400000,
5705
5705
  "max_output_tokens": 272000,
@@ -5743,7 +5743,7 @@ export const modelsData = [
5743
5743
  "id": "gpt-5.1",
5744
5744
  "name": "GPT-5.1",
5745
5745
  "provider": "openai",
5746
- "family": "gpt-5",
5746
+ "family": "gpt",
5747
5747
  "created_at": "2025-11-13 00:00:00 UTC",
5748
5748
  "context_window": 400000,
5749
5749
  "max_output_tokens": 128000,
@@ -5789,7 +5789,7 @@ export const modelsData = [
5789
5789
  "id": "gpt-5.1-chat-latest",
5790
5790
  "name": "GPT-5.1 Chat",
5791
5791
  "provider": "openai",
5792
- "family": "gpt-5-chat",
5792
+ "family": "gpt-codex",
5793
5793
  "created_at": "2025-11-13 00:00:00 UTC",
5794
5794
  "context_window": 128000,
5795
5795
  "max_output_tokens": 16384,
@@ -5835,7 +5835,7 @@ export const modelsData = [
5835
5835
  "id": "gpt-5.1-codex",
5836
5836
  "name": "GPT-5.1 Codex",
5837
5837
  "provider": "openai",
5838
- "family": "gpt-5-codex",
5838
+ "family": "gpt-codex",
5839
5839
  "created_at": "2025-11-13 00:00:00 UTC",
5840
5840
  "context_window": 400000,
5841
5841
  "max_output_tokens": 128000,
@@ -5871,7 +5871,7 @@ export const modelsData = [
5871
5871
  },
5872
5872
  "metadata": {
5873
5873
  "source": "models.dev",
5874
- "input": 1.25,
5874
+ "input": 272000,
5875
5875
  "output": 128000,
5876
5876
  "cache_read": 0.125,
5877
5877
  "context": 400000
@@ -5881,7 +5881,7 @@ export const modelsData = [
5881
5881
  "id": "gpt-5.1-codex-max",
5882
5882
  "name": "GPT-5.1 Codex Max",
5883
5883
  "provider": "openai",
5884
- "family": "gpt-5-codex",
5884
+ "family": "gpt-codex",
5885
5885
  "created_at": "2025-11-13 00:00:00 UTC",
5886
5886
  "context_window": 400000,
5887
5887
  "max_output_tokens": 128000,
@@ -5917,7 +5917,7 @@ export const modelsData = [
5917
5917
  },
5918
5918
  "metadata": {
5919
5919
  "source": "models.dev",
5920
- "input": 1.25,
5920
+ "input": 272000,
5921
5921
  "output": 128000,
5922
5922
  "cache_read": 0.125,
5923
5923
  "context": 400000
@@ -5927,7 +5927,7 @@ export const modelsData = [
5927
5927
  "id": "gpt-5.1-codex-mini",
5928
5928
  "name": "GPT-5.1 Codex mini",
5929
5929
  "provider": "openai",
5930
- "family": "gpt-5-codex-mini",
5930
+ "family": "gpt-codex",
5931
5931
  "created_at": "2025-11-13 00:00:00 UTC",
5932
5932
  "context_window": 400000,
5933
5933
  "max_output_tokens": 128000,
@@ -5963,7 +5963,7 @@ export const modelsData = [
5963
5963
  },
5964
5964
  "metadata": {
5965
5965
  "source": "models.dev",
5966
- "input": 0.25,
5966
+ "input": 272000,
5967
5967
  "output": 128000,
5968
5968
  "cache_read": 0.025,
5969
5969
  "context": 400000
@@ -5973,7 +5973,7 @@ export const modelsData = [
5973
5973
  "id": "gpt-5.2",
5974
5974
  "name": "GPT-5.2",
5975
5975
  "provider": "openai",
5976
- "family": "gpt-5",
5976
+ "family": "gpt",
5977
5977
  "created_at": "2025-12-11 00:00:00 UTC",
5978
5978
  "context_window": 400000,
5979
5979
  "max_output_tokens": 128000,
@@ -6019,7 +6019,7 @@ export const modelsData = [
6019
6019
  "id": "gpt-5.2-chat-latest",
6020
6020
  "name": "GPT-5.2 Chat",
6021
6021
  "provider": "openai",
6022
- "family": "gpt-5-chat",
6022
+ "family": "gpt-codex",
6023
6023
  "created_at": "2025-12-11 00:00:00 UTC",
6024
6024
  "context_window": 128000,
6025
6025
  "max_output_tokens": 16384,
@@ -6061,11 +6061,57 @@ export const modelsData = [
6061
6061
  "context": 128000
6062
6062
  }
6063
6063
  },
6064
+ {
6065
+ "id": "gpt-5.2-codex",
6066
+ "name": "GPT-5.2 Codex",
6067
+ "provider": "openai",
6068
+ "family": "gpt-codex",
6069
+ "created_at": "2025-12-11 00:00:00 UTC",
6070
+ "context_window": 400000,
6071
+ "max_output_tokens": 128000,
6072
+ "knowledge_cutoff": "2025-08-31",
6073
+ "modalities": {
6074
+ "input": [
6075
+ "text",
6076
+ "image"
6077
+ ],
6078
+ "output": [
6079
+ "text"
6080
+ ]
6081
+ },
6082
+ "capabilities": [
6083
+ "streaming",
6084
+ "reasoning",
6085
+ "chat",
6086
+ "vision",
6087
+ "function_calling",
6088
+ "tools",
6089
+ "structured_output",
6090
+ "json_mode"
6091
+ ],
6092
+ "pricing": {
6093
+ "text_tokens": {
6094
+ "standard": {
6095
+ "input_per_million": 1.75,
6096
+ "output_per_million": 14,
6097
+ "cached_input_per_million": 0.175,
6098
+ "reasoning_output_per_million": 14
6099
+ }
6100
+ }
6101
+ },
6102
+ "metadata": {
6103
+ "source": "models.dev",
6104
+ "input": 272000,
6105
+ "output": 128000,
6106
+ "cache_read": 0.175,
6107
+ "context": 400000
6108
+ }
6109
+ },
6064
6110
  {
6065
6111
  "id": "gpt-5.2-pro",
6066
6112
  "name": "GPT-5.2 Pro",
6067
6113
  "provider": "openai",
6068
- "family": "gpt-5-pro",
6114
+ "family": "gpt-pro",
6069
6115
  "created_at": "2025-12-11 00:00:00 UTC",
6070
6116
  "context_window": 400000,
6071
6117
  "max_output_tokens": 128000,
@@ -6109,7 +6155,7 @@ export const modelsData = [
6109
6155
  "id": "kwaipilot/kat-coder-pro:free",
6110
6156
  "name": "Kat Coder Pro (free)",
6111
6157
  "provider": "openrouter",
6112
- "family": "kat-coder-pro",
6158
+ "family": "kat-coder",
6113
6159
  "created_at": "2025-11-10 00:00:00 UTC",
6114
6160
  "context_window": 256000,
6115
6161
  "max_output_tokens": 65536,
@@ -6150,7 +6196,7 @@ export const modelsData = [
6150
6196
  "id": "meta-llama/llama-3.2-11b-vision-instruct",
6151
6197
  "name": "Llama 3.2 11B Vision Instruct",
6152
6198
  "provider": "openrouter",
6153
- "family": "llama-3.2",
6199
+ "family": "llama",
6154
6200
  "created_at": "2024-09-25 00:00:00 UTC",
6155
6201
  "context_window": 131072,
6156
6202
  "max_output_tokens": 8192,
@@ -6189,7 +6235,7 @@ export const modelsData = [
6189
6235
  "id": "meta-llama/llama-3.3-70b-instruct:free",
6190
6236
  "name": "Llama 3.3 70B Instruct (free)",
6191
6237
  "provider": "openrouter",
6192
- "family": "llama-3.3",
6238
+ "family": "llama",
6193
6239
  "created_at": "2024-12-06 00:00:00 UTC",
6194
6240
  "context_window": 65536,
6195
6241
  "max_output_tokens": 65536,
@@ -6230,7 +6276,7 @@ export const modelsData = [
6230
6276
  "id": "meta-llama/llama-4-scout:free",
6231
6277
  "name": "Llama 4 Scout (free)",
6232
6278
  "provider": "openrouter",
6233
- "family": "llama-4-scout",
6279
+ "family": "llama",
6234
6280
  "created_at": "2025-04-05 00:00:00 UTC",
6235
6281
  "context_window": 64000,
6236
6282
  "max_output_tokens": 64000,
@@ -6273,7 +6319,7 @@ export const modelsData = [
6273
6319
  "id": "microsoft/mai-ds-r1:free",
6274
6320
  "name": "MAI DS R1 (free)",
6275
6321
  "provider": "openrouter",
6276
- "family": "mai-ds-r1",
6322
+ "family": "mai",
6277
6323
  "created_at": "2025-04-21 00:00:00 UTC",
6278
6324
  "context_window": 163840,
6279
6325
  "max_output_tokens": 163840,
@@ -6607,7 +6653,7 @@ export const modelsData = [
6607
6653
  "id": "mistralai/devstral-medium-2507",
6608
6654
  "name": "Devstral Medium",
6609
6655
  "provider": "openrouter",
6610
- "family": "devstral-medium",
6656
+ "family": "devstral",
6611
6657
  "created_at": "2025-07-10 00:00:00 UTC",
6612
6658
  "context_window": 131072,
6613
6659
  "max_output_tokens": 131072,
@@ -6648,7 +6694,7 @@ export const modelsData = [
6648
6694
  "id": "mistralai/devstral-small-2505",
6649
6695
  "name": "Devstral Small",
6650
6696
  "provider": "openrouter",
6651
- "family": "devstral-small",
6697
+ "family": "devstral",
6652
6698
  "created_at": "2025-05-07 00:00:00 UTC",
6653
6699
  "context_window": 128000,
6654
6700
  "max_output_tokens": 128000,
@@ -6689,7 +6735,7 @@ export const modelsData = [
6689
6735
  "id": "mistralai/devstral-small-2505:free",
6690
6736
  "name": "Devstral Small 2505 (free)",
6691
6737
  "provider": "openrouter",
6692
- "family": "devstral-small",
6738
+ "family": "devstral",
6693
6739
  "created_at": "2025-05-21 00:00:00 UTC",
6694
6740
  "context_window": 32768,
6695
6741
  "max_output_tokens": 32768,
@@ -6730,7 +6776,7 @@ export const modelsData = [
6730
6776
  "id": "mistralai/devstral-small-2507",
6731
6777
  "name": "Devstral Small 1.1",
6732
6778
  "provider": "openrouter",
6733
- "family": "devstral-small",
6779
+ "family": "devstral",
6734
6780
  "created_at": "2025-07-10 00:00:00 UTC",
6735
6781
  "context_window": 131072,
6736
6782
  "max_output_tokens": 131072,
@@ -6771,7 +6817,7 @@ export const modelsData = [
6771
6817
  "id": "mistralai/mistral-7b-instruct:free",
6772
6818
  "name": "Mistral 7B Instruct (free)",
6773
6819
  "provider": "openrouter",
6774
- "family": "mistral-7b",
6820
+ "family": "mistral",
6775
6821
  "created_at": "2024-05-27 00:00:00 UTC",
6776
6822
  "context_window": 32768,
6777
6823
  "max_output_tokens": 32768,
@@ -7109,7 +7155,7 @@ export const modelsData = [
7109
7155
  "id": "moonshotai/kimi-k2",
7110
7156
  "name": "Kimi K2",
7111
7157
  "provider": "openrouter",
7112
- "family": "kimi-k2",
7158
+ "family": "kimi",
7113
7159
  "created_at": "2025-07-11 00:00:00 UTC",
7114
7160
  "context_window": 131072,
7115
7161
  "max_output_tokens": 32768,
@@ -7150,7 +7196,7 @@ export const modelsData = [
7150
7196
  "id": "moonshotai/kimi-k2-0905",
7151
7197
  "name": "Kimi K2 Instruct 0905",
7152
7198
  "provider": "openrouter",
7153
- "family": "kimi-k2",
7199
+ "family": "kimi",
7154
7200
  "created_at": "2025-09-05 00:00:00 UTC",
7155
7201
  "context_window": 262144,
7156
7202
  "max_output_tokens": 16384,
@@ -7191,7 +7237,7 @@ export const modelsData = [
7191
7237
  "id": "moonshotai/kimi-k2-0905:exacto",
7192
7238
  "name": "Kimi K2 Instruct 0905 (exacto)",
7193
7239
  "provider": "openrouter",
7194
- "family": "kimi-k2",
7240
+ "family": "kimi",
7195
7241
  "created_at": "2025-09-05 00:00:00 UTC",
7196
7242
  "context_window": 262144,
7197
7243
  "max_output_tokens": 16384,
@@ -7232,7 +7278,7 @@ export const modelsData = [
7232
7278
  "id": "moonshotai/kimi-k2-thinking",
7233
7279
  "name": "Kimi K2 Thinking",
7234
7280
  "provider": "openrouter",
7235
- "family": "kimi-k2",
7281
+ "family": "kimi-thinking",
7236
7282
  "created_at": "2025-11-06 00:00:00 UTC",
7237
7283
  "context_window": 262144,
7238
7284
  "max_output_tokens": 262144,
@@ -7276,7 +7322,7 @@ export const modelsData = [
7276
7322
  "id": "moonshotai/kimi-k2:free",
7277
7323
  "name": "Kimi K2 (free)",
7278
7324
  "provider": "openrouter",
7279
- "family": "kimi-k2",
7325
+ "family": "kimi",
7280
7326
  "created_at": "2025-07-11 00:00:00 UTC",
7281
7327
  "context_window": 32800,
7282
7328
  "max_output_tokens": 32800,
@@ -7317,7 +7363,7 @@ export const modelsData = [
7317
7363
  "id": "nousresearch/deephermes-3-llama-3-8b-preview",
7318
7364
  "name": "DeepHermes 3 Llama 3 8B Preview",
7319
7365
  "provider": "openrouter",
7320
- "family": "llama-3",
7366
+ "family": "llama",
7321
7367
  "created_at": "2025-02-28 00:00:00 UTC",
7322
7368
  "context_window": 131072,
7323
7369
  "max_output_tokens": 8192,
@@ -7485,7 +7531,7 @@ export const modelsData = [
7485
7531
  "id": "o1",
7486
7532
  "name": "o1",
7487
7533
  "provider": "openai",
7488
- "family": "o1",
7534
+ "family": "o",
7489
7535
  "created_at": "2024-12-05 00:00:00 UTC",
7490
7536
  "context_window": 200000,
7491
7537
  "max_output_tokens": 100000,
@@ -7531,7 +7577,7 @@ export const modelsData = [
7531
7577
  "id": "o1-mini",
7532
7578
  "name": "o1-mini",
7533
7579
  "provider": "openai",
7534
- "family": "o1-mini",
7580
+ "family": "o-mini",
7535
7581
  "created_at": "2024-09-12 00:00:00 UTC",
7536
7582
  "context_window": 128000,
7537
7583
  "max_output_tokens": 65536,
@@ -7571,7 +7617,7 @@ export const modelsData = [
7571
7617
  "id": "o1-preview",
7572
7618
  "name": "o1-preview",
7573
7619
  "provider": "openai",
7574
- "family": "o1-preview",
7620
+ "family": "o",
7575
7621
  "created_at": "2024-09-12 00:00:00 UTC",
7576
7622
  "context_window": 128000,
7577
7623
  "max_output_tokens": 32768,
@@ -7611,7 +7657,7 @@ export const modelsData = [
7611
7657
  "id": "o1-pro",
7612
7658
  "name": "o1-pro",
7613
7659
  "provider": "openai",
7614
- "family": "o1-pro",
7660
+ "family": "o-pro",
7615
7661
  "created_at": "2025-03-19 00:00:00 UTC",
7616
7662
  "context_window": 200000,
7617
7663
  "max_output_tokens": 100000,
@@ -7655,7 +7701,7 @@ export const modelsData = [
7655
7701
  "id": "o3",
7656
7702
  "name": "o3",
7657
7703
  "provider": "openai",
7658
- "family": "o3",
7704
+ "family": "o",
7659
7705
  "created_at": "2025-04-16 00:00:00 UTC",
7660
7706
  "context_window": 200000,
7661
7707
  "max_output_tokens": 100000,
@@ -7701,7 +7747,7 @@ export const modelsData = [
7701
7747
  "id": "o3-deep-research",
7702
7748
  "name": "o3-deep-research",
7703
7749
  "provider": "openai",
7704
- "family": "o3",
7750
+ "family": "o",
7705
7751
  "created_at": "2024-06-26 00:00:00 UTC",
7706
7752
  "context_window": 200000,
7707
7753
  "max_output_tokens": 100000,
@@ -7747,7 +7793,7 @@ export const modelsData = [
7747
7793
  "id": "o3-mini",
7748
7794
  "name": "o3-mini",
7749
7795
  "provider": "openai",
7750
- "family": "o3-mini",
7796
+ "family": "o-mini",
7751
7797
  "created_at": "2024-12-20 00:00:00 UTC",
7752
7798
  "context_window": 200000,
7753
7799
  "max_output_tokens": 100000,
@@ -7791,7 +7837,7 @@ export const modelsData = [
7791
7837
  "id": "o3-pro",
7792
7838
  "name": "o3-pro",
7793
7839
  "provider": "openai",
7794
- "family": "o3-pro",
7840
+ "family": "o-pro",
7795
7841
  "created_at": "2025-06-10 00:00:00 UTC",
7796
7842
  "context_window": 200000,
7797
7843
  "max_output_tokens": 100000,
@@ -7835,7 +7881,7 @@ export const modelsData = [
7835
7881
  "id": "o4-mini",
7836
7882
  "name": "o4-mini",
7837
7883
  "provider": "openai",
7838
- "family": "o4-mini",
7884
+ "family": "o-mini",
7839
7885
  "created_at": "2025-04-16 00:00:00 UTC",
7840
7886
  "context_window": 200000,
7841
7887
  "max_output_tokens": 100000,
@@ -7881,7 +7927,7 @@ export const modelsData = [
7881
7927
  "id": "o4-mini-deep-research",
7882
7928
  "name": "o4-mini-deep-research",
7883
7929
  "provider": "openai",
7884
- "family": "o4-mini",
7930
+ "family": "o-mini",
7885
7931
  "created_at": "2024-06-26 00:00:00 UTC",
7886
7932
  "context_window": 200000,
7887
7933
  "max_output_tokens": 100000,
@@ -7927,7 +7973,7 @@ export const modelsData = [
7927
7973
  "id": "openai/gpt-4.1",
7928
7974
  "name": "GPT-4.1",
7929
7975
  "provider": "openrouter",
7930
- "family": "gpt-4.1",
7976
+ "family": "gpt",
7931
7977
  "created_at": "2025-04-14 00:00:00 UTC",
7932
7978
  "context_window": 1047576,
7933
7979
  "max_output_tokens": 32768,
@@ -7972,7 +8018,7 @@ export const modelsData = [
7972
8018
  "id": "openai/gpt-4.1-mini",
7973
8019
  "name": "GPT-4.1 Mini",
7974
8020
  "provider": "openrouter",
7975
- "family": "gpt-4.1-mini",
8021
+ "family": "gpt-mini",
7976
8022
  "created_at": "2025-04-14 00:00:00 UTC",
7977
8023
  "context_window": 1047576,
7978
8024
  "max_output_tokens": 32768,
@@ -8017,7 +8063,7 @@ export const modelsData = [
8017
8063
  "id": "openai/gpt-4o-mini",
8018
8064
  "name": "GPT-4o-mini",
8019
8065
  "provider": "openrouter",
8020
- "family": "gpt-4o-mini",
8066
+ "family": "gpt-mini",
8021
8067
  "created_at": "2024-07-18 00:00:00 UTC",
8022
8068
  "context_window": 128000,
8023
8069
  "max_output_tokens": 16384,
@@ -8062,7 +8108,7 @@ export const modelsData = [
8062
8108
  "id": "openai/gpt-5",
8063
8109
  "name": "GPT-5",
8064
8110
  "provider": "openrouter",
8065
- "family": "gpt-5",
8111
+ "family": "gpt",
8066
8112
  "created_at": "2025-08-07 00:00:00 UTC",
8067
8113
  "context_window": 400000,
8068
8114
  "max_output_tokens": 128000,
@@ -8106,7 +8152,7 @@ export const modelsData = [
8106
8152
  "id": "openai/gpt-5-chat",
8107
8153
  "name": "GPT-5 Chat (latest)",
8108
8154
  "provider": "openrouter",
8109
- "family": "gpt-5-chat",
8155
+ "family": "gpt-codex",
8110
8156
  "created_at": "2025-08-07 00:00:00 UTC",
8111
8157
  "context_window": 400000,
8112
8158
  "max_output_tokens": 128000,
@@ -8146,7 +8192,7 @@ export const modelsData = [
8146
8192
  "id": "openai/gpt-5-codex",
8147
8193
  "name": "GPT-5 Codex",
8148
8194
  "provider": "openrouter",
8149
- "family": "gpt-5-codex",
8195
+ "family": "gpt-codex",
8150
8196
  "created_at": "2025-09-15 00:00:00 UTC",
8151
8197
  "context_window": 400000,
8152
8198
  "max_output_tokens": 128000,
@@ -8192,7 +8238,7 @@ export const modelsData = [
8192
8238
  "id": "openai/gpt-5-image",
8193
8239
  "name": "GPT-5 Image",
8194
8240
  "provider": "openrouter",
8195
- "family": "gpt-5",
8241
+ "family": "gpt",
8196
8242
  "created_at": "2025-10-14 00:00:00 UTC",
8197
8243
  "context_window": 400000,
8198
8244
  "max_output_tokens": 128000,
@@ -8241,7 +8287,7 @@ export const modelsData = [
8241
8287
  "id": "openai/gpt-5-mini",
8242
8288
  "name": "GPT-5 Mini",
8243
8289
  "provider": "openrouter",
8244
- "family": "gpt-5-mini",
8290
+ "family": "gpt-mini",
8245
8291
  "created_at": "2025-08-07 00:00:00 UTC",
8246
8292
  "context_window": 400000,
8247
8293
  "max_output_tokens": 128000,
@@ -8285,7 +8331,7 @@ export const modelsData = [
8285
8331
  "id": "openai/gpt-5-nano",
8286
8332
  "name": "GPT-5 Nano",
8287
8333
  "provider": "openrouter",
8288
- "family": "gpt-5-nano",
8334
+ "family": "gpt-nano",
8289
8335
  "created_at": "2025-08-07 00:00:00 UTC",
8290
8336
  "context_window": 400000,
8291
8337
  "max_output_tokens": 128000,
@@ -8329,7 +8375,7 @@ export const modelsData = [
8329
8375
  "id": "openai/gpt-5-pro",
8330
8376
  "name": "GPT-5 Pro",
8331
8377
  "provider": "openrouter",
8332
- "family": "gpt-5-pro",
8378
+ "family": "gpt-pro",
8333
8379
  "created_at": "2025-10-06 00:00:00 UTC",
8334
8380
  "context_window": 400000,
8335
8381
  "max_output_tokens": 272000,
@@ -8373,7 +8419,7 @@ export const modelsData = [
8373
8419
  "id": "openai/gpt-5.1",
8374
8420
  "name": "GPT-5.1",
8375
8421
  "provider": "openrouter",
8376
- "family": "gpt-5",
8422
+ "family": "gpt",
8377
8423
  "created_at": "2025-11-13 00:00:00 UTC",
8378
8424
  "context_window": 400000,
8379
8425
  "max_output_tokens": 128000,
@@ -8419,7 +8465,7 @@ export const modelsData = [
8419
8465
  "id": "openai/gpt-5.1-chat",
8420
8466
  "name": "GPT-5.1 Chat",
8421
8467
  "provider": "openrouter",
8422
- "family": "gpt-5-chat",
8468
+ "family": "gpt-codex",
8423
8469
  "created_at": "2025-11-13 00:00:00 UTC",
8424
8470
  "context_window": 128000,
8425
8471
  "max_output_tokens": 16384,
@@ -8465,7 +8511,7 @@ export const modelsData = [
8465
8511
  "id": "openai/gpt-5.1-codex",
8466
8512
  "name": "GPT-5.1-Codex",
8467
8513
  "provider": "openrouter",
8468
- "family": "gpt-5-codex",
8514
+ "family": "gpt-codex",
8469
8515
  "created_at": "2025-11-13 00:00:00 UTC",
8470
8516
  "context_window": 400000,
8471
8517
  "max_output_tokens": 128000,
@@ -8511,7 +8557,7 @@ export const modelsData = [
8511
8557
  "id": "openai/gpt-5.1-codex-mini",
8512
8558
  "name": "GPT-5.1-Codex-Mini",
8513
8559
  "provider": "openrouter",
8514
- "family": "gpt-5-codex-mini",
8560
+ "family": "gpt-codex",
8515
8561
  "created_at": "2025-11-13 00:00:00 UTC",
8516
8562
  "context_window": 400000,
8517
8563
  "max_output_tokens": 100000,
@@ -8557,7 +8603,7 @@ export const modelsData = [
8557
8603
  "id": "openai/gpt-5.2",
8558
8604
  "name": "GPT-5.2",
8559
8605
  "provider": "openrouter",
8560
- "family": "gpt-5",
8606
+ "family": "gpt",
8561
8607
  "created_at": "2025-12-11 00:00:00 UTC",
8562
8608
  "context_window": 400000,
8563
8609
  "max_output_tokens": 128000,
@@ -8603,7 +8649,7 @@ export const modelsData = [
8603
8649
  "id": "openai/gpt-5.2-chat-latest",
8604
8650
  "name": "GPT-5.2 Chat",
8605
8651
  "provider": "openrouter",
8606
- "family": "gpt-5-chat",
8652
+ "family": "gpt-codex",
8607
8653
  "created_at": "2025-12-11 00:00:00 UTC",
8608
8654
  "context_window": 128000,
8609
8655
  "max_output_tokens": 16384,
@@ -8645,11 +8691,57 @@ export const modelsData = [
8645
8691
  "context": 128000
8646
8692
  }
8647
8693
  },
8694
+ {
8695
+ "id": "openai/gpt-5.2-codex",
8696
+ "name": "GPT-5.2-Codex",
8697
+ "provider": "openrouter",
8698
+ "family": "gpt-codex",
8699
+ "created_at": "2026-01-14 00:00:00 UTC",
8700
+ "context_window": 400000,
8701
+ "max_output_tokens": 128000,
8702
+ "knowledge_cutoff": "2025-08-31",
8703
+ "modalities": {
8704
+ "input": [
8705
+ "text",
8706
+ "image"
8707
+ ],
8708
+ "output": [
8709
+ "text"
8710
+ ]
8711
+ },
8712
+ "capabilities": [
8713
+ "streaming",
8714
+ "reasoning",
8715
+ "chat",
8716
+ "vision",
8717
+ "function_calling",
8718
+ "tools",
8719
+ "structured_output",
8720
+ "json_mode"
8721
+ ],
8722
+ "pricing": {
8723
+ "text_tokens": {
8724
+ "standard": {
8725
+ "input_per_million": 1.75,
8726
+ "output_per_million": 14,
8727
+ "cached_input_per_million": 0.175,
8728
+ "reasoning_output_per_million": 14
8729
+ }
8730
+ }
8731
+ },
8732
+ "metadata": {
8733
+ "source": "models.dev",
8734
+ "input": 1.75,
8735
+ "output": 128000,
8736
+ "cache_read": 0.175,
8737
+ "context": 400000
8738
+ }
8739
+ },
8648
8740
  {
8649
8741
  "id": "openai/gpt-5.2-pro",
8650
8742
  "name": "GPT-5.2 Pro",
8651
8743
  "provider": "openrouter",
8652
- "family": "gpt-5-pro",
8744
+ "family": "gpt-pro",
8653
8745
  "created_at": "2025-12-11 00:00:00 UTC",
8654
8746
  "context_window": 400000,
8655
8747
  "max_output_tokens": 128000,
@@ -8939,7 +9031,7 @@ export const modelsData = [
8939
9031
  "id": "openai/o4-mini",
8940
9032
  "name": "o4 Mini",
8941
9033
  "provider": "openrouter",
8942
- "family": "o4-mini",
9034
+ "family": "o-mini",
8943
9035
  "created_at": "2025-04-16 00:00:00 UTC",
8944
9036
  "context_window": 200000,
8945
9037
  "max_output_tokens": 100000,
@@ -9109,7 +9201,7 @@ export const modelsData = [
9109
9201
  "id": "qwen/qwen2.5-vl-32b-instruct:free",
9110
9202
  "name": "Qwen2.5 VL 32B Instruct (free)",
9111
9203
  "provider": "openrouter",
9112
- "family": "qwen2.5-vl",
9204
+ "family": "qwen",
9113
9205
  "created_at": "2025-03-24 00:00:00 UTC",
9114
9206
  "context_window": 8192,
9115
9207
  "max_output_tokens": 8192,
@@ -9153,7 +9245,7 @@ export const modelsData = [
9153
9245
  "id": "qwen/qwen2.5-vl-72b-instruct",
9154
9246
  "name": "Qwen2.5 VL 72B Instruct",
9155
9247
  "provider": "openrouter",
9156
- "family": "qwen2.5-vl",
9248
+ "family": "qwen",
9157
9249
  "created_at": "2025-02-01 00:00:00 UTC",
9158
9250
  "context_window": 32768,
9159
9251
  "max_output_tokens": 8192,
@@ -9192,7 +9284,7 @@ export const modelsData = [
9192
9284
  "id": "qwen/qwen2.5-vl-72b-instruct:free",
9193
9285
  "name": "Qwen2.5 VL 72B Instruct (free)",
9194
9286
  "provider": "openrouter",
9195
- "family": "qwen2.5-vl",
9287
+ "family": "qwen",
9196
9288
  "created_at": "2025-02-01 00:00:00 UTC",
9197
9289
  "context_window": 32768,
9198
9290
  "max_output_tokens": 32768,
@@ -9235,7 +9327,7 @@ export const modelsData = [
9235
9327
  "id": "qwen/qwen3-14b:free",
9236
9328
  "name": "Qwen3 14B (free)",
9237
9329
  "provider": "openrouter",
9238
- "family": "qwen3",
9330
+ "family": "qwen",
9239
9331
  "created_at": "2025-04-28 00:00:00 UTC",
9240
9332
  "context_window": 40960,
9241
9333
  "max_output_tokens": 40960,
@@ -9277,7 +9369,7 @@ export const modelsData = [
9277
9369
  "id": "qwen/qwen3-235b-a22b-07-25",
9278
9370
  "name": "Qwen3 235B A22B Instruct 2507",
9279
9371
  "provider": "openrouter",
9280
- "family": "qwen3",
9372
+ "family": "qwen",
9281
9373
  "created_at": "2025-04-28 00:00:00 UTC",
9282
9374
  "context_window": 262144,
9283
9375
  "max_output_tokens": 131072,
@@ -9318,7 +9410,7 @@ export const modelsData = [
9318
9410
  "id": "qwen/qwen3-235b-a22b-07-25:free",
9319
9411
  "name": "Qwen3 235B A22B Instruct 2507 (free)",
9320
9412
  "provider": "openrouter",
9321
- "family": "qwen3",
9413
+ "family": "qwen",
9322
9414
  "created_at": "2025-04-28 00:00:00 UTC",
9323
9415
  "context_window": 262144,
9324
9416
  "max_output_tokens": 131072,
@@ -9359,7 +9451,7 @@ export const modelsData = [
9359
9451
  "id": "qwen/qwen3-235b-a22b-thinking-2507",
9360
9452
  "name": "Qwen3 235B A22B Thinking 2507",
9361
9453
  "provider": "openrouter",
9362
- "family": "qwen3",
9454
+ "family": "qwen",
9363
9455
  "created_at": "2025-07-25 00:00:00 UTC",
9364
9456
  "context_window": 262144,
9365
9457
  "max_output_tokens": 81920,
@@ -9401,7 +9493,7 @@ export const modelsData = [
9401
9493
  "id": "qwen/qwen3-235b-a22b:free",
9402
9494
  "name": "Qwen3 235B A22B (free)",
9403
9495
  "provider": "openrouter",
9404
- "family": "qwen3",
9496
+ "family": "qwen",
9405
9497
  "created_at": "2025-04-28 00:00:00 UTC",
9406
9498
  "context_window": 131072,
9407
9499
  "max_output_tokens": 131072,
@@ -9443,7 +9535,7 @@ export const modelsData = [
9443
9535
  "id": "qwen/qwen3-30b-a3b-instruct-2507",
9444
9536
  "name": "Qwen3 30B A3B Instruct 2507",
9445
9537
  "provider": "openrouter",
9446
- "family": "qwen3",
9538
+ "family": "qwen",
9447
9539
  "created_at": "2025-07-29 00:00:00 UTC",
9448
9540
  "context_window": 262000,
9449
9541
  "max_output_tokens": 262000,
@@ -9484,7 +9576,7 @@ export const modelsData = [
9484
9576
  "id": "qwen/qwen3-30b-a3b-thinking-2507",
9485
9577
  "name": "Qwen3 30B A3B Thinking 2507",
9486
9578
  "provider": "openrouter",
9487
- "family": "qwen3",
9579
+ "family": "qwen",
9488
9580
  "created_at": "2025-07-29 00:00:00 UTC",
9489
9581
  "context_window": 262000,
9490
9582
  "max_output_tokens": 262000,
@@ -9526,7 +9618,7 @@ export const modelsData = [
9526
9618
  "id": "qwen/qwen3-30b-a3b:free",
9527
9619
  "name": "Qwen3 30B A3B (free)",
9528
9620
  "provider": "openrouter",
9529
- "family": "qwen3",
9621
+ "family": "qwen",
9530
9622
  "created_at": "2025-04-28 00:00:00 UTC",
9531
9623
  "context_window": 40960,
9532
9624
  "max_output_tokens": 40960,
@@ -9568,7 +9660,7 @@ export const modelsData = [
9568
9660
  "id": "qwen/qwen3-32b:free",
9569
9661
  "name": "Qwen3 32B (free)",
9570
9662
  "provider": "openrouter",
9571
- "family": "qwen3",
9663
+ "family": "qwen",
9572
9664
  "created_at": "2025-04-28 00:00:00 UTC",
9573
9665
  "context_window": 40960,
9574
9666
  "max_output_tokens": 40960,
@@ -9610,7 +9702,7 @@ export const modelsData = [
9610
9702
  "id": "qwen/qwen3-8b:free",
9611
9703
  "name": "Qwen3 8B (free)",
9612
9704
  "provider": "openrouter",
9613
- "family": "qwen3",
9705
+ "family": "qwen",
9614
9706
  "created_at": "2025-04-28 00:00:00 UTC",
9615
9707
  "context_window": 40960,
9616
9708
  "max_output_tokens": 40960,
@@ -9652,7 +9744,7 @@ export const modelsData = [
9652
9744
  "id": "qwen/qwen3-coder",
9653
9745
  "name": "Qwen3 Coder",
9654
9746
  "provider": "openrouter",
9655
- "family": "qwen3-coder",
9747
+ "family": "qwen",
9656
9748
  "created_at": "2025-07-23 00:00:00 UTC",
9657
9749
  "context_window": 262144,
9658
9750
  "max_output_tokens": 66536,
@@ -9693,7 +9785,7 @@ export const modelsData = [
9693
9785
  "id": "qwen/qwen3-coder-30b-a3b-instruct",
9694
9786
  "name": "Qwen3 Coder 30B A3B Instruct",
9695
9787
  "provider": "openrouter",
9696
- "family": "qwen3-coder",
9788
+ "family": "qwen",
9697
9789
  "created_at": "2025-07-31 00:00:00 UTC",
9698
9790
  "context_window": 160000,
9699
9791
  "max_output_tokens": 65536,
@@ -9734,7 +9826,7 @@ export const modelsData = [
9734
9826
  "id": "qwen/qwen3-coder-flash",
9735
9827
  "name": "Qwen3 Coder Flash",
9736
9828
  "provider": "openrouter",
9737
- "family": "qwen3-coder",
9829
+ "family": "qwen",
9738
9830
  "created_at": "2025-07-23 00:00:00 UTC",
9739
9831
  "context_window": 128000,
9740
9832
  "max_output_tokens": 66536,
@@ -9775,7 +9867,7 @@ export const modelsData = [
9775
9867
  "id": "qwen/qwen3-coder:exacto",
9776
9868
  "name": "Qwen3 Coder (exacto)",
9777
9869
  "provider": "openrouter",
9778
- "family": "qwen3-coder",
9870
+ "family": "qwen",
9779
9871
  "created_at": "2025-07-23 00:00:00 UTC",
9780
9872
  "context_window": 131072,
9781
9873
  "max_output_tokens": 32768,
@@ -9816,7 +9908,7 @@ export const modelsData = [
9816
9908
  "id": "qwen/qwen3-coder:free",
9817
9909
  "name": "Qwen3 Coder 480B A35B Instruct (free)",
9818
9910
  "provider": "openrouter",
9819
- "family": "qwen3-coder",
9911
+ "family": "qwen",
9820
9912
  "created_at": "2025-07-23 00:00:00 UTC",
9821
9913
  "context_window": 262144,
9822
9914
  "max_output_tokens": 66536,
@@ -9857,7 +9949,7 @@ export const modelsData = [
9857
9949
  "id": "qwen/qwen3-max",
9858
9950
  "name": "Qwen3 Max",
9859
9951
  "provider": "openrouter",
9860
- "family": "qwen3",
9952
+ "family": "qwen",
9861
9953
  "created_at": "2025-09-05 00:00:00 UTC",
9862
9954
  "context_window": 262144,
9863
9955
  "max_output_tokens": 32768,
@@ -9898,7 +9990,7 @@ export const modelsData = [
9898
9990
  "id": "qwen/qwen3-next-80b-a3b-instruct",
9899
9991
  "name": "Qwen3 Next 80B A3B Instruct",
9900
9992
  "provider": "openrouter",
9901
- "family": "qwen3",
9993
+ "family": "qwen",
9902
9994
  "created_at": "2025-09-11 00:00:00 UTC",
9903
9995
  "context_window": 262144,
9904
9996
  "max_output_tokens": 262144,
@@ -9939,7 +10031,7 @@ export const modelsData = [
9939
10031
  "id": "qwen/qwen3-next-80b-a3b-thinking",
9940
10032
  "name": "Qwen3 Next 80B A3B Thinking",
9941
10033
  "provider": "openrouter",
9942
- "family": "qwen3",
10034
+ "family": "qwen",
9943
10035
  "created_at": "2025-09-11 00:00:00 UTC",
9944
10036
  "context_window": 262144,
9945
10037
  "max_output_tokens": 262144,
@@ -9981,7 +10073,7 @@ export const modelsData = [
9981
10073
  "id": "qwen/qwq-32b:free",
9982
10074
  "name": "QwQ 32B (free)",
9983
10075
  "provider": "openrouter",
9984
- "family": "qwq",
10076
+ "family": "qwen",
9985
10077
  "created_at": "2025-03-05 00:00:00 UTC",
9986
10078
  "context_window": 32768,
9987
10079
  "max_output_tokens": 32768,
@@ -10023,7 +10115,7 @@ export const modelsData = [
10023
10115
  "id": "rekaai/reka-flash-3",
10024
10116
  "name": "Reka Flash 3",
10025
10117
  "provider": "openrouter",
10026
- "family": "reka-flash",
10118
+ "family": "reka",
10027
10119
  "created_at": "2025-03-12 00:00:00 UTC",
10028
10120
  "context_window": 32768,
10029
10121
  "max_output_tokens": 8192,
@@ -10065,7 +10157,7 @@ export const modelsData = [
10065
10157
  "id": "sarvamai/sarvam-m:free",
10066
10158
  "name": "Sarvam-M (free)",
10067
10159
  "provider": "openrouter",
10068
- "family": "sarvam-m",
10160
+ "family": "sarvam",
10069
10161
  "created_at": "2025-05-25 00:00:00 UTC",
10070
10162
  "context_window": 32768,
10071
10163
  "max_output_tokens": 32768,
@@ -10107,7 +10199,7 @@ export const modelsData = [
10107
10199
  "id": "text-embedding-3-large",
10108
10200
  "name": "text-embedding-3-large",
10109
10201
  "provider": "openai",
10110
- "family": "text-embedding-3-large",
10202
+ "family": "text-embedding",
10111
10203
  "created_at": "2024-01-25 00:00:00 UTC",
10112
10204
  "context_window": 8191,
10113
10205
  "max_output_tokens": 3072,
@@ -10144,7 +10236,7 @@ export const modelsData = [
10144
10236
  "id": "text-embedding-3-small",
10145
10237
  "name": "text-embedding-3-small",
10146
10238
  "provider": "openai",
10147
- "family": "text-embedding-3-small",
10239
+ "family": "text-embedding",
10148
10240
  "created_at": "2024-01-25 00:00:00 UTC",
10149
10241
  "context_window": 8191,
10150
10242
  "max_output_tokens": 1536,
@@ -10181,7 +10273,7 @@ export const modelsData = [
10181
10273
  "id": "text-embedding-ada-002",
10182
10274
  "name": "text-embedding-ada-002",
10183
10275
  "provider": "openai",
10184
- "family": "text-embedding-ada",
10276
+ "family": "text-embedding",
10185
10277
  "created_at": "2022-12-15 00:00:00 UTC",
10186
10278
  "context_window": 8192,
10187
10279
  "max_output_tokens": 1536,
@@ -10218,7 +10310,7 @@ export const modelsData = [
10218
10310
  "id": "thudm/glm-z1-32b:free",
10219
10311
  "name": "GLM Z1 32B (free)",
10220
10312
  "provider": "openrouter",
10221
- "family": "glm-z1",
10313
+ "family": "glm-z",
10222
10314
  "created_at": "2025-04-17 00:00:00 UTC",
10223
10315
  "context_window": 32768,
10224
10316
  "max_output_tokens": 32768,
@@ -10260,7 +10352,7 @@ export const modelsData = [
10260
10352
  "id": "tngtech/deepseek-r1t2-chimera:free",
10261
10353
  "name": "DeepSeek R1T2 Chimera (free)",
10262
10354
  "provider": "openrouter",
10263
- "family": "deepseek-r1",
10355
+ "family": "deepseek-thinking",
10264
10356
  "created_at": "2025-07-08 00:00:00 UTC",
10265
10357
  "context_window": 163840,
10266
10358
  "max_output_tokens": 163840,
@@ -10298,7 +10390,7 @@ export const modelsData = [
10298
10390
  "id": "x-ai/grok-3",
10299
10391
  "name": "Grok 3",
10300
10392
  "provider": "openrouter",
10301
- "family": "grok-3",
10393
+ "family": "grok",
10302
10394
  "created_at": "2025-02-17 00:00:00 UTC",
10303
10395
  "context_window": 131072,
10304
10396
  "max_output_tokens": 8192,
@@ -10342,7 +10434,7 @@ export const modelsData = [
10342
10434
  "id": "x-ai/grok-3-beta",
10343
10435
  "name": "Grok 3 Beta",
10344
10436
  "provider": "openrouter",
10345
- "family": "grok-3",
10437
+ "family": "grok",
10346
10438
  "created_at": "2025-02-17 00:00:00 UTC",
10347
10439
  "context_window": 131072,
10348
10440
  "max_output_tokens": 8192,
@@ -10386,7 +10478,7 @@ export const modelsData = [
10386
10478
  "id": "x-ai/grok-3-mini",
10387
10479
  "name": "Grok 3 Mini",
10388
10480
  "provider": "openrouter",
10389
- "family": "grok-3",
10481
+ "family": "grok",
10390
10482
  "created_at": "2025-02-17 00:00:00 UTC",
10391
10483
  "context_window": 131072,
10392
10484
  "max_output_tokens": 8192,
@@ -10431,7 +10523,7 @@ export const modelsData = [
10431
10523
  "id": "x-ai/grok-3-mini-beta",
10432
10524
  "name": "Grok 3 Mini Beta",
10433
10525
  "provider": "openrouter",
10434
- "family": "grok-3",
10526
+ "family": "grok",
10435
10527
  "created_at": "2025-02-17 00:00:00 UTC",
10436
10528
  "context_window": 131072,
10437
10529
  "max_output_tokens": 8192,
@@ -10476,7 +10568,7 @@ export const modelsData = [
10476
10568
  "id": "x-ai/grok-4",
10477
10569
  "name": "Grok 4",
10478
10570
  "provider": "openrouter",
10479
- "family": "grok-4",
10571
+ "family": "grok",
10480
10572
  "created_at": "2025-07-09 00:00:00 UTC",
10481
10573
  "context_window": 256000,
10482
10574
  "max_output_tokens": 64000,
@@ -10521,7 +10613,7 @@ export const modelsData = [
10521
10613
  "id": "x-ai/grok-4-fast",
10522
10614
  "name": "Grok 4 Fast",
10523
10615
  "provider": "openrouter",
10524
- "family": "grok-4",
10616
+ "family": "grok",
10525
10617
  "created_at": "2025-08-19 00:00:00 UTC",
10526
10618
  "context_window": 2000000,
10527
10619
  "max_output_tokens": 30000,
@@ -10568,7 +10660,7 @@ export const modelsData = [
10568
10660
  "id": "x-ai/grok-4.1-fast",
10569
10661
  "name": "Grok 4.1 Fast",
10570
10662
  "provider": "openrouter",
10571
- "family": "grok-4",
10663
+ "family": "grok",
10572
10664
  "created_at": "2025-11-19 00:00:00 UTC",
10573
10665
  "context_window": 2000000,
10574
10666
  "max_output_tokens": 30000,
@@ -10659,7 +10751,7 @@ export const modelsData = [
10659
10751
  "id": "z-ai/glm-4.5",
10660
10752
  "name": "GLM 4.5",
10661
10753
  "provider": "openrouter",
10662
- "family": "glm-4.5",
10754
+ "family": "glm",
10663
10755
  "created_at": "2025-07-28 00:00:00 UTC",
10664
10756
  "context_window": 128000,
10665
10757
  "max_output_tokens": 96000,
@@ -10701,7 +10793,7 @@ export const modelsData = [
10701
10793
  "id": "z-ai/glm-4.5-air",
10702
10794
  "name": "GLM 4.5 Air",
10703
10795
  "provider": "openrouter",
10704
- "family": "glm-4.5-air",
10796
+ "family": "glm-air",
10705
10797
  "created_at": "2025-07-28 00:00:00 UTC",
10706
10798
  "context_window": 128000,
10707
10799
  "max_output_tokens": 96000,
@@ -10743,7 +10835,7 @@ export const modelsData = [
10743
10835
  "id": "z-ai/glm-4.5-air:free",
10744
10836
  "name": "GLM 4.5 Air (free)",
10745
10837
  "provider": "openrouter",
10746
- "family": "glm-4.5-air",
10838
+ "family": "glm-air",
10747
10839
  "created_at": "2025-07-28 00:00:00 UTC",
10748
10840
  "context_window": 128000,
10749
10841
  "max_output_tokens": 96000,
@@ -10781,7 +10873,7 @@ export const modelsData = [
10781
10873
  "id": "z-ai/glm-4.5v",
10782
10874
  "name": "GLM 4.5V",
10783
10875
  "provider": "openrouter",
10784
- "family": "glm-4.5v",
10876
+ "family": "glm",
10785
10877
  "created_at": "2025-08-11 00:00:00 UTC",
10786
10878
  "context_window": 64000,
10787
10879
  "max_output_tokens": 16384,
@@ -10826,7 +10918,7 @@ export const modelsData = [
10826
10918
  "id": "z-ai/glm-4.6",
10827
10919
  "name": "GLM 4.6",
10828
10920
  "provider": "openrouter",
10829
- "family": "glm-4.6",
10921
+ "family": "glm",
10830
10922
  "created_at": "2025-09-30 00:00:00 UTC",
10831
10923
  "context_window": 200000,
10832
10924
  "max_output_tokens": 128000,
@@ -10870,7 +10962,7 @@ export const modelsData = [
10870
10962
  "id": "z-ai/glm-4.6:exacto",
10871
10963
  "name": "GLM 4.6 (exacto)",
10872
10964
  "provider": "openrouter",
10873
- "family": "glm-4.6",
10965
+ "family": "glm",
10874
10966
  "created_at": "2025-09-30 00:00:00 UTC",
10875
10967
  "context_window": 200000,
10876
10968
  "max_output_tokens": 128000,
@@ -10914,7 +11006,7 @@ export const modelsData = [
10914
11006
  "id": "z-ai/glm-4.7",
10915
11007
  "name": "GLM-4.7",
10916
11008
  "provider": "openrouter",
10917
- "family": "glm-4.7",
11009
+ "family": "glm",
10918
11010
  "created_at": "2025-12-22 00:00:00 UTC",
10919
11011
  "context_window": 204800,
10920
11012
  "max_output_tokens": 131072,