@lobehub/lobehub 2.0.0-next.105 → 2.0.0-next.106
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +25 -0
- package/changelog/v1.json +9 -0
- package/locales/ar/image.json +8 -0
- package/locales/ar/models.json +110 -64
- package/locales/ar/providers.json +3 -0
- package/locales/bg-BG/image.json +8 -0
- package/locales/bg-BG/models.json +98 -68
- package/locales/bg-BG/providers.json +3 -0
- package/locales/de-DE/image.json +8 -0
- package/locales/de-DE/models.json +176 -38
- package/locales/de-DE/providers.json +3 -0
- package/locales/en-US/image.json +8 -0
- package/locales/en-US/models.json +176 -38
- package/locales/en-US/providers.json +3 -0
- package/locales/es-ES/image.json +8 -0
- package/locales/es-ES/models.json +176 -38
- package/locales/es-ES/providers.json +3 -0
- package/locales/fa-IR/image.json +8 -0
- package/locales/fa-IR/models.json +110 -64
- package/locales/fa-IR/providers.json +3 -0
- package/locales/fr-FR/image.json +8 -0
- package/locales/fr-FR/models.json +110 -64
- package/locales/fr-FR/providers.json +3 -0
- package/locales/it-IT/image.json +8 -0
- package/locales/it-IT/models.json +176 -38
- package/locales/it-IT/providers.json +3 -0
- package/locales/ja-JP/image.json +8 -0
- package/locales/ja-JP/models.json +110 -64
- package/locales/ja-JP/providers.json +3 -0
- package/locales/ko-KR/image.json +8 -0
- package/locales/ko-KR/models.json +110 -64
- package/locales/ko-KR/providers.json +3 -0
- package/locales/nl-NL/image.json +8 -0
- package/locales/nl-NL/models.json +176 -38
- package/locales/nl-NL/providers.json +3 -0
- package/locales/pl-PL/image.json +8 -0
- package/locales/pl-PL/models.json +110 -64
- package/locales/pl-PL/providers.json +3 -0
- package/locales/pt-BR/image.json +8 -0
- package/locales/pt-BR/models.json +176 -38
- package/locales/pt-BR/providers.json +3 -0
- package/locales/ru-RU/image.json +8 -0
- package/locales/ru-RU/models.json +98 -68
- package/locales/ru-RU/providers.json +3 -0
- package/locales/tr-TR/image.json +8 -0
- package/locales/tr-TR/models.json +110 -64
- package/locales/tr-TR/providers.json +3 -0
- package/locales/vi-VN/image.json +8 -0
- package/locales/vi-VN/models.json +176 -38
- package/locales/vi-VN/providers.json +3 -0
- package/locales/zh-CN/image.json +8 -0
- package/locales/zh-CN/models.json +179 -38
- package/locales/zh-CN/providers.json +3 -0
- package/locales/zh-TW/image.json +8 -0
- package/locales/zh-TW/models.json +176 -38
- package/locales/zh-TW/providers.json +3 -0
- package/package.json +1 -1
- package/packages/model-runtime/src/utils/postProcessModelList.ts +15 -13
```diff
@@ -720,25 +720,28 @@
     "description": "Claude 3 Opus is Anthropic's smartest model, delivering market-leading performance on highly complex tasks. It navigates open-ended prompts and novel scenarios with exceptional fluency and human-like understanding."
   },
   "anthropic/claude-3.5-haiku": {
-    "description": "Claude 3.5 Haiku
+    "description": "Claude 3.5 Haiku offers enhanced speed, coding accuracy, and tool usage. Ideal for scenarios requiring high performance in speed and tool interaction."
   },
   "anthropic/claude-3.5-sonnet": {
-    "description": "Claude 3.5 Sonnet
+    "description": "Claude 3.5 Sonnet is a fast and efficient model in the Sonnet family, offering improved coding and reasoning performance. Some versions will gradually be replaced by models like Sonnet 3.7."
   },
   "anthropic/claude-3.7-sonnet": {
-    "description": "Claude 3.7 Sonnet is
+    "description": "Claude 3.7 Sonnet is an upgraded model in the Sonnet series, delivering stronger reasoning and coding capabilities, suitable for complex enterprise-level tasks."
+  },
+  "anthropic/claude-haiku-4.5": {
+    "description": "Claude Haiku 4.5 is a high-performance, low-latency model from Anthropic that maintains high accuracy."
   },
   "anthropic/claude-opus-4": {
-    "description": "
+    "description": "Opus 4 is Anthropic’s flagship model, designed for complex tasks and enterprise-grade applications."
   },
   "anthropic/claude-opus-4.1": {
-    "description": "
+    "description": "Opus 4.1 is a premium model from Anthropic, optimized for programming, complex reasoning, and sustained tasks."
   },
   "anthropic/claude-sonnet-4": {
-    "description": "Claude Sonnet 4
+    "description": "Claude Sonnet 4 is Anthropic’s hybrid reasoning model, offering a blend of cognitive and non-cognitive capabilities."
   },
   "anthropic/claude-sonnet-4.5": {
-    "description": "Claude Sonnet 4.5 is Anthropic
+    "description": "Claude Sonnet 4.5 is Anthropic’s latest hybrid reasoning model, optimized for complex reasoning and coding tasks."
   },
   "ascend-tribe/pangu-pro-moe": {
     "description": "Pangu-Pro-MoE 72B-A16B is a sparse large language model with 72 billion parameters and 16 billion activated parameters. It is based on the Group Mixture of Experts (MoGE) architecture, which groups experts during the expert selection phase and constrains tokens to activate an equal number of experts within each group, achieving expert load balancing and significantly improving deployment efficiency on the Ascend platform."
```
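
The MoGE routing constraint in the Pangu-Pro-MoE entry above (experts partitioned into groups, with each token activating an equal number of experts in every group) is concrete enough to sketch. The following TypeScript is a minimal illustration of group-balanced top-k routing, with hypothetical names and shapes, not Pangu's actual implementation:

```ts
/** Return the indices of the k largest scores, highest first. */
function topK(scores: number[], k: number): number[] {
  return scores
    .map((score, index) => ({ score, index }))
    .sort((a, b) => b.score - a.score)
    .slice(0, k)
    .map((entry) => entry.index);
}

/**
 * Group-balanced routing in the spirit of MoGE: experts are split into
 * `numGroups` equal groups, and every token activates exactly `perGroup`
 * experts within each group, so no single group can be overloaded.
 */
function groupBalancedRoute(
  gateScores: number[], // one gating score per expert, for a single token
  numGroups: number,
  perGroup: number,
): number[] {
  const groupSize = gateScores.length / numGroups;
  const selected: number[] = [];
  for (let g = 0; g < numGroups; g++) {
    const start = g * groupSize;
    const local = topK(gateScores.slice(start, start + groupSize), perGroup);
    selected.push(...local.map((i) => start + i)); // map back to global expert ids
  }
  return selected;
}

// 8 experts in 4 groups, 1 activated per group -> experts 0, 3, 5, 6.
console.log(groupBalancedRoute([0.9, 0.1, 0.4, 0.6, 0.2, 0.8, 0.5, 0.3], 4, 1));
```

Because every token takes exactly `perGroup` experts from each group, the load is balanced across groups by construction, which is the deployment-efficiency property the description credits to MoGE.
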
```diff
@@ -761,6 +764,9 @@
   "baidu/ERNIE-4.5-300B-A47B": {
     "description": "ERNIE-4.5-300B-A47B is a large language model developed by Baidu based on a Mixture of Experts (MoE) architecture. The model has a total of 300 billion parameters, but only activates 47 billion parameters per token during inference, balancing powerful performance with computational efficiency. As a core model in the ERNIE 4.5 series, it demonstrates outstanding capabilities in text understanding, generation, reasoning, and programming tasks. The model employs an innovative multimodal heterogeneous MoE pretraining method, jointly training text and visual modalities to effectively enhance overall capabilities, especially excelling in instruction following and world knowledge retention."
   },
+  "baidu/ernie-5.0-thinking-preview": {
+    "description": "ERNIE 5.0 Thinking Preview is Baidu’s next-generation native multimodal Wenxin model, excelling in multimodal understanding, instruction following, content creation, factual Q&A, and tool usage."
+  },
   "c4ai-aya-expanse-32b": {
     "description": "Aya Expanse is a high-performance 32B multilingual model designed to challenge the performance of single-language models through innovations in instruction tuning, data arbitrage, preference training, and model merging. It supports 23 languages."
   },
```
```diff
@@ -869,6 +875,9 @@
   "codex-mini-latest": {
     "description": "codex-mini-latest is a fine-tuned version of o4-mini, specifically designed for Codex CLI. For direct API usage, we recommend starting with gpt-4.1."
   },
+  "cogito-2.1:671b": {
+    "description": "Cogito v2.1 671B is a U.S.-based open-source large language model available for free commercial use. It offers top-tier performance, high token inference efficiency, 128k long context, and robust general capabilities."
+  },
   "cogview-4": {
     "description": "CogView-4 is Zhipu's first open-source text-to-image model supporting Chinese character generation. It offers comprehensive improvements in semantic understanding, image generation quality, and bilingual Chinese-English text generation capabilities. It supports bilingual input of any length and can generate images at any resolution within a specified range."
   },
```
```diff
@@ -1139,6 +1148,9 @@
   "deepseek-vl2-small": {
     "description": "DeepSeek VL2 Small, a lightweight multimodal version designed for resource-constrained and high-concurrency scenarios."
   },
+  "deepseek/deepseek-chat": {
+    "description": "DeepSeek-V3 is a high-performance hybrid reasoning model from the DeepSeek team, suitable for complex tasks and tool integration."
+  },
   "deepseek/deepseek-chat-v3-0324": {
     "description": "DeepSeek V3 is a 685B parameter expert mixture model, the latest iteration in the DeepSeek team's flagship chat model series.\n\nIt inherits from the [DeepSeek V3](/deepseek/deepseek-chat-v3) model and performs excellently across various tasks."
   },
```
```diff
@@ -1146,13 +1158,13 @@
     "description": "DeepSeek V3 is a 685B parameter expert mixture model, the latest iteration in the DeepSeek team's flagship chat model series.\n\nIt inherits from the [DeepSeek V3](/deepseek/deepseek-chat-v3) model and performs excellently across various tasks."
   },
   "deepseek/deepseek-chat-v3.1": {
-    "description": "DeepSeek-V3.1 is a
+    "description": "DeepSeek-V3.1 is a long-context hybrid reasoning model from DeepSeek, supporting cognitive/non-cognitive hybrid modes and tool integration."
   },
   "deepseek/deepseek-r1": {
     "description": "The DeepSeek R1 model has undergone minor version upgrades, currently at DeepSeek-R1-0528. The latest update significantly enhances inference depth and capability by leveraging increased compute resources and post-training algorithmic optimizations. The model performs excellently on benchmarks in mathematics, programming, and general logic, with overall performance approaching leading models like O3 and Gemini 2.5 Pro."
   },
   "deepseek/deepseek-r1-0528": {
-    "description": "DeepSeek
+    "description": "DeepSeek R1 0528 is an updated variant from DeepSeek, focused on open-source usability and deep reasoning."
   },
   "deepseek/deepseek-r1-0528:free": {
     "description": "DeepSeek-R1 greatly improves model reasoning capabilities with minimal labeled data. Before outputting the final answer, the model first generates a chain of thought to enhance answer accuracy."
```
```diff
@@ -1175,6 +1187,9 @@
   "deepseek/deepseek-r1:free": {
     "description": "DeepSeek-R1 significantly enhances model reasoning capabilities with minimal labeled data. Before outputting the final answer, the model first provides a chain of thought to improve the accuracy of the final response."
   },
+  "deepseek/deepseek-reasoner": {
+    "description": "DeepSeek-V3 Thinking (reasoner) is an experimental reasoning model from DeepSeek, designed for high-complexity reasoning tasks."
+  },
   "deepseek/deepseek-v3": {
     "description": "A fast, general-purpose large language model with enhanced reasoning capabilities."
   },
```
```diff
@@ -1523,8 +1538,14 @@
   "gemini-2.5-pro-preview-06-05": {
     "description": "Gemini 2.5 Pro Preview is Google's most advanced cognitive model, capable of reasoning through complex problems in code, mathematics, and STEM fields, as well as analyzing large datasets, codebases, and documents using long-context understanding."
   },
+  "gemini-3-pro-image-preview": {
+    "description": "Gemini 3 Pro Image (Nano Banana Pro) is Google’s image generation model, also supporting multimodal dialogue."
+  },
+  "gemini-3-pro-image-preview:image": {
+    "description": "Gemini 3 Pro Image (Nano Banana Pro) is Google’s image generation model, also supporting multimodal dialogue."
+  },
   "gemini-3-pro-preview": {
-    "description": "Gemini 3 Pro is Google
+    "description": "Gemini 3 Pro is the world’s leading multimodal understanding model and Google’s most powerful agent and ambient programming model to date, offering rich visual output and deep interactivity, all built on cutting-edge reasoning capabilities."
   },
   "gemini-flash-latest": {
     "description": "Latest release of Gemini Flash"
```
```diff
@@ -1650,7 +1671,7 @@
     "description": "GLM-Zero-Preview possesses strong complex reasoning abilities, excelling in logical reasoning, mathematics, programming, and other fields."
   },
   "google/gemini-2.0-flash": {
-    "description": "Gemini 2.0 Flash
+    "description": "Gemini 2.0 Flash is Google’s high-performance reasoning model, suitable for extended multimodal tasks."
   },
   "google/gemini-2.0-flash-001": {
     "description": "Gemini 2.0 Flash offers next-generation features and improvements, including exceptional speed, native tool usage, multimodal generation, and a 1M token context window."
```
```diff
@@ -1661,14 +1682,23 @@
   "google/gemini-2.0-flash-lite": {
     "description": "Gemini 2.0 Flash Lite provides next-generation features and improvements, including exceptional speed, built-in tool usage, multimodal generation, and a 1 million token context window."
   },
+  "google/gemini-2.0-flash-lite-001": {
+    "description": "Gemini 2.0 Flash Lite is a lightweight version of the Gemini family. By default, it disables reasoning to improve latency and cost efficiency, but it can be enabled via parameters."
+  },
   "google/gemini-2.5-flash": {
-    "description": "Gemini 2.5 Flash
+    "description": "The Gemini 2.5 Flash (Lite/Pro/Flash) series are Google’s reasoning models ranging from low-latency to high-performance."
+  },
+  "google/gemini-2.5-flash-image": {
+    "description": "Gemini 2.5 Flash Image (Nano Banana) is Google’s image generation model, also supporting multimodal dialogue."
+  },
+  "google/gemini-2.5-flash-image-free": {
+    "description": "Gemini 2.5 Flash Image Free Edition supports limited multimodal generation."
   },
   "google/gemini-2.5-flash-image-preview": {
     "description": "Gemini 2.5 Flash experimental model, supporting image generation."
   },
   "google/gemini-2.5-flash-lite": {
-    "description": "Gemini 2.5 Flash
+    "description": "Gemini 2.5 Flash Lite is a lightweight version of Gemini 2.5, optimized for latency and cost, ideal for high-throughput scenarios."
   },
   "google/gemini-2.5-flash-preview": {
     "description": "Gemini 2.5 Flash is Google's most advanced flagship model, designed for advanced reasoning, coding, mathematics, and scientific tasks. It includes built-in 'thinking' capabilities that allow it to provide responses with higher accuracy and detailed context handling.\n\nNote: This model has two variants: thinking and non-thinking. Output pricing varies significantly based on whether the thinking capability is activated. If you choose the standard variant (without the ':thinking' suffix), the model will explicitly avoid generating thinking tokens.\n\nTo leverage the thinking capability and receive thinking tokens, you must select the ':thinking' variant, which will incur higher thinking output pricing.\n\nAdditionally, Gemini 2.5 Flash can be configured via the 'maximum tokens for reasoning' parameter, as described in the documentation (https://openrouter.ai/docs/use-cases/reasoning-tokens#max-tokens-for-reasoning)."
```
```diff
@@ -1677,11 +1707,23 @@
     "description": "Gemini 2.5 Flash is Google's most advanced flagship model, designed for advanced reasoning, coding, mathematics, and scientific tasks. It includes built-in 'thinking' capabilities that allow it to provide responses with higher accuracy and detailed context handling.\n\nNote: This model has two variants: thinking and non-thinking. Output pricing varies significantly based on whether the thinking capability is activated. If you choose the standard variant (without the ':thinking' suffix), the model will explicitly avoid generating thinking tokens.\n\nTo leverage the thinking capability and receive thinking tokens, you must select the ':thinking' variant, which will incur higher thinking output pricing.\n\nAdditionally, Gemini 2.5 Flash can be configured via the 'maximum tokens for reasoning' parameter, as described in the documentation (https://openrouter.ai/docs/use-cases/reasoning-tokens#max-tokens-for-reasoning)."
   },
   "google/gemini-2.5-pro": {
-    "description": "Gemini 2.5 Pro is
+    "description": "Gemini 2.5 Pro is Google’s flagship reasoning model, supporting long context and complex tasks."
+  },
+  "google/gemini-2.5-pro-free": {
+    "description": "Gemini 2.5 Pro Free Edition supports limited multimodal long-context usage, ideal for trials and lightweight workflows."
   },
   "google/gemini-2.5-pro-preview": {
     "description": "Gemini 2.5 Pro Preview is Google's most advanced thinking model, capable of reasoning through complex problems in code, mathematics, and STEM fields, as well as analyzing large datasets, codebases, and documents using extended context."
   },
+  "google/gemini-3-pro-image-preview-free": {
+    "description": "Gemini 3 Pro Image Free Edition supports limited multimodal generation."
+  },
+  "google/gemini-3-pro-preview": {
+    "description": "Gemini 3 Pro is the next-generation multimodal reasoning model in the Gemini series, capable of understanding text, audio, images, and video, and handling complex tasks and large codebases."
+  },
+  "google/gemini-3-pro-preview-free": {
+    "description": "Gemini 3 Pro Free Preview offers the same multimodal understanding and reasoning capabilities as the standard version, but with usage and rate limitations, making it more suitable for exploration and low-frequency use."
+  },
   "google/gemini-embedding-001": {
     "description": "A state-of-the-art embedding model delivering excellent performance on English, multilingual, and code tasks."
   },
```
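
The `google/gemini-2.5-flash-preview` description above names two levers: the ':thinking' model-ID suffix and a 'maximum tokens for reasoning' parameter. A hedged TypeScript sketch of what that looks like against OpenRouter's chat-completions endpoint, following the reasoning-tokens documentation linked in the entry (the exact request shape may evolve):

```ts
// Sketch only: selects the thinking variant and caps its reasoning budget.
async function askWithThinking(prompt: string): Promise<string> {
  const res = await fetch("https://openrouter.ai/api/v1/chat/completions", {
    method: "POST",
    headers: {
      Authorization: `Bearer ${process.env.OPENROUTER_API_KEY}`,
      "Content-Type": "application/json",
    },
    body: JSON.stringify({
      // ":thinking" opts into thinking tokens (and their higher output pricing);
      // the plain model ID explicitly avoids generating thinking tokens.
      model: "google/gemini-2.5-flash-preview:thinking",
      messages: [{ role: "user", content: prompt }],
      // The "maximum tokens for reasoning" knob from the linked OpenRouter docs.
      reasoning: { max_tokens: 1024 },
    }),
  });
  const data = await res.json();
  return data.choices[0].message.content;
}
```
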
```diff
@@ -2057,21 +2099,36 @@
   "inception/mercury-coder-small": {
     "description": "Mercury Coder Small is ideal for code generation, debugging, and refactoring tasks, offering minimal latency."
   },
-  "inclusionAI/Ling-1T": {
-    "description": "Ling-1T is the first flagship non-thinking model in the 'Ling 2.0' series, featuring 1 trillion total parameters and approximately 50 billion active parameters per token. Built on the Ling 2.0 architecture, Ling-1T aims to push the boundaries of efficient reasoning and scalable cognition. Ling-1T-base is trained on over 20 trillion high-quality, reasoning-intensive tokens."
-  },
   "inclusionAI/Ling-flash-2.0": {
     "description": "Ling-flash-2.0 is the third model in the Ling 2.0 architecture series released by Ant Group's Bailing team. It is a mixture-of-experts (MoE) model with a total of 100 billion parameters, but activates only 6.1 billion parameters per token (4.8 billion non-embedding). As a lightweight configuration model, Ling-flash-2.0 demonstrates performance comparable to or surpassing 40-billion-parameter dense models and larger MoE models across multiple authoritative benchmarks. The model aims to explore efficient pathways under the consensus that \"large models equal large parameters\" through extreme architectural design and training strategies."
   },
   "inclusionAI/Ling-mini-2.0": {
     "description": "Ling-mini-2.0 is a small-sized, high-performance large language model based on the MoE architecture. It has 16 billion total parameters but activates only 1.4 billion per token (789 million non-embedding), achieving extremely high generation speed. Thanks to the efficient MoE design and large-scale high-quality training data, despite activating only 1.4 billion parameters, Ling-mini-2.0 still delivers top-tier performance comparable to dense LLMs under 10 billion parameters and larger MoE models on downstream tasks."
   },
-  "inclusionAI/Ring-1T": {
-    "description": "Ring-1T is a trillion-parameter open-source cognitive model released by the Bailing team. It is trained on the Ling 2.0 architecture and the Ling-1T-base model, with 1 trillion total parameters and 50 billion active parameters. It supports context windows up to 128K and is optimized through large-scale verifiable reward reinforcement learning."
-  },
   "inclusionAI/Ring-flash-2.0": {
     "description": "Ring-flash-2.0 is a high-performance reasoning model deeply optimized based on Ling-flash-2.0-base. It employs a mixture-of-experts (MoE) architecture with a total of 100 billion parameters but activates only 6.1 billion parameters per inference. The model uses the proprietary icepop algorithm to solve the instability issues of MoE large models during reinforcement learning (RL) training, enabling continuous improvement of complex reasoning capabilities over long training cycles. Ring-flash-2.0 has achieved significant breakthroughs in challenging benchmarks such as math competitions, code generation, and logical reasoning. Its performance not only surpasses top dense models under 40 billion parameters but also rivals larger open-source MoE models and closed-source high-performance reasoning models. Although focused on complex reasoning, it also performs well in creative writing tasks. Additionally, thanks to its efficient architecture, Ring-flash-2.0 delivers strong performance with high-speed inference, significantly reducing deployment costs for reasoning models in high-concurrency scenarios."
   },
+  "inclusionai/ling-1t": {
+    "description": "Ling-1T is inclusionAI’s 1T MoE large model, optimized for high-intensity reasoning tasks and large-scale context."
+  },
+  "inclusionai/ling-flash-2.0": {
+    "description": "Ling-flash-2.0 is inclusionAI’s MoE model, optimized for efficiency and reasoning performance, suitable for medium to large-scale tasks."
+  },
+  "inclusionai/ling-mini-2.0": {
+    "description": "Ling-mini-2.0 is a lightweight MoE model from inclusionAI, significantly reducing cost while maintaining reasoning capabilities."
+  },
+  "inclusionai/ming-flash-omini-preview": {
+    "description": "Ming-flash-omni Preview is inclusionAI’s multimodal model supporting voice, image, and video input, with enhanced image rendering and speech recognition capabilities."
+  },
+  "inclusionai/ring-1t": {
+    "description": "Ring-1T is inclusionAI’s trillion-parameter MoE reasoning model, designed for large-scale reasoning and research tasks."
+  },
+  "inclusionai/ring-flash-2.0": {
+    "description": "Ring-flash-2.0 is a high-throughput variant of the Ring model from inclusionAI, emphasizing speed and cost efficiency."
+  },
+  "inclusionai/ring-mini-2.0": {
+    "description": "Ring-mini-2.0 is a high-throughput, lightweight MoE version from inclusionAI, primarily used in concurrent scenarios."
+  },
   "internlm/internlm2_5-7b-chat": {
     "description": "InternLM2.5 offers intelligent dialogue solutions across multiple scenarios."
   },
```
```diff
@@ -2123,6 +2180,12 @@
   "kimi-k2-instruct": {
     "description": "Kimi K2 Instruct, the official Kimi inference model supporting long context, code, Q&A, and more."
   },
+  "kimi-k2-thinking": {
+    "description": "K2 Long Thinking Model supports 256k context, multi-step tool usage, and deep reasoning, excelling at solving complex problems."
+  },
+  "kimi-k2-thinking-turbo": {
+    "description": "The high-speed version of the K2 Long Thinking Model supports 256k context and deep reasoning, with output speeds of 60–100 tokens per second."
+  },
   "kimi-k2-turbo-preview": {
     "description": "Kimi-K2 is a Mixture-of-Experts (MoE) foundation model with exceptional coding and agent capabilities, featuring 1T total parameters and 32B activated parameters. In benchmark evaluations across core categories — general knowledge reasoning, programming, mathematics, and agent tasks — the K2 model outperforms other leading open-source models."
   },
```
```diff
@@ -2135,6 +2198,9 @@
   "kimi-thinking-preview": {
     "description": "kimi-thinking-preview is a multimodal thinking model provided by Dark Side of the Moon, featuring multimodal and general reasoning abilities. It excels at deep reasoning to help solve more complex and challenging problems."
   },
+  "kuaishou/kat-coder-pro-v1": {
+    "description": "KAT-Coder-Pro-V1 (limited-time free) focuses on code understanding and automated programming, designed for efficient coding agent tasks."
+  },
   "learnlm-1.5-pro-experimental": {
     "description": "LearnLM is an experimental, task-specific language model trained to align with learning science principles, capable of following systematic instructions in teaching and learning scenarios, acting as an expert tutor, among other roles."
   },
```
```diff
@@ -2466,7 +2532,7 @@
     "description": "MiniMax M2 is a high-efficiency large language model built for coding and agent-based workflows."
   },
   "minimax/minimax-m2": {
-    "description": "
+    "description": "MiniMax-M2 is a cost-effective model with strong performance in coding and agent tasks, suitable for various engineering scenarios."
   },
   "minimaxai/minimax-m2": {
     "description": "MiniMax-M2 is a compact, fast, and cost-efficient Mixture of Experts (MoE) model with 230 billion total parameters and 10 billion active parameters. It is engineered for top performance in coding and agent tasks while maintaining robust general intelligence. Excelling in multi-file editing, code-run-debug loops, test validation and repair, and complex long-chain tool integrations, it is an ideal choice for developer workflows."
```
```diff
@@ -2615,12 +2681,21 @@
   "moonshotai/kimi-k2": {
     "description": "Kimi K2 is a large-scale Mixture of Experts (MoE) language model developed by Moonshot AI, with a total of 1 trillion parameters and 32 billion active parameters per forward pass. It is optimized for agent capabilities, including advanced tool use, reasoning, and code synthesis."
   },
+  "moonshotai/kimi-k2-0711": {
+    "description": "Kimi K2 0711 is the Instruct version of the Kimi series, ideal for high-quality coding and tool usage scenarios."
+  },
   "moonshotai/kimi-k2-0905": {
-    "description": "
+    "description": "Kimi K2 0905 is the September 5th update of the Kimi series, with expanded context and improved reasoning and coding performance."
   },
   "moonshotai/kimi-k2-instruct-0905": {
     "description": "The kimi-k2-0905-preview model has a context length of 256k, featuring stronger Agentic Coding capabilities, more outstanding aesthetics and practicality of frontend code, and better context understanding."
   },
+  "moonshotai/kimi-k2-thinking": {
+    "description": "Kimi K2 Thinking is Moonshot’s optimized model for deep reasoning tasks, equipped with general agent capabilities."
+  },
+  "moonshotai/kimi-k2-thinking-turbo": {
+    "description": "Kimi K2 Thinking Turbo is the high-speed version of Kimi K2 Thinking, maintaining deep reasoning capabilities while significantly reducing response latency."
+  },
   "morph/morph-v3-fast": {
     "description": "Morph offers a specialized AI model that applies code changes suggested by cutting-edge models like Claude or GPT-4o to your existing code files FAST - 4500+ tokens/second. It acts as the final step in the AI coding workflow. Supports 16k input tokens and 16k output tokens."
   },
```
```diff
@@ -2703,28 +2778,49 @@
     "description": "OpenAI's gpt-4-turbo features broad general knowledge and domain expertise, enabling it to follow complex natural language instructions and accurately solve difficult problems. Its knowledge cutoff is April 2023, with a 128,000 token context window."
   },
   "openai/gpt-4.1": {
-    "description": "GPT
+    "description": "The GPT-4.1 series offers extended context and enhanced engineering and reasoning capabilities."
   },
   "openai/gpt-4.1-mini": {
-    "description": "GPT
+    "description": "GPT-4.1 Mini provides lower latency and better cost-efficiency, suitable for medium-context scenarios."
   },
   "openai/gpt-4.1-nano": {
-    "description": "GPT-4.1
+    "description": "GPT-4.1 Nano is a low-cost, low-latency option ideal for high-frequency short conversations or classification tasks."
   },
   "openai/gpt-4o": {
-    "description": "GPT-4o
+    "description": "The GPT-4o series is OpenAI’s Omni model, supporting text + image input and text output."
   },
   "openai/gpt-4o-mini": {
-    "description": "GPT-4o
+    "description": "GPT-4o-mini is a fast, compact version of GPT-4o, suitable for low-latency multimodal scenarios."
   },
   "openai/gpt-5": {
-    "description": "GPT-5 is OpenAI
+    "description": "GPT-5 is OpenAI’s high-performance model, suitable for a wide range of production and research tasks."
+  },
+  "openai/gpt-5-chat": {
+    "description": "GPT-5 Chat is a GPT-5 variant optimized for conversational scenarios, reducing latency to enhance interaction."
+  },
+  "openai/gpt-5-codex": {
+    "description": "GPT-5-Codex is a GPT-5 variant further optimized for coding tasks, ideal for large-scale code workflows."
   },
   "openai/gpt-5-mini": {
-    "description": "GPT-5
+    "description": "GPT-5 Mini is a compact version of the GPT-5 family, suitable for low-latency, low-cost scenarios."
   },
   "openai/gpt-5-nano": {
-    "description": "GPT-5
+    "description": "GPT-5 Nano is the ultra-compact version in the family, ideal for scenarios with strict cost and latency requirements."
+  },
+  "openai/gpt-5-pro": {
+    "description": "GPT-5 Pro is OpenAI’s flagship model, offering advanced reasoning, code generation, and enterprise-grade features, with support for test-time routing and stricter safety policies."
+  },
+  "openai/gpt-5.1": {
+    "description": "GPT-5.1 is the latest flagship model in the GPT-5 series, significantly improved in general reasoning, instruction following, and conversational naturalness, suitable for a wide range of tasks."
+  },
+  "openai/gpt-5.1-chat": {
+    "description": "GPT-5.1 Chat is a lightweight member of the GPT-5.1 family, optimized for low-latency conversations while retaining strong reasoning and instruction execution capabilities."
+  },
+  "openai/gpt-5.1-codex": {
+    "description": "GPT-5.1-Codex is a GPT-5.1 variant optimized for software engineering and coding workflows, ideal for large-scale refactoring, complex debugging, and long-term autonomous coding tasks."
+  },
+  "openai/gpt-5.1-codex-mini": {
+    "description": "GPT-5.1-Codex-Mini is a smaller, faster version of GPT-5.1-Codex, better suited for latency- and cost-sensitive coding scenarios."
   },
   "openai/gpt-oss-120b": {
     "description": "An extremely capable general-purpose large language model with powerful, controllable reasoning abilities."
```
```diff
@@ -2751,7 +2847,7 @@
     "description": "O3-mini high inference level version provides high intelligence at the same cost and latency targets as o1-mini."
   },
   "openai/o4-mini": {
-    "description": "OpenAI
+    "description": "OpenAI o4-mini is a compact and efficient reasoning model from OpenAI, ideal for low-latency scenarios."
   },
   "openai/o4-mini-high": {
     "description": "o4-mini high inference level version, optimized for fast and efficient inference, demonstrating high efficiency and performance in coding and visual tasks."
```
```diff
@@ -2955,7 +3051,7 @@
     "description": "A powerful medium-sized code model supporting 32K context length, proficient in multilingual programming."
   },
   "qwen/qwen3-14b": {
-    "description": "Qwen3-14B is
+    "description": "Qwen3-14B is the 14B version in the Qwen series, suitable for general reasoning and dialogue tasks."
   },
   "qwen/qwen3-14b:free": {
     "description": "Qwen3-14B is a dense 14.8 billion parameter causal language model in the Qwen3 series, designed for complex reasoning and efficient dialogue. It supports seamless switching between a 'thinking' mode for tasks such as mathematics, programming, and logical reasoning, and a 'non-thinking' mode for general conversation. This model is fine-tuned for instruction following, agent tool usage, creative writing, and multilingual tasks across more than 100 languages and dialects. It natively handles a 32K token context and can be extended to 131K tokens using YaRN."
```
```diff
@@ -2963,6 +3059,12 @@
   "qwen/qwen3-235b-a22b": {
     "description": "Qwen3-235B-A22B is a 235 billion parameter mixture of experts (MoE) model developed by Qwen, activating 22 billion parameters per forward pass. It supports seamless switching between a 'thinking' mode for complex reasoning, mathematics, and coding tasks, and a 'non-thinking' mode for general conversational efficiency. This model showcases strong reasoning capabilities, multilingual support (over 100 languages and dialects), advanced instruction following, and agent tool invocation capabilities. It natively handles a 32K token context window and can be extended to 131K tokens using YaRN."
   },
+  "qwen/qwen3-235b-a22b-2507": {
+    "description": "Qwen3-235B-A22B-Instruct-2507 is an Instruct version in the Qwen3 series, supporting multilingual instructions and long-context scenarios."
+  },
+  "qwen/qwen3-235b-a22b-thinking-2507": {
+    "description": "Qwen3-235B-A22B-Thinking-2507 is a Thinking variant of Qwen3, enhanced for complex math and reasoning tasks."
+  },
   "qwen/qwen3-235b-a22b:free": {
     "description": "Qwen3-235B-A22B is a 235 billion parameter mixture of experts (MoE) model developed by Qwen, activating 22 billion parameters per forward pass. It supports seamless switching between a 'thinking' mode for complex reasoning, mathematics, and coding tasks, and a 'non-thinking' mode for general conversational efficiency. This model showcases strong reasoning capabilities, multilingual support (over 100 languages and dialects), advanced instruction following, and agent tool invocation capabilities. It natively handles a 32K token context window and can be extended to 131K tokens using YaRN."
   },
```
```diff
@@ -2981,6 +3083,21 @@
   "qwen/qwen3-8b:free": {
     "description": "Qwen3-8B is a dense 8.2 billion parameter causal language model in the Qwen3 series, designed for reasoning-intensive tasks and efficient dialogue. It supports seamless switching between a 'thinking' mode for mathematics, coding, and logical reasoning, and a 'non-thinking' mode for general conversation. This model is fine-tuned for instruction following, agent integration, creative writing, and multilingual use across more than 100 languages and dialects. It natively supports a 32K token context window and can be extended to 131K tokens via YaRN."
   },
+  "qwen/qwen3-coder": {
+    "description": "Qwen3-Coder is the code generation family in Qwen3, excelling at understanding and generating code within long documents."
+  },
+  "qwen/qwen3-coder-plus": {
+    "description": "Qwen3-Coder-Plus is a specially optimized coding agent model in the Qwen series, supporting more complex tool usage and long-term conversations."
+  },
+  "qwen/qwen3-max": {
+    "description": "Qwen3 Max is a high-end reasoning model in the Qwen3 series, suitable for multilingual reasoning and tool integration."
+  },
+  "qwen/qwen3-max-preview": {
+    "description": "Qwen3 Max (preview) is the preview version of the Max model in the Qwen series, designed for advanced reasoning and tool integration."
+  },
+  "qwen/qwen3-vl-plus": {
+    "description": "Qwen3 VL-Plus is a vision-enhanced version of Qwen3, improving multimodal reasoning and video processing capabilities."
+  },
   "qwen2": {
     "description": "Qwen2 is Alibaba's next-generation large-scale language model, supporting diverse application needs with excellent performance."
   },
```
```diff
@@ -3275,9 +3392,6 @@
   "step-r1-v-mini": {
     "description": "This model is a powerful reasoning model with strong image understanding capabilities, able to process both image and text information, generating text content after deep reasoning. It excels in visual reasoning while also possessing first-tier capabilities in mathematics, coding, and text reasoning. The context length is 100k."
   },
-  "step3": {
-    "description": "Step3 is a multimodal model developed by StepStar, offering advanced visual understanding capabilities."
-  },
   "stepfun-ai/step3": {
     "description": "Step3 is a cutting-edge multimodal reasoning model released by StepFun. It is built on a mixture-of-experts (MoE) architecture with 321B total parameters and 38B active parameters. The model adopts an end-to-end design to minimize decoding cost while delivering top-tier performance in visual-language reasoning. Through the combined design of Multi-Matrix Factorized Attention (MFA) and Attention-FFN Decoupling (AFD), Step3 maintains exceptional efficiency on both high-end and low-end accelerators. During pretraining, Step3 processed over 20 trillion text tokens and 4 trillion image-text mixed tokens, covering more than a dozen languages. The model achieves leading performance among open-source models across benchmarks in mathematics, code, and multimodal tasks."
   },
```
```diff
@@ -3359,6 +3473,9 @@
   "vercel/v0-1.5-md": {
     "description": "Access the model behind v0 to generate, fix, and optimize modern web applications, with framework-specific reasoning and up-to-date knowledge."
   },
+  "volcengine/doubao-seed-code": {
+    "description": "Doubao-Seed-Code is a large model from ByteDance Volcano Engine optimized for Agentic Programming, excelling in various programming and agent benchmarks, supporting 256K context."
+  },
   "wan2.2-t2i-flash": {
     "description": "Wanxiang 2.2 Flash version, the latest model currently available. Fully upgraded in creativity, stability, and realism, with fast generation speed and high cost-effectiveness."
   },
```
```diff
@@ -3386,11 +3503,23 @@
   "wizardlm2:8x22b": {
     "description": "WizardLM 2 is a language model provided by Microsoft AI, excelling in complex dialogues, multilingual capabilities, reasoning, and intelligent assistant applications."
   },
+  "x-ai/grok-4": {
+    "description": "Grok 4 is xAI’s flagship reasoning model, offering powerful reasoning and multimodal capabilities."
+  },
   "x-ai/grok-4-fast": {
-    "description": "
+    "description": "Grok 4 Fast is xAI’s high-throughput, low-cost model (supports 2M context window), ideal for high-concurrency and long-context scenarios."
+  },
+  "x-ai/grok-4-fast-non-reasoning": {
+    "description": "Grok 4 Fast (Non-Reasoning) is xAI’s high-throughput, low-cost multimodal model (supports 2M context window), designed for latency- and cost-sensitive scenarios that do not require internal reasoning. It runs alongside the reasoning version of Grok 4 Fast and can enable reasoning via the API’s reasoning enable parameter. Prompts and completions may be used by xAI or OpenRouter to improve future models."
+  },
+  "x-ai/grok-4.1-fast": {
+    "description": "Grok 4 Fast is xAI’s high-throughput, low-cost model (supports 2M context window), ideal for high-concurrency and long-context scenarios."
+  },
+  "x-ai/grok-4.1-fast-non-reasoning": {
+    "description": "Grok 4 Fast (Non-Reasoning) is xAI’s high-throughput, low-cost multimodal model (supports 2M context window), designed for latency- and cost-sensitive scenarios that do not require internal reasoning. It runs alongside the reasoning version of Grok 4 Fast and can enable reasoning via the API’s reasoning enable parameter. Prompts and completions may be used by xAI or OpenRouter to improve future models."
   },
   "x-ai/grok-code-fast-1": {
-    "description": "
+    "description": "Grok Code Fast 1 is xAI’s fast code model, delivering readable and production-ready output."
   },
   "x1": {
     "description": "The Spark X1 model will undergo further upgrades, achieving results in reasoning, text generation, and language understanding tasks that match OpenAI o1 and DeepSeek R1, building on its leading position in domestic mathematical tasks."
```
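
The grok-4-fast-non-reasoning entries above say reasoning can be switched on "via the API’s reasoning enable parameter". Mirroring the earlier OpenRouter sketch, only the request body changes; the exact parameter shape is an assumption based on OpenRouter's reasoning docs:

```ts
// Sketch only: reuses the endpoint and headers from the earlier example.
const body = JSON.stringify({
  model: "x-ai/grok-4-fast-non-reasoning",
  messages: [{ role: "user", content: "Plan a three-step refactor." }],
  // Opt the non-reasoning variant back into internal reasoning.
  reasoning: { enabled: true },
});
```
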
```diff
@@ -3452,8 +3581,14 @@
   "yi-vision-v2": {
     "description": "A complex visual task model that provides high-performance understanding and analysis capabilities based on multiple images."
   },
+  "z-ai/glm-4.5": {
+    "description": "GLM 4.5 is Z.AI’s flagship model, supporting hybrid reasoning and optimized for engineering and long-context tasks."
+  },
+  "z-ai/glm-4.5-air": {
+    "description": "GLM 4.5 Air is a lightweight version of GLM 4.5, suitable for cost-sensitive scenarios while retaining strong reasoning capabilities."
+  },
   "z-ai/glm-4.6": {
-    "description": "GLM
+    "description": "GLM 4.6 is Z.AI’s flagship model, with extended context length and enhanced coding capabilities."
   },
   "zai-org/GLM-4.5": {
     "description": "GLM-4.5 is a foundational model designed specifically for agent applications, using a Mixture-of-Experts (MoE) architecture. It is deeply optimized for tool invocation, web browsing, software engineering, and front-end programming, supporting seamless integration with code agents like Claude Code and Roo Code. GLM-4.5 employs a hybrid inference mode, adaptable to complex reasoning and everyday use scenarios."
```
```diff
@@ -3475,5 +3610,8 @@
   },
   "zai/glm-4.5v": {
     "description": "GLM-4.5V is built on the GLM-4.5-Air foundational model, inheriting the proven techniques of GLM-4.1V-Thinking while achieving efficient scaling through a powerful 106 billion parameter MoE architecture."
+  },
+  "zenmux/auto": {
+    "description": "ZenMux’s auto-routing feature automatically selects the best-performing and most cost-effective model from supported options based on your request."
   }
 }
```
```diff
@@ -191,6 +191,9 @@
   "xinference": {
     "description": "Xorbits Inference (Xinference) is an open-source platform designed to simplify the deployment and integration of diverse AI models. With Xinference, you can leverage any open-source LLM, embedding model, or multimodal model to perform inference in cloud or on-premises environments, enabling the creation of powerful AI applications."
   },
+  "zenmux": {
+    "description": "ZenMux is a unified AI service aggregation platform that supports a variety of mainstream AI service interfaces, including OpenAI, Anthropic, and Google VertexAI. It offers flexible routing capabilities, allowing you to easily switch between and manage different AI models."
+  },
   "zeroone": {
     "description": "01.AI focuses on AI 2.0 era technologies, vigorously promoting the innovation and application of 'human + artificial intelligence', using powerful models and advanced AI technologies to enhance human productivity and achieve technological empowerment."
   },
```
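
The new `zenmux` provider entry above describes an aggregation layer with routing, and the models diff adds a matching `zenmux/auto` virtual model. A minimal usage sketch ties the two together; it assumes ZenMux exposes an OpenAI-compatible endpoint, and the base URL and environment variable names are illustrative, not taken from this diff:

```ts
import OpenAI from "openai";

// Assumed OpenAI-compatible gateway; verify the real base URL in ZenMux's docs.
const client = new OpenAI({
  baseURL: "https://zenmux.ai/api/v1",
  apiKey: process.env.ZENMUX_API_KEY ?? "",
});

const completion = await client.chat.completions.create({
  // "zenmux/auto" asks the platform to pick the best-performing,
  // most cost-effective model for this request.
  model: "zenmux/auto",
  messages: [{ role: "user", content: "Summarize this changelog entry in one line." }],
});
console.log(completion.choices[0].message.content);
```
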
package/locales/es-ES/image.json CHANGED