@lobehub/chat 1.43.6 → 1.44.0

This diff shows the changes between publicly available package versions as they appear in their respective public registries. It is provided for informational purposes only.
Files changed (298)
  1. package/CHANGELOG.md +25 -0
  2. package/changelog/v1.json +9 -0
  3. package/docs/self-hosting/server-database/docker-compose.mdx +2 -2
  4. package/locales/ar/common.json +1 -0
  5. package/locales/ar/modelProvider.json +176 -0
  6. package/locales/ar/setting.json +1 -0
  7. package/locales/bg-BG/common.json +1 -0
  8. package/locales/bg-BG/modelProvider.json +176 -0
  9. package/locales/bg-BG/setting.json +1 -0
  10. package/locales/de-DE/common.json +1 -0
  11. package/locales/de-DE/modelProvider.json +176 -0
  12. package/locales/de-DE/setting.json +1 -0
  13. package/locales/en-US/common.json +1 -0
  14. package/locales/en-US/modelProvider.json +176 -0
  15. package/locales/en-US/setting.json +1 -0
  16. package/locales/es-ES/common.json +1 -0
  17. package/locales/es-ES/modelProvider.json +176 -0
  18. package/locales/es-ES/setting.json +1 -0
  19. package/locales/fa-IR/common.json +1 -0
  20. package/locales/fa-IR/modelProvider.json +176 -0
  21. package/locales/fa-IR/setting.json +1 -0
  22. package/locales/fr-FR/common.json +1 -0
  23. package/locales/fr-FR/modelProvider.json +176 -0
  24. package/locales/fr-FR/setting.json +1 -0
  25. package/locales/it-IT/common.json +1 -0
  26. package/locales/it-IT/modelProvider.json +176 -0
  27. package/locales/it-IT/setting.json +1 -0
  28. package/locales/ja-JP/common.json +1 -0
  29. package/locales/ja-JP/modelProvider.json +176 -0
  30. package/locales/ja-JP/setting.json +1 -0
  31. package/locales/ko-KR/common.json +1 -0
  32. package/locales/ko-KR/modelProvider.json +176 -0
  33. package/locales/ko-KR/setting.json +1 -0
  34. package/locales/nl-NL/common.json +1 -0
  35. package/locales/nl-NL/modelProvider.json +176 -0
  36. package/locales/nl-NL/setting.json +1 -0
  37. package/locales/pl-PL/common.json +1 -0
  38. package/locales/pl-PL/modelProvider.json +176 -0
  39. package/locales/pl-PL/setting.json +1 -0
  40. package/locales/pt-BR/common.json +1 -0
  41. package/locales/pt-BR/modelProvider.json +176 -0
  42. package/locales/pt-BR/setting.json +1 -0
  43. package/locales/ru-RU/common.json +1 -0
  44. package/locales/ru-RU/modelProvider.json +176 -0
  45. package/locales/ru-RU/setting.json +1 -0
  46. package/locales/tr-TR/common.json +1 -0
  47. package/locales/tr-TR/modelProvider.json +176 -0
  48. package/locales/tr-TR/setting.json +1 -0
  49. package/locales/vi-VN/common.json +1 -0
  50. package/locales/vi-VN/modelProvider.json +176 -0
  51. package/locales/vi-VN/setting.json +1 -0
  52. package/locales/zh-CN/common.json +1 -0
  53. package/locales/zh-CN/modelProvider.json +176 -0
  54. package/locales/zh-CN/setting.json +1 -0
  55. package/locales/zh-TW/common.json +1 -0
  56. package/locales/zh-TW/modelProvider.json +176 -0
  57. package/locales/zh-TW/setting.json +1 -0
  58. package/package.json +4 -4
  59. package/src/app/(main)/(mobile)/me/settings/features/Category.tsx +1 -1
  60. package/src/app/(main)/(mobile)/me/settings/features/useCategory.tsx +12 -5
  61. package/src/app/(main)/changelog/features/VersionTag.tsx +1 -2
  62. package/src/app/(main)/chat/(workspace)/@conversation/features/ChatList/ChatItem/Thread.tsx +1 -1
  63. package/src/app/(main)/chat/(workspace)/@conversation/features/ChatList/ChatItem/ThreadItem.tsx +1 -2
  64. package/src/app/(main)/chat/(workspace)/@conversation/features/ChatList/ChatItem/index.tsx +0 -1
  65. package/src/app/(main)/chat/(workspace)/@conversation/features/ChatList/WelcomeChatItem/InboxWelcome/AgentsSuggest.tsx +1 -1
  66. package/src/app/(main)/chat/(workspace)/@conversation/features/ChatList/WelcomeChatItem/InboxWelcome/QuestionSuggest.tsx +1 -1
  67. package/src/app/(main)/chat/(workspace)/@conversation/features/ZenModeToast/Toast.tsx +1 -1
  68. package/src/app/(main)/chat/(workspace)/@topic/features/TopicListContent/ThreadItem/index.tsx +0 -2
  69. package/src/app/(main)/chat/(workspace)/@topic/features/TopicListContent/TopicItem/index.tsx +0 -1
  70. package/src/app/(main)/chat/(workspace)/_layout/Desktop/ChatHeader/Tags.tsx +2 -3
  71. package/src/app/(main)/chat/@session/features/SessionListContent/CollapseGroup/index.tsx +1 -1
  72. package/src/app/(main)/chat/features/Migration/Start.tsx +1 -1
  73. package/src/app/(main)/discover/(detail)/assistant/[slug]/features/ConversationExample/TopicList.tsx +4 -3
  74. package/src/app/(main)/discover/(detail)/assistant/[slug]/features/Header.tsx +1 -1
  75. package/src/app/(main)/discover/(detail)/features/ShareButton.tsx +2 -1
  76. package/src/app/(main)/discover/(detail)/model/[...slugs]/features/Header.tsx +1 -1
  77. package/src/app/(main)/discover/(detail)/plugin/[slug]/features/Header.tsx +1 -1
  78. package/src/app/(main)/discover/(detail)/provider/[slug]/features/Header.tsx +1 -1
  79. package/src/app/(main)/discover/(list)/_layout/Desktop/Nav.tsx +0 -1
  80. package/src/app/(main)/discover/(list)/assistants/features/Card.tsx +1 -1
  81. package/src/app/(main)/discover/(list)/models/features/Card.tsx +1 -1
  82. package/src/app/(main)/discover/(list)/plugins/features/Card.tsx +1 -1
  83. package/src/app/(main)/discover/(list)/providers/features/Card.tsx +1 -1
  84. package/src/app/(main)/discover/components/GridLoadingCard.tsx +2 -1
  85. package/src/app/(main)/discover/components/Title.tsx +1 -1
  86. package/src/app/(main)/files/(content)/@menu/features/KnowledgeBase/EmptyStatus.tsx +1 -1
  87. package/src/app/(main)/files/(content)/@menu/features/KnowledgeBase/Item/index.tsx +0 -1
  88. package/src/app/(main)/files/(content)/@modal/(.)[id]/FullscreenModal.tsx +2 -2
  89. package/src/app/(main)/files/(content)/NotSupportClient.tsx +2 -2
  90. package/src/app/(main)/profile/_layout/Desktop/SideBar.tsx +1 -1
  91. package/src/app/(main)/profile/stats/features/ShareButton/Preview.tsx +5 -5
  92. package/src/app/(main)/repos/[id]/evals/components/Container.tsx +1 -1
  93. package/src/app/(main)/repos/[id]/evals/dataset/DatasetList/Item.tsx +0 -1
  94. package/src/app/(main)/settings/_layout/Desktop/SideBar.tsx +1 -1
  95. package/src/app/(main)/settings/about/features/ItemCard.tsx +3 -3
  96. package/src/app/(main)/settings/about/features/Version.tsx +1 -1
  97. package/src/app/(main)/settings/hooks/useCategory.tsx +22 -9
  98. package/src/app/(main)/settings/llm/components/ProviderConfig/index.tsx +2 -1
  99. package/src/app/(main)/settings/llm/components/ProviderModelList/ModelFetcher.tsx +0 -1
  100. package/src/app/(main)/settings/provider/(detail)/[id]/index.tsx +19 -0
  101. package/src/app/(main)/settings/provider/(detail)/[id]/page.tsx +95 -0
  102. package/src/app/(main)/settings/provider/(detail)/azure/page.tsx +119 -0
  103. package/src/app/(main)/settings/provider/(detail)/bedrock/page.tsx +91 -0
  104. package/src/app/(main)/settings/provider/(detail)/cloudflare/page.tsx +58 -0
  105. package/src/app/(main)/settings/provider/(detail)/github/page.tsx +67 -0
  106. package/src/app/(main)/settings/provider/(detail)/huggingface/page.tsx +67 -0
  107. package/src/app/(main)/settings/provider/(detail)/ollama/Checker.tsx +73 -0
  108. package/src/app/(main)/settings/provider/(detail)/ollama/page.tsx +34 -0
  109. package/src/app/(main)/settings/provider/(detail)/openai/page.tsx +23 -0
  110. package/src/app/(main)/settings/provider/(detail)/wenxin/page.tsx +61 -0
  111. package/src/app/(main)/settings/provider/(list)/Footer.tsx +36 -0
  112. package/src/app/(main)/settings/provider/(list)/ProviderGrid/Card.tsx +134 -0
  113. package/src/app/(main)/settings/provider/(list)/ProviderGrid/index.tsx +91 -0
  114. package/src/app/(main)/settings/provider/(list)/index.tsx +19 -0
  115. package/src/app/(main)/settings/provider/ProviderMenu/AddNew.tsx +28 -0
  116. package/src/app/(main)/settings/provider/ProviderMenu/All.tsx +29 -0
  117. package/src/app/(main)/settings/provider/ProviderMenu/Item.tsx +69 -0
  118. package/src/app/(main)/settings/provider/ProviderMenu/List.tsx +76 -0
  119. package/src/app/(main)/settings/provider/ProviderMenu/SearchResult.tsx +43 -0
  120. package/src/app/(main)/settings/provider/ProviderMenu/SkeletonList.tsx +60 -0
  121. package/src/app/(main)/settings/provider/ProviderMenu/SortProviderModal/GroupItem.tsx +30 -0
  122. package/src/app/(main)/settings/provider/ProviderMenu/SortProviderModal/index.tsx +91 -0
  123. package/src/app/(main)/settings/provider/ProviderMenu/index.tsx +80 -0
  124. package/src/app/(main)/settings/provider/_layout/Desktop.tsx +37 -0
  125. package/src/app/(main)/settings/provider/_layout/Mobile.tsx +14 -0
  126. package/src/app/(main)/settings/provider/const.ts +20 -0
  127. package/src/app/(main)/settings/provider/features/CreateNewProvider/index.tsx +146 -0
  128. package/src/app/(main)/settings/provider/features/ModelList/CreateNewModelModal/Form.tsx +105 -0
  129. package/src/app/(main)/settings/provider/features/ModelList/CreateNewModelModal/index.tsx +69 -0
  130. package/src/app/(main)/settings/provider/features/ModelList/DisabledModels.tsx +29 -0
  131. package/src/app/(main)/settings/provider/features/ModelList/EmptyModels.tsx +101 -0
  132. package/src/app/(main)/settings/provider/features/ModelList/EnabledModelList/index.tsx +85 -0
  133. package/src/app/(main)/settings/provider/features/ModelList/ModelConfigModal/Form.tsx +109 -0
  134. package/src/app/(main)/settings/provider/features/ModelList/ModelConfigModal/index.tsx +76 -0
  135. package/src/app/(main)/settings/provider/features/ModelList/ModelItem.tsx +346 -0
  136. package/src/app/(main)/settings/provider/features/ModelList/ModelTitle/Search.tsx +37 -0
  137. package/src/app/(main)/settings/provider/features/ModelList/ModelTitle/index.tsx +145 -0
  138. package/src/app/(main)/settings/provider/features/ModelList/SearchResult.tsx +67 -0
  139. package/src/app/(main)/settings/provider/features/ModelList/SkeletonList.tsx +63 -0
  140. package/src/app/(main)/settings/provider/features/ModelList/SortModelModal/ListItem.tsx +20 -0
  141. package/src/app/(main)/settings/provider/features/ModelList/SortModelModal/index.tsx +96 -0
  142. package/src/app/(main)/settings/provider/features/ModelList/index.tsx +59 -0
  143. package/src/app/(main)/settings/provider/features/ProviderConfig/Checker.tsx +120 -0
  144. package/src/app/(main)/settings/provider/features/ProviderConfig/SkeletonInput.tsx +5 -0
  145. package/src/app/(main)/settings/provider/features/ProviderConfig/UpdateProviderInfo/SettingModal.tsx +137 -0
  146. package/src/app/(main)/settings/provider/features/ProviderConfig/UpdateProviderInfo/index.tsx +49 -0
  147. package/src/app/(main)/settings/provider/features/ProviderConfig/index.tsx +343 -0
  148. package/src/app/(main)/settings/provider/layout.tsx +21 -0
  149. package/src/app/(main)/settings/provider/page.tsx +17 -0
  150. package/src/app/(main)/settings/provider/type.ts +5 -0
  151. package/src/app/(main)/settings/sync/features/DeviceInfo/Card.tsx +1 -1
  152. package/src/app/(main)/settings/sync/features/DeviceInfo/index.tsx +1 -1
  153. package/src/app/@modal/(.)changelog/modal/features/ReadDetail.tsx +1 -1
  154. package/src/app/@modal/(.)changelog/modal/features/VersionTag.tsx +1 -2
  155. package/src/app/@modal/(.)changelog/modal/layout.tsx +1 -1
  156. package/src/components/Cell/index.tsx +1 -1
  157. package/src/components/DragUpload/index.tsx +2 -3
  158. package/src/components/FeatureList/index.tsx +1 -1
  159. package/src/components/FileParsingStatus/EmbeddingStatus.tsx +1 -1
  160. package/src/components/FileParsingStatus/index.tsx +1 -1
  161. package/src/components/FunctionModal/style.tsx +2 -2
  162. package/src/components/GoBack/index.tsx +1 -2
  163. package/src/components/HotKeys/index.tsx +1 -1
  164. package/src/components/InstantSwitch/index.tsx +28 -0
  165. package/src/components/Menu/index.tsx +1 -1
  166. package/src/components/ModelSelect/index.tsx +2 -3
  167. package/src/components/Notification/index.tsx +2 -1
  168. package/src/components/StatisticCard/index.tsx +5 -6
  169. package/src/config/aiModels/ai21.ts +38 -0
  170. package/src/config/aiModels/ai360.ts +71 -0
  171. package/src/config/aiModels/anthropic.ts +152 -0
  172. package/src/config/aiModels/azure.ts +86 -0
  173. package/src/config/aiModels/baichuan.ts +107 -0
  174. package/src/config/aiModels/bedrock.ts +315 -0
  175. package/src/config/aiModels/cloudflare.ts +88 -0
  176. package/src/config/aiModels/deepseek.ts +27 -0
  177. package/src/config/aiModels/fireworksai.ts +232 -0
  178. package/src/config/aiModels/giteeai.ts +137 -0
  179. package/src/config/aiModels/github.ts +273 -0
  180. package/src/config/aiModels/google.ts +317 -0
  181. package/src/config/aiModels/groq.ts +202 -0
  182. package/src/config/aiModels/higress.ts +2828 -0
  183. package/src/config/aiModels/huggingface.ts +56 -0
  184. package/src/config/aiModels/hunyuan.ts +151 -0
  185. package/src/config/aiModels/index.ts +98 -0
  186. package/src/config/aiModels/internlm.ts +40 -0
  187. package/src/config/aiModels/minimax.ts +55 -0
  188. package/src/config/aiModels/mistral.ts +172 -0
  189. package/src/config/aiModels/moonshot.ts +44 -0
  190. package/src/config/aiModels/novita.ts +124 -0
  191. package/src/config/aiModels/ollama.ts +412 -0
  192. package/src/config/aiModels/openai.ts +537 -0
  193. package/src/config/aiModels/openrouter.ts +252 -0
  194. package/src/config/aiModels/perplexity.ts +67 -0
  195. package/src/config/aiModels/qwen.ts +302 -0
  196. package/src/config/aiModels/sensenova.ts +114 -0
  197. package/src/config/aiModels/siliconcloud.ts +679 -0
  198. package/src/config/aiModels/spark.ts +68 -0
  199. package/src/config/aiModels/stepfun.ts +153 -0
  200. package/src/config/aiModels/taichu.ts +19 -0
  201. package/src/config/aiModels/togetherai.ts +334 -0
  202. package/src/config/aiModels/upstage.ts +37 -0
  203. package/src/config/aiModels/wenxin.ts +171 -0
  204. package/src/config/aiModels/xai.ts +72 -0
  205. package/src/config/aiModels/zeroone.ts +156 -0
  206. package/src/config/aiModels/zhipu.ts +235 -0
  207. package/src/config/featureFlags/schema.ts +3 -0
  208. package/src/config/modelProviders/anthropic.ts +1 -0
  209. package/src/config/modelProviders/github.ts +0 -1
  210. package/src/config/modelProviders/google.ts +1 -0
  211. package/src/config/modelProviders/stepfun.ts +2 -0
  212. package/src/database/migrations/0013_add_ai_infra.sql +44 -0
  213. package/src/database/migrations/meta/0013_snapshot.json +3598 -0
  214. package/src/database/migrations/meta/_journal.json +7 -0
  215. package/src/database/repositories/aiInfra/index.ts +115 -0
  216. package/src/database/schemas/aiInfra.ts +69 -0
  217. package/src/database/schemas/index.ts +1 -0
  218. package/src/database/server/models/__tests__/aiModel.test.ts +318 -0
  219. package/src/database/server/models/__tests__/aiProvider.test.ts +373 -0
  220. package/src/database/server/models/aiModel.ts +250 -0
  221. package/src/database/server/models/aiProvider.ts +234 -0
  222. package/src/features/AgentSetting/AgentPrompt/index.tsx +2 -2
  223. package/src/features/ChatInput/ActionBar/Token/TokenTag.tsx +2 -1
  224. package/src/features/ChatInput/ActionBar/Tools/index.tsx +2 -3
  225. package/src/features/ChatInput/ActionBar/Upload/ServerMode.tsx +2 -3
  226. package/src/features/ChatInput/Desktop/FilePreview/FileItem/index.tsx +3 -2
  227. package/src/features/ChatInput/Desktop/FilePreview/FileList.tsx +2 -2
  228. package/src/features/ChatInput/Mobile/Files/FileItem/File.tsx +2 -2
  229. package/src/features/ChatInput/Mobile/InputArea/index.tsx +1 -1
  230. package/src/features/ChatInput/STT/common.tsx +1 -1
  231. package/src/features/Conversation/Error/style.tsx +2 -2
  232. package/src/features/Conversation/Messages/Assistant/FileChunks/Item/style.ts +2 -2
  233. package/src/features/Conversation/Messages/Assistant/FileChunks/index.tsx +1 -1
  234. package/src/features/Conversation/Messages/Assistant/ToolCallItem/Inspector/style.ts +2 -3
  235. package/src/features/Conversation/Messages/Assistant/ToolCallItem/style.ts +2 -3
  236. package/src/features/Conversation/Messages/User/FileListViewer/Item.tsx +0 -1
  237. package/src/features/Conversation/components/BackBottom/style.ts +2 -2
  238. package/src/features/Conversation/components/MarkdownElements/LobeArtifact/Render/Icon.tsx +2 -3
  239. package/src/features/Conversation/components/MarkdownElements/LobeArtifact/Render/index.tsx +3 -3
  240. package/src/features/Conversation/components/MarkdownElements/LobeThinking/Render.tsx +1 -1
  241. package/src/features/Conversation/components/OTPInput.tsx +2 -2
  242. package/src/features/DataImporter/Loading.tsx +1 -1
  243. package/src/features/FileManager/FileList/EmptyStatus.tsx +1 -1
  244. package/src/features/FileManager/FileList/index.tsx +1 -1
  245. package/src/features/FileManager/UploadDock/Item.tsx +1 -1
  246. package/src/features/FileManager/UploadDock/index.tsx +4 -4
  247. package/src/features/FileViewer/NotSupport/index.tsx +1 -1
  248. package/src/features/FileViewer/Renderer/MSDoc/index.tsx +0 -1
  249. package/src/features/FileViewer/Renderer/TXT/index.tsx +1 -1
  250. package/src/features/InitClientDB/EnableModal.tsx +1 -1
  251. package/src/features/InitClientDB/ErrorResult.tsx +1 -1
  252. package/src/features/InitClientDB/InitIndicator.tsx +1 -1
  253. package/src/features/KnowledgeBaseModal/AddFilesToKnowledgeBase/SelectForm.tsx +0 -1
  254. package/src/features/ModelSwitchPanel/index.tsx +2 -2
  255. package/src/features/PluginsUI/Render/Loading.tsx +0 -1
  256. package/src/features/Portal/Home/Body/Files/FileList/Item.tsx +1 -1
  257. package/src/features/Portal/Home/Body/Plugins/ArtifactList/Item/style.ts +1 -2
  258. package/src/features/Setting/SettingContainer.tsx +8 -1
  259. package/src/features/ShareModal/ShareImage/style.ts +2 -2
  260. package/src/features/ShareModal/style.ts +2 -2
  261. package/src/features/User/DataStatistics.tsx +1 -1
  262. package/src/hooks/useEnabledChatModels.ts +10 -1
  263. package/src/hooks/useModelSupportToolUse.ts +15 -0
  264. package/src/hooks/useModelSupportVision.ts +15 -0
  265. package/src/layout/AuthProvider/Clerk/useAppearance.ts +3 -3
  266. package/src/layout/GlobalProvider/AppTheme.tsx +1 -1
  267. package/src/layout/GlobalProvider/StoreInitialization.tsx +5 -0
  268. package/src/locales/default/common.ts +1 -0
  269. package/src/locales/default/modelProvider.ts +178 -0
  270. package/src/locales/default/setting.ts +1 -0
  271. package/src/server/modules/KeyVaultsEncrypt/index.ts +1 -1
  272. package/src/server/routers/lambda/aiModel.ts +128 -0
  273. package/src/server/routers/lambda/aiProvider.ts +127 -0
  274. package/src/server/routers/lambda/index.ts +4 -0
  275. package/src/services/__tests__/_auth.test.ts +16 -49
  276. package/src/services/__tests__/chat.test.ts +2 -0
  277. package/src/services/_auth.ts +42 -25
  278. package/src/services/aiModel.ts +52 -0
  279. package/src/services/aiProvider.ts +47 -0
  280. package/src/services/chat.ts +62 -18
  281. package/src/store/aiInfra/index.ts +2 -0
  282. package/src/store/aiInfra/initialState.ts +11 -0
  283. package/src/store/aiInfra/selectors.ts +2 -0
  284. package/src/store/aiInfra/slices/aiModel/action.ts +146 -0
  285. package/src/store/aiInfra/slices/aiModel/index.ts +3 -0
  286. package/src/store/aiInfra/slices/aiModel/initialState.ts +14 -0
  287. package/src/store/aiInfra/slices/aiModel/selectors.ts +63 -0
  288. package/src/store/aiInfra/slices/aiProvider/action.ts +208 -0
  289. package/src/store/aiInfra/slices/aiProvider/index.ts +3 -0
  290. package/src/store/aiInfra/slices/aiProvider/initialState.ts +32 -0
  291. package/src/store/aiInfra/slices/aiProvider/selectors.ts +99 -0
  292. package/src/store/aiInfra/store.ts +25 -0
  293. package/src/store/global/initialState.ts +1 -0
  294. package/src/store/serverConfig/selectors.test.ts +1 -0
  295. package/src/styles/global.ts +1 -1
  296. package/src/types/aiModel.ts +32 -6
  297. package/src/types/aiProvider.ts +11 -4
  298. package/src/utils/fetch/fetchSSE.ts +3 -1
package/src/config/aiModels/bedrock.ts
@@ -0,0 +1,315 @@
+ import { AIChatModelCard } from '@/types/aiModel';
+
+ const bedrockChatModels: AIChatModelCard[] = [
+   /*
+   // TODO: Not support for now
+   {
+     description: '亚马逊 Titan Text Lite 是一款轻量级高效模型,非常适合对英语任务进行微调,包括总结和文案编写等,客户希望有一个更小、更经济的模型,同时也非常可定制。',
+     displayName: 'Titan Text G1 - Lite',
+     id: 'amazon.titan-text-lite-v1',
+     tokens: 4000,
+   },
+   {
+     description: '亚马逊 Titan Text Express 的上下文长度可达 8,000 个标记,非常适合广泛的高级通用语言任务,如开放式文本生成和对话聊天,以及在检索增强生成 (RAG) 中的支持。在推出时,该模型针对英语进行了优化,预览版还支持其他 100 多种语言。',
+     displayName: 'Titan Text G1 - Express',
+     id: 'amazon.titan-text-express-v1',
+     tokens: 8000,
+   },
+   {
+     description: 'Titan Text Premier 是 Titan Text 系列中一款强大的先进模型,旨在为广泛的企业应用提供卓越的性能。凭借其尖端能力,它提供了更高的准确性和卓越的结果,是寻求一流文本处理解决方案的组织的绝佳选择。',
+     displayName: 'Titan Text G1 - Premier',
+     id: 'amazon.titan-text-premier-v1:0',
+     tokens: 32_000,
+   },
+   */
+   {
+     abilities: {
+       functionCall: true,
+       vision: true,
+     },
+     contextWindowTokens: 200_000,
+     description:
+       'Claude 3.5 Sonnet 提升了行业标准,性能超过竞争对手模型和 Claude 3 Opus,在广泛的评估中表现出色,同时具有我们中等层级模型的速度和成本。',
+     displayName: 'Claude 3.5 Sonnet',
+     enabled: true,
+     id: 'anthropic.claude-3-5-sonnet-20241022-v2:0',
+     pricing: {
+       input: 3,
+       output: 15,
+     },
+     releasedAt: '2024-10-22',
+     type: 'chat',
+   },
+   {
+     abilities: {
+       functionCall: true,
+       vision: true,
+     },
+     contextWindowTokens: 200_000,
+     description:
+       'Claude 3.5 Sonnet 提升了行业标准,性能超过竞争对手模型和 Claude 3 Opus,在广泛的评估中表现出色,同时具有我们中等层级模型的速度和成本。',
+     displayName: 'Claude 3.5 Sonnet v2 (Inference profile)',
+     enabled: true,
+     id: 'us.anthropic.claude-3-5-sonnet-20241022-v2:0',
+     maxOutput: 4096,
+     pricing: {
+       input: 3,
+       output: 15,
+     },
+     releasedAt: '2024-10-22',
+     type: 'chat',
+   },
+   {
+     abilities: {
+       functionCall: true,
+       vision: true,
+     },
+     contextWindowTokens: 200_000,
+     description:
+       'Claude 3.5 Sonnet 提升了行业标准,性能超过竞争对手模型和 Claude 3 Opus,在广泛的评估中表现出色,同时具有我们中等层级模型的速度和成本。',
+     displayName: 'Claude 3.5 Sonnet 0620',
+     enabled: true,
+
+     id: 'anthropic.claude-3-5-sonnet-20240620-v1:0',
+     pricing: {
+       input: 3,
+       output: 15,
+     },
+     releasedAt: '2024-06-20',
+     type: 'chat',
+   },
+   {
+     abilities: {
+       functionCall: true,
+       vision: true,
+     },
+     contextWindowTokens: 200_000,
+     description:
+       'Claude 3 Haiku 是 Anthropic 最快、最紧凑的模型,提供近乎即时的响应速度。它可以快速回答简单的查询和请求。客户将能够构建模仿人类互动的无缝 AI 体验。Claude 3 Haiku 可以处理图像并返回文本输出,具有 200K 的上下文窗口。',
+     displayName: 'Claude 3 Haiku',
+     enabled: true,
+     id: 'anthropic.claude-3-haiku-20240307-v1:0',
+     maxOutput: 4096,
+     pricing: {
+       input: 0.25,
+       output: 1.25,
+     },
+     releasedAt: '2024-03-07',
+     type: 'chat',
+   },
+   {
+     abilities: {
+       functionCall: true,
+       vision: true,
+     },
+     contextWindowTokens: 200_000,
+     description:
+       'Claude 3.5 Sonnet 提升了行业标准,性能超过竞争对手模型和 Claude 3 Opus,在广泛的评估中表现出色,同时具有我们中等层级模型的速度和成本。',
+     displayName: 'Claude 3.5 Sonnet',
+     enabled: true,
+     id: 'anthropic.claude-3-5-sonnet-20241022-v2:0',
+     pricing: {
+       input: 3,
+       output: 15,
+     },
+     type: 'chat',
+   },
+   {
+     abilities: {
+       functionCall: true,
+       vision: true,
+     },
+     contextWindowTokens: 200_000,
+     description:
+       'Claude 3.5 Sonnet 提升了行业标准,性能超过竞争对手模型和 Claude 3 Opus,在广泛的评估中表现出色,同时具有我们中等层级模型的速度和成本。',
+     displayName: 'Claude 3.5 Sonnet v2 (Inference profile)',
+     enabled: true,
+     id: 'us.anthropic.claude-3-5-sonnet-20241022-v2:0',
+     pricing: {
+       input: 3,
+       output: 15,
+     },
+     type: 'chat',
+   },
+   {
+     abilities: {
+       functionCall: true,
+       vision: true,
+     },
+     contextWindowTokens: 200_000,
+     description:
+       'Claude 3.5 Sonnet 提升了行业标准,性能超过竞争对手模型和 Claude 3 Opus,在广泛的评估中表现出色,同时具有我们中等层级模型的速度和成本。',
+     displayName: 'Claude 3.5 Sonnet 0620',
+     enabled: true,
+     id: 'anthropic.claude-3-5-sonnet-20240620-v1:0',
+     pricing: {
+       input: 3,
+       output: 15,
+     },
+     type: 'chat',
+   },
+   {
+     abilities: {
+       functionCall: true,
+       vision: true,
+     },
+     contextWindowTokens: 200_000,
+     description:
+       'Claude 3 Haiku 是 Anthropic 最快、最紧凑的模型,提供近乎即时的响应速度。它可以快速回答简单的查询和请求。客户将能够构建模仿人类互动的无缝 AI 体验。Claude 3 Haiku 可以处理图像并返回文本输出,具有 200K 的上下文窗口。',
+     displayName: 'Claude 3 Haiku',
+     enabled: true,
+     id: 'anthropic.claude-3-haiku-20240307-v1:0',
+     pricing: {
+       input: 0.25,
+       output: 1.25,
+     },
+     type: 'chat',
+   },
+   {
+     abilities: {
+       functionCall: true,
+       vision: true,
+     },
+     contextWindowTokens: 200_000,
+     description:
+       'Anthropic 的 Claude 3 Sonnet 在智能和速度之间达到了理想的平衡——特别适合企业工作负载。它以低于竞争对手的价格提供最大的效用,并被设计成为可靠的、高耐用的主力机,适用于规模化的 AI 部署。Claude 3 Sonnet 可以处理图像并返回文本输出,具有 200K 的上下文窗口。',
+     displayName: 'Claude 3 Sonnet',
+     enabled: true,
+     id: 'anthropic.claude-3-sonnet-20240229-v1:0',
+     pricing: {
+       input: 3,
+       output: 15,
+     },
+     type: 'chat',
+   },
+   {
+     abilities: {
+       functionCall: true,
+     },
+     contextWindowTokens: 200_000,
+     description:
+       'Claude 3 Opus 是 Anthropic 最强大的 AI 模型,具有在高度复杂任务上的最先进性能。它可以处理开放式提示和未见过的场景,具有出色的流畅性和类人的理解能力。Claude 3 Opus 展示了生成 AI 可能性的前沿。Claude 3 Opus 可以处理图像并返回文本输出,具有 200K 的上下文窗口。',
+     displayName: 'Claude 3 Opus',
+     enabled: true,
+     id: 'anthropic.claude-3-opus-20240229-v1:0',
+     maxOutput: 4096,
+     pricing: {
+       input: 15,
+       output: 75,
+     },
+     releasedAt: '2024-02-29',
+     type: 'chat',
+   },
+   {
+     contextWindowTokens: 200_000,
+     description:
+       'Claude 2 的更新版,具有双倍的上下文窗口,以及在长文档和 RAG 上下文中的可靠性、幻觉率和基于证据的准确性的改进。',
+     displayName: 'Claude 2.1',
+     id: 'anthropic.claude-v2:1',
+     pricing: {
+       input: 8,
+       output: 24,
+     },
+     type: 'chat',
+   },
+   {
+     contextWindowTokens: 100_000,
+     description:
+       'Anthropic 在从复杂对话和创意内容生成到详细指令跟随的广泛任务中都表现出高度能力的模型。',
+     displayName: 'Claude 2.0',
+     id: 'anthropic.claude-v2',
+     pricing: {
+       input: 8,
+       output: 24,
+     },
+     type: 'chat',
+   },
+   {
+     contextWindowTokens: 100_000,
+     description:
+       '一款快速、经济且仍然非常有能力的模型,可以处理包括日常对话、文本分析、总结和文档问答在内的一系列任务。',
+     displayName: 'Claude Instant',
+     id: 'anthropic.claude-instant-v1',
+     pricing: {
+       input: 0.8,
+       output: 2.4,
+     },
+     type: 'chat',
+   },
+   {
+     abilities: {
+       functionCall: true,
+     },
+     contextWindowTokens: 128_000,
+     description:
+       'Meta Llama 3.1 8B Instruct 的更新版,包括扩展的 128K 上下文长度、多语言性和改进的推理能力。Llama 3.1 提供的多语言大型语言模型 (LLMs) 是一组预训练的、指令调整的生成模型,包括 8B、70B 和 405B 大小 (文本输入/输出)。Llama 3.1 指令调整的文本模型 (8B、70B、405B) 专为多语言对话用例进行了优化,并在常见的行业基准测试中超过了许多可用的开源聊天模型。Llama 3.1 旨在用于多种语言的商业和研究用途。指令调整的文本模型适用于类似助手的聊天,而预训练模型可以适应各种自然语言生成任务。Llama 3.1 模型还支持利用其模型的输出来改进其他模型,包括合成数据生成和精炼。Llama 3.1 是使用优化的变压器架构的自回归语言模型。调整版本使用监督微调 (SFT) 和带有人类反馈的强化学习 (RLHF) 来符合人类对帮助性和安全性的偏好。',
+     displayName: 'Llama 3.1 8B Instruct',
+     enabled: true,
+     id: 'meta.llama3-1-8b-instruct-v1:0',
+     pricing: {
+       input: 0.22,
+       output: 0.22,
+     },
+     type: 'chat',
+   },
+   {
+     abilities: {
+       functionCall: true,
+     },
+     contextWindowTokens: 128_000,
+     description:
+       'Meta Llama 3.1 70B Instruct 的更新版,包括扩展的 128K 上下文长度、多语言性和改进的推理能力。Llama 3.1 提供的多语言大型语言模型 (LLMs) 是一组预训练的、指令调整的生成模型,包括 8B、70B 和 405B 大小 (文本输入/输出)。Llama 3.1 指令调整的文本模型 (8B、70B、405B) 专为多语言对话用例进行了优化,并在常见的行业基准测试中超过了许多可用的开源聊天模型。Llama 3.1 旨在用于多种语言的商业和研究用途。指令调整的文本模型适用于类似助手的聊天,而预训练模型可以适应各种自然语言生成任务。Llama 3.1 模型还支持利用其模型的输出来改进其他模型,包括合成数据生成和精炼。Llama 3.1 是使用优化的变压器架构的自回归语言模型。调整版本使用监督微调 (SFT) 和带有人类反馈的强化学习 (RLHF) 来符合人类对帮助性和安全性的偏好。',
+     displayName: 'Llama 3.1 70B Instruct',
+     enabled: true,
+     id: 'meta.llama3-1-70b-instruct-v1:0',
+     pricing: {
+       input: 0.99,
+       output: 0.99,
+     },
+     type: 'chat',
+   },
+   {
+     abilities: {
+       functionCall: true,
+     },
+     contextWindowTokens: 128_000,
+     description:
+       'Meta Llama 3.1 405B Instruct 是 Llama 3.1 Instruct 模型中最大、最强大的模型,是一款高度先进的对话推理和合成数据生成模型,也可以用作在特定领域进行专业持续预训练或微调的基础。Llama 3.1 提供的多语言大型语言模型 (LLMs) 是一组预训练的、指令调整的生成模型,包括 8B、70B 和 405B 大小 (文本输入/输出)。Llama 3.1 指令调整的文本模型 (8B、70B、405B) 专为多语言对话用例进行了优化,并在常见的行业基准测试中超过了许多可用的开源聊天模型。Llama 3.1 旨在用于多种语言的商业和研究用途。指令调整的文本模型适用于类似助手的聊天,而预训练模型可以适应各种自然语言生成任务。Llama 3.1 模型还支持利用其模型的输出来改进其他模型,包括合成数据生成和精炼。Llama 3.1 是使用优化的变压器架构的自回归语言模型。调整版本使用监督微调 (SFT) 和带有人类反馈的强化学习 (RLHF) 来符合人类对帮助性和安全性的偏好。',
+     displayName: 'Llama 3.1 405B Instruct',
+     enabled: true,
+     id: 'meta.llama3-1-405b-instruct-v1:0',
+     pricing: {
+       input: 5.32,
+       output: 16,
+     },
+     type: 'chat',
+   },
+   {
+     contextWindowTokens: 8000,
+     description:
+       'Meta Llama 3 是一款面向开发者、研究人员和企业的开放大型语言模型 (LLM),旨在帮助他们构建、实验并负责任地扩展他们的生成 AI 想法。作为全球社区创新的基础系统的一部分,它非常适合计算能力和资源有限、边缘设备和更快的训练时间。',
+     displayName: 'Llama 3 8B Instruct',
+     id: 'meta.llama3-8b-instruct-v1:0',
+     pricing: {
+       input: 0.3,
+       output: 0.6,
+     },
+     type: 'chat',
+   },
+   {
+     contextWindowTokens: 8000,
+     description:
+       'Meta Llama 3 是一款面向开发者、研究人员和企业的开放大型语言模型 (LLM),旨在帮助他们构建、实验并负责任地扩展他们的生成 AI 想法。作为全球社区创新的基础系统的一部分,它非常适合内容创建、对话 AI、语言理解、研发和企业应用。',
+     displayName: 'Llama 3 70B Instruct',
+     id: 'meta.llama3-70b-instruct-v1:0',
+     pricing: {
+       input: 2.65,
+       output: 3.5,
+     },
+     type: 'chat',
+   },
+ ];
+
+ export const allModels = [...bedrockChatModels];
+
+ export default allModels;
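
The new per-provider files in src/config/aiModels all build plain arrays of model cards. Based only on the fields that appear in this diff, the card shape looks roughly like the sketch below. The authoritative definition is AIChatModelCard in package/src/types/aiModel.ts (changed +32 -6 in this release), so treat every field and comment here as inferred rather than official.

// Sketch of the card shape inferred from the fields used in this diff;
// the real type is AIChatModelCard in src/types/aiModel.ts and may differ.
interface AIChatModelCardSketch {
  abilities?: {
    functionCall?: boolean;
    vision?: boolean;
  };
  contextWindowTokens?: number;
  description?: string;
  displayName?: string;
  enabled?: boolean;
  id: string;
  maxOutput?: number;
  pricing?: {
    cachedInput?: number; // assumed: price per million cached input tokens
    currency?: string; // e.g. 'CNY'; presumably USD when omitted
    input?: number;
    output?: number;
  };
  releasedAt?: string; // ISO date, e.g. '2024-10-22'
  type: 'chat';
}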
package/src/config/aiModels/cloudflare.ts
@@ -0,0 +1,88 @@
+ import { AIChatModelCard } from '@/types/aiModel';
+
+ const cloudflareChatModels: AIChatModelCard[] = [
+   {
+     contextWindowTokens: 16_384,
+     displayName: 'deepseek-coder-6.7b-instruct-awq',
+     enabled: true,
+     id: '@hf/thebloke/deepseek-coder-6.7b-instruct-awq',
+     type: 'chat',
+   },
+   {
+     contextWindowTokens: 2048,
+     displayName: 'gemma-7b-it',
+     enabled: true,
+     id: '@hf/google/gemma-7b-it',
+     type: 'chat',
+   },
+   {
+     contextWindowTokens: 4096,
+     displayName: 'hermes-2-pro-mistral-7b',
+     enabled: true,
+     id: '@hf/nousresearch/hermes-2-pro-mistral-7b',
+     type: 'chat',
+   },
+   {
+     contextWindowTokens: 8192,
+     displayName: 'llama-3-8b-instruct-awq',
+     id: '@cf/meta/llama-3-8b-instruct-awq',
+     type: 'chat',
+   },
+   {
+     contextWindowTokens: 4096,
+     displayName: 'mistral-7b-instruct-v0.2',
+     id: '@hf/mistral/mistral-7b-instruct-v0.2',
+     type: 'chat',
+   },
+   {
+     contextWindowTokens: 32_768,
+     displayName: 'neural-chat-7b-v3-1-awq',
+     enabled: true,
+     id: '@hf/thebloke/neural-chat-7b-v3-1-awq',
+     type: 'chat',
+   },
+   {
+     contextWindowTokens: 8192,
+     displayName: 'openchat-3.5-0106',
+     id: '@cf/openchat/openchat-3.5-0106',
+     type: 'chat',
+   },
+   {
+     contextWindowTokens: 32_768,
+     displayName: 'openhermes-2.5-mistral-7b-awq',
+     enabled: true,
+     id: '@hf/thebloke/openhermes-2.5-mistral-7b-awq',
+     type: 'chat',
+   },
+   {
+     contextWindowTokens: 32_768,
+     displayName: 'qwen1.5-14b-chat-awq',
+     enabled: true,
+     id: '@cf/qwen/qwen1.5-14b-chat-awq',
+     type: 'chat',
+   },
+   {
+     contextWindowTokens: 4096,
+     displayName: 'starling-lm-7b-beta',
+     enabled: true,
+     id: '@hf/nexusflow/starling-lm-7b-beta',
+     type: 'chat',
+   },
+   {
+     contextWindowTokens: 32_768,
+     displayName: 'zephyr-7b-beta-awq',
+     enabled: true,
+     id: '@hf/thebloke/zephyr-7b-beta-awq',
+     type: 'chat',
+   },
+   {
+     displayName: 'meta-llama-3-8b-instruct',
+     enabled: true,
+     id: '@hf/meta-llama/meta-llama-3-8b-instruct',
+     type: 'chat',
+   },
+ ];
+
+ export const allModels = [...cloudflareChatModels];
+
+ export default allModels;
package/src/config/aiModels/deepseek.ts
@@ -0,0 +1,27 @@
+ import { AIChatModelCard } from '@/types/aiModel';
+
+ const deepseekChatModels: AIChatModelCard[] = [
+   {
+     abilities: {
+       functionCall: true,
+     },
+     contextWindowTokens: 65_536,
+     description:
+       '最新模型 DeepSeek-V3 多项评测成绩超越 Qwen2.5-72B 和 Llama-3.1-405B 等开源模型,性能对齐领军闭源模型 GPT-4o 与 Claude-3.5-Sonnet。',
+     displayName: 'DeepSeek V3',
+     enabled: true,
+     id: 'deepseek-chat',
+     pricing: {
+       cachedInput: 0.5,
+       currency: 'CNY',
+       input: 2,
+       output: 8,
+     },
+     releasedAt: '2024-12-26',
+     type: 'chat',
+   },
+ ];
+
+ export const allModels = [...deepseekChatModels];
+
+ export default allModels;
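
Each provider file exposes its full list as both a named export (allModels) and the default export, so a consumer can filter on the card fields directly. A minimal, hypothetical usage sketch follows; the import path mirrors the file added above, but the variable names and the exact consumption pattern are illustrative rather than taken from this diff.

// Hypothetical usage: pick the enabled chat models that support function calling.
import { allModels } from '@/config/aiModels/deepseek';

const functionCallModels = allModels.filter(
  (model) => model.enabled && model.type === 'chat' && model.abilities?.functionCall,
);

// For the deepseek list above this logs ['deepseek-chat'].
console.log(functionCallModels.map((model) => model.id));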
package/src/config/aiModels/fireworksai.ts
@@ -0,0 +1,232 @@
+ import { AIChatModelCard } from '@/types/aiModel';
+
+ const fireworksaiChatModels: AIChatModelCard[] = [
+   {
+     abilities: {
+       functionCall: true,
+     },
+     contextWindowTokens: 8192,
+     description:
+       'Fireworks 公司最新推出的 Firefunction-v2 是一款性能卓越的函数调用模型,基于 Llama-3 开发,并通过大量优化,特别适用于函数调用、对话及指令跟随等场景。',
+     displayName: 'Firefunction V2',
+     enabled: true,
+     id: 'accounts/fireworks/models/firefunction-v2',
+     type: 'chat',
+   },
+   {
+     abilities: {
+       functionCall: true,
+     },
+     contextWindowTokens: 32_768,
+     description: 'Fireworks 开源函数调用模型,提供卓越的指令执行能力和开放可定制的特性。',
+     displayName: 'Firefunction V1',
+     id: 'accounts/fireworks/models/firefunction-v1',
+     type: 'chat',
+   },
+   {
+     abilities: {
+       vision: true,
+     },
+     contextWindowTokens: 4096,
+     description:
+       'fireworks-ai/FireLLaVA-13b 是一款视觉语言模型,可以同时接收图像和文本输入,经过高质量数据训练,适合多模态任务。',
+     displayName: 'FireLLaVA-13B',
+     enabled: true,
+     id: 'accounts/fireworks/models/firellava-13b',
+     type: 'chat',
+   },
+   {
+     contextWindowTokens: 131_072,
+     description:
+       'Llama 3.2 1B 指令模型是Meta推出的一款轻量级多语言模型。该模型旨在提高效率,与更大型的模型相比,在延迟和成本方面提供了显著的改进。该模型的示例用例包括检索和摘要。',
+     displayName: 'Llama 3.2 1B Instruct',
+     enabled: true,
+     id: 'accounts/fireworks/models/llama-v3p2-1b-instruct',
+     type: 'chat',
+   },
+   {
+     contextWindowTokens: 131_072,
+     description:
+       'Llama 3.2 3B 指令模型是Meta推出的一款轻量级多语言模型。该模型旨在提高效率,与更大型的模型相比,在延迟和成本方面提供了显著的改进。该模型的示例用例包括查询和提示重写以及写作辅助。',
+     displayName: 'Llama 3.2 3B Instruct',
+     enabled: true,
+     id: 'accounts/fireworks/models/llama-v3p2-3b-instruct',
+     type: 'chat',
+   },
+   {
+     abilities: {
+       vision: true,
+     },
+     contextWindowTokens: 131_072,
+     description:
+       'Meta的11B参数指令调整图像推理模型。该模型针对视觉识别、图像推理、图像描述和回答关于图像的一般性问题进行了优化。该模型能够理解视觉数据,如图表和图形,并通过生成文本描述图像细节来弥合视觉与语言之间的差距。',
+     displayName: 'Llama 3.2 11B Vision Instruct',
+     enabled: true,
+     id: 'accounts/fireworks/models/llama-v3p2-11b-vision-instruct',
+     type: 'chat',
+   },
+   {
+     abilities: {
+       vision: true,
+     },
+     contextWindowTokens: 131_072,
+     description:
+       'Meta的90B参数指令调整图像推理模型。该模型针对视觉识别、图像推理、图像描述和回答关于图像的一般性问题进行了优化。该模型能够理解视觉数据,如图表和图形,并通过生成文本描述图像细节来弥合视觉与语言之间的差距。',
+     displayName: 'Llama 3.2 90B Vision Instruct',
+     enabled: true,
+     id: 'accounts/fireworks/models/llama-v3p2-90b-vision-instruct',
+     type: 'chat',
+   },
+   {
+     contextWindowTokens: 131_072,
+     description:
+       'Llama 3.1 8B 指令模型,专为多语言对话优化,能够在常见行业基准上超越多数开源及闭源模型。',
+     displayName: 'Llama 3.1 8B Instruct',
+     enabled: true,
+     id: 'accounts/fireworks/models/llama-v3p1-8b-instruct',
+     type: 'chat',
+   },
+   {
+     contextWindowTokens: 131_072,
+     description:
+       'Llama 3.1 70B 指令模型,提供卓越的自然语言理解和生成能力,是对话及分析任务的理想选择。',
+     displayName: 'Llama 3.1 70B Instruct',
+     enabled: true,
+     id: 'accounts/fireworks/models/llama-v3p1-70b-instruct',
+     type: 'chat',
+   },
+   {
+     contextWindowTokens: 131_072,
+     description:
+       'Llama 3.1 405B 指令模型,具备超大规模参数,适合复杂任务和高负载场景下的指令跟随。',
+     displayName: 'Llama 3.1 405B Instruct',
+     enabled: true,
+     id: 'accounts/fireworks/models/llama-v3p1-405b-instruct',
+     type: 'chat',
+   },
+   {
+     contextWindowTokens: 8192,
+     description: 'Llama 3 8B 指令模型,优化用于对话及多语言任务,表现卓越且高效。',
+     displayName: 'Llama 3 8B Instruct',
+     id: 'accounts/fireworks/models/llama-v3-8b-instruct',
+     type: 'chat',
+   },
+   {
+     contextWindowTokens: 8192,
+     description: 'Llama 3 70B 指令模型,专为多语言对话和自然语言理解优化,性能优于多数竞争模型。',
+     displayName: 'Llama 3 70B Instruct',
+     id: 'accounts/fireworks/models/llama-v3-70b-instruct',
+     type: 'chat',
+   },
+   {
+     contextWindowTokens: 8192,
+     description:
+       'Llama 3 8B 指令模型(HF 版本),与官方实现结果一致,具备高度一致性和跨平台兼容性。',
+     displayName: 'Llama 3 8B Instruct (HF version)',
+     id: 'accounts/fireworks/models/llama-v3-8b-instruct-hf',
+     type: 'chat',
+   },
+   {
+     contextWindowTokens: 8192,
+     description:
+       'Llama 3 70B 指令模型(HF 版本),与官方实现结果保持一致,适合高质量的指令跟随任务。',
+     displayName: 'Llama 3 70B Instruct (HF version)',
+     id: 'accounts/fireworks/models/llama-v3-70b-instruct-hf',
+     type: 'chat',
+   },
+   {
+     contextWindowTokens: 32_768,
+     description: 'Mixtral MoE 8x7B 指令模型,多专家架构提供高效的指令跟随及执行。',
+     displayName: 'Mixtral MoE 8x7B Instruct',
+     enabled: true,
+     id: 'accounts/fireworks/models/mixtral-8x7b-instruct',
+     type: 'chat',
+   },
+   {
+     contextWindowTokens: 65_536,
+     description:
+       'Mixtral MoE 8x22B 指令模型,大规模参数和多专家架构,全方位支持复杂任务的高效处理。',
+     displayName: 'Mixtral MoE 8x22B Instruct',
+     enabled: true,
+     id: 'accounts/fireworks/models/mixtral-8x22b-instruct',
+     type: 'chat',
+   },
+   {
+     contextWindowTokens: 32_768,
+     description: 'Mixtral MoE 8x7B 指令模型(HF 版本),性能与官方实现一致,适合多种高效任务场景。',
+     displayName: 'Mixtral MoE 8x7B Instruct (HF version)',
+     id: 'accounts/fireworks/models/mixtral-8x7b-instruct-hf',
+     type: 'chat',
+   },
+   {
+     abilities: {
+       vision: true,
+     },
+     contextWindowTokens: 32_064,
+     description:
+       'Phi-3-Vision-128K-Instruct 是一个轻量级的、最先进的开放多模态模型,它基于包括合成数据和经过筛选的公开网站在内的数据集构建,专注于非常高质量、推理密集型的数据,这些数据既包括文本也包括视觉。该模型属于 Phi-3 模型系列,其多模态版本支持 128K 的上下文长度(以标记为单位)。该模型经过严格的增强过程,结合了监督微调和直接偏好优化,以确保精确遵循指令和强大的安全措施。',
+     displayName: 'Phi 3.5 Vision Instruct',
+     enabled: true,
+     id: 'accounts/fireworks/models/phi-3-vision-128k-instruct',
+     type: 'chat',
+   },
+   {
+     contextWindowTokens: 32_768,
+     description: 'QwQ模型是由 Qwen 团队开发的实验性研究模型,专注于增强 AI 推理能力。',
+     displayName: 'QwQ 32B Preview',
+     enabled: true,
+     id: 'accounts/fireworks/models/qwen-qwq-32b-preview',
+     type: 'chat',
+   },
+   {
+     contextWindowTokens: 32_768,
+     description:
+       'Qwen2.5 是由阿里云 Qwen 团队开发的一系列仅包含解码器的语言模型。这些模型提供不同的大小,包括 0.5B、1.5B、3B、7B、14B、32B 和 72B,并且有基础版(base)和指令版(instruct)两种变体。',
+     displayName: 'Qwen2.5 72B Instruct',
+     enabled: true,
+     id: 'accounts/fireworks/models/qwen2p5-72b-instruct',
+     type: 'chat',
+   },
+   {
+     contextWindowTokens: 32_768,
+     description:
+       'Qwen2.5 Coder 32B Instruct 是阿里云发布的代码特定大语言模型系列的最新版本。该模型在 Qwen2.5 的基础上,通过 5.5 万亿个 tokens 的训练,显著提升了代码生成、推理和修复能力。它不仅增强了编码能力,还保持了数学和通用能力的优势。模型为代码智能体等实际应用提供了更全面的基础',
+     displayName: 'Qwen2.5 Coder 32B Instruct',
+     enabled: false,
+     id: 'accounts/fireworks/models/qwen2p5-coder-32b-instruct',
+     type: 'chat',
+   },
+   {
+     contextWindowTokens: 32_768,
+     description: 'Yi-Large 模型,具备卓越的多语言处理能力,可用于各类语言生成和理解任务。',
+     displayName: 'Yi-Large',
+     enabled: true,
+     id: 'accounts/yi-01-ai/models/yi-large',
+     type: 'chat',
+   },
+   {
+     contextWindowTokens: 8192,
+     description: 'StarCoder 7B 模型,针对80多种编程语言训练,拥有出色的编程填充能力和语境理解。',
+     displayName: 'StarCoder 7B',
+     id: 'accounts/fireworks/models/starcoder-7b',
+     type: 'chat',
+   },
+   {
+     contextWindowTokens: 8192,
+     description: 'StarCoder 15.5B 模型,支持高级编程任务,多语言能力增强,适合复杂代码生成和理解。',
+     displayName: 'StarCoder 15.5B',
+     id: 'accounts/fireworks/models/starcoder-16b',
+     type: 'chat',
+   },
+   {
+     contextWindowTokens: 4096,
+     description: 'MythoMax L2 13B 模型,结合新颖的合并技术,擅长叙事和角色扮演。',
+     displayName: 'MythoMax L2 13b',
+     id: 'accounts/fireworks/models/mythomax-l2-13b',
+     type: 'chat',
+   },
+ ];
+
+ export const allModels = [...fireworksaiChatModels];
+
+ export default allModels;
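
The file list also adds package/src/config/aiModels/index.ts (+98 -0), which presumably aggregates these per-provider lists, but its contents are not shown in this diff. The sketch below is only one plausible shape for such an aggregator; every name in it, including the exported constant, is an assumption rather than the actual implementation.

// Hypothetical aggregator keyed by provider id; the real index.ts is not shown in this diff.
import { AIChatModelCard } from '@/types/aiModel';

import bedrockModels from './bedrock';
import cloudflareModels from './cloudflare';
import deepseekModels from './deepseek';
import fireworksaiModels from './fireworksai';

export const aiModelsByProviderSketch: Record<string, AIChatModelCard[]> = {
  bedrock: bedrockModels,
  cloudflare: cloudflareModels,
  deepseek: deepseekModels,
  fireworksai: fireworksaiModels,
};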