@lobehub/chat 1.43.6 → 1.44.0
This diff shows the changes between publicly released versions of the package as they appear in their respective public registries. It is provided for informational purposes only.
- package/CHANGELOG.md +25 -0
- package/changelog/v1.json +9 -0
- package/docs/self-hosting/server-database/docker-compose.mdx +2 -2
- package/locales/ar/common.json +1 -0
- package/locales/ar/modelProvider.json +176 -0
- package/locales/ar/setting.json +1 -0
- package/locales/bg-BG/common.json +1 -0
- package/locales/bg-BG/modelProvider.json +176 -0
- package/locales/bg-BG/setting.json +1 -0
- package/locales/de-DE/common.json +1 -0
- package/locales/de-DE/modelProvider.json +176 -0
- package/locales/de-DE/setting.json +1 -0
- package/locales/en-US/common.json +1 -0
- package/locales/en-US/modelProvider.json +176 -0
- package/locales/en-US/setting.json +1 -0
- package/locales/es-ES/common.json +1 -0
- package/locales/es-ES/modelProvider.json +176 -0
- package/locales/es-ES/setting.json +1 -0
- package/locales/fa-IR/common.json +1 -0
- package/locales/fa-IR/modelProvider.json +176 -0
- package/locales/fa-IR/setting.json +1 -0
- package/locales/fr-FR/common.json +1 -0
- package/locales/fr-FR/modelProvider.json +176 -0
- package/locales/fr-FR/setting.json +1 -0
- package/locales/it-IT/common.json +1 -0
- package/locales/it-IT/modelProvider.json +176 -0
- package/locales/it-IT/setting.json +1 -0
- package/locales/ja-JP/common.json +1 -0
- package/locales/ja-JP/modelProvider.json +176 -0
- package/locales/ja-JP/setting.json +1 -0
- package/locales/ko-KR/common.json +1 -0
- package/locales/ko-KR/modelProvider.json +176 -0
- package/locales/ko-KR/setting.json +1 -0
- package/locales/nl-NL/common.json +1 -0
- package/locales/nl-NL/modelProvider.json +176 -0
- package/locales/nl-NL/setting.json +1 -0
- package/locales/pl-PL/common.json +1 -0
- package/locales/pl-PL/modelProvider.json +176 -0
- package/locales/pl-PL/setting.json +1 -0
- package/locales/pt-BR/common.json +1 -0
- package/locales/pt-BR/modelProvider.json +176 -0
- package/locales/pt-BR/setting.json +1 -0
- package/locales/ru-RU/common.json +1 -0
- package/locales/ru-RU/modelProvider.json +176 -0
- package/locales/ru-RU/setting.json +1 -0
- package/locales/tr-TR/common.json +1 -0
- package/locales/tr-TR/modelProvider.json +176 -0
- package/locales/tr-TR/setting.json +1 -0
- package/locales/vi-VN/common.json +1 -0
- package/locales/vi-VN/modelProvider.json +176 -0
- package/locales/vi-VN/setting.json +1 -0
- package/locales/zh-CN/common.json +1 -0
- package/locales/zh-CN/modelProvider.json +176 -0
- package/locales/zh-CN/setting.json +1 -0
- package/locales/zh-TW/common.json +1 -0
- package/locales/zh-TW/modelProvider.json +176 -0
- package/locales/zh-TW/setting.json +1 -0
- package/package.json +4 -4
- package/src/app/(main)/(mobile)/me/settings/features/Category.tsx +1 -1
- package/src/app/(main)/(mobile)/me/settings/features/useCategory.tsx +12 -5
- package/src/app/(main)/changelog/features/VersionTag.tsx +1 -2
- package/src/app/(main)/chat/(workspace)/@conversation/features/ChatList/ChatItem/Thread.tsx +1 -1
- package/src/app/(main)/chat/(workspace)/@conversation/features/ChatList/ChatItem/ThreadItem.tsx +1 -2
- package/src/app/(main)/chat/(workspace)/@conversation/features/ChatList/ChatItem/index.tsx +0 -1
- package/src/app/(main)/chat/(workspace)/@conversation/features/ChatList/WelcomeChatItem/InboxWelcome/AgentsSuggest.tsx +1 -1
- package/src/app/(main)/chat/(workspace)/@conversation/features/ChatList/WelcomeChatItem/InboxWelcome/QuestionSuggest.tsx +1 -1
- package/src/app/(main)/chat/(workspace)/@conversation/features/ZenModeToast/Toast.tsx +1 -1
- package/src/app/(main)/chat/(workspace)/@topic/features/TopicListContent/ThreadItem/index.tsx +0 -2
- package/src/app/(main)/chat/(workspace)/@topic/features/TopicListContent/TopicItem/index.tsx +0 -1
- package/src/app/(main)/chat/(workspace)/_layout/Desktop/ChatHeader/Tags.tsx +2 -3
- package/src/app/(main)/chat/@session/features/SessionListContent/CollapseGroup/index.tsx +1 -1
- package/src/app/(main)/chat/features/Migration/Start.tsx +1 -1
- package/src/app/(main)/discover/(detail)/assistant/[slug]/features/ConversationExample/TopicList.tsx +4 -3
- package/src/app/(main)/discover/(detail)/assistant/[slug]/features/Header.tsx +1 -1
- package/src/app/(main)/discover/(detail)/features/ShareButton.tsx +2 -1
- package/src/app/(main)/discover/(detail)/model/[...slugs]/features/Header.tsx +1 -1
- package/src/app/(main)/discover/(detail)/plugin/[slug]/features/Header.tsx +1 -1
- package/src/app/(main)/discover/(detail)/provider/[slug]/features/Header.tsx +1 -1
- package/src/app/(main)/discover/(list)/_layout/Desktop/Nav.tsx +0 -1
- package/src/app/(main)/discover/(list)/assistants/features/Card.tsx +1 -1
- package/src/app/(main)/discover/(list)/models/features/Card.tsx +1 -1
- package/src/app/(main)/discover/(list)/plugins/features/Card.tsx +1 -1
- package/src/app/(main)/discover/(list)/providers/features/Card.tsx +1 -1
- package/src/app/(main)/discover/components/GridLoadingCard.tsx +2 -1
- package/src/app/(main)/discover/components/Title.tsx +1 -1
- package/src/app/(main)/files/(content)/@menu/features/KnowledgeBase/EmptyStatus.tsx +1 -1
- package/src/app/(main)/files/(content)/@menu/features/KnowledgeBase/Item/index.tsx +0 -1
- package/src/app/(main)/files/(content)/@modal/(.)[id]/FullscreenModal.tsx +2 -2
- package/src/app/(main)/files/(content)/NotSupportClient.tsx +2 -2
- package/src/app/(main)/profile/_layout/Desktop/SideBar.tsx +1 -1
- package/src/app/(main)/profile/stats/features/ShareButton/Preview.tsx +5 -5
- package/src/app/(main)/repos/[id]/evals/components/Container.tsx +1 -1
- package/src/app/(main)/repos/[id]/evals/dataset/DatasetList/Item.tsx +0 -1
- package/src/app/(main)/settings/_layout/Desktop/SideBar.tsx +1 -1
- package/src/app/(main)/settings/about/features/ItemCard.tsx +3 -3
- package/src/app/(main)/settings/about/features/Version.tsx +1 -1
- package/src/app/(main)/settings/hooks/useCategory.tsx +22 -9
- package/src/app/(main)/settings/llm/components/ProviderConfig/index.tsx +2 -1
- package/src/app/(main)/settings/llm/components/ProviderModelList/ModelFetcher.tsx +0 -1
- package/src/app/(main)/settings/provider/(detail)/[id]/index.tsx +19 -0
- package/src/app/(main)/settings/provider/(detail)/[id]/page.tsx +95 -0
- package/src/app/(main)/settings/provider/(detail)/azure/page.tsx +119 -0
- package/src/app/(main)/settings/provider/(detail)/bedrock/page.tsx +91 -0
- package/src/app/(main)/settings/provider/(detail)/cloudflare/page.tsx +58 -0
- package/src/app/(main)/settings/provider/(detail)/github/page.tsx +67 -0
- package/src/app/(main)/settings/provider/(detail)/huggingface/page.tsx +67 -0
- package/src/app/(main)/settings/provider/(detail)/ollama/Checker.tsx +73 -0
- package/src/app/(main)/settings/provider/(detail)/ollama/page.tsx +34 -0
- package/src/app/(main)/settings/provider/(detail)/openai/page.tsx +23 -0
- package/src/app/(main)/settings/provider/(detail)/wenxin/page.tsx +61 -0
- package/src/app/(main)/settings/provider/(list)/Footer.tsx +36 -0
- package/src/app/(main)/settings/provider/(list)/ProviderGrid/Card.tsx +134 -0
- package/src/app/(main)/settings/provider/(list)/ProviderGrid/index.tsx +91 -0
- package/src/app/(main)/settings/provider/(list)/index.tsx +19 -0
- package/src/app/(main)/settings/provider/ProviderMenu/AddNew.tsx +28 -0
- package/src/app/(main)/settings/provider/ProviderMenu/All.tsx +29 -0
- package/src/app/(main)/settings/provider/ProviderMenu/Item.tsx +69 -0
- package/src/app/(main)/settings/provider/ProviderMenu/List.tsx +76 -0
- package/src/app/(main)/settings/provider/ProviderMenu/SearchResult.tsx +43 -0
- package/src/app/(main)/settings/provider/ProviderMenu/SkeletonList.tsx +60 -0
- package/src/app/(main)/settings/provider/ProviderMenu/SortProviderModal/GroupItem.tsx +30 -0
- package/src/app/(main)/settings/provider/ProviderMenu/SortProviderModal/index.tsx +91 -0
- package/src/app/(main)/settings/provider/ProviderMenu/index.tsx +80 -0
- package/src/app/(main)/settings/provider/_layout/Desktop.tsx +37 -0
- package/src/app/(main)/settings/provider/_layout/Mobile.tsx +14 -0
- package/src/app/(main)/settings/provider/const.ts +20 -0
- package/src/app/(main)/settings/provider/features/CreateNewProvider/index.tsx +146 -0
- package/src/app/(main)/settings/provider/features/ModelList/CreateNewModelModal/Form.tsx +105 -0
- package/src/app/(main)/settings/provider/features/ModelList/CreateNewModelModal/index.tsx +69 -0
- package/src/app/(main)/settings/provider/features/ModelList/DisabledModels.tsx +29 -0
- package/src/app/(main)/settings/provider/features/ModelList/EmptyModels.tsx +101 -0
- package/src/app/(main)/settings/provider/features/ModelList/EnabledModelList/index.tsx +85 -0
- package/src/app/(main)/settings/provider/features/ModelList/ModelConfigModal/Form.tsx +109 -0
- package/src/app/(main)/settings/provider/features/ModelList/ModelConfigModal/index.tsx +76 -0
- package/src/app/(main)/settings/provider/features/ModelList/ModelItem.tsx +346 -0
- package/src/app/(main)/settings/provider/features/ModelList/ModelTitle/Search.tsx +37 -0
- package/src/app/(main)/settings/provider/features/ModelList/ModelTitle/index.tsx +145 -0
- package/src/app/(main)/settings/provider/features/ModelList/SearchResult.tsx +67 -0
- package/src/app/(main)/settings/provider/features/ModelList/SkeletonList.tsx +63 -0
- package/src/app/(main)/settings/provider/features/ModelList/SortModelModal/ListItem.tsx +20 -0
- package/src/app/(main)/settings/provider/features/ModelList/SortModelModal/index.tsx +96 -0
- package/src/app/(main)/settings/provider/features/ModelList/index.tsx +59 -0
- package/src/app/(main)/settings/provider/features/ProviderConfig/Checker.tsx +120 -0
- package/src/app/(main)/settings/provider/features/ProviderConfig/SkeletonInput.tsx +5 -0
- package/src/app/(main)/settings/provider/features/ProviderConfig/UpdateProviderInfo/SettingModal.tsx +137 -0
- package/src/app/(main)/settings/provider/features/ProviderConfig/UpdateProviderInfo/index.tsx +49 -0
- package/src/app/(main)/settings/provider/features/ProviderConfig/index.tsx +343 -0
- package/src/app/(main)/settings/provider/layout.tsx +21 -0
- package/src/app/(main)/settings/provider/page.tsx +17 -0
- package/src/app/(main)/settings/provider/type.ts +5 -0
- package/src/app/(main)/settings/sync/features/DeviceInfo/Card.tsx +1 -1
- package/src/app/(main)/settings/sync/features/DeviceInfo/index.tsx +1 -1
- package/src/app/@modal/(.)changelog/modal/features/ReadDetail.tsx +1 -1
- package/src/app/@modal/(.)changelog/modal/features/VersionTag.tsx +1 -2
- package/src/app/@modal/(.)changelog/modal/layout.tsx +1 -1
- package/src/components/Cell/index.tsx +1 -1
- package/src/components/DragUpload/index.tsx +2 -3
- package/src/components/FeatureList/index.tsx +1 -1
- package/src/components/FileParsingStatus/EmbeddingStatus.tsx +1 -1
- package/src/components/FileParsingStatus/index.tsx +1 -1
- package/src/components/FunctionModal/style.tsx +2 -2
- package/src/components/GoBack/index.tsx +1 -2
- package/src/components/HotKeys/index.tsx +1 -1
- package/src/components/InstantSwitch/index.tsx +28 -0
- package/src/components/Menu/index.tsx +1 -1
- package/src/components/ModelSelect/index.tsx +2 -3
- package/src/components/Notification/index.tsx +2 -1
- package/src/components/StatisticCard/index.tsx +5 -6
- package/src/config/aiModels/ai21.ts +38 -0
- package/src/config/aiModels/ai360.ts +71 -0
- package/src/config/aiModels/anthropic.ts +152 -0
- package/src/config/aiModels/azure.ts +86 -0
- package/src/config/aiModels/baichuan.ts +107 -0
- package/src/config/aiModels/bedrock.ts +315 -0
- package/src/config/aiModels/cloudflare.ts +88 -0
- package/src/config/aiModels/deepseek.ts +27 -0
- package/src/config/aiModels/fireworksai.ts +232 -0
- package/src/config/aiModels/giteeai.ts +137 -0
- package/src/config/aiModels/github.ts +273 -0
- package/src/config/aiModels/google.ts +317 -0
- package/src/config/aiModels/groq.ts +202 -0
- package/src/config/aiModels/higress.ts +2828 -0
- package/src/config/aiModels/huggingface.ts +56 -0
- package/src/config/aiModels/hunyuan.ts +151 -0
- package/src/config/aiModels/index.ts +98 -0
- package/src/config/aiModels/internlm.ts +40 -0
- package/src/config/aiModels/minimax.ts +55 -0
- package/src/config/aiModels/mistral.ts +172 -0
- package/src/config/aiModels/moonshot.ts +44 -0
- package/src/config/aiModels/novita.ts +124 -0
- package/src/config/aiModels/ollama.ts +412 -0
- package/src/config/aiModels/openai.ts +537 -0
- package/src/config/aiModels/openrouter.ts +252 -0
- package/src/config/aiModels/perplexity.ts +67 -0
- package/src/config/aiModels/qwen.ts +302 -0
- package/src/config/aiModels/sensenova.ts +114 -0
- package/src/config/aiModels/siliconcloud.ts +679 -0
- package/src/config/aiModels/spark.ts +68 -0
- package/src/config/aiModels/stepfun.ts +153 -0
- package/src/config/aiModels/taichu.ts +19 -0
- package/src/config/aiModels/togetherai.ts +334 -0
- package/src/config/aiModels/upstage.ts +37 -0
- package/src/config/aiModels/wenxin.ts +171 -0
- package/src/config/aiModels/xai.ts +72 -0
- package/src/config/aiModels/zeroone.ts +156 -0
- package/src/config/aiModels/zhipu.ts +235 -0
- package/src/config/featureFlags/schema.ts +3 -0
- package/src/config/modelProviders/anthropic.ts +1 -0
- package/src/config/modelProviders/github.ts +0 -1
- package/src/config/modelProviders/google.ts +1 -0
- package/src/config/modelProviders/stepfun.ts +2 -0
- package/src/database/migrations/0013_add_ai_infra.sql +44 -0
- package/src/database/migrations/meta/0013_snapshot.json +3598 -0
- package/src/database/migrations/meta/_journal.json +7 -0
- package/src/database/repositories/aiInfra/index.ts +115 -0
- package/src/database/schemas/aiInfra.ts +69 -0
- package/src/database/schemas/index.ts +1 -0
- package/src/database/server/models/__tests__/aiModel.test.ts +318 -0
- package/src/database/server/models/__tests__/aiProvider.test.ts +373 -0
- package/src/database/server/models/aiModel.ts +250 -0
- package/src/database/server/models/aiProvider.ts +234 -0
- package/src/features/AgentSetting/AgentPrompt/index.tsx +2 -2
- package/src/features/ChatInput/ActionBar/Token/TokenTag.tsx +2 -1
- package/src/features/ChatInput/ActionBar/Tools/index.tsx +2 -3
- package/src/features/ChatInput/ActionBar/Upload/ServerMode.tsx +2 -3
- package/src/features/ChatInput/Desktop/FilePreview/FileItem/index.tsx +3 -2
- package/src/features/ChatInput/Desktop/FilePreview/FileList.tsx +2 -2
- package/src/features/ChatInput/Mobile/Files/FileItem/File.tsx +2 -2
- package/src/features/ChatInput/Mobile/InputArea/index.tsx +1 -1
- package/src/features/ChatInput/STT/common.tsx +1 -1
- package/src/features/Conversation/Error/style.tsx +2 -2
- package/src/features/Conversation/Messages/Assistant/FileChunks/Item/style.ts +2 -2
- package/src/features/Conversation/Messages/Assistant/FileChunks/index.tsx +1 -1
- package/src/features/Conversation/Messages/Assistant/ToolCallItem/Inspector/style.ts +2 -3
- package/src/features/Conversation/Messages/Assistant/ToolCallItem/style.ts +2 -3
- package/src/features/Conversation/Messages/User/FileListViewer/Item.tsx +0 -1
- package/src/features/Conversation/components/BackBottom/style.ts +2 -2
- package/src/features/Conversation/components/MarkdownElements/LobeArtifact/Render/Icon.tsx +2 -3
- package/src/features/Conversation/components/MarkdownElements/LobeArtifact/Render/index.tsx +3 -3
- package/src/features/Conversation/components/MarkdownElements/LobeThinking/Render.tsx +1 -1
- package/src/features/Conversation/components/OTPInput.tsx +2 -2
- package/src/features/DataImporter/Loading.tsx +1 -1
- package/src/features/FileManager/FileList/EmptyStatus.tsx +1 -1
- package/src/features/FileManager/FileList/index.tsx +1 -1
- package/src/features/FileManager/UploadDock/Item.tsx +1 -1
- package/src/features/FileManager/UploadDock/index.tsx +4 -4
- package/src/features/FileViewer/NotSupport/index.tsx +1 -1
- package/src/features/FileViewer/Renderer/MSDoc/index.tsx +0 -1
- package/src/features/FileViewer/Renderer/TXT/index.tsx +1 -1
- package/src/features/InitClientDB/EnableModal.tsx +1 -1
- package/src/features/InitClientDB/ErrorResult.tsx +1 -1
- package/src/features/InitClientDB/InitIndicator.tsx +1 -1
- package/src/features/KnowledgeBaseModal/AddFilesToKnowledgeBase/SelectForm.tsx +0 -1
- package/src/features/ModelSwitchPanel/index.tsx +2 -2
- package/src/features/PluginsUI/Render/Loading.tsx +0 -1
- package/src/features/Portal/Home/Body/Files/FileList/Item.tsx +1 -1
- package/src/features/Portal/Home/Body/Plugins/ArtifactList/Item/style.ts +1 -2
- package/src/features/Setting/SettingContainer.tsx +8 -1
- package/src/features/ShareModal/ShareImage/style.ts +2 -2
- package/src/features/ShareModal/style.ts +2 -2
- package/src/features/User/DataStatistics.tsx +1 -1
- package/src/hooks/useEnabledChatModels.ts +10 -1
- package/src/hooks/useModelSupportToolUse.ts +15 -0
- package/src/hooks/useModelSupportVision.ts +15 -0
- package/src/layout/AuthProvider/Clerk/useAppearance.ts +3 -3
- package/src/layout/GlobalProvider/AppTheme.tsx +1 -1
- package/src/layout/GlobalProvider/StoreInitialization.tsx +5 -0
- package/src/locales/default/common.ts +1 -0
- package/src/locales/default/modelProvider.ts +178 -0
- package/src/locales/default/setting.ts +1 -0
- package/src/server/modules/KeyVaultsEncrypt/index.ts +1 -1
- package/src/server/routers/lambda/aiModel.ts +128 -0
- package/src/server/routers/lambda/aiProvider.ts +127 -0
- package/src/server/routers/lambda/index.ts +4 -0
- package/src/services/__tests__/_auth.test.ts +16 -49
- package/src/services/__tests__/chat.test.ts +2 -0
- package/src/services/_auth.ts +42 -25
- package/src/services/aiModel.ts +52 -0
- package/src/services/aiProvider.ts +47 -0
- package/src/services/chat.ts +62 -18
- package/src/store/aiInfra/index.ts +2 -0
- package/src/store/aiInfra/initialState.ts +11 -0
- package/src/store/aiInfra/selectors.ts +2 -0
- package/src/store/aiInfra/slices/aiModel/action.ts +146 -0
- package/src/store/aiInfra/slices/aiModel/index.ts +3 -0
- package/src/store/aiInfra/slices/aiModel/initialState.ts +14 -0
- package/src/store/aiInfra/slices/aiModel/selectors.ts +63 -0
- package/src/store/aiInfra/slices/aiProvider/action.ts +208 -0
- package/src/store/aiInfra/slices/aiProvider/index.ts +3 -0
- package/src/store/aiInfra/slices/aiProvider/initialState.ts +32 -0
- package/src/store/aiInfra/slices/aiProvider/selectors.ts +99 -0
- package/src/store/aiInfra/store.ts +25 -0
- package/src/store/global/initialState.ts +1 -0
- package/src/store/serverConfig/selectors.test.ts +1 -0
- package/src/styles/global.ts +1 -1
- package/src/types/aiModel.ts +32 -6
- package/src/types/aiProvider.ts +11 -4
- package/src/utils/fetch/fetchSSE.ts +3 -1
@@ -0,0 +1,68 @@
+import { AIChatModelCard } from '@/types/aiModel';
+
+const sparkChatModels: AIChatModelCard[] = [
+  {
+    contextWindowTokens: 8192,
+    description:
+      'Spark Lite 是一款轻量级大语言模型,具备极低的延迟与高效的处理能力,完全免费开放,支持实时在线搜索功能。其快速响应的特性使其在低算力设备上的推理应用和模型微调中表现出色,为用户带来出色的成本效益和智能体验,尤其在知识问答、内容生成及搜索场景下表现不俗。',
+    displayName: 'Spark Lite',
+    enabled: true,
+    id: 'lite',
+    maxOutput: 4096,
+    type: 'chat',
+  },
+  {
+    contextWindowTokens: 8192,
+    description:
+      'Spark Pro 是一款为专业领域优化的高性能大语言模型,专注数学、编程、医疗、教育等多个领域,并支持联网搜索及内置天气、日期等插件。其优化后模型在复杂知识问答、语言理解及高层次文本创作中展现出色表现和高效性能,是适合专业应用场景的理想选择。',
+    displayName: 'Spark Pro',
+    enabled: true,
+    id: 'generalv3',
+    maxOutput: 8192,
+    type: 'chat',
+  },
+  {
+    contextWindowTokens: 131_072,
+    description:
+      'Spark Pro 128K 配置了特大上下文处理能力,能够处理多达128K的上下文信息,特别适合需通篇分析和长期逻辑关联处理的长文内容,可在复杂文本沟通中提供流畅一致的逻辑与多样的引用支持。',
+    displayName: 'Spark Pro 128K',
+    enabled: true,
+    id: 'pro-128k',
+    maxOutput: 4096,
+    type: 'chat',
+  },
+  {
+    contextWindowTokens: 8192,
+    description:
+      'Spark Max 为功能最为全面的版本,支持联网搜索及众多内置插件。其全面优化的核心能力以及系统角色设定和函数调用功能,使其在各种复杂应用场景中的表现极为优异和出色。',
+    displayName: 'Spark Max',
+    enabled: true,
+    id: 'generalv3.5',
+    maxOutput: 8192,
+    type: 'chat',
+  },
+  {
+    contextWindowTokens: 32_768,
+    description:
+      'Spark Max 32K 配置了大上下文处理能力,更强的上下文理解和逻辑推理能力,支持32K tokens的文本输入,适用于长文档阅读、私有知识问答等场景',
+    displayName: 'Spark Max 32K',
+    enabled: true,
+    id: 'max-32k',
+    maxOutput: 8192,
+    type: 'chat',
+  },
+  {
+    contextWindowTokens: 8192,
+    description:
+      'Spark Ultra 是星火大模型系列中最为强大的版本,在升级联网搜索链路同时,提升对文本内容的理解和总结能力。它是用于提升办公生产力和准确响应需求的全方位解决方案,是引领行业的智能产品。',
+    displayName: 'Spark 4.0 Ultra',
+    enabled: true,
+    id: '4.0Ultra',
+    maxOutput: 8192,
+    type: 'chat',
+  },
+];
+
+export const allModels = [...sparkChatModels];
+
+export default allModels;
@@ -0,0 +1,153 @@
+import { AIChatModelCard } from '@/types/aiModel';
+
+const stepfunChatModels: AIChatModelCard[] = [
+  {
+    abilities: {
+      functionCall: true,
+    },
+    contextWindowTokens: 8000,
+    description: '高速模型,适合实时对话。',
+    displayName: 'Step 1 Flash',
+    enabled: true,
+    id: 'step-1-flash',
+    pricing: {
+      currency: 'CNY',
+      input: 1,
+      output: 4,
+    },
+    type: 'chat',
+  },
+  {
+    abilities: {
+      functionCall: true,
+    },
+    contextWindowTokens: 8000,
+    description: '小型模型,适合轻量级任务。',
+    displayName: 'Step 1 8K',
+    enabled: true,
+    id: 'step-1-8k',
+    pricing: {
+      currency: 'CNY',
+      input: 5,
+      output: 20,
+    },
+    type: 'chat',
+  },
+  {
+    abilities: {
+      functionCall: true,
+    },
+    contextWindowTokens: 32_000,
+    description: '支持中等长度的对话,适用于多种应用场景。',
+    displayName: 'Step 1 32K',
+    enabled: true,
+    id: 'step-1-32k',
+    pricing: {
+      currency: 'CNY',
+      input: 15,
+      output: 70,
+    },
+    type: 'chat',
+  },
+  {
+    abilities: {
+      functionCall: true,
+    },
+    contextWindowTokens: 128_000,
+    description: '平衡性能与成本,适合一般场景。',
+    displayName: 'Step 1 128K',
+    enabled: true,
+    id: 'step-1-128k',
+    pricing: {
+      currency: 'CNY',
+      input: 40,
+      output: 200,
+    },
+    type: 'chat',
+  },
+  {
+    abilities: {
+      functionCall: true,
+    },
+    contextWindowTokens: 256_000,
+    description: '具备超长上下文处理能力,尤其适合长文档分析。',
+    displayName: 'Step 1 256K',
+    id: 'step-1-256k',
+    pricing: {
+      currency: 'CNY',
+      input: 95,
+      output: 300,
+    },
+    type: 'chat',
+  },
+  {
+    abilities: {
+      functionCall: true,
+    },
+    contextWindowTokens: 16_000,
+    description: '支持大规模上下文交互,适合复杂对话场景。',
+    displayName: 'Step 2 16K',
+    enabled: true,
+    id: 'step-2-16k',
+    pricing: {
+      currency: 'CNY',
+      input: 38,
+      output: 120,
+    },
+    type: 'chat',
+  },
+  {
+    abilities: {
+      functionCall: true,
+      vision: true,
+    },
+    contextWindowTokens: 8000,
+    description: '小型视觉模型,适合基本的图文任务。',
+    displayName: 'Step 1V 8K',
+    enabled: true,
+    id: 'step-1v-8k',
+    pricing: {
+      currency: 'CNY',
+      input: 5,
+      output: 20,
+    },
+    type: 'chat',
+  },
+  {
+    abilities: {
+      functionCall: true,
+      vision: true,
+    },
+    contextWindowTokens: 32_000,
+    description: '支持视觉输入,增强多模态交互体验。',
+    displayName: 'Step 1V 32K',
+    enabled: true,
+    id: 'step-1v-32k',
+    pricing: {
+      currency: 'CNY',
+      input: 15,
+      output: 70,
+    },
+    type: 'chat',
+  },
+  {
+    abilities: {
+      vision: true,
+    },
+    contextWindowTokens: 32_000,
+    description: '该模型拥有强大的视频理解能力。',
+    displayName: 'Step 1.5V Mini',
+    enabled: true,
+    id: 'step-1.5v-mini',
+    pricing: {
+      currency: 'CNY',
+      input: 8,
+      output: 35,
+    },
+    type: 'chat',
+  },
+];
+
+export const allModels = [...stepfunChatModels];
+
+export default allModels;
@@ -0,0 +1,19 @@
+import { AIChatModelCard } from '@/types/aiModel';
+
+const taichuChatModels: AIChatModelCard[] = [
+  {
+    abilities: {
+      functionCall: true,
+    },
+    contextWindowTokens: 32_768,
+    description: 'Taichu 2.0 基于海量高质数据训练,具有更强的文本理解、内容创作、对话问答等能力',
+    displayName: 'Taichu 2.0',
+    enabled: true,
+    id: 'taichu_llm',
+    type: 'chat',
+  },
+];
+
+export const allModels = [...taichuChatModels];
+
+export default allModels;
@@ -0,0 +1,334 @@
+import { AIChatModelCard } from '@/types/aiModel';
+
+const togetheraiChatModels: AIChatModelCard[] = [
+  {
+    contextWindowTokens: 131_072,
+    description:
+      'LLaMA 3.2 旨在处理结合视觉和文本数据的任务。它在图像描述和视觉问答等任务中表现出色,跨越了语言生成和视觉推理之间的鸿沟。',
+    displayName: 'Llama 3.2 3B Instruct Turbo',
+    enabled: true,
+    id: 'meta-llama/Llama-3.2-3B-Instruct-Turbo',
+    type: 'chat',
+  },
+  {
+    abilities: {
+      vision: true,
+    },
+    contextWindowTokens: 131_072,
+    description:
+      'LLaMA 3.2 旨在处理结合视觉和文本数据的任务。它在图像描述和视觉问答等任务中表现出色,跨越了语言生成和视觉推理之间的鸿沟。',
+    displayName: 'Llama 3.2 11B Vision Instruct Turbo (Free)',
+    enabled: true,
+    id: 'meta-llama/Llama-Vision-Free',
+    type: 'chat',
+  },
+  {
+    abilities: {
+      vision: true,
+    },
+    contextWindowTokens: 131_072,
+    description:
+      'LLaMA 3.2 旨在处理结合视觉和文本数据的任务。它在图像描述和视觉问答等任务中表现出色,跨越了语言生成和视觉推理之间的鸿沟。',
+    displayName: 'Llama 3.2 11B Vision Instruct Turbo',
+    id: 'meta-llama/Llama-3.2-11B-Vision-Instruct-Turbo',
+    type: 'chat',
+  },
+  {
+    abilities: {
+      vision: true,
+    },
+    contextWindowTokens: 131_072,
+    description:
+      'LLaMA 3.2 旨在处理结合视觉和文本数据的任务。它在图像描述和视觉问答等任务中表现出色,跨越了语言生成和视觉推理之间的鸿沟。',
+    displayName: 'Llama 3.2 90B Vision Instruct Turbo',
+    enabled: true,
+    id: 'meta-llama/Llama-3.2-90B-Vision-Instruct-Turbo',
+    type: 'chat',
+  },
+  {
+    abilities: {
+      functionCall: true,
+    },
+    contextWindowTokens: 131_072,
+    description:
+      'Llama 3.1 8B 模型采用FP8量化,支持高达131,072个上下文标记,是开源模型中的佼佼者,适合复杂任务,表现优异于许多行业基准。',
+    displayName: 'Llama 3.1 8B Instruct Turbo',
+    enabled: true,
+    id: 'meta-llama/Meta-Llama-3.1-8B-Instruct-Turbo',
+    type: 'chat',
+  },
+  {
+    abilities: {
+      functionCall: true,
+    },
+    contextWindowTokens: 131_072,
+    description:
+      'Llama 3.1 70B 模型经过精细调整,适用于高负载应用,量化至FP8提供更高效的计算能力和准确性,确保在复杂场景中的卓越表现。',
+    displayName: 'Llama 3.1 70B Instruct Turbo',
+    enabled: true,
+    id: 'meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo',
+    type: 'chat',
+  },
+  {
+    abilities: {
+      functionCall: true,
+    },
+    contextWindowTokens: 130_815,
+    description:
+      '405B 的 Llama 3.1 Turbo 模型,为大数据处理提供超大容量的上下文支持,在超大规模的人工智能应用中表现突出。',
+    displayName: 'Llama 3.1 405B Instruct Turbo',
+    enabled: true,
+    id: 'meta-llama/Meta-Llama-3.1-405B-Instruct-Turbo',
+    type: 'chat',
+  },
+  {
+    contextWindowTokens: 32_768,
+    description:
+      'Llama 3.1 Nemotron 70B 是由 NVIDIA 定制的大型语言模型,旨在提高 LLM 生成的响应对用户查询的帮助程度。该模型在 Arena Hard、AlpacaEval 2 LC 和 GPT-4-Turbo MT-Bench 等基准测试中表现出色,截至 2024 年 10 月 1 日,在所有三个自动对齐基准测试中排名第一。该模型使用 RLHF(特别是 REINFORCE)、Llama-3.1-Nemotron-70B-Reward 和 HelpSteer2-Preference 提示在 Llama-3.1-70B-Instruct 模型基础上进行训练',
+    displayName: 'Llama 3.1 Nemotron 70B',
+    enabled: true,
+    id: 'nvidia/Llama-3.1-Nemotron-70B-Instruct-HF',
+    type: 'chat',
+  },
+  {
+    contextWindowTokens: 8192,
+    description: 'Llama 3 8B Instruct Turbo 是一款高效能的大语言模型,支持广泛的应用场景。',
+    displayName: 'Llama 3 8B Instruct Turbo',
+    id: 'meta-llama/Meta-Llama-3-8B-Instruct-Turbo',
+    type: 'chat',
+  },
+  {
+    contextWindowTokens: 8192,
+    description: 'Llama 3 70B Instruct Turbo 提供卓越的语言理解和生成能力,适合最苛刻的计算任务。',
+    displayName: 'Llama 3 70B Instruct Turbo',
+    id: 'meta-llama/Meta-Llama-3-70B-Instruct-Turbo',
+    type: 'chat',
+  },
+  {
+    contextWindowTokens: 8192,
+    description: 'Llama 3 8B Instruct Lite 适合资源受限的环境,提供出色的平衡性能。',
+    displayName: 'Llama 3 8B Instruct Lite',
+    id: 'meta-llama/Meta-Llama-3-8B-Instruct-Lite',
+    type: 'chat',
+  },
+  {
+    contextWindowTokens: 8192,
+    description: 'Llama 3 70B Instruct Lite 适合需要高效能和低延迟的环境。',
+    displayName: 'Llama 3 70B Instruct Lite',
+    id: 'meta-llama/Meta-Llama-3-70B-Instruct-Lite',
+    type: 'chat',
+  },
+  {
+    contextWindowTokens: 8192,
+    description: 'Llama 3 8B Instruct Reference 提供多语言支持,涵盖丰富的领域知识。',
+    displayName: 'Llama 3 8B Instruct Reference',
+    id: 'meta-llama/Llama-3-8b-chat-hf',
+    type: 'chat',
+  },
+  {
+    contextWindowTokens: 8192,
+    description: 'Llama 3 70B Instruct Reference 是功能强大的聊天模型,支持复杂的对话需求。',
+    displayName: 'Llama 3 70B Instruct Reference',
+    id: 'meta-llama/Llama-3-70b-chat-hf',
+    type: 'chat',
+  },
+  {
+    contextWindowTokens: 4096,
+    description: 'LLaMA-2 Chat (13B) 提供优秀的语言处理能力和出色的交互体验。',
+    displayName: 'LLaMA-2 Chat (13B)',
+    id: 'meta-llama/Llama-2-13b-chat-hf',
+    type: 'chat',
+  },
+  {
+    contextWindowTokens: 4096,
+    description: 'LLaMA-2 提供优秀的语言处理能力和出色的交互体验。',
+    displayName: 'LLaMA-2 (70B)',
+    id: 'meta-llama/Llama-2-70b-hf',
+    type: 'chat',
+  },
+  {
+    contextWindowTokens: 16_384,
+    description:
+      'Code Llama 是一款专注于代码生成和讨论的 LLM,结合广泛的编程语言支持,适用于开发者环境。',
+    displayName: 'CodeLlama 34B Instruct',
+    id: 'codellama/CodeLlama-34b-Instruct-hf',
+    type: 'chat',
+  },
+  {
+    contextWindowTokens: 8192,
+    description: 'Gemma 2 9B 由Google开发,提供高效的指令响应和综合能力。',
+    displayName: 'Gemma 2 9B',
+    enabled: true,
+    id: 'google/gemma-2-9b-it',
+    type: 'chat',
+  },
+  {
+    contextWindowTokens: 8192,
+    description: 'Gemma 2 27B 是一款通用大语言模型,具有优异的性能和广泛的应用场景。',
+    displayName: 'Gemma 2 27B',
+    enabled: true,
+    id: 'google/gemma-2-27b-it',
+    type: 'chat',
+  },
+  {
+    contextWindowTokens: 8192,
+    description: 'Gemma Instruct (2B) 提供基本的指令处理能力,适合轻量级应用。',
+    displayName: 'Gemma Instruct (2B)',
+    id: 'google/gemma-2b-it',
+    type: 'chat',
+  },
+  {
+    contextWindowTokens: 32_768,
+    description: 'Mistral (7B) Instruct v0.3 提供高效的计算能力和自然语言理解,适合广泛的应用。',
+    displayName: 'Mistral (7B) Instruct v0.3',
+    enabled: true,
+    id: 'mistralai/Mistral-7B-Instruct-v0.3',
+    type: 'chat',
+  },
+  {
+    contextWindowTokens: 32_768,
+    description: 'Mistral (7B) Instruct v0.2 提供改进的指令处理能力和更精确的结果。',
+    displayName: 'Mistral (7B) Instruct v0.2',
+    id: 'mistralai/Mistral-7B-Instruct-v0.2',
+    type: 'chat',
+  },
+  {
+    abilities: {
+      functionCall: true,
+    },
+    contextWindowTokens: 8192,
+    description: 'Mistral (7B) Instruct 以高性能著称,适用于多种语言任务。',
+    displayName: 'Mistral (7B) Instruct',
+    id: 'mistralai/Mistral-7B-Instruct-v0.1',
+    type: 'chat',
+  },
+  {
+    contextWindowTokens: 8192,
+    description:
+      'Mistral 7B是一款紧凑但高性能的模型,擅长批量处理和简单任务,如分类和文本生成,具有良好的推理能力。',
+    displayName: 'Mistral (7B)',
+    id: 'mistralai/Mistral-7B-v0.1',
+    type: 'chat',
+  },
+  {
+    abilities: {
+      functionCall: true,
+    },
+    contextWindowTokens: 32_768,
+    description: 'Mixtral-8x7B Instruct (46.7B) 提供高容量的计算框架,适合大规模数据处理。',
+    displayName: 'Mixtral-8x7B Instruct (46.7B)',
+    enabled: true,
+    id: 'mistralai/Mixtral-8x7B-Instruct-v0.1',
+    type: 'chat',
+  },
+  {
+    contextWindowTokens: 32_768,
+    description:
+      'Mixtral 8x7B是一个稀疏专家模型,利用多个参数提高推理速度,适合处理多语言和代码生成任务。',
+    displayName: 'Mixtral-8x7B (46.7B)',
+    id: 'mistralai/Mixtral-8x7B-v0.1',
+    type: 'chat',
+  },
+  {
+    contextWindowTokens: 65_536,
+    description: 'Mixtral-8x22B Instruct (141B) 是一款超级大语言模型,支持极高的处理需求。',
+    displayName: 'Mixtral-8x22B Instruct (141B)',
+    enabled: true,
+    id: 'mistralai/Mixtral-8x22B-Instruct-v0.1',
+    type: 'chat',
+  },
+  {
+    contextWindowTokens: 65_536,
+    description:
+      'WizardLM 2 是微软AI提供的语言模型,在复杂对话、多语言、推理和智能助手领域表现尤为出色。',
+    displayName: 'WizardLM-2 8x22B',
+    id: 'microsoft/WizardLM-2-8x22B',
+    type: 'chat',
+  },
+  {
+    contextWindowTokens: 4096,
+    description: 'DeepSeek LLM Chat (67B) 是创新的 AI 模型 提供深度语言理解和互动能力。',
+    displayName: 'DeepSeek LLM Chat (67B)',
+    enabled: true,
+    id: 'deepseek-ai/deepseek-llm-67b-chat',
+    type: 'chat',
+  },
+  {
+    contextWindowTokens: 32_768,
+    description: 'QwQ模型是由 Qwen 团队开发的实验性研究模型,专注于增强 AI 推理能力。',
+    displayName: 'QwQ 32B Preview',
+    enabled: true,
+    id: 'Qwen/QwQ-32B-Preview',
+    type: 'chat',
+  },
+  {
+    contextWindowTokens: 32_768,
+    description: 'Qwen2.5 是全新的大型语言模型系列,旨在优化指令式任务的处理。',
+    displayName: 'Qwen 2.5 7B Instruct Turbo',
+    enabled: true,
+    id: 'Qwen/Qwen2.5-7B-Instruct-Turbo',
+    type: 'chat',
+  },
+  {
+    contextWindowTokens: 32_768,
+    description: 'Qwen2.5 是全新的大型语言模型系列,旨在优化指令式任务的处理。',
+    displayName: 'Qwen 2.5 72B Instruct Turbo',
+    enabled: true,
+    id: 'Qwen/Qwen2.5-72B-Instruct-Turbo',
+    type: 'chat',
+  },
+  {
+    contextWindowTokens: 32_768,
+    description:
+      'Qwen2.5 Coder 32B Instruct 是阿里云发布的代码特定大语言模型系列的最新版本。该模型在 Qwen2.5 的基础上,通过 5.5 万亿个 tokens 的训练,显著提升了代码生成、推理和修复能力。它不仅增强了编码能力,还保持了数学和通用能力的优势。模型为代码智能体等实际应用提供了更全面的基础',
+    displayName: 'Qwen 2.5 Coder 32B Instruct',
+    id: 'Qwen/Qwen2.5-Coder-32B-Instruct',
+    type: 'chat',
+  },
+  {
+    contextWindowTokens: 32_768,
+    description: 'Qwen 2 Instruct (72B) 为企业级应用提供精准的指令理解和响应。',
+    displayName: 'Qwen 2 Instruct (72B)',
+    id: 'Qwen/Qwen2-72B-Instruct',
+    type: 'chat',
+  },
+  {
+    contextWindowTokens: 32_768,
+    description: 'DBRX Instruct 提供高可靠性的指令处理能力,支持多行业应用。',
+    displayName: 'DBRX Instruct',
+    id: 'databricks/dbrx-instruct',
+    type: 'chat',
+  },
+  {
+    contextWindowTokens: 4096,
+    description: 'Upstage SOLAR Instruct v1 (11B) 适用于精细化指令任务,提供出色的语言处理能力。',
+    displayName: 'Upstage SOLAR Instruct v1 (11B)',
+    id: 'upstage/SOLAR-10.7B-Instruct-v1.0',
+    type: 'chat',
+  },
+  {
+    contextWindowTokens: 32_768,
+    description: 'Nous Hermes 2 - Mixtral 8x7B-DPO (46.7B) 是高精度的指令模型,适用于复杂计算。',
+    displayName: 'Nous Hermes 2 - Mixtral 8x7B-DPO (46.7B)',
+    id: 'NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO',
+    type: 'chat',
+  },
+  {
+    contextWindowTokens: 4096,
+    description: 'MythoMax-L2 (13B) 是一种创新模型,适合多领域应用和复杂任务。',
+    displayName: 'MythoMax-L2 (13B)',
+    id: 'Gryphe/MythoMax-L2-13b',
+    type: 'chat',
+  },
+  {
+    contextWindowTokens: 32_768,
+    description: 'StripedHyena Nous (7B) 通过高效的策略和模型架构,提供增强的计算能力。',
+    displayName: 'StripedHyena Nous (7B)',
+    id: 'togethercomputer/StripedHyena-Nous-7B',
+    type: 'chat',
+  },
+];
+
+export const allModels = [...togetheraiChatModels];
+
+export default allModels;
@@ -0,0 +1,37 @@
+import { AIChatModelCard } from '@/types/aiModel';
+
+const upstageChatModels: AIChatModelCard[] = [
+  {
+    abilities: {
+      functionCall: true,
+    },
+    contextWindowTokens: 32_768,
+    description:
+      'Solar Mini 是一种紧凑型 LLM,性能优于 GPT-3.5,具备强大的多语言能力,支持英语和韩语,提供高效小巧的解决方案。',
+    displayName: 'Solar Mini',
+    enabled: true,
+    id: 'solar-1-mini-chat',
+    type: 'chat',
+  },
+  {
+    contextWindowTokens: 32_768,
+    description:
+      'Solar Mini (Ja) 扩展了 Solar Mini 的能力,专注于日语,同时在英语和韩语的使用中保持高效和卓越性能。',
+    displayName: 'Solar Mini (Ja)',
+    id: 'solar-1-mini-chat-ja',
+    type: 'chat',
+  },
+  {
+    contextWindowTokens: 4096,
+    description:
+      'Solar Pro 是 Upstage 推出的一款高智能LLM,专注于单GPU的指令跟随能力,IFEval得分80以上。目前支持英语,正式版本计划于2024年11月推出,将扩展语言支持和上下文长度。',
+    displayName: 'Solar Pro',
+    enabled: true,
+    id: 'solar-pro',
+    type: 'chat',
+  },
+];
+
+export const allModels = [...upstageChatModels];
+
+export default allModels;
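The hunks above match, by added line count, the new src/config/aiModels catalogs for spark, stepfun, taichu, togetherai, and upstage. Each file follows the same pattern: a provider-specific AIChatModelCard[] array plus an aggregated allModels default export. The sketch below only infers the card shape from the fields visible in these hunks; it is not the actual AIChatModelCard definition from src/types/aiModel.ts (which this release modifies but this diff does not show in full), and the pricing units and the exact meaning of enabled are assumptions.

// Inferred sketch of the card shape used by the new catalogs above.
// Reconstructed from the fields visible in the hunks; the authoritative
// definition lives in src/types/aiModel.ts.
interface AIChatModelCardSketch {
  abilities?: {
    functionCall?: boolean;
    vision?: boolean;
  };
  contextWindowTokens: number;
  description?: string;
  displayName: string;
  enabled?: boolean; // assumption: marks models surfaced by default for the provider
  id: string;
  maxOutput?: number;
  pricing?: {
    currency?: string; // e.g. 'CNY'
    input: number; // assumption: price per fixed quantity of input tokens
    output: number; // assumption: price per fixed quantity of output tokens
  };
  type: 'chat';
}

// Every catalog ends the same way: spread the provider array into allModels
// and re-export it as the module default. Hypothetical provider for illustration:
const exampleChatModels: AIChatModelCardSketch[] = [
  {
    contextWindowTokens: 8192,
    displayName: 'Example 8K',
    enabled: true,
    id: 'example-8k',
    type: 'chat',
  },
];

export const allModels = [...exampleChatModels];

export default allModels;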