@lobehub/lobehub 2.0.0-next.15 → 2.0.0-next.17
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +50 -0
- package/README.md +2 -45
- package/README.zh-CN.md +2 -45
- package/changelog/v1.json +18 -0
- package/docs/self-hosting/advanced/feature-flags.mdx +0 -1
- package/docs/self-hosting/advanced/feature-flags.zh-CN.mdx +0 -1
- package/e2e/src/features/discover/smoke.feature +34 -1
- package/e2e/src/steps/discover/smoke.steps.ts +116 -4
- package/package.json +1 -1
- package/packages/model-runtime/src/utils/googleErrorParser.test.ts +125 -0
- package/packages/model-runtime/src/utils/googleErrorParser.ts +103 -77
- package/packages/types/src/serverConfig.ts +2 -6
- package/src/app/[variants]/(auth)/signup/[[...signup]]/page.tsx +1 -8
- package/src/app/[variants]/(main)/(mobile)/me/(home)/features/UserBanner.tsx +3 -6
- package/src/app/[variants]/(main)/discover/(list)/features/Pagination.tsx +1 -0
- package/src/app/[variants]/(main)/discover/(list)/features/SortButton/index.tsx +1 -1
- package/src/app/[variants]/(main)/discover/(list)/mcp/features/List/Item.tsx +1 -0
- package/src/app/[variants]/(main)/discover/(list)/model/features/List/Item.tsx +1 -0
- package/src/app/[variants]/(main)/discover/(list)/provider/features/List/Item.tsx +1 -0
- package/src/app/[variants]/(main)/discover/components/CategoryMenu.tsx +9 -1
- package/src/app/[variants]/(main)/labs/components/LabCard.tsx +3 -1
- package/src/app/[variants]/(main)/settings/provider/detail/azure/index.tsx +5 -7
- package/src/components/InvalidAPIKey/APIKeyForm/Bedrock.tsx +8 -13
- package/src/config/featureFlags/schema.test.ts +0 -2
- package/src/config/featureFlags/schema.ts +0 -6
- package/src/config/modelProviders/ai21.ts +1 -16
- package/src/config/modelProviders/ai302.ts +1 -128
- package/src/config/modelProviders/ai360.ts +1 -32
- package/src/config/modelProviders/anthropic.ts +1 -71
- package/src/config/modelProviders/azure.ts +1 -51
- package/src/config/modelProviders/baichuan.ts +1 -57
- package/src/config/modelProviders/bedrock.ts +1 -276
- package/src/config/modelProviders/cloudflare.ts +1 -64
- package/src/config/modelProviders/deepseek.ts +1 -19
- package/src/config/modelProviders/fireworksai.ts +1 -174
- package/src/config/modelProviders/giteeai.ts +1 -135
- package/src/config/modelProviders/github.ts +1 -254
- package/src/config/modelProviders/google.ts +1 -130
- package/src/config/modelProviders/groq.ts +1 -119
- package/src/config/modelProviders/higress.ts +1 -1713
- package/src/config/modelProviders/huggingface.ts +1 -54
- package/src/config/modelProviders/hunyuan.ts +1 -83
- package/src/config/modelProviders/infiniai.ts +1 -74
- package/src/config/modelProviders/internlm.ts +1 -20
- package/src/config/modelProviders/mistral.ts +1 -95
- package/src/config/modelProviders/modelscope.ts +1 -27
- package/src/config/modelProviders/moonshot.ts +1 -29
- package/src/config/modelProviders/novita.ts +1 -105
- package/src/config/modelProviders/ollama.ts +1 -325
- package/src/config/modelProviders/openai.ts +1 -242
- package/src/config/modelProviders/openrouter.ts +1 -240
- package/src/config/modelProviders/perplexity.ts +1 -45
- package/src/config/modelProviders/ppio.ts +1 -152
- package/src/config/modelProviders/qiniu.ts +1 -18
- package/src/config/modelProviders/qwen.ts +1 -245
- package/src/config/modelProviders/search1api.ts +1 -34
- package/src/config/modelProviders/sensenova.ts +1 -69
- package/src/config/modelProviders/siliconcloud.ts +1 -417
- package/src/config/modelProviders/spark.ts +1 -59
- package/src/config/modelProviders/stepfun.ts +1 -98
- package/src/config/modelProviders/taichu.ts +1 -18
- package/src/config/modelProviders/togetherai.ts +1 -274
- package/src/config/modelProviders/upstage.ts +1 -28
- package/src/config/modelProviders/wenxin.ts +1 -140
- package/src/config/modelProviders/xai.ts +1 -38
- package/src/config/modelProviders/zeroone.ts +1 -81
- package/src/config/modelProviders/zhipu.ts +1 -108
- package/src/helpers/isCanUseFC.ts +0 -8
- package/src/hooks/useEnabledChatModels.ts +0 -8
- package/src/hooks/useModelContextWindowTokens.ts +0 -8
- package/src/hooks/useModelHasContextWindowToken.ts +1 -10
- package/src/hooks/useModelSupportFiles.ts +1 -11
- package/src/hooks/useModelSupportReasoning.ts +1 -11
- package/src/hooks/useModelSupportToolUse.ts +1 -11
- package/src/hooks/useModelSupportVision.ts +1 -11
- package/src/layout/AuthProvider/Clerk/index.tsx +2 -16
- package/src/server/globalConfig/index.ts +0 -23
- package/src/server/routers/lambda/config/__snapshots__/index.test.ts.snap +175 -12
- package/src/server/routers/lambda/config/index.test.ts +36 -28
- package/src/services/chat/chat.test.ts +12 -0
- package/src/services/chat/helper.ts +7 -31
- package/src/services/models.ts +2 -11
- package/src/store/chat/slices/aiChat/actions/generateAIChat.ts +41 -14
- package/src/store/global/store.ts +1 -7
- package/src/store/user/initialState.ts +1 -7
- package/src/store/user/selectors.ts +1 -5
- package/src/store/user/slices/common/action.ts +5 -4
- package/src/store/user/slices/settings/selectors/index.ts +1 -0
- package/src/store/user/slices/settings/selectors/keyVaults.ts +21 -0
- package/src/store/user/store.ts +0 -3
- package/src/tools/web-browsing/Render/Search/ConfigForm/Form.tsx +1 -1
- package/packages/utils/src/_deprecated/__snapshots__/parseModels.test.ts.snap +0 -104
- package/packages/utils/src/_deprecated/parseModels.test.ts +0 -287
- package/packages/utils/src/_deprecated/parseModels.ts +0 -165
- package/src/hooks/_header.ts +0 -23
- package/src/server/globalConfig/_deprecated.test.ts +0 -92
- package/src/server/globalConfig/_deprecated.ts +0 -41
- package/src/store/global/actions/clientDb.ts +0 -67
- package/src/store/user/slices/modelList/__snapshots__/action.test.ts.snap +0 -12
- package/src/store/user/slices/modelList/action.test.ts +0 -359
- package/src/store/user/slices/modelList/action.ts +0 -223
- package/src/store/user/slices/modelList/initialState.ts +0 -15
- package/src/store/user/slices/modelList/reducers/customModelCard.test.ts +0 -204
- package/src/store/user/slices/modelList/reducers/customModelCard.ts +0 -64
- package/src/store/user/slices/modelList/selectors/index.ts +0 -3
- package/src/store/user/slices/modelList/selectors/keyVaults.test.ts +0 -201
- package/src/store/user/slices/modelList/selectors/keyVaults.ts +0 -50
- package/src/store/user/slices/modelList/selectors/modelConfig.test.ts +0 -219
- package/src/store/user/slices/modelList/selectors/modelConfig.ts +0 -95
- package/src/store/user/slices/modelList/selectors/modelProvider.test.ts +0 -138
- package/src/store/user/slices/modelList/selectors/modelProvider.ts +0 -170

package/src/config/modelProviders/cloudflare.ts

@@ -3,70 +3,7 @@ import { ModelProviderCard } from '@/types/llm';
 // ref https://developers.cloudflare.com/workers-ai/models/#text-generation
 // api https://developers.cloudflare.com/workers-ai/configuration/open-ai-compatibility
 const Cloudflare: ModelProviderCard = {
-  chatModels: [
-    {
-      contextWindowTokens: 16_384,
-      displayName: 'DeepSeek R1 (Distill Qwen 32B)',
-      enabled: true,
-      id: '@cf/deepseek-ai/deepseek-r1-distill-qwen-32b',
-    },
-    {
-      contextWindowTokens: 2048,
-      displayName: 'gemma-7b-it',
-      id: '@hf/google/gemma-7b-it',
-    },
-    {
-      contextWindowTokens: 4096,
-      displayName: 'hermes-2-pro-mistral-7b',
-      // functionCall: true,
-      id: '@hf/nousresearch/hermes-2-pro-mistral-7b',
-    },
-    {
-      contextWindowTokens: 131_072,
-      displayName: 'llama 3.3 70b',
-      id: '@cf/meta/llama-3.3-70b-instruct-fp8-fast',
-    },
-    {
-      contextWindowTokens: 4096,
-      displayName: 'mistral-7b-instruct-v0.2',
-      id: '@hf/mistral/mistral-7b-instruct-v0.2',
-    },
-    {
-      contextWindowTokens: 32_768,
-      displayName: 'neural-chat-7b-v3-1-awq',
-      id: '@hf/thebloke/neural-chat-7b-v3-1-awq',
-    },
-    {
-      contextWindowTokens: 8192,
-      displayName: 'openchat-3.5-0106',
-      id: '@cf/openchat/openchat-3.5-0106',
-    },
-    {
-      contextWindowTokens: 32_768,
-      displayName: 'openhermes-2.5-mistral-7b-awq',
-      id: '@hf/thebloke/openhermes-2.5-mistral-7b-awq',
-    },
-    {
-      contextWindowTokens: 32_768,
-      displayName: 'qwen1.5-14b-chat-awq',
-      enabled: true,
-      id: '@cf/qwen/qwen1.5-14b-chat-awq',
-    },
-    {
-      contextWindowTokens: 4096,
-      displayName: 'starling-lm-7b-beta',
-      id: '@hf/nexusflow/starling-lm-7b-beta',
-    },
-    {
-      contextWindowTokens: 32_768,
-      displayName: 'zephyr-7b-beta-awq',
-      id: '@hf/thebloke/zephyr-7b-beta-awq',
-    },
-    {
-      displayName: 'meta-llama-3-8b-instruct',
-      id: '@hf/meta-llama/meta-llama-3-8b-instruct',
-    },
-  ],
+  chatModels: [],
   checkModel: '@hf/meta-llama/meta-llama-3-8b-instruct',
   description: '在 Cloudflare 的全球网络上运行由无服务器 GPU 驱动的机器学习模型。',
   disableBrowserRequest: true,

package/src/config/modelProviders/deepseek.ts

@@ -1,25 +1,7 @@
 import { ModelProviderCard } from '@/types/llm';
 
 const DeepSeek: ModelProviderCard = {
-  chatModels: [
-    {
-      contextWindowTokens: 131_072,
-      description:
-        '最新模型 DeepSeek-V3 多项评测成绩超越 Qwen2.5-72B 和 Llama-3.1-405B 等开源模型,性能对齐领军闭源模型 GPT-4o 与 Claude-3.5-Sonnet。',
-      displayName: 'DeepSeek V3.2 Exp',
-      enabled: true,
-      functionCall: true,
-      id: 'deepseek-chat',
-    },
-    {
-      contextWindowTokens: 131_072,
-      description:
-        'DeepSeek 推出的推理模型。在输出最终回答之前,模型会先输出一段思维链内容,以提升最终答案的准确性。',
-      displayName: 'DeepSeek V3.2 Exp Thinking',
-      enabled: true,
-      id: 'deepseek-reasoner',
-    },
-  ],
+  chatModels: [],
   checkModel: 'deepseek-chat',
   description:
     'DeepSeek 是一家专注于人工智能技术研究和应用的公司,其最新模型 DeepSeek-V3 多项评测成绩超越 Qwen2.5-72B 和 Llama-3.1-405B 等开源模型,性能对齐领军闭源模型 GPT-4o 与 Claude-3.5-Sonnet。',

package/src/config/modelProviders/fireworksai.ts

@@ -3,180 +3,7 @@ import { ModelProviderCard } from '@/types/llm';
 // ref: https://fireworks.ai/models?show=Serverless
 // ref: https://fireworks.ai/pricing
 const FireworksAI: ModelProviderCard = {
-  chatModels: [
-    {
-      contextWindowTokens: 131_072,
-      description:
-        'Llama 3.3 70B Instruct 是 Llama 3.1 70B 的 12 月更新版本。该模型在 Llama 3.1 70B(于 2024 年 7 月发布)的基础上进行了改进,增强了工具调用、多语言文本支持、数学和编程能力。该模型在推理、数学和指令遵循方面达到了行业领先水平,并且能够提供与 3.1 405B 相似的性能,同时在速度和成本上具有显著优势。',
-      displayName: 'Llama 3.3 70B Instruct',
-      enabled: true,
-      id: 'accounts/fireworks/models/llama-v3p3-70b-instruct',
-    },
-    {
-      contextWindowTokens: 131_072,
-      description:
-        'Llama 3.2 3B Instruct 是 Meta 推出的轻量级多语言模型。该模型专为高效运行而设计,相较于更大型的模型,具有显著的延迟和成本优势。其典型应用场景包括查询和提示重写,以及写作辅助。',
-      displayName: 'Llama 3.2 3B Instruct',
-      enabled: true,
-      id: 'accounts/fireworks/models/llama-v3p2-3b-instruct',
-    },
-    {
-      contextWindowTokens: 131_072,
-      description:
-        'Meta 推出的指令微调图像推理模型,拥有 110 亿参数。该模型针对视觉识别、图像推理、图片字幕生成以及图片相关的常规问答进行了优化。它能够理解视觉数据,如图表和图形,并通过生成文本描述图像细节,弥合视觉与语言之间的鸿沟。',
-      displayName: 'Llama 3.2 11B Vision Instruct',
-      enabled: true,
-      id: 'accounts/fireworks/models/llama-v3p2-11b-vision-instruct',
-      vision: true,
-    },
-    {
-      contextWindowTokens: 131_072,
-      description:
-        'Meta 推出的指令微调图像推理模型,拥有 900 亿参数。该模型针对视觉识别、图像推理、图片字幕生成以及图片相关的常规问答进行了优化。它能够理解视觉数据,如图表和图形,并通过生成文本描述图像细节,弥合视觉与语言之间的鸿沟。注意:该模型目前作为无服务器模型进行实验性提供。如果用于生产环境,请注意 Fireworks 可能会在短时间内取消部署该模型。',
-      displayName: 'Llama 3.2 90B Vision Instruct',
-      enabled: true,
-      id: 'accounts/fireworks/models/llama-v3p2-90b-vision-instruct',
-      vision: true,
-    },
-    {
-      contextWindowTokens: 131_072,
-      description:
-        'Meta Llama 3.1 系列是多语言大语言模型(LLM)集合,包含 8B、70B 和 405B 三种参数规模的预训练和指令微调生成模型。Llama 3.1 指令微调文本模型(8B、70B、405B)专为多语言对话应用优化,并在常见的行业基准测试中优于许多现有的开源和闭源聊天模型。',
-      displayName: 'Llama 3.1 8B Instruct',
-      id: 'accounts/fireworks/models/llama-v3p1-8b-instruct',
-    },
-    {
-      contextWindowTokens: 131_072,
-      description:
-        'Meta Llama 3.1 系列是多语言大语言模型(LLM)集合,包含 8B、70B 和 405B 三种参数规模的预训练和指令微调生成模型。Llama 3.1 指令微调文本模型(8B、70B、405B)专为多语言对话应用优化,并在常见的行业基准测试中优于许多现有的开源和闭源聊天模型。',
-      displayName: 'Llama 3.1 70B Instruct',
-      functionCall: true,
-      id: 'accounts/fireworks/models/llama-v3p1-70b-instruct',
-    },
-    {
-      contextWindowTokens: 131_072,
-      description:
-        'Meta Llama 3.1 系列是多语言大语言模型(LLM)集合,包含 8B、70B 和 405B 参数规模的预训练和指令微调生成模型。Llama 3.1 指令微调文本模型(8B、70B、405B)专为多语言对话场景优化,在常见的行业基准测试中优于许多现有的开源和闭源聊天模型。405B 是 Llama 3.1 家族中能力最强的模型。该模型采用 FP8 进行推理,与参考实现高度匹配。',
-      displayName: 'Llama 3.1 405B Instruct',
-      functionCall: true,
-      id: 'accounts/fireworks/models/llama-v3p1-405b-instruct',
-    },
-    {
-      contextWindowTokens: 8192,
-      description:
-        'Meta 开发并发布了 Meta Llama 3 系列大语言模型(LLM),这是一个包含 8B 和 70B 参数规模的预训练和指令微调生成文本模型的集合。Llama 3 指令微调模型专为对话应用场景优化,并在常见的行业基准测试中优于许多现有的开源聊天模型。',
-      displayName: 'Llama 3 8B Instruct',
-      id: 'accounts/fireworks/models/llama-v3-8b-instruct',
-    },
-    {
-      contextWindowTokens: 8192,
-      description:
-        'Meta 开发并发布了 Meta Llama 3 系列大语言模型(LLM),该系列包含 8B 和 70B 参数规模的预训练和指令微调生成文本模型。Llama 3 指令微调模型专为对话应用场景优化,并在常见的行业基准测试中优于许多现有的开源聊天模型。',
-      displayName: 'Llama 3 70B Instruct',
-      id: 'accounts/fireworks/models/llama-v3-70b-instruct',
-    },
-    {
-      contextWindowTokens: 8192,
-      description:
-        'Meta Llama 3 指令微调模型专为对话应用场景优化,并在常见的行业基准测试中优于许多现有的开源聊天模型。Llama 3 8B Instruct(HF 版本)是 Llama 3 8B Instruct 的原始 FP16 版本,其结果应与官方 Hugging Face 实现一致。',
-      displayName: 'Llama 3 8B Instruct (HF version)',
-      id: 'accounts/fireworks/models/llama-v3-8b-instruct-hf',
-    },
-    {
-      contextWindowTokens: 32_768,
-      description: '24B 参数模型,具备与更大型模型相当的最先进能力。',
-      displayName: 'Mistral Small 3 Instruct',
-      enabled: true,
-      id: 'accounts/fireworks/models/mistral-small-24b-instruct-2501',
-    },
-    {
-      contextWindowTokens: 32_768,
-      description:
-        'Mixtral MoE 8x7B Instruct 是 Mixtral MoE 8x7B 的指令微调版本,已启用聊天完成功能 API。',
-      displayName: 'Mixtral MoE 8x7B Instruct',
-      id: 'accounts/fireworks/models/mixtral-8x7b-instruct',
-    },
-    {
-      contextWindowTokens: 65_536,
-      description:
-        'Mixtral MoE 8x22B Instruct v0.1 是 Mixtral MoE 8x22B v0.1 的指令微调版本,已启用聊天完成功能 API。',
-      displayName: 'Mixtral MoE 8x22B Instruct',
-      functionCall: true,
-      id: 'accounts/fireworks/models/mixtral-8x22b-instruct',
-    },
-    {
-      contextWindowTokens: 32_064,
-      description:
-        'Phi-3-Vision-128K-Instruct 是一个轻量级的、最先进的开放多模态模型,基于包括合成数据和筛选后的公开网站数据集构建,重点关注文本和视觉方面的高质量、推理密集型数据。该模型属于 Phi-3 模型家族,其多模态版本支持 128K 上下文长度(以标记为单位)。该模型经过严格的增强过程,包括监督微调和直接偏好优化,以确保精确的指令遵循和强大的安全措施。',
-      displayName: 'Phi 3.5 Vision Instruct',
-      enabled: true,
-      id: 'accounts/fireworks/models/phi-3-vision-128k-instruct',
-      vision: true,
-    },
-    {
-      contextWindowTokens: 32_768,
-      description:
-        'MythoMix 的改进版,可能是其更为完善的变体,是 MythoLogic-L2 和 Huginn 的合并,采用了高度实验性的张量类型合并技术。由于其独特的性质,该模型在讲故事和角色扮演方面表现出色。',
-      displayName: 'MythoMax L2 13b',
-      id: 'accounts/fireworks/models/mythomax-l2-13b',
-    },
-    {
-      contextWindowTokens: 131_072,
-      description:
-        'Deepseek 提供的强大 Mixture-of-Experts (MoE) 语言模型,总参数量为 671B,每个标记激活 37B 参数。',
-      displayName: 'Deepseek V3',
-      enabled: true,
-      id: 'accounts/fireworks/models/deepseek-v3',
-    },
-    {
-      contextWindowTokens: 163_840,
-      description:
-        'DeepSeek-R1 是一款最先进的大型语言模型,经过强化学习和冷启动数据的优化,具有出色的推理、数学和编程性能。',
-      displayName: 'Deepseek R1',
-      enabled: true,
-      id: 'accounts/fireworks/models/deepseek-r1',
-    },
-    {
-      contextWindowTokens: 32_768,
-      description:
-        'Qwen QwQ 模型专注于推动 AI 推理,并展示了开放模型在推理能力上与闭源前沿模型匹敌的力量。QwQ-32B-Preview 是一个实验性发布版本,在 GPQA、AIME、MATH-500 和 LiveCodeBench 基准测试中,在分析和推理能力上可与 o1 相媲美,并超越 GPT-4o 和 Claude 3.5 Sonnet。注意:该模型目前作为无服务器模型进行实验性提供。如果用于生产环境,请注意 Fireworks 可能会在短时间内取消部署该模型。',
-      displayName: 'Qwen Qwq 32b Preview',
-      enabled: true,
-      id: 'accounts/fireworks/models/qwen-qwq-32b-preview',
-    },
-    {
-      contextWindowTokens: 32_768,
-      description:
-        'Qwen2.5 是由 Qwen 团队和阿里云开发的一系列仅解码语言模型,提供 0.5B、1.5B、3B、7B、14B、32B 和 72B 不同参数规模,并包含基础版和指令微调版。',
-      displayName: 'Qwen2.5 72B Instruct',
-      enabled: true,
-      id: 'accounts/fireworks/models/qwen2p5-72b-instruct',
-    },
-    {
-      contextWindowTokens: 32_768,
-      description: 'Qwen-VL 模型的 72B 版本是阿里巴巴最新迭代的成果,代表了近一年的创新。',
-      displayName: 'Qwen2 VL 72B Instruct',
-      enabled: true,
-      id: 'accounts/fireworks/models/qwen2-vl-72b-instruct',
-      vision: true,
-    },
-    {
-      contextWindowTokens: 32_768,
-      description:
-        'Qwen2.5-Coder 是最新一代专为代码设计的 Qwen 大型语言模型(前称为 CodeQwen)。注意:该模型目前作为无服务器模型进行实验性提供。如果用于生产环境,请注意 Fireworks 可能会在短时间内取消部署该模型。',
-      displayName: 'Qwen2.5-Coder-32B-Instruct',
-      enabled: true,
-      id: 'accounts/fireworks/models/qwen2p5-coder-32b-instruct',
-    },
-    {
-      contextWindowTokens: 32_768,
-      description:
-        'Yi-Large 是顶尖的大型语言模型之一,在 LMSYS 基准测试排行榜上,其表现仅次于 GPT-4、Gemini 1.5 Pro 和 Claude 3 Opus。它在多语言能力方面表现卓越,特别是在西班牙语、中文、日语、德语和法语方面。Yi-Large 还具有用户友好性,采用与 OpenAI 相同的 API 定义,便于集成。',
-      displayName: 'Yi-Large',
-      enabled: true,
-      id: 'accounts/yi-01-ai/models/yi-large',
-    },
-  ],
+  chatModels: [],
   checkModel: 'accounts/fireworks/models/llama-v3p2-3b-instruct',
   description:
     'Fireworks AI 是一家领先的高级语言模型服务商,专注于功能调用和多模态处理。其最新模型 Firefunction V2 基于 Llama-3,优化用于函数调用、对话及指令跟随。视觉语言模型 FireLLaVA-13B 支持图像和文本混合输入。其他 notable 模型包括 Llama 系列和 Mixtral 系列,提供高效的多语言指令跟随与生成支持。',

package/src/config/modelProviders/giteeai.ts

@@ -2,141 +2,7 @@ import { ModelProviderCard } from '@/types/llm';
 
 // ref: https://ai.gitee.com/serverless-api/packages/1910
 const GiteeAI: ModelProviderCard = {
-  chatModels: [
-    {
-      contextWindowTokens: 16_000,
-      description:
-        'Qwen2.5-72B-Instruct 支持 16k 上下文, 生成长文本超过 8K 。支持 function call 与外部系统无缝交互,极大提升了灵活性和扩展性。模型知识明显增加,并且大大提高了编码和数学能力, 多语言支持超过 29 种',
-      displayName: 'Qwen2.5 72B Instruct',
-      enabled: true,
-      functionCall: true,
-      id: 'Qwen2.5-72B-Instruct',
-    },
-    {
-      contextWindowTokens: 32_000,
-      description:
-        'Qwen2.5-32B-Instruct 是一款 320 亿参数的大语言模型,性能表现均衡,优化中文和多语言场景,支持智能问答、内容生成等应用。',
-      displayName: 'Qwen2.5 32B Instruct',
-      enabled: true,
-      id: 'Qwen2.5-32B-Instruct',
-    },
-    {
-      contextWindowTokens: 24_000,
-      description:
-        'Qwen2.5-14B-Instruct 是一款 140 亿参数的大语言模型,性能表现优秀,优化中文和多语言场景,支持智能问答、内容生成等应用。',
-      displayName: 'Qwen2.5 14B Instruct',
-      enabled: true,
-      id: 'Qwen2.5-14B-Instruct',
-    },
-    {
-      contextWindowTokens: 32_000,
-      description:
-        'Qwen2.5-7B-Instruct 是一款 70 亿参数的大语言模型,支持 function call 与外部系统无缝交互,极大提升了灵活性和扩展性。优化中文和多语言场景,支持智能问答、内容生成等应用。',
-      displayName: 'Qwen2.5 7B Instruct',
-      enabled: true,
-      functionCall: true,
-      id: 'Qwen2.5-7B-Instruct',
-    },
-    {
-      contextWindowTokens: 32_000,
-      description:
-        'Qwen2 是 Qwen 模型的最新系列,对比当前最优的开源模型,Qwen2-72B 在自然语言理解、知识、代码、数学及多语言等多项能力上均显著超越当前领先的模型。',
-      displayName: 'Qwen2 72B Instruct',
-      id: 'Qwen2-72B-Instruct',
-    },
-    {
-      contextWindowTokens: 24_000,
-      description:
-        'Qwen2 是 Qwen 模型的最新系列,能够超越同等规模的最优开源模型甚至更大规模的模型,Qwen2 7B 在多个评测上取得显著的优势,尤其是代码及中文理解上。',
-      displayName: 'Qwen2 7B Instruct',
-      id: 'Qwen2-7B-Instruct',
-    },
-    {
-      contextWindowTokens: 32_000,
-      description:
-        'Qwen2.5-Coder-32B-Instruct 是一款专为代码生成、代码理解和高效开发场景设计的大型语言模型,采用了业界领先的32B参数规模,能够满足多样化的编程需求。',
-      displayName: 'Qwen2.5 Coder 32B Instruct',
-      enabled: true,
-      id: 'Qwen2.5-Coder-32B-Instruct',
-    },
-    {
-      contextWindowTokens: 24_000,
-      description:
-        'Qwen2.5-Coder-14B-Instruct 是一款基于大规模预训练的编程指令模型,具备强大的代码理解和生成能力,能够高效地处理各种编程任务,特别适合智能代码编写、自动化脚本生成和编程问题解答。',
-      displayName: 'Qwen2.5 Coder 14B Instruct',
-      enabled: true,
-      id: 'Qwen2.5-Coder-14B-Instruct',
-    },
-    {
-      contextWindowTokens: 32_000,
-      description:
-        'Qwen2-VL-72B是一款强大的视觉语言模型,支持图像与文本的多模态处理,能够精确识别图像内容并生成相关描述或回答。',
-      displayName: 'Qwen2 VL 72B',
-      enabled: true,
-      id: 'Qwen2-VL-72B',
-      vision: true,
-    },
-    {
-      contextWindowTokens: 32_000,
-      description:
-        'InternVL2.5-26B是一款强大的视觉语言模型,支持图像与文本的多模态处理,能够精确识别图像内容并生成相关描述或回答。',
-      displayName: 'InternVL2.5 26B',
-      enabled: true,
-      id: 'InternVL2.5-26B',
-      vision: true,
-    },
-    {
-      contextWindowTokens: 32_000,
-      description:
-        'InternVL2-8B 是一款强大的视觉语言模型,支持图像与文本的多模态处理,能够精确识别图像内容并生成相关描述或回答。',
-      displayName: 'InternVL2 8B',
-      enabled: true,
-      id: 'InternVL2-8B',
-      vision: true,
-    },
-    {
-      contextWindowTokens: 32_000,
-      description:
-        'GLM-4-9B-Chat 在语义、数学、推理、代码和知识等多方面均表现出较高性能。还具备网页浏览、代码执行、自定义工具调用和长文本推理。 支持包括日语,韩语,德语在内的 26 种语言。',
-      displayName: 'GLM4 9B Chat',
-      enabled: true,
-      id: 'glm-4-9b-chat',
-    },
-    {
-      contextWindowTokens: 4000,
-      description:
-        'Yi-1.5-34B-Chat 在保持原系列模型优秀的通用语言能力的前提下,通过增量训练 5 千亿高质量 token,大幅提高了数学逻辑、代码能力。',
-      displayName: 'Yi 34B Chat',
-      enabled: true,
-      id: 'Yi-34B-Chat',
-    },
-    /*
-    // not compatible with OpenAI SDK
-    {
-      description:
-        '代码小浣熊是基于商汤大语言模型的软件智能研发助手,覆盖软件需求分析、架构设计、代码编写、软件测试等环节,满足用户代码编写、编程学习等各类需求。代码小浣熊支持 Python、Java、JavaScript、C++、Go、SQL 等 90+主流编程语言和 VS Code、IntelliJ IDEA 等主流 IDE。在实际应用中,代码小浣熊可帮助开发者提升编程效率超 50%。',
-      displayName: 'Code Raccoon v1',
-      enabled: true,
-      id: 'code-raccoon-v1',
-    },
-    */
-    {
-      contextWindowTokens: 8000,
-      description:
-        'DeepSeek Coder 33B 是一个代码语言模型, 基于 2 万亿数据训练而成,其中 87% 为代码, 13% 为中英文语言。模型引入 16K 窗口大小和填空任务,提供项目级别的代码补全和片段填充功能。',
-      displayName: 'DeepSeek Coder 33B Instruct',
-      enabled: true,
-      id: 'deepseek-coder-33B-instruct',
-    },
-    {
-      contextWindowTokens: 32_000,
-      description:
-        'CodeGeeX4-ALL-9B 是一个多语言代码生成模型,支持包括代码补全和生成、代码解释器、网络搜索、函数调用、仓库级代码问答在内的全面功能,覆盖软件开发的各种场景。是参数少于 10B 的顶尖代码生成模型。',
-      displayName: 'CodeGeeX4 All 9B',
-      enabled: true,
-      id: 'codegeex4-all-9b',
-    },
-  ],
+  chatModels: [],
   checkModel: 'Qwen2.5-72B-Instruct',
   description: 'Gitee AI 的 Serverless API 为 AI 开发者提供开箱即用的大模型推理 API 服务。',
   disableBrowserRequest: true,