@lobehub/chat 1.16.8 → 1.16.9

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of @lobehub/chat might be problematic. Click here for more details.

Files changed (37)
  1. package/CHANGELOG.md +25 -0
  2. package/package.json +1 -1
  3. package/src/config/modelProviders/ai360.ts +11 -0
  4. package/src/config/modelProviders/anthropic.ts +27 -18
  5. package/src/config/modelProviders/azure.ts +12 -3
  6. package/src/config/modelProviders/baichuan.ts +3 -1
  7. package/src/config/modelProviders/bedrock.ts +20 -18
  8. package/src/config/modelProviders/deepseek.ts +3 -1
  9. package/src/config/modelProviders/fireworksai.ts +33 -5
  10. package/src/config/modelProviders/google.ts +16 -13
  11. package/src/config/modelProviders/groq.ts +19 -8
  12. package/src/config/modelProviders/minimax.ts +8 -6
  13. package/src/config/modelProviders/mistral.ts +19 -3
  14. package/src/config/modelProviders/moonshot.ts +11 -1
  15. package/src/config/modelProviders/novita.ts +24 -0
  16. package/src/config/modelProviders/ollama.ts +58 -1
  17. package/src/config/modelProviders/openai.ts +52 -18
  18. package/src/config/modelProviders/openrouter.ts +21 -1
  19. package/src/config/modelProviders/perplexity.ts +19 -3
  20. package/src/config/modelProviders/qwen.ts +11 -8
  21. package/src/config/modelProviders/siliconcloud.ts +34 -1
  22. package/src/config/modelProviders/spark.ts +16 -7
  23. package/src/config/modelProviders/stepfun.ts +13 -1
  24. package/src/config/modelProviders/taichu.ts +7 -2
  25. package/src/config/modelProviders/togetherai.ts +38 -2
  26. package/src/config/modelProviders/upstage.ts +11 -4
  27. package/src/config/modelProviders/zeroone.ts +5 -1
  28. package/src/config/modelProviders/zhipu.ts +20 -18
  29. package/src/libs/agent-runtime/openai/__snapshots__/index.test.ts.snap +13 -6
  30. package/src/migrations/FromV3ToV4/fixtures/ollama-output-v4.json +1 -0
  31. package/src/server/routers/edge/config/__snapshots__/index.test.ts.snap +24 -4
  32. package/src/server/routers/edge/config/index.test.ts +3 -11
  33. package/src/store/user/slices/modelList/__snapshots__/action.test.ts.snap +12 -0
  34. package/src/store/user/slices/modelList/action.test.ts +3 -7
  35. package/src/types/llm.ts +1 -0
  36. package/src/utils/__snapshots__/parseModels.test.ts.snap +32 -0
  37. package/src/utils/parseModels.test.ts +1 -28
package/CHANGELOG.md CHANGED
@@ -2,6 +2,31 @@
2
2
 
3
3
  # Changelog
4
4
 
5
+ ### [Version 1.16.9](https://github.com/lobehub/lobe-chat/compare/v1.16.8...v1.16.9)
6
+
7
+ <sup>Released on **2024-09-12**</sup>
8
+
9
+ #### 💄 Styles
10
+
11
+ - **misc**: Add model and provider desc and url.
12
+
13
+ <br/>
14
+
15
+ <details>
16
+ <summary><kbd>Improvements and Fixes</kbd></summary>
17
+
18
+ #### Styles
19
+
20
+ - **misc**: Add model and provider desc and url, closes [#3920](https://github.com/lobehub/lobe-chat/issues/3920) ([ea9ff00](https://github.com/lobehub/lobe-chat/commit/ea9ff00))
21
+
22
+ </details>
23
+
24
+ <div align="right">
25
+
26
+ [![](https://img.shields.io/badge/-BACK_TO_TOP-151515?style=flat-square)](#readme-top)
27
+
28
+ </div>
29
+
5
30
  ### [Version 1.16.8](https://github.com/lobehub/lobe-chat/compare/v1.16.7...v1.16.8)
6
31
 
7
32
  <sup>Released on **2024-09-12**</sup>
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@lobehub/chat",
3
- "version": "1.16.8",
3
+ "version": "1.16.9",
4
4
  "description": "Lobe Chat - an open-source, high-performance chatbot framework that supports speech synthesis, multimodal, and extensible Function Call plugin system. Supports one-click free deployment of your private ChatGPT/LLM web application.",
5
5
  "keywords": [
6
6
  "framework",
@@ -1,8 +1,11 @@
1
1
  import { ModelProviderCard } from '@/types/llm';
2
2
 
3
+ // ref: https://ai.360.cn/platform/docs/overview
3
4
  const Ai360: ModelProviderCard = {
4
5
  chatModels: [
5
6
  {
7
+ description:
8
+ '360GPT2 Pro 是 360 公司推出的高级自然语言处理模型,具备卓越的文本生成和理解能力,尤其在生成与创作领域表现出色,能够处理复杂的语言转换和角色演绎任务。',
6
9
  displayName: '360GPT2 Pro',
7
10
  enabled: true,
8
11
  functionCall: false,
@@ -16,6 +19,8 @@ const Ai360: ModelProviderCard = {
16
19
  tokens: 8192,
17
20
  },
18
21
  {
22
+ description:
23
+ '360GPT Pro 作为 360 AI 模型系列的重要成员,以高效的文本处理能力满足多样化的自然语言应用场景,支持长文本理解和多轮对话等功能。',
19
24
  displayName: '360GPT Pro',
20
25
  id: '360gpt-pro',
21
26
  maxOutput: 7000,
@@ -27,6 +32,8 @@ const Ai360: ModelProviderCard = {
27
32
  tokens: 8192,
28
33
  },
29
34
  {
35
+ description:
36
+ '360GPT Turbo 提供强大的计算和对话能力,具备出色的语义理解和生成效率,是企业和开发者理想的智能助理解决方案。',
30
37
  displayName: '360GPT Turbo',
31
38
  enabled: true,
32
39
  id: '360gpt-turbo',
@@ -39,6 +46,8 @@ const Ai360: ModelProviderCard = {
39
46
  tokens: 8192,
40
47
  },
41
48
  {
49
+ description:
50
+ '360GPT Turbo Responsibility 8K 强调语义安全和责任导向,专为对内容安全有高度要求的应用场景设计,确保用户体验的准确性与稳健性。',
42
51
  displayName: '360GPT Turbo Responsibility 8K',
43
52
  enabled: true,
44
53
  id: '360gpt-turbo-responsibility-8k',
@@ -52,6 +61,8 @@ const Ai360: ModelProviderCard = {
52
61
  },
53
62
  ],
54
63
  checkModel: '360gpt-turbo',
64
+ description:
65
+ '360 AI 是 360 公司推出的 AI 模型和服务平台,提供多种先进的自然语言处理模型,包括 360GPT2 Pro、360GPT Pro、360GPT Turbo 和 360GPT Turbo Responsibility 8K。这些模型结合了大规模参数和多模态能力,广泛应用于文本生成、语义理解、对话系统与代码生成等领域。通过灵活的定价策略,360 AI 满足多样化用户需求,支持开发者集成,推动智能化应用的革新和发展。',
55
66
  disableBrowserRequest: true,
56
67
  id: 'ai360',
57
68
  modelList: { showModelFetcher: true },
@@ -1,10 +1,11 @@
1
1
  import { ModelProviderCard } from '@/types/llm';
2
2
 
3
+ // ref: https://docs.anthropic.com/en/docs/about-claude/models#model-names
3
4
  const Anthropic: ModelProviderCard = {
4
5
  chatModels: [
5
6
  {
6
7
  description:
7
- 'Claude 3.5 Sonnet raises the industry bar for intelligence, outperforming competitor models and Claude 3 Opus on a wide range of evaluations, with the speed and cost of our mid-tier model, Claude 3 Sonnet.',
8
+ 'Claude 3.5 Sonnet 提供了超越 Opus 的能力和比 Sonnet 更快的速度,同时保持与 Sonnet 相同的价格。Sonnet 特别擅长编程、数据科学、视觉处理、代理任务。',
8
9
  displayName: 'Claude 3.5 Sonnet',
9
10
  enabled: true,
10
11
  functionCall: true,
@@ -22,30 +23,31 @@ const Anthropic: ModelProviderCard = {
22
23
  },
23
24
  {
24
25
  description:
25
- 'Ideal balance of intelligence and speed for enterprise workloads. Maximum utility at a lower price, dependable, balanced for scaled deployments',
26
- displayName: 'Claude 3 Sonnet',
26
+ 'Claude 3 Haiku 是 Anthropic 的最快且最紧凑的模型,旨在实现近乎即时的响应。它具有快速且准确的定向性能。',
27
+ displayName: 'Claude 3 Haiku',
28
+ enabled: true,
27
29
  functionCall: true,
28
- id: 'claude-3-sonnet-20240229',
30
+ id: 'claude-3-haiku-20240307',
29
31
  maxOutput: 4096,
30
32
  pricing: {
31
- input: 3,
32
- output: 15,
33
+ input: 0.25,
34
+ output: 1.25,
33
35
  },
34
- releasedAt: '2024-02-29',
36
+ releasedAt: '2024-03-07',
35
37
  tokens: 200_000,
36
38
  vision: true,
37
39
  },
38
40
  {
39
41
  description:
40
- 'Most powerful model for highly complex tasks. Top-level performance, intelligence, fluency, and understanding',
41
- displayName: 'Claude 3 Opus',
42
+ 'Claude 3 Sonnet 在智能和速度方面为企业工作负载提供了理想的平衡。它以更低的价格提供最大效用,可靠且适合大规模部署。',
43
+ displayName: 'Claude 3 Sonnet',
42
44
  enabled: true,
43
45
  functionCall: true,
44
- id: 'claude-3-opus-20240229',
46
+ id: 'claude-3-sonnet-20240229',
45
47
  maxOutput: 4096,
46
48
  pricing: {
47
- input: 15,
48
- output: 75,
49
+ input: 3,
50
+ output: 15,
49
51
  },
50
52
  releasedAt: '2024-02-29',
51
53
  tokens: 200_000,
@@ -53,21 +55,23 @@ const Anthropic: ModelProviderCard = {
53
55
  },
54
56
  {
55
57
  description:
56
- 'Fastest and most compact model for near-instant responsiveness. Quick and accurate targeted performance',
57
- displayName: 'Claude 3 Haiku',
58
+ 'Claude 3 Opus 是 Anthropic 用于处理高度复杂任务的最强大模型。它在性能、智能、流畅性和理解力方面表现卓越。',
59
+ displayName: 'Claude 3 Opus',
58
60
  enabled: true,
59
61
  functionCall: true,
60
- id: 'claude-3-haiku-20240307',
62
+ id: 'claude-3-opus-20240229',
61
63
  maxOutput: 4096,
62
64
  pricing: {
63
- input: 0.25,
64
- output: 1.25,
65
+ input: 15,
66
+ output: 75,
65
67
  },
66
- releasedAt: '2024-03-07',
68
+ releasedAt: '2024-02-29',
67
69
  tokens: 200_000,
68
70
  vision: true,
69
71
  },
70
72
  {
73
+ description:
74
+ 'Claude 2 为企业提供了关键能力的进步,包括业界领先的 200K token 上下文、大幅降低模型幻觉的发生率、系统提示以及一个新的测试功能:工具调用。',
71
75
  displayName: 'Claude 2.1',
72
76
  id: 'claude-2.1',
73
77
  maxOutput: 4096,
@@ -79,6 +83,8 @@ const Anthropic: ModelProviderCard = {
79
83
  tokens: 200_000,
80
84
  },
81
85
  {
86
+ description:
87
+ 'Claude 2 为企业提供了关键能力的进步,包括业界领先的 200K token 上下文、大幅降低模型幻觉的发生率、系统提示以及一个新的测试功能:工具调用。',
82
88
  displayName: 'Claude 2.0',
83
89
  id: 'claude-2.0',
84
90
  maxOutput: 4096,
@@ -90,6 +96,7 @@ const Anthropic: ModelProviderCard = {
90
96
  tokens: 100_000,
91
97
  },
92
98
  {
99
+ description: 'Anthropic 的模型用于低延迟、高吞吐量的文本生成,支持生成数百页的文本。',
93
100
  displayName: 'Claude Instant 1.2',
94
101
  id: 'claude-instant-1.2',
95
102
  maxOutput: 4096,
@@ -102,6 +109,8 @@ const Anthropic: ModelProviderCard = {
102
109
  },
103
110
  ],
104
111
  checkModel: 'claude-3-haiku-20240307',
112
+ description:
113
+ 'Anthropic 是一家专注于人工智能研究和开发的公司,提供了一系列先进的语言模型,如 Claude 3.5 Sonnet、Claude 3 Sonnet、Claude 3 Opus 和 Claude 3 Haiku。这些模型在智能、速度和成本之间取得了理想的平衡,适用于从企业级工作负载到快速响应的各种应用场景。Claude 3.5 Sonnet 作为其最新模型,在多项评估中表现优异,同时保持了较高的性价比。',
105
114
  id: 'anthropic',
106
115
  modelsUrl: 'https://docs.anthropic.com/en/docs/about-claude/models#model-names',
107
116
  name: 'Anthropic',
@@ -1,11 +1,12 @@
1
1
  import { ModelProviderCard } from '@/types/llm';
2
2
 
3
- // ref https://learn.microsoft.com/en-us/azure/ai-services/openai/concepts/models
3
+ // ref: https://learn.microsoft.com/azure/ai-services/openai/concepts/models
4
4
  const Azure: ModelProviderCard = {
5
5
  chatModels: [
6
6
  {
7
7
  deploymentName: 'gpt-35-turbo',
8
- description: 'GPT 3.5 Turbo,适用于各种文本生成和理解任务',
8
+ description:
9
+ 'GPT 3.5 Turbo,OpenAI提供的高效模型,适用于聊天和文本生成任务,支持并行函数调用。',
9
10
  displayName: 'GPT 3.5 Turbo',
10
11
  enabled: true,
11
12
  functionCall: true,
@@ -15,6 +16,7 @@ const Azure: ModelProviderCard = {
15
16
  },
16
17
  {
17
18
  deploymentName: 'gpt-35-turbo-16k',
19
+ description: 'GPT 3.5 Turbo 16k,高容量文本生成模型,适合复杂任务。',
18
20
  displayName: 'GPT 3.5 Turbo',
19
21
  functionCall: true,
20
22
  id: 'gpt-35-turbo-16k',
@@ -22,6 +24,7 @@ const Azure: ModelProviderCard = {
22
24
  },
23
25
  {
24
26
  deploymentName: 'gpt-4-turbo',
27
+ description: 'GPT 4 Turbo,多模态模型,提供杰出的语言理解和生成能力,同时支持图像输入。',
25
28
  displayName: 'GPT 4 Turbo',
26
29
  enabled: true,
27
30
  functionCall: true,
@@ -31,7 +34,7 @@ const Azure: ModelProviderCard = {
31
34
  },
32
35
  {
33
36
  deploymentName: 'gpt-4-vision',
34
- description: 'GPT-4 视觉预览版,支持视觉任务',
37
+ description: 'GPT-4 视觉预览版,专为图像分析和处理任务设计。',
35
38
  displayName: 'GPT 4 Turbo with Vision Preview',
36
39
  id: 'gpt-4-vision-preview',
37
40
  tokens: 128_000,
@@ -39,6 +42,7 @@ const Azure: ModelProviderCard = {
39
42
  },
40
43
  {
41
44
  deploymentName: 'gpt-4o-mini',
45
+ description: 'GPT-4o Mini,小型高效模型,具备与GPT-4o相似的卓越性能。',
42
46
  displayName: 'GPT 4o Mini',
43
47
  enabled: true,
44
48
  functionCall: true,
@@ -48,6 +52,7 @@ const Azure: ModelProviderCard = {
48
52
  },
49
53
  {
50
54
  deploymentName: 'gpt-4o',
55
+ description: 'GPT-4o 是最新的多模态模型,结合高级文本和图像处理能力。',
51
56
  displayName: 'GPT 4o',
52
57
  enabled: true,
53
58
  functionCall: true,
@@ -57,8 +62,12 @@ const Azure: ModelProviderCard = {
57
62
  },
58
63
  ],
59
64
  defaultShowBrowserRequest: true,
65
+ description:
66
+ 'Azure 提供多种先进的AI模型,包括GPT-3.5和最新的GPT-4系列,支持多种数据类型和复杂任务,致力于安全、可靠和可持续的AI解决方案。',
60
67
  id: 'azure',
68
+ modelsUrl: 'https://learn.microsoft.com/azure/ai-services/openai/concepts/models',
61
69
  name: 'Azure',
70
+ url: 'https://azure.microsoft.com',
62
71
  };
63
72
 
64
73
  export default Azure;
@@ -1,6 +1,6 @@
1
1
  import { ModelProviderCard } from '@/types/llm';
2
2
 
3
- // ref https://platform.baichuan-ai.com/price
3
+ // ref: https://platform.baichuan-ai.com/price
4
4
  const Baichuan: ModelProviderCard = {
5
5
  chatModels: [
6
6
  {
@@ -62,6 +62,8 @@ const Baichuan: ModelProviderCard = {
62
62
  },
63
63
  ],
64
64
  checkModel: 'Baichuan3-Turbo',
65
+ description:
66
+ '百川智能是一家专注于人工智能大模型研发的公司,其模型在国内知识百科、长文本处理和生成创作等中文任务上表现卓越,超越了国外主流模型。百川智能还具备行业领先的多模态能力,在多项权威评测中表现优异。其模型包括 Baichuan 4、Baichuan 3 Turbo 和 Baichuan 3 Turbo 128k 等,分别针对不同应用场景进行优化,提供高性价比的解决方案。',
65
67
  id: 'baichuan',
66
68
  modelList: { showModelFetcher: true },
67
69
  modelsUrl: 'https://platform.baichuan-ai.com/price',
@@ -1,26 +1,26 @@
1
1
  import { ModelProviderCard } from '@/types/llm';
2
2
 
3
- // ref https://docs.aws.amazon.com/bedrock/latest/userguide/conversation-inference.html
4
- // ref https://us-east-1.console.aws.amazon.com/bedrock/home?region=us-east-1#/models
5
- // ref https://us-west-2.console.aws.amazon.com/bedrock/home?region=us-west-2#/models
3
+ // ref :https://docs.aws.amazon.com/bedrock/latest/userguide/conversation-inference.html
4
+ // ref :https://us-east-1.console.aws.amazon.com/bedrock/home?region=us-east-1#/models
5
+ // ref :https://us-west-2.console.aws.amazon.com/bedrock/home?region=us-west-2#/models
6
6
  const Bedrock: ModelProviderCard = {
7
7
  chatModels: [
8
8
  /*
9
9
  // TODO: Not support for now
10
10
  {
11
- description: 'Amazon Titan Text Lite is a light weight efficient model ideal for fine-tuning for English-language tasks, including like summarization and copywriting, where customers want a smaller, more cost-effective model that is also highly customizable.',
11
+ description: '亚马逊 Titan Text Lite 是一款轻量级高效模型,非常适合对英语任务进行微调,包括总结和文案编写等,客户希望有一个更小、更经济的模型,同时也非常可定制。',
12
12
  displayName: 'Titan Text G1 - Lite',
13
13
  id: 'amazon.titan-text-lite-v1',
14
14
  tokens: 4000,
15
15
  },
16
16
  {
17
- description: 'Amazon Titan Text Express has a context length of up to 8,000 tokens, making it well-suited for a wide range of advanced, general language tasks such as open-ended text generation and conversational chat, as well as support within Retrieval Augmented Generation (RAG). At launch, the model is optimized for English, with multilingual support for more than 100 additional languages available in preview.',
17
+ description: '亚马逊 Titan Text Express 的上下文长度可达 8,000 个标记,非常适合广泛的高级通用语言任务,如开放式文本生成和对话聊天,以及在检索增强生成 (RAG) 中的支持。在推出时,该模型针对英语进行了优化,预览版还支持其他 100 多种语言。',
18
18
  displayName: 'Titan Text G1 - Express',
19
19
  id: 'amazon.titan-text-express-v1',
20
20
  tokens: 8000,
21
21
  },
22
22
  {
23
- description: 'Titan Text Premier is a powerful and advanced model within the Titan Text family, designed to deliver superior performance across a wide range of enterprise applications. With its cutting-edge capabilities, it offers enhanced accuracy and exceptional results, making it an excellent choice for organizations seeking top-notch text processing solutions.',
23
+ description: 'Titan Text Premier 是 Titan Text 系列中一款强大的先进模型,旨在为广泛的企业应用提供卓越的性能。凭借其尖端能力,它提供了更高的准确性和卓越的结果,是寻求一流文本处理解决方案的组织的绝佳选择。',
24
24
  displayName: 'Titan Text G1 - Premier',
25
25
  id: 'amazon.titan-text-premier-v1:0',
26
26
  tokens: 32_000,
@@ -28,7 +28,7 @@ const Bedrock: ModelProviderCard = {
28
28
  */
29
29
  {
30
30
  description:
31
- 'Claude 3.5 Sonnet raises the industry bar for intelligence, outperforming competitor models and Claude 3 Opus on a wide range of evaluations, with the speed and cost of our mid-tier model, Claude 3 Sonnet.',
31
+ 'Claude 3.5 Sonnet 提升了行业标准,性能超过竞争对手模型和 Claude 3 Opus,在广泛的评估中表现出色,同时具有我们中等层级模型的速度和成本。',
32
32
  displayName: 'Claude 3.5 Sonnet',
33
33
  enabled: true,
34
34
  functionCall: true,
@@ -42,7 +42,7 @@ const Bedrock: ModelProviderCard = {
42
42
  },
43
43
  {
44
44
  description:
45
- 'Claude 3 Sonnet by Anthropic strikes the ideal balance between intelligence and speed—particularly for enterprise workloads. It offers maximum utility at a lower price than competitors, and is engineered to be the dependable, high-endurance workhorse for scaled AI deployments. Claude 3 Sonnet can process images and return text outputs, and features a 200K context window.',
45
+ 'Anthropic 的 Claude 3 Sonnet 在智能和速度之间达到了理想的平衡——特别适合企业工作负载。它以低于竞争对手的价格提供最大的效用,并被设计成为可靠的、高耐用的主力机,适用于规模化的 AI 部署。Claude 3 Sonnet 可以处理图像并返回文本输出,具有 200K 的上下文窗口。',
46
46
  displayName: 'Claude 3 Sonnet',
47
47
  enabled: true,
48
48
  functionCall: true,
@@ -56,7 +56,7 @@ const Bedrock: ModelProviderCard = {
56
56
  },
57
57
  {
58
58
  description:
59
- 'Claude 3 Opus is Anthropic most powerful AI model, with state-of-the-art performance on highly complex tasks. It can navigate open-ended prompts and sight-unseen scenarios with remarkable fluency and human-like understanding. Claude 3 Opus shows us the frontier of what’s possible with generative AI. Claude 3 Opus can process images and return text outputs, and features a 200K context window.',
59
+ 'Claude 3 Opus 是 Anthropic 最强大的 AI 模型,具有在高度复杂任务上的最先进性能。它可以处理开放式提示和未见过的场景,具有出色的流畅性和类人的理解能力。Claude 3 Opus 展示了生成 AI 可能性的前沿。Claude 3 Opus 可以处理图像并返回文本输出,具有 200K 的上下文窗口。',
60
60
  displayName: 'Claude 3 Opus',
61
61
  enabled: true,
62
62
  functionCall: true,
@@ -70,7 +70,7 @@ const Bedrock: ModelProviderCard = {
70
70
  },
71
71
  {
72
72
  description:
73
- 'Claude 3 Haiku is Anthropic fastest, most compact model for near-instant responsiveness. It answers simple queries and requests with speed. Customers will be able to build seamless AI experiences that mimic human interactions. Claude 3 Haiku can process images and return text outputs, and features a 200K context window.',
73
+ 'Claude 3 Haiku 是 Anthropic 最快、最紧凑的模型,提供近乎即时的响应速度。它可以快速回答简单的查询和请求。客户将能够构建模仿人类互动的无缝 AI 体验。Claude 3 Haiku 可以处理图像并返回文本输出,具有 200K 的上下文窗口。',
74
74
  displayName: 'Claude 3 Haiku',
75
75
  enabled: true,
76
76
  functionCall: true,
@@ -84,7 +84,7 @@ const Bedrock: ModelProviderCard = {
84
84
  },
85
85
  {
86
86
  description:
87
- 'An update to Claude 2 that features double the context window, plus improvements across reliability, hallucination rates, and evidence-based accuracy in long document and RAG contexts.',
87
+ 'Claude 2 的更新版,具有双倍的上下文窗口,以及在长文档和 RAG 上下文中的可靠性、幻觉率和基于证据的准确性的改进。',
88
88
  displayName: 'Claude 2.1',
89
89
  id: 'anthropic.claude-v2:1',
90
90
  pricing: {
@@ -95,7 +95,7 @@ const Bedrock: ModelProviderCard = {
95
95
  },
96
96
  {
97
97
  description:
98
- 'Anthropic highly capable model across a wide range of tasks from sophisticated dialogue and creative content generation to detailed instruction following.',
98
+ 'Anthropic 在从复杂对话和创意内容生成到详细指令跟随的广泛任务中都表现出高度能力的模型。',
99
99
  displayName: 'Claude 2.0',
100
100
  id: 'anthropic.claude-v2',
101
101
  pricing: {
@@ -106,7 +106,7 @@ const Bedrock: ModelProviderCard = {
106
106
  },
107
107
  {
108
108
  description:
109
- 'A fast, affordable yet still very capable model, which can handle a range of tasks including casual dialogue, text analysis, summarization, and document question-answering.',
109
+ '一款快速、经济且仍然非常有能力的模型,可以处理包括日常对话、文本分析、总结和文档问答在内的一系列任务。',
110
110
  displayName: 'Claude Instant',
111
111
  id: 'anthropic.claude-instant-v1',
112
112
  pricing: {
@@ -117,7 +117,7 @@ const Bedrock: ModelProviderCard = {
117
117
  },
118
118
  {
119
119
  description:
120
- 'An update to Meta Llama 3 8B Instruct that includes an expanded 128K context length, multilinguality and improved reasoning capabilities. The Llama 3.1 offering of multilingual large language models (LLMs) is a collection of pretrained and instruction-tuned generative models in 8B, 70B and 405B sizes (text in/text out). The Llama 3.1 instruction-tuned text only models (8B, 70B, 405B) are optimized for multilingual dialogue use cases and outperform many of the available open source chat models on common industry benchmarks. Llama 3.1 is intended for commercial and research use in multiple languages. Instruction tuned text only models are intended for assistant-like chat, whereas pretrained models can be adapted for a variety of natural language generation tasks. The Llama 3.1 models also support the ability to leverage the outputs of its models to improve other models including synthetic data generation and distillation. Llama 3.1 is an auto-regressive language model that uses an optimized transformer architecture. The tuned versions use supervised fine-tuning (SFT) and reinforcement learning with human feedback (RLHF) to align with human preferences for helpfulness and safety.',
120
+ 'Meta Llama 3.1 8B Instruct 的更新版,包括扩展的 128K 上下文长度、多语言性和改进的推理能力。Llama 3.1 提供的多语言大型语言模型 (LLMs) 是一组预训练的、指令调整的生成模型,包括 8B、70B 和 405B 大小 (文本输入/输出)。Llama 3.1 指令调整的文本模型 (8B、70B、405B) 专为多语言对话用例进行了优化,并在常见的行业基准测试中超过了许多可用的开源聊天模型。Llama 3.1 旨在用于多种语言的商业和研究用途。指令调整的文本模型适用于类似助手的聊天,而预训练模型可以适应各种自然语言生成任务。Llama 3.1 模型还支持利用其模型的输出来改进其他模型,包括合成数据生成和精炼。Llama 3.1 是使用优化的变压器架构的自回归语言模型。调整版本使用监督微调 (SFT) 和带有人类反馈的强化学习 (RLHF) 来符合人类对帮助性和安全性的偏好。',
121
121
  displayName: 'Llama 3.1 8B Instruct',
122
122
  enabled: true,
123
123
  functionCall: true,
@@ -130,7 +130,7 @@ const Bedrock: ModelProviderCard = {
130
130
  },
131
131
  {
132
132
  description:
133
- 'An update to Meta Llama 3 70B Instruct that includes an expanded 128K context length, multilinguality and improved reasoning capabilities. The Llama 3.1 offering of multilingual large language models (LLMs) is a collection of pretrained and instruction-tuned generative models in 8B, 70B and 405B sizes (text in/text out). The Llama 3.1 instruction-tuned text only models (8B, 70B, 405B) are optimized for multilingual dialogue use cases and outperform many of the available open source chat models on common industry benchmarks. Llama 3.1 is intended for commercial and research use in multiple languages. Instruction tuned text only models are intended for assistant-like chat, whereas pretrained models can be adapted for a variety of natural language generation tasks. The Llama 3.1 models also support the ability to leverage the outputs of its models to improve other models including synthetic data generation and distillation. Llama 3.1 is an auto-regressive language model that uses an optimized transformer architecture. The tuned versions use supervised fine-tuning (SFT) and reinforcement learning with human feedback (RLHF) to align with human preferences for helpfulness and safety.',
133
+ 'Meta Llama 3.1 70B Instruct 的更新版,包括扩展的 128K 上下文长度、多语言性和改进的推理能力。Llama 3.1 提供的多语言大型语言模型 (LLMs) 是一组预训练的、指令调整的生成模型,包括 8B、70B 和 405B 大小 (文本输入/输出)。Llama 3.1 指令调整的文本模型 (8B、70B、405B) 专为多语言对话用例进行了优化,并在常见的行业基准测试中超过了许多可用的开源聊天模型。Llama 3.1 旨在用于多种语言的商业和研究用途。指令调整的文本模型适用于类似助手的聊天,而预训练模型可以适应各种自然语言生成任务。Llama 3.1 模型还支持利用其模型的输出来改进其他模型,包括合成数据生成和精炼。Llama 3.1 是使用优化的变压器架构的自回归语言模型。调整版本使用监督微调 (SFT) 和带有人类反馈的强化学习 (RLHF) 来符合人类对帮助性和安全性的偏好。',
134
134
  displayName: 'Llama 3.1 70B Instruct',
135
135
  enabled: true,
136
136
  functionCall: true,
@@ -143,7 +143,7 @@ const Bedrock: ModelProviderCard = {
143
143
  },
144
144
  {
145
145
  description:
146
- 'Meta Llama 3.1 405B Instruct is the largest and most powerful of the Llama 3.1 Instruct models that is a highly advanced model for conversational inference and reasoning, synthetic data generation, and a base to do specialized continual pre-training or fine-tuning on a specific domain. The Llama 3.1 offering of multilingual large language models (LLMs) is a collection of pretrained and instruction-tuned generative models in 8B, 70B and 405B sizes (text in/text out). The Llama 3.1 instruction-tuned text only models (8B, 70B, 405B) are optimized for multilingual dialogue use cases and outperform many of the available open source chat models on common industry benchmarks. Llama 3.1 is intended for commercial and research use in multiple languages. Instruction tuned text only models are intended for assistant-like chat, whereas pretrained models can be adapted for a variety of natural language generation tasks. The Llama 3.1 models also support the ability to leverage the outputs of its models to improve other models including synthetic data generation and distillation. Llama 3.1 is an auto-regressive language model that uses an optimized transformer architecture. The tuned versions use supervised fine-tuning (SFT) and reinforcement learning with human feedback (RLHF) to align with human preferences for helpfulness and safety.',
146
+ 'Meta Llama 3.1 405B Instruct 是 Llama 3.1 Instruct 模型中最大、最强大的模型,是一款高度先进的对话推理和合成数据生成模型,也可以用作在特定领域进行专业持续预训练或微调的基础。Llama 3.1 提供的多语言大型语言模型 (LLMs) 是一组预训练的、指令调整的生成模型,包括 8B、70B 和 405B 大小 (文本输入/输出)。Llama 3.1 指令调整的文本模型 (8B、70B、405B) 专为多语言对话用例进行了优化,并在常见的行业基准测试中超过了许多可用的开源聊天模型。Llama 3.1 旨在用于多种语言的商业和研究用途。指令调整的文本模型适用于类似助手的聊天,而预训练模型可以适应各种自然语言生成任务。Llama 3.1 模型还支持利用其模型的输出来改进其他模型,包括合成数据生成和精炼。Llama 3.1 是使用优化的变压器架构的自回归语言模型。调整版本使用监督微调 (SFT) 和带有人类反馈的强化学习 (RLHF) 来符合人类对帮助性和安全性的偏好。',
147
147
  displayName: 'Llama 3.1 405B Instruct',
148
148
  enabled: true,
149
149
  functionCall: true,
@@ -156,7 +156,7 @@ const Bedrock: ModelProviderCard = {
156
156
  },
157
157
  {
158
158
  description:
159
- 'Meta Llama 3 is an accessible, open large language model (LLM) designed for developers, researchers, and businesses to build, experiment, and responsibly scale their generative AI ideas. Part of a foundational system, it serves as a bedrock for innovation in the global community. Ideal for limited computational power and resources, edge devices, and faster training times.',
159
+ 'Meta Llama 3 是一款面向开发者、研究人员和企业的开放大型语言模型 (LLM),旨在帮助他们构建、实验并负责任地扩展他们的生成 AI 想法。作为全球社区创新的基础系统的一部分,它非常适合计算能力和资源有限、边缘设备和更快的训练时间。',
160
160
  displayName: 'Llama 3 8B Instruct',
161
161
  id: 'meta.llama3-8b-instruct-v1:0',
162
162
  pricing: {
@@ -167,7 +167,7 @@ const Bedrock: ModelProviderCard = {
167
167
  },
168
168
  {
169
169
  description:
170
- 'Meta Llama 3 is an accessible, open large language model (LLM) designed for developers, researchers, and businesses to build, experiment, and responsibly scale their generative AI ideas. Part of a foundational system, it serves as a bedrock for innovation in the global community. Ideal for content creation, conversational AI, language understanding, R&D, and Enterprise applications.',
170
+ 'Meta Llama 3 是一款面向开发者、研究人员和企业的开放大型语言模型 (LLM),旨在帮助他们构建、实验并负责任地扩展他们的生成 AI 想法。作为全球社区创新的基础系统的一部分,它非常适合内容创建、对话 AI、语言理解、研发和企业应用。',
171
171
  displayName: 'Llama 3 70B Instruct',
172
172
  id: 'meta.llama3-70b-instruct-v1:0',
173
173
  pricing: {
@@ -276,6 +276,8 @@ const Bedrock: ModelProviderCard = {
276
276
  */
277
277
  ],
278
278
  checkModel: 'anthropic.claude-instant-v1',
279
+ description:
280
+ 'Bedrock 是亚马逊 AWS 提供的一项服务,专注于为企业提供先进的 AI 语言模型和视觉模型。其模型家族包括 Anthropic 的 Claude 系列、Meta 的 Llama 3.1 系列等,涵盖从轻量级到高性能的多种选择,支持文本生成、对话、图像处理等多种任务,适用于不同规模和需求的企业应用。',
279
281
  id: 'bedrock',
280
282
  modelsUrl: 'https://docs.aws.amazon.com/bedrock/latest/userguide/model-ids.html',
281
283
  name: 'Bedrock',
@@ -1,6 +1,6 @@
1
1
  import { ModelProviderCard } from '@/types/llm';
2
2
 
3
- // ref https://platform.deepseek.com/api-docs/pricing
3
+ // ref: https://platform.deepseek.com/api-docs/pricing
4
4
  const DeepSeek: ModelProviderCard = {
5
5
  chatModels: [
6
6
  {
@@ -20,6 +20,8 @@ const DeepSeek: ModelProviderCard = {
20
20
  },
21
21
  ],
22
22
  checkModel: 'deepseek-chat',
23
+ description:
24
+ 'DeepSeek 是一家专注于人工智能技术研究和应用的公司,其最新模型 DeepSeek-V2.5 融合了通用对话和代码处理能力,并在人类偏好对齐、写作任务和指令跟随等方面实现了显著提升。',
23
25
  id: 'deepseek',
24
26
  modelList: { showModelFetcher: true },
25
27
  modelsUrl: 'https://platform.deepseek.com/api-docs/zh-cn/quick_start/pricing',
@@ -1,12 +1,12 @@
1
1
  import { ModelProviderCard } from '@/types/llm';
2
2
 
3
- // ref https://fireworks.ai/models?show=Serverless
4
- // ref https://fireworks.ai/pricing
3
+ // ref: https://fireworks.ai/models?show=Serverless
4
+ // ref: https://fireworks.ai/pricing
5
5
  const FireworksAI: ModelProviderCard = {
6
6
  chatModels: [
7
7
  {
8
8
  description:
9
- 'Fireworks latest and most performant function-calling model. Firefunction-v2 is based on Llama-3 and trained to excel at function-calling as well as chat and instruction-following. See blog post for more details https://fireworks.ai/blog/firefunction-v2-launch-post',
9
+ 'Fireworks 公司最新推出的 Firefunction-v2 是一款性能卓越的函数调用模型,基于 Llama-3 开发,并通过大量优化,特别适用于函数调用、对话及指令跟随等场景。',
10
10
  displayName: 'Firefunction V2',
11
11
  enabled: true,
12
12
  functionCall: true,
@@ -14,7 +14,7 @@ const FireworksAI: ModelProviderCard = {
14
14
  tokens: 8192,
15
15
  },
16
16
  {
17
- description: 'Fireworks open-source function calling model.',
17
+ description: 'Fireworks 开源函数调用模型,提供卓越的指令执行能力和开放可定制的特性。',
18
18
  displayName: 'Firefunction V1',
19
19
  functionCall: true,
20
20
  id: 'accounts/fireworks/models/firefunction-v1',
@@ -22,7 +22,7 @@ const FireworksAI: ModelProviderCard = {
22
22
  },
23
23
  {
24
24
  description:
25
- 'Vision-language model allowing both image and text as inputs (single image is recommended), trained on OSS model generated training data and open sourced on huggingface at fireworks-ai/FireLLaVA-13b',
25
+ 'fireworks-ai/FireLLaVA-13b 是一款视觉语言模型,可以同时接收图像和文本输入,经过高质量数据训练,适合多模态任务。',
26
26
  displayName: 'FireLLaVA-13B',
27
27
  enabled: true,
28
28
  functionCall: false,
@@ -31,6 +31,8 @@ const FireworksAI: ModelProviderCard = {
31
31
  vision: true,
32
32
  },
33
33
  {
34
+ description:
35
+ 'Llama 3.1 8B 指令模型,专为多语言对话优化,能够在常见行业基准上超越多数开源及闭源模型。',
34
36
  displayName: 'Llama 3.1 8B Instruct',
35
37
  enabled: true,
36
38
  functionCall: false,
@@ -38,6 +40,8 @@ const FireworksAI: ModelProviderCard = {
38
40
  tokens: 131_072,
39
41
  },
40
42
  {
43
+ description:
44
+ 'Llama 3.1 70B 指令模型,提供卓越的自然语言理解和生成能力,是对话及分析任务的理想选择。',
41
45
  displayName: 'Llama 3.1 70B Instruct',
42
46
  enabled: true,
43
47
  functionCall: false,
@@ -45,6 +49,8 @@ const FireworksAI: ModelProviderCard = {
45
49
  tokens: 131_072,
46
50
  },
47
51
  {
52
+ description:
53
+ 'Llama 3.1 405B 指令模型,具备超大规模参数,适合复杂任务和高负载场景下的指令跟随。',
48
54
  displayName: 'Llama 3.1 405B Instruct',
49
55
  enabled: true,
50
56
  functionCall: false,
@@ -52,30 +58,38 @@ const FireworksAI: ModelProviderCard = {
52
58
  tokens: 131_072,
53
59
  },
54
60
  {
61
+ description: 'Llama 3 8B 指令模型,优化用于对话及多语言任务,表现卓越且高效。',
55
62
  displayName: 'Llama 3 8B Instruct',
56
63
  functionCall: false,
57
64
  id: 'accounts/fireworks/models/llama-v3-8b-instruct',
58
65
  tokens: 8192,
59
66
  },
60
67
  {
68
+ description: 'Llama 3 70B 指令模型,专为多语言对话和自然语言理解优化,性能优于多数竞争模型。',
61
69
  displayName: 'Llama 3 70B Instruct',
62
70
  functionCall: false,
63
71
  id: 'accounts/fireworks/models/llama-v3-70b-instruct',
64
72
  tokens: 8192,
65
73
  },
66
74
  {
75
+ description:
76
+ 'Llama 3 8B 指令模型(HF 版本),与官方实现结果一致,具备高度一致性和跨平台兼容性。',
67
77
  displayName: 'Llama 3 8B Instruct (HF version)',
68
78
  functionCall: false,
69
79
  id: 'accounts/fireworks/models/llama-v3-8b-instruct-hf',
70
80
  tokens: 8192,
71
81
  },
72
82
  {
83
+ description:
84
+ 'Llama 3 70B 指令模型(HF 版本),与官方实现结果保持一致,适合高质量的指令跟随任务。',
73
85
  displayName: 'Llama 3 70B Instruct (HF version)',
74
86
  functionCall: false,
75
87
  id: 'accounts/fireworks/models/llama-v3-70b-instruct-hf',
76
88
  tokens: 8192,
77
89
  },
78
90
  {
91
+ description:
92
+ 'Gemma 2 9B 指令模型,基于之前的Google技术,适合回答问题、总结和推理等多种文本生成任务。',
79
93
  displayName: 'Gemma 2 9B Instruct',
80
94
  enabled: true,
81
95
  functionCall: false,
@@ -83,6 +97,7 @@ const FireworksAI: ModelProviderCard = {
83
97
  tokens: 8192,
84
98
  },
85
99
  {
100
+ description: 'Mixtral MoE 8x7B 指令模型,多专家架构提供高效的指令跟随及执行。',
86
101
  displayName: 'Mixtral MoE 8x7B Instruct',
87
102
  enabled: true,
88
103
  functionCall: false,
@@ -90,6 +105,8 @@ const FireworksAI: ModelProviderCard = {
90
105
  tokens: 32_768,
91
106
  },
92
107
  {
108
+ description:
109
+ 'Mixtral MoE 8x22B 指令模型,大规模参数和多专家架构,全方位支持复杂任务的高效处理。',
93
110
  displayName: 'Mixtral MoE 8x22B Instruct',
94
111
  enabled: true,
95
112
  functionCall: false,
@@ -97,12 +114,16 @@ const FireworksAI: ModelProviderCard = {
97
114
  tokens: 65_536,
98
115
  },
99
116
  {
117
+ description:
118
+ 'Mixtral MoE 8x7B 指令模型(HF 版本),性能与官方实现一致,适合多种高效任务场景。',
100
119
  displayName: 'Mixtral MoE 8x7B Instruct (HF version)',
101
120
  functionCall: false,
102
121
  id: 'accounts/fireworks/models/mixtral-8x7b-instruct-hf',
103
122
  tokens: 32_768,
104
123
  },
105
124
  {
125
+ description:
126
+ 'Phi 3 Vision 指令模型,轻量级多模态模型,能够处理复杂的视觉和文本信息,具备较强的推理能力。',
106
127
  displayName: 'Phi 3 Vision Instruct',
107
128
  enabled: true,
108
129
  functionCall: false,
@@ -111,6 +132,7 @@ const FireworksAI: ModelProviderCard = {
111
132
  vision: true,
112
133
  },
113
134
  {
135
+ description: 'Yi-Large 模型,具备卓越的多语言处理能力,可用于各类语言生成和理解任务。',
114
136
  displayName: 'Yi-Large',
115
137
  enabled: true,
116
138
  functionCall: false,
@@ -118,18 +140,22 @@ const FireworksAI: ModelProviderCard = {
118
140
  tokens: 32_768,
119
141
  },
120
142
  {
143
+ description: 'StarCoder 7B 模型,针对80多种编程语言训练,拥有出色的编程填充能力和语境理解。',
121
144
  displayName: 'StarCoder 7B',
122
145
  functionCall: false,
123
146
  id: 'accounts/fireworks/models/starcoder-7b',
124
147
  tokens: 8192,
125
148
  },
126
149
  {
150
+ description:
151
+ 'StarCoder 15.5B 模型,支持高级编程任务,多语言能力增强,适合复杂代码生成和理解。',
127
152
  displayName: 'StarCoder 15.5B',
128
153
  functionCall: false,
129
154
  id: 'accounts/fireworks/models/starcoder-16b',
130
155
  tokens: 8192,
131
156
  },
132
157
  {
158
+ description: 'MythoMax L2 13B 模型,结合新颖的合并技术,擅长叙事和角色扮演。',
133
159
  displayName: 'MythoMax L2 13b',
134
160
  functionCall: false,
135
161
  id: 'accounts/fireworks/models/mythomax-l2-13b',
@@ -137,6 +163,8 @@ const FireworksAI: ModelProviderCard = {
137
163
  },
138
164
  ],
139
165
  checkModel: 'accounts/fireworks/models/firefunction-v2',
166
+ description:
167
+ 'Fireworks AI 是一家领先的高级语言模型服务商,专注于功能调用和多模态处理。其最新模型 Firefunction V2 基于 Llama-3,优化用于函数调用、对话及指令跟随。视觉语言模型 FireLLaVA-13B 支持图像和文本混合输入。其他知名模型包括 Llama 系列和 Mixtral 系列,提供高效的多语言指令跟随与生成支持。',
140
168
  id: 'fireworksai',
141
169
  modelList: { showModelFetcher: true },
142
170
  modelsUrl: 'https://fireworks.ai/models?show=Serverless',