@lobehub/chat 1.62.11 → 1.63.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (107)
  1. package/CHANGELOG.md +33 -0
  2. package/changelog/v1.json +12 -0
  3. package/locales/ar/chat.json +26 -0
  4. package/locales/ar/models.json +21 -0
  5. package/locales/bg-BG/chat.json +26 -0
  6. package/locales/bg-BG/models.json +21 -0
  7. package/locales/de-DE/chat.json +26 -0
  8. package/locales/de-DE/models.json +21 -0
  9. package/locales/en-US/chat.json +26 -0
  10. package/locales/en-US/models.json +21 -0
  11. package/locales/es-ES/chat.json +26 -0
  12. package/locales/es-ES/models.json +21 -0
  13. package/locales/fa-IR/chat.json +26 -0
  14. package/locales/fa-IR/models.json +21 -0
  15. package/locales/fr-FR/chat.json +26 -0
  16. package/locales/fr-FR/models.json +21 -0
  17. package/locales/it-IT/chat.json +26 -0
  18. package/locales/it-IT/models.json +21 -0
  19. package/locales/ja-JP/chat.json +26 -0
  20. package/locales/ja-JP/models.json +21 -0
  21. package/locales/ko-KR/chat.json +26 -0
  22. package/locales/ko-KR/models.json +21 -0
  23. package/locales/nl-NL/chat.json +26 -0
  24. package/locales/nl-NL/models.json +21 -0
  25. package/locales/pl-PL/chat.json +26 -0
  26. package/locales/pl-PL/models.json +21 -0
  27. package/locales/pt-BR/chat.json +26 -0
  28. package/locales/pt-BR/models.json +21 -0
  29. package/locales/ru-RU/chat.json +26 -0
  30. package/locales/ru-RU/models.json +21 -0
  31. package/locales/tr-TR/chat.json +26 -0
  32. package/locales/tr-TR/models.json +21 -0
  33. package/locales/vi-VN/chat.json +26 -0
  34. package/locales/vi-VN/models.json +21 -0
  35. package/locales/zh-CN/chat.json +27 -1
  36. package/locales/zh-CN/models.json +25 -4
  37. package/locales/zh-TW/chat.json +26 -0
  38. package/locales/zh-TW/models.json +21 -0
  39. package/package.json +1 -1
  40. package/src/app/[variants]/(main)/chat/(workspace)/@conversation/features/ChatInput/Desktop/index.tsx +1 -0
  41. package/src/config/aiModels/google.ts +8 -0
  42. package/src/config/aiModels/groq.ts +111 -95
  43. package/src/config/aiModels/hunyuan.ts +36 -4
  44. package/src/config/aiModels/internlm.ts +4 -5
  45. package/src/config/aiModels/jina.ts +3 -0
  46. package/src/config/aiModels/mistral.ts +35 -21
  47. package/src/config/aiModels/novita.ts +293 -32
  48. package/src/config/aiModels/perplexity.ts +14 -2
  49. package/src/config/aiModels/qwen.ts +91 -37
  50. package/src/config/aiModels/sensenova.ts +70 -17
  51. package/src/config/aiModels/siliconcloud.ts +5 -3
  52. package/src/config/aiModels/stepfun.ts +19 -0
  53. package/src/config/aiModels/taichu.ts +4 -2
  54. package/src/config/aiModels/upstage.ts +24 -11
  55. package/src/config/modelProviders/openrouter.ts +1 -0
  56. package/src/config/modelProviders/qwen.ts +2 -1
  57. package/src/const/settings/agent.ts +1 -0
  58. package/src/database/repositories/aiInfra/index.test.ts +2 -5
  59. package/src/database/repositories/aiInfra/index.ts +6 -2
  60. package/src/database/schemas/message.ts +2 -1
  61. package/src/database/server/models/aiModel.ts +1 -1
  62. package/src/database/server/models/aiProvider.ts +6 -1
  63. package/src/features/ChatInput/ActionBar/Model/ControlsForm.tsx +38 -0
  64. package/src/features/ChatInput/ActionBar/Model/ExtendControls.tsx +40 -0
  65. package/src/features/ChatInput/ActionBar/Model/index.tsx +132 -0
  66. package/src/features/ChatInput/ActionBar/Params/index.tsx +2 -2
  67. package/src/features/ChatInput/ActionBar/Search/ExaIcon.tsx +15 -0
  68. package/src/features/ChatInput/ActionBar/Search/ModelBuiltinSearch.tsx +68 -0
  69. package/src/features/ChatInput/ActionBar/Search/SwitchPanel.tsx +167 -0
  70. package/src/features/ChatInput/ActionBar/Search/index.tsx +76 -0
  71. package/src/features/ChatInput/ActionBar/config.ts +4 -2
  72. package/src/features/Conversation/Messages/Assistant/SearchGrounding.tsx +153 -0
  73. package/src/features/Conversation/Messages/Assistant/index.tsx +7 -1
  74. package/src/features/ModelSelect/index.tsx +1 -1
  75. package/src/features/ModelSwitchPanel/index.tsx +2 -3
  76. package/src/hooks/useEnabledChatModels.ts +1 -1
  77. package/src/libs/agent-runtime/google/index.test.ts +142 -36
  78. package/src/libs/agent-runtime/google/index.ts +26 -51
  79. package/src/libs/agent-runtime/novita/__snapshots__/index.test.ts.snap +3 -3
  80. package/src/libs/agent-runtime/openrouter/__snapshots__/index.test.ts.snap +3 -3
  81. package/src/libs/agent-runtime/openrouter/index.ts +20 -20
  82. package/src/libs/agent-runtime/perplexity/index.test.ts +2 -2
  83. package/src/libs/agent-runtime/qwen/index.ts +38 -55
  84. package/src/libs/agent-runtime/types/chat.ts +6 -2
  85. package/src/libs/agent-runtime/utils/streams/google-ai.ts +29 -4
  86. package/src/libs/agent-runtime/utils/streams/openai.ts +1 -1
  87. package/src/libs/agent-runtime/utils/streams/protocol.ts +1 -1
  88. package/src/locales/default/chat.ts +28 -0
  89. package/src/services/chat.ts +10 -0
  90. package/src/store/agent/slices/chat/__snapshots__/selectors.test.ts.snap +1 -0
  91. package/src/store/agent/slices/chat/selectors.ts +6 -0
  92. package/src/store/aiInfra/slices/aiModel/selectors.ts +36 -0
  93. package/src/store/aiInfra/slices/aiProvider/initialState.ts +2 -2
  94. package/src/store/aiInfra/slices/aiProvider/selectors.ts +14 -0
  95. package/src/store/chat/slices/aiChat/actions/generateAIChat.ts +15 -5
  96. package/src/store/chat/slices/message/action.ts +1 -1
  97. package/src/store/user/slices/modelList/selectors/modelProvider.ts +1 -1
  98. package/src/store/user/slices/settings/selectors/__snapshots__/settings.test.ts.snap +1 -0
  99. package/src/types/agent/index.ts +4 -0
  100. package/src/types/aiModel.ts +35 -8
  101. package/src/types/aiProvider.ts +7 -10
  102. package/src/types/message/base.ts +2 -5
  103. package/src/types/message/chat.ts +5 -3
  104. package/src/types/openai/chat.ts +5 -0
  105. package/src/types/search.ts +29 -0
  106. package/src/utils/fetch/fetchSSE.ts +11 -11
  107. package/src/features/ChatInput/ActionBar/ModelSwitch.tsx +0 -20
@@ -1,66 +1,62 @@
1
1
  import { AIChatModelCard } from '@/types/aiModel';
2
2
 
3
+ // https://groq.com/pricing/
4
+ // https://console.groq.com/docs/models
5
+
3
6
  const groqChatModels: AIChatModelCard[] = [
4
7
  {
5
8
  abilities: {
9
+ functionCall: true,
6
10
  reasoning: true,
7
11
  },
8
- contextWindowTokens: 128_000,
9
- description:
10
- 'DeepSeek R1——DeepSeek 套件中更大更智能的模型——被蒸馏到 Llama 70B 架构中。基于基准测试和人工评估,该模型比原始 Llama 70B 更智能,尤其在需要数学和事实精确性的任务上表现出色。',
11
- displayName: 'DeepSeek R1 (Distil-Llama 70B)',
12
+ contextWindowTokens: 131_072,
13
+ displayName: 'DeepSeek R1 Distill Llama 70B',
12
14
  enabled: true,
13
15
  id: 'deepseek-r1-distill-llama-70b',
16
+ pricing: {
17
+ input: 0.75, // 0.75 - 5.00
18
+ output: 0.99, // 0.99 - 5.00
19
+ },
14
20
  type: 'chat',
15
21
  },
16
22
  {
17
- abilities: {
18
- functionCall: true,
19
- },
20
23
  contextWindowTokens: 131_072,
21
- description:
22
- 'Meta Llama 3.3 多语言大语言模型 ( LLM ) 是 70B(文本输入/文本输出)中的预训练和指令调整生成模型。 Llama 3.3 指令调整的纯文本模型针对多语言对话用例进行了优化,并且在常见行业基准上优于许多可用的开源和封闭式聊天模型。',
23
- displayName: 'Llama 3.3 70B',
24
- enabled: true,
25
- id: 'llama-3.3-70b-versatile',
26
- maxOutput: 8192,
24
+ displayName: 'DeepSeek R1 Distill Llama 70B SpecDec',
25
+ id: 'deepseek-r1-distill-llama-70b-specdec',
26
+ maxOutput: 16_384,
27
27
  pricing: {
28
- input: 0.05,
29
- output: 0.08,
28
+ input: 0.75,
29
+ output: 0.99,
30
30
  },
31
31
  type: 'chat',
32
32
  },
33
33
  {
34
34
  abilities: {
35
- vision: true,
35
+ functionCall: true,
36
+ reasoning: true,
36
37
  },
37
- contextWindowTokens: 8192,
38
- description:
39
- 'Llama 3.2 旨在处理结合视觉和文本数据的任务。它在图像描述和视觉问答等任务中表现出色,跨越了语言生成和视觉推理之间的鸿沟。',
40
- displayName: 'Llama 3.2 11B Vision (Preview)',
38
+ contextWindowTokens: 131_072,
39
+ displayName: 'DeepSeek R1 Distill Qwen 32B',
41
40
  enabled: true,
42
- id: 'llama-3.2-11b-vision-preview',
43
- maxOutput: 8192,
41
+ id: 'deepseek-r1-distill-qwen-32b',
42
+ maxOutput: 16_384,
44
43
  pricing: {
45
- input: 0.05,
46
- output: 0.08,
44
+ input: 0.69,
45
+ output: 0.69,
47
46
  },
48
47
  type: 'chat',
49
48
  },
50
49
  {
51
50
  abilities: {
52
- vision: true,
51
+ functionCall: true,
53
52
  },
54
53
  contextWindowTokens: 8192,
55
- description:
56
- 'Llama 3.2 旨在处理结合视觉和文本数据的任务。它在图像描述和视觉问答等任务中表现出色,跨越了语言生成和视觉推理之间的鸿沟。',
57
- displayName: 'Llama 3.2 90B Vision (Preview)',
58
- enabled: true,
59
- id: 'llama-3.2-90b-vision-preview',
60
- maxOutput: 8192,
54
+ description: 'Gemma 2 9B 是一款优化用于特定任务和工具整合的模型。',
55
+ displayName: 'Gemma 2 9B',
56
+ id: 'gemma2-9b-it',
61
57
  pricing: {
62
- input: 0.59,
63
- output: 0.79,
58
+ input: 0.2,
59
+ output: 0.2,
64
60
  },
65
61
  type: 'chat',
66
62
  },
@@ -69,10 +65,8 @@ const groqChatModels: AIChatModelCard[] = [
69
65
  functionCall: true,
70
66
  },
71
67
  contextWindowTokens: 131_072,
72
- description:
73
- 'Llama 3.1 8B 是一款高效能模型,提供了快速的文本生成能力,非常适合需要大规模效率和成本效益的应用场景。',
74
- displayName: 'Llama 3.1 8B',
75
- enabled: true,
68
+ description: 'Llama 3.1 8B 是一款高效能模型,提供了快速的文本生成能力,非常适合需要大规模效率和成本效益的应用场景。',
69
+ displayName: 'Llama 3.1 8B Instant',
76
70
  id: 'llama-3.1-8b-instant',
77
71
  maxOutput: 8192,
78
72
  pricing: {
@@ -83,60 +77,64 @@ const groqChatModels: AIChatModelCard[] = [
83
77
  },
84
78
  {
85
79
  abilities: {
86
- functionCall: true,
80
+ vision: true,
87
81
  },
88
82
  contextWindowTokens: 131_072,
89
- description:
90
- 'Llama 3.1 70B 提供更强大的AI推理能力,适合复杂应用,支持超多的计算处理并保证高效和准确率。',
91
- displayName: 'Llama 3.1 70B',
92
- enabled: true,
93
- id: 'llama-3.1-70b-versatile',
83
+ description: 'Llama 3.2 旨在处理结合视觉和文本数据的任务。它在图像描述和视觉问答等任务中表现出色,跨越了语言生成和视觉推理之间的鸿沟。',
84
+ displayName: 'Llama 3.2 11B Vision (Preview)',
85
+ id: 'llama-3.2-11b-vision-preview',
94
86
  maxOutput: 8192,
95
87
  pricing: {
96
- input: 0.59,
97
- output: 0.79,
88
+ input: 0.18,
89
+ output: 0.18,
98
90
  },
99
91
  type: 'chat',
100
92
  },
101
93
  {
102
94
  abilities: {
103
- functionCall: true,
95
+ vision: true,
104
96
  },
105
- contextWindowTokens: 8192,
106
- description: 'Llama 3 Groq 8B Tool Use 是针对高效工具使用优化的模型,支持快速并行计算。',
107
- displayName: 'Llama 3 Groq 8B Tool Use (Preview)',
108
- id: 'llama3-groq-8b-8192-tool-use-preview',
97
+ contextWindowTokens: 131_072,
98
+ description: 'Llama 3.2 旨在处理结合视觉和文本数据的任务。它在图像描述和视觉问答等任务中表现出色,跨越了语言生成和视觉推理之间的鸿沟。',
99
+ displayName: 'Llama 3.2 90B Vision (Preview)',
100
+ enabled: true,
101
+ id: 'llama-3.2-90b-vision-preview',
102
+ maxOutput: 8192,
109
103
  pricing: {
110
- input: 0.19,
111
- output: 0.19,
104
+ input: 0.9,
105
+ output: 0.9,
112
106
  },
113
107
  type: 'chat',
114
108
  },
115
109
  {
116
- abilities: {
117
- functionCall: true,
118
- },
119
- contextWindowTokens: 8192,
120
- description: 'Llama 3 Groq 70B Tool Use 提供强大的工具调用能力,支持复杂任务的高效处理。',
121
- displayName: 'Llama 3 Groq 70B Tool Use (Preview)',
122
- id: 'llama3-groq-70b-8192-tool-use-preview',
110
+ contextWindowTokens: 131_072,
111
+ displayName: 'Llama 3.2 1B (Preview)',
112
+ id: 'llama-3.2-1b-preview',
113
+ maxOutput: 8192,
123
114
  pricing: {
124
- input: 0.89,
125
- output: 0.89,
115
+ input: 0.04,
116
+ output: 0.04,
126
117
  },
127
118
  type: 'chat',
128
119
  },
129
120
  {
130
- abilities: {
131
- functionCall: true,
121
+ contextWindowTokens: 131_072,
122
+ displayName: 'Llama 3.2 3B (Preview)',
123
+ id: 'llama-3.2-3b-preview',
124
+ maxOutput: 8192,
125
+ pricing: {
126
+ input: 0.06,
127
+ output: 0.06,
132
128
  },
129
+ type: 'chat',
130
+ },
131
+ {
133
132
  contextWindowTokens: 8192,
134
- description: 'Meta Llama 3 8B 带来优质的推理效能,适合多场景应用需求。',
135
- displayName: 'Meta Llama 3 8B',
136
- id: 'llama3-8b-8192',
133
+ displayName: 'Llama 3.3 70B SpecDec',
134
+ id: 'llama-3.3-70b-specdec',
137
135
  pricing: {
138
- input: 0.05,
139
- output: 0.08,
136
+ input: 0.59,
137
+ output: 0.99,
140
138
  },
141
139
  type: 'chat',
142
140
  },
@@ -144,10 +142,12 @@ const groqChatModels: AIChatModelCard[] = [
144
142
  abilities: {
145
143
  functionCall: true,
146
144
  },
147
- contextWindowTokens: 8192,
148
- description: 'Meta Llama 3 70B 提供无与伦比的复杂性处理能力,为高要求项目量身定制。',
149
- displayName: 'Meta Llama 3 70B',
150
- id: 'llama3-70b-8192',
145
+ contextWindowTokens: 131_072,
146
+ description: 'Meta Llama 3.3 多语言大语言模型 ( LLM ) 是 70B(文本输入/文本输出)中的预训练和指令调整生成模型。 Llama 3.3 指令调整的纯文本模型针对多语言对话用例进行了优化,并且在常见行业基准上优于许多可用的开源和封闭式聊天模型。',
147
+ displayName: 'Llama 3.3 70B Versatile',
148
+ enabled: true,
149
+ id: 'llama-3.3-70b-versatile',
150
+ maxOutput: 32_768,
151
151
  pricing: {
152
152
  input: 0.59,
153
153
  output: 0.79,
@@ -155,31 +155,24 @@ const groqChatModels: AIChatModelCard[] = [
155
155
  type: 'chat',
156
156
  },
157
157
  {
158
- abilities: {
159
- functionCall: true,
160
- },
161
158
  contextWindowTokens: 8192,
162
- description: 'Gemma 2 9B 是一款优化用于特定任务和工具整合的模型。',
163
- displayName: 'Gemma 2 9B',
164
- enabled: true,
165
- id: 'gemma2-9b-it',
159
+ description: 'Meta Llama 3 70B 提供无与伦比的复杂性处理能力,为高要求项目量身定制。',
160
+ displayName: 'Llama 3 70B',
161
+ id: 'llama3-70b-8192',
166
162
  pricing: {
167
- input: 0.2,
168
- output: 0.2,
163
+ input: 0.59,
164
+ output: 0.79,
169
165
  },
170
166
  type: 'chat',
171
167
  },
172
168
  {
173
- abilities: {
174
- functionCall: true,
175
- },
176
169
  contextWindowTokens: 8192,
177
- description: 'Gemma 7B 适合中小规模任务处理,兼具成本效益。',
178
- displayName: 'Gemma 7B',
179
- id: 'gemma-7b-it',
170
+ description: 'Meta Llama 3 8B 带来优质的推理效能,适合多场景应用需求。',
171
+ displayName: 'Llama 3 8B',
172
+ id: 'llama3-8b-8192',
180
173
  pricing: {
181
- input: 0.07,
182
- output: 0.07,
174
+ input: 0.05,
175
+ output: 0.08,
183
176
  },
184
177
  type: 'chat',
185
178
  },
@@ -189,7 +182,7 @@ const groqChatModels: AIChatModelCard[] = [
189
182
  },
190
183
  contextWindowTokens: 32_768,
191
184
  description: 'Mixtral 8x7B 提供高容错的并行计算能力,适合复杂任务。',
192
- displayName: 'Mixtral 8x7B',
185
+ displayName: 'Mixtral 8x7B Instruct',
193
186
  id: 'mixtral-8x7b-32768',
194
187
  pricing: {
195
188
  input: 0.24,
@@ -199,12 +192,35 @@ const groqChatModels: AIChatModelCard[] = [
199
192
  },
200
193
  {
201
194
  abilities: {
202
- vision: true,
195
+ functionCall: true,
196
+ },
197
+ contextWindowTokens: 131_072,
198
+ displayName: 'Qwen 2.5 32B',
199
+ id: 'qwen-2.5-32b',
200
+ pricing: {
201
+ input: 0.79,
202
+ output: 0.79,
203
+ },
204
+ type: 'chat',
205
+ },
206
+ {
207
+ contextWindowTokens: 131_072,
208
+ displayName: 'Qwen 2.5 Coder 32B',
209
+ id: 'qwen-2.5-coder-32b',
210
+ pricing: {
211
+ input: 0.79,
212
+ output: 0.79,
213
+ },
214
+ type: 'chat',
215
+ },
216
+ {
217
+ contextWindowTokens: 8192,
218
+ displayName: 'Llama Guard 3 8B',
219
+ id: 'llama-guard-3-8b',
220
+ pricing: {
221
+ input: 0.2,
222
+ output: 0.2,
203
223
  },
204
- contextWindowTokens: 4096,
205
- description: 'LLaVA 1.5 7B 提供视觉处理能力融合,通过视觉信息输入生成复杂输出。',
206
- displayName: 'LLaVA 1.5 7B',
207
- id: 'llava-v1.5-7b-4096-preview',
208
224
  type: 'chat',
209
225
  },
210
226
  ];
@@ -1,5 +1,7 @@
1
1
  import { AIChatModelCard } from '@/types/aiModel';
2
2
 
3
+ // https://cloud.tencent.com/document/product/1729/104753
4
+
3
5
  const hunyuanChatModels: AIChatModelCard[] = [
4
6
  {
5
7
  contextWindowTokens: 256_000,
@@ -30,7 +32,7 @@ const hunyuanChatModels: AIChatModelCard[] = [
30
32
  input: 0.8,
31
33
  output: 2,
32
34
  },
33
- releasedAt: '2024-10-28',
35
+ releasedAt: '2025-02-10',
34
36
  type: 'chat',
35
37
  },
36
38
  {
@@ -46,7 +48,7 @@ const hunyuanChatModels: AIChatModelCard[] = [
46
48
  input: 0.5,
47
49
  output: 2,
48
50
  },
49
- releasedAt: '2024-10-28',
51
+ releasedAt: '2025-02-10',
50
52
  type: 'chat',
51
53
  },
52
54
  {
@@ -56,7 +58,7 @@ const hunyuanChatModels: AIChatModelCard[] = [
56
58
  contextWindowTokens: 32_000,
57
59
  description:
58
60
  '通用体验优化,包括NLP理解、文本创作、闲聊、知识问答、翻译、领域等;提升拟人性,优化模型情商;提升意图模糊时模型主动澄清能力;提升字词解析类问题的处理能力;提升创作的质量和可互动性;提升多轮体验。',
59
- displayName: 'Hunyuan Turbo',
61
+ displayName: 'Hunyuan Turbo Latest',
60
62
  enabled: true,
61
63
  id: 'hunyuan-turbo-latest',
62
64
  maxOutput: 4000,
@@ -135,7 +137,7 @@ const hunyuanChatModels: AIChatModelCard[] = [
135
137
  input: 4,
136
138
  output: 12,
137
139
  },
138
- releasedAt: '2024-11-20',
140
+ releasedAt: '2025-02-10',
139
141
  type: 'chat',
140
142
  },
141
143
  {
@@ -264,6 +266,36 @@ const hunyuanChatModels: AIChatModelCard[] = [
264
266
  releasedAt: '2024-07-04',
265
267
  type: 'chat',
266
268
  },
269
+ {
270
+ contextWindowTokens: 8000,
271
+ description:
272
+ '支持中文和英语、日语、法语、葡萄牙语、西班牙语、土耳其语、俄语、阿拉伯语、韩语、意大利语、德语、越南语、马来语、印尼语15种语言互译,基于多场景翻译评测集自动化评估COMET评分,在十余种常用语种中外互译能力上整体优于市场同规模模型。',
273
+ displayName: 'Hunyuan Translation',
274
+ id: 'hunyuan-translation',
275
+ maxOutput: 4000,
276
+ pricing: {
277
+ currency: 'CNY',
278
+ input: 25,
279
+ output: 75,
280
+ },
281
+ releasedAt: '2024-10-25',
282
+ type: 'chat',
283
+ },
284
+ {
285
+ contextWindowTokens: 8000,
286
+ description:
287
+ '混元翻译模型支持自然语言对话式翻译;支持中文和英语、日语、法语、葡萄牙语、西班牙语、土耳其语、俄语、阿拉伯语、韩语、意大利语、德语、越南语、马来语、印尼语15种语言互译。',
288
+ displayName: 'Hunyuan Translation Lite',
289
+ id: 'hunyuan-translation-lite',
290
+ maxOutput: 4000,
291
+ pricing: {
292
+ currency: 'CNY',
293
+ input: 5,
294
+ output: 15,
295
+ },
296
+ releasedAt: '2024-11-25',
297
+ type: 'chat',
298
+ },
267
299
  ];
268
300
 
269
301
  export const allModels = [...hunyuanChatModels];
@@ -1,5 +1,7 @@
1
1
  import { AIChatModelCard } from '@/types/aiModel';
2
2
 
3
+ // https://internlm.intern-ai.org.cn/api/document
4
+
3
5
  const internlmChatModels: AIChatModelCard[] = [
4
6
  {
5
7
  abilities: {
@@ -7,11 +9,10 @@ const internlmChatModels: AIChatModelCard[] = [
7
9
  },
8
10
  contextWindowTokens: 32_768,
9
11
  description:
10
- '我们最新的模型系列,有着卓越的推理性能,领跑同量级开源模型。默认指向我们最新发布的 InternLM3 系列模型',
12
+ '我们最新的模型系列,有着卓越的推理性能,领跑同量级开源模型。默认指向我们最新发布的 InternLM3 系列模型,当前指向 internlm3-8b-instruct。',
11
13
  displayName: 'InternLM3',
12
14
  enabled: true,
13
15
  id: 'internlm3-latest',
14
- maxOutput: 4096,
15
16
  pricing: {
16
17
  input: 0,
17
18
  output: 0,
@@ -24,11 +25,10 @@ const internlmChatModels: AIChatModelCard[] = [
24
25
  },
25
26
  contextWindowTokens: 32_768,
26
27
  description:
27
- '我们仍在维护的老版本模型,经过多轮迭代有着极其优异且稳定的性能,包含 7B、20B 多种模型参数量可选,支持 1M 的上下文长度以及更强的指令跟随和工具调用能力。默认指向我们最新发布的 InternLM2.5 系列模型',
28
+ '我们仍在维护的老版本模型,经过多轮迭代有着极其优异且稳定的性能,包含 7B、20B 多种模型参数量可选,支持 1M 的上下文长度以及更强的指令跟随和工具调用能力。默认指向我们最新发布的 InternLM2.5 系列模型,当前指向 internlm2.5-20b-chat。',
28
29
  displayName: 'InternLM2.5',
29
30
  enabled: true,
30
31
  id: 'internlm2.5-latest',
31
- maxOutput: 4096,
32
32
  pricing: {
33
33
  input: 0,
34
34
  output: 0,
@@ -43,7 +43,6 @@ const internlmChatModels: AIChatModelCard[] = [
43
43
  description: 'InternLM2 版本最大的模型,专注于高度复杂的任务',
44
44
  displayName: 'InternLM2 Pro Chat',
45
45
  id: 'internlm2-pro-chat',
46
- maxOutput: 4096,
47
46
  pricing: {
48
47
  input: 0,
49
48
  output: 0,
@@ -16,6 +16,9 @@ const jinaChatModels: AIChatModelCard[] = [
16
16
  input: 0.02,
17
17
  output: 0.02,
18
18
  },
19
+ settings: {
20
+ searchImpl: 'internal',
21
+ },
19
22
  type: 'chat',
20
23
  },
21
24
  ];
@@ -1,5 +1,8 @@
1
1
  import { AIChatModelCard } from '@/types/aiModel';
2
2
 
3
+ // https://docs.mistral.ai/getting-started/models/models_overview/
4
+ // https://mistral.ai/products/la-plateforme#pricing
5
+
3
6
  const mistralChatModels: AIChatModelCard[] = [
4
7
  {
5
8
  abilities: {
@@ -12,8 +15,8 @@ const mistralChatModels: AIChatModelCard[] = [
12
15
  enabled: true,
13
16
  id: 'open-mistral-nemo',
14
17
  pricing: {
15
- input: 0.15,
16
- output: 0.15,
18
+ input: 0,
19
+ output: 0,
17
20
  },
18
21
  type: 'chat',
19
22
  },
@@ -21,14 +24,14 @@ const mistralChatModels: AIChatModelCard[] = [
21
24
  abilities: {
22
25
  functionCall: true,
23
26
  },
24
- contextWindowTokens: 128_000,
27
+ contextWindowTokens: 32_000,
25
28
  description: 'Mistral Small是成本效益高、快速且可靠的选项,适用于翻译、摘要和情感分析等用例。',
26
29
  displayName: 'Mistral Small',
27
30
  enabled: true,
28
31
  id: 'mistral-small-latest',
29
32
  pricing: {
30
- input: 0.2,
31
- output: 0.6,
33
+ input: 0.1,
34
+ output: 0.3,
32
35
  },
33
36
  type: 'chat',
34
37
  },
@@ -36,7 +39,7 @@ const mistralChatModels: AIChatModelCard[] = [
36
39
  abilities: {
37
40
  functionCall: true,
38
41
  },
39
- contextWindowTokens: 128_000,
42
+ contextWindowTokens: 131_072,
40
43
  description:
41
44
  'Mistral Large是旗舰大模型,擅长多语言任务、复杂推理和代码生成,是高端应用的理想选择。',
42
45
  displayName: 'Mistral Large',
@@ -49,14 +52,18 @@ const mistralChatModels: AIChatModelCard[] = [
49
52
  type: 'chat',
50
53
  },
51
54
  {
52
- contextWindowTokens: 32_768,
53
- description: 'Codestral是专注于代码生成的尖端生成模型,优化了中间填充和代码补全任务。',
55
+ abilities: {
56
+ functionCall: true,
57
+ },
58
+ contextWindowTokens: 256_000,
59
+ description: 'Codestral 是我们最先进的编码语言模型,第二个版本于2025年1月发布,专门从事低延迟、高频任务如中间填充(RST)、代码纠正和测试生成。',
54
60
  displayName: 'Codestral',
55
61
  id: 'codestral-latest',
56
62
  pricing: {
57
- input: 0.2,
58
- output: 0.6,
63
+ input: 0.3,
64
+ output: 0.9,
59
65
  },
66
+ releasedAt: '2025-01-13',
60
67
  type: 'chat',
61
68
  },
62
69
  {
@@ -64,7 +71,7 @@ const mistralChatModels: AIChatModelCard[] = [
64
71
  functionCall: true,
65
72
  vision: true,
66
73
  },
67
- contextWindowTokens: 128_000,
74
+ contextWindowTokens: 131_072,
68
75
  description:
69
76
  'Pixtral Large 是一款拥有 1240 亿参数的开源多模态模型,基于 Mistral Large 2 构建。这是我们多模态家族中的第二款模型,展现了前沿水平的图像理解能力。',
70
77
  displayName: 'Pixtral Large',
@@ -78,22 +85,26 @@ const mistralChatModels: AIChatModelCard[] = [
78
85
  },
79
86
  {
80
87
  abilities: {
88
+ functionCall: true,
81
89
  vision: true,
82
90
  },
83
- contextWindowTokens: 128_000,
91
+ contextWindowTokens: 131_072,
84
92
  description:
85
93
  'Pixtral 模型在图表和图理解、文档问答、多模态推理和指令遵循等任务上表现出强大的能力,能够以自然分辨率和宽高比摄入图像,还能够在长达 128K 令牌的长上下文窗口中处理任意数量的图像。',
86
94
  displayName: 'Pixtral 12B',
87
95
  enabled: true,
88
96
  id: 'pixtral-12b-2409',
89
97
  pricing: {
90
- input: 0.15,
91
- output: 0.15,
98
+ input: 0,
99
+ output: 0,
92
100
  },
93
101
  type: 'chat',
94
102
  },
95
103
  {
96
- contextWindowTokens: 128_000,
104
+ abilities: {
105
+ functionCall: true,
106
+ },
107
+ contextWindowTokens: 131_072,
97
108
  description: 'Ministral 3B 是Mistral的世界顶级边缘模型。',
98
109
  displayName: 'Ministral 3B',
99
110
  id: 'ministral-3b-latest',
@@ -104,7 +115,10 @@ const mistralChatModels: AIChatModelCard[] = [
104
115
  type: 'chat',
105
116
  },
106
117
  {
107
- contextWindowTokens: 128_000,
118
+ abilities: {
119
+ functionCall: true,
120
+ },
121
+ contextWindowTokens: 131_072,
108
122
  description: 'Ministral 8B 是Mistral的性价比极高的边缘模型。',
109
123
  displayName: 'Ministral 8B',
110
124
  id: 'ministral-8b-latest',
@@ -119,7 +133,7 @@ const mistralChatModels: AIChatModelCard[] = [
119
133
  description:
120
134
  'Mistral 7B是一款紧凑但高性能的模型,擅长批量处理和简单任务,如分类和文本生成,具有良好的推理能力。',
121
135
  displayName: 'Mistral 7B',
122
- id: 'open-mistral-7b',
136
+ id: 'open-mistral-7b', // Deprecated on 2025/03/30
123
137
  pricing: {
124
138
  input: 0.25,
125
139
  output: 0.25,
@@ -131,7 +145,7 @@ const mistralChatModels: AIChatModelCard[] = [
131
145
  description:
132
146
  'Mixtral 8x7B是一个稀疏专家模型,利用多个参数提高推理速度,适合处理多语言和代码生成任务。',
133
147
  displayName: 'Mixtral 8x7B',
134
- id: 'open-mixtral-8x7b',
148
+ id: 'open-mixtral-8x7b', // Deprecated on 2025/03/30
135
149
  pricing: {
136
150
  input: 0.7,
137
151
  output: 0.7,
@@ -146,7 +160,7 @@ const mistralChatModels: AIChatModelCard[] = [
146
160
  description:
147
161
  'Mixtral 8x22B是一个更大的专家模型,专注于复杂任务,提供出色的推理能力和更高的吞吐量。',
148
162
  displayName: 'Mixtral 8x22B',
149
- id: 'open-mixtral-8x22b',
163
+ id: 'open-mixtral-8x22b', // Deprecated on 2025/03/30
150
164
  pricing: {
151
165
  input: 2,
152
166
  output: 6,
@@ -160,8 +174,8 @@ const mistralChatModels: AIChatModelCard[] = [
160
174
  displayName: 'Codestral Mamba',
161
175
  id: 'open-codestral-mamba',
162
176
  pricing: {
163
- input: 0.15,
164
- output: 0.15,
177
+ input: 0,
178
+ output: 0,
165
179
  },
166
180
  type: 'chat',
167
181
  },