@lobehub/chat 1.62.10 → 1.63.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (112)
  1. package/CHANGELOG.md +66 -0
  2. package/changelog/v1.json +24 -0
  3. package/docs/self-hosting/environment-variables/model-provider.mdx +18 -0
  4. package/docs/self-hosting/environment-variables/model-provider.zh-CN.mdx +18 -0
  5. package/docs/self-hosting/server-database/sealos.mdx +5 -1
  6. package/locales/ar/chat.json +26 -0
  7. package/locales/ar/models.json +21 -0
  8. package/locales/bg-BG/chat.json +26 -0
  9. package/locales/bg-BG/models.json +21 -0
  10. package/locales/de-DE/chat.json +26 -0
  11. package/locales/de-DE/models.json +21 -0
  12. package/locales/en-US/chat.json +26 -0
  13. package/locales/en-US/models.json +21 -0
  14. package/locales/es-ES/chat.json +26 -0
  15. package/locales/es-ES/models.json +21 -0
  16. package/locales/fa-IR/chat.json +26 -0
  17. package/locales/fa-IR/models.json +21 -0
  18. package/locales/fr-FR/chat.json +26 -0
  19. package/locales/fr-FR/models.json +21 -0
  20. package/locales/it-IT/chat.json +26 -0
  21. package/locales/it-IT/models.json +21 -0
  22. package/locales/ja-JP/chat.json +26 -0
  23. package/locales/ja-JP/models.json +21 -0
  24. package/locales/ko-KR/chat.json +26 -0
  25. package/locales/ko-KR/models.json +21 -0
  26. package/locales/nl-NL/chat.json +26 -0
  27. package/locales/nl-NL/models.json +21 -0
  28. package/locales/pl-PL/chat.json +26 -0
  29. package/locales/pl-PL/models.json +21 -0
  30. package/locales/pt-BR/chat.json +26 -0
  31. package/locales/pt-BR/models.json +21 -0
  32. package/locales/ru-RU/chat.json +26 -0
  33. package/locales/ru-RU/models.json +21 -0
  34. package/locales/tr-TR/chat.json +26 -0
  35. package/locales/tr-TR/models.json +21 -0
  36. package/locales/vi-VN/chat.json +26 -0
  37. package/locales/vi-VN/models.json +21 -0
  38. package/locales/zh-CN/chat.json +27 -1
  39. package/locales/zh-CN/models.json +25 -4
  40. package/locales/zh-TW/chat.json +26 -0
  41. package/locales/zh-TW/models.json +21 -0
  42. package/package.json +3 -3
  43. package/src/app/[variants]/(main)/chat/(workspace)/@conversation/features/ChatInput/Desktop/index.tsx +1 -0
  44. package/src/config/aiModels/google.ts +8 -0
  45. package/src/config/aiModels/groq.ts +111 -95
  46. package/src/config/aiModels/hunyuan.ts +36 -4
  47. package/src/config/aiModels/internlm.ts +4 -5
  48. package/src/config/aiModels/jina.ts +3 -0
  49. package/src/config/aiModels/mistral.ts +35 -21
  50. package/src/config/aiModels/novita.ts +293 -32
  51. package/src/config/aiModels/perplexity.ts +14 -2
  52. package/src/config/aiModels/qwen.ts +91 -37
  53. package/src/config/aiModels/sensenova.ts +70 -17
  54. package/src/config/aiModels/siliconcloud.ts +5 -3
  55. package/src/config/aiModels/stepfun.ts +19 -0
  56. package/src/config/aiModels/taichu.ts +4 -2
  57. package/src/config/aiModels/upstage.ts +24 -11
  58. package/src/config/modelProviders/openrouter.ts +1 -0
  59. package/src/config/modelProviders/qwen.ts +2 -1
  60. package/src/config/modelProviders/volcengine.ts +4 -1
  61. package/src/const/settings/agent.ts +1 -0
  62. package/src/database/repositories/aiInfra/index.test.ts +2 -5
  63. package/src/database/repositories/aiInfra/index.ts +6 -2
  64. package/src/database/schemas/message.ts +2 -1
  65. package/src/database/server/models/aiModel.ts +1 -1
  66. package/src/database/server/models/aiProvider.ts +6 -1
  67. package/src/features/ChatInput/ActionBar/Model/ControlsForm.tsx +38 -0
  68. package/src/features/ChatInput/ActionBar/Model/ExtendControls.tsx +40 -0
  69. package/src/features/ChatInput/ActionBar/Model/index.tsx +132 -0
  70. package/src/features/ChatInput/ActionBar/Params/index.tsx +2 -2
  71. package/src/features/ChatInput/ActionBar/Search/ExaIcon.tsx +15 -0
  72. package/src/features/ChatInput/ActionBar/Search/ModelBuiltinSearch.tsx +68 -0
  73. package/src/features/ChatInput/ActionBar/Search/SwitchPanel.tsx +167 -0
  74. package/src/features/ChatInput/ActionBar/Search/index.tsx +76 -0
  75. package/src/features/ChatInput/ActionBar/config.ts +4 -2
  76. package/src/features/Conversation/Messages/Assistant/SearchGrounding.tsx +153 -0
  77. package/src/features/Conversation/Messages/Assistant/index.tsx +7 -1
  78. package/src/features/ModelSelect/index.tsx +1 -1
  79. package/src/features/ModelSwitchPanel/index.tsx +2 -3
  80. package/src/hooks/useEnabledChatModels.ts +1 -1
  81. package/src/libs/agent-runtime/azureai/index.ts +21 -2
  82. package/src/libs/agent-runtime/google/index.test.ts +142 -36
  83. package/src/libs/agent-runtime/google/index.ts +26 -51
  84. package/src/libs/agent-runtime/novita/__snapshots__/index.test.ts.snap +3 -3
  85. package/src/libs/agent-runtime/openrouter/__snapshots__/index.test.ts.snap +3 -3
  86. package/src/libs/agent-runtime/openrouter/index.ts +20 -20
  87. package/src/libs/agent-runtime/perplexity/index.test.ts +2 -2
  88. package/src/libs/agent-runtime/qwen/index.ts +38 -55
  89. package/src/libs/agent-runtime/types/chat.ts +6 -2
  90. package/src/libs/agent-runtime/utils/streams/google-ai.ts +29 -4
  91. package/src/libs/agent-runtime/utils/streams/openai.ts +1 -1
  92. package/src/libs/agent-runtime/utils/streams/protocol.ts +1 -1
  93. package/src/locales/default/chat.ts +28 -0
  94. package/src/services/chat.ts +10 -0
  95. package/src/store/agent/slices/chat/__snapshots__/selectors.test.ts.snap +1 -0
  96. package/src/store/agent/slices/chat/selectors.ts +6 -0
  97. package/src/store/aiInfra/slices/aiModel/selectors.ts +36 -0
  98. package/src/store/aiInfra/slices/aiProvider/initialState.ts +2 -2
  99. package/src/store/aiInfra/slices/aiProvider/selectors.ts +14 -0
  100. package/src/store/chat/slices/aiChat/actions/generateAIChat.ts +15 -5
  101. package/src/store/chat/slices/message/action.ts +1 -1
  102. package/src/store/user/slices/modelList/selectors/modelProvider.ts +1 -1
  103. package/src/store/user/slices/settings/selectors/__snapshots__/settings.test.ts.snap +1 -0
  104. package/src/types/agent/index.ts +4 -0
  105. package/src/types/aiModel.ts +35 -8
  106. package/src/types/aiProvider.ts +7 -10
  107. package/src/types/message/base.ts +2 -5
  108. package/src/types/message/chat.ts +5 -3
  109. package/src/types/openai/chat.ts +5 -0
  110. package/src/types/search.ts +29 -0
  111. package/src/utils/fetch/fetchSSE.ts +11 -11
  112. package/src/features/ChatInput/ActionBar/ModelSwitch.tsx +0 -20
package/src/config/aiModels/novita.ts
@@ -1,31 +1,39 @@
 import { AIChatModelCard } from '@/types/aiModel';
 
+// https://novita.ai/pricing
+
 const novitaChatModels: AIChatModelCard[] = [
   {
-    contextWindowTokens: 8192,
-    description:
-      'Llama 3.1 8B Instruct 是 Meta 推出的最新版本,优化了高质量对话场景,表现优于许多领先的闭源模型。',
-    displayName: 'Llama 3.1 8B Instruct',
+    contextWindowTokens: 131_072,
+    displayName: 'Llama 3.3 70B Instruct',
     enabled: true,
-    id: 'meta-llama/llama-3.1-8b-instruct',
+    id: 'meta-llama/llama-3.3-70b-instruct',
+    pricing: {
+      input: 0.39,
+      output: 0.39,
+    },
     type: 'chat',
   },
   {
-    contextWindowTokens: 131_072,
-    description:
-      'Llama 3.1 70B Instruct 专为高质量对话而设计,在人类评估中表现突出,特别适合高交互场景。',
-    displayName: 'Llama 3.1 70B Instruct',
-    enabled: true,
-    id: 'meta-llama/llama-3.1-70b-instruct',
+    contextWindowTokens: 16_384,
+    description: 'Llama 3.1 8B Instruct 是 Meta 推出的最新版本,优化了高质量对话场景,表现优于许多领先的闭源模型。',
+    displayName: 'Llama 3.1 8B Instruct',
+    id: 'meta-llama/llama-3.1-8b-instruct',
+    pricing: {
+      input: 0.05,
+      output: 0.05,
+    },
     type: 'chat',
   },
   {
     contextWindowTokens: 32_768,
-    description:
-      'Llama 3.1 405B Instruct 是 Meta最新推出的版本,优化用于生成高质量对话,超越了许多领导闭源模型。',
-    displayName: 'Llama 3.1 405B Instruct',
-    enabled: true,
-    id: 'meta-llama/llama-3.1-405b-instruct',
+    description: 'Llama 3.1 70B Instruct 专为高质量对话而设计,在人类评估中表现突出,特别适合高交互场景。',
+    displayName: 'Llama 3.1 70B Instruct',
+    id: 'meta-llama/llama-3.1-70b-instruct',
+    pricing: {
+      input: 0.34,
+      output: 0.39,
+    },
     type: 'chat',
   },
   {
@@ -33,6 +41,10 @@ const novitaChatModels: AIChatModelCard[] = [
     description: 'Llama 3 8B Instruct 优化了高质量对话场景,性能优于许多闭源模型。',
     displayName: 'Llama 3 8B Instruct',
     id: 'meta-llama/llama-3-8b-instruct',
+    pricing: {
+      input: 0.04,
+      output: 0.04,
+    },
     type: 'chat',
   },
   {
@@ -40,6 +52,10 @@ const novitaChatModels: AIChatModelCard[] = [
     description: 'Llama 3 70B Instruct 优化用于高质量对话场景,在各类人类评估中表现优异。',
     displayName: 'Llama 3 70B Instruct',
     id: 'meta-llama/llama-3-70b-instruct',
+    pricing: {
+      input: 0.51,
+      output: 0.74,
+    },
     type: 'chat',
   },
   {
@@ -48,14 +64,22 @@ const novitaChatModels: AIChatModelCard[] = [
     displayName: 'Gemma 2 9B',
     enabled: true,
     id: 'google/gemma-2-9b-it',
+    pricing: {
+      input: 0.08,
+      output: 0.08,
+    },
     type: 'chat',
   },
   {
-    contextWindowTokens: 32_768,
+    contextWindowTokens: 131_072,
     description: 'Mistral Nemo 是多语言支持和高性能编程的7.3B参数模型。',
     displayName: 'Mistral Nemo',
     enabled: true,
     id: 'mistralai/mistral-nemo',
+    pricing: {
+      input: 0.17,
+      output: 0.17,
+    },
     type: 'chat',
   },
   {
@@ -64,14 +88,10 @@ const novitaChatModels: AIChatModelCard[] = [
     displayName: 'Mistral 7B Instruct',
     enabled: true,
     id: 'mistralai/mistral-7b-instruct',
-    type: 'chat',
-  },
-  {
-    contextWindowTokens: 32_768,
-    description: 'WizardLM 2 7B 是微软AI最新的快速轻量化模型,性能接近于现有开源领导模型的10倍。',
-    displayName: 'WizardLM 2 7B',
-    enabled: true,
-    id: 'microsoft/wizardlm 2-7b',
+    pricing: {
+      input: 0.059,
+      output: 0.059,
+    },
     type: 'chat',
   },
   {
@@ -80,6 +100,10 @@ const novitaChatModels: AIChatModelCard[] = [
     displayName: 'WizardLM-2 8x22B',
     enabled: true,
     id: 'microsoft/wizardlm-2-8x22b',
+    pricing: {
+      input: 0.62,
+      output: 0.62,
+    },
     type: 'chat',
   },
   {
@@ -87,6 +111,10 @@ const novitaChatModels: AIChatModelCard[] = [
     description: 'Dolphin Mixtral 8x22B 是一款为指令遵循、对话和编程设计的模型。',
     displayName: 'Dolphin Mixtral 8x22B',
     id: 'cognitivecomputations/dolphin-mixtral-8x22b',
+    pricing: {
+      input: 0.9,
+      output: 0.9,
+    },
     type: 'chat',
   },
   {
@@ -94,13 +122,10 @@ const novitaChatModels: AIChatModelCard[] = [
     description: 'Hermes 2 Pro Llama 3 8B 是 Nous Hermes 2的升级版本,包含最新的内部开发的数据集。',
     displayName: 'Hermes 2 Pro Llama 3 8B',
     id: 'nousresearch/hermes-2-pro-llama-3-8b',
-    type: 'chat',
-  },
-  {
-    contextWindowTokens: 32_768,
-    description: 'Hermes 2 Mixtral 8x7B DPO 是一款高度灵活的多模型合并,旨在提供卓越的创造性体验。',
-    displayName: 'Hermes 2 Mixtral 8x7B DPO',
-    id: 'Nous-Hermes-2-Mixtral-8x7B-DPO',
+    pricing: {
+      input: 0.14,
+      output: 0.14,
+    },
     type: 'chat',
   },
   {
@@ -108,6 +133,10 @@ const novitaChatModels: AIChatModelCard[] = [
     description: 'MythoMax l2 13B 是一款合并了多个顶尖模型的创意与智能相结合的语言模型。',
     displayName: 'MythoMax l2 13B',
     id: 'gryphe/mythomax-l2-13b',
+    pricing: {
+      input: 0.09,
+      output: 0.09,
+    },
     type: 'chat',
   },
   {
@@ -115,6 +144,238 @@ const novitaChatModels: AIChatModelCard[] = [
     description: 'OpenChat 7B 是经过“C-RLFT(条件强化学习微调)”策略精调的开源语言模型库。',
     displayName: 'OpenChat 7B',
     id: 'openchat/openchat-7b',
+    pricing: {
+      input: 0.06,
+      output: 0.06,
+    },
+    type: 'chat',
+  },
+  {
+    abilities: {
+      reasoning: true,
+    },
+    contextWindowTokens: 64_000,
+    displayName: 'Deepseek R1',
+    enabled: true,
+    id: 'deepseek/deepseek-r1',
+    pricing: {
+      input: 4,
+      output: 4,
+    },
+    type: 'chat',
+  },
+  {
+    contextWindowTokens: 64_000,
+    displayName: 'Deepseek V3',
+    enabled: true,
+    id: 'deepseek/deepseek_v3',
+    pricing: {
+      input: 0.89,
+      output: 0.89,
+    },
+    type: 'chat',
+  },
+  {
+    abilities: {
+      reasoning: true,
+    },
+    contextWindowTokens: 32_000,
+    displayName: 'Deepseek R1 Distill Llama 70B',
+    id: 'deepseek/deepseek-r1-distill-llama-70b',
+    pricing: {
+      input: 0.8,
+      output: 0.8,
+    },
+    type: 'chat',
+  },
+  {
+    abilities: {
+      reasoning: true,
+    },
+    contextWindowTokens: 64_000,
+    displayName: 'Deepseek R1 Distill Qwen 14B',
+    id: 'deepseek/deepseek-r1-distill-qwen-14b',
+    pricing: {
+      input: 0.15,
+      output: 0.15,
+    },
+    type: 'chat',
+  },
+  {
+    abilities: {
+      reasoning: true,
+    },
+    contextWindowTokens: 64_000,
+    displayName: 'Deepseek R1 Distill Qwen 32B',
+    enabled: true,
+    id: 'deepseek/deepseek-r1-distill-qwen-32b',
+    pricing: {
+      input: 0.3,
+      output: 0.3,
+    },
+    type: 'chat',
+  },
+  {
+    contextWindowTokens: 8192,
+    displayName: 'L3 8B Stheno v3.2',
+    id: 'Sao10K/L3-8B-Stheno-v3.2',
+    pricing: {
+      input: 0.05,
+      output: 0.05,
+    },
+    type: 'chat',
+  },
+  {
+    abilities: {
+      reasoning: true,
+    },
+    contextWindowTokens: 32_000,
+    displayName: 'Deepseek R1 Distill Llama 8B',
+    id: 'deepseek/deepseek-r1-distill-llama-8b',
+    pricing: {
+      input: 0.04,
+      output: 0.04,
+    },
+    type: 'chat',
+  },
+  {
+    contextWindowTokens: 32_000,
+    displayName: 'Qwen 2.5 72B Instruct',
+    id: 'qwen/qwen-2.5-72b-instruct',
+    pricing: {
+      input: 0.38,
+      output: 0.4,
+    },
+    type: 'chat',
+  },
+  {
+    contextWindowTokens: 16_000,
+    displayName: 'L3 70B Euryale v2.1',
+    id: 'sao10k/l3-70b-euryale-v2.1',
+    pricing: {
+      input: 1.48,
+      output: 1.48,
+    },
+    type: 'chat',
+  },
+  {
+    contextWindowTokens: 4096,
+    displayName: 'Airoboros L2 70B',
+    id: 'jondurbin/airoboros-l2-70b',
+    pricing: {
+      input: 0.5,
+      output: 0.5,
+    },
+    type: 'chat',
+  },
+  {
+    contextWindowTokens: 4096,
+    displayName: 'Nous Hermes Llama2 13B',
+    id: 'nousresearch/nous-hermes-llama2-13b',
+    pricing: {
+      input: 0.17,
+      output: 0.17,
+    },
+    type: 'chat',
+  },
+  {
+    contextWindowTokens: 4096,
+    displayName: 'OpenHermes 2.5 Mistral 7B',
+    id: 'teknium/openhermes-2.5-mistral-7b',
+    pricing: {
+      input: 0.17,
+      output: 0.17,
+    },
+    type: 'chat',
+  },
+  {
+    contextWindowTokens: 4096,
+    displayName: 'Midnight Rose 70B',
+    id: 'sophosympatheia/midnight-rose-70b',
+    pricing: {
+      input: 0.8,
+      output: 0.8,
+    },
+    type: 'chat',
+  },
+  {
+    contextWindowTokens: 8192,
+    displayName: 'L3 8B Lunaris',
+    id: 'sao10k/l3-8b-lunaris',
+    pricing: {
+      input: 0.05,
+      output: 0.05,
+    },
+    type: 'chat',
+  },
+  {
+    contextWindowTokens: 32_768,
+    displayName: 'Qwen 2 VL 72B Instruct',
+    id: 'qwen/qwen-2-vl-72b-instruct',
+    pricing: {
+      input: 0.45,
+      output: 0.45,
+    },
+    type: 'chat',
+  },
+  {
+    contextWindowTokens: 131_000,
+    displayName: 'Llama 3.2 1B Instruct',
+    id: 'meta-llama/llama-3.2-1b-instruct',
+    pricing: {
+      input: 0.02,
+      output: 0.02,
+    },
+    type: 'chat',
+  },
+  {
+    contextWindowTokens: 32_768,
+    displayName: 'Llama 3.2 11B Vision Instruct',
+    id: 'meta-llama/llama-3.2-11b-vision-instruct',
+    pricing: {
+      input: 0.06,
+      output: 0.06,
+    },
+    type: 'chat',
+  },
+  {
+    contextWindowTokens: 32_768,
+    displayName: 'Llama 3.2 3B Instruct',
+    id: 'meta-llama/llama-3.2-3b-instruct',
+    pricing: {
+      input: 0.03,
+      output: 0.05,
+    },
+    type: 'chat',
+  },
+  {
+    contextWindowTokens: 8192,
+    displayName: 'Llama 3.1 8B Instruct BF16',
+    id: 'meta-llama/llama-3.1-8b-instruct-bf16',
+    pricing: {
+      input: 0.06,
+      output: 0.06,
+    },
+    type: 'chat',
+  },
+  {
+    contextWindowTokens: 16_000,
+    displayName: 'L31 70B Euryale v2.2',
+    id: 'sao10k/l31-70b-euryale-v2.2',
+    pricing: {
+      input: 1.48,
+      output: 1.48,
+    },
+    type: 'chat',
+  },
+  {
+    contextWindowTokens: 32_768,
+    displayName: 'Qwen 2 7B Instruct',
+    id: 'qwen/qwen-2-7b-instruct',
+    pricing: {
+      input: 0.054,
+      output: 0.054,
+    },
     type: 'chat',
   },
 ];
package/src/config/aiModels/perplexity.ts
@@ -7,11 +7,14 @@ const perplexityChatModels: AIChatModelCard[] = [
       search: true,
     },
     contextWindowTokens: 127_072,
-    description: '由 DeepSeek 推理模型提供支持的新 API 产品。',
+    description: '支持搜索上下文的高级搜索产品,支持高级查询和跟进。',
     displayName: 'Sonar Reasoning Pro',
     enabled: true,
     id: 'sonar-reasoning-pro',
     maxOutput: 8192,
+    settings: {
+      searchImpl: 'internal',
+    },
     type: 'chat',
   },
   {
@@ -20,11 +23,14 @@ const perplexityChatModels: AIChatModelCard[] = [
       search: true,
     },
     contextWindowTokens: 127_072,
-    description: '由 DeepSeek 推理模型提供支持的新 API 产品。',
+    description: '支持搜索上下文的高级搜索产品,支持高级查询和跟进。',
     displayName: 'Sonar Reasoning',
     enabled: true,
     id: 'sonar-reasoning',
     maxOutput: 8192,
+    settings: {
+      searchImpl: 'internal',
+    },
     type: 'chat',
   },
   {
@@ -36,6 +42,9 @@ const perplexityChatModels: AIChatModelCard[] = [
     displayName: 'Sonar Pro',
     enabled: true,
     id: 'sonar-pro',
+    settings: {
+      searchImpl: 'internal',
+    },
     type: 'chat',
   },
   {
@@ -47,6 +56,9 @@ const perplexityChatModels: AIChatModelCard[] = [
     displayName: 'Sonar',
     enabled: true,
     id: 'sonar',
+    settings: {
+      searchImpl: 'internal',
+    },
     type: 'chat',
   },
   // The following will be deprecated on 02-22
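The perplexity.ts hunks apply a second repeated pattern: each Sonar model gains `settings: { searchImpl: 'internal' }`, marking that the provider grounds answers with its own built-in web search rather than an external search tool. A minimal sketch of how a caller might branch on that flag, under that reading, follows; `needsExternalSearch` is a hypothetical helper and not code from this package.

```ts
import { AIChatModelCard } from '@/types/aiModel';

// Hypothetical helper: decide whether the app should run its own search step.
// Assumption: `searchImpl: 'internal'` means the provider (e.g. Perplexity Sonar)
// performs web search itself, so no external search tool call is needed.
const needsExternalSearch = (card: AIChatModelCard): boolean => {
  const supportsSearch = card.abilities?.search === true;
  const builtIn = card.settings?.searchImpl === 'internal';
  return supportsSearch && !builtIn;
};
```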