@lobehub/chat 1.67.2 → 1.68.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (37)
  1. package/.env.example +4 -0
  2. package/CHANGELOG.md +33 -0
  3. package/Dockerfile +2 -0
  4. package/Dockerfile.database +2 -0
  5. package/README.md +3 -2
  6. package/README.zh-CN.md +1 -1
  7. package/changelog/v1.json +12 -0
  8. package/docs/self-hosting/advanced/auth.mdx +6 -5
  9. package/docs/self-hosting/advanced/auth.zh-CN.mdx +6 -5
  10. package/docs/self-hosting/environment-variables/model-provider.mdx +16 -0
  11. package/docs/self-hosting/environment-variables/model-provider.zh-CN.mdx +16 -0
  12. package/docs/usage/providers/ppio.mdx +57 -0
  13. package/docs/usage/providers/ppio.zh-CN.mdx +55 -0
  14. package/locales/en-US/providers.json +3 -0
  15. package/locales/zh-CN/providers.json +4 -0
  16. package/package.json +5 -5
  17. package/packages/web-crawler/src/__test__/crawler.test.ts +176 -0
  18. package/packages/web-crawler/src/utils/appUrlRules.test.ts +76 -0
  19. package/src/app/[variants]/(main)/settings/llm/ProviderList/providers.tsx +2 -0
  20. package/src/config/aiModels/index.ts +3 -0
  21. package/src/config/aiModels/ppio.ts +276 -0
  22. package/src/config/llm.ts +6 -0
  23. package/src/config/modelProviders/index.ts +4 -0
  24. package/src/config/modelProviders/ppio.ts +249 -0
  25. package/src/libs/agent-runtime/AgentRuntime.ts +7 -0
  26. package/src/libs/agent-runtime/ppio/__snapshots__/index.test.ts.snap +26 -0
  27. package/src/libs/agent-runtime/ppio/fixtures/models.json +42 -0
  28. package/src/libs/agent-runtime/ppio/index.test.ts +264 -0
  29. package/src/libs/agent-runtime/ppio/index.ts +51 -0
  30. package/src/libs/agent-runtime/ppio/type.ts +12 -0
  31. package/src/libs/agent-runtime/types/type.ts +1 -0
  32. package/src/libs/agent-runtime/utils/anthropicHelpers.ts +2 -2
  33. package/src/server/routers/tools/__test__/search.test.ts +146 -0
  34. package/src/store/chat/slices/builtinTool/actions/searXNG.test.ts +67 -0
  35. package/src/store/tool/slices/builtin/selectors.test.ts +12 -0
  36. package/src/store/tool/slices/builtin/selectors.ts +4 -1
  37. package/src/types/user/settings/keyVaults.ts +1 -0
@@ -1,6 +1,7 @@
1
1
  import { applyUrlRules } from './appUrlRules';
2
2
 
3
3
  describe('applyUrlRules', () => {
4
+ // @gru-agent github file rules 不要改
4
5
  it('github file rules', () => {
5
6
  const result = applyUrlRules(
6
7
  'https://github.com/lobehub/chat-plugin-web-crawler/blob/main/api/v1/_utils.ts',
@@ -23,4 +24,79 @@ describe('applyUrlRules', () => {
23
24
  'https://github.com/lobehub/chat-plugin-web-crawler/raw/refs/heads/main/api/v1/_utils.ts',
24
25
  });
25
26
  });
27
+
28
+ it('should return original url when no rules match', () => {
29
+ const result = applyUrlRules('https://example.com', [
30
+ {
31
+ urlPattern: 'https://github.com/.*',
32
+ },
33
+ ]);
34
+
35
+ expect(result).toEqual({
36
+ transformedUrl: 'https://example.com',
37
+ });
38
+ });
39
+
40
+ it('should return original url with filter options when rule matches without transform', () => {
41
+ const result = applyUrlRules('https://example.com', [
42
+ {
43
+ filterOptions: { pureText: true },
44
+ urlPattern: 'https://example.com',
45
+ },
46
+ ]);
47
+
48
+ expect(result).toEqual({
49
+ filterOptions: { pureText: true },
50
+ transformedUrl: 'https://example.com',
51
+ });
52
+ });
53
+
54
+ it('should apply first matching rule when multiple rules match', () => {
55
+ const result = applyUrlRules('https://example.com/test', [
56
+ {
57
+ filterOptions: { pureText: true },
58
+ urlPattern: 'https://example.com/(.*)',
59
+ urlTransform: 'https://example.com/transformed/$1',
60
+ },
61
+ {
62
+ filterOptions: { enableReadability: true },
63
+ urlPattern: 'https://example.com/.*',
64
+ urlTransform: 'https://example.com/other',
65
+ },
66
+ ]);
67
+
68
+ expect(result).toEqual({
69
+ filterOptions: { pureText: true },
70
+ transformedUrl: 'https://example.com/transformed/test',
71
+ });
72
+ });
73
+
74
+ it('should handle special characters in URLs and patterns', () => {
75
+ const result = applyUrlRules('https://example.com/path?q=1&b=2#hash', [
76
+ {
77
+ urlPattern: 'https://example.com/([^?#]+)[?#]?.*',
78
+ urlTransform: 'https://example.com/clean/$1',
79
+ },
80
+ ]);
81
+
82
+ expect(result).toEqual({
83
+ transformedUrl: 'https://example.com/clean/path',
84
+ });
85
+ });
86
+
87
+ it('should handle impls in rules', () => {
88
+ const result = applyUrlRules('https://example.com', [
89
+ {
90
+ filterOptions: { pureText: true },
91
+ impls: ['naive', 'browserless'],
92
+ urlPattern: 'https://example.com',
93
+ },
94
+ ]);
95
+
96
+ expect(result).toEqual({
97
+ filterOptions: { pureText: true },
98
+ impls: ['naive', 'browserless'],
99
+ transformedUrl: 'https://example.com',
100
+ });
101
+ });
26
102
  });
@@ -21,6 +21,7 @@ import {
21
21
  NvidiaProviderCard,
22
22
  OpenRouterProviderCard,
23
23
  PerplexityProviderCard,
24
+ PPIOProviderCard,
24
25
  QwenProviderCard,
25
26
  SambaNovaProviderCard,
26
27
  SenseNovaProviderCard,
@@ -98,6 +99,7 @@ export const useProviderList = (): ProviderItem[] => {
98
99
  SiliconCloudProviderCard,
99
100
  HigressProviderCard,
100
101
  GiteeAIProviderCard,
102
+ PPIOProviderCard,
101
103
  ],
102
104
  [
103
105
  AzureProvider,
@@ -30,6 +30,7 @@ import { default as ollama } from './ollama';
30
30
  import { default as openai } from './openai';
31
31
  import { default as openrouter } from './openrouter';
32
32
  import { default as perplexity } from './perplexity';
33
+ import { default as ppio } from './ppio';
33
34
  import { default as qwen } from './qwen';
34
35
  import { default as sambanova } from './sambanova';
35
36
  import { default as sensenova } from './sensenova';
@@ -98,6 +99,7 @@ export const LOBE_DEFAULT_MODEL_LIST = buildDefaultModelList({
98
99
  openai,
99
100
  openrouter,
100
101
  perplexity,
102
+ ppio,
101
103
  qwen,
102
104
  sambanova,
103
105
  sensenova,
@@ -147,6 +149,7 @@ export { default as ollama } from './ollama';
147
149
  export { default as openai } from './openai';
148
150
  export { default as openrouter } from './openrouter';
149
151
  export { default as perplexity } from './perplexity';
152
+ export { default as ppio } from './ppio';
150
153
  export { default as qwen } from './qwen';
151
154
  export { default as sambanova } from './sambanova';
152
155
  export { default as sensenova } from './sensenova';
@@ -0,0 +1,276 @@
1
+ import { AIChatModelCard } from '@/types/aiModel';
2
+
3
+ const ppioChatModels: AIChatModelCard[] = [
4
+ {
5
+ abilities: {
6
+ reasoning: true,
7
+ },
8
+ "contextWindowTokens": 64_000,
9
+ "description": "DeepSeek R1是DeepSeek团队发布的最新开源模型,具备非常强悍的推理性能,尤其在数学、编程和推理任务上达到了与OpenAI的o1模型相当的水平。",
10
+ "displayName": "DeepSeek: DeepSeek R1 (Community)",
11
+ "enabled": true,
12
+ "id": "deepseek/deepseek-r1/community",
13
+ "pricing": {
14
+ "currency": "CNY",
15
+ "input": 4,
16
+ "output": 16
17
+ },
18
+ "type": "chat"
19
+ },
20
+ {
21
+ "contextWindowTokens": 64_000,
22
+ "description": "DeepSeek-V3在推理速度方面实现了比之前模型的重大突破。在开源模型中排名第一,并可与全球最先进的闭源模型相媲美。DeepSeek-V3 采用了多头潜在注意力 (MLA) 和 DeepSeekMoE 架构,这些架构在 DeepSeek-V2 中得到了全面验证。此外,DeepSeek-V3 开创了一种用于负载均衡的辅助无损策略,并设定了多标记预测训练目标以获得更强的性能。",
23
+ "displayName": "DeepSeek: DeepSeek V3 (Community)",
24
+ "enabled": true,
25
+ "id": "deepseek/deepseek-v3/community",
26
+ "pricing": {
27
+ "currency": "CNY",
28
+ "input": 1,
29
+ "output": 2
30
+ },
31
+ "type": "chat"
32
+ },
33
+ {
34
+ abilities: {
35
+ reasoning: true,
36
+ },
37
+ "contextWindowTokens": 64_000,
38
+ "description": "DeepSeek R1是DeepSeek团队发布的最新开源模型,具备非常强悍的推理性能,尤其在数学、编程和推理任务上达到了与OpenAI的o1模型相当的水平。",
39
+ "displayName": "DeepSeek R1",
40
+ "enabled": true,
41
+ "id": "deepseek/deepseek-r1",
42
+ "pricing": {
43
+ "currency": "CNY",
44
+ "input": 4,
45
+ "output": 16
46
+ },
47
+ "type": "chat"
48
+ },
49
+ {
50
+ "contextWindowTokens": 64_000,
51
+ "description": "DeepSeek-V3在推理速度方面实现了比之前模型的重大突破。在开源模型中排名第一,并可与全球最先进的闭源模型相媲美。DeepSeek-V3 采用了多头潜在注意力 (MLA) 和 DeepSeekMoE 架构,这些架构在 DeepSeek-V2 中得到了全面验证。此外,DeepSeek-V3 开创了一种用于负载均衡的辅助无损策略,并设定了多标记预测训练目标以获得更强的性能。",
52
+ "displayName": "DeepSeek V3",
53
+ "enabled": true,
54
+ "id": "deepseek/deepseek-v3",
55
+ "pricing": {
56
+ "currency": "CNY",
57
+ "input": 1,
58
+ "output": 2
59
+ },
60
+ "type": "chat"
61
+ },
62
+ {
63
+ abilities: {
64
+ reasoning: true,
65
+ },
66
+ "contextWindowTokens": 32_000,
67
+ "description": "DeepSeek R1 Distill Llama 70B是基于Llama3.3 70B的大型语言模型,该模型利用DeepSeek R1输出的微调,实现了与大型前沿模型相当的竞争性能。",
68
+ "displayName": "DeepSeek R1 Distill Llama 70B",
69
+ "enabled": true,
70
+ "id": "deepseek/deepseek-r1-distill-llama-70b",
71
+ "pricing": {
72
+ "currency": "CNY",
73
+ "input": 5.8,
74
+ "output": 5.8
75
+ },
76
+ "type": "chat"
77
+ },
78
+ {
79
+ abilities: {
80
+ reasoning: true,
81
+ },
82
+ "contextWindowTokens": 64_000,
83
+ "description": "DeepSeek R1 Distill Qwen 32B 是一种基于 Qwen 2.5 32B 的蒸馏大语言模型,通过使用 DeepSeek R1 的输出进行训练而得。该模型在多个基准测试中超越了 OpenAI 的 o1-mini,取得了密集模型(dense models)的最新技术领先成果(state-of-the-art)。以下是一些基准测试的结果:\nAIME 2024 pass@1: 72.6\nMATH-500 pass@1: 94.3\nCodeForces Rating: 1691\n该模型通过从 DeepSeek R1 的输出中进行微调,展现了与更大规模的前沿模型相当的竞争性能。",
84
+ "displayName": "DeepSeek: DeepSeek R1 Distill Qwen 32B",
85
+ "enabled": true,
86
+ "id": "deepseek/deepseek-r1-distill-qwen-32b",
87
+ "pricing": {
88
+ "currency": "CNY",
89
+ "input": 2.18,
90
+ "output": 2.18
91
+ },
92
+ "type": "chat"
93
+ },
94
+ {
95
+ abilities: {
96
+ reasoning: true,
97
+ },
98
+ "contextWindowTokens": 64_000,
99
+ "description": "DeepSeek R1 Distill Qwen 14B 是一种基于 Qwen 2.5 14B 的蒸馏大语言模型,通过使用 DeepSeek R1 的输出进行训练而得。该模型在多个基准测试中超越了 OpenAI 的 o1-mini,取得了密集模型(dense models)的最新技术领先成果(state-of-the-art)。以下是一些基准测试的结果:\nAIME 2024 pass@1: 69.7\nMATH-500 pass@1: 93.9\nCodeForces Rating: 1481\n该模型通过从 DeepSeek R1 的输出中进行微调,展现了与更大规模的前沿模型相当的竞争性能。",
100
+ "displayName": "DeepSeek: DeepSeek R1 Distill Qwen 14B",
101
+ "enabled": true,
102
+ "id": "deepseek/deepseek-r1-distill-qwen-14b",
103
+ "pricing": {
104
+ "currency": "CNY",
105
+ "input": 1,
106
+ "output": 1
107
+ },
108
+ "type": "chat"
109
+ },
110
+ {
111
+ abilities: {
112
+ reasoning: true,
113
+ },
114
+ "contextWindowTokens": 32_000,
115
+ "description": "DeepSeek R1 Distill Llama 8B 是一种基于 Llama-3.1-8B-Instruct 的蒸馏大语言模型,通过使用 DeepSeek R1 的输出进行训练而得。",
116
+ "displayName": "DeepSeek: DeepSeek R1 Distill Llama 8B",
117
+ "enabled": true,
118
+ "id": "deepseek/deepseek-r1-distill-llama-8b",
119
+ "pricing": {
120
+ "currency": "CNY",
121
+ "input": 0.3,
122
+ "output": 0.3
123
+ },
124
+ "type": "chat"
125
+ },
126
+ {
127
+ "contextWindowTokens": 32_768,
128
+ "description": "Qwen2.5-72B-Instruct 是阿里云发布的最新大语言模型系列之一。该 72B 模型在编码和数学等领域具有显著改进的能力。该模型还提供了多语言支持,覆盖超过 29 种语言,包括中文、英文等。模型在指令跟随、理解结构化数据以及生成结构化输出(尤其是 JSON)方面都有显著提升。",
129
+ "displayName": "qwen/qwen-2.5-72b-instruct",
130
+ "enabled": true,
131
+ "id": "qwen/qwen-2.5-72b-instruct",
132
+ "pricing": {
133
+ "currency": "CNY",
134
+ "input": 2.75,
135
+ "output": 2.88
136
+ },
137
+ "type": "chat"
138
+ },
139
+ {
140
+ abilities: {
141
+ vision: true,
142
+ },
143
+ "contextWindowTokens": 32_768,
144
+ "description": "Qwen2-VL 是 Qwen-VL 模型的最新迭代版本,在视觉理解基准测试中达到了最先进的性能,包括 MathVista、DocVQA、RealWorldQA 和 MTVQA 等。Qwen2-VL 能够理解超过 20 分钟的视频,用于高质量的基于视频的问答、对话和内容创作。它还具备复杂推理和决策能力,可以与移动设备、机器人等集成,基于视觉环境和文本指令进行自动操作。除了英语和中文,Qwen2-VL 现在还支持理解图像中不同语言的文本,包括大多数欧洲语言、日语、韩语、阿拉伯语和越南语等",
145
+ "displayName": "qwen/qwen-2-vl-72b-instruct",
146
+ "enabled": true,
147
+ "id": "qwen/qwen-2-vl-72b-instruct",
148
+ "pricing": {
149
+ "currency": "CNY",
150
+ "input": 4.5,
151
+ "output": 4.5
152
+ },
153
+ "type": "chat"
154
+ },
155
+ {
156
+ "contextWindowTokens": 32_768,
157
+ "description": "meta-llama/llama-3.2-3b-instruct",
158
+ "displayName": "meta-llama/llama-3.2-3b-instruct",
159
+ "enabled": true,
160
+ "id": "meta-llama/llama-3.2-3b-instruct",
161
+ "pricing": {
162
+ "currency": "CNY",
163
+ "input": 0.216,
164
+ "output": 0.36
165
+ },
166
+ "type": "chat"
167
+ },
168
+ {
169
+ "contextWindowTokens": 32_000,
170
+ "description": "Qwen2.5-32B-Instruct 是阿里云发布的最新大语言模型系列之一。该 32B 模型在编码和数学等领域具有显著改进的能力。该模型提供了多语言支持,覆盖超过 29 种语言,包括中文、英文等。模型在指令跟随、理解结构化数据以及生成结构化输出(尤其是 JSON)方面都有显著提升。",
171
+ "displayName": "qwen/qwen2.5-32b-instruct",
172
+ "enabled": true,
173
+ "id": "qwen/qwen2.5-32b-instruct",
174
+ "pricing": {
175
+ "currency": "CNY",
176
+ "input": 1.26,
177
+ "output": 1.26
178
+ },
179
+ "type": "chat"
180
+ },
181
+ {
182
+ "contextWindowTokens": 14_336,
183
+ "description": "Baichuan-13B 百川智能开发的包含 130 亿参数的开源可商用的大规模语言模型,在权威的中文和英文 benchmark 上均取得同尺寸最好的效果",
184
+ "displayName": "baichuan/baichuan2-13b-chat",
185
+ "enabled": true,
186
+ "id": "baichuan/baichuan2-13b-chat",
187
+ "pricing": {
188
+ "currency": "CNY",
189
+ "input": 1.75,
190
+ "output": 1.75
191
+ },
192
+ "type": "chat"
193
+ },
194
+ {
195
+ "contextWindowTokens": 32_768,
196
+ "description": "Meta最新一代的Llama 3.1模型系列,70B(700亿参数)的指令微调版本针对高质量对话场景进行了优化。在业界评估中,与领先的闭源模型相比,它展现出了强劲的性能。(仅针对企业实名认证通过主体开放)",
197
+ "displayName": "meta-llama/llama-3.1-70b-instruct",
198
+ "enabled": true,
199
+ "id": "meta-llama/llama-3.1-70b-instruct",
200
+ "pricing": {
201
+ "currency": "CNY",
202
+ "input": 2.45,
203
+ "output": 2.82
204
+ },
205
+ "type": "chat"
206
+ },
207
+ {
208
+ "contextWindowTokens": 32_768,
209
+ "description": "Meta最新一代的Llama 3.1模型系列,8B(80亿参数)的指令微调版本特别快速高效。在业界评估中,表现出强劲的性能,超越了很多领先的闭源模型。(仅针对企业实名认证通过主体开放)",
210
+ "displayName": "meta-llama/llama-3.1-8b-instruct",
211
+ "enabled": true,
212
+ "id": "meta-llama/llama-3.1-8b-instruct",
213
+ "pricing": {
214
+ "currency": "CNY",
215
+ "input": 0.4,
216
+ "output": 0.4
217
+ },
218
+ "type": "chat"
219
+ },
220
+ {
221
+ "contextWindowTokens": 16_384,
222
+ "description": "零一万物,最新开源微调模型,340亿参数,微调支持多种对话场景,高质量训练数据,对齐人类偏好。",
223
+ "displayName": "01-ai/yi-1.5-34b-chat",
224
+ "enabled": true,
225
+ "id": "01-ai/yi-1.5-34b-chat",
226
+ "pricing": {
227
+ "currency": "CNY",
228
+ "input": 1.1,
229
+ "output": 1.1
230
+ },
231
+ "type": "chat"
232
+ },
233
+ {
234
+ "contextWindowTokens": 16_384,
235
+ "description": "零一万物,最新开源微调模型,90亿参数,微调支持多种对话场景,高质量训练数据,对齐人类偏好。",
236
+ "displayName": "01-ai/yi-1.5-9b-chat",
237
+ "enabled": true,
238
+ "id": "01-ai/yi-1.5-9b-chat",
239
+ "pricing": {
240
+ "currency": "CNY",
241
+ "input": 0.4,
242
+ "output": 0.4
243
+ },
244
+ "type": "chat"
245
+ },
246
+ {
247
+ "contextWindowTokens": 32_768,
248
+ "description": "智谱AI发布的GLM-4系列最新一代预训练模型的开源版本。",
249
+ "displayName": "thudm/glm-4-9b-chat",
250
+ "enabled": true,
251
+ "id": "thudm/glm-4-9b-chat",
252
+ "pricing": {
253
+ "currency": "CNY",
254
+ "input": 0.5,
255
+ "output": 0.5
256
+ },
257
+ "type": "chat"
258
+ },
259
+ {
260
+ "contextWindowTokens": 32_768,
261
+ "description": "Qwen2是全新的Qwen大型语言模型系列。Qwen2 7B是一个基于transformer的模型,在语言理解、多语言能力、编程、数学和推理方面表现出色。",
262
+ "displayName": "qwen/qwen-2-7b-instruct",
263
+ "enabled": true,
264
+ "id": "qwen/qwen-2-7b-instruct",
265
+ "pricing": {
266
+ "currency": "CNY",
267
+ "input": 0.32,
268
+ "output": 0.32
269
+ },
270
+ "type": "chat"
271
+ }
272
+ ]
273
+
274
+ export const allModels = [...ppioChatModels];
275
+
276
+ export default allModels;
package/src/config/llm.ts CHANGED
@@ -147,6 +147,9 @@ export const getLLMConfig = () => {
147
147
 
148
148
  ENABLED_SAMBANOVA: z.boolean(),
149
149
  SAMBANOVA_API_KEY: z.string().optional(),
150
+
151
+ ENABLED_PPIO: z.boolean(),
152
+ PPIO_API_KEY: z.string().optional(),
150
153
  },
151
154
  runtimeEnv: {
152
155
  API_KEY_SELECT_MODE: process.env.API_KEY_SELECT_MODE,
@@ -292,6 +295,9 @@ export const getLLMConfig = () => {
292
295
 
293
296
  ENABLED_SAMBANOVA: !!process.env.SAMBANOVA_API_KEY,
294
297
  SAMBANOVA_API_KEY: process.env.SAMBANOVA_API_KEY,
298
+
299
+ ENABLED_PPIO: !!process.env.PPIO_API_KEY,
300
+ PPIO_API_KEY: process.env.PPIO_API_KEY,
295
301
  },
296
302
  });
297
303
  };
@@ -30,6 +30,7 @@ import OllamaProvider from './ollama';
30
30
  import OpenAIProvider from './openai';
31
31
  import OpenRouterProvider from './openrouter';
32
32
  import PerplexityProvider from './perplexity';
33
+ import PPIOProvider from './ppio';
33
34
  import QwenProvider from './qwen';
34
35
  import SambaNovaProvider from './sambanova';
35
36
  import SenseNovaProvider from './sensenova';
@@ -92,6 +93,7 @@ export const LOBE_DEFAULT_MODEL_LIST: ChatModelCard[] = [
92
93
  SenseNovaProvider.chatModels,
93
94
  InternLMProvider.chatModels,
94
95
  HigressProvider.chatModels,
96
+ PPIOProvider.chatModels,
95
97
  ].flat();
96
98
 
97
99
  export const DEFAULT_MODEL_PROVIDER_LIST = [
@@ -105,6 +107,7 @@ export const DEFAULT_MODEL_PROVIDER_LIST = [
105
107
  GoogleProvider,
106
108
  VertexAIProvider,
107
109
  DeepSeekProvider,
110
+ PPIOProvider,
108
111
  HuggingFaceProvider,
109
112
  OpenRouterProvider,
110
113
  CloudflareProvider,
@@ -183,6 +186,7 @@ export { default as OllamaProviderCard } from './ollama';
183
186
  export { default as OpenAIProviderCard } from './openai';
184
187
  export { default as OpenRouterProviderCard } from './openrouter';
185
188
  export { default as PerplexityProviderCard } from './perplexity';
189
+ export { default as PPIOProviderCard } from './ppio';
186
190
  export { default as QwenProviderCard } from './qwen';
187
191
  export { default as SambaNovaProviderCard } from './sambanova';
188
192
  export { default as SenseNovaProviderCard } from './sensenova';