@lobehub/chat 1.16.8 → 1.16.10

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of @lobehub/chat might be problematic; see the registry advisory for more details.

Files changed (51)
  1. package/CHANGELOG.md +58 -0
  2. package/package.json +3 -3
  3. package/src/config/__tests__/auth.test.ts +200 -0
  4. package/src/config/auth.ts +97 -2
  5. package/src/config/modelProviders/ai360.ts +11 -0
  6. package/src/config/modelProviders/anthropic.ts +27 -18
  7. package/src/config/modelProviders/azure.ts +12 -3
  8. package/src/config/modelProviders/baichuan.ts +3 -1
  9. package/src/config/modelProviders/bedrock.ts +20 -18
  10. package/src/config/modelProviders/deepseek.ts +3 -1
  11. package/src/config/modelProviders/fireworksai.ts +33 -5
  12. package/src/config/modelProviders/google.ts +16 -13
  13. package/src/config/modelProviders/groq.ts +19 -8
  14. package/src/config/modelProviders/minimax.ts +8 -6
  15. package/src/config/modelProviders/mistral.ts +19 -3
  16. package/src/config/modelProviders/moonshot.ts +11 -1
  17. package/src/config/modelProviders/novita.ts +24 -0
  18. package/src/config/modelProviders/ollama.ts +58 -1
  19. package/src/config/modelProviders/openai.ts +52 -18
  20. package/src/config/modelProviders/openrouter.ts +21 -1
  21. package/src/config/modelProviders/perplexity.ts +19 -3
  22. package/src/config/modelProviders/qwen.ts +11 -8
  23. package/src/config/modelProviders/siliconcloud.ts +34 -1
  24. package/src/config/modelProviders/spark.ts +16 -7
  25. package/src/config/modelProviders/stepfun.ts +13 -1
  26. package/src/config/modelProviders/taichu.ts +7 -2
  27. package/src/config/modelProviders/togetherai.ts +38 -2
  28. package/src/config/modelProviders/upstage.ts +11 -4
  29. package/src/config/modelProviders/zeroone.ts +5 -1
  30. package/src/config/modelProviders/zhipu.ts +20 -18
  31. package/src/libs/agent-runtime/openai/__snapshots__/index.test.ts.snap +13 -6
  32. package/src/libs/agent-runtime/qwen/index.test.ts +20 -4
  33. package/src/libs/agent-runtime/qwen/index.ts +1 -1
  34. package/src/libs/next-auth/auth.config.ts +3 -1
  35. package/src/libs/next-auth/sso-providers/auth0.ts +5 -3
  36. package/src/libs/next-auth/sso-providers/authelia.ts +6 -6
  37. package/src/libs/next-auth/sso-providers/authentik.ts +5 -3
  38. package/src/libs/next-auth/sso-providers/azure-ad.ts +5 -3
  39. package/src/libs/next-auth/sso-providers/cloudflare-zero-trust.ts +4 -3
  40. package/src/libs/next-auth/sso-providers/generic-oidc.ts +3 -3
  41. package/src/libs/next-auth/sso-providers/github.ts +4 -2
  42. package/src/libs/next-auth/sso-providers/logto.ts +3 -3
  43. package/src/libs/next-auth/sso-providers/zitadel.ts +5 -3
  44. package/src/migrations/FromV3ToV4/fixtures/ollama-output-v4.json +1 -0
  45. package/src/server/routers/edge/config/__snapshots__/index.test.ts.snap +24 -4
  46. package/src/server/routers/edge/config/index.test.ts +3 -11
  47. package/src/store/user/slices/modelList/__snapshots__/action.test.ts.snap +12 -0
  48. package/src/store/user/slices/modelList/action.test.ts +3 -7
  49. package/src/types/llm.ts +1 -0
  50. package/src/utils/__snapshots__/parseModels.test.ts.snap +32 -0
  51. package/src/utils/parseModels.test.ts +1 -28
@@ -1,13 +1,13 @@
1
1
  import { ModelProviderCard } from '@/types/llm';
2
2
 
3
- // ref https://open.bigmodel.cn/dev/howuse/model
3
+ // ref :https://open.bigmodel.cn/dev/howuse/model
4
4
  // api https://open.bigmodel.cn/dev/api#language
5
- // ref https://open.bigmodel.cn/modelcenter/square
5
+ // ref :https://open.bigmodel.cn/modelcenter/square
6
6
  const ZhiPu: ModelProviderCard = {
7
7
  chatModels: [
8
8
  {
9
9
  description:
10
- 'GLM-4-AllTools 是专门为支持智能体和相关任务而进一步优化的模型版本。它能够自主理解用户的意图,规划复杂的指令,并能够调用一个或多个工具(例如网络浏览器、代码解释器和文本生图像)以完成复杂的任务。',
10
+ 'GLM-4-AllTools 是一个多功能智能体模型,优化以支持复杂指令规划与工具调用,如网络浏览、代码解释和文本生成,适用于多任务执行。',
11
11
  displayName: 'GLM-4-AllTools',
12
12
  enabled: true,
13
13
  functionCall: true,
@@ -15,7 +15,8 @@ const ZhiPu: ModelProviderCard = {
15
15
  tokens: 128_000,
16
16
  },
17
17
  {
18
- description: '高智能旗舰:性能全面提升,长文本和复杂任务能力显著增强',
18
+ description:
19
+ 'GLM-4-Plus 作为高智能旗舰,具备强大的处理长文本和复杂任务的能力,性能全面提升。',
19
20
  displayName: 'GLM-4-Plus',
20
21
  enabled: true,
21
22
  functionCall: true,
@@ -23,7 +24,7 @@ const ZhiPu: ModelProviderCard = {
23
24
  tokens: 128_000,
24
25
  },
25
26
  {
26
- description: '高智能模型:适用于处理高度复杂和多样化的任务',
27
+ description: 'GLM-4-0520 是最新模型版本,专为高度复杂和多样化任务设计,表现卓越。',
27
28
  displayName: 'GLM-4-0520',
28
29
  enabled: true,
29
30
  functionCall: true,
@@ -31,14 +32,14 @@ const ZhiPu: ModelProviderCard = {
31
32
  tokens: 128_000,
32
33
  },
33
34
  {
34
- description: '旧版旗舰:发布于2024年1月16日,目前已被 GLM-4-0520 取代', // deprecated on 2025-06
35
+ description: 'GLM-4 是发布于2024年1月的旧旗舰版本,目前已被更强的 GLM-4-0520 取代。',
35
36
  displayName: 'GLM-4',
36
37
  functionCall: true,
37
38
  id: 'glm-4',
38
39
  tokens: 128_000,
39
40
  },
40
41
  {
41
- description: '性价比最高的版本,综合性能接近GLM-4,速度快,价格实惠',
42
+ description: 'GLM-4-Air 是性价比高的版本,性能接近GLM-4,提供快速度和实惠的价格。',
42
43
  displayName: 'GLM-4-Air',
43
44
  enabled: true,
44
45
  functionCall: true,
@@ -46,14 +47,14 @@ const ZhiPu: ModelProviderCard = {
46
47
  tokens: 128_000,
47
48
  },
48
49
  {
49
- description: 'GLM-4-Air 的高性能版本,效果不变,推理速度达到其2.6',
50
+ description: 'GLM-4-AirX 提供 GLM-4-Air 的高效版本,推理速度可达其2.6倍。',
50
51
  displayName: 'GLM-4-AirX',
51
52
  functionCall: true,
52
53
  id: 'glm-4-airx',
53
54
  tokens: 8192,
54
55
  },
55
56
  {
56
- description: '超长输入:专为处理超长文本和记忆型任务设计',
57
+ description: 'GLM-4-Long 支持超长文本输入,适合记忆型任务与大规模文档处理。',
57
58
  displayName: 'GLM-4-Long',
58
59
  enabled: true,
59
60
  functionCall: true,
@@ -61,7 +62,7 @@ const ZhiPu: ModelProviderCard = {
61
62
  tokens: 1_024_000,
62
63
  },
63
64
  {
64
- description: '适用简单任务,速度最快,价格最实惠的版本',
65
+ description: 'GLM-4-Flash 是处理简单任务的理想选择,速度最快且价格最优惠。',
65
66
  displayName: 'GLM-4-Flash',
66
67
  enabled: true,
67
68
  functionCall: true,
@@ -69,8 +70,7 @@ const ZhiPu: ModelProviderCard = {
69
70
  tokens: 128_000,
70
71
  },
71
72
  {
72
- description:
73
- '视频和图像理解:具备视频内容和多图片的理解能力',
73
+ description: 'GLM-4V-Plus 具备对视频内容及多图片的理解能力,适合多模态任务。',
74
74
  displayName: 'GLM-4V-Plus',
75
75
  enabled: true,
76
76
  id: 'glm-4v-plus',
@@ -78,8 +78,7 @@ const ZhiPu: ModelProviderCard = {
78
78
  vision: true,
79
79
  },
80
80
  {
81
- description:
82
- '图像理解:具备图像理解能力和推理能力',
81
+ description: 'GLM-4V 提供强大的图像理解与推理能力,支持多种视觉任务。',
83
82
  displayName: 'GLM-4V',
84
83
  enabled: true,
85
84
  id: 'glm-4v',
@@ -88,28 +87,31 @@ const ZhiPu: ModelProviderCard = {
88
87
  },
89
88
  {
90
89
  description:
91
- 'CodeGeeX是一款强大的AI编程助手,提供智能问答和代码补全功能,支持多种编程语言,帮助开发者提高编程效率。',
90
+ 'CodeGeeX-4 是强大的AI编程助手,支持多种编程语言的智能问答与代码补全,提升开发效率。',
92
91
  displayName: 'CodeGeeX-4',
93
92
  id: 'codegeex-4',
94
93
  tokens: 128_000,
95
94
  },
96
95
  {
97
- description:
98
- '支持基于人设的角色扮演、超长多轮的记忆、千人千面的角色对话,广泛应用于情感陪伴、游戏智能NPC、网红/明星/影视剧IP分身、数字人/虚拟主播、文字冒险游戏等拟人对话或游戏场景。',
96
+ description: 'CharGLM-3 专为角色扮演与情感陪伴设计,支持超长多轮记忆与个性化对话,应用广泛。',
99
97
  displayName: 'CharGLM-3',
100
98
  id: 'charglm-3',
101
99
  tokens: 4096,
102
100
  },
103
101
  {
104
- description: '心理模型:具备专业咨询能力,帮助用户理解情感并应对情绪问题',
102
+ description: 'Emohaa 是心理模型,具备专业咨询能力,帮助用户理解情感问题。',
105
103
  displayName: 'Emohaa',
106
104
  id: 'emohaa',
107
105
  tokens: 8192,
108
106
  },
109
107
  ],
110
108
  checkModel: 'glm-4-flash',
109
+ description:
110
+ '智谱 AI 提供多模态与语言模型的开放平台,支持广泛的AI应用场景,包括文本处理、图像理解与编程辅助等。',
111
111
  id: 'zhipu',
112
+ modelsUrl: 'https://open.bigmodel.cn/dev/howuse/model',
112
113
  name: 'ZhiPu',
114
+ url: 'https://zhipuai.cn',
113
115
  };
114
116
 
115
117
  export default ZhiPu;
@@ -14,7 +14,7 @@ exports[`LobeOpenAI > models > should get models 1`] = `
14
14
  "tokens": 16385,
15
15
  },
16
16
  {
17
- "description": "Currently points to gpt-3.5-turbo-16k-0613",
17
+ "description": "GPT 3.5 Turbo,适用于各种文本生成和理解任务,Currently points to gpt-3.5-turbo-0125",
18
18
  "displayName": "GPT-3.5 Turbo 16K",
19
19
  "id": "gpt-3.5-turbo-16k",
20
20
  "legacy": true,
@@ -25,7 +25,7 @@ exports[`LobeOpenAI > models > should get models 1`] = `
25
25
  "tokens": 16385,
26
26
  },
27
27
  {
28
- "description": "Currently points to gpt-3.5-turbo-16k-0613",
28
+ "description": "GPT-3.5 Turbo 是 OpenAI 的一款基础模型,结合了高效性和经济性,广泛用于文本生成、理解和分析,专为指导性提示进行调整,去除了与聊天相关的优化。",
29
29
  "id": "gpt-3.5-turbo-16k-0613",
30
30
  "legacy": true,
31
31
  "pricing": {
@@ -35,7 +35,7 @@ exports[`LobeOpenAI > models > should get models 1`] = `
35
35
  "tokens": 16385,
36
36
  },
37
37
  {
38
- "displayName": "GPT-4 Turbo Vision Preview (1106)",
38
+ "description": "最新的 GPT-4 Turbo 模型具备视觉功能。现在,视觉请求可以使用 JSON 模式和函数调用。 GPT-4 Turbo 是一个增强版本,为多模态任务提供成本效益高的支持。它在准确性和效率之间找到平衡,适合需要进行实时交互的应用程序场景。",
39
39
  "id": "gpt-4-1106-vision-preview",
40
40
  "pricing": {
41
41
  "input": 10,
@@ -48,6 +48,7 @@ exports[`LobeOpenAI > models > should get models 1`] = `
48
48
  "id": "gpt-3.5-turbo-instruct-0914",
49
49
  },
50
50
  {
51
+ "description": "最新的 GPT-4 Turbo 模型具备视觉功能。现在,视觉请求可以使用 JSON 模式和函数调用。 GPT-4 Turbo 是一个增强版本,为多模态任务提供成本效益高的支持。它在准确性和效率之间找到平衡,适合需要进行实时交互的应用程序场景。",
51
52
  "displayName": "GPT-4 Turbo Preview (0125)",
52
53
  "functionCall": true,
53
54
  "id": "gpt-4-0125-preview",
@@ -58,7 +59,7 @@ exports[`LobeOpenAI > models > should get models 1`] = `
58
59
  "tokens": 128000,
59
60
  },
60
61
  {
61
- "description": "Currently points to gpt-4-0125-preview",
62
+ "description": "最新的 GPT-4 Turbo 模型具备视觉功能。现在,视觉请求可以使用 JSON 模式和函数调用。 GPT-4 Turbo 是一个增强版本,为多模态任务提供成本效益高的支持。它在准确性和效率之间找到平衡,适合需要进行实时交互的应用程序场景。",
62
63
  "displayName": "GPT-4 Turbo Preview",
63
64
  "functionCall": true,
64
65
  "id": "gpt-4-turbo-preview",
@@ -69,6 +70,7 @@ exports[`LobeOpenAI > models > should get models 1`] = `
69
70
  "tokens": 128000,
70
71
  },
71
72
  {
73
+ "description": "GPT 3.5 Turbo,适用于各种文本生成和理解任务,Currently points to gpt-3.5-turbo-0125",
72
74
  "displayName": "GPT-3.5 Turbo Instruct",
73
75
  "id": "gpt-3.5-turbo-instruct",
74
76
  "pricing": {
@@ -81,6 +83,7 @@ exports[`LobeOpenAI > models > should get models 1`] = `
81
83
  "id": "gpt-3.5-turbo-0301",
82
84
  },
83
85
  {
86
+ "description": "GPT-3.5 Turbo 是 OpenAI 的一款基础模型,结合了高效性和经济性,广泛用于文本生成、理解和分析,专为指导性提示进行调整,去除了与聊天相关的优化。",
84
87
  "displayName": "GPT-3.5 Turbo (0613)",
85
88
  "id": "gpt-3.5-turbo-0613",
86
89
  "legacy": true,
@@ -91,6 +94,7 @@ exports[`LobeOpenAI > models > should get models 1`] = `
91
94
  "tokens": 4096,
92
95
  },
93
96
  {
97
+ "description": "GPT 3.5 Turbo,适用于各种文本生成和理解任务,Currently points to gpt-3.5-turbo-0125",
94
98
  "displayName": "GPT-3.5 Turbo (1106)",
95
99
  "functionCall": true,
96
100
  "id": "gpt-3.5-turbo-1106",
@@ -101,6 +105,7 @@ exports[`LobeOpenAI > models > should get models 1`] = `
101
105
  "tokens": 16385,
102
106
  },
103
107
  {
108
+ "description": "最新的 GPT-4 Turbo 模型具备视觉功能。现在,视觉请求可以使用 JSON 模式和函数调用。 GPT-4 Turbo 是一个增强版本,为多模态任务提供成本效益高的支持。它在准确性和效率之间找到平衡,适合需要进行实时交互的应用程序场景。",
104
109
  "displayName": "GPT-4 Turbo Preview (1106)",
105
110
  "functionCall": true,
106
111
  "id": "gpt-4-1106-preview",
@@ -111,7 +116,7 @@ exports[`LobeOpenAI > models > should get models 1`] = `
111
116
  "tokens": 128000,
112
117
  },
113
118
  {
114
- "description": "Currently points to gpt-4-1106-vision-preview",
119
+ "description": "最新的 GPT-4 Turbo 模型具备视觉功能。现在,视觉请求可以使用 JSON 模式和函数调用。 GPT-4 Turbo 是一个增强版本,为多模态任务提供成本效益高的支持。它在准确性和效率之间找到平衡,适合需要进行实时交互的应用程序场景。",
115
120
  "displayName": "GPT-4 Turbo Vision Preview",
116
121
  "id": "gpt-4-vision-preview",
117
122
  "pricing": {
@@ -122,7 +127,7 @@ exports[`LobeOpenAI > models > should get models 1`] = `
122
127
  "vision": true,
123
128
  },
124
129
  {
125
- "description": "Currently points to gpt-4-0613",
130
+ "description": "GPT-4 提供了一个更大的上下文窗口,能够处理更长的文本输入,适用于需要广泛信息整合和数据分析的场景。",
126
131
  "displayName": "GPT-4",
127
132
  "functionCall": true,
128
133
  "id": "gpt-4",
@@ -133,6 +138,7 @@ exports[`LobeOpenAI > models > should get models 1`] = `
133
138
  "tokens": 8192,
134
139
  },
135
140
  {
141
+ "description": "GPT 3.5 Turbo,适用于各种文本生成和理解任务,Currently points to gpt-3.5-turbo-0125",
136
142
  "displayName": "GPT-3.5 Turbo (0125)",
137
143
  "functionCall": true,
138
144
  "id": "gpt-3.5-turbo-0125",
@@ -143,6 +149,7 @@ exports[`LobeOpenAI > models > should get models 1`] = `
143
149
  "tokens": 16385,
144
150
  },
145
151
  {
152
+ "description": "GPT-4 提供了一个更大的上下文窗口,能够处理更长的文本输入,适用于需要广泛信息整合和数据分析的场景。",
146
153
  "displayName": "GPT-4 (0613)",
147
154
  "functionCall": true,
148
155
  "id": "gpt-4-0613",
@@ -161,13 +161,11 @@ describe('LobeQwenAI', () => {
161
161
  vi.spyOn(instance['client'].chat.completions, 'create').mockResolvedValue(
162
162
  new ReadableStream() as any,
163
163
  );
164
-
165
164
  await instance.chat({
166
165
  messages: [{ content: 'Hello', role: 'user' }],
167
166
  model: 'qwen-turbo',
168
167
  temperature: temp,
169
168
  });
170
-
171
169
  expect(instance['client'].chat.completions.create).toHaveBeenCalledWith(
172
170
  expect.objectContaining({
173
171
  messages: expect.any(Array),
@@ -183,13 +181,11 @@ describe('LobeQwenAI', () => {
183
181
  vi.spyOn(instance['client'].chat.completions, 'create').mockResolvedValue(
184
182
  new ReadableStream() as any,
185
183
  );
186
-
187
184
  await instance.chat({
188
185
  messages: [{ content: 'Hello', role: 'user' }],
189
186
  model: 'qwen-turbo',
190
187
  temperature: 1.5,
191
188
  });
192
-
193
189
  expect(instance['client'].chat.completions.create).toHaveBeenCalledWith(
194
190
  expect.objectContaining({
195
191
  messages: expect.any(Array),
@@ -199,6 +195,26 @@ describe('LobeQwenAI', () => {
199
195
  expect.any(Object),
200
196
  );
201
197
  });
198
+
199
+ it('should set temperature to Float', async () => {
200
+ const createMock = vi.fn().mockResolvedValue(new ReadableStream() as any);
201
+ vi.spyOn(instance['client'].chat.completions, 'create').mockImplementation(createMock);
202
+ await instance.chat({
203
+ messages: [{ content: 'Hello', role: 'user' }],
204
+ model: 'qwen-turbo',
205
+ temperature: 1,
206
+ });
207
+ expect(instance['client'].chat.completions.create).toHaveBeenCalledWith(
208
+ expect.objectContaining({
209
+ messages: expect.any(Array),
210
+ model: 'qwen-turbo',
211
+ temperature: expect.any(Number),
212
+ }),
213
+ expect.any(Object),
214
+ );
215
+ const callArgs = createMock.mock.calls[0][0];
216
+ expect(Number.isInteger(callArgs.temperature)).toBe(false); // Temperature is always not an integer
217
+ });
202
218
  });
203
219
 
204
220
  describe('Error', () => {
@@ -111,7 +111,7 @@ export class LobeQwenAI implements LobeRuntimeAI {
111
111
  temperature:
112
112
  temperature === 0 || temperature >= 2
113
113
  ? undefined
114
- : temperature,
114
+ : (temperature === 1 ? 0.999 : temperature), // 'temperature' must be Float
115
115
  top_p: top_p && top_p >= 1 ? 0.999 : top_p,
116
116
  };
117
117
 
@@ -1,4 +1,5 @@
1
1
  import type { NextAuthConfig } from 'next-auth';
2
+ import urlJoin from 'url-join';
2
3
 
3
4
  import { authEnv } from '@/config/auth';
4
5
 
@@ -40,6 +41,7 @@ export default {
40
41
  },
41
42
  },
42
43
  providers: initSSOProviders(),
44
+ redirectProxyUrl: process.env.APP_URL ? urlJoin(process.env.APP_URL, '/api/auth') : undefined,
43
45
  secret: authEnv.NEXT_AUTH_SECRET,
44
- trustHost: true,
46
+ trustHost: process.env?.AUTH_TRUST_HOST ? process.env.AUTH_TRUST_HOST === 'true' : true,
45
47
  } satisfies NextAuthConfig;
@@ -11,9 +11,11 @@ const provider = {
11
11
  // Specify auth scope, at least include 'openid email'
12
12
  // all scopes in Auth0 ref: https://auth0.com/docs/get-started/apis/scopes/openid-connect-scopes#standard-claims
13
13
  authorization: { params: { scope: 'openid email profile' } },
14
- clientId: authEnv.AUTH0_CLIENT_ID,
15
- clientSecret: authEnv.AUTH0_CLIENT_SECRET,
16
- issuer: authEnv.AUTH0_ISSUER,
14
+ // TODO(NextAuth ENVs Migration): Remove once nextauth envs migration time end
15
+ clientId: authEnv.AUTH0_CLIENT_ID ?? process.env.AUTH_AUTH0_ID,
16
+ clientSecret: authEnv.AUTH0_CLIENT_SECRET ?? process.env.AUTH_AUTH0_SECRET,
17
+ issuer: authEnv.AUTH0_ISSUER ?? process.env.AUTH_AUTH0_ISSUER,
18
+ // Remove End
17
19
  profile(profile) {
18
20
  return {
19
21
  email: profile.email,
@@ -6,11 +6,11 @@ import { CommonProviderConfig } from './sso.config';
6
6
 
7
7
  export type AutheliaProfile = {
8
8
  // The users display name
9
- email: string;
9
+ email: string;
10
10
  // The users email
11
- groups: string[];
11
+ groups: string[];
12
12
  // The username the user used to login with
13
- name: string;
13
+ name: string;
14
14
  preferred_username: string; // The users groups
15
15
  sub: string; // The users id
16
16
  };
@@ -21,10 +21,10 @@ const provider = {
21
21
  ...CommonProviderConfig,
22
22
  authorization: { params: { scope: 'openid email profile' } },
23
23
  checks: ['state', 'pkce'],
24
- clientId: authEnv.AUTHELIA_CLIENT_ID,
25
- clientSecret: authEnv.AUTHELIA_CLIENT_SECRET,
24
+ clientId: authEnv.AUTHELIA_CLIENT_ID ?? process.env.AUTH_AUTHELIA_ID,
25
+ clientSecret: authEnv.AUTHELIA_CLIENT_SECRET ?? process.env.AUTH_AUTHELIA_SECRET,
26
26
  id: 'authelia',
27
- issuer: authEnv.AUTHELIA_ISSUER,
27
+ issuer: authEnv.AUTHELIA_ISSUER ?? process.env.AUTH_AUTHELIA_ISSUER,
28
28
  name: 'Authelia',
29
29
  profile(profile) {
30
30
  return {
@@ -11,9 +11,11 @@ const provider = {
11
11
  // Specify auth scope, at least include 'openid email'
12
12
  // all scopes in Authentik ref: https://goauthentik.io/docs/providers/oauth2
13
13
  authorization: { params: { scope: 'openid email profile' } },
14
- clientId: authEnv.AUTHENTIK_CLIENT_ID,
15
- clientSecret: authEnv.AUTHENTIK_CLIENT_SECRET,
16
- issuer: authEnv.AUTHENTIK_ISSUER,
14
+ // TODO(NextAuth ENVs Migration): Remove once nextauth envs migration time end
15
+ clientId: authEnv.AUTHENTIK_CLIENT_ID ?? process.env.AUTH_AUTHENTIK_ID,
16
+ clientSecret: authEnv.AUTHENTIK_CLIENT_SECRET ?? process.env.AUTH_AUTHENTIK_SECRET,
17
+ issuer: authEnv.AUTHENTIK_ISSUER ?? process.env.AUTH_AUTHENTIK_ISSUER,
18
+ // Remove end
17
19
  // TODO(NextAuth): map unique user id to `providerAccountId` field
18
20
  // profile(profile) {
19
21
  // return {
@@ -11,9 +11,11 @@ const provider = {
11
11
  // Specify auth scope, at least include 'openid email'
12
12
  // all scopes in Azure AD ref: https://learn.microsoft.com/en-us/entra/identity-platform/scopes-oidc#openid-connect-scopes
13
13
  authorization: { params: { scope: 'openid email profile' } },
14
- clientId: authEnv.AZURE_AD_CLIENT_ID,
15
- clientSecret: authEnv.AZURE_AD_CLIENT_SECRET,
16
- tenantId: authEnv.AZURE_AD_TENANT_ID,
14
+ // TODO(NextAuth ENVs Migration): Remove once nextauth envs migration time end
15
+ clientId: authEnv.AZURE_AD_CLIENT_ID ?? process.env.AUTH_AZURE_AD_ID,
16
+ clientSecret: authEnv.AZURE_AD_CLIENT_SECRET ?? process.env.AUTH_AZURE_AD_SECRET,
17
+ tenantId: authEnv.AZURE_AD_TENANT_ID ?? process.env.AUTH_AZURE_AD_TENANT_ID,
18
+ // Remove end
17
19
  // TODO(NextAuth): map unique user id to `providerAccountId` field
18
20
  // profile(profile) {
19
21
  // return {
@@ -16,10 +16,11 @@ const provider = {
16
16
  ...CommonProviderConfig,
17
17
  authorization: { params: { scope: 'openid email profile' } },
18
18
  checks: ['state', 'pkce'],
19
- clientId: authEnv.CLOUDFLARE_ZERO_TRUST_CLIENT_ID,
20
- clientSecret: authEnv.CLOUDFLARE_ZERO_TRUST_CLIENT_SECRET,
19
+ clientId: authEnv.CLOUDFLARE_ZERO_TRUST_CLIENT_ID ?? process.env.AUTH_CLOUDFLARE_ZERO_TRUST_ID,
20
+ clientSecret:
21
+ authEnv.CLOUDFLARE_ZERO_TRUST_CLIENT_SECRET ?? process.env.AUTH_CLOUDFLARE_ZERO_TRUST_SECRET,
21
22
  id: 'cloudflare-zero-trust',
22
- issuer: authEnv.CLOUDFLARE_ZERO_TRUST_ISSUER,
23
+ issuer: authEnv.CLOUDFLARE_ZERO_TRUST_ISSUER ?? process.env.AUTH_CLOUDFLARE_ZERO_TRUST_ISSUER,
23
24
  name: 'Cloudflare Zero Trust',
24
25
  profile(profile) {
25
26
  return {
@@ -19,10 +19,10 @@ const provider = {
19
19
  ...CommonProviderConfig,
20
20
  authorization: { params: { scope: 'email openid profile' } },
21
21
  checks: ['state', 'pkce'],
22
- clientId: authEnv.GENERIC_OIDC_CLIENT_ID,
23
- clientSecret: authEnv.GENERIC_OIDC_CLIENT_SECRET,
22
+ clientId: authEnv.GENERIC_OIDC_CLIENT_ID ?? process.env.AUTH_GENERIC_OIDC_ID,
23
+ clientSecret: authEnv.GENERIC_OIDC_CLIENT_SECRET ?? process.env.AUTH_GENERIC_OIDC_SECRET,
24
24
  id: 'generic-oidc',
25
- issuer: authEnv.GENERIC_OIDC_ISSUER,
25
+ issuer: authEnv.GENERIC_OIDC_ISSUER ?? process.env.AUTH_GENERIC_OIDC_ISSUER,
26
26
  name: 'Generic OIDC',
27
27
  profile(profile) {
28
28
  return {
@@ -10,8 +10,10 @@ const provider = {
10
10
  ...CommonProviderConfig,
11
11
  // Specify auth scope, at least include 'openid email'
12
12
  authorization: { params: { scope: 'read:user user:email' } },
13
- clientId: authEnv.GITHUB_CLIENT_ID,
14
- clientSecret: authEnv.GITHUB_CLIENT_SECRET,
13
+ // TODO(NextAuth ENVs Migration): Remove once nextauth envs migration time end
14
+ clientId: authEnv.GITHUB_CLIENT_ID ?? process.env.AUTH_GITHUB_ID,
15
+ clientSecret: authEnv.GITHUB_CLIENT_SECRET ?? process.env.AUTH_GITHUB_SECRET,
16
+ // Remove end
15
17
  profile: (profile) => {
16
18
  return {
17
19
  email: profile.email,
@@ -41,9 +41,9 @@ const provider = {
41
41
  },
42
42
  // You can get the issuer value from the Logto Application Details page,
43
43
  // in the field "Issuer endpoint"
44
- clientId: authEnv.LOGTO_CLIENT_ID,
45
- clientSecret: authEnv.LOGTO_CLIENT_SECRET,
46
- issuer: authEnv.LOGTO_ISSUER,
44
+ clientId: authEnv.LOGTO_CLIENT_ID ?? process.env.AUTH_LOGTO_ID,
45
+ clientSecret: authEnv.LOGTO_CLIENT_SECRET ?? process.env.AUTH_LOGTO_SECRET,
46
+ issuer: authEnv.LOGTO_ISSUER ?? process.env.AUTH_LOGTO_ISSUER,
47
47
  }),
48
48
  };
49
49
 
@@ -7,9 +7,11 @@ const provider = {
7
7
  provider: Zitadel({
8
8
  // Available scopes in ZITADEL: https://zitadel.com/docs/apis/openidoauth/scopes
9
9
  authorization: { params: { scope: 'openid email profile' } },
10
- clientId: authEnv.ZITADEL_CLIENT_ID,
11
- clientSecret: authEnv.ZITADEL_CLIENT_SECRET,
12
- issuer: authEnv.ZITADEL_ISSUER,
10
+ // TODO(NextAuth ENVs Migration): Remove once nextauth envs migration time end
11
+ clientId: authEnv.ZITADEL_CLIENT_ID ?? process.env.AUTH_ZITADEL_ID,
12
+ clientSecret: authEnv.ZITADEL_CLIENT_SECRET ?? process.env.AUTH_ZITADEL_SECRET,
13
+ issuer: authEnv.ZITADEL_ISSUER ?? process.env.AUTH_ZITADEL_ISSUER,
14
+ // Remove end
13
15
  // TODO(NextAuth): map unique user id to `providerAccountId` field
14
16
  // profile(profile) {
15
17
  // return {
@@ -45,6 +45,7 @@
45
45
  "endpoint": "",
46
46
  "customModelCards": [
47
47
  {
48
+ "description": "LLaVA 是结合视觉编码器和 Vicuna 的多模态模型,用于强大的视觉和语言理解。",
48
49
  "displayName": "LLaVA 7B",
49
50
  "enabled": true,
50
51
  "id": "llava",
@@ -20,6 +20,7 @@ exports[`configRouter > getGlobalConfig > Model Provider env > OPENAI_MODEL_LIST
20
20
  "id": "claude-2",
21
21
  },
22
22
  {
23
+ "description": "最新的 GPT-4 Turbo 模型具备视觉功能。现在,视觉请求可以使用 JSON 模式和函数调用。 GPT-4 Turbo 是一个增强版本,为多模态任务提供成本效益高的支持。它在准确性和效率之间找到平衡,适合需要进行实时交互的应用程序场景。",
23
24
  "displayName": "gpt-4-32k",
24
25
  "enabled": true,
25
26
  "functionCall": true,
@@ -37,6 +38,7 @@ exports[`configRouter > getGlobalConfig > Model Provider env > OPENAI_MODEL_LIST
37
38
  exports[`configRouter > getGlobalConfig > Model Provider env > OPENAI_MODEL_LIST > should work correct with gpt-4 1`] = `
38
39
  [
39
40
  {
41
+ "description": "GPT 3.5 Turbo,适用于各种文本生成和理解任务,Currently points to gpt-3.5-turbo-0125",
40
42
  "displayName": "GPT-3.5 Turbo (1106)",
41
43
  "enabled": true,
42
44
  "functionCall": true,
@@ -60,7 +62,7 @@ exports[`configRouter > getGlobalConfig > Model Provider env > OPENAI_MODEL_LIST
60
62
  "tokens": 16385,
61
63
  },
62
64
  {
63
- "description": "Currently points to gpt-3.5-turbo-16k-0613",
65
+ "description": "GPT 3.5 Turbo,适用于各种文本生成和理解任务,Currently points to gpt-3.5-turbo-0125",
64
66
  "displayName": "GPT-3.5 Turbo 16K",
65
67
  "enabled": true,
66
68
  "id": "gpt-3.5-turbo-16k",
@@ -72,7 +74,7 @@ exports[`configRouter > getGlobalConfig > Model Provider env > OPENAI_MODEL_LIST
72
74
  "tokens": 16385,
73
75
  },
74
76
  {
75
- "description": "Currently points to gpt-4-0613",
77
+ "description": "GPT-4 提供了一个更大的上下文窗口,能够处理更长的文本输入,适用于需要广泛信息整合和数据分析的场景。",
76
78
  "displayName": "GPT-4",
77
79
  "enabled": true,
78
80
  "functionCall": true,
@@ -84,7 +86,7 @@ exports[`configRouter > getGlobalConfig > Model Provider env > OPENAI_MODEL_LIST
84
86
  "tokens": 8192,
85
87
  },
86
88
  {
87
- "description": "Currently points to gpt-4-32k-0613",
89
+ "description": "GPT-4 提供了一个更大的上下文窗口,能够处理更长的文本输入,适用于需要广泛信息整合和数据分析的场景。",
88
90
  "displayName": "GPT-4 32K",
89
91
  "enabled": true,
90
92
  "functionCall": true,
@@ -96,6 +98,7 @@ exports[`configRouter > getGlobalConfig > Model Provider env > OPENAI_MODEL_LIST
96
98
  "tokens": 32768,
97
99
  },
98
100
  {
101
+ "description": "最新的 GPT-4 Turbo 模型具备视觉功能。现在,视觉请求可以使用 JSON 模式和函数调用。 GPT-4 Turbo 是一个增强版本,为多模态任务提供成本效益高的支持。它在准确性和效率之间找到平衡,适合需要进行实时交互的应用程序场景。",
99
102
  "displayName": "GPT-4 Turbo Preview (1106)",
100
103
  "enabled": true,
101
104
  "functionCall": true,
@@ -107,7 +110,7 @@ exports[`configRouter > getGlobalConfig > Model Provider env > OPENAI_MODEL_LIST
107
110
  "tokens": 128000,
108
111
  },
109
112
  {
110
- "description": "Currently points to gpt-4-1106-vision-preview",
113
+ "description": "最新的 GPT-4 Turbo 模型具备视觉功能。现在,视觉请求可以使用 JSON 模式和函数调用。 GPT-4 Turbo 是一个增强版本,为多模态任务提供成本效益高的支持。它在准确性和效率之间找到平衡,适合需要进行实时交互的应用程序场景。",
111
114
  "displayName": "GPT-4 Turbo Vision Preview",
112
115
  "enabled": true,
113
116
  "id": "gpt-4-vision-preview",
@@ -121,6 +124,21 @@ exports[`configRouter > getGlobalConfig > Model Provider env > OPENAI_MODEL_LIST
121
124
  ]
122
125
  `;
123
126
 
127
+ exports[`configRouter > getGlobalConfig > Model Provider env > OPENAI_MODEL_LIST > show the hidden model 1`] = `
128
+ {
129
+ "description": "最新的 GPT-4 Turbo 模型具备视觉功能。现在,视觉请求可以使用 JSON 模式和函数调用。 GPT-4 Turbo 是一个增强版本,为多模态任务提供成本效益高的支持。它在准确性和效率之间找到平衡,适合需要进行实时交互的应用程序场景。",
130
+ "displayName": "GPT-4 Turbo Preview (1106)",
131
+ "enabled": true,
132
+ "functionCall": true,
133
+ "id": "gpt-4-1106-preview",
134
+ "pricing": {
135
+ "input": 10,
136
+ "output": 30,
137
+ },
138
+ "tokens": 128000,
139
+ }
140
+ `;
141
+
124
142
  exports[`configRouter > getGlobalConfig > Model Provider env > OPENROUTER_MODEL_LIST > custom deletion, addition, and renaming of models 1`] = `
125
143
  {
126
144
  "enabled": false,
@@ -130,6 +148,7 @@ exports[`configRouter > getGlobalConfig > Model Provider env > OPENROUTER_MODEL_
130
148
  ],
131
149
  "serverModelCards": [
132
150
  {
151
+ "description": "Google 的 Gemma 7B 具有出色的计算效率,适适用于多种硬件架构,如GPU和TPU。",
133
152
  "displayName": "Google: Gemma 7B (free)",
134
153
  "enabled": true,
135
154
  "functionCall": false,
@@ -138,6 +157,7 @@ exports[`configRouter > getGlobalConfig > Model Provider env > OPENROUTER_MODEL_
138
157
  "vision": false,
139
158
  },
140
159
  {
160
+ "description": "Mistral 7B Instruct 是一款高效的多语言模型,优化用于对话和问答,能在资源受限的环境中表现出色。",
141
161
  "displayName": "Mistral 7B Instruct (free)",
142
162
  "enabled": true,
143
163
  "functionCall": false,
@@ -90,17 +90,9 @@ describe('configRouter', () => {
90
90
 
91
91
  const result = response.languageModel?.openai?.serverModelCards;
92
92
 
93
- expect(result?.find((o) => o.id === 'gpt-4-1106-preview')).toEqual({
94
- displayName: 'GPT-4 Turbo Preview (1106)',
95
- functionCall: true,
96
- enabled: true,
97
- id: 'gpt-4-1106-preview',
98
- tokens: 128000,
99
- pricing: {
100
- input: 10,
101
- output: 30,
102
- },
103
- });
93
+ const model = result?.find((o) => o.id === 'gpt-4-1106-preview');
94
+
95
+ expect(model).toMatchSnapshot();
104
96
 
105
97
  process.env.OPENAI_MODEL_LIST = '';
106
98
  });
@@ -0,0 +1,12 @@
1
+ // Vitest Snapshot v1, https://vitest.dev/guide/snapshot.html
2
+
3
+ exports[`LLMSettingsSliceAction > refreshModelProviderList > visible 1`] = `
4
+ {
5
+ "description": "LLaVA 是结合视觉编码器和 Vicuna 的多模态模型,用于强大的视觉和语言理解。",
6
+ "displayName": "LLaVA 7B",
7
+ "enabled": true,
8
+ "id": "llava",
9
+ "tokens": 4096,
10
+ "vision": true,
11
+ }
12
+ `;
@@ -154,13 +154,9 @@ describe('LLMSettingsSliceAction', () => {
154
154
 
155
155
  const ollamaList = result.current.modelProviderList.find((r) => r.id === 'ollama');
156
156
  // Assert that setModelProviderConfig was not called
157
- expect(ollamaList?.chatModels.find((c) => c.id === 'llava')).toEqual({
158
- displayName: 'LLaVA 7B',
159
- enabled: true,
160
- id: 'llava',
161
- tokens: 4096,
162
- vision: true,
163
- });
157
+ const model = ollamaList?.chatModels.find((c) => c.id === 'llava');
158
+
159
+ expect(model).toMatchSnapshot();
164
160
  });
165
161
 
166
162
  it('modelProviderListForModelSelect should return only enabled providers', () => {
package/src/types/llm.ts CHANGED
@@ -80,6 +80,7 @@ export interface ModelProviderCard {
80
80
  * @default false
81
81
  */
82
82
  defaultShowBrowserRequest?: boolean;
83
+ description?: string;
83
84
  /**
84
85
  * some provider server like stepfun and aliyun don't support browser request,
85
86
  * So we should disable it