@lobehub/chat 1.80.0 → 1.80.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (37)
  1. package/CHANGELOG.md +58 -0
  2. package/changelog/v1.json +21 -0
  3. package/docs/development/basic/feature-development.mdx +370 -619
  4. package/docs/development/basic/feature-development.zh-CN.mdx +368 -611
  5. package/package.json +1 -1
  6. package/src/app/[variants]/oauth/consent/[uid]/Client.tsx +36 -23
  7. package/src/app/[variants]/oauth/consent/[uid]/page.tsx +2 -0
  8. package/src/config/aiModels/azure.ts +79 -1
  9. package/src/config/aiModels/azureai.ts +181 -0
  10. package/src/config/aiModels/google.ts +36 -2
  11. package/src/config/aiModels/groq.ts +31 -3
  12. package/src/config/aiModels/hunyuan.ts +54 -18
  13. package/src/config/aiModels/moonshot.ts +17 -17
  14. package/src/config/aiModels/novita.ts +25 -30
  15. package/src/config/aiModels/siliconcloud.ts +80 -2
  16. package/src/config/aiModels/stepfun.ts +40 -31
  17. package/src/config/aiModels/tencentcloud.ts +7 -6
  18. package/src/config/aiModels/volcengine.ts +1 -0
  19. package/src/config/aiModels/zhipu.ts +91 -27
  20. package/src/const/settings/knowledge.ts +2 -2
  21. package/src/features/ChatInput/ActionBar/Upload/ClientMode.tsx +7 -6
  22. package/src/hooks/useModelSupportFiles.ts +15 -0
  23. package/src/libs/agent-runtime/stepfun/index.ts +7 -1
  24. package/src/libs/agent-runtime/zhipu/index.ts +17 -10
  25. package/src/libs/oidc-provider/config.ts +0 -3
  26. package/src/libs/trpc/edge/index.ts +0 -4
  27. package/src/libs/trpc/lambda/context.ts +90 -6
  28. package/src/libs/trpc/lambda/index.ts +2 -1
  29. package/src/libs/trpc/lambda/middleware/oidcAuth.ts +14 -0
  30. package/src/libs/trpc/middleware/userAuth.ts +2 -4
  31. package/src/server/services/oidc/index.ts +71 -0
  32. package/src/store/aiInfra/slices/aiModel/selectors.ts +7 -0
  33. package/src/utils/parseModels.test.ts +19 -3
  34. package/src/utils/server/__tests__/auth.test.ts +45 -1
  35. package/src/utils/server/auth.ts +26 -2
  36. package/docs/development/basic/feature-development-new.mdx +0 -465
  37. package/docs/development/basic/feature-development-new.zh-CN.mdx +0 -465
@@ -8,7 +8,6 @@ const zhipuChatModels: AIChatModelCard[] = [
8
8
  contextWindowTokens: 16_384,
9
9
  description: 'GLM-Zero-Preview具备强大的复杂推理能力,在逻辑推理、数学、编程等领域表现优异。',
10
10
  displayName: 'GLM-Zero-Preview',
11
- enabled: true,
12
11
  id: 'glm-zero-preview',
13
12
  pricing: {
14
13
  currency: 'CNY',
@@ -17,6 +16,67 @@ const zhipuChatModels: AIChatModelCard[] = [
17
16
  },
18
17
  type: 'chat',
19
18
  },
19
+ {
20
+ abilities: {
21
+ reasoning: true,
22
+ search: true,
23
+ },
24
+ contextWindowTokens: 32_000,
25
+ description: '推理模型: 具备强大推理能力,适用于需要深度推理的任务。',
26
+ displayName: 'GLM-Z1-Air',
27
+ id: 'glm-z1-air',
28
+ maxOutput: 30_000,
29
+ pricing: {
30
+ currency: 'CNY',
31
+ input: 0.5,
32
+ output: 0.5,
33
+ },
34
+ settings: {
35
+ searchImpl: 'params',
36
+ },
37
+ type: 'chat',
38
+ },
39
+ {
40
+ abilities: {
41
+ reasoning: true,
42
+ search: true,
43
+ },
44
+ contextWindowTokens: 32_000,
45
+ description: '极速推理:具有超快的推理速度和强大的推理效果。',
46
+ displayName: 'GLM-Z1-AirX',
47
+ id: 'glm-z1-airx',
48
+ maxOutput: 30_000,
49
+ pricing: {
50
+ currency: 'CNY',
51
+ input: 5,
52
+ output: 5,
53
+ },
54
+ settings: {
55
+ searchImpl: 'params',
56
+ },
57
+ type: 'chat',
58
+ },
59
+ {
60
+ abilities: {
61
+ reasoning: true,
62
+ search: true,
63
+ },
64
+ contextWindowTokens: 32_000,
65
+ description: 'GLM-Z1 系列具备强大的复杂推理能力,在逻辑推理、数学、编程等领域表现优异。最大上下文长度为32K。',
66
+ displayName: 'GLM-Z1-Flash',
67
+ enabled: true,
68
+ id: 'glm-z1-flash',
69
+ maxOutput: 30_000,
70
+ pricing: {
71
+ currency: 'CNY',
72
+ input: 0,
73
+ output: 0,
74
+ },
75
+ settings: {
76
+ searchImpl: 'params',
77
+ },
78
+ type: 'chat',
79
+ },
20
80
  {
21
81
  abilities: {
22
82
  functionCall: true,
@@ -24,9 +84,10 @@ const zhipuChatModels: AIChatModelCard[] = [
24
84
  },
25
85
  contextWindowTokens: 128_000,
26
86
  description: 'GLM-4-Flash 是处理简单任务的理想选择,速度最快且免费。',
27
- displayName: 'GLM-4-Flash',
87
+ displayName: 'GLM-4-Flash-250414',
28
88
  enabled: true,
29
- id: 'glm-4-flash',
89
+ id: 'glm-4-flash-250414',
90
+ maxOutput: 4000,
30
91
  pricing: {
31
92
  currency: 'CNY',
32
93
  input: 0,
@@ -45,8 +106,8 @@ const zhipuChatModels: AIChatModelCard[] = [
45
106
  contextWindowTokens: 128_000,
46
107
  description: 'GLM-4-FlashX 是Flash的增强版本,超快推理速度。',
47
108
  displayName: 'GLM-4-FlashX',
48
- enabled: true,
49
109
  id: 'glm-4-flashx',
110
+ maxOutput: 4000,
50
111
  pricing: {
51
112
  currency: 'CNY',
52
113
  input: 0.1,
@@ -66,6 +127,7 @@ const zhipuChatModels: AIChatModelCard[] = [
66
127
  description: 'GLM-4-Long 支持超长文本输入,适合记忆型任务与大规模文档处理。',
67
128
  displayName: 'GLM-4-Long',
68
129
  id: 'glm-4-long',
130
+ maxOutput: 4000,
69
131
  pricing: {
70
132
  currency: 'CNY',
71
133
  input: 1,
@@ -81,15 +143,15 @@ const zhipuChatModels: AIChatModelCard[] = [
81
143
  functionCall: true,
82
144
  search: true,
83
145
  },
84
- contextWindowTokens: 128_000,
146
+ contextWindowTokens: 32_000,
85
147
  description: 'GLM-4-Air 是性价比高的版本,性能接近GLM-4,提供快速度和实惠的价格。',
86
- displayName: 'GLM-4-Air',
87
- enabled: true,
88
- id: 'glm-4-air',
148
+ displayName: 'GLM-4-Air-250414',
149
+ id: 'glm-4-air-250414',
150
+ maxOutput: 4000,
89
151
  pricing: {
90
152
  currency: 'CNY',
91
- input: 1,
92
- output: 1,
153
+ input: 0.5,
154
+ output: 0.5,
93
155
  },
94
156
  settings: {
95
157
  searchImpl: 'params',
@@ -104,8 +166,8 @@ const zhipuChatModels: AIChatModelCard[] = [
104
166
  contextWindowTokens: 8192,
105
167
  description: 'GLM-4-AirX 提供 GLM-4-Air 的高效版本,推理速度可达其2.6倍。',
106
168
  displayName: 'GLM-4-AirX',
107
- enabled: true,
108
169
  id: 'glm-4-airx',
170
+ maxOutput: 4000,
109
171
  pricing: {
110
172
  currency: 'CNY',
111
173
  input: 10,
@@ -144,8 +206,8 @@ const zhipuChatModels: AIChatModelCard[] = [
144
206
  contextWindowTokens: 128_000,
145
207
  description: 'GLM-4-Plus 作为高智能旗舰,具备强大的处理长文本和复杂任务的能力,性能全面提升。',
146
208
  displayName: 'GLM-4-Plus',
147
- enabled: true,
148
209
  id: 'glm-4-plus',
210
+ maxOutput: 4000,
149
211
  pricing: {
150
212
  currency: 'CNY',
151
213
  input: 50,
@@ -164,7 +226,7 @@ const zhipuChatModels: AIChatModelCard[] = [
164
226
  contextWindowTokens: 128_000,
165
227
  description: 'GLM-4-0520 是最新模型版本,专为高度复杂和多样化任务设计,表现卓越。',
166
228
  displayName: 'GLM-4-0520',
167
- id: 'glm-4-0520',
229
+ id: 'glm-4-0520', // 弃用时间 2025年12月30日
168
230
  pricing: {
169
231
  currency: 'CNY',
170
232
  input: 100,
@@ -183,7 +245,7 @@ const zhipuChatModels: AIChatModelCard[] = [
183
245
  contextWindowTokens: 128_000,
184
246
  description: 'GLM-4 是发布于2024年1月的旧旗舰版本,目前已被更强的 GLM-4-0520 取代。',
185
247
  displayName: 'GLM-4',
186
- id: 'glm-4',
248
+ id: 'glm-4', // 弃用时间 2025年6月30日
187
249
  pricing: {
188
250
  currency: 'CNY',
189
251
  input: 100,
@@ -198,7 +260,7 @@ const zhipuChatModels: AIChatModelCard[] = [
198
260
  abilities: {
199
261
  vision: true,
200
262
  },
201
- contextWindowTokens: 8192,
263
+ contextWindowTokens: 4096,
202
264
  description:
203
265
  'GLM-4V-Flash 专注于高效的单一图像理解,适用于快速图像解析的场景,例如实时图像分析或批量图像处理。',
204
266
  displayName: 'GLM-4V-Flash',
@@ -218,13 +280,12 @@ const zhipuChatModels: AIChatModelCard[] = [
218
280
  },
219
281
  contextWindowTokens: 8192,
220
282
  description: 'GLM-4V-Plus 具备对视频内容及多图片的理解能力,适合多模态任务。',
221
- displayName: 'GLM-4V-Plus',
222
- enabled: true,
223
- id: 'glm-4v-plus',
283
+ displayName: 'GLM-4V-Plus-0111',
284
+ id: 'glm-4v-plus-0111',
224
285
  pricing: {
225
286
  currency: 'CNY',
226
- input: 10,
227
- output: 10,
287
+ input: 4,
288
+ output: 4,
228
289
  },
229
290
  type: 'chat',
230
291
  },
@@ -232,7 +293,7 @@ const zhipuChatModels: AIChatModelCard[] = [
232
293
  abilities: {
233
294
  vision: true,
234
295
  },
235
- contextWindowTokens: 2048,
296
+ contextWindowTokens: 4096,
236
297
  description: 'GLM-4V 提供强大的图像理解与推理能力,支持多种视觉任务。',
237
298
  displayName: 'GLM-4V',
238
299
  id: 'glm-4v',
@@ -249,6 +310,7 @@ const zhipuChatModels: AIChatModelCard[] = [
249
310
  'CodeGeeX-4 是强大的AI编程助手,支持多种编程语言的智能问答与代码补全,提升开发效率。',
250
311
  displayName: 'CodeGeeX-4',
251
312
  id: 'codegeex-4',
313
+ maxOutput: 32_000,
252
314
  pricing: {
253
315
  currency: 'CNY',
254
316
  input: 0.1,
@@ -257,14 +319,15 @@ const zhipuChatModels: AIChatModelCard[] = [
257
319
  type: 'chat',
258
320
  },
259
321
  {
260
- contextWindowTokens: 4096,
261
- description: 'CharGLM-3 专为角色扮演与情感陪伴设计,支持超长多轮记忆与个性化对话,应用广泛。',
262
- displayName: 'CharGLM-3',
263
- id: 'charglm-3',
322
+ contextWindowTokens: 8192,
323
+ description: 'CharGLM-4 专为角色扮演与情感陪伴设计,支持超长多轮记忆与个性化对话,应用广泛。',
324
+ displayName: 'CharGLM-4',
325
+ id: 'charglm-4',
326
+ maxOutput: 4000,
264
327
  pricing: {
265
328
  currency: 'CNY',
266
- input: 15,
267
- output: 15,
329
+ input: 1,
330
+ output: 1,
268
331
  },
269
332
  type: 'chat',
270
333
  },
@@ -273,6 +336,7 @@ const zhipuChatModels: AIChatModelCard[] = [
273
336
  description: 'Emohaa 是心理模型,具备专业咨询能力,帮助用户理解情感问题。',
274
337
  displayName: 'Emohaa',
275
338
  id: 'emohaa',
339
+ maxOutput: 4000,
276
340
  pricing: {
277
341
  currency: 'CNY',
278
342
  input: 15,
@@ -2,7 +2,7 @@ import { FilesConfig, FilesConfigItem } from '@/types/user/settings/filesConfig'
2
2
 
3
3
  import {
4
4
  DEFAULT_EMBEDDING_MODEL,
5
- DEFAULT_PROVIDER,
5
+ DEFAULT_EMBEDDING_PROVIDER,
6
6
  DEFAULT_RERANK_MODEL,
7
7
  DEFAULT_RERANK_PROVIDER,
8
8
  DEFAULT_RERANK_QUERY_MODE,
@@ -10,7 +10,7 @@ import {
10
10
 
11
11
  export const DEFAULT_FILE_EMBEDDING_MODEL_ITEM: FilesConfigItem = {
12
12
  model: DEFAULT_EMBEDDING_MODEL,
13
- provider: DEFAULT_PROVIDER,
13
+ provider: DEFAULT_EMBEDDING_PROVIDER,
14
14
  };
15
15
 
16
16
  export const DEFAULT_FILE_RERANK_MODEL_ITEM: FilesConfigItem = {
@@ -7,8 +7,8 @@ import { useTranslation } from 'react-i18next';
7
7
  import { useAgentStore } from '@/store/agent';
8
8
  import { agentSelectors } from '@/store/agent/slices/chat';
9
9
  import { useFileStore } from '@/store/file';
10
- import { useUserStore } from '@/store/user';
11
- import { modelProviderSelectors } from '@/store/user/selectors';
10
+ import { useModelSupportFiles } from "@/hooks/useModelSupportFiles";
11
+ import { useModelSupportVision } from "@/hooks/useModelSupportVision";
12
12
 
13
13
  const FileUpload = memo(() => {
14
14
  const { t } = useTranslation('chat');
@@ -16,10 +16,11 @@ const FileUpload = memo(() => {
16
16
  const upload = useFileStore((s) => s.uploadChatFiles);
17
17
 
18
18
  const model = useAgentStore(agentSelectors.currentAgentModel);
19
- const [canUpload, enabledFiles] = useUserStore((s) => [
20
- modelProviderSelectors.isModelEnabledUpload(model)(s),
21
- modelProviderSelectors.isModelEnabledFiles(model)(s),
22
- ]);
19
+ const provider = useAgentStore(agentSelectors.currentAgentModelProvider);
20
+
21
+ const enabledFiles = useModelSupportFiles(model, provider);
22
+ const supportVision = useModelSupportVision(model, provider);
23
+ const canUpload = enabledFiles || supportVision;
23
24
 
24
25
  return (
25
26
  <Upload
@@ -0,0 +1,15 @@
1
+ import { isDeprecatedEdition } from '@/const/version';
2
+ import { aiModelSelectors, useAiInfraStore } from '@/store/aiInfra';
3
+ import { useUserStore } from '@/store/user';
4
+ import { modelProviderSelectors } from '@/store/user/selectors';
5
+
6
+ export const useModelSupportFiles = (model: string, provider: string) => {
7
+ const newValue = useAiInfraStore(aiModelSelectors.isModelSupportFiles(model, provider));
8
+
9
+ // TODO: remove this in V2.0
10
+ const oldValue = useUserStore(modelProviderSelectors.isModelEnabledFiles(model));
11
+ if (isDeprecatedEdition) return oldValue;
12
+ //
13
+
14
+ return newValue;
15
+ };
@@ -46,9 +46,14 @@ export const LobeStepfunAI = LobeOpenAICompatibleFactory({
46
46
 
47
47
  const visionKeywords = [
48
48
  'step-1o-',
49
+ 'step-r1-v-',
49
50
  'step-1v-',
50
51
  ];
51
52
 
53
+ const reasoningKeywords = [
54
+ 'step-r1-',
55
+ ];
56
+
52
57
  const modelsPage = await client.models.list() as any;
53
58
  const modelList: StepfunModelCard[] = modelsPage.data;
54
59
 
@@ -66,7 +71,8 @@ export const LobeStepfunAI = LobeOpenAICompatibleFactory({
66
71
  || false,
67
72
  id: model.id,
68
73
  reasoning:
69
- knownModel?.abilities?.reasoning
74
+ reasoningKeywords.some(keyword => model.id.toLowerCase().includes(keyword))
75
+ || knownModel?.abilities?.reasoning
70
76
  || false,
71
77
  vision:
72
78
  visionKeywords.some(keyword => model.id.toLowerCase().includes(keyword))
@@ -50,23 +50,30 @@ export const LobeZhipuAI = LobeOpenAICompatibleFactory({
50
50
  } as any;
51
51
  },
52
52
  },
53
- constructorOptions: {
54
- defaultHeaders: {
55
- 'Bigmodel-Organization': 'lobehub',
56
- 'Bigmodel-project': 'lobechat',
57
- },
58
- },
59
53
  debug: {
60
54
  chatCompletion: () => process.env.DEBUG_ZHIPU_CHAT_COMPLETION === '1',
61
55
  },
62
56
  models: async ({ client }) => {
63
57
  const { LOBE_DEFAULT_MODEL_LIST } = await import('@/config/aiModels');
64
58
 
59
+ const reasoningKeywords = [
60
+ 'glm-zero',
61
+ 'glm-z1',
62
+ ];
63
+
65
64
  // ref: https://open.bigmodel.cn/console/modelcenter/square
66
- client.baseURL = 'https://open.bigmodel.cn/api/fine-tuning/model_center/list?pageSize=100&pageNum=1';
65
+ const url = 'https://open.bigmodel.cn/api/fine-tuning/model_center/list?pageSize=100&pageNum=1';
66
+ const response = await fetch(url, {
67
+ headers: {
68
+ 'Authorization': `Bearer ${client.apiKey}`,
69
+ 'Bigmodel-Organization': 'lobehub',
70
+ 'Bigmodel-Project': 'lobechat',
71
+ },
72
+ method: 'GET',
73
+ });
74
+ const json = await response.json();
67
75
 
68
- const modelsPage = await client.models.list() as any;
69
- const modelList: ZhipuModelCard[] = modelsPage.body.rows;
76
+ const modelList: ZhipuModelCard[] = json.rows;
70
77
 
71
78
  return modelList
72
79
  .map((model) => {
@@ -83,7 +90,7 @@ export const LobeZhipuAI = LobeOpenAICompatibleFactory({
83
90
  || false,
84
91
  id: model.modelCode,
85
92
  reasoning:
86
- model.modelCode.toLowerCase().includes('glm-zero-preview')
93
+ reasoningKeywords.some(keyword => model.modelCode.toLowerCase().includes(keyword))
87
94
  || knownModel?.abilities?.reasoning
88
95
  || false,
89
96
  vision:
@@ -11,9 +11,6 @@ export const defaultClients: ClientMetadata[] = [
11
11
  // 仅支持授权码流程
12
12
  grant_types: ['authorization_code', 'refresh_token'],
13
13
 
14
- // 明确指明是原生应用
15
- isFirstParty: true,
16
-
17
14
  logo_uri: 'https://hub-apac-1.lobeobjects.space/lobehub-desktop-icon.png',
18
15
 
19
16
  // 桌面端注册的自定义协议回调(使用反向域名格式)
@@ -10,7 +10,6 @@
10
10
  import { DESKTOP_USER_ID } from '@/const/desktop';
11
11
  import { isDesktop } from '@/const/version';
12
12
 
13
- import { userAuth } from '../middleware/userAuth';
14
13
  import { edgeTrpc } from './init';
15
14
  import { jwtPayloadChecker } from './middleware/jwtPayload';
16
15
 
@@ -30,9 +29,6 @@ export const publicProcedure = edgeTrpc.procedure.use(({ next, ctx }) => {
30
29
  });
31
30
  });
32
31
 
33
- // procedure that asserts that the user is logged in
34
- export const authedProcedure = edgeTrpc.procedure.use(userAuth);
35
-
36
32
  // procedure that asserts that the user add the password
37
33
  export const passwordProcedure = edgeTrpc.procedure.use(jwtPayloadChecker);
38
34
 
@@ -1,14 +1,31 @@
1
+ import debug from 'debug';
1
2
  import { User } from 'next-auth';
2
3
  import { NextRequest } from 'next/server';
3
4
 
4
5
  import { JWTPayload, LOBE_CHAT_AUTH_HEADER, enableClerk, enableNextAuth } from '@/const/auth';
6
+ import { oidcEnv } from '@/envs/oidc';
5
7
  import { ClerkAuth, IClerkAuth } from '@/libs/clerk-auth';
8
+ import { extractBearerToken } from '@/utils/server/auth';
9
+
10
+ // Create context logger namespace
11
+ const log = debug('lobe-trpc:lambda:context');
12
+
13
+ export interface OIDCAuth {
14
+ // Other OIDC information that might be needed (optional, as payload contains all info)
15
+ [key: string]: any;
16
+ // OIDC token data (now the complete payload)
17
+ payload: any;
18
+ // User ID
19
+ sub: string;
20
+ }
6
21
 
7
22
  export interface AuthContext {
8
23
  authorizationHeader?: string | null;
9
24
  clerkAuth?: IClerkAuth;
10
25
  jwtPayload?: JWTPayload | null;
11
26
  nextAuth?: User;
27
+ // Add OIDC authentication information
28
+ oidcAuth?: OIDCAuth | null;
12
29
  userId?: string | null;
13
30
  }
14
31
 
@@ -20,13 +37,18 @@ export const createContextInner = async (params?: {
20
37
  authorizationHeader?: string | null;
21
38
  clerkAuth?: IClerkAuth;
22
39
  nextAuth?: User;
40
+ oidcAuth?: OIDCAuth | null;
23
41
  userId?: string | null;
24
- }): Promise<AuthContext> => ({
25
- authorizationHeader: params?.authorizationHeader,
26
- clerkAuth: params?.clerkAuth,
27
- nextAuth: params?.nextAuth,
28
- userId: params?.userId,
29
- });
42
+ }): Promise<AuthContext> => {
43
+ log('createContextInner called with params: %O', params);
44
+ return {
45
+ authorizationHeader: params?.authorizationHeader,
46
+ clerkAuth: params?.clerkAuth,
47
+ nextAuth: params?.nextAuth,
48
+ oidcAuth: params?.oidcAuth,
49
+ userId: params?.userId,
50
+ };
51
+ };
30
52
 
31
53
  export type LambdaContext = Awaited<ReturnType<typeof createContextInner>>;
32
54
 
@@ -35,23 +57,76 @@ export type LambdaContext = Awaited<ReturnType<typeof createContextInner>>;
35
57
  * @link https://trpc.io/docs/v11/context
36
58
  */
37
59
  export const createLambdaContext = async (request: NextRequest): Promise<LambdaContext> => {
60
+ log('createLambdaContext called for request');
38
61
  // for API-response caching see https://trpc.io/docs/v11/caching
39
62
 
40
63
  const authorization = request.headers.get(LOBE_CHAT_AUTH_HEADER);
64
+ log('LobeChat Authorization header: %s', authorization ? 'exists' : 'not found');
41
65
 
42
66
  let userId;
43
67
  let auth;
68
+ let oidcAuth = null;
69
+
70
+ // Prioritize checking the standard Authorization header for OIDC Bearer Token validation
71
+ if (oidcEnv.ENABLE_OIDC) {
72
+ log('OIDC enabled, attempting OIDC authentication');
73
+ const standardAuthorization = request.headers.get('Authorization');
74
+ log('Standard Authorization header: %s', standardAuthorization ? 'exists' : 'not found');
75
+
76
+ try {
77
+ // Use extractBearerToken from utils
78
+ const bearerToken = extractBearerToken(standardAuthorization);
79
+
80
+ log('Extracted Bearer Token: %s', bearerToken ? 'valid' : 'invalid');
81
+ if (bearerToken) {
82
+ const { OIDCService } = await import('@/server/services/oidc');
83
+
84
+ // Initialize OIDC service
85
+ log('Initializing OIDC service');
86
+ const oidcService = await OIDCService.initialize();
87
+ // Validate token using OIDCService
88
+ log('Validating OIDC token');
89
+ const tokenInfo = await oidcService.validateToken(bearerToken);
90
+ oidcAuth = {
91
+ payload: tokenInfo.tokenData,
92
+ ...tokenInfo.tokenData, // Spread payload into oidcAuth
93
+ sub: tokenInfo.userId, // Use tokenData as payload
94
+ };
95
+ userId = tokenInfo.userId;
96
+ log('OIDC authentication successful, userId: %s', userId);
97
+
98
+ // If OIDC authentication is successful, return context immediately
99
+ log('OIDC authentication successful, creating context and returning');
100
+ return createContextInner({
101
+ // Preserve original LobeChat Authorization Header (if any)
102
+ authorizationHeader: authorization,
103
+ oidcAuth,
104
+ userId,
105
+ });
106
+ }
107
+ } catch (error) {
108
+ // If OIDC authentication fails, log error and continue with other authentication methods
109
+ if (standardAuthorization?.startsWith('Bearer ')) {
110
+ log('OIDC authentication failed, error: %O', error);
111
+ console.error('OIDC authentication failed, trying other methods:', error);
112
+ }
113
+ }
114
+ }
44
115
 
116
+ // If OIDC is not enabled or validation fails, try LobeChat custom Header and other authentication methods
45
117
  if (enableClerk) {
118
+ log('Attempting Clerk authentication');
46
119
  const clerkAuth = new ClerkAuth();
47
120
  const result = clerkAuth.getAuthFromRequest(request);
48
121
  auth = result.clerkAuth;
49
122
  userId = result.userId;
123
+ log('Clerk authentication result, userId: %s', userId || 'not authenticated');
50
124
 
51
125
  return createContextInner({ authorizationHeader: authorization, clerkAuth: auth, userId });
52
126
  }
53
127
 
54
128
  if (enableNextAuth) {
129
+ log('Attempting NextAuth authentication');
55
130
  try {
56
131
  const { default: NextAuthEdge } = await import('@/libs/next-auth/edge');
57
132
 
@@ -59,12 +134,21 @@ export const createLambdaContext = async (request: NextRequest): Promise<LambdaC
59
134
  if (session && session?.user?.id) {
60
135
  auth = session.user;
61
136
  userId = session.user.id;
137
+ log('NextAuth authentication successful, userId: %s', userId);
138
+ } else {
139
+ log('NextAuth authentication failed, no valid session');
62
140
  }
63
141
  return createContextInner({ authorizationHeader: authorization, nextAuth: auth, userId });
64
142
  } catch (e) {
143
+ log('NextAuth authentication error: %O', e);
65
144
  console.error('next auth err', e);
66
145
  }
67
146
  }
68
147
 
148
+ // Final return, userId may be undefined
149
+ log(
150
+ 'All authentication methods attempted, returning final context, userId: %s',
151
+ userId || 'not authenticated',
152
+ );
69
153
  return createContextInner({ authorizationHeader: authorization, userId });
70
154
  };
@@ -12,6 +12,7 @@ import { isDesktop } from '@/const/version';
12
12
 
13
13
  import { userAuth } from '../middleware/userAuth';
14
14
  import { trpc } from './init';
15
+ import { oidcAuth } from './middleware/oidcAuth';
15
16
 
16
17
  /**
17
18
  * Create a router
@@ -30,7 +31,7 @@ export const publicProcedure = trpc.procedure.use(({ next, ctx }) => {
30
31
  });
31
32
 
32
33
  // procedure that asserts that the user is logged in
33
- export const authedProcedure = trpc.procedure.use(userAuth);
34
+ export const authedProcedure = trpc.procedure.use(oidcAuth).use(userAuth);
34
35
 
35
36
  /**
36
37
  * Create a server-side caller
@@ -0,0 +1,14 @@
1
+ import { trpc } from '../init';
2
+
3
+ export const oidcAuth = trpc.middleware(async (opts) => {
4
+ const { ctx, next } = opts;
5
+
6
+ // 检查 OIDC 认证
7
+ if (ctx.oidcAuth) {
8
+ return next({
9
+ ctx: { oidcAuth: ctx.oidcAuth, userId: ctx.oidcAuth.sub },
10
+ });
11
+ }
12
+
13
+ return next();
14
+ });
@@ -26,9 +26,7 @@ export const userAuth = trpc.middleware(async (opts) => {
26
26
  }
27
27
 
28
28
  return opts.next({
29
- ctx: {
30
- // user value is known to be non-null now
31
- userId: ctx.userId,
32
- },
29
+ // ✅ user value is known to be non-null now
30
+ ctx: { userId: ctx.userId },
33
31
  });
34
32
  });
@@ -1,3 +1,4 @@
1
+ import { TRPCError } from '@trpc/server';
1
2
  import debug from 'debug';
2
3
 
3
4
  import { createContextForInteractionDetails } from '@/libs/oidc-provider/http-adapter';
@@ -19,6 +20,76 @@ export class OIDCService {
19
20
  return new OIDCService(provider);
20
21
  }
21
22
 
23
+ /**
24
+ * 验证 OIDC Bearer Token 并返回用户信息
25
+ * 使用 oidc-provider 实例的 AccessToken.find 方法验证 token
26
+ *
27
+ * @param token - Bearer Token
28
+ * @returns 包含用户ID和Token数据的对象
29
+ * @throws 如果token无效或OIDC未启用则抛出 TRPCError
30
+ */
31
+ async validateToken(token: string) {
32
+ try {
33
+ log('Validating access token using AccessToken.find');
34
+
35
+ // 使用 oidc-provider 的 AccessToken 查找和验证方法
36
+ const accessToken = await this.provider.AccessToken.find(token);
37
+
38
+ if (!accessToken) {
39
+ log('Access token not found, expired, or consumed');
40
+ throw new TRPCError({
41
+ code: 'UNAUTHORIZED',
42
+ message: 'Access token 无效、已过期或已被使用',
43
+ });
44
+ }
45
+
46
+ // 从 accessToken 实例中获取必要的数据
47
+ // 注意:accessToken 没有 payload() 方法,而是直接访问其属性
48
+ const userId = accessToken.accountId; // 用户 ID 通常存储在 accountId 属性中
49
+ const clientId = accessToken.clientId;
50
+
51
+ // 如果需要更多的声明信息,可以从 accessToken 的其他属性中获取
52
+ // 例如,scopes、claims、exp 等
53
+ const tokenData = {
54
+ client_id: clientId,
55
+ exp: accessToken.exp,
56
+ iat: accessToken.iat,
57
+ jti: accessToken.jti,
58
+ scope: accessToken.scope,
59
+ // OIDC 标准中,sub 字段表示用户 ID
60
+ sub: userId,
61
+ };
62
+
63
+ if (!userId) {
64
+ log('Access token does not contain user ID (accountId)');
65
+ throw new TRPCError({
66
+ code: 'UNAUTHORIZED',
67
+ message: 'Access token 中未包含用户 ID',
68
+ });
69
+ }
70
+
71
+ log('Access token validated successfully for user: %s', userId);
72
+ return {
73
+ // 包含 token 原始数据,可用于获取更多信息
74
+ accessToken,
75
+ // 构建的 token 数据对象
76
+ tokenData,
77
+ // 用户 ID
78
+ userId,
79
+ };
80
+ } catch (error) {
81
+ if (error instanceof TRPCError) throw error;
82
+
83
+ // AccessToken.find 可能抛出特定错误
84
+ log('Error validating access token with AccessToken.find: %O', error);
85
+ console.error('OIDC 令牌验证错误:', error);
86
+ throw new TRPCError({
87
+ code: 'UNAUTHORIZED',
88
+ message: `OIDC 认证失败: ${(error as Error).message}`,
89
+ });
90
+ }
91
+ }
92
+
22
93
  async getInteractionDetails(uid: string) {
23
94
  const { req, res } = await createContextForInteractionDetails(uid);
24
95
  return this.provider.interactionDetails(req, res);