@lobehub/lobehub 2.0.0-next.200 → 2.0.0-next.202

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (130)
  1. package/CHANGELOG.md +50 -0
  2. package/changelog/v1.json +18 -0
  3. package/locales/ar/chat.json +2 -0
  4. package/locales/ar/models.json +64 -7
  5. package/locales/ar/plugin.json +2 -1
  6. package/locales/ar/providers.json +1 -0
  7. package/locales/bg-BG/chat.json +2 -0
  8. package/locales/bg-BG/models.json +49 -5
  9. package/locales/bg-BG/plugin.json +2 -1
  10. package/locales/bg-BG/providers.json +1 -0
  11. package/locales/de-DE/chat.json +2 -0
  12. package/locales/de-DE/models.json +36 -7
  13. package/locales/de-DE/plugin.json +2 -1
  14. package/locales/de-DE/providers.json +1 -0
  15. package/locales/en-US/chat.json +2 -0
  16. package/locales/en-US/models.json +10 -10
  17. package/locales/en-US/plugin.json +2 -1
  18. package/locales/en-US/providers.json +1 -0
  19. package/locales/es-ES/chat.json +2 -0
  20. package/locales/es-ES/models.json +106 -7
  21. package/locales/es-ES/plugin.json +2 -1
  22. package/locales/es-ES/providers.json +1 -0
  23. package/locales/fa-IR/chat.json +2 -0
  24. package/locales/fa-IR/models.json +83 -5
  25. package/locales/fa-IR/plugin.json +2 -1
  26. package/locales/fa-IR/providers.json +1 -0
  27. package/locales/fr-FR/chat.json +2 -0
  28. package/locales/fr-FR/models.json +38 -7
  29. package/locales/fr-FR/plugin.json +2 -1
  30. package/locales/fr-FR/providers.json +1 -0
  31. package/locales/it-IT/chat.json +2 -0
  32. package/locales/it-IT/models.json +40 -5
  33. package/locales/it-IT/plugin.json +2 -1
  34. package/locales/it-IT/providers.json +1 -0
  35. package/locales/ja-JP/chat.json +2 -0
  36. package/locales/ja-JP/models.json +84 -7
  37. package/locales/ja-JP/plugin.json +2 -1
  38. package/locales/ja-JP/providers.json +1 -0
  39. package/locales/ko-KR/chat.json +2 -0
  40. package/locales/ko-KR/models.json +65 -7
  41. package/locales/ko-KR/plugin.json +2 -1
  42. package/locales/ko-KR/providers.json +1 -0
  43. package/locales/nl-NL/chat.json +2 -0
  44. package/locales/nl-NL/models.json +62 -5
  45. package/locales/nl-NL/plugin.json +2 -1
  46. package/locales/nl-NL/providers.json +1 -0
  47. package/locales/pl-PL/chat.json +2 -0
  48. package/locales/pl-PL/models.json +85 -0
  49. package/locales/pl-PL/plugin.json +2 -1
  50. package/locales/pl-PL/providers.json +1 -0
  51. package/locales/pt-BR/chat.json +2 -0
  52. package/locales/pt-BR/models.json +37 -6
  53. package/locales/pt-BR/plugin.json +2 -1
  54. package/locales/pt-BR/providers.json +1 -0
  55. package/locales/ru-RU/chat.json +2 -0
  56. package/locales/ru-RU/models.json +36 -7
  57. package/locales/ru-RU/plugin.json +2 -1
  58. package/locales/ru-RU/providers.json +1 -0
  59. package/locales/tr-TR/chat.json +2 -0
  60. package/locales/tr-TR/models.json +28 -7
  61. package/locales/tr-TR/plugin.json +2 -1
  62. package/locales/tr-TR/providers.json +1 -0
  63. package/locales/vi-VN/chat.json +2 -0
  64. package/locales/vi-VN/models.json +62 -5
  65. package/locales/vi-VN/plugin.json +2 -1
  66. package/locales/vi-VN/providers.json +1 -0
  67. package/locales/zh-CN/chat.json +2 -0
  68. package/locales/zh-CN/models.json +87 -6
  69. package/locales/zh-CN/plugin.json +2 -1
  70. package/locales/zh-CN/providers.json +1 -0
  71. package/locales/zh-TW/chat.json +2 -0
  72. package/locales/zh-TW/models.json +71 -7
  73. package/locales/zh-TW/plugin.json +2 -1
  74. package/locales/zh-TW/providers.json +1 -0
  75. package/package.json +2 -2
  76. package/packages/builtin-tool-gtd/src/client/Inspector/ExecTask/index.tsx +30 -15
  77. package/packages/builtin-tool-gtd/src/manifest.ts +1 -1
  78. package/packages/model-runtime/src/core/ModelRuntime.test.ts +44 -86
  79. package/packages/types/src/aiChat.ts +0 -1
  80. package/packages/types/src/message/ui/chat.ts +1 -1
  81. package/src/app/(backend)/middleware/auth/index.ts +16 -2
  82. package/src/app/(backend)/webapi/chat/[provider]/route.test.ts +30 -15
  83. package/src/app/(backend)/webapi/chat/[provider]/route.ts +44 -40
  84. package/src/app/(backend)/webapi/models/[provider]/pull/route.ts +4 -3
  85. package/src/app/(backend)/webapi/models/[provider]/route.test.ts +36 -13
  86. package/src/app/(backend)/webapi/models/[provider]/route.ts +4 -11
  87. package/src/app/[variants]/(desktop)/desktop-onboarding/index.tsx +8 -2
  88. package/src/features/Conversation/Messages/AssistantGroup/Tool/Render/index.tsx +21 -23
  89. package/src/features/Conversation/Messages/AssistantGroup/components/ContentBlock.tsx +16 -3
  90. package/src/features/Conversation/Messages/Task/TaskDetailPanel/index.tsx +17 -20
  91. package/src/features/Conversation/Messages/Tasks/shared/ErrorState.tsx +16 -11
  92. package/src/features/Conversation/Messages/Tasks/shared/InitializingState.tsx +6 -20
  93. package/src/features/Conversation/Messages/Tasks/shared/ProcessingState.tsx +10 -20
  94. package/src/features/User/DataStatistics.tsx +4 -4
  95. package/src/hooks/useQueryParam.ts +0 -2
  96. package/src/libs/trpc/async/asyncAuth.ts +0 -2
  97. package/src/libs/trpc/async/context.ts +3 -11
  98. package/src/locales/default/chat.ts +2 -0
  99. package/src/locales/default/plugin.ts +2 -1
  100. package/src/server/modules/AgentRuntime/RuntimeExecutors.ts +6 -6
  101. package/src/server/modules/AgentRuntime/__tests__/RuntimeExecutors.test.ts +3 -3
  102. package/src/server/modules/AgentRuntime/factory.ts +39 -20
  103. package/src/server/modules/ModelRuntime/index.ts +138 -1
  104. package/src/server/routers/async/__tests__/caller.test.ts +22 -27
  105. package/src/server/routers/async/caller.ts +4 -6
  106. package/src/server/routers/async/file.ts +10 -5
  107. package/src/server/routers/async/image.ts +5 -4
  108. package/src/server/routers/async/ragEval.ts +7 -5
  109. package/src/server/routers/lambda/__tests__/aiChat.test.ts +8 -37
  110. package/src/server/routers/lambda/aiChat.ts +5 -21
  111. package/src/server/routers/lambda/chunk.ts +9 -28
  112. package/src/server/routers/lambda/image.ts +1 -7
  113. package/src/server/routers/lambda/ragEval.ts +1 -1
  114. package/src/server/routers/lambda/userMemories/reembed.ts +4 -1
  115. package/src/server/routers/lambda/userMemories/search.ts +7 -7
  116. package/src/server/routers/lambda/userMemories/shared.ts +8 -10
  117. package/src/server/routers/lambda/userMemories/tools.ts +140 -118
  118. package/src/server/routers/lambda/userMemories.test.ts +3 -7
  119. package/src/server/routers/lambda/userMemories.ts +44 -29
  120. package/src/server/services/agentRuntime/AgentRuntimeService.test.ts +87 -0
  121. package/src/server/services/agentRuntime/AgentRuntimeService.ts +53 -2
  122. package/src/server/services/agentRuntime/__tests__/executeSync.test.ts +2 -6
  123. package/src/server/services/agentRuntime/__tests__/stepLifecycleCallbacks.test.ts +1 -1
  124. package/src/server/services/chunk/index.ts +6 -5
  125. package/src/server/services/toolExecution/types.ts +1 -2
  126. package/src/services/__tests__/_url.test.ts +0 -1
  127. package/src/services/_url.ts +0 -3
  128. package/src/services/aiChat.ts +5 -12
  129. package/src/store/chat/slices/aiChat/actions/streamingExecutor.ts +0 -2
  130. package/src/app/(backend)/webapi/text-to-image/[provider]/route.ts +0 -74
@@ -4,6 +4,7 @@ import {
4
4
  DEFAULT_USER_MEMORY_EMBEDDING_DIMENSIONS,
5
5
  DEFAULT_USER_MEMORY_EMBEDDING_MODEL_ITEM,
6
6
  } from '@lobechat/const';
7
+ import { type LobeChatDatabase } from '@lobechat/database';
7
8
  import {
8
9
  AddIdentityActionSchema,
9
10
  ContextMemoryItemSchema,
@@ -33,10 +34,9 @@ import {
33
34
  userMemoriesPreferences,
34
35
  } from '@/database/schemas';
35
36
  import { authedProcedure, router } from '@/libs/trpc/lambda';
36
- import { keyVaults, serverDatabase } from '@/libs/trpc/lambda/middleware';
37
+ import { serverDatabase } from '@/libs/trpc/lambda/middleware';
37
38
  import { getServerDefaultFilesConfig } from '@/server/globalConfig';
38
- import { initModelRuntimeWithUserPayload } from '@/server/modules/ModelRuntime';
39
- import { type ClientSecretPayload } from '@/types/auth';
39
+ import { initModelRuntimeFromDB } from '@/server/modules/ModelRuntime';
40
40
 
41
41
  const EMPTY_SEARCH_RESULT: SearchMemoryResult = {
42
42
  contexts: [],
@@ -45,8 +45,9 @@ const EMPTY_SEARCH_RESULT: SearchMemoryResult = {
45
45
  };
46
46
 
47
47
  type MemorySearchContext = {
48
- jwtPayload: ClientSecretPayload;
49
48
  memoryModel: UserMemoryModel;
49
+ serverDB: LobeChatDatabase;
50
+ userId: string;
50
51
  };
51
52
 
52
53
  type MemorySearchResult = Awaited<ReturnType<UserMemoryModel['searchWithEmbedding']>>;
@@ -108,12 +109,12 @@ const searchUserMemories = async (
108
109
  ctx: MemorySearchContext,
109
110
  input: z.infer<typeof searchMemorySchema>,
110
111
  ): Promise<SearchMemoryResult> => {
111
- const agentRuntime = await initModelRuntimeWithUserPayload(ModelProvider.OpenAI, ctx.jwtPayload);
112
-
113
- const { model: embeddingModel } =
112
+ const { provider, model: embeddingModel } =
114
113
  getServerDefaultFilesConfig().embeddingModel || DEFAULT_USER_MEMORY_EMBEDDING_MODEL_ITEM;
114
+ // Read user's provider config from database
115
+ const modelRuntime = await initModelRuntimeFromDB(ctx.serverDB, ctx.userId, provider);
115
116
 
116
- const queryEmbeddings = await agentRuntime.embeddings({
117
+ const queryEmbeddings = await modelRuntime.embeddings({
117
118
  dimensions: DEFAULT_USER_MEMORY_EMBEDDING_DIMENSIONS,
118
119
  input: input.query,
119
120
  model: embeddingModel,
@@ -133,11 +134,10 @@ const searchUserMemories = async (
133
134
  return mapMemorySearchResult(layeredResults);
134
135
  };
135
136
 
136
- const getEmbeddingRuntime = async (jwtPayload: ClientSecretPayload) => {
137
- const agentRuntime = await initModelRuntimeWithUserPayload(
138
- ENABLE_BUSINESS_FEATURES ? BRANDING_PROVIDER : ModelProvider.OpenAI,
139
- jwtPayload,
140
- );
137
+ const getEmbeddingRuntime = async (serverDB: LobeChatDatabase, userId: string) => {
138
+ const provider = ENABLE_BUSINESS_FEATURES ? BRANDING_PROVIDER : ModelProvider.OpenAI;
139
+ // Read user's provider config from database
140
+ const agentRuntime = await initModelRuntimeFromDB(serverDB, userId, provider);
141
141
  const { model: embeddingModel } =
142
142
  getServerDefaultFilesConfig().embeddingModel || DEFAULT_USER_MEMORY_EMBEDDING_MODEL_ITEM;
143
143
 
@@ -197,17 +197,14 @@ const normalizeEmbeddable = (value?: string | null): string | undefined => {
197
197
  return trimmed.length > 0 ? trimmed : undefined;
198
198
  };
199
199
 
200
- const memoryProcedure = authedProcedure
201
- .use(serverDatabase)
202
- .use(keyVaults)
203
- .use(async (opts) => {
204
- const { ctx } = opts;
205
- return opts.next({
206
- ctx: {
207
- memoryModel: new UserMemoryModel(ctx.serverDB, ctx.userId),
208
- },
209
- });
200
+ const memoryProcedure = authedProcedure.use(serverDatabase).use(async (opts) => {
201
+ const { ctx } = opts;
202
+ return opts.next({
203
+ ctx: {
204
+ memoryModel: new UserMemoryModel(ctx.serverDB, ctx.userId),
205
+ },
210
206
  });
207
+ });
211
208
 
212
209
  export const userMemoriesRouter = router({
213
210
  getMemoryDetail: memoryProcedure
@@ -316,7 +313,10 @@ export const userMemoriesRouter = router({
316
313
  .mutation(async ({ ctx, input }) => {
317
314
  try {
318
315
  const options = input ?? {};
319
- const { agentRuntime, embeddingModel } = await getEmbeddingRuntime(ctx.jwtPayload);
316
+ const { agentRuntime, embeddingModel } = await getEmbeddingRuntime(
317
+ ctx.serverDB,
318
+ ctx.userId,
319
+ );
320
320
  const concurrency = options.concurrency ?? 10;
321
321
  const shouldProcess = (key: ReEmbedTableKey) =>
322
322
  !options.only || options.only.length === 0 || options.only.includes(key);
@@ -743,7 +743,10 @@ export const userMemoriesRouter = router({
743
743
  .input(ContextMemoryItemSchema)
744
744
  .mutation(async ({ input, ctx }) => {
745
745
  try {
746
- const { agentRuntime, embeddingModel } = await getEmbeddingRuntime(ctx.jwtPayload);
746
+ const { agentRuntime, embeddingModel } = await getEmbeddingRuntime(
747
+ ctx.serverDB,
748
+ ctx.userId,
749
+ );
747
750
  const embed = createEmbedder(agentRuntime, embeddingModel);
748
751
 
749
752
  const summaryEmbedding = await embed(input.summary);
@@ -795,7 +798,10 @@ export const userMemoriesRouter = router({
795
798
  .input(ExperienceMemoryItemSchema)
796
799
  .mutation(async ({ input, ctx }) => {
797
800
  try {
798
- const { agentRuntime, embeddingModel } = await getEmbeddingRuntime(ctx.jwtPayload);
801
+ const { agentRuntime, embeddingModel } = await getEmbeddingRuntime(
802
+ ctx.serverDB,
803
+ ctx.userId,
804
+ );
799
805
  const embed = createEmbedder(agentRuntime, embeddingModel);
800
806
 
801
807
  const summaryEmbedding = await embed(input.summary);
@@ -848,7 +854,10 @@ export const userMemoriesRouter = router({
848
854
  .input(AddIdentityActionSchema)
849
855
  .mutation(async ({ input, ctx }) => {
850
856
  try {
851
- const { agentRuntime, embeddingModel } = await getEmbeddingRuntime(ctx.jwtPayload);
857
+ const { agentRuntime, embeddingModel } = await getEmbeddingRuntime(
858
+ ctx.serverDB,
859
+ ctx.userId,
860
+ );
852
861
  const embed = createEmbedder(agentRuntime, embeddingModel);
853
862
 
854
863
  const summaryEmbedding = await embed(input.summary);
@@ -913,7 +922,10 @@ export const userMemoriesRouter = router({
913
922
  .input(PreferenceMemoryItemSchema)
914
923
  .mutation(async ({ input, ctx }) => {
915
924
  try {
916
- const { agentRuntime, embeddingModel } = await getEmbeddingRuntime(ctx.jwtPayload);
925
+ const { agentRuntime, embeddingModel } = await getEmbeddingRuntime(
926
+ ctx.serverDB,
927
+ ctx.userId,
928
+ );
917
929
  const embed = createEmbedder(agentRuntime, embeddingModel);
918
930
 
919
931
  const summaryEmbedding = await embed(input.summary);
@@ -1003,7 +1015,10 @@ export const userMemoriesRouter = router({
1003
1015
  .input(UpdateIdentityActionSchema)
1004
1016
  .mutation(async ({ input, ctx }) => {
1005
1017
  try {
1006
- const { agentRuntime, embeddingModel } = await getEmbeddingRuntime(ctx.jwtPayload);
1018
+ const { agentRuntime, embeddingModel } = await getEmbeddingRuntime(
1019
+ ctx.serverDB,
1020
+ ctx.userId,
1021
+ );
1007
1022
  const embed = createEmbedder(agentRuntime, embeddingModel);
1008
1023
 
1009
1024
  let summaryVector1024: number[] | null | undefined;
@@ -364,6 +364,93 @@ describe('AgentRuntimeService', () => {
364
364
  });
365
365
  });
366
366
 
367
+ it('should call onComplete with error in finalState when execution fails', async () => {
368
+ const error = new Error('Runtime error');
369
+ const mockRuntime = { step: vi.fn().mockRejectedValue(error) };
370
+ vi.spyOn(service as any, 'createAgentRuntime').mockReturnValue({ runtime: mockRuntime });
371
+
372
+ // Register onComplete callback
373
+ const mockOnComplete = vi.fn();
374
+ service.registerStepCallbacks('test-operation-1', {
375
+ onComplete: mockOnComplete,
376
+ });
377
+
378
+ await expect(service.executeStep(mockParams)).rejects.toThrow('Runtime error');
379
+
380
+ // Verify onComplete is called with error in finalState as ChatMessageError
381
+ // ChatErrorType.InternalServerError = 500
382
+ expect(mockOnComplete).toHaveBeenCalledWith(
383
+ expect.objectContaining({
384
+ operationId: 'test-operation-1',
385
+ reason: 'error',
386
+ finalState: expect.objectContaining({
387
+ error: expect.objectContaining({
388
+ type: 500, // ChatErrorType.InternalServerError
389
+ message: 'Runtime error',
390
+ body: expect.objectContaining({ name: 'Error' }),
391
+ }),
392
+ }),
393
+ }),
394
+ );
395
+ });
396
+
397
+ it('should call onComplete with ChatCompletionErrorPayload in finalState', async () => {
398
+ // Simulate LLM error format: { errorType: 'InvalidProviderAPIKey', error: { ... } }
399
+ const llmError = {
400
+ errorType: 'InvalidProviderAPIKey',
401
+ error: { status: 401 },
402
+ provider: 'openai',
403
+ };
404
+ const mockRuntime = { step: vi.fn().mockRejectedValue(llmError) };
405
+ vi.spyOn(service as any, 'createAgentRuntime').mockReturnValue({ runtime: mockRuntime });
406
+
407
+ // Register onComplete callback
408
+ const mockOnComplete = vi.fn();
409
+ service.registerStepCallbacks('test-operation-1', {
410
+ onComplete: mockOnComplete,
411
+ });
412
+
413
+ await expect(service.executeStep(mockParams)).rejects.toEqual(llmError);
414
+
415
+ // Verify error is formatted correctly with type from errorType
416
+ expect(mockOnComplete).toHaveBeenCalledWith(
417
+ expect.objectContaining({
418
+ operationId: 'test-operation-1',
419
+ reason: 'error',
420
+ finalState: expect.objectContaining({
421
+ error: expect.objectContaining({
422
+ type: 'InvalidProviderAPIKey',
423
+ message: 'InvalidProviderAPIKey',
424
+ body: expect.objectContaining({ status: 401 }),
425
+ }),
426
+ }),
427
+ }),
428
+ );
429
+ });
430
+
431
+ it('should save error state to coordinator for later retrieval (inMemory mode fix)', async () => {
432
+ const error = new Error('Test error for inMemory mode');
433
+ const mockRuntime = { step: vi.fn().mockRejectedValue(error) };
434
+ vi.spyOn(service as any, 'createAgentRuntime').mockReturnValue({ runtime: mockRuntime });
435
+
436
+ // Spy on coordinator.saveAgentState to verify it's called with error state
437
+ const saveStateSpy = vi.spyOn((service as any).coordinator, 'saveAgentState');
438
+
439
+ await expect(service.executeStep(mockParams)).rejects.toThrow('Test error for inMemory mode');
440
+
441
+ // Verify saveAgentState is called with error state before onComplete
442
+ expect(saveStateSpy).toHaveBeenCalledWith(
443
+ 'test-operation-1',
444
+ expect.objectContaining({
445
+ error: expect.objectContaining({
446
+ type: 500, // ChatErrorType.InternalServerError
447
+ message: 'Test error for inMemory mode',
448
+ }),
449
+ status: 'error',
450
+ }),
451
+ );
452
+ });
453
+
367
454
  it('should handle human intervention', async () => {
368
455
  const paramsWithIntervention = {
369
456
  ...mockParams,
@@ -4,6 +4,7 @@ import {
4
4
  type AgentState,
5
5
  GeneralChatAgent,
6
6
  } from '@lobechat/agent-runtime';
7
+ import { AgentRuntimeErrorType, ChatErrorType, type ChatMessageError } from '@lobechat/types';
7
8
  import debug from 'debug';
8
9
  import urlJoin from 'url-join';
9
10
 
@@ -41,6 +42,43 @@ import type {
41
42
 
42
43
  const log = debug('lobe-server:agent-runtime-service');
43
44
 
45
+ /**
46
+ * Formats an error into ChatMessageError structure
47
+ * Handles various error formats from LLM execution and other sources
48
+ */
49
+ function formatErrorForState(error: unknown): ChatMessageError {
50
+ // Handle ChatCompletionErrorPayload format from LLM errors
51
+ // e.g., { errorType: 'InvalidProviderAPIKey', error: { ... }, provider: 'openai' }
52
+ if (error && typeof error === 'object' && 'errorType' in error) {
53
+ const payload = error as {
54
+ error?: unknown;
55
+ errorType: ChatMessageError['type'];
56
+ message?: string;
57
+ };
58
+ return {
59
+ body: payload.error || error,
60
+ message: payload.message || String(payload.errorType),
61
+ type: payload.errorType,
62
+ };
63
+ }
64
+
65
+ // Handle standard Error objects
66
+ if (error instanceof Error) {
67
+ return {
68
+ body: { name: error.name },
69
+ message: error.message,
70
+ type: ChatErrorType.InternalServerError,
71
+ };
72
+ }
73
+
74
+ // Fallback for unknown error types
75
+ return {
76
+ body: error,
77
+ message: String(error),
78
+ type: AgentRuntimeErrorType.AgentRuntimeError,
79
+ };
80
+ }
81
+
44
82
  export interface AgentRuntimeServiceOptions {
45
83
  /**
46
84
  * Coordinator configuration options
@@ -92,6 +130,7 @@ export class AgentRuntimeService {
92
130
 
93
131
  return urlJoin(baseUrl, '/api/agent');
94
132
  }
133
+ private serverDB: LobeChatDatabase;
95
134
  private userId: string;
96
135
  private messageModel: MessageModel;
97
136
 
@@ -107,6 +146,7 @@ export class AgentRuntimeService {
107
146
  });
108
147
  this.queueService =
109
148
  options?.queueService === null ? null : (options?.queueService ?? new QueueService());
149
+ this.serverDB = db;
110
150
  this.userId = userId;
111
151
  this.messageModel = new MessageModel(db, this.userId);
112
152
 
@@ -431,12 +471,22 @@ export class AgentRuntimeService {
431
471
  type: 'error',
432
472
  });
433
473
 
474
+ // Build and save error state so it's persisted for later retrieval
475
+ const errorState = await this.coordinator.loadAgentState(operationId);
476
+ const finalStateWithError = {
477
+ ...errorState!,
478
+ error: formatErrorForState(error),
479
+ status: 'error' as const,
480
+ };
481
+
482
+ // Save the error state to coordinator so getOperationStatus can retrieve it
483
+ await this.coordinator.saveAgentState(operationId, finalStateWithError);
484
+
434
485
  // Also call onComplete callback when execution fails
435
486
  if (callbacks?.onComplete) {
436
487
  try {
437
- const errorState = await this.coordinator.loadAgentState(operationId);
438
488
  await callbacks.onComplete({
439
- finalState: errorState!,
489
+ finalState: finalStateWithError,
440
490
  operationId,
441
491
  reason: 'error',
442
492
  });
@@ -794,6 +844,7 @@ export class AgentRuntimeService {
794
844
  const executorContext: RuntimeExecutorContext = {
795
845
  messageModel: this.messageModel,
796
846
  operationId,
847
+ serverDB: this.serverDB,
797
848
  stepIndex,
798
849
  streamManager: this.streamManager,
799
850
  toolExecutionService: this.toolExecutionService,
@@ -20,7 +20,7 @@ vi.mock('@/database/models/message', () => ({
20
20
  // Mock ModelRuntime
21
21
  vi.mock('@/server/modules/ModelRuntime', () => ({
22
22
  initializeRuntimeOptions: vi.fn(),
23
- initModelRuntimeWithUserPayload: vi.fn().mockReturnValue({
23
+ initModelRuntimeFromDB: vi.fn().mockResolvedValue({
24
24
  chat: vi.fn(),
25
25
  }),
26
26
  ApiKeyManager: vi.fn().mockImplementation(() => ({
@@ -269,11 +269,7 @@ describe('AgentRuntimeService.executeSync', () => {
269
269
  await streamEventManager.publishAgentRuntimeEnd(operationId, 1, { status: 'done' });
270
270
  }, 10);
271
271
 
272
- const event = await streamEventManager.waitForEvent(
273
- operationId,
274
- 'agent_runtime_end',
275
- 1000,
276
- );
272
+ const event = await streamEventManager.waitForEvent(operationId, 'agent_runtime_end', 1000);
277
273
 
278
274
  expect(event.type).toBe('agent_runtime_end');
279
275
  });
@@ -25,7 +25,7 @@ vi.mock('@/server/modules/ModelRuntime', () => ({
25
25
  getApiKey: vi.fn(),
26
26
  })),
27
27
  initializeRuntimeOptions: vi.fn(),
28
- initModelRuntimeWithUserPayload: vi.fn().mockReturnValue({
28
+ initModelRuntimeFromDB: vi.fn().mockResolvedValue({
29
29
  chat: vi.fn(),
30
30
  }),
31
31
  }));
@@ -1,5 +1,4 @@
1
1
  import { type LobeChatDatabase } from '@lobechat/database';
2
- import { type ClientSecretPayload } from '@lobechat/types';
3
2
 
4
3
  import { AsyncTaskModel } from '@/database/models/asyncTask';
5
4
  import { FileModel } from '@/database/models/file';
@@ -31,7 +30,7 @@ export class ChunkService {
31
30
  return this.chunkClient.chunkContent(params);
32
31
  }
33
32
 
34
- async asyncEmbeddingFileChunks(fileId: string, payload: ClientSecretPayload) {
33
+ async asyncEmbeddingFileChunks(fileId: string) {
35
34
  const result = await this.fileModel.findById(fileId);
36
35
 
37
36
  if (!result) return;
@@ -44,7 +43,8 @@ export class ChunkService {
44
43
 
45
44
  await this.fileModel.update(fileId, { embeddingTaskId: asyncTaskId });
46
45
 
47
- const asyncCaller = await createAsyncCaller({ jwtPayload: payload, userId: this.userId });
46
+ // Async router will read keyVaults from DB, no need to pass jwtPayload
47
+ const asyncCaller = await createAsyncCaller({ userId: this.userId });
48
48
 
49
49
  // trigger embedding task asynchronously
50
50
  try {
@@ -67,7 +67,7 @@ export class ChunkService {
67
67
  /**
68
68
  * parse file to chunks with async task
69
69
  */
70
- async asyncParseFileToChunks(fileId: string, payload: ClientSecretPayload, skipExist?: boolean) {
70
+ async asyncParseFileToChunks(fileId: string, skipExist?: boolean) {
71
71
  const result = await this.fileModel.findById(fileId);
72
72
 
73
73
  if (!result) return;
@@ -83,7 +83,8 @@ export class ChunkService {
83
83
 
84
84
  await this.fileModel.update(fileId, { chunkTaskId: asyncTaskId });
85
85
 
86
- const asyncCaller = await createAsyncCaller({ jwtPayload: payload, userId: this.userId });
86
+ // Async router will read keyVaults from DB, no need to pass jwtPayload
87
+ const asyncCaller = await createAsyncCaller({ userId: this.userId });
87
88
 
88
89
  // trigger parse file task asynchronously
89
90
  asyncCaller.file.parseFileToChunks({ fileId: fileId, taskId: asyncTaskId }).catch(async (e) => {
@@ -1,10 +1,9 @@
1
1
  import { type LobeToolManifest } from '@lobechat/context-engine';
2
- import { type ChatToolPayload, type ClientSecretPayload } from '@lobechat/types';
2
+ import { type ChatToolPayload } from '@lobechat/types';
3
3
 
4
4
  export interface ToolExecutionContext {
5
5
  toolManifestMap: Record<string, LobeToolManifest>;
6
6
  userId?: string;
7
- userPayload?: ClientSecretPayload;
8
7
  }
9
8
 
10
9
  export interface ToolExecutionResult {
@@ -17,7 +17,6 @@ describe('API_ENDPOINTS', () => {
17
17
  expect(API_ENDPOINTS.chat('openai')).toBe('/webapi/chat/openai');
18
18
  expect(API_ENDPOINTS.models('anthropic')).toBe('/webapi/models/anthropic');
19
19
  expect(API_ENDPOINTS.modelPull('azure')).toBe('/webapi/models/azure/pull');
20
- expect(API_ENDPOINTS.images('dalle')).toBe('/webapi/text-to-image/dalle');
21
20
  expect(API_ENDPOINTS.tts('openai')).toBe('/webapi/tts/openai');
22
21
  });
23
22
  });
@@ -21,9 +21,6 @@ export const API_ENDPOINTS = {
21
21
  modelPull: (provider: string) =>
22
22
  withElectronProtocolIfElectron(`/webapi/models/${provider}/pull`),
23
23
 
24
- // image
25
- images: (provider: string) => withElectronProtocolIfElectron(`/webapi/text-to-image/${provider}`),
26
-
27
24
  // STT
28
25
  stt: withElectronProtocolIfElectron('/webapi/stt/openai'),
29
26
 
@@ -2,7 +2,6 @@ import { type SendMessageServerParams, type StructureOutputParams } from '@lobec
2
2
  import { cleanObject } from '@lobechat/utils';
3
3
 
4
4
  import { lambdaClient } from '@/libs/trpc/client';
5
- import { createXorKeyVaultsPayload } from '@/services/_auth';
6
5
 
7
6
  class AiChatService {
8
7
  sendMessageInServer = async (
@@ -15,17 +14,11 @@ class AiChatService {
15
14
  });
16
15
  };
17
16
 
18
- generateJSON = async (
19
- params: Omit<StructureOutputParams, 'keyVaultsPayload'>,
20
- abortController: AbortController,
21
- ) => {
22
- return lambdaClient.aiChat.outputJSON.mutate(
23
- { ...params, keyVaultsPayload: createXorKeyVaultsPayload(params.provider) },
24
- {
25
- context: { showNotification: false },
26
- signal: abortController?.signal,
27
- },
28
- );
17
+ generateJSON = async (params: StructureOutputParams, abortController: AbortController) => {
18
+ return lambdaClient.aiChat.outputJSON.mutate(params, {
19
+ context: { showNotification: false },
20
+ signal: abortController?.signal,
21
+ });
29
22
  };
30
23
 
31
24
  // sendGroupMessageInServer = async (params: SendMessageServerParams) => {
@@ -164,8 +164,6 @@ export const streamingExecutor: StateCreator<
164
164
  const operation = operationId ? get().operations[operationId] : undefined;
165
165
  const scope = operation?.context.scope;
166
166
 
167
- console.log('[internal_createAgentState] Operation scope:', { operationId, scope });
168
-
169
167
  // Resolve agent config with builtin agent runtime config merged
170
168
  // This ensures runtime plugins (e.g., 'lobe-agent-builder' for Agent Builder) are included
171
169
  const { agentConfig: agentConfigData, plugins: pluginIds } = resolveAgentConfig({
@@ -1,74 +0,0 @@
1
- import { type ChatCompletionErrorPayload, type TextToImagePayload } from '@lobechat/model-runtime';
2
- import { ChatErrorType } from '@lobechat/types';
3
- import { NextResponse } from 'next/server';
4
-
5
- import { checkAuth } from '@/app/(backend)/middleware/auth';
6
- import { initModelRuntimeWithUserPayload } from '@/server/modules/ModelRuntime';
7
- import { createErrorResponse } from '@/utils/errorResponse';
8
-
9
- export const preferredRegion = [
10
- 'arn1',
11
- 'bom1',
12
- 'cdg1',
13
- 'cle1',
14
- 'cpt1',
15
- 'dub1',
16
- 'fra1',
17
- 'gru1',
18
- 'hnd1',
19
- 'iad1',
20
- 'icn1',
21
- 'kix1',
22
- 'lhr1',
23
- 'pdx1',
24
- 'sfo1',
25
- 'sin1',
26
- 'syd1',
27
- ];
28
-
29
- // return NextResponse.json(
30
- // {
31
- // body: {
32
- // endpoint: 'https://ai****ix.com/v1',
33
- // error: {
34
- // code: 'content_policy_violation',
35
- // message:
36
- // 'Your request was rejected as a result of our safety system. Image descriptions generated from your prompt may contain text that is not allowed by our safety system. If you believe this was done in error, your request may succeed if retried, or by adjusting your prompt.',
37
- // param: null,
38
- // type: 'invalid_request_error',
39
- // },
40
- // provider: 'openai',
41
- // },
42
- // errorType: 'OpenAIBizError',
43
- // },
44
- // { status: 400 },
45
- // );
46
-
47
- export const POST = checkAuth(async (req: Request, { params, jwtPayload }) => {
48
- const provider = (await params)!.provider!;
49
-
50
- try {
51
- // ============ 1. init chat model ============ //
52
- const agentRuntime = await initModelRuntimeWithUserPayload(provider, jwtPayload);
53
-
54
- // ============ 2. create chat completion ============ //
55
-
56
- const data = (await req.json()) as TextToImagePayload;
57
-
58
- const images = await agentRuntime.textToImage(data);
59
-
60
- return NextResponse.json(images);
61
- } catch (e) {
62
- const {
63
- errorType = ChatErrorType.InternalServerError,
64
- error: errorContent,
65
- ...res
66
- } = e as ChatCompletionErrorPayload;
67
-
68
- const error = errorContent || e;
69
- // track the error at server side
70
- console.error(`Route: [${provider}] ${errorType}:`, error);
71
-
72
- return createErrorResponse(errorType, { error, ...res, provider });
73
- }
74
- });