@lobehub/chat 1.61.6 → 1.62.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (59)
  1. package/.github/ISSUE_TEMPLATE/1_bug_report_cn.yml +8 -0
  2. package/.github/ISSUE_TEMPLATE/config.yml +4 -1
  3. package/CHANGELOG.md +58 -0
  4. package/changelog/v1.json +21 -0
  5. package/locales/ar/components.json +1 -0
  6. package/locales/bg-BG/components.json +1 -0
  7. package/locales/de-DE/components.json +1 -0
  8. package/locales/en-US/components.json +4 -3
  9. package/locales/es-ES/components.json +1 -0
  10. package/locales/fa-IR/components.json +1 -0
  11. package/locales/fr-FR/components.json +1 -0
  12. package/locales/it-IT/components.json +1 -0
  13. package/locales/ja-JP/components.json +1 -0
  14. package/locales/ko-KR/components.json +1 -0
  15. package/locales/nl-NL/components.json +1 -0
  16. package/locales/pl-PL/components.json +1 -0
  17. package/locales/pt-BR/components.json +1 -0
  18. package/locales/ru-RU/components.json +1 -0
  19. package/locales/tr-TR/components.json +1 -0
  20. package/locales/vi-VN/components.json +1 -0
  21. package/locales/zh-CN/components.json +2 -1
  22. package/locales/zh-TW/components.json +1 -0
  23. package/package.json +2 -2
  24. package/src/components/ModelSelect/index.tsx +24 -2
  25. package/src/components/Thinking/index.tsx +7 -2
  26. package/src/config/aiModels/jina.ts +7 -5
  27. package/src/config/aiModels/perplexity.ts +8 -0
  28. package/src/config/llm.ts +8 -0
  29. package/src/config/modelProviders/sambanova.ts +4 -1
  30. package/src/database/client/migrations.json +12 -8
  31. package/src/database/migrations/0015_add_message_search_metadata.sql +2 -0
  32. package/src/database/migrations/meta/0015_snapshot.json +3616 -0
  33. package/src/database/migrations/meta/_journal.json +7 -0
  34. package/src/database/schemas/message.ts +3 -1
  35. package/src/database/server/models/message.ts +2 -0
  36. package/src/features/Conversation/components/ChatItem/index.tsx +10 -1
  37. package/src/features/Conversation/components/MarkdownElements/Thinking/Render.tsx +5 -1
  38. package/src/features/Conversation/components/MarkdownElements/remarkPlugins/createRemarkCustomTagPlugin.ts +1 -0
  39. package/src/features/Conversation/components/MarkdownElements/remarkPlugins/getNodeContent.test.ts +107 -0
  40. package/src/features/Conversation/components/MarkdownElements/remarkPlugins/getNodeContent.ts +6 -0
  41. package/src/libs/agent-runtime/perplexity/index.test.ts +156 -12
  42. package/src/libs/agent-runtime/utils/streams/anthropic.ts +3 -3
  43. package/src/libs/agent-runtime/utils/streams/bedrock/claude.ts +6 -2
  44. package/src/libs/agent-runtime/utils/streams/bedrock/llama.ts +3 -3
  45. package/src/libs/agent-runtime/utils/streams/google-ai.ts +3 -3
  46. package/src/libs/agent-runtime/utils/streams/ollama.ts +3 -3
  47. package/src/libs/agent-runtime/utils/streams/openai.ts +26 -8
  48. package/src/libs/agent-runtime/utils/streams/protocol.ts +33 -8
  49. package/src/libs/agent-runtime/utils/streams/vertex-ai.ts +3 -3
  50. package/src/locales/default/components.ts +1 -0
  51. package/src/server/services/nextAuthUser/index.test.ts +109 -0
  52. package/src/services/user/client.test.ts +10 -0
  53. package/src/services/user/server.test.ts +149 -0
  54. package/src/store/chat/slices/aiChat/actions/generateAIChat.ts +17 -6
  55. package/src/store/chat/slices/message/action.ts +12 -7
  56. package/src/types/aiModel.ts +5 -0
  57. package/src/types/message/base.ts +13 -0
  58. package/src/types/message/chat.ts +3 -2
  59. package/src/utils/fetch/fetchSSE.ts +17 -1
@@ -2,8 +2,16 @@ import { ChatStreamCallbacks } from '@/libs/agent-runtime';
2
2
 
3
3
  import { AgentRuntimeErrorType } from '../../error';
4
4
 
5
- export interface StreamStack {
5
+ /**
6
+ * context in the stream to temporarily save data
7
+ */
8
+ export interface StreamContext {
6
9
  id: string;
10
+ /**
11
+ * As pplx citations are in every chunk, but we only need to return them once,
12
+ * this flag is used to check whether the pplx citation has already been returned, so it is not returned again
13
+ */
14
+ returnedPplxCitation?: boolean;
7
15
  tool?: {
8
16
  id: string;
9
17
  index: number;
@@ -15,7 +23,20 @@ export interface StreamStack {
15
23
  export interface StreamProtocolChunk {
16
24
  data: any;
17
25
  id?: string;
18
- type: 'text' | 'tool_calls' | 'data' | 'stop' | 'error' | 'reasoning';
26
+ type: // pure text
27
+ | 'text'
28
+ // Tools use
29
+ | 'tool_calls'
30
+ // Model Thinking
31
+ | 'reasoning'
32
+ // Search or Grounding
33
+ | 'citations'
34
+ // stop signal
35
+ | 'stop'
36
+ // Error
37
+ | 'error'
38
+ // unknown data result
39
+ | 'data';
19
40
  }
20
41
 
21
42
  export interface StreamToolCallChunkData {
@@ -85,16 +106,20 @@ export const convertIterableToStream = <T>(stream: AsyncIterable<T>) => {
85
106
  * Create a transformer to convert the response into an SSE format
86
107
  */
87
108
  export const createSSEProtocolTransformer = (
88
- transformer: (chunk: any, stack: StreamStack) => StreamProtocolChunk,
89
- streamStack?: StreamStack,
109
+ transformer: (chunk: any, stack: StreamContext) => StreamProtocolChunk | StreamProtocolChunk[],
110
+ streamStack?: StreamContext,
90
111
  ) =>
91
112
  new TransformStream({
92
113
  transform: (chunk, controller) => {
93
- const { type, id, data } = transformer(chunk, streamStack || { id: '' });
114
+ const result = transformer(chunk, streamStack || { id: '' });
115
+
116
+ const buffers = Array.isArray(result) ? result : [result];
94
117
 
95
- controller.enqueue(`id: ${id}\n`);
96
- controller.enqueue(`event: ${type}\n`);
97
- controller.enqueue(`data: ${JSON.stringify(data)}\n\n`);
118
+ buffers.forEach(({ type, id, data }) => {
119
+ controller.enqueue(`id: ${id}\n`);
120
+ controller.enqueue(`event: ${type}\n`);
121
+ controller.enqueue(`data: ${JSON.stringify(data)}\n\n`);
122
+ });
98
123
  },
99
124
  });
100
125
 
@@ -4,8 +4,8 @@ import { nanoid } from '@/utils/uuid';
4
4
 
5
5
  import { ChatStreamCallbacks } from '../../types';
6
6
  import {
7
+ StreamContext,
7
8
  StreamProtocolChunk,
8
- StreamStack,
9
9
  createCallbacksTransformer,
10
10
  createSSEProtocolTransformer,
11
11
  generateToolCallId,
@@ -13,7 +13,7 @@ import {
13
13
 
14
14
  const transformVertexAIStream = (
15
15
  chunk: GenerateContentResponse,
16
- stack: StreamStack,
16
+ stack: StreamContext,
17
17
  ): StreamProtocolChunk => {
18
18
  // maybe need another structure to add support for multiple choices
19
19
  const candidates = chunk.candidates;
@@ -67,7 +67,7 @@ export const VertexAIStream = (
67
67
  rawStream: ReadableStream<EnhancedGenerateContentResponse>,
68
68
  callbacks?: ChatStreamCallbacks,
69
69
  ) => {
70
- const streamStack: StreamStack = { id: 'chat_' + nanoid() };
70
+ const streamStack: StreamContext = { id: 'chat_' + nanoid() };
71
71
 
72
72
  return rawStream
73
73
  .pipeThrough(createSSEProtocolTransformer(transformVertexAIStream, streamStack))
@@ -79,6 +79,7 @@ export default {
79
79
  file: '该模型支持上传文件读取与识别',
80
80
  functionCall: '该模型支持函数调用(Function Call)',
81
81
  reasoning: '该模型支持深度思考',
82
+ search: '该模型支持联网搜索',
82
83
  tokens: '该模型单个会话最多支持 {{tokens}} Tokens',
83
84
  vision: '该模型支持视觉识别',
84
85
  },
@@ -0,0 +1,109 @@
1
+ // @vitest-environment node
2
+ import { NextResponse } from 'next/server';
3
+ import { beforeEach, describe, expect, it, vi } from 'vitest';
4
+
5
+ import { UserItem } from '@/database/schemas';
6
+ import { serverDB } from '@/database/server';
7
+ import { UserModel } from '@/database/server/models/user';
8
+ import { pino } from '@/libs/logger';
9
+ import { LobeNextAuthDbAdapter } from '@/libs/next-auth/adapter';
10
+
11
+ import { NextAuthUserService } from './index';
12
+
13
+ vi.mock('@/libs/logger', () => ({
14
+ pino: {
15
+ info: vi.fn(),
16
+ warn: vi.fn(),
17
+ },
18
+ }));
19
+
20
+ vi.mock('@/database/server/models/user');
21
+ vi.mock('@/database/server');
22
+
23
+ describe('NextAuthUserService', () => {
24
+ let service: NextAuthUserService;
25
+
26
+ beforeEach(() => {
27
+ vi.clearAllMocks();
28
+ service = new NextAuthUserService();
29
+ });
30
+
31
+ describe('safeUpdateUser', () => {
32
+ const mockUser = {
33
+ id: 'user-123',
34
+ email: 'test@example.com',
35
+ };
36
+
37
+ const mockAccount = {
38
+ provider: 'github',
39
+ providerAccountId: '12345',
40
+ };
41
+
42
+ const mockUpdateData: Partial<UserItem> = {
43
+ avatar: 'https://example.com/avatar.jpg',
44
+ email: 'new@example.com',
45
+ fullName: 'Test User',
46
+ };
47
+
48
+ it('should update user when user is found', async () => {
49
+ const mockUserModel = {
50
+ updateUser: vi.fn().mockResolvedValue({}),
51
+ };
52
+
53
+ vi.mocked(UserModel).mockImplementation(() => mockUserModel as any);
54
+
55
+ // Mock the adapter directly on the service instance
56
+ service.adapter = {
57
+ getUserByAccount: vi.fn().mockResolvedValue(mockUser),
58
+ };
59
+
60
+ const response = await service.safeUpdateUser(mockAccount, mockUpdateData);
61
+
62
+ expect(pino.info).toHaveBeenCalledWith(
63
+ `updating user "${JSON.stringify(mockAccount)}" due to webhook`,
64
+ );
65
+
66
+ expect(service.adapter.getUserByAccount).toHaveBeenCalledWith(mockAccount);
67
+ expect(UserModel).toHaveBeenCalledWith(serverDB, mockUser.id);
68
+ expect(mockUserModel.updateUser).toHaveBeenCalledWith(mockUpdateData);
69
+
70
+ expect(response).toBeInstanceOf(NextResponse);
71
+ expect(response.status).toBe(200);
72
+ const data = await response.json();
73
+ expect(data).toEqual({ message: 'user updated', success: true });
74
+ });
75
+
76
+ it('should handle case when user is not found', async () => {
77
+ // Mock the adapter directly on the service instance
78
+ service.adapter = {
79
+ getUserByAccount: vi.fn().mockResolvedValue(null),
80
+ };
81
+
82
+ const response = await service.safeUpdateUser(mockAccount, mockUpdateData);
83
+
84
+ expect(pino.warn).toHaveBeenCalledWith(
85
+ `[${mockAccount.provider}]: Webhooks handler user "${JSON.stringify(mockAccount)}" update for "${JSON.stringify(mockUpdateData)}", but no user was found by the providerAccountId.`,
86
+ );
87
+
88
+ expect(UserModel).not.toHaveBeenCalled();
89
+
90
+ expect(response).toBeInstanceOf(NextResponse);
91
+ expect(response.status).toBe(200);
92
+ const data = await response.json();
93
+ expect(data).toEqual({ message: 'user updated', success: true });
94
+ });
95
+
96
+ it('should handle errors during user update', async () => {
97
+ const mockError = new Error('Database error');
98
+
99
+ // Mock the adapter directly on the service instance
100
+ service.adapter = {
101
+ getUserByAccount: vi.fn().mockRejectedValue(mockError),
102
+ };
103
+
104
+ await expect(service.safeUpdateUser(mockAccount, mockUpdateData)).rejects.toThrow(mockError);
105
+
106
+ expect(UserModel).not.toHaveBeenCalled();
107
+ });
108
+ });
109
+ });
@@ -95,4 +95,14 @@ describe('ClientService', () => {
95
95
  expect(spyOn).toHaveBeenCalledWith(newPreference);
96
96
  expect(spyOn).toHaveBeenCalledTimes(1);
97
97
  });
98
+
99
+ it('should return empty array for getUserSSOProviders', async () => {
100
+ const providers = await clientService.getUserSSOProviders();
101
+ expect(providers).toEqual([]);
102
+ });
103
+
104
+ it('should do nothing when unlinkSSOProvider is called', async () => {
105
+ const result = await clientService.unlinkSSOProvider('google', '123');
106
+ expect(result).toBeUndefined();
107
+ });
98
108
  });
@@ -0,0 +1,149 @@
1
+ import { DeepPartial } from 'utility-types';
2
+ import { describe, expect, it, vi } from 'vitest';
3
+
4
+ import { lambdaClient } from '@/libs/trpc/client';
5
+ import { UserInitializationState, UserPreference } from '@/types/user';
6
+ import { UserSettings } from '@/types/user/settings';
7
+
8
+ import { ServerService } from './server';
9
+
10
+ vi.mock('@/libs/trpc/client', () => ({
11
+ lambdaClient: {
12
+ user: {
13
+ getUserRegistrationDuration: {
14
+ query: vi.fn(),
15
+ },
16
+ getUserState: {
17
+ query: vi.fn(),
18
+ },
19
+ getUserSSOProviders: {
20
+ query: vi.fn(),
21
+ },
22
+ unlinkSSOProvider: {
23
+ mutate: vi.fn(),
24
+ },
25
+ makeUserOnboarded: {
26
+ mutate: vi.fn(),
27
+ },
28
+ updatePreference: {
29
+ mutate: vi.fn(),
30
+ },
31
+ updateGuide: {
32
+ mutate: vi.fn(),
33
+ },
34
+ updateSettings: {
35
+ mutate: vi.fn(),
36
+ },
37
+ resetSettings: {
38
+ mutate: vi.fn(),
39
+ },
40
+ },
41
+ },
42
+ }));
43
+
44
+ describe('ServerService', () => {
45
+ const service = new ServerService();
46
+
47
+ it('should get user registration duration', async () => {
48
+ const mockData = {
49
+ createdAt: '2023-01-01',
50
+ duration: 100,
51
+ updatedAt: '2023-01-02',
52
+ };
53
+ vi.mocked(lambdaClient.user.getUserRegistrationDuration.query).mockResolvedValue(mockData);
54
+
55
+ const result = await service.getUserRegistrationDuration();
56
+ expect(result).toEqual(mockData);
57
+ });
58
+
59
+ it('should get user state', async () => {
60
+ const mockState: UserInitializationState = {
61
+ isOnboard: true,
62
+ preference: {
63
+ telemetry: true,
64
+ },
65
+ settings: {},
66
+ };
67
+ vi.mocked(lambdaClient.user.getUserState.query).mockResolvedValue(mockState);
68
+
69
+ const result = await service.getUserState();
70
+ expect(result).toEqual(mockState);
71
+ });
72
+
73
+ it('should get user SSO providers', async () => {
74
+ const mockProviders = [
75
+ {
76
+ provider: 'google',
77
+ providerAccountId: '123',
78
+ userId: 'user1',
79
+ type: 'oauth' as const,
80
+ access_token: 'token',
81
+ token_type: 'bearer' as const,
82
+ expires_at: 123,
83
+ scope: 'email profile',
84
+ },
85
+ ];
86
+ vi.mocked(lambdaClient.user.getUserSSOProviders.query).mockResolvedValue(mockProviders);
87
+
88
+ const result = await service.getUserSSOProviders();
89
+ expect(result).toEqual(mockProviders);
90
+ });
91
+
92
+ it('should unlink SSO provider', async () => {
93
+ const provider = 'google';
94
+ const providerAccountId = '123';
95
+ await service.unlinkSSOProvider(provider, providerAccountId);
96
+
97
+ expect(lambdaClient.user.unlinkSSOProvider.mutate).toHaveBeenCalledWith({
98
+ provider,
99
+ providerAccountId,
100
+ });
101
+ });
102
+
103
+ it('should make user onboarded', async () => {
104
+ await service.makeUserOnboarded();
105
+ expect(lambdaClient.user.makeUserOnboarded.mutate).toHaveBeenCalled();
106
+ });
107
+
108
+ it('should update user preference', async () => {
109
+ const preference: Partial<UserPreference> = {
110
+ telemetry: true,
111
+ useCmdEnterToSend: true,
112
+ };
113
+ await service.updatePreference(preference);
114
+ expect(lambdaClient.user.updatePreference.mutate).toHaveBeenCalledWith(preference);
115
+ });
116
+
117
+ it('should update user guide', async () => {
118
+ const guide = {
119
+ moveSettingsToAvatar: true,
120
+ topic: false,
121
+ uploadFileInKnowledgeBase: true,
122
+ };
123
+ await service.updateGuide(guide);
124
+ expect(lambdaClient.user.updateGuide.mutate).toHaveBeenCalledWith(guide);
125
+ });
126
+
127
+ it('should update user settings', async () => {
128
+ const settings: DeepPartial<UserSettings> = {
129
+ defaultAgent: {
130
+ config: {
131
+ model: 'gpt-4',
132
+ provider: 'openai',
133
+ },
134
+ meta: {
135
+ avatar: 'avatar',
136
+ description: 'test agent',
137
+ },
138
+ },
139
+ };
140
+ const signal = new AbortController().signal;
141
+ await service.updateUserSettings(settings, signal);
142
+ expect(lambdaClient.user.updateSettings.mutate).toHaveBeenCalledWith(settings, { signal });
143
+ });
144
+
145
+ it('should reset user settings', async () => {
146
+ await service.resetUserSettings();
147
+ expect(lambdaClient.user.resetSettings.mutate).toHaveBeenCalled();
148
+ });
149
+ });
@@ -455,7 +455,7 @@ export const generateAIChat: StateCreator<
455
455
  await messageService.updateMessageError(messageId, error);
456
456
  await refreshMessages();
457
457
  },
458
- onFinish: async (content, { traceId, observationId, toolCalls, reasoning }) => {
458
+ onFinish: async (content, { traceId, observationId, toolCalls, reasoning, citations }) => {
459
459
  // if there is traceId, update it
460
460
  if (traceId) {
461
461
  msgTraceId = traceId;
@@ -470,15 +470,26 @@ export const generateAIChat: StateCreator<
470
470
  }
471
471
 
472
472
  // update the content after fetch result
473
- await internal_updateMessageContent(
474
- messageId,
475
- content,
473
+ await internal_updateMessageContent(messageId, content, {
476
474
  toolCalls,
477
- !!reasoning ? { content: reasoning, duration } : undefined,
478
- );
475
+ reasoning: !!reasoning ? { content: reasoning, duration } : undefined,
476
+ search: !!citations ? { citations } : undefined,
477
+ });
479
478
  },
480
479
  onMessageHandle: async (chunk) => {
481
480
  switch (chunk.type) {
481
+ case 'citations': {
482
+ // if there is no citations, then stop
483
+ if (!chunk.citations || chunk.citations.length <= 0) return;
484
+
485
+ internal_dispatchMessage({
486
+ id: messageId,
487
+ type: 'updateMessage',
488
+ value: { search: { citations: chunk.citations } },
489
+ });
490
+ break;
491
+ }
492
+
482
493
  case 'text': {
483
494
  output += chunk.text;
484
495
 
@@ -16,6 +16,7 @@ import {
16
16
  ChatMessage,
17
17
  ChatMessageError,
18
18
  CreateMessageParams,
19
+ GroundingSearch,
19
20
  MessageToolCall,
20
21
  ModelReasoning,
21
22
  } from '@/types/message';
@@ -73,8 +74,11 @@ export interface ChatMessageAction {
73
74
  internal_updateMessageContent: (
74
75
  id: string,
75
76
  content: string,
76
- toolCalls?: MessageToolCall[],
77
- reasoning?: ModelReasoning,
77
+ extra?: {
78
+ toolCalls?: MessageToolCall[];
79
+ reasoning?: ModelReasoning;
80
+ search?: GroundingSearch;
81
+ },
78
82
  ) => Promise<void>;
79
83
  /**
80
84
  * update the message error with optimistic update
@@ -272,17 +276,17 @@ export const chatMessage: StateCreator<
272
276
  await messageService.updateMessage(id, { error });
273
277
  await get().refreshMessages();
274
278
  },
275
- internal_updateMessageContent: async (id, content, toolCalls, reasoning) => {
279
+ internal_updateMessageContent: async (id, content, extra) => {
276
280
  const { internal_dispatchMessage, refreshMessages, internal_transformToolCalls } = get();
277
281
 
278
282
  // Due to the async update method and refresh need about 100ms
279
283
  // we need to update the message content at the frontend to avoid the update flick
280
284
  // refs: https://medium.com/@kyledeguzmanx/what-are-optimistic-updates-483662c3e171
281
- if (toolCalls) {
285
+ if (extra?.toolCalls) {
282
286
  internal_dispatchMessage({
283
287
  id,
284
288
  type: 'updateMessage',
285
- value: { tools: internal_transformToolCalls(toolCalls) },
289
+ value: { tools: internal_transformToolCalls(extra?.toolCalls) },
286
290
  });
287
291
  } else {
288
292
  internal_dispatchMessage({ id, type: 'updateMessage', value: { content } });
@@ -290,8 +294,9 @@ export const chatMessage: StateCreator<
290
294
 
291
295
  await messageService.updateMessage(id, {
292
296
  content,
293
- tools: toolCalls ? internal_transformToolCalls(toolCalls) : undefined,
294
- reasoning,
297
+ tools: extra?.toolCalls ? internal_transformToolCalls(extra?.toolCalls) : undefined,
298
+ reasoning: extra?.reasoning,
299
+ search: extra?.search,
295
300
  });
296
301
  await refreshMessages();
297
302
  },
@@ -34,6 +34,11 @@ export interface ModelAbilities {
34
34
  * whether model supports reasoning
35
35
  */
36
36
  reasoning?: boolean;
37
+ /**
38
+ * whether model supports search web
39
+ */
40
+ search?: boolean;
41
+
37
42
  /**
38
43
  * whether model supports vision
39
44
  */
@@ -1,3 +1,15 @@
1
+ export interface CitationItem {
2
+ id?: string;
3
+ onlyUrl?: boolean;
4
+ title?: string;
5
+ url: string;
6
+ }
7
+
8
+ export interface GroundingSearch {
9
+ citations?: CitationItem[];
10
+ searchQueries?: string[];
11
+ }
12
+
1
13
  export interface ModelReasoning {
2
14
  content?: string;
3
15
  duration?: number;
@@ -20,6 +32,7 @@ export interface MessageItem {
20
32
  quotaId: string | null;
21
33
  reasoning: ModelReasoning | null;
22
34
  role: string;
35
+ search: GroundingSearch | null;
23
36
  sessionId: string | null;
24
37
  threadId: string | null;
25
38
  // jsonb type
@@ -2,7 +2,7 @@ import { IPluginErrorType } from '@lobehub/chat-plugin-sdk';
2
2
 
3
3
  import { ILobeAgentRuntimeErrorType } from '@/libs/agent-runtime';
4
4
  import { ErrorType } from '@/types/fetch';
5
- import { MessageRoleType, ModelReasoning } from '@/types/message/base';
5
+ import { GroundingSearch, MessageRoleType, ModelReasoning } from '@/types/message/base';
6
6
  import { ChatPluginPayload, ChatToolPayload } from '@/types/message/tools';
7
7
  import { Translate } from '@/types/message/translate';
8
8
  import { MetaData } from '@/types/meta';
@@ -100,11 +100,12 @@ export interface ChatMessage {
100
100
  ragRawQuery?: string | null;
101
101
 
102
102
  reasoning?: ModelReasoning | null;
103
-
104
103
  /**
105
104
  * message role type
106
105
  */
107
106
  role: MessageRoleType;
107
+
108
+ search?: GroundingSearch | null;
108
109
  sessionId?: string;
109
110
  threadId?: string | null;
110
111
  tool_call_id?: string;
@@ -6,6 +6,7 @@ import { ChatErrorType } from '@/types/fetch';
6
6
  import { SmoothingParams } from '@/types/llm';
7
7
  import {
8
8
  ChatMessageError,
9
+ CitationItem,
9
10
  MessageToolCall,
10
11
  MessageToolCallChunk,
11
12
  MessageToolCallSchema,
@@ -20,6 +21,7 @@ type SSEFinishType = 'done' | 'error' | 'abort';
20
21
  export type OnFinishHandler = (
21
22
  text: string,
22
23
  context: {
24
+ citations?: CitationItem[];
23
25
  observationId?: string | null;
24
26
  reasoning?: string;
25
27
  toolCalls?: MessageToolCall[];
@@ -38,6 +40,11 @@ export interface MessageReasoningChunk {
38
40
  type: 'reasoning';
39
41
  }
40
42
 
43
+ export interface MessageCitationsChunk {
44
+ citations: CitationItem[];
45
+ type: 'citations';
46
+ }
47
+
41
48
  interface MessageToolCallsChunk {
42
49
  isAnimationActives?: boolean[];
43
50
  tool_calls: MessageToolCall[];
@@ -50,7 +57,7 @@ export interface FetchSSEOptions {
50
57
  onErrorHandle?: (error: ChatMessageError) => void;
51
58
  onFinish?: OnFinishHandler;
52
59
  onMessageHandle?: (
53
- chunk: MessageTextChunk | MessageToolCallsChunk | MessageReasoningChunk,
60
+ chunk: MessageTextChunk | MessageToolCallsChunk | MessageReasoningChunk | MessageCitationsChunk,
54
61
  ) => void;
55
62
  smoothing?: SmoothingParams | boolean;
56
63
  }
@@ -279,6 +286,7 @@ export const fetchSSE = async (url: string, options: RequestInit & FetchSSEOptio
279
286
  startSpeed: smoothingSpeed,
280
287
  });
281
288
 
289
+ let citations: CitationItem[] | undefined = undefined;
282
290
  await fetchEventSource(url, {
283
291
  body: options.body,
284
292
  fetch: options?.fetcher,
@@ -350,6 +358,13 @@ export const fetchSSE = async (url: string, options: RequestInit & FetchSSEOptio
350
358
 
351
359
  break;
352
360
  }
361
+
362
+ case 'citations': {
363
+ citations = data;
364
+ options.onMessageHandle?.({ citations: data, type: 'citations' });
365
+ break;
366
+ }
367
+
353
368
  case 'reasoning': {
354
369
  if (textSmoothing) {
355
370
  thinkingController.pushToQueue(data);
@@ -419,6 +434,7 @@ export const fetchSSE = async (url: string, options: RequestInit & FetchSSEOptio
419
434
  }
420
435
 
421
436
  await options?.onFinish?.(output, {
437
+ citations,
422
438
  observationId,
423
439
  reasoning: !!thinking ? thinking : undefined,
424
440
  toolCalls,