@lobehub/chat 1.79.4 → 1.79.5

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (29) hide show
  1. package/CHANGELOG.md +25 -0
  2. package/changelog/v1.json +9 -0
  3. package/package.json +3 -3
  4. package/src/app/(backend)/webapi/chat/[provider]/route.ts +1 -4
  5. package/src/app/(backend)/webapi/plugin/gateway/route.ts +1 -0
  6. package/src/app/(backend)/webapi/trace/route.ts +6 -1
  7. package/src/const/trace.ts +2 -4
  8. package/src/database/models/__tests__/_util.ts +4 -2
  9. package/src/libs/agent-runtime/AgentRuntime.test.ts +11 -17
  10. package/src/libs/agent-runtime/helpers/index.ts +1 -0
  11. package/src/{utils/fetch/__tests__ → libs/agent-runtime/helpers}/parseToolCalls.test.ts +1 -2
  12. package/src/libs/agent-runtime/index.ts +1 -0
  13. package/src/libs/agent-runtime/types/chat.ts +41 -9
  14. package/src/libs/agent-runtime/utils/openaiCompatibleFactory/index.test.ts +4 -2
  15. package/src/libs/agent-runtime/utils/streams/anthropic.test.ts +4 -7
  16. package/src/libs/agent-runtime/utils/streams/bedrock/llama.test.ts +6 -14
  17. package/src/libs/agent-runtime/utils/streams/google-ai.test.ts +3 -6
  18. package/src/libs/agent-runtime/utils/streams/ollama.test.ts +3 -9
  19. package/src/libs/agent-runtime/utils/streams/openai.test.ts +5 -8
  20. package/src/libs/agent-runtime/utils/streams/protocol.ts +55 -10
  21. package/src/libs/agent-runtime/utils/streams/qwen.test.ts +3 -6
  22. package/src/libs/agent-runtime/utils/streams/spark.test.ts +63 -60
  23. package/src/libs/agent-runtime/utils/streams/vertex-ai.test.ts +3 -7
  24. package/src/libs/agent-runtime/zhipu/index.test.ts +2 -2
  25. package/src/server/modules/AgentRuntime/index.ts +4 -75
  26. package/src/server/modules/AgentRuntime/trace.ts +107 -0
  27. package/src/store/chat/slices/aiChat/actions/generateAIChat.ts +6 -0
  28. package/src/utils/fetch/fetchSSE.ts +1 -1
  29. /package/src/{utils/fetch → libs/agent-runtime/helpers}/parseToolCalls.ts +0 -0
package/CHANGELOG.md CHANGED
@@ -2,6 +2,31 @@
2
2
 
3
3
  # Changelog
4
4
 
5
+ ### [Version 1.79.5](https://github.com/lobehub/lobe-chat/compare/v1.79.4...v1.79.5)
6
+
7
+ <sup>Released on **2025-04-10**</sup>
8
+
9
+ #### 🐛 Bug Fixes
10
+
11
+ - **misc**: Fix langfuse intergation.
12
+
13
+ <br/>
14
+
15
+ <details>
16
+ <summary><kbd>Improvements and Fixes</kbd></summary>
17
+
18
+ #### What's fixed
19
+
20
+ - **misc**: Fix langfuse intergation, closes [#7367](https://github.com/lobehub/lobe-chat/issues/7367) ([22b5236](https://github.com/lobehub/lobe-chat/commit/22b5236))
21
+
22
+ </details>
23
+
24
+ <div align="right">
25
+
26
+ [![](https://img.shields.io/badge/-BACK_TO_TOP-151515?style=flat-square)](#readme-top)
27
+
28
+ </div>
29
+
5
30
  ### [Version 1.79.4](https://github.com/lobehub/lobe-chat/compare/v1.79.3...v1.79.4)
6
31
 
7
32
  <sup>Released on **2025-04-10**</sup>
package/changelog/v1.json CHANGED
@@ -1,4 +1,13 @@
1
1
  [
2
+ {
3
+ "children": {
4
+ "fixes": [
5
+ "Fix langfuse intergation."
6
+ ]
7
+ },
8
+ "date": "2025-04-10",
9
+ "version": "1.79.5"
10
+ },
2
11
  {
3
12
  "children": {
4
13
  "improvements": [
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@lobehub/chat",
3
- "version": "1.79.4",
3
+ "version": "1.79.5",
4
4
  "description": "Lobe Chat - an open-source, high-performance chatbot framework that supports speech synthesis, multimodal, and extensible Function Call plugin system. Supports one-click free deployment of your private ChatGPT/LLM web application.",
5
5
  "keywords": [
6
6
  "framework",
@@ -189,8 +189,8 @@
189
189
  "js-sha256": "^0.11.0",
190
190
  "jsonl-parse-stringify": "^1.0.3",
191
191
  "langchain": "^0.3.19",
192
- "langfuse": "3.29.1",
193
- "langfuse-core": "3.29.1",
192
+ "langfuse": "^3.37.1",
193
+ "langfuse-core": "^3.37.1",
194
194
  "lodash-es": "^4.17.21",
195
195
  "lucide-react": "^0.487.0",
196
196
  "mammoth": "^1.9.0",
@@ -33,10 +33,7 @@ export const POST = checkAuth(async (req: Request, { params, jwtPayload, createR
33
33
  let traceOptions = {};
34
34
  // If user enable trace
35
35
  if (tracePayload?.enabled) {
36
- traceOptions = createTraceOptions(data, {
37
- provider,
38
- trace: tracePayload,
39
- });
36
+ traceOptions = createTraceOptions(data, { provider, trace: tracePayload });
40
37
  }
41
38
 
42
39
  return await agentRuntime.chat(data, {
@@ -52,6 +52,7 @@ export const POST = async (req: Request) => {
52
52
  return createErrorResponse(result.error as ErrorType);
53
53
  }
54
54
 
55
+ // TODO: need to be replace by better telemetry system
55
56
  // add trace
56
57
  const tracePayload = getTracePayload(req);
57
58
  const traceClient = new TraceClient();
@@ -1,3 +1,5 @@
1
+ import { after } from 'next/server';
2
+
1
3
  import { TraceEventType } from '@/const/trace';
2
4
  import { TraceClient } from '@/libs/traces';
3
5
  import { TraceEventBasePayload, TraceEventPayloads } from '@/types/trace';
@@ -35,6 +37,9 @@ export const POST = async (req: Request) => {
35
37
  }
36
38
  }
37
39
 
38
- await traceClient.shutdownAsync();
40
+ after(async () => {
41
+ await traceClient.shutdownAsync();
42
+ });
43
+
39
44
  return new Response(undefined, { status: 201 });
40
45
  };
@@ -7,15 +7,13 @@ export enum TraceNameMap {
7
7
  Conversation = 'Conversation',
8
8
  EmojiPicker = 'Emoji Picker',
9
9
  FetchPluginAPI = 'Fetch Plugin API',
10
- InvokePlugin = 'Invoke Plugin',
11
10
  LanguageDetect = 'Language Detect',
11
+ SearchIntentRecognition = 'Search Intent Recognition',
12
12
  SummaryAgentDescription = 'Summary Agent Description',
13
13
  SummaryAgentTags = 'Summary Agent Tags',
14
14
  SummaryAgentTitle = 'Summary Agent Title',
15
15
  SummaryTopicTitle = 'Summary Topic Title',
16
16
  Translator = 'Translator',
17
- // mean user have relative events
18
- UserEvents = 'User Events',
19
17
  }
20
18
 
21
19
  export enum TraceEventType {
@@ -33,7 +31,7 @@ export enum TraceTagMap {
33
31
  Chat = 'Chat Competition',
34
32
  SystemChain = 'System Chain',
35
33
  ToolCalling = 'Tool Calling',
36
- ToolsCall = 'Tools Call',
34
+ ToolsCalling = 'Tools Calling',
37
35
  }
38
36
 
39
37
  export interface TracePayload {
@@ -1,11 +1,13 @@
1
1
  import { clientDB, initializeDB } from '@/database/client/db';
2
- import { getTestDBInstance } from '@/database/core/dbForTest';
3
2
  import { LobeChatDatabase } from '@/database/type';
4
3
 
5
4
  const isServerDBMode = process.env.TEST_SERVER_DB === '1';
6
5
 
7
6
  export const getTestDB = async () => {
8
- if (isServerDBMode) return await getTestDBInstance();
7
+ if (isServerDBMode) {
8
+ const { getTestDBInstance } = await import('@/database/core/dbForTest');
9
+ return await getTestDBInstance();
10
+ }
9
11
 
10
12
  await initializeDB();
11
13
  return clientDB as LobeChatDatabase;
@@ -66,10 +66,7 @@ const testRuntime = (providerId: string, payload?: any) => {
66
66
  let mockModelRuntime: AgentRuntime;
67
67
  beforeEach(async () => {
68
68
  const jwtPayload: JWTPayload = { apiKey: 'user-openai-key', baseURL: 'user-endpoint' };
69
- mockModelRuntime = await AgentRuntime.initializeWithProvider(
70
- ModelProvider.OpenAI,
71
- jwtPayload,
72
- );
69
+ mockModelRuntime = await AgentRuntime.initializeWithProvider(ModelProvider.OpenAI, jwtPayload);
73
70
  });
74
71
 
75
72
  describe('AgentRuntime', () => {
@@ -112,7 +109,7 @@ describe('AgentRuntime', () => {
112
109
  provider: 'openai',
113
110
  trace: {
114
111
  traceId: 'test-trace-id',
115
- traceName: TraceNameMap.Conversation,
112
+ traceName: TraceNameMap.SummaryTopicTitle,
116
113
  sessionId: 'test-session-id',
117
114
  topicId: 'test-topic-id',
118
115
  tags: [],
@@ -136,7 +133,7 @@ describe('AgentRuntime', () => {
136
133
  provider: 'openai',
137
134
  trace: {
138
135
  traceId: 'test-trace-id',
139
- traceName: TraceNameMap.Conversation,
136
+ traceName: TraceNameMap.SummaryTopicTitle,
140
137
  sessionId: 'test-session-id',
141
138
  topicId: 'test-topic-id',
142
139
  tags: [],
@@ -147,7 +144,7 @@ describe('AgentRuntime', () => {
147
144
 
148
145
  const updateMock = vi.fn();
149
146
 
150
- it('should call experimental_onToolCall correctly', async () => {
147
+ it('should call onToolsCalling correctly', async () => {
151
148
  vi.spyOn(langfuseCfg, 'getLangfuseConfig').mockReturnValue({
152
149
  ENABLE_LANGFUSE: true,
153
150
  LANGFUSE_PUBLIC_KEY: 'abc',
@@ -157,9 +154,9 @@ describe('AgentRuntime', () => {
157
154
  // 使用 spyOn 模拟 chat 方法
158
155
  vi.spyOn(LobeOpenAI.prototype, 'chat').mockImplementation(
159
156
  async (payload, { callback }: any) => {
160
- // 模拟 experimental_onToolCall 回调的触发
161
- if (callback?.experimental_onToolCall) {
162
- await callback.experimental_onToolCall();
157
+ // 模拟 onToolCall 回调的触发
158
+ if (callback?.onToolsCalling) {
159
+ await callback.onToolsCalling();
163
160
  }
164
161
  return new Response('abc');
165
162
  },
@@ -168,7 +165,7 @@ describe('AgentRuntime', () => {
168
165
 
169
166
  await mockModelRuntime.chat(payload, createTraceOptions(payload, options));
170
167
 
171
- expect(updateMock).toHaveBeenCalledWith({ tags: ['Tools Call'] });
168
+ expect(updateMock).toHaveBeenCalledWith({ tags: ['Tools Calling'] });
172
169
  });
173
170
  it('should call onStart correctly', async () => {
174
171
  vi.spyOn(langfuseCfg, 'getLangfuseConfig').mockReturnValue({
@@ -204,7 +201,7 @@ describe('AgentRuntime', () => {
204
201
  vi.spyOn(LobeOpenAI.prototype, 'chat').mockImplementation(
205
202
  async (payload, { callback }: any) => {
206
203
  if (callback?.onCompletion) {
207
- await callback.onCompletion('Test completion');
204
+ await callback.onCompletion({ text: 'Test completion' });
208
205
  }
209
206
  return new Response('Success');
210
207
  },
@@ -215,14 +212,11 @@ describe('AgentRuntime', () => {
215
212
  // Verify onCompletion was called with expected output
216
213
  expect(updateMock).toHaveBeenCalledWith({
217
214
  endTime: expect.any(Date),
218
- metadata: {
219
- provider: 'openai',
220
- tools: undefined,
221
- },
215
+ metadata: {},
222
216
  output: 'Test completion',
223
217
  });
224
218
  });
225
- it('should call onFinal correctly', async () => {
219
+ it.skip('should call onFinal correctly', async () => {
226
220
  vi.spyOn(langfuseCfg, 'getLangfuseConfig').mockReturnValue({
227
221
  ENABLE_LANGFUSE: true,
228
222
  LANGFUSE_PUBLIC_KEY: 'abc',
@@ -0,0 +1 @@
1
+ export * from './parseToolCalls';
@@ -1,7 +1,6 @@
1
1
  import { describe, expect, it } from 'vitest';
2
- import { ZodError } from 'zod';
3
2
 
4
- import { parseToolCalls } from '../parseToolCalls';
3
+ import { parseToolCalls } from './parseToolCalls';
5
4
 
6
5
  describe('parseToolCalls', () => {
7
6
  it('should create add new item', () => {
@@ -8,6 +8,7 @@ export { LobeDeepSeekAI } from './deepseek';
8
8
  export * from './error';
9
9
  export { LobeGoogleAI } from './google';
10
10
  export { LobeGroq } from './groq';
11
+ export * from './helpers';
11
12
  export { LobeMinimaxAI } from './minimax';
12
13
  export { LobeMistralAI } from './mistral';
13
14
  export { LobeMoonshotAI } from './moonshot';
@@ -1,4 +1,25 @@
1
- import { MessageToolCall } from '@/types/message';
1
+ import { DeepPartial } from 'utility-types';
2
+
3
+ import { ModelTokensUsage, ToolFunction } from '@/types/message';
4
+
5
+ export interface MessageToolCall {
6
+ /**
7
+ * The function that the model called.
8
+ */
9
+ function: ToolFunction;
10
+
11
+ /**
12
+ * The ID of the tool call.
13
+ */
14
+ id: string;
15
+
16
+ /**
17
+ * The type of the tool. Currently, only `function` is supported.
18
+ */
19
+ type: 'function' | string;
20
+ }
21
+
22
+ export type MessageToolCallChunk = DeepPartial<MessageToolCall> & { index: number };
2
23
 
3
24
  export type LLMRoleType = 'user' | 'system' | 'assistant' | 'function' | 'tool';
4
25
 
@@ -165,18 +186,29 @@ export interface ChatCompletionTool {
165
186
  type: 'function';
166
187
  }
167
188
 
189
+ interface OnFinishData {
190
+ grounding?: any;
191
+ text: string;
192
+ thinking?: string;
193
+ toolsCalling?: MessageToolCall[];
194
+ usage?: ModelTokensUsage;
195
+ }
196
+
168
197
  export interface ChatStreamCallbacks {
198
+ onCompletion?: (data: OnFinishData) => Promise<void> | void;
169
199
  /**
170
- * `onCompletion`: Called for each tokenized message.
200
+ * `onFinal`: Called once when the stream is closed with the final completion message.
171
201
  **/
172
- onCompletion?: (completion: string) => Promise<void> | void;
173
- /** `onFinal`: Called once when the stream is closed with the final completion message. */
174
- onFinal?: (completion: string) => Promise<void> | void;
202
+ onFinal?: (data: OnFinishData) => Promise<void> | void;
203
+ onGrounding?: (grounding: any) => Promise<void> | void;
175
204
  /** `onStart`: Called once when the stream is initialized. */
176
205
  onStart?: () => Promise<void> | void;
177
206
  /** `onText`: Called for each text chunk. */
178
- onText?: (text: string) => Promise<void> | void;
179
- /** `onToken`: Called for each tokenized message. */
180
- onToken?: (token: string) => Promise<void> | void;
181
- onToolCall?: () => Promise<void> | void;
207
+ onText?: (content: string) => Promise<void> | void;
208
+ onThinking?: (content: string) => Promise<void> | void;
209
+ onToolsCalling?: (data: {
210
+ chunk: MessageToolCallChunk[];
211
+ toolsCalling: MessageToolCall[];
212
+ }) => Promise<void> | void;
213
+ onUsage?: (usage: ModelTokensUsage) => Promise<void> | void;
182
214
  }
@@ -776,7 +776,7 @@ describe('LobeOpenAICompatibleFactory', () => {
776
776
  // 准备 callback 和 headers
777
777
  const mockCallback: ChatStreamCallbacks = {
778
778
  onStart: vi.fn(),
779
- onToken: vi.fn(),
779
+ onCompletion: vi.fn(),
780
780
  };
781
781
  const mockHeaders = { 'Custom-Header': 'TestValue' };
782
782
 
@@ -793,7 +793,9 @@ describe('LobeOpenAICompatibleFactory', () => {
793
793
  // 验证 callback 被调用
794
794
  await result.text(); // 确保流被消费
795
795
  expect(mockCallback.onStart).toHaveBeenCalled();
796
- expect(mockCallback.onToken).toHaveBeenCalledWith('hello');
796
+ expect(mockCallback.onCompletion).toHaveBeenCalledWith({
797
+ text: 'hello',
798
+ });
797
799
 
798
800
  // 验证 headers 被正确传递
799
801
  expect(result.headers.get('Custom-Header')).toEqual('TestValue');
@@ -58,13 +58,11 @@ describe('AnthropicStream', () => {
58
58
 
59
59
  const onStartMock = vi.fn();
60
60
  const onTextMock = vi.fn();
61
- const onTokenMock = vi.fn();
62
61
  const onCompletionMock = vi.fn();
63
62
 
64
63
  const protocolStream = AnthropicStream(mockAnthropicStream, {
65
64
  onStart: onStartMock,
66
65
  onText: onTextMock,
67
- onToken: onTokenMock,
68
66
  onCompletion: onCompletionMock,
69
67
  });
70
68
 
@@ -92,9 +90,8 @@ describe('AnthropicStream', () => {
92
90
  ]);
93
91
 
94
92
  expect(onStartMock).toHaveBeenCalledTimes(1);
95
- expect(onTextMock).toHaveBeenNthCalledWith(1, '"Hello"');
96
- expect(onTextMock).toHaveBeenNthCalledWith(2, '" world!"');
97
- expect(onTokenMock).toHaveBeenCalledTimes(2);
93
+ expect(onTextMock).toHaveBeenNthCalledWith(1, 'Hello');
94
+ expect(onTextMock).toHaveBeenNthCalledWith(2, ' world!');
98
95
  expect(onCompletionMock).toHaveBeenCalledTimes(1);
99
96
  });
100
97
 
@@ -168,7 +165,7 @@ describe('AnthropicStream', () => {
168
165
  const onToolCallMock = vi.fn();
169
166
 
170
167
  const protocolStream = AnthropicStream(mockReadableStream, {
171
- onToolCall: onToolCallMock,
168
+ onToolsCalling: onToolCallMock,
172
169
  });
173
170
 
174
171
  const decoder = new TextDecoder();
@@ -320,7 +317,7 @@ describe('AnthropicStream', () => {
320
317
  const onToolCallMock = vi.fn();
321
318
 
322
319
  const protocolStream = AnthropicStream(mockReadableStream, {
323
- onToolCall: onToolCallMock,
320
+ onToolsCalling: onToolCallMock,
324
321
  });
325
322
 
326
323
  const decoder = new TextDecoder();
@@ -20,13 +20,11 @@ describe('AWSBedrockLlamaStream', () => {
20
20
 
21
21
  const onStartMock = vi.fn();
22
22
  const onTextMock = vi.fn();
23
- const onTokenMock = vi.fn();
24
23
  const onCompletionMock = vi.fn();
25
24
 
26
25
  const protocolStream = AWSBedrockLlamaStream(mockBedrockStream, {
27
26
  onStart: onStartMock,
28
27
  onText: onTextMock,
29
- onToken: onTokenMock,
30
28
  onCompletion: onCompletionMock,
31
29
  });
32
30
 
@@ -51,9 +49,8 @@ describe('AWSBedrockLlamaStream', () => {
51
49
  ]);
52
50
 
53
51
  expect(onStartMock).toHaveBeenCalledTimes(1);
54
- expect(onTextMock).toHaveBeenNthCalledWith(1, '"Hello"');
55
- expect(onTextMock).toHaveBeenNthCalledWith(2, '" world!"');
56
- expect(onTokenMock).toHaveBeenCalledTimes(2);
52
+ expect(onTextMock).toHaveBeenNthCalledWith(1, 'Hello');
53
+ expect(onTextMock).toHaveBeenNthCalledWith(2, ' world!');
57
54
  expect(onCompletionMock).toHaveBeenCalledTimes(1);
58
55
  });
59
56
 
@@ -73,13 +70,11 @@ describe('AWSBedrockLlamaStream', () => {
73
70
 
74
71
  const onStartMock = vi.fn();
75
72
  const onTextMock = vi.fn();
76
- const onTokenMock = vi.fn();
77
73
  const onCompletionMock = vi.fn();
78
74
 
79
75
  const protocolStream = AWSBedrockLlamaStream(mockBedrockStream, {
80
76
  onStart: onStartMock,
81
77
  onText: onTextMock,
82
- onToken: onTokenMock,
83
78
  onCompletion: onCompletionMock,
84
79
  });
85
80
 
@@ -104,9 +99,8 @@ describe('AWSBedrockLlamaStream', () => {
104
99
  ]);
105
100
 
106
101
  expect(onStartMock).toHaveBeenCalledTimes(1);
107
- expect(onTextMock).toHaveBeenNthCalledWith(1, '"Hello"');
108
- expect(onTextMock).toHaveBeenNthCalledWith(2, '" world!"');
109
- expect(onTokenMock).toHaveBeenCalledTimes(2);
102
+ expect(onTextMock).toHaveBeenNthCalledWith(1, 'Hello');
103
+ expect(onTextMock).toHaveBeenNthCalledWith(2, ' world!');
110
104
  expect(onCompletionMock).toHaveBeenCalledTimes(1);
111
105
  });
112
106
 
@@ -143,7 +137,6 @@ describe('AWSBedrockLlamaStream', () => {
143
137
  const protocolStream = AWSBedrockLlamaStream(mockBedrockStream, {
144
138
  onStart: onStartMock,
145
139
  onText: onTextMock,
146
- onToken: onTokenMock,
147
140
  onCompletion: onCompletionMock,
148
141
  });
149
142
 
@@ -168,9 +161,8 @@ describe('AWSBedrockLlamaStream', () => {
168
161
  ]);
169
162
 
170
163
  expect(onStartMock).toHaveBeenCalledTimes(1);
171
- expect(onTextMock).toHaveBeenNthCalledWith(1, '"Hello"');
172
- expect(onTextMock).toHaveBeenNthCalledWith(2, '" world!"');
173
- expect(onTokenMock).toHaveBeenCalledTimes(2);
164
+ expect(onTextMock).toHaveBeenNthCalledWith(1, 'Hello');
165
+ expect(onTextMock).toHaveBeenNthCalledWith(2, ' world!');
174
166
  expect(onCompletionMock).toHaveBeenCalledTimes(1);
175
167
  });
176
168
 
@@ -30,15 +30,13 @@ describe('GoogleGenerativeAIStream', () => {
30
30
 
31
31
  const onStartMock = vi.fn();
32
32
  const onTextMock = vi.fn();
33
- const onTokenMock = vi.fn();
34
33
  const onToolCallMock = vi.fn();
35
34
  const onCompletionMock = vi.fn();
36
35
 
37
36
  const protocolStream = GoogleGenerativeAIStream(mockGoogleStream, {
38
37
  onStart: onStartMock,
39
38
  onText: onTextMock,
40
- onToken: onTokenMock,
41
- onToolCall: onToolCallMock,
39
+ onToolsCalling: onToolCallMock,
42
40
  onCompletion: onCompletionMock,
43
41
  });
44
42
 
@@ -68,9 +66,8 @@ describe('GoogleGenerativeAIStream', () => {
68
66
  ]);
69
67
 
70
68
  expect(onStartMock).toHaveBeenCalledTimes(1);
71
- expect(onTextMock).toHaveBeenNthCalledWith(1, '"Hello"');
72
- expect(onTextMock).toHaveBeenNthCalledWith(2, '" world!"');
73
- expect(onTokenMock).toHaveBeenCalledTimes(2);
69
+ expect(onTextMock).toHaveBeenNthCalledWith(1, 'Hello');
70
+ expect(onTextMock).toHaveBeenNthCalledWith(2, ' world!');
74
71
  expect(onToolCallMock).toHaveBeenCalledTimes(1);
75
72
  expect(onCompletionMock).toHaveBeenCalledTimes(1);
76
73
  });
@@ -22,13 +22,11 @@ describe('OllamaStream', () => {
22
22
 
23
23
  const onStartMock = vi.fn();
24
24
  const onTextMock = vi.fn();
25
- const onTokenMock = vi.fn();
26
25
  const onCompletionMock = vi.fn();
27
26
 
28
27
  const protocolStream = OllamaStream(mockOllamaStream, {
29
28
  onStart: onStartMock,
30
29
  onText: onTextMock,
31
- onToken: onTokenMock,
32
30
  onCompletion: onCompletionMock,
33
31
  });
34
32
 
@@ -53,9 +51,8 @@ describe('OllamaStream', () => {
53
51
  ]);
54
52
 
55
53
  expect(onStartMock).toHaveBeenCalledTimes(1);
56
- expect(onTextMock).toHaveBeenNthCalledWith(1, '"Hello"');
57
- expect(onTextMock).toHaveBeenNthCalledWith(2, '" world!"');
58
- expect(onTokenMock).toHaveBeenCalledTimes(2);
54
+ expect(onTextMock).toHaveBeenNthCalledWith(1, 'Hello');
55
+ expect(onTextMock).toHaveBeenNthCalledWith(2, ' world!');
59
56
  expect(onCompletionMock).toHaveBeenCalledTimes(1);
60
57
  });
61
58
 
@@ -100,16 +97,14 @@ describe('OllamaStream', () => {
100
97
  });
101
98
  const onStartMock = vi.fn();
102
99
  const onTextMock = vi.fn();
103
- const onTokenMock = vi.fn();
104
100
  const onToolCall = vi.fn();
105
101
  const onCompletionMock = vi.fn();
106
102
 
107
103
  const protocolStream = OllamaStream(mockOllamaStream, {
108
104
  onStart: onStartMock,
109
105
  onText: onTextMock,
110
- onToken: onTokenMock,
111
106
  onCompletion: onCompletionMock,
112
- onToolCall,
107
+ onToolsCalling: onToolCall,
113
108
  });
114
109
 
115
110
  const decoder = new TextDecoder();
@@ -134,7 +129,6 @@ describe('OllamaStream', () => {
134
129
  expect(onTextMock).toHaveBeenCalledTimes(0);
135
130
  expect(onStartMock).toHaveBeenCalledTimes(1);
136
131
  expect(onToolCall).toHaveBeenCalledTimes(1);
137
- expect(onTokenMock).toHaveBeenCalledTimes(0);
138
132
  expect(onCompletionMock).toHaveBeenCalledTimes(1);
139
133
  });
140
134
  });
@@ -44,14 +44,12 @@ describe('OpenAIStream', () => {
44
44
 
45
45
  const onStartMock = vi.fn();
46
46
  const onTextMock = vi.fn();
47
- const onTokenMock = vi.fn();
48
47
  const onCompletionMock = vi.fn();
49
48
 
50
49
  const protocolStream = OpenAIStream(mockOpenAIStream, {
51
50
  callbacks: {
52
51
  onStart: onStartMock,
53
52
  onText: onTextMock,
54
- onToken: onTokenMock,
55
53
  onCompletion: onCompletionMock,
56
54
  },
57
55
  });
@@ -77,9 +75,8 @@ describe('OpenAIStream', () => {
77
75
  ]);
78
76
 
79
77
  expect(onStartMock).toHaveBeenCalledTimes(1);
80
- expect(onTextMock).toHaveBeenNthCalledWith(1, '"Hello"');
81
- expect(onTextMock).toHaveBeenNthCalledWith(2, '" world!"');
82
- expect(onTokenMock).toHaveBeenCalledTimes(2);
78
+ expect(onTextMock).toHaveBeenNthCalledWith(1, 'Hello');
79
+ expect(onTextMock).toHaveBeenNthCalledWith(2, ' world!');
83
80
  expect(onCompletionMock).toHaveBeenCalledTimes(1);
84
81
  });
85
82
 
@@ -195,7 +192,7 @@ describe('OpenAIStream', () => {
195
192
 
196
193
  const protocolStream = OpenAIStream(mockOpenAIStream, {
197
194
  callbacks: {
198
- onToolCall: onToolCallMock,
195
+ onToolsCalling: onToolCallMock,
199
196
  },
200
197
  });
201
198
 
@@ -578,7 +575,7 @@ describe('OpenAIStream', () => {
578
575
 
579
576
  const protocolStream = OpenAIStream(mockOpenAIStream, {
580
577
  callbacks: {
581
- onToolCall: onToolCallMock,
578
+ onToolsCalling: onToolCallMock,
582
579
  },
583
580
  });
584
581
 
@@ -711,7 +708,7 @@ describe('OpenAIStream', () => {
711
708
 
712
709
  const protocolStream = OpenAIStream(mockOpenAIStream, {
713
710
  callbacks: {
714
- onToolCall: onToolCallMock,
711
+ onToolsCalling: onToolCallMock,
715
712
  },
716
713
  });
717
714
 
@@ -1,7 +1,8 @@
1
- import { ChatStreamCallbacks } from '@/libs/agent-runtime';
2
1
  import { ModelTokensUsage } from '@/types/message';
3
2
 
4
3
  import { AgentRuntimeErrorType } from '../../error';
4
+ import { parseToolCalls } from '../../helpers';
5
+ import { ChatStreamCallbacks } from '../../types';
5
6
 
6
7
  /**
7
8
  * context in the stream to save temporarily data
@@ -140,18 +141,31 @@ export const createSSEProtocolTransformer = (
140
141
 
141
142
  export function createCallbacksTransformer(cb: ChatStreamCallbacks | undefined) {
142
143
  const textEncoder = new TextEncoder();
143
- let aggregatedResponse = '';
144
- let currentType = '';
144
+ let aggregatedText = '';
145
+ let aggregatedThinking: string | undefined = undefined;
146
+ let usage: ModelTokensUsage | undefined;
147
+ let grounding: any;
148
+ let toolsCalling: any;
149
+
150
+ let currentType = '' as unknown as StreamProtocolChunk['type'];
145
151
  const callbacks = cb || {};
146
152
 
147
153
  return new TransformStream({
148
154
  async flush(): Promise<void> {
155
+ const data = {
156
+ grounding,
157
+ text: aggregatedText,
158
+ thinking: aggregatedThinking,
159
+ toolsCalling,
160
+ usage,
161
+ };
162
+
149
163
  if (callbacks.onCompletion) {
150
- await callbacks.onCompletion(aggregatedResponse);
164
+ await callbacks.onCompletion(data);
151
165
  }
152
166
 
153
167
  if (callbacks.onFinal) {
154
- await callbacks.onFinal(aggregatedResponse);
168
+ await callbacks.onFinal(data);
155
169
  }
156
170
  },
157
171
 
@@ -164,22 +178,53 @@ export function createCallbacksTransformer(cb: ChatStreamCallbacks | undefined)
164
178
 
165
179
  // track the type of the chunk
166
180
  if (chunk.startsWith('event:')) {
167
- currentType = chunk.split('event:')[1].trim();
181
+ currentType = chunk.split('event:')[1].trim() as unknown as StreamProtocolChunk['type'];
168
182
  }
169
183
  // if the message is a data chunk, handle the callback
170
184
  else if (chunk.startsWith('data:')) {
171
185
  const content = chunk.split('data:')[1].trim();
172
186
 
187
+ let data: any = undefined;
188
+ try {
189
+ data = JSON.parse(content);
190
+ } catch {}
191
+
192
+ if (!data) return;
193
+
173
194
  switch (currentType) {
174
195
  case 'text': {
175
- await callbacks.onText?.(content);
176
- await callbacks.onToken?.(JSON.parse(content));
196
+ aggregatedText += data;
197
+ await callbacks.onText?.(data);
198
+ break;
199
+ }
200
+
201
+ case 'reasoning': {
202
+ if (!aggregatedThinking) {
203
+ aggregatedThinking = '';
204
+ }
205
+
206
+ aggregatedThinking += data;
207
+ await callbacks.onThinking?.(data);
208
+ break;
209
+ }
210
+
211
+ case 'usage': {
212
+ usage = data;
213
+ await callbacks.onUsage?.(data);
214
+ break;
215
+ }
216
+
217
+ case 'grounding': {
218
+ grounding = data;
219
+ await callbacks.onGrounding?.(data);
177
220
  break;
178
221
  }
179
222
 
180
223
  case 'tool_calls': {
181
- // TODO: make on ToolCall callback
182
- await callbacks.onToolCall?.();
224
+ if (!toolsCalling) toolsCalling = [];
225
+ toolsCalling = parseToolCalls(toolsCalling, data);
226
+
227
+ await callbacks.onToolsCalling?.({ chunk: data, toolsCalling });
183
228
  }
184
229
  }
185
230
  }
@@ -43,13 +43,11 @@ describe('QwenAIStream', () => {
43
43
 
44
44
  const onStartMock = vi.fn();
45
45
  const onTextMock = vi.fn();
46
- const onTokenMock = vi.fn();
47
46
  const onCompletionMock = vi.fn();
48
47
 
49
48
  const protocolStream = QwenAIStream(mockOpenAIStream, {
50
49
  onStart: onStartMock,
51
50
  onText: onTextMock,
52
- onToken: onTokenMock,
53
51
  onCompletion: onCompletionMock,
54
52
  });
55
53
 
@@ -74,9 +72,8 @@ describe('QwenAIStream', () => {
74
72
  ]);
75
73
 
76
74
  expect(onStartMock).toHaveBeenCalledTimes(1);
77
- expect(onTextMock).toHaveBeenNthCalledWith(1, '"Hello"');
78
- expect(onTextMock).toHaveBeenNthCalledWith(2, '" world!"');
79
- expect(onTokenMock).toHaveBeenCalledTimes(2);
75
+ expect(onTextMock).toHaveBeenNthCalledWith(1, 'Hello');
76
+ expect(onTextMock).toHaveBeenNthCalledWith(2, ' world!');
80
77
  expect(onCompletionMock).toHaveBeenCalledTimes(1);
81
78
  });
82
79
 
@@ -114,7 +111,7 @@ describe('QwenAIStream', () => {
114
111
  const onToolCallMock = vi.fn();
115
112
 
116
113
  const protocolStream = QwenAIStream(mockOpenAIStream, {
117
- onToolCall: onToolCallMock,
114
+ onToolsCalling: onToolCallMock,
118
115
  });
119
116
 
120
117
  const decoder = new TextDecoder();
@@ -1,41 +1,42 @@
1
+ import type OpenAI from 'openai';
1
2
  import { beforeAll, describe, expect, it, vi } from 'vitest';
3
+
2
4
  import { SparkAIStream, transformSparkResponseToStream } from './spark';
3
- import type OpenAI from 'openai';
4
5
 
5
6
  describe('SparkAIStream', () => {
6
7
  beforeAll(() => {});
7
8
 
8
9
  it('should transform non-streaming response to stream', async () => {
9
10
  const mockResponse = {
10
- id: "cha000ceba6@dx193d200b580b8f3532",
11
- object: "chat.completion",
11
+ id: 'cha000ceba6@dx193d200b580b8f3532',
12
+ object: 'chat.completion',
12
13
  created: 1734395014,
13
- model: "max-32k",
14
+ model: 'max-32k',
14
15
  choices: [
15
16
  {
16
17
  message: {
17
- role: "assistant",
18
- content: "",
18
+ role: 'assistant',
19
+ content: '',
19
20
  refusal: null,
20
21
  tool_calls: {
21
- type: "function",
22
+ type: 'function',
22
23
  function: {
23
24
  arguments: '{"city":"Shanghai"}',
24
- name: "realtime-weather____fetchCurrentWeather"
25
+ name: 'realtime-weather____fetchCurrentWeather',
25
26
  },
26
- id: "call_1"
27
- }
27
+ id: 'call_1',
28
+ },
28
29
  },
29
30
  index: 0,
30
31
  logprobs: null,
31
- finish_reason: "tool_calls"
32
- }
32
+ finish_reason: 'tool_calls',
33
+ },
33
34
  ],
34
35
  usage: {
35
36
  prompt_tokens: 8,
36
37
  completion_tokens: 0,
37
- total_tokens: 8
38
- }
38
+ total_tokens: 8,
39
+ },
39
40
  } as unknown as OpenAI.ChatCompletion;
40
41
 
41
42
  const stream = transformSparkResponseToStream(mockResponse);
@@ -48,15 +49,17 @@ describe('SparkAIStream', () => {
48
49
  }
49
50
 
50
51
  expect(chunks).toHaveLength(2);
51
- expect(chunks[0].choices[0].delta.tool_calls).toEqual([{
52
- function: {
53
- arguments: '{"city":"Shanghai"}',
54
- name: "realtime-weather____fetchCurrentWeather"
52
+ expect(chunks[0].choices[0].delta.tool_calls).toEqual([
53
+ {
54
+ function: {
55
+ arguments: '{"city":"Shanghai"}',
56
+ name: 'realtime-weather____fetchCurrentWeather',
57
+ },
58
+ id: 'call_1',
59
+ index: 0,
60
+ type: 'function',
55
61
  },
56
- id: "call_1",
57
- index: 0,
58
- type: "function"
59
- }]);
62
+ ]);
60
63
  expect(chunks[1].choices[0].finish_reason).toBeDefined();
61
64
  });
62
65
 
@@ -64,36 +67,36 @@ describe('SparkAIStream', () => {
64
67
  const mockStream = new ReadableStream({
65
68
  start(controller) {
66
69
  controller.enqueue({
67
- id: "cha000b0bf9@dx193d1ffa61cb894532",
68
- object: "chat.completion.chunk",
70
+ id: 'cha000b0bf9@dx193d1ffa61cb894532',
71
+ object: 'chat.completion.chunk',
69
72
  created: 1734395014,
70
- model: "max-32k",
73
+ model: 'max-32k',
71
74
  choices: [
72
75
  {
73
76
  delta: {
74
- role: "assistant",
75
- content: "",
77
+ role: 'assistant',
78
+ content: '',
76
79
  tool_calls: {
77
- type: "function",
80
+ type: 'function',
78
81
  function: {
79
82
  arguments: '{"city":"Shanghai"}',
80
- name: "realtime-weather____fetchCurrentWeather"
83
+ name: 'realtime-weather____fetchCurrentWeather',
81
84
  },
82
- id: "call_1"
83
- }
85
+ id: 'call_1',
86
+ },
84
87
  },
85
- index: 0
86
- }
87
- ]
88
+ index: 0,
89
+ },
90
+ ],
88
91
  } as unknown as OpenAI.ChatCompletionChunk);
89
92
  controller.close();
90
- }
93
+ },
91
94
  });
92
95
 
93
96
  const onToolCallMock = vi.fn();
94
97
 
95
98
  const protocolStream = SparkAIStream(mockStream, {
96
- onToolCall: onToolCallMock
99
+ onToolsCalling: onToolCallMock,
97
100
  });
98
101
 
99
102
  const decoder = new TextDecoder();
@@ -107,7 +110,7 @@ describe('SparkAIStream', () => {
107
110
  expect(chunks).toEqual([
108
111
  'id: cha000b0bf9@dx193d1ffa61cb894532\n',
109
112
  'event: tool_calls\n',
110
- `data: [{\"function\":{\"arguments\":\"{\\\"city\\\":\\\"Shanghai\\\"}\",\"name\":\"realtime-weather____fetchCurrentWeather\"},\"id\":\"call_1\",\"index\":0,\"type\":\"function\"}]\n\n`
113
+ `data: [{\"function\":{\"arguments\":\"{\\\"city\\\":\\\"Shanghai\\\"}\",\"name\":\"realtime-weather____fetchCurrentWeather\"},\"id\":\"call_1\",\"index\":0,\"type\":\"function\"}]\n\n`,
111
114
  ]);
112
115
 
113
116
  expect(onToolCallMock).toHaveBeenCalledTimes(1);
@@ -117,43 +120,43 @@ describe('SparkAIStream', () => {
117
120
  const mockStream = new ReadableStream({
118
121
  start(controller) {
119
122
  controller.enqueue({
120
- id: "test-id",
121
- object: "chat.completion.chunk",
123
+ id: 'test-id',
124
+ object: 'chat.completion.chunk',
122
125
  created: 1734395014,
123
- model: "max-32k",
126
+ model: 'max-32k',
124
127
  choices: [
125
128
  {
126
129
  delta: {
127
- content: "Hello",
128
- role: "assistant"
130
+ content: 'Hello',
131
+ role: 'assistant',
129
132
  },
130
- index: 0
131
- }
132
- ]
133
+ index: 0,
134
+ },
135
+ ],
133
136
  } as OpenAI.ChatCompletionChunk);
134
137
  controller.enqueue({
135
- id: "test-id",
136
- object: "chat.completion.chunk",
138
+ id: 'test-id',
139
+ object: 'chat.completion.chunk',
137
140
  created: 1734395014,
138
- model: "max-32k",
141
+ model: 'max-32k',
139
142
  choices: [
140
143
  {
141
144
  delta: {
142
- content: " World",
143
- role: "assistant"
145
+ content: ' World',
146
+ role: 'assistant',
144
147
  },
145
- index: 0
146
- }
147
- ]
148
+ index: 0,
149
+ },
150
+ ],
148
151
  } as OpenAI.ChatCompletionChunk);
149
152
  controller.close();
150
- }
153
+ },
151
154
  });
152
155
 
153
156
  const onTextMock = vi.fn();
154
-
157
+
155
158
  const protocolStream = SparkAIStream(mockStream, {
156
- onText: onTextMock
159
+ onText: onTextMock,
157
160
  });
158
161
 
159
162
  const decoder = new TextDecoder();
@@ -170,18 +173,18 @@ describe('SparkAIStream', () => {
170
173
  'data: "Hello"\n\n',
171
174
  'id: test-id\n',
172
175
  'event: text\n',
173
- 'data: " World"\n\n'
176
+ 'data: " World"\n\n',
174
177
  ]);
175
178
 
176
- expect(onTextMock).toHaveBeenNthCalledWith(1, '"Hello"');
177
- expect(onTextMock).toHaveBeenNthCalledWith(2, '" World"');
179
+ expect(onTextMock).toHaveBeenNthCalledWith(1, 'Hello');
180
+ expect(onTextMock).toHaveBeenNthCalledWith(2, ' World');
178
181
  });
179
182
 
180
183
  it('should handle empty stream', async () => {
181
184
  const mockStream = new ReadableStream({
182
185
  start(controller) {
183
186
  controller.close();
184
- }
187
+ },
185
188
  });
186
189
 
187
190
  const protocolStream = SparkAIStream(mockStream);
@@ -99,15 +99,13 @@ describe('VertexAIStream', () => {
99
99
 
100
100
  const onStartMock = vi.fn();
101
101
  const onTextMock = vi.fn();
102
- const onTokenMock = vi.fn();
103
102
  const onToolCallMock = vi.fn();
104
103
  const onCompletionMock = vi.fn();
105
104
 
106
105
  const protocolStream = VertexAIStream(mockGoogleStream, {
107
106
  onStart: onStartMock,
108
107
  onText: onTextMock,
109
- onToken: onTokenMock,
110
- onToolCall: onToolCallMock,
108
+ onToolsCalling: onToolCallMock,
111
109
  onCompletion: onCompletionMock,
112
110
  });
113
111
 
@@ -132,7 +130,7 @@ describe('VertexAIStream', () => {
132
130
  ]);
133
131
 
134
132
  expect(onStartMock).toHaveBeenCalledTimes(1);
135
- expect(onTokenMock).toHaveBeenCalledTimes(2);
133
+ expect(onTextMock).toHaveBeenCalledTimes(2);
136
134
  expect(onCompletionMock).toHaveBeenCalledTimes(1);
137
135
  });
138
136
 
@@ -202,15 +200,13 @@ describe('VertexAIStream', () => {
202
200
 
203
201
  const onStartMock = vi.fn();
204
202
  const onTextMock = vi.fn();
205
- const onTokenMock = vi.fn();
206
203
  const onToolCallMock = vi.fn();
207
204
  const onCompletionMock = vi.fn();
208
205
 
209
206
  const protocolStream = VertexAIStream(mockGoogleStream, {
210
207
  onStart: onStartMock,
211
208
  onText: onTextMock,
212
- onToken: onTokenMock,
213
- onToolCall: onToolCallMock,
209
+ onToolsCalling: onToolCallMock,
214
210
  onCompletion: onCompletionMock,
215
211
  });
216
212
 
@@ -63,7 +63,7 @@ describe('LobeZhipuAI', () => {
63
63
  // 准备 callback 和 headers
64
64
  const mockCallback: ChatStreamCallbacks = {
65
65
  onStart: vi.fn(),
66
- onToken: vi.fn(),
66
+ onText: vi.fn(),
67
67
  };
68
68
  const mockHeaders = { 'Custom-Header': 'TestValue' };
69
69
 
@@ -80,7 +80,7 @@ describe('LobeZhipuAI', () => {
80
80
  // 验证 callback 被调用
81
81
  await result.text(); // 确保流被消费
82
82
  expect(mockCallback.onStart).toHaveBeenCalled();
83
- expect(mockCallback.onToken).toHaveBeenCalledWith('hello');
83
+ expect(mockCallback.onText).toHaveBeenCalledWith('hello');
84
84
 
85
85
  // 验证 headers 被正确传递
86
86
  expect(result.headers.get('Custom-Header')).toEqual('TestValue');
@@ -1,22 +1,10 @@
1
1
  import { getLLMConfig } from '@/config/llm';
2
2
  import { JWTPayload } from '@/const/auth';
3
- import { INBOX_SESSION_ID } from '@/const/session';
4
- import {
5
- LOBE_CHAT_OBSERVATION_ID,
6
- LOBE_CHAT_TRACE_ID,
7
- TracePayload,
8
- TraceTagMap,
9
- } from '@/const/trace';
10
- import { AgentRuntime, ChatStreamPayload, ModelProvider } from '@/libs/agent-runtime';
11
- import { TraceClient } from '@/libs/traces';
3
+ import { AgentRuntime, ModelProvider } from '@/libs/agent-runtime';
12
4
 
13
5
  import apiKeyManager from './apiKeyManager';
14
6
 
15
- export interface AgentChatOptions {
16
- enableTrace?: boolean;
17
- provider: string;
18
- trace?: TracePayload;
19
- }
7
+ export * from './trace';
20
8
 
21
9
  /**
22
10
  * Retrieves the options object from environment and apikeymanager
@@ -26,7 +14,7 @@ export interface AgentChatOptions {
26
14
  * @param payload - The JWT payload.
27
15
  * @returns The options object.
28
16
  */
29
- const getLlmOptionsFromPayload = (provider: string, payload: JWTPayload) => {
17
+ const getParamsFromPayload = (provider: string, payload: JWTPayload) => {
30
18
  const llmConfig = getLLMConfig() as Record<string, any>;
31
19
 
32
20
  switch (provider) {
@@ -131,66 +119,7 @@ export const initAgentRuntimeWithUserPayload = (
131
119
  params: any = {},
132
120
  ) => {
133
121
  return AgentRuntime.initializeWithProvider(provider, {
134
- ...getLlmOptionsFromPayload(provider, payload),
122
+ ...getParamsFromPayload(provider, payload),
135
123
  ...params,
136
124
  });
137
125
  };
138
-
139
- export const createTraceOptions = (
140
- payload: ChatStreamPayload,
141
- { trace: tracePayload, provider }: AgentChatOptions,
142
- ) => {
143
- const { messages, model, tools, ...parameters } = payload;
144
- // create a trace to monitor the completion
145
- const traceClient = new TraceClient();
146
- const trace = traceClient.createTrace({
147
- id: tracePayload?.traceId,
148
- input: messages,
149
- metadata: { provider },
150
- name: tracePayload?.traceName,
151
- sessionId: `${tracePayload?.sessionId || INBOX_SESSION_ID}@${tracePayload?.topicId || 'start'}`,
152
- tags: tracePayload?.tags,
153
- userId: tracePayload?.userId,
154
- });
155
-
156
- const generation = trace?.generation({
157
- input: messages,
158
- metadata: { provider },
159
- model,
160
- modelParameters: parameters as any,
161
- name: `Chat Completion (${provider})`,
162
- startTime: new Date(),
163
- });
164
-
165
- return {
166
- callback: {
167
- experimental_onToolCall: async () => {
168
- trace?.update({
169
- tags: [...(tracePayload?.tags || []), TraceTagMap.ToolsCall],
170
- });
171
- },
172
-
173
- onCompletion: async (completion: string) => {
174
- generation?.update({
175
- endTime: new Date(),
176
- metadata: { provider, tools },
177
- output: completion,
178
- });
179
-
180
- trace?.update({ output: completion });
181
- },
182
-
183
- onFinal: async () => {
184
- await traceClient.shutdownAsync();
185
- },
186
-
187
- onStart: () => {
188
- generation?.update({ completionStartTime: new Date() });
189
- },
190
- },
191
- headers: {
192
- [LOBE_CHAT_OBSERVATION_ID]: generation?.id,
193
- [LOBE_CHAT_TRACE_ID]: trace?.id,
194
- },
195
- };
196
- };
@@ -0,0 +1,107 @@
1
+ import { after } from 'next/server';
2
+
3
+ import { INBOX_SESSION_ID } from '@/const/session';
4
+ import {
5
+ LOBE_CHAT_OBSERVATION_ID,
6
+ LOBE_CHAT_TRACE_ID,
7
+ TracePayload,
8
+ TraceTagMap,
9
+ } from '@/const/trace';
10
+ import { ChatStreamCallbacks, ChatStreamPayload } from '@/libs/agent-runtime';
11
+ import { TraceClient } from '@/libs/traces';
12
+
13
+ export interface AgentChatOptions {
14
+ enableTrace?: boolean;
15
+ provider: string;
16
+ trace?: TracePayload;
17
+ }
18
+
19
+ export const createTraceOptions = (
20
+ payload: ChatStreamPayload,
21
+ { trace: tracePayload, provider }: AgentChatOptions,
22
+ ) => {
23
+ const { messages, model, tools, ...parameters } = payload;
24
+ // create a trace to monitor the completion
25
+ const traceClient = new TraceClient();
26
+ const messageLength = messages.length;
27
+ const systemRole = messages.find((message) => message.role === 'system')?.content;
28
+
29
+ const trace = traceClient.createTrace({
30
+ id: tracePayload?.traceId,
31
+ input: messages,
32
+ metadata: { messageLength, model, provider, systemRole, tools },
33
+ name: tracePayload?.traceName,
34
+ sessionId: tracePayload?.topicId
35
+ ? tracePayload.topicId
36
+ : `${tracePayload?.sessionId || INBOX_SESSION_ID}@default`,
37
+ tags: tracePayload?.tags,
38
+ userId: tracePayload?.userId,
39
+ });
40
+
41
+ const generation = trace?.generation({
42
+ input: messages,
43
+ metadata: { messageLength, model, provider },
44
+ model,
45
+ modelParameters: parameters as any,
46
+ name: `Chat Completion (${provider})`,
47
+ startTime: new Date(),
48
+ });
49
+
50
+ return {
51
+ callback: {
52
+ onCompletion: async ({ text, thinking, usage, grounding, toolsCalling }) => {
53
+ const output =
54
+ // if the toolsCalling is not empty, we need to return the toolsCalling
55
+ !!toolsCalling && toolsCalling.length > 0
56
+ ? !!text
57
+ ? // tools calling with thinking and text
58
+ { text, thinking, toolsCalling }
59
+ : toolsCalling
60
+ : !!thinking
61
+ ? { text, thinking }
62
+ : text;
63
+
64
+ generation?.update({
65
+ endTime: new Date(),
66
+ metadata: { grounding, thinking },
67
+ output,
68
+ usage: usage
69
+ ? {
70
+ completionTokens: usage.outputTextTokens,
71
+ input: usage.totalInputTokens,
72
+ output: usage.totalOutputTokens,
73
+ promptTokens: usage.inputTextTokens,
74
+ totalTokens: usage.totalTokens,
75
+ }
76
+ : undefined,
77
+ });
78
+
79
+ trace?.update({ output });
80
+ },
81
+
82
+ onFinal: () => {
83
+ after(async () => {
84
+ try {
85
+ await traceClient.shutdownAsync();
86
+ } catch (e) {
87
+ console.error('TraceClient shutdown error:', e);
88
+ }
89
+ });
90
+ },
91
+
92
+ onStart: () => {
93
+ generation?.update({ completionStartTime: new Date() });
94
+ },
95
+
96
+ onToolsCalling: async () => {
97
+ trace?.update({
98
+ tags: [...(tracePayload?.tags || []), TraceTagMap.ToolsCalling],
99
+ });
100
+ },
101
+ } as ChatStreamCallbacks,
102
+ headers: {
103
+ [LOBE_CHAT_OBSERVATION_ID]: generation?.id,
104
+ [LOBE_CHAT_TRACE_ID]: trace?.id,
105
+ },
106
+ };
107
+ };
@@ -388,6 +388,12 @@ export const generateAIChat: StateCreator<
388
388
  });
389
389
  }
390
390
  },
391
+ trace: {
392
+ traceId: params?.traceId,
393
+ sessionId: get().activeId,
394
+ topicId: get().activeTopicId,
395
+ traceName: TraceNameMap.SearchIntentRecognition,
396
+ },
391
397
  abortController,
392
398
  onMessageHandle: async (chunk) => {
393
399
  if (chunk.type === 'tool_calls') {
@@ -2,6 +2,7 @@ import { isObject } from 'lodash-es';
2
2
 
3
3
  import { MESSAGE_CANCEL_FLAT } from '@/const/message';
4
4
  import { LOBE_CHAT_OBSERVATION_ID, LOBE_CHAT_TRACE_ID } from '@/const/trace';
5
+ import { parseToolCalls } from '@/libs/agent-runtime';
5
6
  import { ChatErrorType } from '@/types/fetch';
6
7
  import { SmoothingParams } from '@/types/llm';
7
8
  import {
@@ -18,7 +19,6 @@ import { nanoid } from '@/utils/uuid';
18
19
 
19
20
  import { fetchEventSource } from './fetchEventSource';
20
21
  import { getMessageError } from './parseError';
21
- import { parseToolCalls } from './parseToolCalls';
22
22
 
23
23
  type SSEFinishType = 'done' | 'error' | 'abort';
24
24