@lobehub/chat 1.79.4 → 1.79.6
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +50 -0
- package/changelog/v1.json +18 -0
- package/docs/self-hosting/advanced/online-search.zh-CN.mdx +59 -0
- package/package.json +3 -3
- package/src/app/(backend)/webapi/chat/[provider]/route.ts +1 -4
- package/src/app/(backend)/webapi/plugin/gateway/route.ts +1 -0
- package/src/app/(backend)/webapi/trace/route.ts +6 -1
- package/src/const/trace.ts +2 -4
- package/src/database/models/__tests__/_util.ts +4 -2
- package/src/libs/agent-runtime/AgentRuntime.test.ts +11 -17
- package/src/libs/agent-runtime/helpers/index.ts +1 -0
- package/src/{utils/fetch/__tests__ → libs/agent-runtime/helpers}/parseToolCalls.test.ts +1 -2
- package/src/libs/agent-runtime/index.ts +1 -0
- package/src/libs/agent-runtime/types/chat.ts +41 -9
- package/src/libs/agent-runtime/utils/openaiCompatibleFactory/index.test.ts +4 -2
- package/src/libs/agent-runtime/utils/streams/anthropic.test.ts +4 -7
- package/src/libs/agent-runtime/utils/streams/bedrock/llama.test.ts +6 -14
- package/src/libs/agent-runtime/utils/streams/google-ai.test.ts +3 -6
- package/src/libs/agent-runtime/utils/streams/ollama.test.ts +3 -9
- package/src/libs/agent-runtime/utils/streams/openai.test.ts +5 -8
- package/src/libs/agent-runtime/utils/streams/protocol.ts +55 -10
- package/src/libs/agent-runtime/utils/streams/qwen.test.ts +3 -6
- package/src/libs/agent-runtime/utils/streams/spark.test.ts +63 -60
- package/src/libs/agent-runtime/utils/streams/vertex-ai.test.ts +3 -7
- package/src/libs/agent-runtime/xai/index.ts +10 -0
- package/src/libs/agent-runtime/zhipu/index.test.ts +2 -2
- package/src/server/modules/AgentRuntime/index.ts +4 -75
- package/src/server/modules/AgentRuntime/trace.ts +107 -0
- package/src/store/chat/slices/aiChat/actions/generateAIChat.ts +6 -0
- package/src/utils/fetch/fetchSSE.ts +1 -1
- package/src/{utils/fetch → libs/agent-runtime/helpers}/parseToolCalls.ts +0 -0
package/src/libs/agent-runtime/utils/streams/openai.test.ts
@@ -44,14 +44,12 @@ describe('OpenAIStream', () => {
 
     const onStartMock = vi.fn();
     const onTextMock = vi.fn();
-    const onTokenMock = vi.fn();
    const onCompletionMock = vi.fn();
 
    const protocolStream = OpenAIStream(mockOpenAIStream, {
      callbacks: {
        onStart: onStartMock,
        onText: onTextMock,
-        onToken: onTokenMock,
        onCompletion: onCompletionMock,
      },
    });
@@ -77,9 +75,8 @@ describe('OpenAIStream', () => {
     ]);
 
     expect(onStartMock).toHaveBeenCalledTimes(1);
-    expect(onTextMock).toHaveBeenNthCalledWith(1, '"Hello"');
-    expect(onTextMock).toHaveBeenNthCalledWith(2, '" world!"');
-    expect(onTokenMock).toHaveBeenCalledTimes(2);
+    expect(onTextMock).toHaveBeenNthCalledWith(1, 'Hello');
+    expect(onTextMock).toHaveBeenNthCalledWith(2, ' world!');
    expect(onCompletionMock).toHaveBeenCalledTimes(1);
  });
 
@@ -195,7 +192,7 @@ describe('OpenAIStream', () => {
 
     const protocolStream = OpenAIStream(mockOpenAIStream, {
       callbacks: {
-        onToolCall: onToolCallMock,
+        onToolsCalling: onToolCallMock,
      },
    });
 
@@ -578,7 +575,7 @@ describe('OpenAIStream', () => {
 
     const protocolStream = OpenAIStream(mockOpenAIStream, {
       callbacks: {
-        onToolCall: onToolCallMock,
+        onToolsCalling: onToolCallMock,
      },
    });
 
@@ -711,7 +708,7 @@ describe('OpenAIStream', () => {
 
     const protocolStream = OpenAIStream(mockOpenAIStream, {
       callbacks: {
-        onToolCall: onToolCallMock,
+        onToolsCalling: onToolCallMock,
      },
    });
 
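Taken together, these openai.test.ts hunks trace the consumer-facing callback migration in this release: `onToken` is removed, `onText` now receives the parsed string rather than the raw JSON-encoded SSE data, and `onToolCall` becomes `onToolsCalling`. A hedged sketch of an upgraded callbacks object (the import path is an assumption; the logging is purely illustrative):

```ts
import type { ChatStreamCallbacks } from '@/libs/agent-runtime'; // assumed re-export

// 1.79.6-style callbacks: no onToken; onText gets plain strings
// ('Hello', ' world!'), and tool-call updates arrive via onToolsCalling.
const callbacks: ChatStreamCallbacks = {
  onCompletion: async (data) => console.log('final text:', data.text),
  onText: (text) => process.stdout.write(text),
  onToolsCalling: ({ toolsCalling }) => console.log('tools so far:', toolsCalling),
};
```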
package/src/libs/agent-runtime/utils/streams/protocol.ts
@@ -1,7 +1,8 @@
-import { ChatStreamCallbacks } from '@/libs/agent-runtime';
 import { ModelTokensUsage } from '@/types/message';
 
 import { AgentRuntimeErrorType } from '../../error';
+import { parseToolCalls } from '../../helpers';
+import { ChatStreamCallbacks } from '../../types';
 
 /**
  * context in the stream to save temporarily data
@@ -140,18 +141,31 @@ export const createSSEProtocolTransformer = (
 
 export function createCallbacksTransformer(cb: ChatStreamCallbacks | undefined) {
   const textEncoder = new TextEncoder();
-  let
-  let
+  let aggregatedText = '';
+  let aggregatedThinking: string | undefined = undefined;
+  let usage: ModelTokensUsage | undefined;
+  let grounding: any;
+  let toolsCalling: any;
+
+  let currentType = '' as unknown as StreamProtocolChunk['type'];
   const callbacks = cb || {};
 
   return new TransformStream({
     async flush(): Promise<void> {
+      const data = {
+        grounding,
+        text: aggregatedText,
+        thinking: aggregatedThinking,
+        toolsCalling,
+        usage,
+      };
+
      if (callbacks.onCompletion) {
-        await callbacks.onCompletion(
+        await callbacks.onCompletion(data);
      }
 
      if (callbacks.onFinal) {
-        await callbacks.onFinal(
+        await callbacks.onFinal(data);
      }
    },
 
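For orientation: `flush` now aggregates everything observed during the stream into a single object and hands it to both `onCompletion` and `onFinal`. A sketch of that shape as a consumer would see it, with field semantics read off the aggregation logic above (the interface name is hypothetical):

```ts
import type { ModelTokensUsage } from '@/types/message';

// Hypothetical consumer-side view of the object passed to
// onCompletion/onFinal by flush() above.
interface StreamFinalData {
  grounding?: unknown; // last 'grounding' event payload, if any
  text: string; // every 'text' event payload, concatenated
  thinking?: string; // every 'reasoning' event payload, concatenated
  toolsCalling?: unknown[]; // tool-call deltas merged via parseToolCalls
  usage?: ModelTokensUsage; // last 'usage' event payload, if any
}
```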
@@ -164,22 +178,53 @@ export function createCallbacksTransformer(cb: ChatStreamCallbacks | undefined)
 
       // track the type of the chunk
       if (chunk.startsWith('event:')) {
-        currentType = chunk.split('event:')[1].trim();
+        currentType = chunk.split('event:')[1].trim() as unknown as StreamProtocolChunk['type'];
      }
      // if the message is a data chunk, handle the callback
      else if (chunk.startsWith('data:')) {
        const content = chunk.split('data:')[1].trim();
 
+        let data: any = undefined;
+        try {
+          data = JSON.parse(content);
+        } catch {}
+
+        if (!data) return;
+
        switch (currentType) {
          case 'text': {
-
-            await callbacks.
+            aggregatedText += data;
+            await callbacks.onText?.(data);
+            break;
+          }
+
+          case 'reasoning': {
+            if (!aggregatedThinking) {
+              aggregatedThinking = '';
+            }
+
+            aggregatedThinking += data;
+            await callbacks.onThinking?.(data);
+            break;
+          }
+
+          case 'usage': {
+            usage = data;
+            await callbacks.onUsage?.(data);
+            break;
+          }
+
+          case 'grounding': {
+            grounding = data;
+            await callbacks.onGrounding?.(data);
            break;
          }
 
          case 'tool_calls': {
-
-
+            if (!toolsCalling) toolsCalling = [];
+            toolsCalling = parseToolCalls(toolsCalling, data);
+
+            await callbacks.onToolsCalling?.({ chunk: data, toolsCalling });
          }
        }
      }
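The transform side reads the protocol's `id:`/`event:`/`data:` lines (the same wire format asserted in the Spark tests below), JSON-parses the `data:` payload, and dispatches it to the typed callback matching the current event. A minimal runnable sketch, assuming `createCallbacksTransformer` is importable from this protocol module:

```ts
import { createCallbacksTransformer } from '@/libs/agent-runtime/utils/streams/protocol'; // assumed path

async function demo() {
  // Protocol-formatted chunks, one per enqueue, as produced upstream
  // by createSSEProtocolTransformer.
  const source = new ReadableStream<string>({
    start(controller) {
      controller.enqueue('id: chat-1\n');
      controller.enqueue('event: text\n');
      controller.enqueue('data: "Hello"\n\n'); // JSON-encoded; parsed before onText
      controller.close();
    },
  });

  await source
    .pipeThrough(createCallbacksTransformer({ onText: (text) => console.log(text) })) // logs: Hello
    .pipeTo(new WritableStream());
}

void demo();
```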
package/src/libs/agent-runtime/utils/streams/qwen.test.ts
@@ -43,13 +43,11 @@ describe('QwenAIStream', () => {
 
     const onStartMock = vi.fn();
     const onTextMock = vi.fn();
-    const onTokenMock = vi.fn();
    const onCompletionMock = vi.fn();
 
    const protocolStream = QwenAIStream(mockOpenAIStream, {
      onStart: onStartMock,
      onText: onTextMock,
-      onToken: onTokenMock,
      onCompletion: onCompletionMock,
    });
 
@@ -74,9 +72,8 @@ describe('QwenAIStream', () => {
     ]);
 
     expect(onStartMock).toHaveBeenCalledTimes(1);
-    expect(onTextMock).toHaveBeenNthCalledWith(1, '"Hello"');
-    expect(onTextMock).toHaveBeenNthCalledWith(2, '" world!"');
-    expect(onTokenMock).toHaveBeenCalledTimes(2);
+    expect(onTextMock).toHaveBeenNthCalledWith(1, 'Hello');
+    expect(onTextMock).toHaveBeenNthCalledWith(2, ' world!');
    expect(onCompletionMock).toHaveBeenCalledTimes(1);
  });
 
@@ -114,7 +111,7 @@ describe('QwenAIStream', () => {
     const onToolCallMock = vi.fn();
 
     const protocolStream = QwenAIStream(mockOpenAIStream, {
-      onToolCall: onToolCallMock,
+      onToolsCalling: onToolCallMock,
    });
 
    const decoder = new TextDecoder();
package/src/libs/agent-runtime/utils/streams/spark.test.ts
@@ -1,41 +1,42 @@
+import type OpenAI from 'openai';
 import { beforeAll, describe, expect, it, vi } from 'vitest';
+
 import { SparkAIStream, transformSparkResponseToStream } from './spark';
-import type OpenAI from 'openai';
 
 describe('SparkAIStream', () => {
   beforeAll(() => {});
 
   it('should transform non-streaming response to stream', async () => {
     const mockResponse = {
-      id: "cha000ceba6@dx193d200b580b8f3532",
-      object: "chat.completion",
+      id: 'cha000ceba6@dx193d200b580b8f3532',
+      object: 'chat.completion',
      created: 1734395014,
-      model: "max-32k",
+      model: 'max-32k',
      choices: [
        {
          message: {
-            role: "assistant",
-            content: "",
+            role: 'assistant',
+            content: '',
            refusal: null,
            tool_calls: {
-              type: "function",
+              type: 'function',
              function: {
                arguments: '{"city":"Shanghai"}',
-                name: "realtime-weather____fetchCurrentWeather"
+                name: 'realtime-weather____fetchCurrentWeather',
              },
-              id: "call_1"
-            }
+              id: 'call_1',
+            },
          },
          index: 0,
          logprobs: null,
-          finish_reason: "tool_calls"
-        }
+          finish_reason: 'tool_calls',
+        },
      ],
      usage: {
        prompt_tokens: 8,
        completion_tokens: 0,
-        total_tokens: 8
-      }
+        total_tokens: 8,
+      },
    } as unknown as OpenAI.ChatCompletion;
 
    const stream = transformSparkResponseToStream(mockResponse);
@@ -48,15 +49,17 @@ describe('SparkAIStream', () => {
     }
 
     expect(chunks).toHaveLength(2);
-    expect(chunks[0].choices[0].delta.tool_calls).toEqual([
-
-
-
+    expect(chunks[0].choices[0].delta.tool_calls).toEqual([
+      {
+        function: {
+          arguments: '{"city":"Shanghai"}',
+          name: 'realtime-weather____fetchCurrentWeather',
+        },
+        id: 'call_1',
+        index: 0,
+        type: 'function',
      },
-
-      index: 0,
-      type: "function"
-    }]);
+    ]);
    expect(chunks[1].choices[0].finish_reason).toBeDefined();
  });
 
@@ -64,36 +67,36 @@ describe('SparkAIStream', () => {
     const mockStream = new ReadableStream({
       start(controller) {
         controller.enqueue({
-          id: "cha000b0bf9@dx193d1ffa61cb894532",
-          object: "chat.completion.chunk",
+          id: 'cha000b0bf9@dx193d1ffa61cb894532',
+          object: 'chat.completion.chunk',
          created: 1734395014,
-          model: "max-32k",
+          model: 'max-32k',
          choices: [
            {
              delta: {
-                role: "assistant",
-                content: "",
+                role: 'assistant',
+                content: '',
                tool_calls: {
-                  type: "function",
+                  type: 'function',
                  function: {
                    arguments: '{"city":"Shanghai"}',
-                    name: "realtime-weather____fetchCurrentWeather"
+                    name: 'realtime-weather____fetchCurrentWeather',
                  },
-                  id: "call_1"
-                }
+                  id: 'call_1',
+                },
              },
-              index: 0
-            }
-          ]
+              index: 0,
+            },
+          ],
        } as unknown as OpenAI.ChatCompletionChunk);
        controller.close();
-      }
+      },
    });
 
    const onToolCallMock = vi.fn();
 
    const protocolStream = SparkAIStream(mockStream, {
-      onToolCall: onToolCallMock,
+      onToolsCalling: onToolCallMock,
    });
 
    const decoder = new TextDecoder();
@@ -107,7 +110,7 @@ describe('SparkAIStream', () => {
     expect(chunks).toEqual([
       'id: cha000b0bf9@dx193d1ffa61cb894532\n',
       'event: tool_calls\n',
-      `data: [{\"function\":{\"arguments\":\"{\\\"city\\\":\\\"Shanghai\\\"}\",\"name\":\"realtime-weather____fetchCurrentWeather\"},\"id\":\"call_1\",\"index\":0,\"type\":\"function\"}]\n\n`
+      `data: [{\"function\":{\"arguments\":\"{\\\"city\\\":\\\"Shanghai\\\"}\",\"name\":\"realtime-weather____fetchCurrentWeather\"},\"id\":\"call_1\",\"index\":0,\"type\":\"function\"}]\n\n`,
    ]);
 
    expect(onToolCallMock).toHaveBeenCalledTimes(1);
@@ -117,43 +120,43 @@ describe('SparkAIStream', () => {
     const mockStream = new ReadableStream({
       start(controller) {
         controller.enqueue({
-          id: "test-id",
-          object: "chat.completion.chunk",
+          id: 'test-id',
+          object: 'chat.completion.chunk',
          created: 1734395014,
-          model: "max-32k",
+          model: 'max-32k',
          choices: [
            {
              delta: {
-                content: "Hello",
-                role: "assistant"
+                content: 'Hello',
+                role: 'assistant',
              },
-              index: 0
-            }
-          ]
+              index: 0,
+            },
+          ],
        } as OpenAI.ChatCompletionChunk);
        controller.enqueue({
-          id: "test-id",
-          object: "chat.completion.chunk",
+          id: 'test-id',
+          object: 'chat.completion.chunk',
          created: 1734395014,
-          model: "max-32k",
+          model: 'max-32k',
          choices: [
            {
              delta: {
-                content: " World",
-                role: "assistant"
+                content: ' World',
+                role: 'assistant',
              },
-              index: 0
-            }
-          ]
+              index: 0,
+            },
+          ],
        } as OpenAI.ChatCompletionChunk);
        controller.close();
-      }
+      },
    });
 
    const onTextMock = vi.fn();
-
+
    const protocolStream = SparkAIStream(mockStream, {
-      onText: onTextMock
+      onText: onTextMock,
    });
 
    const decoder = new TextDecoder();
@@ -170,18 +173,18 @@ describe('SparkAIStream', () => {
       'data: "Hello"\n\n',
       'id: test-id\n',
       'event: text\n',
-      'data: " World"\n\n'
+      'data: " World"\n\n',
    ]);
 
-    expect(onTextMock).toHaveBeenNthCalledWith(1, '"Hello"');
-    expect(onTextMock).toHaveBeenNthCalledWith(2, '" World"');
+    expect(onTextMock).toHaveBeenNthCalledWith(1, 'Hello');
+    expect(onTextMock).toHaveBeenNthCalledWith(2, ' World');
  });
 
  it('should handle empty stream', async () => {
    const mockStream = new ReadableStream({
      start(controller) {
        controller.close();
-      }
+      },
    });
 
    const protocolStream = SparkAIStream(mockStream);
package/src/libs/agent-runtime/utils/streams/vertex-ai.test.ts
@@ -99,15 +99,13 @@ describe('VertexAIStream', () => {
 
     const onStartMock = vi.fn();
     const onTextMock = vi.fn();
-    const onTokenMock = vi.fn();
    const onToolCallMock = vi.fn();
    const onCompletionMock = vi.fn();
 
    const protocolStream = VertexAIStream(mockGoogleStream, {
      onStart: onStartMock,
      onText: onTextMock,
-      onToken: onTokenMock,
-      onToolCall: onToolCallMock,
+      onToolsCalling: onToolCallMock,
      onCompletion: onCompletionMock,
    });
 
@@ -132,7 +130,7 @@ describe('VertexAIStream', () => {
     ]);
 
     expect(onStartMock).toHaveBeenCalledTimes(1);
-    expect(
+    expect(onTextMock).toHaveBeenCalledTimes(2);
    expect(onCompletionMock).toHaveBeenCalledTimes(1);
  });
 
@@ -202,15 +200,13 @@ describe('VertexAIStream', () => {
 
     const onStartMock = vi.fn();
     const onTextMock = vi.fn();
-    const onTokenMock = vi.fn();
    const onToolCallMock = vi.fn();
    const onCompletionMock = vi.fn();
 
    const protocolStream = VertexAIStream(mockGoogleStream, {
      onStart: onStartMock,
      onText: onTextMock,
-      onToken: onTokenMock,
-      onToolCall: onToolCallMock,
+      onToolsCalling: onToolCallMock,
      onCompletion: onCompletionMock,
    });
 
package/src/libs/agent-runtime/xai/index.ts
@@ -12,6 +12,16 @@ export const LobeXAI = LobeOpenAICompatibleFactory({
   chatCompletion: {
     // xAI API does not support stream_options: { include_usage: true }
     excludeUsage: true,
+    handlePayload: (payload) => {
+      const { frequency_penalty, model, presence_penalty, ...rest } = payload;
+
+      return {
+        ...rest,
+        frequency_penalty: model.includes('grok-3-mini') ? undefined : frequency_penalty,
+        model,
+        presence_penalty: model.includes('grok-3-mini') ? undefined : presence_penalty,
+      } as any;
+    },
  },
  debug: {
    chatCompletion: () => process.env.DEBUG_XAI_CHAT_COMPLETION === '1',
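The new `handlePayload` drops `frequency_penalty` and `presence_penalty` whenever the model id contains `grok-3-mini` (the fields become `undefined`, so they are omitted when the request body is serialized) and passes every other model's payload through untouched. A standalone restatement of the mapping (the model ids below are examples, not taken from the diff):

```ts
// Standalone restatement of the handlePayload mapping above.
const adaptPayload = (payload: {
  frequency_penalty?: number;
  model: string;
  presence_penalty?: number;
}) => {
  const { frequency_penalty, model, presence_penalty, ...rest } = payload;
  const isGrok3Mini = model.includes('grok-3-mini');

  return {
    ...rest,
    frequency_penalty: isGrok3Mini ? undefined : frequency_penalty,
    model,
    presence_penalty: isGrok3Mini ? undefined : presence_penalty,
  };
};

adaptPayload({ frequency_penalty: 0.5, model: 'grok-3-mini-beta' });
// -> penalties are undefined, so JSON.stringify omits them
adaptPayload({ frequency_penalty: 0.5, model: 'grok-2-1212' });
// -> frequency_penalty survives unchanged
```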
package/src/libs/agent-runtime/zhipu/index.test.ts
@@ -63,7 +63,7 @@ describe('LobeZhipuAI', () => {
     // prepare the callback and headers
     const mockCallback: ChatStreamCallbacks = {
       onStart: vi.fn(),
-      onToken: vi.fn(),
+      onText: vi.fn(),
    };
    const mockHeaders = { 'Custom-Header': 'TestValue' };
 
@@ -80,7 +80,7 @@ describe('LobeZhipuAI', () => {
     // verify the callback was invoked
     await result.text(); // make sure the stream is consumed
     expect(mockCallback.onStart).toHaveBeenCalled();
-    expect(mockCallback.
+    expect(mockCallback.onText).toHaveBeenCalledWith('hello');
 
    // verify headers are passed through correctly
    expect(result.headers.get('Custom-Header')).toEqual('TestValue');
package/src/server/modules/AgentRuntime/index.ts
@@ -1,22 +1,10 @@
 import { getLLMConfig } from '@/config/llm';
 import { JWTPayload } from '@/const/auth';
-import {
-import {
-  LOBE_CHAT_OBSERVATION_ID,
-  LOBE_CHAT_TRACE_ID,
-  TracePayload,
-  TraceTagMap,
-} from '@/const/trace';
-import { AgentRuntime, ChatStreamPayload, ModelProvider } from '@/libs/agent-runtime';
-import { TraceClient } from '@/libs/traces';
+import { AgentRuntime, ModelProvider } from '@/libs/agent-runtime';
 
 import apiKeyManager from './apiKeyManager';
 
-export interface AgentChatOptions {
-  enableTrace?: boolean;
-  provider: string;
-  trace?: TracePayload;
-}
+export * from './trace';
 
 /**
  * Retrieves the options object from environment and apikeymanager
@@ -26,7 +14,7 @@ export interface AgentChatOptions {
  * @param payload - The JWT payload.
  * @returns The options object.
  */
-const
+const getParamsFromPayload = (provider: string, payload: JWTPayload) => {
   const llmConfig = getLLMConfig() as Record<string, any>;
 
   switch (provider) {
@@ -131,66 +119,7 @@ export const initAgentRuntimeWithUserPayload = (
   params: any = {},
 ) => {
   return AgentRuntime.initializeWithProvider(provider, {
-    ...
+    ...getParamsFromPayload(provider, payload),
    ...params,
  });
 };
-
-export const createTraceOptions = (
-  payload: ChatStreamPayload,
-  { trace: tracePayload, provider }: AgentChatOptions,
-) => {
-  const { messages, model, tools, ...parameters } = payload;
-  // create a trace to monitor the completion
-  const traceClient = new TraceClient();
-  const trace = traceClient.createTrace({
-    id: tracePayload?.traceId,
-    input: messages,
-    metadata: { provider },
-    name: tracePayload?.traceName,
-    sessionId: `${tracePayload?.sessionId || INBOX_SESSION_ID}@${tracePayload?.topicId || 'start'}`,
-    tags: tracePayload?.tags,
-    userId: tracePayload?.userId,
-  });
-
-  const generation = trace?.generation({
-    input: messages,
-    metadata: { provider },
-    model,
-    modelParameters: parameters as any,
-    name: `Chat Completion (${provider})`,
-    startTime: new Date(),
-  });
-
-  return {
-    callback: {
-      experimental_onToolCall: async () => {
-        trace?.update({
-          tags: [...(tracePayload?.tags || []), TraceTagMap.ToolsCall],
-        });
-      },
-
-      onCompletion: async (completion: string) => {
-        generation?.update({
-          endTime: new Date(),
-          metadata: { provider, tools },
-          output: completion,
-        });
-
-        trace?.update({ output: completion });
-      },
-
-      onFinal: async () => {
-        await traceClient.shutdownAsync();
-      },
-
-      onStart: () => {
-        generation?.update({ completionStartTime: new Date() });
-      },
-    },
-    headers: {
-      [LOBE_CHAT_OBSERVATION_ID]: generation?.id,
-      [LOBE_CHAT_TRACE_ID]: trace?.id,
-    },
-  };
-};