@lobehub/chat 1.68.6 → 1.68.8
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +50 -0
- package/changelog/v1.json +18 -0
- package/package.json +1 -1
- package/src/app/(backend)/webapi/chat/[provider]/route.test.ts +2 -2
- package/src/database/server/models/aiModel.ts +1 -0
- package/src/libs/agent-runtime/AgentRuntime.test.ts +76 -255
- package/src/libs/agent-runtime/AgentRuntime.ts +13 -338
- package/src/libs/agent-runtime/azureOpenai/index.test.ts +9 -9
- package/src/libs/agent-runtime/azureOpenai/index.ts +6 -6
- package/src/libs/agent-runtime/openrouter/__snapshots__/index.test.ts.snap +898 -48
- package/src/libs/agent-runtime/openrouter/index.ts +24 -8
- package/src/libs/agent-runtime/openrouter/type.ts +11 -0
- package/src/libs/agent-runtime/runtimeMap.ts +97 -0
- package/src/libs/agent-runtime/vertexai/index.ts +3 -1
- package/src/server/modules/AgentRuntime/index.ts +3 -2
- package/src/services/chat.ts +4 -6
- package/src/types/llm.ts +1 -1
package/CHANGELOG.md
CHANGED
@@ -2,6 +2,56 @@

# Changelog

+### [Version 1.68.8](https://github.com/lobehub/lobe-chat/compare/v1.68.7...v1.68.8)
+
+<sup>Released on **2025-03-05**</sup>
+
+#### 💄 Styles
+
+- **misc**: Improve openrouter models info.
+
+<br/>
+
+<details>
+<summary><kbd>Improvements and Fixes</kbd></summary>
+
+#### Styles
+
+- **misc**: Improve openrouter models info, closes [#6708](https://github.com/lobehub/lobe-chat/issues/6708) ([5693e68](https://github.com/lobehub/lobe-chat/commit/5693e68))
+
+</details>
+
+<div align="right">
+
+[](#readme-top)
+
+</div>
+
+### [Version 1.68.7](https://github.com/lobehub/lobe-chat/compare/v1.68.6...v1.68.7)
+
+<sup>Released on **2025-03-05**</sup>
+
+#### ♻ Code Refactoring
+
+- **misc**: Refactor agent runtime to better code format.
+
+<br/>
+
+<details>
+<summary><kbd>Improvements and Fixes</kbd></summary>
+
+#### Code refactoring
+
+- **misc**: Refactor agent runtime to better code format, closes [#6284](https://github.com/lobehub/lobe-chat/issues/6284) ([fc1ed4a](https://github.com/lobehub/lobe-chat/commit/fc1ed4a))
+
+</details>
+
+<div align="right">
+
+[](#readme-top)
+
+</div>
+
### [Version 1.68.6](https://github.com/lobehub/lobe-chat/compare/v1.68.5...v1.68.6)

<sup>Released on **2025-03-05**</sup>
package/changelog/v1.json
CHANGED
@@ -1,4 +1,22 @@
[
+  {
+    "children": {
+      "improvements": [
+        "Improve openrouter models info."
+      ]
+    },
+    "date": "2025-03-05",
+    "version": "1.68.8"
+  },
+  {
+    "children": {
+      "improvements": [
+        "Refactor agent runtime to better code format."
+      ]
+    },
+    "date": "2025-03-05",
+    "version": "1.68.7"
+  },
  {
    "children": {
      "fixes": [
package/package.json
CHANGED
@@ -1,6 +1,6 @@
{
  "name": "@lobehub/chat",
-  "version": "1.68.6",
+  "version": "1.68.8",
  "description": "Lobe Chat - an open-source, high-performance chatbot framework that supports speech synthesis, multimodal, and extensible Function Call plugin system. Supports one-click free deployment of your private ChatGPT/LLM web application.",
  "keywords": [
    "framework",
package/src/app/(backend)/webapi/chat/[provider]/route.test.ts
CHANGED
@@ -71,7 +71,7 @@ describe('POST handler', () => {

    // migrate to new AgentRuntime init api
    const spy = vi
-      .spyOn(AgentRuntime, 'initializeWithProviderOptions')
+      .spyOn(AgentRuntime, 'initializeWithProvider')
      .mockResolvedValue(new AgentRuntime(mockRuntime));

    // call the POST function
@@ -117,7 +117,7 @@ describe('POST handler', () => {

    const mockRuntime: LobeRuntimeAI = { baseURL: 'abc', chat: vi.fn() };

-    vi.spyOn(AgentRuntime, 'initializeWithProviderOptions').mockResolvedValue(
+    vi.spyOn(AgentRuntime, 'initializeWithProvider').mockResolvedValue(
      new AgentRuntime(mockRuntime),
    );

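The two hunks above capture the API rename that drives most of this release's diff: AgentRuntime's static initializer is now initializeWithProvider(provider, payload) rather than initializeWithProviderOptions(provider, { [provider]: payload }). A minimal sketch of the migration (inside an async context), using the placeholder key and endpoint values that appear in the test fixtures:

import { AgentRuntime, ModelProvider } from '@/libs/agent-runtime';

const payload = { apiKey: 'user-openai-key', baseURL: 'user-endpoint' };

// Before (v1.68.6 and earlier): an options bag keyed by provider id.
// const runtime = await AgentRuntime.initializeWithProviderOptions(ModelProvider.OpenAI, {
//   openai: payload,
// });

// After (v1.68.7+): the provider id plus the payload itself.
const runtime = await AgentRuntime.initializeWithProvider(ModelProvider.OpenAI, payload);

// runtime.chat(...) is unchanged by the rename, as the chat-method tests below show.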
package/src/libs/agent-runtime/AgentRuntime.test.ts
CHANGED
@@ -1,265 +1,96 @@
// @vitest-environment node
import { Langfuse } from 'langfuse';
import { LangfuseGenerationClient, LangfuseTraceClient } from 'langfuse-core';
-import { ClientOptions } from 'openai';
import { beforeEach, describe, expect, it, vi } from 'vitest';

import * as langfuseCfg from '@/config/langfuse';
import { JWTPayload } from '@/const/auth';
import { TraceNameMap } from '@/const/trace';
-import {
-  AgentRuntime,
-  ChatStreamPayload,
-  LobeAnthropicAI,
-  LobeAzureOpenAI,
-  LobeBedrockAI,
-  LobeGoogleAI,
-  LobeMistralAI,
-  LobeMoonshotAI,
-  LobeOllamaAI,
-  LobeOpenAI,
-  LobeOpenRouterAI,
-  LobePerplexityAI,
-  LobeRuntimeAI,
-  LobeTogetherAI,
-  LobeZhipuAI,
-  ModelProvider,
-} from '@/libs/agent-runtime';
-import { LobeStepfunAI } from '@/libs/agent-runtime/stepfun';
+import { AgentRuntime, ChatStreamPayload, LobeOpenAI, ModelProvider } from '@/libs/agent-runtime';
+import { providerRuntimeMap } from '@/libs/agent-runtime/runtimeMap';
import { createTraceOptions } from '@/server/modules/AgentRuntime';

import { AgentChatOptions } from './AgentRuntime';
-import { LobeBedrockAIParams } from './bedrock';
-// mock dependencies
-vi.mock('@/config/server', () => ({
-  getServerConfig: vi.fn(() => ({
-    // make sure every provider gets the required config
-    OPENAI_API_KEY: 'test-openai-key',
-    GOOGLE_API_KEY: 'test-google-key',
-    AZURE_API_KEY: 'test-azure-key',
-    AZURE_ENDPOINT: 'endpoint',
-    ZHIPU_API_KEY: 'test.zhipu-key',
-    MOONSHOT_API_KEY: 'test-moonshot-key',
-    AWS_SECRET_ACCESS_KEY: 'test-aws-secret',
-    AWS_ACCESS_KEY_ID: 'test-aws-id',
-    AWS_REGION: 'test-aws-region',
-    OLLAMA_PROXY_URL: 'test-ollama-url',
-    PERPLEXITY_API_KEY: 'test-perplexity-key',
-    ANTHROPIC_API_KEY: 'test-anthropic-key',
-    MISTRAL_API_KEY: 'test-mistral-key',
-    OPENROUTER_API_KEY: 'test-openrouter-key',
-    TOGETHERAI_API_KEY: 'test-togetherai-key',
-  })),
-}));

-      expect(
-    }
-    it('should initialize with AzureAI correctly', async () => {
-      const jwtPayload = {
-        apiKey: 'user-azure-key',
-        baseURL: 'user-azure-endpoint',
-        apiVersion: '2024-06-01',
-      };
-      const runtime = await AgentRuntime.initializeWithProviderOptions(ModelProvider.Azure, {
-        azure: jwtPayload,
-      });
-      expect(runtime['_runtime']).toBeInstanceOf(LobeAzureOpenAI);
-    });
-  });
-  describe('ZhiPu AI provider', () => {
-    it('should initialize correctly', async () => {
-      const jwtPayload: JWTPayload = { apiKey: 'zhipu.user-key' };
-      const runtime = await AgentRuntime.initializeWithProviderOptions(ModelProvider.ZhiPu, {
-        zhipu: jwtPayload,
-      });
-      // assume LobeZhipuAI is the implementation class of the ZhiPu provider
-      expect(runtime['_runtime']).toBeInstanceOf(LobeZhipuAI);
-    });
-  });
-  describe('Google provider', () => {
-    it('should initialize correctly', async () => {
-      const jwtPayload: JWTPayload = { apiKey: 'user-google-key' };
-      const runtime = await AgentRuntime.initializeWithProviderOptions(ModelProvider.Google, {
-        google: jwtPayload,
-      });
-      // assume LobeGoogleAI is the implementation class of the Google provider
-      expect(runtime['_runtime']).toBeInstanceOf(LobeGoogleAI);
-    });
-  });
-  describe('Moonshot AI provider', () => {
-    it('should initialize correctly', async () => {
-      const jwtPayload: JWTPayload = { apiKey: 'user-moonshot-key' };
-      const runtime = await AgentRuntime.initializeWithProviderOptions(ModelProvider.Moonshot, {
-        moonshot: jwtPayload,
-      });
-      // assume LobeMoonshotAI is the implementation class of the Moonshot provider
-      expect(runtime['_runtime']).toBeInstanceOf(LobeMoonshotAI);
-    });
-  });
-  describe('Bedrock AI provider', () => {
-    it('should initialize correctly with payload apiKey', async () => {
-      const jwtPayload: LobeBedrockAIParams = {
-        accessKeyId: 'user-aws-id',
-        accessKeySecret: 'user-aws-secret',
-        region: 'user-aws-region',
-      };
-      const runtime = await AgentRuntime.initializeWithProviderOptions(ModelProvider.Bedrock, {
-        bedrock: jwtPayload,
-      });
-      // assume LobeBedrockAI is the implementation class of the Bedrock provider
-      expect(runtime['_runtime']).toBeInstanceOf(LobeBedrockAI);
-    });
-  });
-  describe('Ollama provider', () => {
-    it('should initialize correctly', async () => {
-      const jwtPayload: JWTPayload = { baseURL: 'https://user-ollama-url' };
-      const runtime = await AgentRuntime.initializeWithProviderOptions(ModelProvider.Ollama, {
-        ollama: jwtPayload,
-      });
-      // assume LobeOllamaAI is the implementation class of the Ollama provider
-      expect(runtime['_runtime']).toBeInstanceOf(LobeOllamaAI);
-    });
-  });
-  describe('Perplexity AI provider', () => {
-    it('should initialize correctly', async () => {
-      const jwtPayload: JWTPayload = { apiKey: 'user-perplexity-key' };
-      const runtime = await AgentRuntime.initializeWithProviderOptions(ModelProvider.Perplexity, {
-        perplexity: jwtPayload,
-      });
-      // assume LobePerplexityAI is the implementation class of the Perplexity provider
-      expect(runtime['_runtime']).toBeInstanceOf(LobePerplexityAI);
-    });
-  });
-  describe('Anthropic AI provider', () => {
-    it('should initialize correctly', async () => {
-      const jwtPayload: JWTPayload = { apiKey: 'user-anthropic-key' };
-      const runtime = await AgentRuntime.initializeWithProviderOptions(ModelProvider.Anthropic, {
-        anthropic: jwtPayload,
-      });
-      // assume LobeAnthropicAI is the implementation class of the Anthropic provider
-      expect(runtime['_runtime']).toBeInstanceOf(LobeAnthropicAI);
-    });
-  });
-  describe('Mistral AI provider', () => {
-    it('should initialize correctly', async () => {
-      const jwtPayload: JWTPayload = { apiKey: 'user-mistral-key' };
-      const runtime = await AgentRuntime.initializeWithProviderOptions(ModelProvider.Mistral, {
-        mistral: jwtPayload,
-      });
-      // assume LobeMistralAI is the implementation class of the Mistral provider
-      expect(runtime['_runtime']).toBeInstanceOf(LobeMistralAI);
-    });
+const specialProviders = [
+  { id: 'openai', payload: { apiKey: 'user-openai-key', baseURL: 'user-endpoint' } },
+  {
+    id: ModelProvider.Azure,
+    payload: {
+      apiKey: 'user-azure-key',
+      baseURL: 'user-azure-endpoint',
+      apiVersion: '2024-06-01',
+    },
+  },
+  {
+    id: ModelProvider.AzureAI,
+    payload: {
+      apiKey: 'user-azure-key',
+      baseURL: 'user-azure-endpoint',
+    },
+  },
+  {
+    id: ModelProvider.Bedrock,
+    payload: {
+      accessKeyId: 'user-aws-id',
+      accessKeySecret: 'user-aws-secret',
+      region: 'user-aws-region',
+    },
+  },
+  {
+    id: ModelProvider.Ollama,
+    payload: { baseURL: 'https://user-ollama-url' },
+  },
+  {
+    id: ModelProvider.Cloudflare,
+    payload: { baseURLOrAccountID: 'https://user-ollama-url' },
+  },
+];
+
+const testRuntime = (providerId: string, payload?: any) => {
+  describe(`${providerId} provider runtime`, () => {
+    it('should initialize correctly', async () => {
+      const jwtPayload: JWTPayload = { apiKey: 'user-key', ...payload };
+      const runtime = await AgentRuntime.initializeWithProvider(providerId, jwtPayload);
+
+      // @ts-ignore
+      expect(runtime['_runtime']).toBeInstanceOf(providerRuntimeMap[providerId]);
+
+      if (payload?.baseURL) {
+        expect(runtime['_runtime'].baseURL).toBe(payload.baseURL);
+      }
    });
+  });
+};
+
+let mockModelRuntime: AgentRuntime;
+beforeEach(async () => {
+  const jwtPayload: JWTPayload = { apiKey: 'user-openai-key', baseURL: 'user-endpoint' };
+  mockModelRuntime = await AgentRuntime.initializeWithProvider(
+    ModelProvider.OpenAI,
+    jwtPayload,
+  );
+});

-    const runtime = await AgentRuntime.initializeWithProviderOptions(ModelProvider.OpenRouter, {
-      openrouter: jwtPayload,
-    });
+describe('AgentRuntime', () => {
+  describe('should initialize with various providers', () => {
+    const providers = Object.values(ModelProvider);

-    expect(runtime['_runtime']).toBeInstanceOf(LobeOpenRouterAI);
-    });
-  });
+    const specialProviderIds = [ModelProvider.VertexAI, ...specialProviders.map((p) => p.id)];

-    const runtime = await AgentRuntime.initializeWithProviderOptions(ModelProvider.Stepfun, {
-      stepfun: jwtPayload,
-    });
+    const generalTestProviders = providers.filter(
+      (provider) => !specialProviderIds.includes(provider),
+    );

+    generalTestProviders.forEach((provider) => {
+      testRuntime(provider);
    });

-  it('should initialize correctly', async () => {
-    const jwtPayload: JWTPayload = { apiKey: 'user-togetherai-key' };
-    const runtime = await AgentRuntime.initializeWithProviderOptions(ModelProvider.TogetherAI, {
-      togetherai: jwtPayload,
-    });
-    // assume LobeTogetherAI is the implementation class of the TogetherAI provider
-    expect(runtime['_runtime']).toBeInstanceOf(LobeTogetherAI);
-    });
-  });
+    specialProviders.forEach(({ id, payload }) => testRuntime(id, payload));
  });

  describe('AgentRuntime chat method', () => {
    it('should run correctly', async () => {
-      const jwtPayload: JWTPayload = { apiKey: 'user-openai-key', baseURL: 'user-endpoint' };
-      const runtime = await AgentRuntime.initializeWithProviderOptions(ModelProvider.OpenAI, {
-        openai: jwtPayload,
-      });
      const payload: ChatStreamPayload = {
        messages: [{ role: 'user', content: 'Hello, world!' }],
        model: 'text-davinci-002',
@@ -268,14 +99,9 @@ describe('AgentRuntime', () => {

      vi.spyOn(LobeOpenAI.prototype, 'chat').mockResolvedValue(new Response(''));

-      await
+      await mockModelRuntime.chat(payload);
    });
    it('should handle options correctly', async () => {
-      const jwtPayload: JWTPayload = { apiKey: 'user-openai-key', baseURL: 'user-endpoint' };
-      const runtime = await AgentRuntime.initializeWithProviderOptions(ModelProvider.OpenAI, {
-        openai: jwtPayload,
-      });
      const payload: ChatStreamPayload = {
        messages: [{ role: 'user', content: 'Hello, world!' }],
        model: 'text-davinci-002',
@@ -296,15 +122,10 @@ describe('AgentRuntime', () => {

      vi.spyOn(LobeOpenAI.prototype, 'chat').mockResolvedValue(new Response(''));

-      await
+      await mockModelRuntime.chat(payload, createTraceOptions(payload, options));
    });

    describe('callback', async () => {
-      const jwtPayload: JWTPayload = { apiKey: 'user-openai-key', baseURL: 'user-endpoint' };
-      const runtime = await AgentRuntime.initializeWithProviderOptions(ModelProvider.OpenAI, {
-        openai: jwtPayload,
-      });
      const payload: ChatStreamPayload = {
        messages: [{ role: 'user', content: 'Hello, world!' }],
        model: 'text-davinci-002',
@@ -345,7 +166,7 @@ describe('AgentRuntime', () => {
        );
        vi.spyOn(LangfuseTraceClient.prototype, 'update').mockImplementation(updateMock);

-        await
+        await mockModelRuntime.chat(payload, createTraceOptions(payload, options));

        expect(updateMock).toHaveBeenCalledWith({ tags: ['Tools Call'] });
      });
@@ -366,7 +187,7 @@ describe('AgentRuntime', () => {
        },
      );

-        await
+        await mockModelRuntime.chat(payload, createTraceOptions(payload, options));

        // Verify onStart was called
        expect(updateMock).toHaveBeenCalledWith({ completionStartTime: expect.any(Date) });
@@ -389,7 +210,7 @@ describe('AgentRuntime', () => {
        },
      );

-        await
+        await mockModelRuntime.chat(payload, createTraceOptions(payload, options));

        // Verify onCompletion was called with expected output
        expect(updateMock).toHaveBeenCalledWith({
@@ -419,7 +240,7 @@ describe('AgentRuntime', () => {
        const shutdownAsyncMock = vi.fn();
        vi.spyOn(Langfuse.prototype, 'shutdownAsync').mockImplementation(shutdownAsyncMock);

-        await
+        await mockModelRuntime.chat(payload, createTraceOptions(payload, options));

        // Verify onCompletion was called with expected output
        expect(shutdownAsyncMock).toHaveBeenCalled();
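The new runtimeMap.ts (+97 lines) is not itself shown in this diff, but the rewritten test pins down its shape: providerRuntimeMap maps each ModelProvider id to the runtime class that AgentRuntime instantiates for it, which is what lets the per-provider describe blocks above collapse into the single table-driven testRuntime loop. A rough sketch of that shape, with only a few illustrative entries and with import paths borrowed from the test file rather than from the real runtimeMap.ts:

import { LobeAnthropicAI, LobeOllamaAI, LobeOpenAI, ModelProvider } from '@/libs/agent-runtime';

// Sketch only: the real map covers every ModelProvider entry.
export const providerRuntimeMap = {
  [ModelProvider.Anthropic]: LobeAnthropicAI,
  [ModelProvider.Ollama]: LobeOllamaAI,
  [ModelProvider.OpenAI]: LobeOpenAI,
  // ...one entry per provider
};

The test consumes this as expect(runtime['_runtime']).toBeInstanceOf(providerRuntimeMap[providerId]), so every provider that does not need a special initialization payload is covered by the same loop.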